author     Chris Wilson  2016-10-28 14:58:44 +0200
committer  Chris Wilson  2016-10-28 21:53:50 +0200
commit     d07f0e59b2c762584478920cd2d11fba2980a94a (patch)
tree       a9fc0cade87570bdec356493e6744ee5223bdaaf /drivers/gpu/drm/i915/intel_display.c
parent     drm/i915: Use lockless object free (diff)
drm/i915: Move GEM activity tracking into a common struct reservation_object
In preparation to support many distinct timelines, we need to expand the
activity tracking on the GEM object to handle more than just a request per
engine. We already use the struct reservation_object on the dma-buf to handle
many fence contexts, so integrating that into the GEM object itself is the
preferred solution. (For example, we can now share the same reservation_object
between every consumer/producer using this buffer and skip the manual
import/export via dma-buf.)

v2: Reimplement busy-ioctl (by walking the reservation object), postpone the
ABI change for another day. Similarly use the reservation object to find the
last_write request (if active and from i915) for choosing display CS flips.

Caveats:

* busy-ioctl: busy-ioctl only reports on the native fences, it will not warn
  of stalls (in set-domain-ioctl, pread/pwrite etc) if the object is being
  rendered to by external fences. It also will not report the same busy state
  as wait-ioctl (or polling on the dma-buf) in the same circumstances. On the
  plus side, it does retain reporting of which *i915* engines are engaged with
  this object.

* non-blocking atomic modesets take a step backwards as the wait for render
  completion blocks the ioctl. This is fixed in a subsequent patch to use a
  fence instead for awaiting on the rendering, see "drm/i915: Restore
  nonblocking awaits for modesetting".

* dynamic array manipulation for shared-fences in reservation is slower than
  the previous lockless static assignment (e.g. gem_exec_lut_handle runtime on
  ivb goes from 42s to 66s), mainly due to atomic operations (maintaining the
  fence refcounts).

* loss of object-level retirement callbacks, emulated by VMA retirement
  tracking.

* minor loss of object-level last activity information from debugfs, could be
  replaced with per-vma information if desired.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161028125858.23563-21-chris@chris-wilson.co.uk
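As background for the diff below: once all activity lives in the object's reservation_object, the "is this buffer still busy for a CS flip?" question reduces to testing the fences tracked there. A minimal sketch of that idea, using only reservation_object_test_signaled_rcu() exactly as the removed use_mmio_flip() code used it; the helper name example_flip_must_wait() is illustrative and not part of the patch:

#include <linux/reservation.h>

/*
 * Illustrative helper (not from this patch): with all activity tracked
 * in the object's reservation_object, deciding whether a flip must wait
 * is just a question about the fences stored there. test_all = false
 * checks only the exclusive (write) fence, matching the check the old
 * use_mmio_flip() performed on the dma-buf's reservation object.
 */
static bool example_flip_must_wait(struct reservation_object *resv)
{
	return !reservation_object_test_signaled_rcu(resv, false);
}

The patch itself goes a step further and hides even this behind i915_gem_object_last_write_engine() and i915_gem_object_wait(), as the hunks below show.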
Diffstat (limited to 'drivers/gpu/drm/i915/intel_display.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c  114
1 file changed, 21 insertions(+), 93 deletions(-)
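The hunks that follow repeatedly collapse a two-step wait (wait on the request stored in obj->last_write, then wait on the dma-buf's reservation object) into a single i915_gem_object_wait() call. A minimal sketch of why one call can now suffice, assuming the consolidated wait simply forwards to the reservation object that tracks all activity; the wrapper below is hypothetical and is built on reservation_object_wait_timeout_rcu(), the same function the removed code called directly:

#include <linux/reservation.h>
#include <linux/sched.h>	/* MAX_SCHEDULE_TIMEOUT */

/*
 * Hypothetical wrapper, for illustration only: because native i915
 * requests and imported dma-buf fences now live in the same
 * reservation_object, one wait on it covers everything that the old
 * request-wait plus dma-buf-wait pair covered.
 */
static long example_wait_rendering(struct reservation_object *resv,
				   bool interruptible)
{
	/* wait_all = false: only the exclusive (write) fence matters
	 * before scanning out the buffer, as in the removed code. */
	return reservation_object_wait_timeout_rcu(resv, false,
						   interruptible,
						   MAX_SCHEDULE_TIMEOUT);
}

reservation_object_wait_timeout_rcu() returns the remaining jiffies on success, 0 on timeout, or a negative error code, which is why the callers in the diff check for a result below zero.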
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 87135d93e828..622f733b9347 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -37,7 +37,6 @@
#include "intel_frontbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
-#include "i915_gem_dmabuf.h"
#include "intel_dsi.h"
#include "i915_trace.h"
#include <drm/drm_atomic.h>
@@ -11967,8 +11966,6 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
static bool use_mmio_flip(struct intel_engine_cs *engine,
struct drm_i915_gem_object *obj)
{
- struct reservation_object *resv;
-
/*
* This is not being used for older platforms, because
* non-availability of flip done interrupt forces us to use
@@ -11990,12 +11987,7 @@ static bool use_mmio_flip(struct intel_engine_cs *engine,
else if (i915.enable_execlists)
return true;
- resv = i915_gem_object_get_dmabuf_resv(obj);
- if (resv && !reservation_object_test_signaled_rcu(resv, false))
- return true;
-
- return engine != i915_gem_active_get_engine(&obj->last_write,
- &obj->base.dev->struct_mutex);
+ return engine != i915_gem_object_last_write_engine(obj);
}
static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
@@ -12068,17 +12060,8 @@ static void intel_mmio_flip_work_func(struct work_struct *w)
struct intel_framebuffer *intel_fb =
to_intel_framebuffer(crtc->base.primary->fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
- struct reservation_object *resv;
- if (work->flip_queued_req)
- WARN_ON(i915_wait_request(work->flip_queued_req,
- 0, MAX_SCHEDULE_TIMEOUT) < 0);
-
- /* For framebuffer backed by dmabuf, wait for fence */
- resv = i915_gem_object_get_dmabuf_resv(obj);
- if (resv)
- WARN_ON(reservation_object_wait_timeout_rcu(resv, false, false,
- MAX_SCHEDULE_TIMEOUT) < 0);
+ WARN_ON(i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT, NULL) < 0);
intel_pipe_update_start(crtc);
@@ -12279,8 +12262,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
} else if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
engine = dev_priv->engine[BCS];
} else if (INTEL_INFO(dev)->gen >= 7) {
- engine = i915_gem_active_get_engine(&obj->last_write,
- &obj->base.dev->struct_mutex);
+ engine = i915_gem_object_last_write_engine(obj);
if (engine == NULL || engine->id != RCS)
engine = dev_priv->engine[BCS];
} else {
@@ -12312,9 +12294,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
if (mmio_flip) {
INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
-
- work->flip_queued_req = i915_gem_active_get(&obj->last_write,
- &obj->base.dev->struct_mutex);
queue_work(system_unbound_wq, &work->mmio_work);
} else {
request = i915_gem_request_alloc(engine, engine->last_context);
@@ -14154,13 +14133,10 @@ static int intel_atomic_check(struct drm_device *dev,
}
static int intel_atomic_prepare_commit(struct drm_device *dev,
- struct drm_atomic_state *state,
- bool nonblock)
+ struct drm_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_plane_state *plane_state;
struct drm_crtc_state *crtc_state;
- struct drm_plane *plane;
struct drm_crtc *crtc;
int i, ret;
@@ -14183,30 +14159,6 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
ret = drm_atomic_helper_prepare_planes(dev, state);
mutex_unlock(&dev->struct_mutex);
- if (!ret && !nonblock) {
- for_each_plane_in_state(state, plane, plane_state, i) {
- struct intel_plane_state *intel_plane_state =
- to_intel_plane_state(plane_state);
- long timeout;
-
- if (!intel_plane_state->wait_req)
- continue;
-
- timeout = i915_wait_request(intel_plane_state->wait_req,
- I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT);
- if (timeout < 0) {
- /* Any hang should be swallowed by the wait */
- WARN_ON(timeout == -EIO);
- mutex_lock(&dev->struct_mutex);
- drm_atomic_helper_cleanup_planes(dev, state);
- mutex_unlock(&dev->struct_mutex);
- ret = timeout;
- break;
- }
- }
- }
-
return ret;
}
@@ -14400,26 +14352,11 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
struct drm_crtc_state *old_crtc_state;
struct drm_crtc *crtc;
struct intel_crtc_state *intel_cstate;
- struct drm_plane *plane;
- struct drm_plane_state *plane_state;
bool hw_check = intel_state->modeset;
unsigned long put_domains[I915_MAX_PIPES] = {};
unsigned crtc_vblank_mask = 0;
int i;
- for_each_plane_in_state(state, plane, plane_state, i) {
- struct intel_plane_state *intel_plane_state =
- to_intel_plane_state(plane->state);
-
- if (!intel_plane_state->wait_req)
- continue;
-
- /* EIO should be eaten, and we can't get interrupted in the
- * worker, and blocking commits have waited already. */
- WARN_ON(i915_wait_request(intel_plane_state->wait_req,
- 0, MAX_SCHEDULE_TIMEOUT) < 0);
- }
-
drm_atomic_helper_wait_for_dependencies(state);
if (intel_state->modeset) {
@@ -14626,7 +14563,7 @@ static int intel_atomic_commit(struct drm_device *dev,
INIT_WORK(&state->commit_work, intel_atomic_commit_work);
- ret = intel_atomic_prepare_commit(dev, state, nonblock);
+ ret = intel_atomic_prepare_commit(dev, state);
if (ret) {
DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
return ret;
@@ -14759,7 +14696,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
struct drm_framebuffer *fb = new_state->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
- struct reservation_object *resv;
+ long lret;
int ret = 0;
if (!obj && !old_obj)
@@ -14797,39 +14734,34 @@ intel_prepare_plane_fb(struct drm_plane *plane,
return 0;
/* For framebuffer backed by dmabuf, wait for fence */
- resv = i915_gem_object_get_dmabuf_resv(obj);
- if (resv) {
- long lret;
+ lret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT,
+ NULL);
+ if (lret == -ERESTARTSYS)
+ return lret;
- lret = reservation_object_wait_timeout_rcu(resv, false, true,
- MAX_SCHEDULE_TIMEOUT);
- if (lret == -ERESTARTSYS)
- return lret;
-
- WARN(lret < 0, "waiting returns %li\n", lret);
- }
+ WARN(lret < 0, "waiting returns %li\n", lret);
if (plane->type == DRM_PLANE_TYPE_CURSOR &&
INTEL_INFO(dev)->cursor_needs_physical) {
int align = IS_I830(dev_priv) ? 16 * 1024 : 256;
ret = i915_gem_object_attach_phys(obj, align);
- if (ret)
+ if (ret) {
DRM_DEBUG_KMS("failed to attach phys object\n");
+ return ret;
+ }
} else {
struct i915_vma *vma;
vma = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
- if (IS_ERR(vma))
- ret = PTR_ERR(vma);
- }
-
- if (ret == 0) {
- to_intel_plane_state(new_state)->wait_req =
- i915_gem_active_get(&obj->last_write,
- &obj->base.dev->struct_mutex);
+ if (IS_ERR(vma)) {
+ DRM_DEBUG_KMS("failed to pin object\n");
+ return PTR_ERR(vma);
+ }
}
- return ret;
+ return 0;
}
/**
@@ -14847,7 +14779,6 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
{
struct drm_device *dev = plane->dev;
struct intel_plane_state *old_intel_state;
- struct intel_plane_state *intel_state = to_intel_plane_state(plane->state);
struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
@@ -14859,9 +14790,6 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
!INTEL_INFO(dev)->cursor_needs_physical))
intel_unpin_fb_obj(old_state->fb, old_state->rotation);
-
- i915_gem_request_assign(&intel_state->wait_req, NULL);
- i915_gem_request_assign(&old_intel_state->wait_req, NULL);
}
int