path: root/drivers/gpu/drm/i915/i915_gem_request.c
author	Chris Wilson	2016-09-09 15:11:51 +0200
committer	Chris Wilson	2016-09-09 15:23:04 +0200
commit	221fe7994554cc3985fc5d761ed7e44dcae0fa52 (patch)
tree	797b1e03563c410b3fcb2e700f628f5ad5cbf29f /drivers/gpu/drm/i915/i915_gem_request.c
parent	drm/i915: Mark up all locked waiters (diff)
drm/i915: Perform a direct reset of the GPU from the waiter
If a waiter is holding the struct_mutex, then the reset worker cannot reset the GPU until the waiter returns. We do not want to return -EAGAIN from i915_wait_request, as that breaks delicate operations like i915_vma_unbind() which often cannot be restarted easily, and returning -EIO is just as useless (and has in the past proven dangerous). The remaining WARN_ON(i915_wait_request) checks serve as a valuable reminder that handling errors from an indefinite wait is tricky.

We can keep the current semantic, that once a reset is complete so is the request, by performing the reset ourselves if we hold the mutex. uevent emission is still handled by the reset worker, so it may appear slightly out of order with respect to the actual reset (and concurrent use of the device).

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160909131201.16673-11-chris@chris-wilson.co.uk
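To make the mechanism concrete, here is a small userspace analogue (an illustrative sketch only, not the kernel code; names such as do_reset() and wait_holding_lock() are invented for this example): a waiter that already holds the big lock performs the recovery itself, because the worker that would normally recover the device needs that same lock and would otherwise have to wait for the waiter to return.

	/* Userspace sketch of "reset from the waiter"; not the kernel code. */
	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
	static bool reset_pending;	/* set when a hang is detected */
	static bool request_done;

	static void do_reset(void)
	{
		/* stand-in for the device reset: recover and complete requests */
		request_done = true;
		reset_pending = false;
	}

	/* wait for the request to complete while already holding big_lock */
	static void wait_holding_lock(void)
	{
		while (!request_done) {
			if (reset_pending) {
				/*
				 * The reset worker cannot take big_lock while
				 * we sit here, so run the reset ourselves and
				 * then re-check for completion.
				 */
				do_reset();
				continue;
			}
			/* normally: sleep until the completion interrupt fires */
		}
	}

	int main(void)
	{
		pthread_mutex_lock(&big_lock);
		reset_pending = true;		/* simulate a detected hang */
		wait_holding_lock();
		pthread_mutex_unlock(&big_lock);
		printf("request completed after direct reset\n");
		return 0;
	}

In the unlocked-wait case described above, the thread would instead simply sleep and rely on the separate reset worker to recover the device and update the breadcrumb.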
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_request.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_request.c | 29 +
1 file changed, 29 insertions, 0 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 5f89801e6a16..64c370681a81 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -533,6 +533,16 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
engine->submit_request(request);
}
+static void reset_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&q->lock, flags);
+ if (list_empty(&wait->task_list))
+ __add_wait_queue(q, wait);
+ spin_unlock_irqrestore(&q->lock, flags);
+}
+
static unsigned long local_clock_us(unsigned int *cpu)
{
unsigned long t;
@@ -710,6 +720,25 @@ wakeup:
if (__i915_request_irq_complete(req))
break;
+ /* If the GPU is hung, and we hold the lock, reset the GPU
+ * and then check for completion. On a full reset, the engine's
+ * HW seqno will be advanced past us and we are complete.
+ * If we do a partial reset, we have to wait for the GPU to
+ * resume and update the breadcrumb.
+ *
+ * If we don't hold the mutex, we can just wait for the worker
+ * to come along and update the breadcrumb (either directly
+ * itself, or indirectly by recovering the GPU).
+ */
+ if (flags & I915_WAIT_LOCKED &&
+ i915_reset_in_progress(&req->i915->gpu_error)) {
+ __set_current_state(TASK_RUNNING);
+ i915_reset(req->i915);
+ reset_wait_queue(&req->i915->gpu_error.wait_queue,
+ &reset);
+ continue;
+ }
+
/* Only spin if we know the GPU is processing this request */
if (i915_spin_request(req, state, 2))
break;
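The reset_wait_queue() helper added in the first hunk re-queues the waiter's wait_queue_t only if a wake-up has already detached it (as an autoremove wake function would), so the loop above can keep listening for further reset notifications after calling i915_reset(). A minimal userspace analogue of that "add only if not already queued" pattern, with all names invented for the sketch, could look like this:

	/* Userspace sketch of re-arming a detached wait entry; not kernel code. */
	#include <stdio.h>

	struct list_node {
		struct list_node *prev, *next;
	};

	static void list_init(struct list_node *n) { n->prev = n->next = n; }
	static int list_empty(const struct list_node *n) { return n->next == n; }

	static void list_add(struct list_node *head, struct list_node *n)
	{
		n->next = head->next;
		n->prev = head;
		head->next->prev = n;
		head->next = n;
	}

	/* re-arm: queue the entry again only if a wake-up already removed it */
	static void rearm_waiter(struct list_node *queue, struct list_node *waiter)
	{
		/* the kernel version does this under q->lock with irqs disabled */
		if (list_empty(waiter))
			list_add(queue, waiter);
	}

	int main(void)
	{
		struct list_node queue, waiter;

		list_init(&queue);
		list_init(&waiter);

		rearm_waiter(&queue, &waiter);	/* first pass: adds the entry */
		rearm_waiter(&queue, &waiter);	/* already queued: no-op */
		printf("waiter queued: %s\n", list_empty(&queue) ? "no" : "yes");
		return 0;
	}

In the patch itself, the equivalent check-and-insert is done under q->lock with interrupts disabled, which is what the spin_lock_irqsave()/spin_unlock_irqrestore() pair around __add_wait_queue() provides.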