author     Chris Wilson  2018-07-06 12:39:44 +0200
committer  Chris Wilson  2018-07-06 19:22:37 +0200
commit     a523697857cdfb6a548f6caf38f42f4fe0f7d757 (patch)
tree       298d0fdefdd1db85c061a611d2752505015982bb  /drivers/gpu/drm/i915/selftests/intel_hangcheck.c
parent     drm/i915: Export i915_request_skip() (diff)
drm/i915: Start returning an error from i915_vma_move_to_active()
Handling such a late error in request construction is tricky, but to accommodate future patches which may allocate here, we potentially could err. To handle the error after already adjusting global state to track the new request, we must finish and submit the request. But we don't want to use the request as not everything is being tracked by it, so we opt to cancel the commands inside the request.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180706103947.15919-3-chris@chris-wilson.co.uk
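
For reference, the calling convention this series moves callers to looks roughly like the sketch below. It is a minimal illustration only, not code from the patch: the helper name mark_hang_batch_active() and its structure are assumed for the example, it would only build inside drivers/gpu/drm/i915 (it needs the driver's internal headers such as i915_vma.h), and only the i915_vma_move_to_active() / i915_vma_unpin() calls are the ones visible in the diff below.

/*
 * Minimal sketch (hypothetical helper, not part of the patch): with
 * i915_vma_move_to_active() now returning an error, each call is checked
 * and a failure unwinds the pins that are already held.
 */
static int mark_hang_batch_active(struct i915_vma *vma,
				  struct i915_vma *hws,
				  struct i915_request *rq)
{
	int err;

	/* Track the batch vma in the request; this may now fail. */
	err = i915_vma_move_to_active(vma, rq, 0);
	if (err)
		goto unpin;

	/* Likewise for the second (hws) vma used by the selftest. */
	err = i915_vma_move_to_active(hws, rq, 0);
	if (err)
		goto unpin;

	/* ... emit the batchbuffer on success ... */

unpin:
	/* The pins are dropped on both the success and the error path. */
	i915_vma_unpin(hws);
	i915_vma_unpin(vma);
	return err;
}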
Diffstat (limited to 'drivers/gpu/drm/i915/selftests/intel_hangcheck.c')
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_hangcheck.c  11
1 file changed, 9 insertions, 2 deletions
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index c838f7d08cb9..73462a65a330 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -130,13 +130,19 @@ static int emit_recurse_batch(struct hang *h,
 	if (err)
 		goto unpin_vma;
 
-	i915_vma_move_to_active(vma, rq, 0);
+	err = i915_vma_move_to_active(vma, rq, 0);
+	if (err)
+		goto unpin_hws;
+
 	if (!i915_gem_object_has_active_reference(vma->obj)) {
 		i915_gem_object_get(vma->obj);
 		i915_gem_object_set_active_reference(vma->obj);
 	}
 
-	i915_vma_move_to_active(hws, rq, 0);
+	err = i915_vma_move_to_active(hws, rq, 0);
+	if (err)
+		goto unpin_hws;
+
 	if (!i915_gem_object_has_active_reference(hws->obj)) {
 		i915_gem_object_get(hws->obj);
 		i915_gem_object_set_active_reference(hws->obj);
@@ -205,6 +211,7 @@ static int emit_recurse_batch(struct hang *h,
 
 	err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
 
+unpin_hws:
 	i915_vma_unpin(hws);
unpin_vma:
 	i915_vma_unpin(vma);
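
Note on the unwind order: both new error checks jump to the freshly added unpin_hws label rather than the pre-existing unpin_vma, because by the time i915_vma_move_to_active() is called both the batch vma and the hws vma have already been pinned; unpin_hws releases the hws pin and then falls through into unpin_vma, so the full unwind still runs on either failure.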