author     John Harrison          2015-05-29 18:43:49 +0200
committer  Daniel Vetter          2015-06-23 14:02:15 +0200
commit     75289874e4484cd4702b3341b654b45b4a09b9d3 (patch)
tree       08e557f1122a41b8051099a69e65773411f10978 /drivers
parent     drm/i915: Update queue_flip() to take a request structure (diff)
drm/i915: Update add_request() to take a request structure
Now that all callers of i915_add_request() have a request pointer to hand, it is
possible to update the add_request() function to take a request pointer rather
than pulling it out of the OLR (outstanding lazy request).

For: VIZ-5115
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Tomas Elf <tomas.elf@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
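For illustration only (not part of the patch, and only meaningful inside the i915 driver sources), a minimal sketch of how a caller's flow changes: before this patch the request was looked up implicitly through the engine's outstanding lazy request; afterwards the caller passes the request it already holds. The helper do_some_work() is a hypothetical stand-in for whatever emits commands for the request; the i915 identifiers used are the ones visible in the diff below.

    /* Hypothetical stand-in for the work that builds up the request. */
    static void do_some_work(struct intel_engine_cs *ring);

    /* Old convention: the request is found via ring->outstanding_lazy_request. */
    static void submit_old(struct intel_engine_cs *ring)
    {
    	do_some_work(ring);
    	i915_add_request(ring);	/* expanded to __i915_add_request(ring, NULL, NULL, true) */
    }

    /* New convention: the caller hands over the request it already owns. */
    static void submit_new(struct drm_i915_gem_request *req)
    {
    	do_some_work(req->ring);
    	i915_add_request(req);	/* expands to __i915_add_request(req, NULL, NULL, true) */
    }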
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h              10
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c              22
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c    2
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c          2
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c              2
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c          4
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c       3
7 files changed, 23 insertions(+), 22 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 0bb6a340d1c9..da7cb141a4d5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2890,14 +2890,14 @@ void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_suspend(struct drm_device *dev);
-void __i915_add_request(struct intel_engine_cs *ring,
+void __i915_add_request(struct drm_i915_gem_request *req,
struct drm_file *file,
struct drm_i915_gem_object *batch_obj,
bool flush_caches);
-#define i915_add_request(ring) \
- __i915_add_request(ring, NULL, NULL, true)
-#define i915_add_request_no_flush(ring) \
- __i915_add_request(ring, NULL, NULL, false)
+#define i915_add_request(req) \
+ __i915_add_request(req, NULL, NULL, true)
+#define i915_add_request_no_flush(req) \
+ __i915_add_request(req, NULL, NULL, false)
int __i915_wait_request(struct drm_i915_gem_request *req,
unsigned reset_counter,
bool interruptible,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e80b08b864e7..c12bdd855be7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1158,7 +1158,7 @@ i915_gem_check_olr(struct drm_i915_gem_request *req)
WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
if (req == req->ring->outstanding_lazy_request)
- i915_add_request(req->ring);
+ i915_add_request(req);
return 0;
}
@@ -2468,25 +2468,25 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
* request is not being tracked for completion but the work itself is
* going to happen on the hardware. This would be a Bad Thing(tm).
*/
-void __i915_add_request(struct intel_engine_cs *ring,
+void __i915_add_request(struct drm_i915_gem_request *request,
struct drm_file *file,
struct drm_i915_gem_object *obj,
bool flush_caches)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
- struct drm_i915_gem_request *request;
+ struct intel_engine_cs *ring;
+ struct drm_i915_private *dev_priv;
struct intel_ringbuffer *ringbuf;
u32 request_start;
int ret;
- request = ring->outstanding_lazy_request;
if (WARN_ON(request == NULL))
return;
- if (i915.enable_execlists) {
- ringbuf = request->ctx->engine[ring->id].ringbuf;
- } else
- ringbuf = ring->buffer;
+ ring = request->ring;
+ dev_priv = ring->dev->dev_private;
+ ringbuf = request->ringbuf;
+
+ WARN_ON(request != ring->outstanding_lazy_request);
/*
* To ensure that this call will not fail, space for its emissions
@@ -3338,7 +3338,7 @@ int i915_gpu_idle(struct drm_device *dev)
return ret;
}
- i915_add_request_no_flush(req->ring);
+ i915_add_request_no_flush(req);
}
WARN_ON(ring->outstanding_lazy_request);
@@ -5122,7 +5122,7 @@ i915_gem_init_hw(struct drm_device *dev)
goto out;
}
- i915_add_request_no_flush(ring);
+ i915_add_request_no_flush(req);
}
out:
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 9968c02f76f3..896f7a117b99 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1066,7 +1066,7 @@ i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
params->ring->gpu_caches_dirty = true;
/* Add a breadcrumb for the completion of the batch buffer */
- __i915_add_request(params->ring, params->file, params->batch_obj, true);
+ __i915_add_request(params->request, params->file, params->batch_obj, true);
}
static int
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 36d8cdeaed03..7ec2421f0a97 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11497,7 +11497,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
}
if (request)
- i915_add_request_no_flush(request->ring);
+ i915_add_request_no_flush(request);
work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
work->enable_stall_check = true;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 7bcf1ec4d6aa..d142d284afd7 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -2242,7 +2242,7 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
goto error;
}
- i915_add_request_no_flush(req->ring);
+ i915_add_request_no_flush(req);
}
ctx->rcs_initialized = true;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 3adb63eb0b99..3f709042b86c 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -217,7 +217,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
WARN_ON(overlay->last_flip_req);
i915_gem_request_assign(&overlay->last_flip_req, req);
- i915_add_request(req->ring);
+ i915_add_request(req);
overlay->flip_tail = tail;
ret = i915_wait_request(overlay->last_flip_req);
@@ -299,7 +299,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
WARN_ON(overlay->last_flip_req);
i915_gem_request_assign(&overlay->last_flip_req, req);
- i915_add_request(req->ring);
+ i915_add_request(req);
return 0;
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 38fa1fad594f..049bc7fa3c42 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2167,8 +2167,9 @@ int intel_ring_idle(struct intel_engine_cs *ring)
struct drm_i915_gem_request *req;
/* We need to add any requests required to flush the objects and ring */
+ WARN_ON(ring->outstanding_lazy_request);
if (ring->outstanding_lazy_request)
- i915_add_request(ring);
+ i915_add_request(ring->outstanding_lazy_request);
/* Wait upon the last request to be completed */
if (list_empty(&ring->request_list))