path: root/drivers/gpu/drm/i915/i915_gem.c
author		Brad Volkin	2014-02-18 19:15:45 +0100
committer	Daniel Vetter	2014-03-07 22:36:59 +0100
commit		4c914c0c7c787b8f730128a8cdcca9c50b0784ab (patch)
tree		f510536f90a2f9f0d2d945e4971154a0da9ccb8d /drivers/gpu/drm/i915/i915_gem.c
parent		drm/i915: Avoid div by zero when pixel clock is large (diff)
drm/i915: Refactor shmem pread setup
The command parser is going to need the same synchronization and setup
logic, so factor it out for reuse.

v2: Add a check that the object is backed by shmem

Signed-off-by: Brad Volkin <bradley.d.volkin@intel.com>
Reviewed-by: Jani Nikula <jani.nikula@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
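As a reading aid (not part of the patch): a minimal sketch of how a caller of the new helper is expected to use it; the function name example_read_object and its body comments are hypothetical. The helper takes a pages pin, so the caller pairs it with i915_gem_object_unpin_pages() once it is done with obj->pages.

static int example_read_object(struct drm_i915_gem_object *obj)
{
	int needs_clflush = 0;
	int ret;

	/* Pins the backing pages and waits for pending GPU writes;
	 * returns -EINVAL for objects not backed by shmem (the v2 check). */
	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	if (ret)
		return ret;

	/* ... walk obj->pages here, flushing cachelines first if
	 * needs_clflush is non-zero ... */

	/* Drop the pin taken by the prepare helper. */
	i915_gem_object_unpin_pages(obj);
	return 0;
}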
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	51
1 file changed, 37 insertions, 14 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 18ea6bccbbf0..177c20722656 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -327,6 +327,42 @@ __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
 	return 0;
 }
 
+/*
+ * Pins the specified object's pages and synchronizes the object with
+ * GPU accesses. Sets needs_clflush to non-zero if the caller should
+ * flush the object from the CPU cache.
+ */
+int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
+				    int *needs_clflush)
+{
+	int ret;
+
+	*needs_clflush = 0;
+
+	if (!obj->base.filp)
+		return -EINVAL;
+
+	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
+		/* If we're not in the cpu read domain, set ourself into the gtt
+		 * read domain and manually flush cachelines (if required). This
+		 * optimizes for the case when the gpu will dirty the data
+		 * anyway again before the next pread happens. */
+		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
+							obj->cache_level);
+		ret = i915_gem_object_wait_rendering(obj, true);
+		if (ret)
+			return ret;
+	}
+
+	ret = i915_gem_object_get_pages(obj);
+	if (ret)
+		return ret;
+
+	i915_gem_object_pin_pages(obj);
+
+	return ret;
+}
+
 /* Per-page copy function for the shmem pread fastpath.
  * Flushes invalid cachelines before reading the target if
  * needs_clflush is set. */
@@ -424,23 +460,10 @@ i915_gem_shmem_pread(struct drm_device *dev,
 
 	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
 
-	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
-		/* If we're not in the cpu read domain, set ourself into the gtt
-		 * read domain and manually flush cachelines (if required). This
-		 * optimizes for the case when the gpu will dirty the data
-		 * anyway again before the next pread happens. */
-		needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
-		ret = i915_gem_object_wait_rendering(obj, true);
-		if (ret)
-			return ret;
-	}
-
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
 	if (ret)
 		return ret;
 
-	i915_gem_object_pin_pages(obj);
-
 	offset = args->offset;
 
 	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
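
Follow-up note on how the needs_clflush result feeds the pread copy loop: a hedged, illustrative sketch only (the real shmem_pread_fast/shmem_pread_slow helpers in this file differ in detail, and copy_page_to_user_example is a made-up name), showing the flush-before-copy pattern on a kmapped page.

static int copy_page_to_user_example(struct page *page, int offset, int len,
				     char __user *user_data, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap_atomic(page);
	/* Flush stale cachelines before reading when the object is not
	 * coherent with the CPU cache, as reported by the prepare helper. */
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + offset, len);
	ret = __copy_to_user_inatomic(user_data, vaddr + offset, len);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}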