author     Chris Wilson     2014-05-22 10:16:52 +0200
committer  Daniel Vetter    2014-05-22 15:06:34 +0200
commit     340fbd8ca1c7d6006a6b6afe716c10007bbfde85 (patch)
tree       ee772a48d1320e9f75c09fe278d3887bc70dbf5b /drivers/gpu/drm/i915/i915_gem.c
parent     drm/i915: Wait for pending page flips before enabling/disabling the primary p... (diff)
drm/i915: Only discard backing storage on releasing the last ref
Before purging our pages (as opposed to copying back the contents
from the GPU), make sure that there is not an exposed CPU mmapping
through which the user can inspect the results.

Regression from

commit 5537252b6b6d71fb1a8ed7395a8e5babf91953fd
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   Tue Mar 25 13:23:06 2014 +0000

    drm/i915: Invalidate our pages under memory pressure

Testcase: igt/gem_mmap/new-object
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=79005
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Tested-by: Guo Jinxian <jinxianx.guo@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
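As context for the fix, the failure mode is easiest to see from userspace: CPU-mmap a GEM object, close its last handle, then keep reading through the mapping. The sketch below is only loosely modelled on igt/gem_mmap/new-object (it is not the igt source); the device node path is an assumption and error handling is omitted.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#include <drm/drm.h>        /* header locations may differ with the libdrm install */
#include <drm/i915_drm.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);	/* assumed device node */

	/* Create a 4 KiB GEM object backed by shmemfs. */
	struct drm_i915_gem_create create = { .size = 4096 };
	ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);

	/* CPU-mmap it; the resulting vma keeps a reference on obj->base.filp. */
	struct drm_i915_gem_mmap mmap_arg = {
		.handle = create.handle,
		.size = 4096,
	};
	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
	char *ptr = (char *)(uintptr_t)mmap_arg.addr_ptr;

	/* Drop the last GEM handle; i915_gem_free_object() runs here. */
	struct drm_gem_close close_arg = { .handle = create.handle };
	ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close_arg);

	/* The mapping is still valid: a freshly created object must still
	 * read back as zeroes through it. */
	printf("first byte after close: %d\n", ptr[0]);

	munmap(ptr, 4096);
	close(fd);
	return 0;
}

Before this patch, i915_gem_free_object() marked any not-yet-purged object I915_MADV_DONTNEED, so its pages could be purged rather than having their contents copied back from the GPU, even though the mapping above could still see them; the new discard_backing_storage() helper only allows that when the object holds the last reference to its backing filp.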
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--   drivers/gpu/drm/i915/i915_gem.c   26
1 file changed, 25 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 440979f44a1a..6d64cff37fe2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4246,6 +4246,30 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 	return obj;
 }
+static bool discard_backing_storage(struct drm_i915_gem_object *obj)
+{
+	/* If we are the last user of the backing storage (be it shmemfs
+	 * pages or stolen etc), we know that the pages are going to be
+	 * immediately released. In this case, we can then skip copying
+	 * back the contents from the GPU.
+	 */
+
+	if (obj->madv != I915_MADV_WILLNEED)
+		return false;
+
+	if (obj->base.filp == NULL)
+		return true;
+
+	/* At first glance, this looks racy, but then again so would be
+	 * userspace racing mmap against close. However, the first external
+	 * reference to the filp can only be obtained through the
+	 * i915_gem_mmap_ioctl() which safeguards us against the user
+	 * acquiring such a reference whilst we are in the middle of
+	 * freeing the object.
+	 */
+	return atomic_long_read(&obj->base.filp->f_count) == 1;
+}
+
 void i915_gem_free_object(struct drm_gem_object *gem_obj)
 {
 	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
@@ -4284,7 +4308,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	if (WARN_ON(obj->pages_pin_count))
 		obj->pages_pin_count = 0;
-	if (obj->madv != __I915_MADV_PURGED)
+	if (discard_backing_storage(obj))
 		obj->madv = I915_MADV_DONTNEED;
 	i915_gem_object_put_pages(obj);
 	i915_gem_object_free_mmap_offset(obj);
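
The first test in the new helper, obj->madv != I915_MADV_WILLNEED, keys off the purgeability state userspace sets through the madvise ioctl; only objects still marked WILLNEED go on to the filp reference-count check. As a rough illustration of where that state comes from (a sketch only: fd and handle are assumed to come from the usual DRM_IOCTL_I915_GEM_CREATE path, and error handling is omitted):

#include <stdint.h>
#include <sys/ioctl.h>

#include <drm/i915_drm.h>   /* header location may differ with libdrm */

/* Mark a buffer purgeable: the kernel may then drop its backing pages
 * under memory pressure instead of keeping their contents. */
static int mark_purgeable(int fd, uint32_t handle)
{
	struct drm_i915_gem_madvise madv = {
		.handle = handle,
		.madv = I915_MADV_DONTNEED,
	};

	if (ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv))
		return -1;

	/* madv.retained reports whether the pages were still present. */
	return madv.retained;
}

An object left in the default I915_MADV_WILLNEED state is the interesting case for this patch: only then does discard_backing_storage() look at obj->base.filp->f_count to decide whether a CPU mmapping could still observe the pages.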