author:    Chris Wilson      2014-09-09 12:16:08 +0200
committer: Daniel Vetter     2014-09-19 14:41:14 +0200
commit:    21ab4e746d4d455fef66e2bef8c0442eb369a1d4
tree:      57d7d64809609c142e8fed7ff31be1615ab10d22  /drivers/gpu/drm/i915/i915_gem.c
parent:    drm/i915/edp: use lane count and link rate from DPCD for eDP
drm/i915: Objects on the unbound list may still have an active reference
Due to the lazy retirement semantics, even though we have unbound an
object, it may still hold onto an active reference. So in the debug
code, play safe.

v2: Export i915_gem_shrink() rather than opencoding it.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
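For a feel of the new interface outside the kernel tree, here is a rough, self-contained user-space sketch. The struct layout, toy_obj and toy_shrink are invented for illustration only; the I915_SHRINK_* flag names and the purgeable-first, then-everything call pattern are taken from the hunks below.

/* Toy model of the flag-based shrinker introduced by this patch: one shrink
 * routine whose flags select which list(s) to scan and whether to touch
 * non-purgeable objects, instead of a separate purge-only helper. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define I915_SHRINK_UNBOUND   0x1
#define I915_SHRINK_BOUND     0x2
#define I915_SHRINK_PURGEABLE 0x4

struct toy_obj {
	const char *name;
	unsigned long pages;
	bool bound;      /* stand-in for living on the driver's bound_list */
	bool purgeable;  /* backing storage marked purgeable by userspace */
	bool freed;      /* pages already reclaimed by an earlier pass */
};

static unsigned long toy_shrink(struct toy_obj *objs, size_t n,
				long target, unsigned flags)
{
	const bool purgeable_only = flags & I915_SHRINK_PURGEABLE;
	unsigned long count = 0;
	size_t i;

	for (i = 0; i < n && count < (unsigned long)target; i++) {
		struct toy_obj *obj = &objs[i];

		if (obj->freed)
			continue;
		if (obj->bound && !(flags & I915_SHRINK_BOUND))
			continue;
		if (!obj->bound && !(flags & I915_SHRINK_UNBOUND))
			continue;
		if (purgeable_only && !obj->purgeable)
			continue;

		obj->freed = true;
		count += obj->pages;
		printf("reclaimed %s (%lu pages)\n", obj->name, obj->pages);
	}
	return count;
}

int main(void)
{
	struct toy_obj objs[] = {
		{ "scanout", 256, true,  false, false },
		{ "scratch",  64, false, true,  false },
		{ "batch",    32, true,  true,  false },
	};
	long target = 300;

	/* Mirrors the i915_gem_shrinker_scan() hunk: reclaim cheap purgeable
	 * pages first, then fall back to everything if the target was not
	 * met. */
	unsigned long freed = toy_shrink(objs, 3, target,
					 I915_SHRINK_BOUND |
					 I915_SHRINK_UNBOUND |
					 I915_SHRINK_PURGEABLE);
	if (freed < (unsigned long)target)
		freed += toy_shrink(objs, 3, target - freed,
				    I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);

	printf("freed %lu of %ld pages requested\n", freed, target);
	return 0;
}

The same two-pass idea is what lets the real shrinker prefer purgeable objects while still being able to reach bound, non-purgeable ones under memory pressure.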
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c  111
1 file changed, 64 insertions(+), 47 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4ca3a6dcf10b..1ef6700e5d98 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -60,7 +60,6 @@ static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker,
static int i915_gem_shrinker_oom(struct notifier_block *nb,
unsigned long event,
void *ptr);
-static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static bool cpu_cache_is_coherent(struct drm_device *dev,
@@ -1741,7 +1740,11 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
* offsets on purgeable objects by truncating it and marking it purged,
* which prevents userspace from ever using that object again.
*/
- i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
+ i915_gem_shrink(dev_priv,
+ obj->base.size >> PAGE_SHIFT,
+ I915_SHRINK_BOUND |
+ I915_SHRINK_UNBOUND |
+ I915_SHRINK_PURGEABLE);
ret = drm_gem_create_mmap_offset(&obj->base);
if (ret != -ENOSPC)
goto out;
@@ -1938,12 +1941,11 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
return 0;
}
-static unsigned long
-__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
- bool purgeable_only)
+unsigned long
+i915_gem_shrink(struct drm_i915_private *dev_priv,
+ long target, unsigned flags)
{
- struct list_head still_in_list;
- struct drm_i915_gem_object *obj;
+ const bool purgeable_only = flags & I915_SHRINK_PURGEABLE;
unsigned long count = 0;
/*
@@ -1965,62 +1967,68 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
* dev->struct_mutex and so we won't ever be able to observe an
* object on the bound_list with a reference count equals 0.
*/
- INIT_LIST_HEAD(&still_in_list);
- while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
- obj = list_first_entry(&dev_priv->mm.unbound_list,
- typeof(*obj), global_list);
- list_move_tail(&obj->global_list, &still_in_list);
+ if (flags & I915_SHRINK_UNBOUND) {
+ struct list_head still_in_list;
- if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
- continue;
+ INIT_LIST_HEAD(&still_in_list);
+ while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
+ struct drm_i915_gem_object *obj;
- drm_gem_object_reference(&obj->base);
+ obj = list_first_entry(&dev_priv->mm.unbound_list,
+ typeof(*obj), global_list);
+ list_move_tail(&obj->global_list, &still_in_list);
- if (i915_gem_object_put_pages(obj) == 0)
- count += obj->base.size >> PAGE_SHIFT;
+ if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
+ continue;
- drm_gem_object_unreference(&obj->base);
+ drm_gem_object_reference(&obj->base);
+
+ if (i915_gem_object_put_pages(obj) == 0)
+ count += obj->base.size >> PAGE_SHIFT;
+
+ drm_gem_object_unreference(&obj->base);
+ }
+ list_splice(&still_in_list, &dev_priv->mm.unbound_list);
}
- list_splice(&still_in_list, &dev_priv->mm.unbound_list);
- INIT_LIST_HEAD(&still_in_list);
- while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
- struct i915_vma *vma, *v;
+ if (flags & I915_SHRINK_BOUND) {
+ struct list_head still_in_list;
- obj = list_first_entry(&dev_priv->mm.bound_list,
- typeof(*obj), global_list);
- list_move_tail(&obj->global_list, &still_in_list);
+ INIT_LIST_HEAD(&still_in_list);
+ while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma, *v;
- if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
- continue;
+ obj = list_first_entry(&dev_priv->mm.bound_list,
+ typeof(*obj), global_list);
+ list_move_tail(&obj->global_list, &still_in_list);
- drm_gem_object_reference(&obj->base);
+ if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
+ continue;
- list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
- if (i915_vma_unbind(vma))
- break;
+ drm_gem_object_reference(&obj->base);
- if (i915_gem_object_put_pages(obj) == 0)
- count += obj->base.size >> PAGE_SHIFT;
+ list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
+ if (i915_vma_unbind(vma))
+ break;
- drm_gem_object_unreference(&obj->base);
+ if (i915_gem_object_put_pages(obj) == 0)
+ count += obj->base.size >> PAGE_SHIFT;
+
+ drm_gem_object_unreference(&obj->base);
+ }
+ list_splice(&still_in_list, &dev_priv->mm.bound_list);
}
- list_splice(&still_in_list, &dev_priv->mm.bound_list);
return count;
}
static unsigned long
-i915_gem_purge(struct drm_i915_private *dev_priv, long target)
-{
- return __i915_gem_shrink(dev_priv, target, true);
-}
-
-static unsigned long
i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
i915_gem_evict_everything(dev_priv->dev);
- return __i915_gem_shrink(dev_priv, LONG_MAX, false);
+ return i915_gem_shrink(dev_priv, LONG_MAX,
+ I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
}
static int
@@ -2067,7 +2075,11 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
for (i = 0; i < page_count; i++) {
page = shmem_read_mapping_page_gfp(mapping, i, gfp);
if (IS_ERR(page)) {
- i915_gem_purge(dev_priv, page_count);
+ i915_gem_shrink(dev_priv,
+ page_count,
+ I915_SHRINK_BOUND |
+ I915_SHRINK_UNBOUND |
+ I915_SHRINK_PURGEABLE);
page = shmem_read_mapping_page_gfp(mapping, i, gfp);
}
if (IS_ERR(page)) {
@@ -5261,11 +5273,16 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
if (!i915_gem_shrinker_lock(dev, &unlock))
return SHRINK_STOP;
- freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
+ freed = i915_gem_shrink(dev_priv,
+ sc->nr_to_scan,
+ I915_SHRINK_BOUND |
+ I915_SHRINK_UNBOUND |
+ I915_SHRINK_PURGEABLE);
if (freed < sc->nr_to_scan)
- freed += __i915_gem_shrink(dev_priv,
- sc->nr_to_scan - freed,
- false);
+ freed += i915_gem_shrink(dev_priv,
+ sc->nr_to_scan - freed,
+ I915_SHRINK_BOUND |
+ I915_SHRINK_UNBOUND);
if (unlock)
mutex_unlock(&dev->struct_mutex);