author     Chris Wilson  2016-10-28 14:58:37 +0200
committer  Chris Wilson  2016-10-28 21:53:47 +0200
commit     1233e2db199dea015391db03d3478b3392201c41
tree       264eee9509dcecd6cdbeeb44200e5ce1af54b0c6  /drivers/gpu/drm/i915/i915_gem.c
parent     drm/i915: Pass around sg_table to get_pages/put_pages backend
drm/i915: Move object backing storage manipulation to its own locking
Break the allocation of the backing storage away from struct_mutex into a per-object lock. This allows parallel page allocation, provided we can do so outside of struct_mutex (i.e. set-domain-ioctl, pwrite, GTT fault), i.e. before execbuf!

The increased cost of the atomic counters is hidden behind i915_vma_pin() for the typical case of execbuf: as the object is typically bound between execbufs, the page_pin_count is static. The cost will be felt around set-domain and pwrite, but offset by the improvement from reduced struct_mutex contention.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161028125858.23563-14-chris@chris-wilson.co.uk
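A minimal sketch of the scheme the message describes (names simplified; allocate_backing_pages() is a hypothetical stand-in for the real obj->ops->get_pages() backend, not a function in the driver):

#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>

/* Simplified per-object state, mirroring obj->mm in the patch. */
struct obj_mm {
	struct mutex lock;		/* guards the backing-store fields */
	atomic_t pages_pin_count;	/* pins currently held on the pages */
	struct sg_table *pages;		/* NULL until allocated */
};

/* Hypothetical backend, standing in for obj->ops->get_pages(). */
static int allocate_backing_pages(struct obj_mm *mm);

static int pin_pages(struct obj_mm *mm)
{
	int err;

	err = mutex_lock_interruptible(&mm->lock);
	if (err)
		return err;

	if (mm->pages) {
		/* Pages already exist: just take another pin. */
		atomic_inc(&mm->pages_pin_count);
		err = 0;
		goto unlock;
	}

	err = allocate_backing_pages(mm);
	if (!err)
		/* Release ordering: mm->pages must be visible before
		 * the pin count reads as non-zero. */
		atomic_set_release(&mm->pages_pin_count, 1);
unlock:
	mutex_unlock(&mm->lock);
	return err;
}

static void unpin_pages(struct obj_mm *mm)
{
	/* Dropping a pin never takes the mutex; only reaping does. */
	atomic_dec(&mm->pages_pin_count);
}

The payoff of this split is that only allocation and reaping take mm->lock; a caller that finds the pin count already non-zero touches nothing but the atomic, which is what keeps the execbuf path cheap.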
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 93
1 file changed, 62 insertions(+), 31 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 56060aeb12e3..c8e9548adfeb 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2269,6 +2269,9 @@ void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
{
struct address_space *mapping;
+ lockdep_assert_held(&obj->mm.lock);
+ GEM_BUG_ON(obj->mm.pages);
+
switch (obj->mm.madv) {
case I915_MADV_DONTNEED:
i915_gem_object_truncate(obj);
@@ -2325,12 +2328,17 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
struct sg_table *pages;
- lockdep_assert_held(&obj->base.dev->struct_mutex);
-
if (i915_gem_object_has_pinned_pages(obj))
return;
GEM_BUG_ON(obj->bind_count);
+ if (!READ_ONCE(obj->mm.pages))
+ return;
+
+ /* May be called by shrinker from within get_pages() (on another bo) */
+ mutex_lock_nested(&obj->mm.lock, SINGLE_DEPTH_NESTING);
+ if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
+ goto unlock;
/* ->put_pages might need to allocate memory for the bit17 swizzle
* array, hence protect them from being reaped by removing them from gtt
@@ -2353,6 +2361,8 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
__i915_gem_object_reset_page_iter(obj);
obj->ops->put_pages(obj, pages);
+unlock:
+ mutex_unlock(&obj->mm.lock);
}
static unsigned int swiotlb_max_size(void)
@@ -2486,7 +2496,7 @@ err_pages:
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
- lockdep_assert_held(&obj->base.dev->struct_mutex);
+ lockdep_assert_held(&obj->mm.lock);
obj->mm.get_page.sg_pos = pages->sgl;
obj->mm.get_page.sg_idx = 0;
@@ -2512,9 +2522,9 @@ static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
}
/* Ensure that the associated pages are gathered from the backing storage
- * and pinned into our object. i915_gem_object_get_pages() may be called
+ * and pinned into our object. i915_gem_object_pin_pages() may be called
* multiple times before they are released by a single call to
- * i915_gem_object_put_pages() - once the pages are no longer referenced
+ * i915_gem_object_unpin_pages() - once the pages are no longer referenced
* either as a result of memory pressure (reaping pages under the shrinker)
* or as the object is itself released.
*/
@@ -2522,15 +2532,23 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
int err;
- lockdep_assert_held(&obj->base.dev->struct_mutex);
+ err = mutex_lock_interruptible(&obj->mm.lock);
+ if (err)
+ return err;
- if (obj->mm.pages)
- return 0;
+ if (likely(obj->mm.pages)) {
+ __i915_gem_object_pin_pages(obj);
+ goto unlock;
+ }
+
+ GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
err = ____i915_gem_object_get_pages(obj);
- if (err)
- __i915_gem_object_unpin_pages(obj);
+ if (!err)
+ atomic_set_release(&obj->mm.pages_pin_count, 1);
+unlock:
+ mutex_unlock(&obj->mm.lock);
return err;
}
@@ -2590,20 +2608,29 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
void *ptr;
int ret;
- lockdep_assert_held(&obj->base.dev->struct_mutex);
GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
- ret = i915_gem_object_pin_pages(obj);
+ ret = mutex_lock_interruptible(&obj->mm.lock);
if (ret)
return ERR_PTR(ret);
- pinned = obj->mm.pages_pin_count > 1;
+ pinned = true;
+ if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
+ ret = ____i915_gem_object_get_pages(obj);
+ if (ret)
+ goto err_unlock;
+
+ GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count));
+ atomic_set_release(&obj->mm.pages_pin_count, 1);
+ pinned = false;
+ }
+ GEM_BUG_ON(!obj->mm.pages);
ptr = ptr_unpack_bits(obj->mm.mapping, has_type);
if (ptr && has_type != type) {
if (pinned) {
ret = -EBUSY;
- goto err;
+ goto err_unpin;
}
if (is_vmalloc_addr(ptr))
@@ -2618,17 +2645,21 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
ptr = i915_gem_object_map(obj, type);
if (!ptr) {
ret = -ENOMEM;
- goto err;
+ goto err_unpin;
}
obj->mm.mapping = ptr_pack_bits(ptr, type);
}
+out_unlock:
+ mutex_unlock(&obj->mm.lock);
return ptr;
-err:
- i915_gem_object_unpin_pages(obj);
- return ERR_PTR(ret);
+err_unpin:
+ atomic_dec(&obj->mm.pages_pin_count);
+err_unlock:
+ ptr = ERR_PTR(ret);
+ goto out_unlock;
}
static void
@@ -4268,7 +4299,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_madvise *args = data;
struct drm_i915_gem_object *obj;
- int ret;
+ int err;
switch (args->madv) {
case I915_MADV_DONTNEED:
@@ -4278,15 +4309,13 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
-
obj = i915_gem_object_lookup(file_priv, args->handle);
- if (!obj) {
- ret = -ENOENT;
- goto unlock;
- }
+ if (!obj)
+ return -ENOENT;
+
+ err = mutex_lock_interruptible(&obj->mm.lock);
+ if (err)
+ goto out;
if (obj->mm.pages &&
i915_gem_object_is_tiled(obj) &&
@@ -4305,11 +4334,11 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
i915_gem_object_truncate(obj);
args->retained = obj->mm.madv != __I915_MADV_PURGED;
+ mutex_unlock(&obj->mm.lock);
+out:
i915_gem_object_put(obj);
-unlock:
- mutex_unlock(&dev->struct_mutex);
- return ret;
+ return err;
}
void i915_gem_object_init(struct drm_i915_gem_object *obj,
@@ -4317,6 +4346,8 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
{
int i;
+ mutex_init(&obj->mm.lock);
+
INIT_LIST_HEAD(&obj->global_list);
INIT_LIST_HEAD(&obj->userfault_link);
for (i = 0; i < I915_NUM_ENGINES; i++)
@@ -4479,7 +4510,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
obj->ops->release(obj);
if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
- obj->mm.pages_pin_count = 0;
+ atomic_set(&obj->mm.pages_pin_count, 0);
if (discard_backing_storage(obj))
obj->mm.madv = I915_MADV_DONTNEED;
__i915_gem_object_put_pages(obj);
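One subtlety in the __i915_gem_object_put_pages() hunk above: the shrinker may reap object B while the allocator already holds object A's mm.lock inside get_pages, so the same lock class is acquired twice. The mutex_lock_nested(..., SINGLE_DEPTH_NESTING) annotation tells lockdep that one level of such recursion is intentional. A sketch of the reaping side, reusing the obj_mm types from the sketch above (release_backing_pages() is again a hypothetical helper, not the driver's code):

static void reap_pages(struct obj_mm *victim)
{
	/* May run with another object's mm.lock already held (shrinker
	 * recursing out of get_pages), i.e. a second acquisition within
	 * the same lock class. Without the nesting annotation, lockdep
	 * would report this as a recursive-lock deadlock. */
	mutex_lock_nested(&victim->lock, SINGLE_DEPTH_NESTING);
	if (!atomic_read(&victim->pages_pin_count))
		release_backing_pages(victim);
	mutex_unlock(&victim->lock);
}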