From c1d6798d20eed38b842eee01813ca6c48630d563 Mon Sep 17 00:00:00 2001
From: Daniel Vetter
Date: Thu, 15 Aug 2013 00:02:30 +0200
Subject: drm: use common drm_gem_dmabuf_release in i915/exynos drivers

Note that this is slightly tricky since both drivers store their native objects in dma_buf->priv. But both also embed the base drm_gem_object at the first position, so the implicit cast is ok.

To use the release helper we need to export it, too.

Cc: Inki Dae
Cc: Intel Graphics Development
Signed-off-by: Daniel Vetter
Signed-off-by: Dave Airlie
---
 drivers/gpu/drm/drm_prime.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'drivers/gpu/drm/drm_prime.c')

diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 85e450e3241c..a35f206bdc34 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -192,7 +192,7 @@ static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach, /* nothing to be done here */ } -static void drm_gem_dmabuf_release(struct dma_buf *dma_buf) +void drm_gem_dmabuf_release(struct dma_buf *dma_buf) { struct drm_gem_object *obj = dma_buf->priv; @@ -202,6 +202,7 @@ static void drm_gem_dmabuf_release(struct dma_buf *dma_buf) drm_gem_object_unreference_unlocked(obj); } } +EXPORT_SYMBOL(drm_gem_dmabuf_release); static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf) {
-- cgit v1.2.3-55-g7522

From 01ce605a7bd8f4aaaf3c0accdaa5e106982b698c Mon Sep 17 00:00:00 2001
From: Daniel Vetter
Date: Thu, 15 Aug 2013 00:02:32 +0200
Subject: drm/prime: remove cargo-cult locking from map_sg helper

I've checked both implementations (radeon/nouveau) and they both grab the page array from ttm simply by dereferencing it and then wrapping it up with drm_prime_pages_to_sg in the callback and map it with dma_map_sg (in the helper). Only the grabbing of the underlying page array is anything we need to be concerned about, and either those pages are pinned independently, or we're screwed no matter what. And indeed, nouveau/radeon pin the backing storage in their attach/detach functions.

Since I've created this patch, cma prime support for dma_buf was added. drm_gem_cma_prime_get_sg_table only calls kzalloc and then creates & maps the sg table with dma_get_sgtable. It doesn't touch any gem object state otherwise. So the cma helpers also look safe.

The only thing we might claim it does is prevent concurrent mapping of dma_buf attachments. But a) that's not allowed and b) the current code is racy already since it checks whether the sg mapping exists _before_ grabbing the lock.

So the dev->struct_mutex locking here does absolutely nothing useful, but only distracts. Remove it.

This should also help Maarten's work to eventually pin the backing storage more dynamically by preventing locking inversions around dev->struct_mutex.

v2: Add analysis for recently added cma helper prime code.
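As a concrete illustration of the callback shape this analysis relies on (a sketch only, not code from radeon, nouveau or the cma helpers; the my_bo structure, its pinned pages array and the 3.12-era drm_prime_pages_to_sg signature are assumptions): a driver ->gem_prime_get_sg_table() implementation of this era typically just wraps pages that were already pinned by the dma-buf attach callback, so it touches no gem object state that would need dev->struct_mutex.

#include <drm/drmP.h>

/* Hypothetical driver object: a gem object plus its pinned page array. */
struct my_bo {
	struct drm_gem_object base;
	struct page **pages;	/* assumed pinned in the dma-buf attach callback */
};

static struct sg_table *my_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct my_bo *bo = container_of(obj, struct my_bo, base);
	int npages = obj->size >> PAGE_SHIFT;

	/* No dev->struct_mutex needed: just wrap the already-pinned pages. */
	return drm_prime_pages_to_sg(bo->pages, npages);
}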
Cc: Laurent Pinchart
Cc: Maarten Lankhorst
Acked-by: Laurent Pinchart
Acked-by: Maarten Lankhorst
Signed-off-by: Daniel Vetter
Signed-off-by: Dave Airlie
---
 drivers/gpu/drm/drm_prime.c | 3 ---
 1 file changed, 3 deletions(-)

(limited to 'drivers/gpu/drm/drm_prime.c')

diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index a35f206bdc34..f1159624c68e 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -167,8 +167,6 @@ static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach, if (WARN_ON(prime_attach->dir != DMA_NONE)) return ERR_PTR(-EBUSY); - mutex_lock(&obj->dev->struct_mutex); - sgt = obj->dev->driver->gem_prime_get_sg_table(obj); if (!IS_ERR(sgt)) { @@ -182,7 +180,6 @@ static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach, } } - mutex_unlock(&obj->dev->struct_mutex); return sgt; }
-- cgit v1.2.3-55-g7522

From 730c4ff95eb54e5bab39357baddd0aa6da10d4fb Mon Sep 17 00:00:00 2001
From: Daniel Vetter
Date: Thu, 15 Aug 2013 00:02:38 +0200
Subject: drm/prime: fix error path in drm_gem_prime_fd_to_handle

handle_unreference only clears up the obj->name and the reference, but would leave a dangling handle in the idr. The right thing to do is to call handle_delete.

Signed-off-by: Daniel Vetter
Signed-off-by: Dave Airlie
---
 drivers/gpu/drm/drm_prime.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/gpu/drm/drm_prime.c')

diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index f1159624c68e..82cd83e62e7d 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -476,7 +476,7 @@ fail: /* hmm, if driver attached, we are relying on the free-object path * to detach.. which seems ok.. */ - drm_gem_object_handle_unreference_unlocked(obj); + drm_gem_handle_delete(file_priv, *handle); out_put: dma_buf_put(dma_buf); mutex_unlock(&file_priv->prime.lock);
-- cgit v1.2.3-55-g7522

From 4332bf438bbbc31319abed61d2ac6d9932ff980c Mon Sep 17 00:00:00 2001
From: Daniel Vetter
Date: Thu, 15 Aug 2013 00:02:41 +0200
Subject: drm/prime: use proper pointer in drm_gem_prime_handle_to_fd

Part of the function uses the properly-typed dmabuf variable, the other an untyped void *buf. Kill the latter.
Signed-off-by: Daniel Vetter
Signed-off-by: Dave Airlie
---
 drivers/gpu/drm/drm_prime.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

(limited to 'drivers/gpu/drm/drm_prime.c')

diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 82cd83e62e7d..c2d6d54e10e0 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -303,7 +303,6 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev, int *prime_fd) { struct drm_gem_object *obj; - void *buf; int ret = 0; struct dma_buf *dmabuf; @@ -323,15 +322,15 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev, goto out_have_obj; } - buf = dev->driver->gem_prime_export(dev, obj, flags); - if (IS_ERR(buf)) { + dmabuf = dev->driver->gem_prime_export(dev, obj, flags); + if (IS_ERR(dmabuf)) { /* normally the created dma-buf takes ownership of the ref, * but if that fails then drop the ref */ - ret = PTR_ERR(buf); + ret = PTR_ERR(dmabuf); goto out; } - obj->export_dma_buf = buf; + obj->export_dma_buf = dmabuf; /* if we've exported this buffer the cheat and add it to the import list * so we get the correct handle back */ @@ -341,7 +340,7 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev, if (ret) goto fail_put_dmabuf; - ret = dma_buf_fd(buf, flags); + ret = dma_buf_fd(dmabuf, flags); if (ret < 0) goto fail_rm_handle; @@ -362,11 +361,12 @@ out_have_obj: goto out; fail_rm_handle: - drm_prime_remove_buf_handle_locked(&file_priv->prime, buf); + drm_prime_remove_buf_handle_locked(&file_priv->prime, + dmabuf); fail_put_dmabuf: /* clear NOT to be checked when releasing dma_buf */ obj->export_dma_buf = NULL; - dma_buf_put(buf); + dma_buf_put(dmabuf); out: drm_gem_object_unreference_unlocked(obj); mutex_unlock(&file_priv->prime.lock);
-- cgit v1.2.3-55-g7522

From bdf655de47b0d17ee2efc3bea5f617445ff77adc Mon Sep 17 00:00:00 2001
From: Daniel Vetter
Date: Thu, 15 Aug 2013 00:02:42 +0200
Subject: drm/prime: shrink critical section protected by prime lock

When exporting a gem object as a dma-buf the critical section for the per-fd prime lock is just the adding (and in case of errors, removing) of the handle to the per-fd lookup cache. So restrict the critical section to just that part of the function.

This simplifies later reordering.
Signed-off-by: Daniel Vetter
Signed-off-by: Dave Airlie
---
 drivers/gpu/drm/drm_prime.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'drivers/gpu/drm/drm_prime.c')

diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index c2d6d54e10e0..cb0451679e81 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -310,7 +310,6 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev, if (!obj) return -ENOENT; - mutex_lock(&file_priv->prime.lock); /* re-export the original imported object */ if (obj->import_attach) { dmabuf = obj->import_attach->dmabuf; @@ -332,6 +331,7 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev, } obj->export_dma_buf = dmabuf; + mutex_lock(&file_priv->prime.lock); /* if we've exported this buffer the cheat and add it to the import list * so we get the correct handle back */ @@ -363,13 +363,13 @@ out_have_obj: fail_rm_handle: drm_prime_remove_buf_handle_locked(&file_priv->prime, dmabuf); + mutex_unlock(&file_priv->prime.lock); fail_put_dmabuf: /* clear NOT to be checked when releasing dma_buf */ obj->export_dma_buf = NULL; dma_buf_put(dmabuf); out: drm_gem_object_unreference_unlocked(obj); - mutex_unlock(&file_priv->prime.lock); return ret; } EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
-- cgit v1.2.3-55-g7522

From 84341c280acb8217a301344082c7ad8b9af870a6 Mon Sep 17 00:00:00 2001
From: Daniel Vetter
Date: Thu, 15 Aug 2013 00:02:43 +0200
Subject: drm/prime: clarify logic a bit in drm_gem_prime_fd_to_handle

if (!ret) implies that ret == 0, so no need to clear it again. And explicitly check for ret == 0 to indicate that we're checking an errno integer.

Signed-off-by: Daniel Vetter
Signed-off-by: Dave Airlie
---
 drivers/gpu/drm/drm_prime.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

(limited to 'drivers/gpu/drm/drm_prime.c')

diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index cb0451679e81..3d576018893a 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -444,10 +444,8 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev, ret = drm_prime_lookup_buf_handle(&file_priv->prime, dma_buf, handle); - if (!ret) { - ret = 0; + if (ret == 0) goto out_put; - } /* never seen this one, need to import */ obj = dev->driver->gem_prime_import(dev, dma_buf);
-- cgit v1.2.3-55-g7522

From 319c933c71f3dbdb2b3274d1634d3494c70efa06 Mon Sep 17 00:00:00 2001
From: Daniel Vetter
Date: Thu, 15 Aug 2013 00:02:46 +0200
Subject: drm/prime: proper locking+refcounting for obj->dma_buf link

The export dma-buf cache is semantically similar to a flink name. So semantically it makes sense to treat it the same and remove the name (i.e. the dma_buf pointer) and its references when the last gem handle disappears.

Again we need to be careful, but doubly so: not only could someone race an export with a gem close ioctl (so we need to recheck obj->handle_count again when assigning the new name), but multiple exports can also race against each other. This is prevented by holding the dev->object_name_lock across the entire section which touches obj->dma_buf.

With the new scheme we also need to reinstate the obj->dma_buf link at import time (in case the only reference userspace has held in-between was through the dma-buf fd and not through any native gem handle). For simplicity we don't check whether it's a native object but unconditionally set up that link - with the new scheme of removing the obj->dma_buf reference when the last handle disappears we can do that.
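For reference, the userspace-visible sequence that motivates reinstating obj->dma_buf at import time looks roughly like the sketch below. This is a hedged illustration, not test code from the series: how drm_fd and the initial gem handle are created (dumb buffer or driver-specific ioctl) and the exact include path for drm.h are assumptions; only the generic PRIME and GEM_CLOSE ioctls are used.

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

/* Export a handle, drop the last gem handle, then re-import the fd.
 * After these patches the kernel must still hand back a usable handle. */
static int export_close_reimport(int drm_fd, __u32 handle)
{
	struct drm_prime_handle prime = { .handle = handle, .flags = 0 };
	struct drm_gem_close close_req;
	int ret;

	/* Export the gem handle as a dma-buf fd (fills prime.fd). */
	ret = ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &prime);
	if (ret)
		return ret;

	/* Drop the last gem handle; only the dma-buf fd keeps the object alive. */
	memset(&close_req, 0, sizeof(close_req));
	close_req.handle = handle;
	ioctl(drm_fd, DRM_IOCTL_GEM_CLOSE, &close_req);

	/* Re-import the fd; the kernel fills prime.handle again. */
	ret = ioctl(drm_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &prime);
	if (ret)
		return ret;

	printf("re-imported as handle %u\n", prime.handle);
	return 0;
}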
To make it clear that this is not just for exported buffers anymore, also rename it from export_dma_buf to dma_buf.

To make sure that no one can race an fd_to_handle or handle_to_fd with gem_close, we use the same tricks as in flink of extending the dev->object_name_lock critical section.

With this change we finally have a guaranteed 1:1 relationship (at least for native objects) between gem objects and dma-bufs, even accounting for races (which can happen since the dma-buf itself holds a reference while in-flight). This prevents igt/prime_self_import/export-vs-gem_close-race from Oopsing the kernel.

There is still a leak though since the per-file priv dma-buf/handle cache handling is racy. That will be fixed in a later patch.

v2: Remove the bogus dma_buf_put from the export_and_register_object failure path if we've raced with the handle count dropping to 0.

Signed-off-by: Daniel Vetter
Signed-off-by: Dave Airlie
---
 drivers/gpu/drm/drm_fops.c | 1 +
 drivers/gpu/drm/drm_gem.c | 24 ++++++++++++++--
 drivers/gpu/drm/drm_prime.c | 70 +++++++++++++++++++++++++++++++++++----------
 include/drm/drmP.h | 12 ++++++--
 4 files changed, 87 insertions(+), 20 deletions(-)

(limited to 'drivers/gpu/drm/drm_prime.c')

diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 59f459291093..2d2401e9c5ae 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -486,6 +486,7 @@ int drm_release(struct inode *inode, struct file *filp) if (dev->driver->postclose) dev->driver->postclose(dev, file_priv); + if (drm_core_check_feature(dev, DRIVER_PRIME)) drm_prime_destroy_file_private(&file_priv->prime); diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index d47aa774d64b..4b3c533be859 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -195,9 +195,14 @@ drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp) drm_prime_remove_buf_handle(&filp->prime, obj->import_attach->dmabuf); } - if (obj->export_dma_buf) { + + /* + * Note: obj->dma_buf can't disappear as long as we still hold a + * handle reference in obj->handle_count. + */ + if (obj->dma_buf) { drm_prime_remove_buf_handle(&filp->prime, - obj->export_dma_buf); + obj->dma_buf); } } @@ -231,6 +236,15 @@ static void drm_gem_object_handle_free(struct drm_gem_object *obj) } } +static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj) +{ + /* Unbreak the reference cycle if we have an exported dma_buf.
*/ + if (obj->dma_buf) { + dma_buf_put(obj->dma_buf); + obj->dma_buf = NULL; + } +} + static void drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj) { @@ -244,8 +258,10 @@ drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj) */ mutex_lock(&obj->dev->object_name_lock); - if (--obj->handle_count == 0) + if (--obj->handle_count == 0) { drm_gem_object_handle_free(obj); + drm_gem_object_exported_dma_buf_free(obj); + } mutex_unlock(&obj->dev->object_name_lock); drm_gem_object_unreference_unlocked(obj); @@ -712,6 +728,8 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private) void drm_gem_object_release(struct drm_gem_object *obj) { + WARN_ON(obj->dma_buf); + if (obj->filp) fput(obj->filp); } diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 3d576018893a..5e543e9264d7 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -193,11 +193,8 @@ void drm_gem_dmabuf_release(struct dma_buf *dma_buf) { struct drm_gem_object *obj = dma_buf->priv; - if (obj->export_dma_buf == dma_buf) { - /* drop the reference on the export fd holds */ - obj->export_dma_buf = NULL; - drm_gem_object_unreference_unlocked(obj); - } + /* drop the reference on the export fd holds */ + drm_gem_object_unreference_unlocked(obj); } EXPORT_SYMBOL(drm_gem_dmabuf_release); @@ -298,6 +295,37 @@ struct dma_buf *drm_gem_prime_export(struct drm_device *dev, } EXPORT_SYMBOL(drm_gem_prime_export); +static struct dma_buf *export_and_register_object(struct drm_device *dev, + struct drm_gem_object *obj, + uint32_t flags) +{ + struct dma_buf *dmabuf; + + /* prevent races with concurrent gem_close. */ + if (obj->handle_count == 0) { + dmabuf = ERR_PTR(-ENOENT); + return dmabuf; + } + + dmabuf = dev->driver->gem_prime_export(dev, obj, flags); + if (IS_ERR(dmabuf)) { + /* normally the created dma-buf takes ownership of the ref, + * but if that fails then drop the ref + */ + return dmabuf; + } + + /* + * Note that callers do not need to clean up the export cache + * since the check for obj->handle_count guarantees that someone + * will clean it up. 
+ */ + obj->dma_buf = dmabuf; + get_dma_buf(obj->dma_buf); + + return dmabuf; +} + int drm_gem_prime_handle_to_fd(struct drm_device *dev, struct drm_file *file_priv, uint32_t handle, uint32_t flags, int *prime_fd) @@ -313,15 +341,20 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev, /* re-export the original imported object */ if (obj->import_attach) { dmabuf = obj->import_attach->dmabuf; + get_dma_buf(dmabuf); goto out_have_obj; } - if (obj->export_dma_buf) { - dmabuf = obj->export_dma_buf; + mutex_lock(&dev->object_name_lock); + if (obj->dma_buf) { + get_dma_buf(obj->dma_buf); + dmabuf = obj->dma_buf; + mutex_unlock(&dev->object_name_lock); goto out_have_obj; } - dmabuf = dev->driver->gem_prime_export(dev, obj, flags); + dmabuf = export_and_register_object(dev, obj, flags); + mutex_unlock(&dev->object_name_lock); if (IS_ERR(dmabuf)) { /* normally the created dma-buf takes ownership of the ref, * but if that fails then drop the ref @@ -329,14 +362,13 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev, ret = PTR_ERR(dmabuf); goto out; } - obj->export_dma_buf = dmabuf; mutex_lock(&file_priv->prime.lock); /* if we've exported this buffer the cheat and add it to the import list * so we get the correct handle back */ ret = drm_prime_add_buf_handle(&file_priv->prime, - obj->export_dma_buf, handle); + dmabuf, handle); if (ret) goto fail_put_dmabuf; @@ -349,7 +381,6 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev, return 0; out_have_obj: - get_dma_buf(dmabuf); ret = dma_buf_fd(dmabuf, flags); if (ret < 0) { dma_buf_put(dmabuf); @@ -365,8 +396,6 @@ fail_rm_handle: dmabuf); mutex_unlock(&file_priv->prime.lock); fail_put_dmabuf: - /* clear NOT to be checked when releasing dma_buf */ - obj->export_dma_buf = NULL; dma_buf_put(dmabuf); out: drm_gem_object_unreference_unlocked(obj); @@ -448,13 +477,22 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev, goto out_put; /* never seen this one, need to import */ + mutex_lock(&dev->object_name_lock); obj = dev->driver->gem_prime_import(dev, dma_buf); if (IS_ERR(obj)) { ret = PTR_ERR(obj); - goto out_put; + goto out_unlock; + } + + if (obj->dma_buf) { + WARN_ON(obj->dma_buf != dma_buf); + } else { + obj->dma_buf = dma_buf; + get_dma_buf(dma_buf); } - ret = drm_gem_handle_create(file_priv, obj, handle); + /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */ + ret = drm_gem_handle_create_tail(file_priv, obj, handle); drm_gem_object_unreference_unlocked(obj); if (ret) goto out_put; @@ -475,6 +513,8 @@ fail: * to detach.. which seems ok.. */ drm_gem_handle_delete(file_priv, *handle); +out_unlock: + mutex_lock(&dev->object_name_lock); out_put: dma_buf_put(dma_buf); mutex_unlock(&file_priv->prime.lock); diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 063eac31b97b..a95db49b3f9e 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -667,8 +667,16 @@ struct drm_gem_object { void *driver_private; - /* dma buf exported from this GEM object */ - struct dma_buf *export_dma_buf; + /** + * dma_buf - dma buf associated with this GEM object + * + * Pointer to the dma-buf associated with this gem object (either + * through importing or exporting). We break the resulting reference + * loop when the last gem handle for this object is released. 
+ * + * Protected by obj->object_name_lock + */ + struct dma_buf *dma_buf; /** * import_attach - dma buf attachment backing this object
-- cgit v1.2.3-55-g7522

From de9564d8b9e69bf6603521e810d3cb46fa98ad81 Mon Sep 17 00:00:00 2001
From: Daniel Vetter
Date: Thu, 15 Aug 2013 00:02:48 +0200
Subject: drm/prime: make drm_prime_lookup_buf_handle static

... and move it towards the top of the file to avoid a forward declaration.

Signed-off-by: Daniel Vetter
Signed-off-by: Dave Airlie
---
 drivers/gpu/drm/drm_prime.c | 29 +++++++++++++++--------------
 include/drm/drmP.h | 1 -
 2 files changed, 15 insertions(+), 15 deletions(-)

(limited to 'drivers/gpu/drm/drm_prime.c')

diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 5e543e9264d7..ed1ea5c1a9ca 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -83,6 +83,21 @@ static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, return 0; } +static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, + struct dma_buf *dma_buf, + uint32_t *handle) +{ + struct drm_prime_member *member; + + list_for_each_entry(member, &prime_fpriv->head, entry) { + if (member->dma_buf == dma_buf) { + *handle = member->handle; + return 0; + } + } + return -ENOENT; +} + static int drm_gem_map_attach(struct dma_buf *dma_buf, struct device *target_dev, struct dma_buf_attachment *attach) @@ -655,20 +670,6 @@ void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv) } EXPORT_SYMBOL(drm_prime_destroy_file_private); -int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle) -{ - struct drm_prime_member *member; - - list_for_each_entry(member, &prime_fpriv->head, entry) { - if (member->dma_buf == dma_buf) { - *handle = member->handle; - return 0; - } - } - return -ENOENT; -} -EXPORT_SYMBOL(drm_prime_lookup_buf_handle); - void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf) { mutex_lock(&prime_fpriv->lock); diff --git a/include/drm/drmP.h b/include/drm/drmP.h index ce1e6bd30306..5914cc5c3fa6 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1508,7 +1508,6 @@ int drm_gem_dumb_destroy(struct drm_file *file, void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv); void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv); -int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle); void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf); #if DRM_DEBUG_CODE extern int drm_vma_info(struct seq_file *m, void *data);
-- cgit v1.2.3-55-g7522

From d0b2c5334f41bdd18adaa3fbc1f7b5f1daab7eac Mon Sep 17 00:00:00 2001
From: Daniel Vetter
Date: Thu, 15 Aug 2013 00:02:49 +0200
Subject: drm/prime: Always add exported buffers to the handle cache

... not only when the dma-buf is freshly created. In contrived examples someone else could have exported/imported the dma-buf already and handed us the gem object with a flink name. If such an object gets reexported as a dma_buf we won't have it in the handle cache already, which breaks the guarantee that for dma-buf imports we always hand back an existing handle if there is one.
This is exercised by igt/prime_self_import/with_one_bo_two_files.

Now if we extend the locked sections just a notch more we can also plug the racy buf/handle cache setup in handle_to_fd: If evil userspace races a concurrent gem close against a prime export operation we can end up tearing down the gem handle before the dma buf handle cache is set up. When handle_to_fd gets around to adding the handle to the cache there will be no one left to clean it up, effectively leaking the bo (and the dma-buf, since the handle cache holds a ref on the dma-buf):

Thread A                           Thread B

handle_to_fd:
lookup gem object from handle
creates new dma_buf
                                   gem_close on the same handle
                                   obj->dma_buf is set, but file priv buf
                                   handle cache has no entry
                                   obj->handle_count drops to 0
drm_prime_add_buf_handle sets up the handle cache

-> We have a dma-buf reference in the handle cache, but since the handle_count of the gem object already dropped to 0 no one will clean it up. When closing the drm device fd we'll hit the WARN_ON in drm_prime_destroy_file_private.

The important change is to extend the critical section of the filp->prime.lock to cover the gem handle lookup. This serializes with a concurrent gem handle close.

This leak is exercised by igt/prime_self_import/export-vs-gem_close-race

Signed-off-by: Daniel Vetter
Signed-off-by: Dave Airlie
---
 drivers/gpu/drm/drm_gem.c | 6 ++--
 drivers/gpu/drm/drm_prime.c | 81 +++++++++++++++++++++++++++------------------
 include/drm/drmP.h | 2 +-
 3 files changed, 53 insertions(+), 36 deletions(-)

(limited to 'drivers/gpu/drm/drm_prime.c')

diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 0a5a0ca0a52e..1ce88c3301a1 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -195,10 +195,12 @@ drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp) * Note: obj->dma_buf can't disappear as long as we still hold a * handle reference in obj->handle_count.
*/ + mutex_lock(&filp->prime.lock); if (obj->dma_buf) { - drm_prime_remove_buf_handle(&filp->prime, - obj->dma_buf); + drm_prime_remove_buf_handle_locked(&filp->prime, + obj->dma_buf); } + mutex_unlock(&filp->prime.lock); } static void drm_gem_object_ref_bug(struct kref *list_kref) diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index ed1ea5c1a9ca..7ae2bfcab70e 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -83,6 +83,19 @@ static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, return 0; } +static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv, + uint32_t handle) +{ + struct drm_prime_member *member; + + list_for_each_entry(member, &prime_fpriv->head, entry) { + if (member->handle == handle) + return member->dma_buf; + } + + return NULL; +} + static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle) @@ -146,9 +159,8 @@ static void drm_gem_map_detach(struct dma_buf *dma_buf, attach->priv = NULL; } -static void drm_prime_remove_buf_handle_locked( - struct drm_prime_file_private *prime_fpriv, - struct dma_buf *dma_buf) +void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv, + struct dma_buf *dma_buf) { struct drm_prime_member *member, *safe; @@ -337,6 +349,8 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev, */ obj->dma_buf = dmabuf; get_dma_buf(obj->dma_buf); + /* Grab a new ref since the callers is now used by the dma-buf */ + drm_gem_object_reference(obj); return dmabuf; } @@ -349,10 +363,20 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev, int ret = 0; struct dma_buf *dmabuf; + mutex_lock(&file_priv->prime.lock); obj = drm_gem_object_lookup(dev, file_priv, handle); - if (!obj) - return -ENOENT; + if (!obj) { + ret = -ENOENT; + goto out_unlock; + } + + dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle); + if (dmabuf) { + get_dma_buf(dmabuf); + goto out_have_handle; + } + mutex_lock(&dev->object_name_lock); /* re-export the original imported object */ if (obj->import_attach) { dmabuf = obj->import_attach->dmabuf; @@ -360,45 +384,45 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev, goto out_have_obj; } - mutex_lock(&dev->object_name_lock); if (obj->dma_buf) { get_dma_buf(obj->dma_buf); dmabuf = obj->dma_buf; - mutex_unlock(&dev->object_name_lock); goto out_have_obj; } dmabuf = export_and_register_object(dev, obj, flags); - mutex_unlock(&dev->object_name_lock); if (IS_ERR(dmabuf)) { /* normally the created dma-buf takes ownership of the ref, * but if that fails then drop the ref */ ret = PTR_ERR(dmabuf); + mutex_unlock(&dev->object_name_lock); goto out; } - mutex_lock(&file_priv->prime.lock); - /* if we've exported this buffer the cheat and add it to the import list - * so we get the correct handle back +out_have_obj: + /* + * If we've exported this buffer then cheat and add it to the import list + * so we get the correct handle back. We must do this under the + * protection of dev->object_name_lock to ensure that a racing gem close + * ioctl doesn't miss to remove this buffer handle from the cache. 
*/ ret = drm_prime_add_buf_handle(&file_priv->prime, dmabuf, handle); + mutex_unlock(&dev->object_name_lock); if (ret) goto fail_put_dmabuf; +out_have_handle: ret = dma_buf_fd(dmabuf, flags); + /* + * We must _not_ remove the buffer from the handle cache since the newly + * created dma buf is already linked in the global obj->dma_buf pointer, + * and that is invariant as long as a userspace gem handle exists. + * Closing the handle will clean out the cache anyway, so we don't leak. + */ if (ret < 0) { - dma_buf_put(dmabuf); + goto fail_put_dmabuf; } else { *prime_fd = ret; ret = 0; } @@ -406,14 +430,13 @@ out_have_obj: goto out; -fail_rm_handle: - drm_prime_remove_buf_handle_locked(&file_priv->prime, - dmabuf); - mutex_unlock(&file_priv->prime.lock); fail_put_dmabuf: dma_buf_put(dmabuf); out: drm_gem_object_unreference_unlocked(obj); +out_unlock: + mutex_unlock(&file_priv->prime.lock); + return ret; } EXPORT_SYMBOL(drm_gem_prime_handle_to_fd); @@ -669,11 +692,3 @@ void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv) WARN_ON(!list_empty(&prime_fpriv->head)); } EXPORT_SYMBOL(drm_prime_destroy_file_private); - -void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf) -{ - mutex_lock(&prime_fpriv->lock); - drm_prime_remove_buf_handle_locked(prime_fpriv, dma_buf); - mutex_unlock(&prime_fpriv->lock); -} -EXPORT_SYMBOL(drm_prime_remove_buf_handle); diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 5914cc5c3fa6..90833dccc919 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1508,7 +1508,7 @@ int drm_gem_dumb_destroy(struct drm_file *file, void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv); void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv); -void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf); +void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf); #if DRM_DEBUG_CODE extern int drm_vma_info(struct seq_file *m, void *data);
-- cgit v1.2.3-55-g7522

From 0adb23709ba9dd87d8bfa1ee349482ac8ec0730a Mon Sep 17 00:00:00 2001
From: Dan Carpenter
Date: Fri, 23 Aug 2013 23:46:02 +0300
Subject: drm/prime: double lock typo

There is a typo, so this deadlocks on error instead of unlocking.

Signed-off-by: Dan Carpenter
Reviewed-by: Daniel Vetter
Signed-off-by: Dave Airlie
---
 drivers/gpu/drm/drm_prime.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/gpu/drm/drm_prime.c')

diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 7ae2bfcab70e..276d470f7b3e 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -552,7 +552,7 @@ fail: */ drm_gem_handle_delete(file_priv, *handle); out_unlock: - mutex_lock(&dev->object_name_lock); + mutex_unlock(&dev->object_name_lock); out_put: dma_buf_put(dma_buf); mutex_unlock(&file_priv->prime.lock);
-- cgit v1.2.3-55-g7522
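As a closing illustration, the export-vs-gem_close race addressed by the two patches above has roughly the following userspace shape. This is a sketch of the scenario, not the actual igt/prime_self_import test code: drm_fd and handle are assumed to be set up elsewhere, error handling is omitted, and the program would be linked with -lpthread.

#include <pthread.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

struct race_args {
	int drm_fd;
	__u32 handle;
};

static void *export_thread(void *data)
{
	struct race_args *args = data;
	struct drm_prime_handle prime;

	memset(&prime, 0, sizeof(prime));
	prime.handle = args->handle;
	/* Racing export of the gem handle as a dma-buf fd. */
	ioctl(args->drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &prime);
	return NULL;
}

static void *close_thread(void *data)
{
	struct race_args *args = data;
	struct drm_gem_close close_req;

	memset(&close_req, 0, sizeof(close_req));
	close_req.handle = args->handle;
	/* Racing gem close on the same handle. */
	ioctl(args->drm_fd, DRM_IOCTL_GEM_CLOSE, &close_req);
	return NULL;
}

static void race_once(int drm_fd, __u32 handle)
{
	struct race_args args = { .drm_fd = drm_fd, .handle = handle };
	pthread_t t1, t2;

	/* Before these fixes this pattern could leak the bo/dma-buf or hit
	 * the WARN_ON in drm_prime_destroy_file_private on device close. */
	pthread_create(&t1, NULL, export_thread, &args);
	pthread_create(&t2, NULL, close_thread, &args);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
}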