author     Chris Wilson  2016-08-04 17:32:42 +0200
committer  Chris Wilson  2016-08-04 21:20:06 +0200
commit     ad778f8967ea2f0bfda02701f918bcfcd495b721 (patch)
tree       be8ab4cf23133b97782d69d6a1744b1956a66312 /drivers/gpu/drm/i915/i915_gem_execbuffer.c
parent     drm/i915: Enable lockless lookup of request tracking via RCU (diff)
drm/i915: Export our request as a dma-buf fence on the reservation object
If the GEM objects being rendered in this request have been exported via
dma-buf to a third party, hook ourselves into the dma-buf reservation
object so that the third party can serialise with our rendering via the
dma-buf fences.

Testcase: igt/prime_busy
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1470324762-2545-26-git-send-email-chris@chris-wilson.co.uk
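For context, a hedged sketch of the importer's side (not part of this patch): once i915 attaches its request to the shared reservation object, another kernel driver that has imported the dma-buf can serialise against that rendering by waiting on the reservation object's fences. The helper name and the 100 ms timeout below are made up for the example; it uses reservation_object_wait_timeout_rcu() from <linux/reservation.h>, with wait_all chosen by the access type, mirroring the exclusive/shared split that eb_export_fence() applies on the i915 side.

#include <linux/dma-buf.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/reservation.h>

/* Illustrative importer-side wait; names and timeout are examples only. */
static int example_wait_for_render(struct dma_buf *dmabuf, bool write)
{
	long ret;

	/*
	 * A writer must wait for all fences (shared readers plus the
	 * exclusive writer); a reader only needs the exclusive fence.
	 */
	ret = reservation_object_wait_timeout_rcu(dmabuf->resv,
						  write, true,
						  msecs_to_jiffies(100));
	if (ret < 0)
		return ret;		/* interrupted */
	if (ret == 0)
		return -ETIMEDOUT;	/* fences still busy */

	return 0;			/* idle, safe to access */
}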
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_execbuffer.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 31
1 file changed, 29 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index a1da3028a949..71834741bd87 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -26,14 +26,18 @@
*
*/
+#include <linux/dma_remapping.h>
+#include <linux/reservation.h>
+#include <linux/uaccess.h>
+
#include <drm/drmP.h>
#include <drm/i915_drm.h>
+
#include "i915_drv.h"
+#include "i915_gem_dmabuf.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
-#include <linux/dma_remapping.h>
-#include <linux/uaccess.h>
#define __EXEC_OBJECT_HAS_PIN (1<<31)
#define __EXEC_OBJECT_HAS_FENCE (1<<30)
@@ -1205,6 +1209,28 @@ void i915_vma_move_to_active(struct i915_vma *vma,
list_move_tail(&vma->vm_link, &vma->vm->active_list);
}
+static void eb_export_fence(struct drm_i915_gem_object *obj,
+ struct drm_i915_gem_request *req,
+ unsigned int flags)
+{
+ struct reservation_object *resv;
+
+ resv = i915_gem_object_get_dmabuf_resv(obj);
+ if (!resv)
+ return;
+
+ /* Ignore errors from failing to allocate the new fence, we can't
+ * handle an error right now. Worst case should be missed
+ * synchronisation leading to rendering corruption.
+ */
+ ww_mutex_lock(&resv->lock, NULL);
+ if (flags & EXEC_OBJECT_WRITE)
+ reservation_object_add_excl_fence(resv, &req->fence);
+ else if (reservation_object_reserve_shared(resv) == 0)
+ reservation_object_add_shared_fence(resv, &req->fence);
+ ww_mutex_unlock(&resv->lock);
+}
+
static void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
struct drm_i915_gem_request *req)
@@ -1224,6 +1250,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
obj->base.read_domains = obj->base.pending_read_domains;
i915_vma_move_to_active(vma, req, vma->exec_entry->flags);
+ eb_export_fence(obj, req, vma->exec_entry->flags);
trace_i915_gem_object_change_domain(obj, old_read, old_write);
}
}
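And a rough userspace-side sketch in the spirit of the referenced testcase, igt/prime_busy (this is not the igt code itself): with the request exported on the reservation object, poll() on the PRIME file descriptor reflects the fence state, since the dma-buf poll implementation waits on the exclusive fence for POLLIN and on all fences for POLLOUT. The prime_fd argument is assumed to have come from DRM_IOCTL_PRIME_HANDLE_TO_FD.

#include <poll.h>
#include <stdbool.h>

/* Illustrative only: query (without blocking) whether the dma-buf is busy. */
static bool example_prime_busy(int prime_fd, bool write_access)
{
	struct pollfd pfd = {
		.fd = prime_fd,
		/* writers must wait for all fences, readers for the exclusive one */
		.events = write_access ? POLLOUT : POLLIN,
	};

	/* timeout of 0: poll() returns immediately; 0 means no events ready */
	return poll(&pfd, 1, 0) == 0;
}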