path: root/drivers/gpu/drm/i915/i915_active.c

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_active.h"

#define BKL(ref) (&(ref)->i915->drm.struct_mutex)
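
/*
 * An i915_active tracks the outstanding requests (fences) using a resource,
 * one i915_gem_active slot per timeline: the most recently used timeline is
 * cached in ref->last, the remainder are kept in an rbtree keyed by the
 * timeline (fence context) id. Once every tracked request has been retired,
 * the owner's retire() callback is invoked.
 *
 * An illustrative sketch of the expected usage, based only on the functions
 * below (my_retire is a hypothetical callback supplied by the owner; the
 * timeline index is the fence context, cf. active_instance()):
 *
 *	i915_active_init(i915, &ref, my_retire);
 *
 *	i915_active_acquire(&ref);		// hold off retire()
 *	err = i915_active_ref(&ref, rq->fence.context, rq);
 *	i915_active_release(&ref);		// retire() runs once idle
 *
 *	err = i915_active_wait(&ref);		// wait for all timelines
 *	i915_active_fini(&ref);			// free the tracking nodes
 *
 * The tracker is guarded by struct_mutex, see BKL() and the lockdep
 * assertions below.
 */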

struct active_node {
	struct i915_gem_active base;
	struct i915_active *ref;
	struct rb_node node;
	u64 timeline;
};
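
/*
 * Drop one count on the i915_active; once the last tracked request has been
 * retired, invoke the owner's retire() callback.
 */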

static void
__active_retire(struct i915_active *ref)
{
	GEM_BUG_ON(!ref->count);
	if (!--ref->count)
		ref->retire(ref);
}

static void
node_retire(struct i915_gem_active *base, struct i915_request *rq)
{
	__active_retire(container_of(base, struct active_node, base)->ref);
}

static void
last_retire(struct i915_gem_active *base, struct i915_request *rq)
{
	__active_retire(container_of(base, struct i915_active, last));
}
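
/*
 * Find the i915_gem_active slot to use for tracking a request on the given
 * timeline. The most recently used timeline is cached in ref->last; if
 * ref->last currently holds a request from a different timeline, that
 * request is first moved into the rbtree node for its timeline (allocating
 * a node if required). &ref->last is then returned for the caller to fill,
 * or ERR_PTR(-ENOMEM) on allocation failure.
 */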

static struct i915_gem_active *
active_instance(struct i915_active *ref, u64 idx)
{
	struct active_node *node;
	struct rb_node **p, *parent;
	struct i915_request *old;

	/*
	 * We track the most recently used timeline to skip an rbtree search
	 * for the common case; under typical loads we never need the rbtree
	 * at all. We can reuse the last slot if it is empty, that is,
	 * after the previous activity has been retired, or if it matches the
	 * current timeline.
	 *
	 * Note that we allow a timeline to be active simultaneously in
	 * the rbtree and in the last cache. We do this to avoid having
	 * to search and replace the rbtree element for a new timeline, with
	 * the cost being that we must be aware that the ref may be retired
	 * twice for the same timeline (as the older rbtree element will be
	 * retired before the new request that was added to last).
	 */
	old = i915_gem_active_raw(&ref->last, BKL(ref));
	if (!old || old->fence.context == idx)
		goto out;

	/* Move the currently active fence into the rbtree */
	idx = old->fence.context;

	parent = NULL;
	p = &ref->tree.rb_node;
	while (*p) {
		parent = *p;

		node = rb_entry(parent, struct active_node, node);
		if (node->timeline == idx)
			goto replace;

		if (node->timeline < idx)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	node = kmalloc(sizeof(*node), GFP_KERNEL);

	/* kmalloc may retire the ref->last (thanks shrinker)! */
	if (unlikely(!i915_gem_active_raw(&ref->last, BKL(ref)))) {
		kfree(node);
		goto out;
	}

	if (unlikely(!node))
		return ERR_PTR(-ENOMEM);

	init_request_active(&node->base, node_retire);
	node->ref = ref;
	node->timeline = idx;

	rb_link_node(&node->node, parent, p);
	rb_insert_color(&node->node, &ref->tree);

replace:
	/*
	 * Overwrite the previous active slot in the rbtree with last,
	 * leaving last zeroed. If the previous slot is still active,
	 * we must be careful as we now only expect to receive one retire
	 * callback, not two, and so must undo the active counting for the
	 * overwritten slot.
	 */
	if (i915_gem_active_isset(&node->base)) {
		/* Retire ourselves from the old rq->active_list */
		__list_del_entry(&node->base.link);
		ref->count--;
		GEM_BUG_ON(!ref->count);
	}
	GEM_BUG_ON(list_empty(&ref->last.link));
	list_replace_init(&ref->last.link, &node->base.link);
	node->base.request = fetch_and_zero(&ref->last.request);

out:
	return &ref->last;
}
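
/*
 * Initialise the i915_active tracker; retire() will be invoked once all
 * activity tracked by the ref has completed.
 */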

void i915_active_init(struct drm_i915_private *i915,
		      struct i915_active *ref,
		      void (*retire)(struct i915_active *ref))
{
	ref->i915 = i915;
	ref->retire = retire;
	ref->tree = RB_ROOT;
	init_request_active(&ref->last, last_retire);
	ref->count = 0;
}
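
/*
 * Record rq as the most recent activity on the given timeline, taking a
 * count on the ref if that timeline's slot was previously idle. Returns 0
 * on success, or -ENOMEM if a new timeline node could not be allocated.
 */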

int i915_active_ref(struct i915_active *ref,
		    u64 timeline,
		    struct i915_request *rq)
{
	struct i915_gem_active *active;

	active = active_instance(ref, timeline);
	if (IS_ERR(active))
		return PTR_ERR(active);

	if (!i915_gem_active_isset(active))
		ref->count++;
	i915_gem_active_set(active, rq);

	GEM_BUG_ON(!ref->count);
	return 0;
}
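
/*
 * i915_active_acquire() pins the i915_active, preventing retire() from
 * running until the matching i915_active_release(). Returns true if this
 * is the first acquire (the ref was previously idle).
 */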

bool i915_active_acquire(struct i915_active *ref)
{
	lockdep_assert_held(BKL(ref));
	return !ref->count++;
}

void i915_active_release(struct i915_active *ref)
{
	lockdep_assert_held(BKL(ref));
	__active_retire(ref);
}
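
/*
 * Wait for (and retire) all requests currently tracked by the ref, across
 * every timeline. Returns 0 on success, or an error if a wait could not be
 * completed.
 */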

int i915_active_wait(struct i915_active *ref)
{
	struct active_node *it, *n;
	int ret = 0;

	if (i915_active_acquire(ref))
		goto out_release;

	ret = i915_gem_active_retire(&ref->last, BKL(ref));
	if (ret)
		goto out_release;

	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
		ret = i915_gem_active_retire(&it->base, BKL(ref));
		if (ret)
			break;
	}

out_release:
	i915_active_release(ref);
	return ret;
}
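
/*
 * i915_request_await_active() queues rq to await all requests currently
 * tracked by the ref on every timeline, so that rq is not executed until
 * that prior activity has completed.
 */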

static int __i915_request_await_active(struct i915_request *rq,
				       struct i915_gem_active *active)
{
	struct i915_request *barrier =
		i915_gem_active_raw(active, &rq->i915->drm.struct_mutex);

	return barrier ? i915_request_await_dma_fence(rq, &barrier->fence) : 0;
}

int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
{
	struct active_node *it, *n;
	int ret;

	ret = __i915_request_await_active(rq, &ref->last);
	if (ret)
		return ret;

	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
		ret = __i915_request_await_active(rq, &it->base);
		if (ret)
			return ret;
	}

	return 0;
}
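
/*
 * Release the memory used for tracking timelines; the ref must already be
 * idle (all tracked requests retired).
 */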

void i915_active_fini(struct i915_active *ref)
{
	struct active_node *it, *n;

	GEM_BUG_ON(i915_gem_active_isset(&ref->last));

	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
		GEM_BUG_ON(i915_gem_active_isset(&it->base));
		kfree(it);
	}
	ref->tree = RB_ROOT;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_active.c"
#endif