xref: /linux/drivers/gpu/drm/i915/i915_active.c (revision b2eb7d716426fe056596761cd371005d64e9caec)
1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2019 Intel Corporation
5  */
6 
7 #include <linux/debugobjects.h>
8 
9 #include "gt/intel_context.h"
10 #include "gt/intel_engine_heartbeat.h"
11 #include "gt/intel_engine_pm.h"
12 #include "gt/intel_ring.h"
13 
14 #include "i915_drv.h"
15 #include "i915_active.h"
16 
17 /*
18  * Active refs memory management
19  *
20  * To be more economical with memory, we reap all the i915_active trees as
21  * they idle (when we know the active requests are inactive) and allocate the
22  * nodes from a local slab cache to hopefully reduce the fragmentation.
23  */
24 static struct kmem_cache *slab_cache;
25 
26 struct active_node {
27 	struct rb_node node;
28 	struct i915_active_fence base;
29 	struct i915_active *ref;
30 	u64 timeline;
31 };
32 
33 #define fetch_node(x) rb_entry(READ_ONCE(x), typeof(struct active_node), node)
34 
35 static inline struct active_node *
36 node_from_active(struct i915_active_fence *active)
37 {
38 	return container_of(active, struct active_node, base);
39 }
40 
41 #define take_preallocated_barriers(x) llist_del_all(&(x)->preallocated_barriers)
42 
43 static inline bool is_barrier(const struct i915_active_fence *active)
44 {
45 	return IS_ERR(rcu_access_pointer(active->fence));
46 }
47 
48 static inline struct llist_node *barrier_to_ll(struct active_node *node)
49 {
50 	GEM_BUG_ON(!is_barrier(&node->base));
51 	return (struct llist_node *)&node->base.cb.node;
52 }
53 
54 static inline struct intel_engine_cs *
55 __barrier_to_engine(struct active_node *node)
56 {
57 	return (struct intel_engine_cs *)READ_ONCE(node->base.cb.node.prev);
58 }
59 
60 static inline struct intel_engine_cs *
61 barrier_to_engine(struct active_node *node)
62 {
63 	GEM_BUG_ON(!is_barrier(&node->base));
64 	return __barrier_to_engine(node);
65 }
66 
67 static inline struct active_node *barrier_from_ll(struct llist_node *x)
68 {
69 	return container_of((struct list_head *)x,
70 			    struct active_node, base.cb.node);
71 }
72 
73 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS)
74 
75 static void *active_debug_hint(void *addr)
76 {
77 	struct i915_active *ref = addr;
78 
79 	return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref;
80 }
81 
82 static const struct debug_obj_descr active_debug_desc = {
83 	.name = "i915_active",
84 	.debug_hint = active_debug_hint,
85 };
86 
87 static void debug_active_init(struct i915_active *ref)
88 {
89 	debug_object_init(ref, &active_debug_desc);
90 }
91 
92 static void debug_active_activate(struct i915_active *ref)
93 {
94 	lockdep_assert_held(&ref->tree_lock);
95 	if (!atomic_read(&ref->count)) /* before the first inc */
96 		debug_object_activate(ref, &active_debug_desc);
97 }
98 
99 static void debug_active_deactivate(struct i915_active *ref)
100 {
101 	lockdep_assert_held(&ref->tree_lock);
102 	if (!atomic_read(&ref->count)) /* after the last dec */
103 		debug_object_deactivate(ref, &active_debug_desc);
104 }
105 
106 static void debug_active_fini(struct i915_active *ref)
107 {
108 	debug_object_free(ref, &active_debug_desc);
109 }
110 
111 static void debug_active_assert(struct i915_active *ref)
112 {
113 	debug_object_assert_init(ref, &active_debug_desc);
114 }
115 
116 #else
117 
118 static inline void debug_active_init(struct i915_active *ref) { }
119 static inline void debug_active_activate(struct i915_active *ref) { }
120 static inline void debug_active_deactivate(struct i915_active *ref) { }
121 static inline void debug_active_fini(struct i915_active *ref) { }
122 static inline void debug_active_assert(struct i915_active *ref) { }
123 
124 #endif
125 
126 static void
127 __active_retire(struct i915_active *ref)
128 {
129 	struct rb_root root = RB_ROOT;
130 	struct active_node *it, *n;
131 	unsigned long flags;
132 
133 	GEM_BUG_ON(i915_active_is_idle(ref));
134 
135 	/* return the unused nodes to our slabcache -- flushing the allocator */
136 	if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags))
137 		return;
138 
139 	GEM_BUG_ON(rcu_access_pointer(ref->excl.fence));
140 	debug_active_deactivate(ref);
141 
142 	/* Even if we have not used the cache, we may still have a barrier */
143 	if (!ref->cache)
144 		ref->cache = fetch_node(ref->tree.rb_node);
145 
146 	/* Keep the MRU cached node for reuse */
147 	if (ref->cache) {
148 		/* Discard all other nodes in the tree */
149 		rb_erase(&ref->cache->node, &ref->tree);
150 		root = ref->tree;
151 
152 		/* Rebuild the tree with only the cached node */
153 		rb_link_node(&ref->cache->node, NULL, &ref->tree.rb_node);
154 		rb_insert_color(&ref->cache->node, &ref->tree);
155 		GEM_BUG_ON(ref->tree.rb_node != &ref->cache->node);
156 
157 		/* Make the cached node available for reuse with any timeline */
158 		ref->cache->timeline = 0; /* needs cmpxchg(u64) */
159 	}
160 
161 	spin_unlock_irqrestore(&ref->tree_lock, flags);
162 
163 	/* After the final retire, the entire struct may be freed */
164 	if (ref->retire)
165 		ref->retire(ref);
166 
167 	/* ... except if you wait on it, you must manage your own references! */
168 	wake_up_var(ref);
169 
170 	/* Finally free the discarded timeline tree  */
171 	rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
172 		GEM_BUG_ON(i915_active_fence_isset(&it->base));
173 		kmem_cache_free(slab_cache, it);
174 	}
175 }
176 
177 static void
178 active_work(struct work_struct *wrk)
179 {
180 	struct i915_active *ref = container_of(wrk, typeof(*ref), work);
181 
182 	GEM_BUG_ON(!atomic_read(&ref->count));
183 	if (atomic_add_unless(&ref->count, -1, 1))
184 		return;
185 
186 	__active_retire(ref);
187 }
188 
189 static void
190 active_retire(struct i915_active *ref)
191 {
192 	GEM_BUG_ON(!atomic_read(&ref->count));
193 	if (atomic_add_unless(&ref->count, -1, 1))
194 		return;
195 
196 	if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) {
197 		queue_work(system_unbound_wq, &ref->work);
198 		return;
199 	}
200 
201 	__active_retire(ref);
202 }
203 
204 static inline struct dma_fence **
205 __active_fence_slot(struct i915_active_fence *active)
206 {
207 	return (struct dma_fence ** __force)&active->fence;
208 }
209 
210 static inline bool
211 active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
212 {
213 	struct i915_active_fence *active =
214 		container_of(cb, typeof(*active), cb);
215 
216 	return cmpxchg(__active_fence_slot(active), fence, NULL) == fence;
217 }
218 
219 static void
220 node_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
221 {
222 	if (active_fence_cb(fence, cb))
223 		active_retire(container_of(cb, struct active_node, base.cb)->ref);
224 }
225 
226 static void
227 excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
228 {
229 	if (active_fence_cb(fence, cb))
230 		active_retire(container_of(cb, struct i915_active, excl.cb));
231 }
232 
233 static struct active_node *__active_lookup(struct i915_active *ref, u64 idx)
234 {
235 	struct active_node *it;
236 
237 	GEM_BUG_ON(idx == 0); /* 0 is the unordered timeline, rsvd for cache */
238 
239 	/*
240 	 * We track the most recently used timeline to skip a rbtree search
241 	 * for the common case, under typical loads we never need the rbtree
242 	 * at all. We can reuse the last slot if it is empty, that is
243 	 * after the previous activity has been retired, or if it matches the
244 	 * current timeline.
245 	 */
246 	it = READ_ONCE(ref->cache);
247 	if (it) {
248 		u64 cached = READ_ONCE(it->timeline);
249 
250 		/* Once claimed, this slot will only belong to this idx */
251 		if (cached == idx)
252 			return it;
253 
254 		/*
255 		 * An unclaimed cache [.timeline=0] can only be claimed once.
256 		 *
257 		 * If the value is already non-zero, some other thread has
258 		 * claimed the cache and we know that is does not match our
259 		 * idx. If, and only if, the timeline is currently zero is it
260 		 * worth competing to claim it atomically for ourselves (for
261 		 * only the winner of that race will cmpxchg return the old
262 		 * value of 0).
263 		 */
264 		if (!cached && !cmpxchg64(&it->timeline, 0, idx))
265 			return it;
266 	}
267 
268 	BUILD_BUG_ON(offsetof(typeof(*it), node));
269 
270 	/* While active, the tree can only be built; not destroyed */
271 	GEM_BUG_ON(i915_active_is_idle(ref));
272 
273 	it = fetch_node(ref->tree.rb_node);
274 	while (it) {
275 		if (it->timeline < idx) {
276 			it = fetch_node(it->node.rb_right);
277 		} else if (it->timeline > idx) {
278 			it = fetch_node(it->node.rb_left);
279 		} else {
280 			WRITE_ONCE(ref->cache, it);
281 			break;
282 		}
283 	}
284 
285 	/* NB: If the tree rotated beneath us, we may miss our target. */
286 	return it;
287 }
288 
289 static struct i915_active_fence *
290 active_instance(struct i915_active *ref, u64 idx)
291 {
292 	struct active_node *node;
293 	struct rb_node **p, *parent;
294 
295 	node = __active_lookup(ref, idx);
296 	if (likely(node))
297 		return &node->base;
298 
299 	spin_lock_irq(&ref->tree_lock);
300 	GEM_BUG_ON(i915_active_is_idle(ref));
301 
302 	parent = NULL;
303 	p = &ref->tree.rb_node;
304 	while (*p) {
305 		parent = *p;
306 
307 		node = rb_entry(parent, struct active_node, node);
308 		if (node->timeline == idx)
309 			goto out;
310 
311 		if (node->timeline < idx)
312 			p = &parent->rb_right;
313 		else
314 			p = &parent->rb_left;
315 	}
316 
317 	/*
318 	 * XXX: We should preallocate this before i915_active_ref() is ever
319 	 *  called, but we cannot call into fs_reclaim() anyway, so use GFP_ATOMIC.
320 	 */
321 	node = kmem_cache_alloc(slab_cache, GFP_ATOMIC);
322 	if (!node)
323 		goto out;
324 
325 	__i915_active_fence_init(&node->base, NULL, node_retire);
326 	node->ref = ref;
327 	node->timeline = idx;
328 
329 	rb_link_node(&node->node, parent, p);
330 	rb_insert_color(&node->node, &ref->tree);
331 
332 out:
333 	WRITE_ONCE(ref->cache, node);
334 	spin_unlock_irq(&ref->tree_lock);
335 
336 	return &node->base;
337 }
338 
339 void __i915_active_init(struct i915_active *ref,
340 			int (*active)(struct i915_active *ref),
341 			void (*retire)(struct i915_active *ref),
342 			unsigned long flags,
343 			struct lock_class_key *mkey,
344 			struct lock_class_key *wkey)
345 {
346 	debug_active_init(ref);
347 
348 	ref->flags = flags;
349 	ref->active = active;
350 	ref->retire = retire;
351 
352 	spin_lock_init(&ref->tree_lock);
353 	ref->tree = RB_ROOT;
354 	ref->cache = NULL;
355 
356 	init_llist_head(&ref->preallocated_barriers);
357 	atomic_set(&ref->count, 0);
358 	__mutex_init(&ref->mutex, "i915_active", mkey);
359 	__i915_active_fence_init(&ref->excl, NULL, excl_retire);
360 	INIT_WORK(&ref->work, active_work);
361 #if IS_ENABLED(CONFIG_LOCKDEP)
362 	lockdep_init_map(&ref->work.lockdep_map, "i915_active.work", wkey, 0);
363 #endif
364 }
365 
366 static bool ____active_del_barrier(struct i915_active *ref,
367 				   struct active_node *node,
368 				   struct intel_engine_cs *engine)
369 
370 {
371 	struct llist_node *head = NULL, *tail = NULL;
372 	struct llist_node *pos, *next;
373 
374 	GEM_BUG_ON(node->timeline != engine->kernel_context->timeline->fence_context);
375 
376 	/*
377 	 * Rebuild the llist excluding our node. We may perform this
378 	 * outside of the kernel_context timeline mutex and so someone
379 	 * else may be manipulating the engine->barrier_tasks, in
380 	 * which case either we or they will be upset :)
381 	 *
382 	 * A second __active_del_barrier() will report failure to claim
383 	 * the active_node and the caller will just shrug and know not to
384 	 * claim ownership of its node.
385 	 *
386 	 * A concurrent i915_request_add_active_barriers() will miss adding
387 	 * any of the tasks, but we will try again on the next -- and since
388 	 * we are actively using the barrier, we know that there will be
389 	 * at least another opportunity when we idle.
390 	 */
391 	llist_for_each_safe(pos, next, llist_del_all(&engine->barrier_tasks)) {
392 		if (node == barrier_from_ll(pos)) {
393 			node = NULL;
394 			continue;
395 		}
396 
397 		pos->next = head;
398 		head = pos;
399 		if (!tail)
400 			tail = pos;
401 	}
402 	if (head)
403 		llist_add_batch(head, tail, &engine->barrier_tasks);
404 
405 	return !node;
406 }
407 
408 static bool
409 __active_del_barrier(struct i915_active *ref, struct active_node *node)
410 {
411 	return ____active_del_barrier(ref, node, barrier_to_engine(node));
412 }
413 
414 static bool
415 replace_barrier(struct i915_active *ref, struct i915_active_fence *active)
416 {
417 	if (!is_barrier(active)) /* proto-node used by our idle barrier? */
418 		return false;
419 
420 	/*
421 	 * This request is on the kernel_context timeline, and so
422 	 * we can use it to substitute for the pending idle-barrer
423 	 * request that we want to emit on the kernel_context.
424 	 */
425 	__active_del_barrier(ref, node_from_active(active));
426 	return true;
427 }
428 
429 int i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence)
430 {
431 	struct i915_active_fence *active;
432 	int err;
433 
434 	/* Prevent reaping in case we malloc/wait while building the tree */
435 	err = i915_active_acquire(ref);
436 	if (err)
437 		return err;
438 
439 	active = active_instance(ref, idx);
440 	if (!active) {
441 		err = -ENOMEM;
442 		goto out;
443 	}
444 
445 	if (replace_barrier(ref, active)) {
446 		RCU_INIT_POINTER(active->fence, NULL);
447 		atomic_dec(&ref->count);
448 	}
449 	if (!__i915_active_fence_set(active, fence))
450 		__i915_active_acquire(ref);
451 
452 out:
453 	i915_active_release(ref);
454 	return err;
455 }
456 
457 static struct dma_fence *
458 __i915_active_set_fence(struct i915_active *ref,
459 			struct i915_active_fence *active,
460 			struct dma_fence *fence)
461 {
462 	struct dma_fence *prev;
463 
464 	if (replace_barrier(ref, active)) {
465 		RCU_INIT_POINTER(active->fence, fence);
466 		return NULL;
467 	}
468 
469 	rcu_read_lock();
470 	prev = __i915_active_fence_set(active, fence);
471 	if (prev)
472 		prev = dma_fence_get_rcu(prev);
473 	else
474 		__i915_active_acquire(ref);
475 	rcu_read_unlock();
476 
477 	return prev;
478 }
479 
480 static struct i915_active_fence *
481 __active_fence(struct i915_active *ref, u64 idx)
482 {
483 	struct active_node *it;
484 
485 	it = __active_lookup(ref, idx);
486 	if (unlikely(!it)) { /* Contention with parallel tree builders! */
487 		spin_lock_irq(&ref->tree_lock);
488 		it = __active_lookup(ref, idx);
489 		spin_unlock_irq(&ref->tree_lock);
490 	}
491 	GEM_BUG_ON(!it); /* slot must be preallocated */
492 
493 	return &it->base;
494 }
495 
496 struct dma_fence *
497 __i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence)
498 {
499 	/* Only valid while active, see i915_active_acquire_for_context() */
500 	return __i915_active_set_fence(ref, __active_fence(ref, idx), fence);
501 }
502 
503 struct dma_fence *
504 i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
505 {
506 	/* We expect the caller to manage the exclusive timeline ordering */
507 	return __i915_active_set_fence(ref, &ref->excl, f);
508 }
509 
510 bool i915_active_acquire_if_busy(struct i915_active *ref)
511 {
512 	debug_active_assert(ref);
513 	return atomic_add_unless(&ref->count, 1, 0);
514 }
515 
516 static void __i915_active_activate(struct i915_active *ref)
517 {
518 	spin_lock_irq(&ref->tree_lock); /* __active_retire() */
519 	if (!atomic_fetch_inc(&ref->count))
520 		debug_active_activate(ref);
521 	spin_unlock_irq(&ref->tree_lock);
522 }
523 
524 int i915_active_acquire(struct i915_active *ref)
525 {
526 	int err;
527 
528 	if (i915_active_acquire_if_busy(ref))
529 		return 0;
530 
531 	if (!ref->active) {
532 		__i915_active_activate(ref);
533 		return 0;
534 	}
535 
536 	err = mutex_lock_interruptible(&ref->mutex);
537 	if (err)
538 		return err;
539 
540 	if (likely(!i915_active_acquire_if_busy(ref))) {
541 		err = ref->active(ref);
542 		if (!err)
543 			__i915_active_activate(ref);
544 	}
545 
546 	mutex_unlock(&ref->mutex);
547 
548 	return err;
549 }
550 
551 int i915_active_acquire_for_context(struct i915_active *ref, u64 idx)
552 {
553 	struct i915_active_fence *active;
554 	int err;
555 
556 	err = i915_active_acquire(ref);
557 	if (err)
558 		return err;
559 
560 	active = active_instance(ref, idx);
561 	if (!active) {
562 		i915_active_release(ref);
563 		return -ENOMEM;
564 	}
565 
566 	return 0; /* return with active ref */
567 }
568 
569 void i915_active_release(struct i915_active *ref)
570 {
571 	debug_active_assert(ref);
572 	active_retire(ref);
573 }
574 
575 static void enable_signaling(struct i915_active_fence *active)
576 {
577 	struct dma_fence *fence;
578 
579 	if (unlikely(is_barrier(active)))
580 		return;
581 
582 	fence = i915_active_fence_get(active);
583 	if (!fence)
584 		return;
585 
586 	dma_fence_enable_sw_signaling(fence);
587 	dma_fence_put(fence);
588 }
589 
590 static int flush_barrier(struct active_node *it)
591 {
592 	struct intel_engine_cs *engine;
593 
594 	if (likely(!is_barrier(&it->base)))
595 		return 0;
596 
597 	engine = __barrier_to_engine(it);
598 	smp_rmb(); /* serialise with add_active_barriers */
599 	if (!is_barrier(&it->base))
600 		return 0;
601 
602 	return intel_engine_flush_barriers(engine);
603 }
604 
605 static int flush_lazy_signals(struct i915_active *ref)
606 {
607 	struct active_node *it, *n;
608 	int err = 0;
609 
610 	enable_signaling(&ref->excl);
611 	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
612 		err = flush_barrier(it); /* unconnected idle barrier? */
613 		if (err)
614 			break;
615 
616 		enable_signaling(&it->base);
617 	}
618 
619 	return err;
620 }
621 
622 int __i915_active_wait(struct i915_active *ref, int state)
623 {
624 	might_sleep();
625 
626 	/* Any fence added after the wait begins will not be auto-signaled */
627 	if (i915_active_acquire_if_busy(ref)) {
628 		int err;
629 
630 		err = flush_lazy_signals(ref);
631 		i915_active_release(ref);
632 		if (err)
633 			return err;
634 
635 		if (___wait_var_event(ref, i915_active_is_idle(ref),
636 				      state, 0, 0, schedule()))
637 			return -EINTR;
638 	}
639 
640 	/*
641 	 * After the wait is complete, the caller may free the active.
642 	 * We have to flush any concurrent retirement before returning.
643 	 */
644 	flush_work(&ref->work);
645 	return 0;
646 }
647 
648 static int __await_active(struct i915_active_fence *active,
649 			  int (*fn)(void *arg, struct dma_fence *fence),
650 			  void *arg)
651 {
652 	struct dma_fence *fence;
653 
654 	if (is_barrier(active)) /* XXX flush the barrier? */
655 		return 0;
656 
657 	fence = i915_active_fence_get(active);
658 	if (fence) {
659 		int err;
660 
661 		err = fn(arg, fence);
662 		dma_fence_put(fence);
663 		if (err < 0)
664 			return err;
665 	}
666 
667 	return 0;
668 }
669 
670 struct wait_barrier {
671 	struct wait_queue_entry base;
672 	struct i915_active *ref;
673 };
674 
675 static int
676 barrier_wake(wait_queue_entry_t *wq, unsigned int mode, int flags, void *key)
677 {
678 	struct wait_barrier *wb = container_of(wq, typeof(*wb), base);
679 
680 	if (i915_active_is_idle(wb->ref)) {
681 		list_del(&wq->entry);
682 		i915_sw_fence_complete(wq->private);
683 		kfree(wq);
684 	}
685 
686 	return 0;
687 }
688 
689 static int __await_barrier(struct i915_active *ref, struct i915_sw_fence *fence)
690 {
691 	struct wait_barrier *wb;
692 
693 	wb = kmalloc(sizeof(*wb), GFP_KERNEL);
694 	if (unlikely(!wb))
695 		return -ENOMEM;
696 
697 	GEM_BUG_ON(i915_active_is_idle(ref));
698 	if (!i915_sw_fence_await(fence)) {
699 		kfree(wb);
700 		return -EINVAL;
701 	}
702 
703 	wb->base.flags = 0;
704 	wb->base.func = barrier_wake;
705 	wb->base.private = fence;
706 	wb->ref = ref;
707 
708 	add_wait_queue(__var_waitqueue(ref), &wb->base);
709 	return 0;
710 }
711 
712 static int await_active(struct i915_active *ref,
713 			unsigned int flags,
714 			int (*fn)(void *arg, struct dma_fence *fence),
715 			void *arg, struct i915_sw_fence *barrier)
716 {
717 	int err = 0;
718 
719 	if (!i915_active_acquire_if_busy(ref))
720 		return 0;
721 
722 	if (flags & I915_ACTIVE_AWAIT_EXCL &&
723 	    rcu_access_pointer(ref->excl.fence)) {
724 		err = __await_active(&ref->excl, fn, arg);
725 		if (err)
726 			goto out;
727 	}
728 
729 	if (flags & I915_ACTIVE_AWAIT_ACTIVE) {
730 		struct active_node *it, *n;
731 
732 		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
733 			err = __await_active(&it->base, fn, arg);
734 			if (err)
735 				goto out;
736 		}
737 	}
738 
739 	if (flags & I915_ACTIVE_AWAIT_BARRIER) {
740 		err = flush_lazy_signals(ref);
741 		if (err)
742 			goto out;
743 
744 		err = __await_barrier(ref, barrier);
745 		if (err)
746 			goto out;
747 	}
748 
749 out:
750 	i915_active_release(ref);
751 	return err;
752 }
753 
754 static int rq_await_fence(void *arg, struct dma_fence *fence)
755 {
756 	return i915_request_await_dma_fence(arg, fence);
757 }
758 
759 int i915_request_await_active(struct i915_request *rq,
760 			      struct i915_active *ref,
761 			      unsigned int flags)
762 {
763 	return await_active(ref, flags, rq_await_fence, rq, &rq->submit);
764 }
765 
766 static int sw_await_fence(void *arg, struct dma_fence *fence)
767 {
768 	return i915_sw_fence_await_dma_fence(arg, fence, 0,
769 					     GFP_NOWAIT | __GFP_NOWARN);
770 }
771 
772 int i915_sw_fence_await_active(struct i915_sw_fence *fence,
773 			       struct i915_active *ref,
774 			       unsigned int flags)
775 {
776 	return await_active(ref, flags, sw_await_fence, fence, fence);
777 }
778 
779 void i915_active_fini(struct i915_active *ref)
780 {
781 	debug_active_fini(ref);
782 	GEM_BUG_ON(atomic_read(&ref->count));
783 	GEM_BUG_ON(work_pending(&ref->work));
784 	mutex_destroy(&ref->mutex);
785 
786 	if (ref->cache)
787 		kmem_cache_free(slab_cache, ref->cache);
788 }
789 
790 static inline bool is_idle_barrier(struct active_node *node, u64 idx)
791 {
792 	return node->timeline == idx && !i915_active_fence_isset(&node->base);
793 }
794 
795 static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
796 {
797 	struct rb_node *prev, *p;
798 
799 	if (RB_EMPTY_ROOT(&ref->tree))
800 		return NULL;
801 
802 	GEM_BUG_ON(i915_active_is_idle(ref));
803 
804 	/*
805 	 * Try to reuse any existing barrier nodes already allocated for this
806 	 * i915_active, due to overlapping active phases there is likely a
807 	 * node kept alive (as we reuse before parking). We prefer to reuse
808 	 * completely idle barriers (less hassle in manipulating the llists),
809 	 * but otherwise any will do.
810 	 */
811 	if (ref->cache && is_idle_barrier(ref->cache, idx)) {
812 		p = &ref->cache->node;
813 		goto match;
814 	}
815 
816 	prev = NULL;
817 	p = ref->tree.rb_node;
818 	while (p) {
819 		struct active_node *node =
820 			rb_entry(p, struct active_node, node);
821 
822 		if (is_idle_barrier(node, idx))
823 			goto match;
824 
825 		prev = p;
826 		if (node->timeline < idx)
827 			p = READ_ONCE(p->rb_right);
828 		else
829 			p = READ_ONCE(p->rb_left);
830 	}
831 
832 	/*
833 	 * No quick match, but we did find the leftmost rb_node for the
834 	 * kernel_context. Walk the rb_tree in-order to see if there were
835 	 * any idle-barriers on this timeline that we missed, or just use
836 	 * the first pending barrier.
837 	 */
838 	for (p = prev; p; p = rb_next(p)) {
839 		struct active_node *node =
840 			rb_entry(p, struct active_node, node);
841 		struct intel_engine_cs *engine;
842 
843 		if (node->timeline > idx)
844 			break;
845 
846 		if (node->timeline < idx)
847 			continue;
848 
849 		if (is_idle_barrier(node, idx))
850 			goto match;
851 
852 		/*
853 		 * The list of pending barriers is protected by the
854 		 * kernel_context timeline, which notably we do not hold
855 		 * here. i915_request_add_active_barriers() may consume
856 		 * the barrier before we claim it, so we have to check
857 		 * for success.
858 		 */
859 		engine = __barrier_to_engine(node);
860 		smp_rmb(); /* serialise with add_active_barriers */
861 		if (is_barrier(&node->base) &&
862 		    ____active_del_barrier(ref, node, engine))
863 			goto match;
864 	}
865 
866 	return NULL;
867 
868 match:
869 	spin_lock_irq(&ref->tree_lock);
870 	rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
871 	if (p == &ref->cache->node)
872 		WRITE_ONCE(ref->cache, NULL);
873 	spin_unlock_irq(&ref->tree_lock);
874 
875 	return rb_entry(p, struct active_node, node);
876 }
877 
878 int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
879 					    struct intel_engine_cs *engine)
880 {
881 	intel_engine_mask_t tmp, mask = engine->mask;
882 	struct llist_node *first = NULL, *last = NULL;
883 	struct intel_gt *gt = engine->gt;
884 
885 	GEM_BUG_ON(i915_active_is_idle(ref));
886 
887 	/* Wait until the previous preallocation is completed */
888 	while (!llist_empty(&ref->preallocated_barriers))
889 		cond_resched();
890 
891 	/*
892 	 * Preallocate a node for each physical engine supporting the target
893 	 * engine (remember virtual engines have more than one sibling).
894 	 * We can then use the preallocated nodes in
895 	 * i915_active_acquire_barrier()
896 	 */
897 	GEM_BUG_ON(!mask);
898 	for_each_engine_masked(engine, gt, mask, tmp) {
899 		u64 idx = engine->kernel_context->timeline->fence_context;
900 		struct llist_node *prev = first;
901 		struct active_node *node;
902 
903 		rcu_read_lock();
904 		node = reuse_idle_barrier(ref, idx);
905 		rcu_read_unlock();
906 		if (!node) {
907 			node = kmem_cache_alloc(slab_cache, GFP_KERNEL);
908 			if (!node)
909 				goto unwind;
910 
911 			RCU_INIT_POINTER(node->base.fence, NULL);
912 			node->base.cb.func = node_retire;
913 			node->timeline = idx;
914 			node->ref = ref;
915 		}
916 
917 		if (!i915_active_fence_isset(&node->base)) {
918 			/*
919 			 * Mark this as being *our* unconnected proto-node.
920 			 *
921 			 * Since this node is not in any list, and we have
922 			 * decoupled it from the rbtree, we can reuse the
923 			 * request to indicate this is an idle-barrier node
924 			 * and then we can use the rb_node and list pointers
925 			 * for our tracking of the pending barrier.
926 			 */
927 			RCU_INIT_POINTER(node->base.fence, ERR_PTR(-EAGAIN));
928 			node->base.cb.node.prev = (void *)engine;
929 			__i915_active_acquire(ref);
930 		}
931 		GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN));
932 
933 		GEM_BUG_ON(barrier_to_engine(node) != engine);
934 		first = barrier_to_ll(node);
935 		first->next = prev;
936 		if (!last)
937 			last = first;
938 		intel_engine_pm_get(engine);
939 	}
940 
941 	GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
942 	llist_add_batch(first, last, &ref->preallocated_barriers);
943 
944 	return 0;
945 
946 unwind:
947 	while (first) {
948 		struct active_node *node = barrier_from_ll(first);
949 
950 		first = first->next;
951 
952 		atomic_dec(&ref->count);
953 		intel_engine_pm_put(barrier_to_engine(node));
954 
955 		kmem_cache_free(slab_cache, node);
956 	}
957 	return -ENOMEM;
958 }
959 
960 void i915_active_acquire_barrier(struct i915_active *ref)
961 {
962 	struct llist_node *pos, *next;
963 	unsigned long flags;
964 
965 	GEM_BUG_ON(i915_active_is_idle(ref));
966 
967 	/*
968 	 * Transfer the list of preallocated barriers into the
969 	 * i915_active rbtree, but only as proto-nodes. They will be
970 	 * populated by i915_request_add_active_barriers() to point to the
971 	 * request that will eventually release them.
972 	 */
973 	llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
974 		struct active_node *node = barrier_from_ll(pos);
975 		struct intel_engine_cs *engine = barrier_to_engine(node);
976 		struct rb_node **p, *parent;
977 
978 		spin_lock_irqsave_nested(&ref->tree_lock, flags,
979 					 SINGLE_DEPTH_NESTING);
980 		parent = NULL;
981 		p = &ref->tree.rb_node;
982 		while (*p) {
983 			struct active_node *it;
984 
985 			parent = *p;
986 
987 			it = rb_entry(parent, struct active_node, node);
988 			if (it->timeline < node->timeline)
989 				p = &parent->rb_right;
990 			else
991 				p = &parent->rb_left;
992 		}
993 		rb_link_node(&node->node, parent, p);
994 		rb_insert_color(&node->node, &ref->tree);
995 		spin_unlock_irqrestore(&ref->tree_lock, flags);
996 
997 		GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
998 		llist_add(barrier_to_ll(node), &engine->barrier_tasks);
999 		intel_engine_pm_put_delay(engine, 1);
1000 	}
1001 }
1002 
1003 static struct dma_fence **ll_to_fence_slot(struct llist_node *node)
1004 {
1005 	return __active_fence_slot(&barrier_from_ll(node)->base);
1006 }
1007 
1008 void i915_request_add_active_barriers(struct i915_request *rq)
1009 {
1010 	struct intel_engine_cs *engine = rq->engine;
1011 	struct llist_node *node, *next;
1012 	unsigned long flags;
1013 
1014 	GEM_BUG_ON(!intel_context_is_barrier(rq->context));
1015 	GEM_BUG_ON(intel_engine_is_virtual(engine));
1016 	GEM_BUG_ON(i915_request_timeline(rq) != engine->kernel_context->timeline);
1017 
1018 	node = llist_del_all(&engine->barrier_tasks);
1019 	if (!node)
1020 		return;
1021 	/*
1022 	 * Attach the list of proto-fences to the in-flight request such
1023 	 * that the parent i915_active will be released when this request
1024 	 * is retired.
1025 	 */
1026 	spin_lock_irqsave(&rq->lock, flags);
1027 	llist_for_each_safe(node, next, node) {
1028 		/* serialise with reuse_idle_barrier */
1029 		smp_store_mb(*ll_to_fence_slot(node), &rq->fence);
1030 		list_add_tail((struct list_head *)node, &rq->fence.cb_list);
1031 	}
1032 	spin_unlock_irqrestore(&rq->lock, flags);
1033 }
1034 
1035 /*
1036  * __i915_active_fence_set: Update the last active fence along its timeline
1037  * @active: the active tracker
1038  * @fence: the new fence (under construction)
1039  *
1040  * Records the new @fence as the last active fence along its timeline in
1041  * this active tracker, moving the tracking callbacks from the previous
1042  * fence onto this one. Returns the previous fence (if not already completed),
1043  * which the caller must ensure is executed before the new fence. To ensure
1044  * that the order of fences within the timeline of the i915_active_fence is
1045  * understood, it should be locked by the caller.
1046  */
1047 struct dma_fence *
1048 __i915_active_fence_set(struct i915_active_fence *active,
1049 			struct dma_fence *fence)
1050 {
1051 	struct dma_fence *prev;
1052 	unsigned long flags;
1053 
1054 	if (fence == rcu_access_pointer(active->fence))
1055 		return fence;
1056 
1057 	GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
1058 
1059 	/*
1060 	 * Consider that we have two threads arriving (A and B), with
1061 	 * C already resident as the active->fence.
1062 	 *
1063 	 * A does the xchg first, and so it sees C or NULL depending
1064 	 * on the timing of the interrupt handler. If it is NULL, the
1065 	 * previous fence must have been signaled and we know that
1066 	 * we are first on the timeline. If it is still present,
1067 	 * we acquire the lock on that fence and serialise with the interrupt
1068 	 * handler, in the process removing it from any future interrupt
1069 	 * callback. A will then wait on C before executing (if present).
1070 	 *
1071 	 * As B is second, it sees A as the previous fence and so waits for
1072 	 * it to complete its transition and takes over the occupancy for
1073 	 * itself -- remembering that it needs to wait on A before executing.
1074 	 *
1075 	 * Note the strong ordering of the timeline also provides consistent
1076 	 * nesting rules for the fence->lock; the inner lock is always the
1077 	 * older lock.
1078 	 */
1079 	spin_lock_irqsave(fence->lock, flags);
1080 	prev = xchg(__active_fence_slot(active), fence);
1081 	if (prev) {
1082 		GEM_BUG_ON(prev == fence);
1083 		spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
1084 		__list_del_entry(&active->cb.node);
1085 		spin_unlock(prev->lock); /* serialise with prev->cb_list */
1086 	}
1087 	list_add_tail(&active->cb.node, &fence->cb_list);
1088 	spin_unlock_irqrestore(fence->lock, flags);
1089 
1090 	return prev;
1091 }
1092 
1093 int i915_active_fence_set(struct i915_active_fence *active,
1094 			  struct i915_request *rq)
1095 {
1096 	struct dma_fence *fence;
1097 	int err = 0;
1098 
1099 	/* Must maintain timeline ordering wrt previous active requests */
1100 	rcu_read_lock();
1101 	fence = __i915_active_fence_set(active, &rq->fence);
1102 	if (fence) /* but the previous fence may not belong to that timeline! */
1103 		fence = dma_fence_get_rcu(fence);
1104 	rcu_read_unlock();
1105 	if (fence) {
1106 		err = i915_request_await_dma_fence(rq, fence);
1107 		dma_fence_put(fence);
1108 	}
1109 
1110 	return err;
1111 }
1112 
1113 void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb)
1114 {
1115 	active_fence_cb(fence, cb);
1116 }
1117 
1118 struct auto_active {
1119 	struct i915_active base;
1120 	struct kref ref;
1121 };
1122 
1123 struct i915_active *i915_active_get(struct i915_active *ref)
1124 {
1125 	struct auto_active *aa = container_of(ref, typeof(*aa), base);
1126 
1127 	kref_get(&aa->ref);
1128 	return &aa->base;
1129 }
1130 
1131 static void auto_release(struct kref *ref)
1132 {
1133 	struct auto_active *aa = container_of(ref, typeof(*aa), ref);
1134 
1135 	i915_active_fini(&aa->base);
1136 	kfree(aa);
1137 }
1138 
1139 void i915_active_put(struct i915_active *ref)
1140 {
1141 	struct auto_active *aa = container_of(ref, typeof(*aa), base);
1142 
1143 	kref_put(&aa->ref, auto_release);
1144 }
1145 
1146 static int auto_active(struct i915_active *ref)
1147 {
1148 	i915_active_get(ref);
1149 	return 0;
1150 }
1151 
1152 static void auto_retire(struct i915_active *ref)
1153 {
1154 	i915_active_put(ref);
1155 }
1156 
1157 struct i915_active *i915_active_create(void)
1158 {
1159 	struct auto_active *aa;
1160 
1161 	aa = kmalloc(sizeof(*aa), GFP_KERNEL);
1162 	if (!aa)
1163 		return NULL;
1164 
1165 	kref_init(&aa->ref);
1166 	i915_active_init(&aa->base, auto_active, auto_retire, 0);
1167 
1168 	return &aa->base;
1169 }
1170 
1171 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1172 #include "selftests/i915_active.c"
1173 #endif
1174 
1175 void i915_active_module_exit(void)
1176 {
1177 	kmem_cache_destroy(slab_cache);
1178 }
1179 
1180 int __init i915_active_module_init(void)
1181 {
1182 	slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
1183 	if (!slab_cache)
1184 		return -ENOMEM;
1185 
1186 	return 0;
1187 }
1188