/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/debugobjects.h>

#include "gt/intel_context.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_ring.h"

#include "i915_drv.h"
#include "i915_active.h"

/*
 * Active refs memory management
 *
 * To be more economical with memory, we reap all the i915_active trees as
 * they idle (when we know the active requests are inactive) and allocate the
 * nodes from a local slab cache to hopefully reduce the fragmentation.
 */
static struct kmem_cache *slab_cache;

struct active_node {
	struct rb_node node;		/* node in ref->tree, keyed by timeline */
	struct i915_active_fence base;	/* last fence seen on this timeline */
	struct i915_active *ref;	/* backpointer for retirement */
	u64 timeline;			/* fence context of the tracked timeline */
};

#define fetch_node(x) rb_entry(READ_ONCE(x), typeof(struct active_node), node)

static inline struct active_node *
node_from_active(struct i915_active_fence *active)
{
	return container_of(active, struct active_node, base);
}

#define take_preallocated_barriers(x) llist_del_all(&(x)->preallocated_barriers)

static inline bool is_barrier(const struct i915_active_fence *active)
{
	return IS_ERR(rcu_access_pointer(active->fence));
}

static inline struct llist_node *barrier_to_ll(struct active_node *node)
{
	GEM_BUG_ON(!is_barrier(&node->base));
	return (struct llist_node *)&node->base.cb.node;
}

static inline struct intel_engine_cs *
__barrier_to_engine(struct active_node *node)
{
	return (struct intel_engine_cs *)READ_ONCE(node->base.cb.node.prev);
}

static inline struct intel_engine_cs *
barrier_to_engine(struct active_node *node)
{
	GEM_BUG_ON(!is_barrier(&node->base));
	return __barrier_to_engine(node);
}

static inline struct active_node *barrier_from_ll(struct llist_node *x)
{
	return container_of((struct list_head *)x,
			    struct active_node, base.cb.node);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS)

static void *active_debug_hint(void *addr)
{
	struct i915_active *ref = addr;

	return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref;
}

static const struct debug_obj_descr active_debug_desc = {
	.name = "i915_active",
	.debug_hint = active_debug_hint,
};

static void debug_active_init(struct i915_active *ref)
{
	debug_object_init(ref, &active_debug_desc);
}

static void debug_active_activate(struct i915_active *ref)
{
	lockdep_assert_held(&ref->tree_lock);
	debug_object_activate(ref, &active_debug_desc);
}

static void debug_active_deactivate(struct i915_active *ref)
{
	lockdep_assert_held(&ref->tree_lock);
	if (!atomic_read(&ref->count)) /* after the last dec */
		debug_object_deactivate(ref, &active_debug_desc);
}

static void debug_active_fini(struct i915_active *ref)
{
	debug_object_free(ref, &active_debug_desc);
}

static void debug_active_assert(struct i915_active *ref)
{
	debug_object_assert_init(ref, &active_debug_desc);
}

#else

static inline void debug_active_init(struct i915_active *ref) { }
static inline void debug_active_activate(struct i915_active *ref) { }
static inline void debug_active_deactivate(struct i915_active *ref) { }
static inline void debug_active_fini(struct i915_active *ref) { }
static inline void debug_active_assert(struct i915_active *ref) { }

#endif

static void
__active_retire(struct i915_active *ref)
{
	struct rb_root root = RB_ROOT;
	struct active_node *it, *n;
	unsigned long flags;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/* return the unused nodes to our slabcache -- flushing the allocator */
	if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags))
		return;

	GEM_BUG_ON(rcu_access_pointer(ref->excl.fence));
	debug_active_deactivate(ref);

	/* Even if we have not used the cache, we may still have a barrier */
	if (!ref->cache)
		ref->cache = fetch_node(ref->tree.rb_node);

	/* Keep the MRU cached node for reuse */
	if (ref->cache) {
		/* Discard all other nodes in the tree */
		rb_erase(&ref->cache->node, &ref->tree);
		root = ref->tree;

		/* Rebuild the tree with only the cached node */
		rb_link_node(&ref->cache->node, NULL, &ref->tree.rb_node);
		rb_insert_color(&ref->cache->node, &ref->tree);
		GEM_BUG_ON(ref->tree.rb_node != &ref->cache->node);

		/* Make the cached node available for reuse with any timeline */
		ref->cache->timeline = 0; /* needs cmpxchg(u64) */
	}

	spin_unlock_irqrestore(&ref->tree_lock, flags);

	/* After the final retire, the entire struct may be freed */
	if (ref->retire)
		ref->retire(ref);

	/* ... except if you wait on it, you must manage your own references! */
	wake_up_var(ref);

	/* Finally free the discarded timeline tree */
	rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
		GEM_BUG_ON(i915_active_fence_isset(&it->base));
		kmem_cache_free(slab_cache, it);
	}
}

static void
active_work(struct work_struct *wrk)
{
	struct i915_active *ref = container_of(wrk, typeof(*ref), work);

	GEM_BUG_ON(!atomic_read(&ref->count));
	if (atomic_add_unless(&ref->count, -1, 1))
		return;

	__active_retire(ref);
}

static void
active_retire(struct i915_active *ref)
{
	GEM_BUG_ON(!atomic_read(&ref->count));
	if (atomic_add_unless(&ref->count, -1, 1))
		return;

	if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) {
		queue_work(system_unbound_wq, &ref->work);
		return;
	}

	__active_retire(ref);
}

static inline struct dma_fence **
__active_fence_slot(struct i915_active_fence *active)
{
	return (struct dma_fence ** __force)&active->fence;
}

static inline bool
active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct i915_active_fence *active =
		container_of(cb, typeof(*active), cb);

	return try_cmpxchg(__active_fence_slot(active), &fence, NULL);
}

static void
node_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	if (active_fence_cb(fence, cb))
		active_retire(container_of(cb, struct active_node, base.cb)->ref);
}

static void
excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	if (active_fence_cb(fence, cb))
		active_retire(container_of(cb, struct i915_active, excl.cb));
}

static struct active_node *__active_lookup(struct i915_active *ref, u64 idx)
{
	struct active_node *it;

	GEM_BUG_ON(idx == 0); /* 0 is the unordered timeline, rsvd for cache */

	/*
	 * We track the most recently used timeline to skip an rbtree search
	 * for the common case; under typical loads we never need the rbtree
	 * at all. We can reuse the last slot if it is empty, that is,
	 * after the previous activity has been retired, or if it matches the
	 * current timeline.
	 */
	it = READ_ONCE(ref->cache);
	if (it) {
		u64 cached = READ_ONCE(it->timeline);

		/* Once claimed, this slot will only belong to this idx */
		if (cached == idx)
			return it;

		/*
		 * An unclaimed cache [.timeline=0] can only be claimed once.
		 *
		 * If the value is already non-zero, some other thread has
		 * claimed the cache and we know that it does not match our
		 * idx. If, and only if, the timeline is currently zero is it
		 * worth competing to claim it atomically for ourselves (only
		 * the winner of that race will see cmpxchg return the old
		 * value of 0).
		 */
		if (!cached && !cmpxchg64(&it->timeline, 0, idx))
			return it;
	}

	BUILD_BUG_ON(offsetof(typeof(*it), node));

	/* While active, the tree can only be built; not destroyed */
	GEM_BUG_ON(i915_active_is_idle(ref));

	it = fetch_node(ref->tree.rb_node);
	while (it) {
		if (it->timeline < idx) {
			it = fetch_node(it->node.rb_right);
		} else if (it->timeline > idx) {
			it = fetch_node(it->node.rb_left);
		} else {
			WRITE_ONCE(ref->cache, it);
			break;
		}
	}

	/* NB: If the tree rotated beneath us, we may miss our target. */
	return it;
}

static struct i915_active_fence *
active_instance(struct i915_active *ref, u64 idx)
{
	struct active_node *node;
	struct rb_node **p, *parent;

	node = __active_lookup(ref, idx);
	if (likely(node))
		return &node->base;

	spin_lock_irq(&ref->tree_lock);
	GEM_BUG_ON(i915_active_is_idle(ref));

	parent = NULL;
	p = &ref->tree.rb_node;
	while (*p) {
		parent = *p;

		node = rb_entry(parent, struct active_node, node);
		if (node->timeline == idx)
			goto out;

		if (node->timeline < idx)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	/*
	 * XXX: We should preallocate this before i915_active_ref() is ever
	 * called, but we cannot call into fs_reclaim() anyway, so use GFP_ATOMIC.
	 */
	node = kmem_cache_alloc(slab_cache, GFP_ATOMIC);
	if (!node)
		goto out;

	__i915_active_fence_init(&node->base, NULL, node_retire);
	node->ref = ref;
	node->timeline = idx;

	rb_link_node(&node->node, parent, p);
	rb_insert_color(&node->node, &ref->tree);

out:
	WRITE_ONCE(ref->cache, node);
	spin_unlock_irq(&ref->tree_lock);

	return &node->base;
}

void __i915_active_init(struct i915_active *ref,
			int (*active)(struct i915_active *ref),
			void (*retire)(struct i915_active *ref),
			unsigned long flags,
			struct lock_class_key *mkey,
			struct lock_class_key *wkey)
{
	debug_active_init(ref);

	ref->flags = flags;
	ref->active = active;
	ref->retire = retire;

	spin_lock_init(&ref->tree_lock);
	ref->tree = RB_ROOT;
	ref->cache = NULL;

	init_llist_head(&ref->preallocated_barriers);
	atomic_set(&ref->count, 0);
	__mutex_init(&ref->mutex, "i915_active", mkey);
	__i915_active_fence_init(&ref->excl, NULL, excl_retire);
	INIT_WORK(&ref->work, active_work);
#if IS_ENABLED(CONFIG_LOCKDEP)
	lockdep_init_map(&ref->work.lockdep_map, "i915_active.work", wkey, 0);
#endif
}
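
/*
 * For illustration only: a hypothetical caller might pair the tracker with
 * the requests it wants to watch roughly like this (my_active/my_retire are
 * placeholder callbacks, not part of the i915 API):
 *
 *	i915_active_init(&obj->active, my_active, my_retire, 0);
 *
 *	err = i915_active_acquire(&obj->active);
 *	if (err)
 *		return err;
 *	err = i915_active_add_request(&obj->active, rq);
 *	i915_active_release(&obj->active);
 *
 * my_active() runs on the first acquire after idling; my_retire() runs once
 * the last reference is dropped and every tracked request has completed.
 */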

static bool ____active_del_barrier(struct i915_active *ref,
				   struct active_node *node,
				   struct intel_engine_cs *engine)
{
	struct llist_node *head = NULL, *tail = NULL;
	struct llist_node *pos, *next;

	GEM_BUG_ON(node->timeline != engine->kernel_context->timeline->fence_context);

	/*
	 * Rebuild the llist excluding our node. We may perform this
	 * outside of the kernel_context timeline mutex and so someone
	 * else may be manipulating the engine->barrier_tasks, in
	 * which case either we or they will be upset :)
	 *
	 * A second __active_del_barrier() will report failure to claim
	 * the active_node and the caller will just shrug and know not to
	 * claim ownership of its node.
	 *
	 * A concurrent i915_request_add_active_barriers() will miss adding
	 * any of the tasks, but we will try again on the next -- and since
	 * we are actively using the barrier, we know that there will be
	 * at least another opportunity when we idle.
	 */
	llist_for_each_safe(pos, next, llist_del_all(&engine->barrier_tasks)) {
		if (node == barrier_from_ll(pos)) {
			node = NULL;
			continue;
		}

		pos->next = head;
		head = pos;
		if (!tail)
			tail = pos;
	}
	if (head)
		llist_add_batch(head, tail, &engine->barrier_tasks);

	return !node;
}

static bool
__active_del_barrier(struct i915_active *ref, struct active_node *node)
{
	return ____active_del_barrier(ref, node, barrier_to_engine(node));
}

static bool
replace_barrier(struct i915_active *ref, struct i915_active_fence *active)
{
	if (!is_barrier(active)) /* proto-node used by our idle barrier? */
		return false;

	/*
	 * This request is on the kernel_context timeline, and so
	 * we can use it to substitute for the pending idle-barrier
	 * request that we want to emit on the kernel_context.
	 */
	return __active_del_barrier(ref, node_from_active(active));
}

int i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
{
	u64 idx = i915_request_timeline(rq)->fence_context;
	struct dma_fence *fence = &rq->fence;
	struct i915_active_fence *active;
	int err;

	/* Prevent reaping in case we malloc/wait while building the tree */
	err = i915_active_acquire(ref);
	if (err)
		return err;

	do {
		active = active_instance(ref, idx);
		if (!active) {
			err = -ENOMEM;
			goto out;
		}

		if (replace_barrier(ref, active)) {
			RCU_INIT_POINTER(active->fence, NULL);
			atomic_dec(&ref->count);
		}
	} while (unlikely(is_barrier(active)));
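	/*
	 * Note the retry above: active_instance() may hand back a slot that
	 * is still marked as a proto-barrier. replace_barrier() tries to
	 * claim it off engine->barrier_tasks; if a concurrent user beats us
	 * to the claim, we retry the lookup until the slot is no longer a
	 * barrier.
	 */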

	fence = __i915_active_fence_set(active, fence);
	if (!fence)
		__i915_active_acquire(ref);
	else
		dma_fence_put(fence);

out:
	i915_active_release(ref);
	return err;
}

static struct dma_fence *
__i915_active_set_fence(struct i915_active *ref,
			struct i915_active_fence *active,
			struct dma_fence *fence)
{
	struct dma_fence *prev;

	if (replace_barrier(ref, active)) {
		RCU_INIT_POINTER(active->fence, fence);
		return NULL;
	}

	prev = __i915_active_fence_set(active, fence);
	if (!prev)
		__i915_active_acquire(ref);

	return prev;
}

struct dma_fence *
i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
{
	/* We expect the caller to manage the exclusive timeline ordering */
	return __i915_active_set_fence(ref, &ref->excl, f);
}

bool i915_active_acquire_if_busy(struct i915_active *ref)
{
	debug_active_assert(ref);
	return atomic_add_unless(&ref->count, 1, 0);
}

static void __i915_active_activate(struct i915_active *ref)
{
	spin_lock_irq(&ref->tree_lock); /* __active_retire() */
	if (!atomic_fetch_inc(&ref->count))
		debug_active_activate(ref);
	spin_unlock_irq(&ref->tree_lock);
}

int i915_active_acquire(struct i915_active *ref)
{
	int err;

	if (i915_active_acquire_if_busy(ref))
		return 0;

	if (!ref->active) {
		__i915_active_activate(ref);
		return 0;
	}

	err = mutex_lock_interruptible(&ref->mutex);
	if (err)
		return err;

	if (likely(!i915_active_acquire_if_busy(ref))) {
		err = ref->active(ref);
		if (!err)
			__i915_active_activate(ref);
	}

	mutex_unlock(&ref->mutex);

	return err;
}

void i915_active_release(struct i915_active *ref)
{
	debug_active_assert(ref);
	active_retire(ref);
}

static void enable_signaling(struct i915_active_fence *active)
{
	struct dma_fence *fence;

	if (unlikely(is_barrier(active)))
		return;

	fence = i915_active_fence_get(active);
	if (!fence)
		return;

	dma_fence_enable_sw_signaling(fence);
	dma_fence_put(fence);
}

static int flush_barrier(struct active_node *it)
{
	struct intel_engine_cs *engine;

	if (likely(!is_barrier(&it->base)))
		return 0;

	engine = __barrier_to_engine(it);
	smp_rmb(); /* serialise with add_active_barriers */
	if (!is_barrier(&it->base))
		return 0;

	return intel_engine_flush_barriers(engine);
}

static int flush_lazy_signals(struct i915_active *ref)
{
	struct active_node *it, *n;
	int err = 0;

	enable_signaling(&ref->excl);
	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
		err = flush_barrier(it); /* unconnected idle barrier? */
		if (err)
			break;

		enable_signaling(&it->base);
	}

	return err;
}

int __i915_active_wait(struct i915_active *ref, int state)
{
	might_sleep();

	/* Any fence added after the wait begins will not be auto-signaled */
	if (i915_active_acquire_if_busy(ref)) {
		int err;

		err = flush_lazy_signals(ref);
		i915_active_release(ref);
		if (err)
			return err;

		if (___wait_var_event(ref, i915_active_is_idle(ref),
				      state, 0, 0, schedule()))
			return -EINTR;
	}

	/*
	 * After the wait is complete, the caller may free the active.
	 * We have to flush any concurrent retirement before returning.
	 */
	flush_work(&ref->work);
	return 0;
}
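
/*
 * A minimal usage sketch: i915_active.h provides an i915_active_wait()
 * wrapper that calls __i915_active_wait(ref, TASK_INTERRUPTIBLE), e.g.
 *
 *	err = i915_active_wait(ref);	// flush lazy signals, sleep until idle
 *	if (err)			// -EINTR if the sleep was interrupted
 *		return err;
 *
 * On success the tracker is idle, but unless the caller holds its own
 * reference the backing storage may be freed at any point afterwards.
 */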

static int __await_active(struct i915_active_fence *active,
			  int (*fn)(void *arg, struct dma_fence *fence),
			  void *arg)
{
	struct dma_fence *fence;

	if (is_barrier(active)) /* XXX flush the barrier? */
		return 0;

	fence = i915_active_fence_get(active);
	if (fence) {
		int err;

		err = fn(arg, fence);
		dma_fence_put(fence);
		if (err < 0)
			return err;
	}

	return 0;
}

struct wait_barrier {
	struct wait_queue_entry base;
	struct i915_active *ref;
};

static int
barrier_wake(wait_queue_entry_t *wq, unsigned int mode, int flags, void *key)
{
	struct wait_barrier *wb = container_of(wq, typeof(*wb), base);

	if (i915_active_is_idle(wb->ref)) {
		list_del(&wq->entry);
		i915_sw_fence_complete(wq->private);
		kfree(wq);
	}

	return 0;
}

static int __await_barrier(struct i915_active *ref, struct i915_sw_fence *fence)
{
	struct wait_barrier *wb;

	wb = kmalloc(sizeof(*wb), GFP_KERNEL);
	if (unlikely(!wb))
		return -ENOMEM;

	GEM_BUG_ON(i915_active_is_idle(ref));
	if (!i915_sw_fence_await(fence)) {
		kfree(wb);
		return -EINVAL;
	}

	wb->base.flags = 0;
	wb->base.func = barrier_wake;
	wb->base.private = fence;
	wb->ref = ref;

	add_wait_queue(__var_waitqueue(ref), &wb->base);
	return 0;
}

static int await_active(struct i915_active *ref,
			unsigned int flags,
			int (*fn)(void *arg, struct dma_fence *fence),
			void *arg, struct i915_sw_fence *barrier)
{
	int err = 0;

	if (!i915_active_acquire_if_busy(ref))
		return 0;

	if (flags & I915_ACTIVE_AWAIT_EXCL &&
	    rcu_access_pointer(ref->excl.fence)) {
		err = __await_active(&ref->excl, fn, arg);
		if (err)
			goto out;
	}

	if (flags & I915_ACTIVE_AWAIT_ACTIVE) {
		struct active_node *it, *n;

		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
			err = __await_active(&it->base, fn, arg);
			if (err)
				goto out;
		}
	}

	if (flags & I915_ACTIVE_AWAIT_BARRIER) {
		err = flush_lazy_signals(ref);
		if (err)
			goto out;

		err = __await_barrier(ref, barrier);
		if (err)
			goto out;
	}

out:
	i915_active_release(ref);
	return err;
}

static int rq_await_fence(void *arg, struct dma_fence *fence)
{
	return i915_request_await_dma_fence(arg, fence);
}

int i915_request_await_active(struct i915_request *rq,
			      struct i915_active *ref,
			      unsigned int flags)
{
	return await_active(ref, flags, rq_await_fence, rq, &rq->submit);
}

static int sw_await_fence(void *arg, struct dma_fence *fence)
{
	return i915_sw_fence_await_dma_fence(arg, fence, 0,
					     GFP_NOWAIT | __GFP_NOWARN);
}

int i915_sw_fence_await_active(struct i915_sw_fence *fence,
			       struct i915_active *ref,
			       unsigned int flags)
{
	return await_active(ref, flags, sw_await_fence, fence, fence);
}

void i915_active_fini(struct i915_active *ref)
{
	debug_active_fini(ref);
	GEM_BUG_ON(atomic_read(&ref->count));
	GEM_BUG_ON(work_pending(&ref->work));
	mutex_destroy(&ref->mutex);

	if (ref->cache)
		kmem_cache_free(slab_cache, ref->cache);
}

static inline bool is_idle_barrier(struct active_node *node, u64 idx)
{
	return node->timeline == idx && !i915_active_fence_isset(&node->base);
}

static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
{
	struct rb_node *prev, *p;

	if (RB_EMPTY_ROOT(&ref->tree))
		return NULL;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/*
	 * Try to reuse any existing barrier nodes already allocated for this
	 * i915_active; due to overlapping active phases there is likely a
	 * node kept alive (as we reuse before parking). We prefer to reuse
	 * completely idle barriers (less hassle in manipulating the llists),
	 * but otherwise any will do.
	 */
	if (ref->cache && is_idle_barrier(ref->cache, idx)) {
		p = &ref->cache->node;
		goto match;
	}

	prev = NULL;
	p = ref->tree.rb_node;
	while (p) {
		struct active_node *node =
			rb_entry(p, struct active_node, node);

		if (is_idle_barrier(node, idx))
			goto match;

		prev = p;
		if (node->timeline < idx)
			p = READ_ONCE(p->rb_right);
		else
			p = READ_ONCE(p->rb_left);
	}

	/*
	 * No quick match, but we did find the leftmost rb_node for the
	 * kernel_context. Walk the rb_tree in-order to see if there were
	 * any idle-barriers on this timeline that we missed, or just use
	 * the first pending barrier.
	 */
	for (p = prev; p; p = rb_next(p)) {
		struct active_node *node =
			rb_entry(p, struct active_node, node);
		struct intel_engine_cs *engine;

		if (node->timeline > idx)
			break;

		if (node->timeline < idx)
			continue;

		if (is_idle_barrier(node, idx))
			goto match;

		/*
		 * The list of pending barriers is protected by the
		 * kernel_context timeline, which notably we do not hold
		 * here. i915_request_add_active_barriers() may consume
		 * the barrier before we claim it, so we have to check
		 * for success.
		 */
		engine = __barrier_to_engine(node);
		smp_rmb(); /* serialise with add_active_barriers */
		if (is_barrier(&node->base) &&
		    ____active_del_barrier(ref, node, engine))
			goto match;
	}

	return NULL;

match:
	spin_lock_irq(&ref->tree_lock);
	rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
	if (p == &ref->cache->node)
		WRITE_ONCE(ref->cache, NULL);
	spin_unlock_irq(&ref->tree_lock);

	return rb_entry(p, struct active_node, node);
}

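/*
 * Barrier lifecycle, in brief: i915_active_acquire_preallocate_barrier()
 * reserves one proto-node per physical engine, marking each fence slot
 * with ERR_PTR(-EAGAIN) and stashing the engine in the callback node;
 * i915_active_acquire_barrier() then links those proto-nodes into the
 * rbtree and onto engine->barrier_tasks; finally,
 * i915_request_add_active_barriers() attaches them to the next request
 * on the engine's kernel_context timeline, whose retirement releases
 * the i915_active.
 */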
int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
					    struct intel_engine_cs *engine)
{
	intel_engine_mask_t tmp, mask = engine->mask;
	struct llist_node *first = NULL, *last = NULL;
	struct intel_gt *gt = engine->gt;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/* Wait until the previous preallocation is completed */
	while (!llist_empty(&ref->preallocated_barriers))
		cond_resched();

	/*
	 * Preallocate a node for each physical engine supporting the target
	 * engine (remember virtual engines have more than one sibling).
	 * We can then use the preallocated nodes in
	 * i915_active_acquire_barrier()
	 */
	GEM_BUG_ON(!mask);
	for_each_engine_masked(engine, gt, mask, tmp) {
		u64 idx = engine->kernel_context->timeline->fence_context;
		struct llist_node *prev = first;
		struct active_node *node;

		rcu_read_lock();
		node = reuse_idle_barrier(ref, idx);
		rcu_read_unlock();
		if (!node) {
			node = kmem_cache_alloc(slab_cache, GFP_KERNEL);
			if (!node)
				goto unwind;

			RCU_INIT_POINTER(node->base.fence, NULL);
			node->base.cb.func = node_retire;
			node->timeline = idx;
			node->ref = ref;
		}

		if (!i915_active_fence_isset(&node->base)) {
			/*
			 * Mark this as being *our* unconnected proto-node.
			 *
			 * Since this node is not in any list, and we have
			 * decoupled it from the rbtree, we can reuse the
			 * request to indicate this is an idle-barrier node
			 * and then we can use the rb_node and list pointers
			 * for our tracking of the pending barrier.
			 */
			RCU_INIT_POINTER(node->base.fence, ERR_PTR(-EAGAIN));
			node->base.cb.node.prev = (void *)engine;
			__i915_active_acquire(ref);
		}
		GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN));

		GEM_BUG_ON(barrier_to_engine(node) != engine);
		first = barrier_to_ll(node);
		first->next = prev;
		if (!last)
			last = first;
		intel_engine_pm_get(engine);
	}

	GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
	llist_add_batch(first, last, &ref->preallocated_barriers);

	return 0;

unwind:
	while (first) {
		struct active_node *node = barrier_from_ll(first);

		first = first->next;

		atomic_dec(&ref->count);
		intel_engine_pm_put(barrier_to_engine(node));

		kmem_cache_free(slab_cache, node);
	}
	return -ENOMEM;
}

void i915_active_acquire_barrier(struct i915_active *ref)
{
	struct llist_node *pos, *next;
	unsigned long flags;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/*
	 * Transfer the list of preallocated barriers into the
	 * i915_active rbtree, but only as proto-nodes. They will be
	 * populated by i915_request_add_active_barriers() to point to the
	 * request that will eventually release them.
	 */
	llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
		struct active_node *node = barrier_from_ll(pos);
		struct intel_engine_cs *engine = barrier_to_engine(node);
		struct rb_node **p, *parent;

		spin_lock_irqsave_nested(&ref->tree_lock, flags,
					 SINGLE_DEPTH_NESTING);
		parent = NULL;
		p = &ref->tree.rb_node;
		while (*p) {
			struct active_node *it;

			parent = *p;

			it = rb_entry(parent, struct active_node, node);
			if (it->timeline < node->timeline)
				p = &parent->rb_right;
			else
				p = &parent->rb_left;
		}
		rb_link_node(&node->node, parent, p);
		rb_insert_color(&node->node, &ref->tree);
		spin_unlock_irqrestore(&ref->tree_lock, flags);

		GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
		llist_add(barrier_to_ll(node), &engine->barrier_tasks);
		intel_engine_pm_put_delay(engine, 2);
	}
}

static struct dma_fence **ll_to_fence_slot(struct llist_node *node)
{
	return __active_fence_slot(&barrier_from_ll(node)->base);
}

void i915_request_add_active_barriers(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct llist_node *node, *next;
	unsigned long flags;

	GEM_BUG_ON(!intel_context_is_barrier(rq->context));
	GEM_BUG_ON(intel_engine_is_virtual(engine));
	GEM_BUG_ON(i915_request_timeline(rq) != engine->kernel_context->timeline);

	node = llist_del_all(&engine->barrier_tasks);
	if (!node)
		return;
	/*
	 * Attach the list of proto-fences to the in-flight request such
	 * that the parent i915_active will be released when this request
	 * is retired.
	 */
	spin_lock_irqsave(&rq->lock, flags);
	llist_for_each_safe(node, next, node) {
		/* serialise with reuse_idle_barrier */
		smp_store_mb(*ll_to_fence_slot(node), &rq->fence);
		list_add_tail((struct list_head *)node, &rq->fence.cb_list);
	}
	spin_unlock_irqrestore(&rq->lock, flags);
}

/*
 * __i915_active_fence_set: Update the last active fence along its timeline
 * @active: the active tracker
 * @fence: the new fence (under construction)
 *
 * Records the new @fence as the last active fence along its timeline in
 * this active tracker, moving the tracking callbacks from the previous
 * fence onto this one. Gets and returns a reference to the previous fence
 * (if not already completed), which the caller must put after making sure
 * that it is executed before the new fence. To ensure that the order of
 * fences within the timeline of the i915_active_fence is understood, it
 * should be locked by the caller.
 */
struct dma_fence *
__i915_active_fence_set(struct i915_active_fence *active,
			struct dma_fence *fence)
{
	struct dma_fence *prev;
	unsigned long flags;

	/*
	 * In case of fences embedded in i915_requests, their memory is
	 * allocated SLAB_TYPESAFE_BY_RCU, so it can be reused right after
	 * release by new requests. There is then a risk of passing back a
	 * pointer to a new, completely unrelated fence that reuses the same
	 * memory while tracked under a different active tracker. Combined
	 * with i915 perf open/close operations that build await dependencies
	 * between engine kernel context requests and user requests from
	 * different timelines, this can lead to dependency loops and
	 * infinite waits.
	 *
	 * As a countermeasure, we try to get a reference to the active->fence
	 * first, so if we succeed and pass it back to our user then it is not
	 * released and potentially reused by an unrelated request before the
	 * user has a chance to set up an await dependency on it.
	 */
	prev = i915_active_fence_get(active);
	if (fence == prev)
		return fence;

	GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));

	/*
	 * Consider that we have two threads arriving (A and B), with
	 * C already resident as the active->fence.
	 *
	 * Both A and B have got a reference to C or NULL, depending on the
	 * timing of the interrupt handler. Let's assume that if A has got C
	 * then it has locked C first (before B).
	 *
	 * Note the strong ordering of the timeline also provides consistent
	 * nesting rules for the fence->lock; the inner lock is always the
	 * older lock.
	 */
	spin_lock_irqsave(fence->lock, flags);
	if (prev)
		spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);

	/*
	 * A does the cmpxchg first, and so it sees C or NULL, as before, or
	 * something else, depending on the timing of other threads and/or
	 * the interrupt handler. If not the same as before then A unlocks C
	 * if applicable and retries, starting from an attempt to get a new
	 * active->fence. Meanwhile, B follows the same path as A.
	 * Once A succeeds with cmpxchg, B fails again, retries, gets A from
	 * active->fence, locks it as soon as A completes, and possibly
	 * succeeds with cmpxchg.
	 */
	while (cmpxchg(__active_fence_slot(active), prev, fence) != prev) {
		if (prev) {
			spin_unlock(prev->lock);
			dma_fence_put(prev);
		}
		spin_unlock_irqrestore(fence->lock, flags);

		prev = i915_active_fence_get(active);
		GEM_BUG_ON(prev == fence);

		spin_lock_irqsave(fence->lock, flags);
		if (prev)
			spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
	}

	/*
	 * If prev is NULL then the previous fence must have been signaled
	 * and we know that we are first on the timeline. If it is still
	 * present then, having the lock on that fence already acquired, we
	 * serialise with the interrupt handler, in the process of removing it
	 * from any future interrupt callback. A will then wait on C before
	 * executing (if present).
	 *
	 * As B is second, it sees A as the previous fence and so waits for
	 * it to complete its transition and takes over the occupancy for
	 * itself -- remembering that it needs to wait on A before executing.
	 */
	if (prev) {
		__list_del_entry(&active->cb.node);
		spin_unlock(prev->lock); /* serialise with prev->cb_list */
	}
	list_add_tail(&active->cb.node, &fence->cb_list);
	spin_unlock_irqrestore(fence->lock, flags);

	return prev;
}

int i915_active_fence_set(struct i915_active_fence *active,
			  struct i915_request *rq)
{
	struct dma_fence *fence;
	int err = 0;

	/* Must maintain timeline ordering wrt previous active requests */
	fence = __i915_active_fence_set(active, &rq->fence);
	if (fence) {
		err = i915_request_await_dma_fence(rq, fence);
		dma_fence_put(fence);
	}

	return err;
}

void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	active_fence_cb(fence, cb);
}

struct auto_active {
	struct i915_active base;
	struct kref ref;
};

struct i915_active *i915_active_get(struct i915_active *ref)
{
	struct auto_active *aa = container_of(ref, typeof(*aa), base);

	kref_get(&aa->ref);
	return &aa->base;
}

static void auto_release(struct kref *ref)
{
	struct auto_active *aa = container_of(ref, typeof(*aa), ref);

	i915_active_fini(&aa->base);
	kfree(aa);
}

void i915_active_put(struct i915_active *ref)
{
	struct auto_active *aa = container_of(ref, typeof(*aa), base);

	kref_put(&aa->ref, auto_release);
}

static int auto_active(struct i915_active *ref)
{
	i915_active_get(ref);
	return 0;
}

static void auto_retire(struct i915_active *ref)
{
	i915_active_put(ref);
}

struct i915_active *i915_active_create(void)
{
	struct auto_active *aa;

	aa = kmalloc(sizeof(*aa), GFP_KERNEL);
	if (!aa)
		return NULL;

	kref_init(&aa->ref);
	i915_active_init(&aa->base, auto_active, auto_retire, 0);

	return &aa->base;
}
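
/*
 * A brief usage sketch of the self-managed variant above (rq is assumed to
 * be a request the caller already holds):
 *
 *	struct i915_active *ref;
 *
 *	ref = i915_active_create();
 *	if (!ref)
 *		return -ENOMEM;
 *	err = i915_active_add_request(ref, rq); // activation takes a kref
 *	i915_active_put(ref);	// freed once idle and all krefs are dropped
 */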

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_active.c"
#endif

void i915_active_module_exit(void)
{
	kmem_cache_destroy(slab_cache);
}

int __init i915_active_module_init(void)
{
	slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
	if (!slab_cache)
		return -ENOMEM;

	return 0;
}