/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/mutex.h>

#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_request.h"
#include "i915_scheduler.h"

static struct i915_global_scheduler {
	struct i915_global base;
	struct kmem_cache *slab_dependencies;
	struct kmem_cache *slab_priorities;
} global;

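/*
 * schedule_lock is a single global lock serialising priority adjustment
 * (__i915_schedule) and dependency list manipulation across all engines.
 * It is always taken irq-off, see i915_schedule(), i915_sched_node_fini()
 * and __i915_sched_node_add_dependency().
 */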
static DEFINE_SPINLOCK(schedule_lock);

static const struct i915_request *
node_to_request(const struct i915_sched_node *node)
{
	return container_of(node, const struct i915_request, sched);
}

static inline bool node_started(const struct i915_sched_node *node)
{
	return i915_request_started(node_to_request(node));
}

static inline bool node_signaled(const struct i915_sched_node *node)
{
	return i915_request_completed(node_to_request(node));
}

static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
	return rb_entry(rb, struct i915_priolist, node);
}

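/*
 * Debug-only sanity check: the priolist rbtree must be ordered with the
 * highest priority as the leftmost node, and the cached leftmost node must
 * agree with rb_first() of the underlying tree.
 */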
static void assert_priolists(struct intel_engine_execlists * const execlists)
{
	struct rb_node *rb;
	long last_prio;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		return;

	GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
		   rb_first(&execlists->queue.rb_root));

	last_prio = INT_MAX;
	for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
		const struct i915_priolist *p = to_priolist(rb);

		GEM_BUG_ON(p->priority > last_prio);
		last_prio = p->priority;
	}
}

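/*
 * i915_sched_lookup_priolist - find (or create) the request list for @prio
 *
 * Returns the list_head of the i915_priolist for the given priority level
 * within the engine's execlists queue, allocating a new i915_priolist if
 * one does not yet exist. Must be called with engine->active.lock held.
 */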
struct list_head *
i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_priolist *p;
	struct rb_node **parent, *rb;
	bool first = true;

	lockdep_assert_held(&engine->active.lock);
	assert_priolists(execlists);

	if (unlikely(execlists->no_priolist))
		prio = I915_PRIORITY_NORMAL;

find_priolist:
	/* most positive priority is scheduled first, equal priorities fifo */
	rb = NULL;
	parent = &execlists->queue.rb_root.rb_node;
	while (*parent) {
		rb = *parent;
		p = to_priolist(rb);
		if (prio > p->priority) {
			parent = &rb->rb_left;
		} else if (prio < p->priority) {
			parent = &rb->rb_right;
			first = false;
		} else {
			return &p->requests;
		}
	}

	if (prio == I915_PRIORITY_NORMAL) {
		p = &execlists->default_priolist;
	} else {
		p = kmem_cache_alloc(global.slab_priorities, GFP_ATOMIC);
		/* Convert an allocation failure to a priority bump */
		if (unlikely(!p)) {
			prio = I915_PRIORITY_NORMAL; /* recurses just once */

			/* To maintain ordering with all rendering, after an
			 * allocation failure we have to disable all scheduling.
			 * Requests will then be executed in fifo, and schedule
			 * will ensure that dependencies are emitted in fifo.
			 * There will still be some reordering with existing
			 * requests, so if userspace lied about their
			 * dependencies that reordering may be visible.
			 */
			execlists->no_priolist = true;
			goto find_priolist;
		}
	}

	p->priority = prio;
	INIT_LIST_HEAD(&p->requests);

	rb_link_node(&p->node, rb, parent);
	rb_insert_color_cached(&p->node, &execlists->queue, first);

	return &p->requests;
}

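/*
 * Return a dynamically allocated i915_priolist to the slab cache once it
 * is empty and removed from the execlists queue.
 */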
void __i915_priolist_free(struct i915_priolist *p)
{
	kmem_cache_free(global.slab_priorities, p);
}

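/*
 * Per-walk cache of the last priolist we inserted into. It is only valid
 * while we hold the corresponding engine->active.lock and is reset by
 * sched_lock_engine() whenever we have to switch engine locks.
 */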
struct sched_cache {
	struct list_head *priolist;
};

static struct intel_engine_cs *
sched_lock_engine(const struct i915_sched_node *node,
		  struct intel_engine_cs *locked,
		  struct sched_cache *cache)
{
	const struct i915_request *rq = node_to_request(node);
	struct intel_engine_cs *engine;

	GEM_BUG_ON(!locked);

	/*
	 * Virtual engines complicate acquiring the engine timeline lock,
	 * as their rq->engine pointer is not stable until under that
	 * engine lock. The simple ploy we use is to take the lock then
	 * check that the rq still belongs to the newly locked engine.
	 */
	while (locked != (engine = READ_ONCE(rq->engine))) {
		spin_unlock(&locked->active.lock);
		memset(cache, 0, sizeof(*cache));
		spin_lock(&engine->active.lock);
		locked = engine;
	}

	GEM_BUG_ON(locked != engine);
	return locked;
}

static inline int rq_prio(const struct i915_request *rq)
{
	return rq->sched.attr.priority;
}

static inline bool need_preempt(int prio, int active)
{
	/*
	 * Allow preemption of low -> normal -> high, but we do
	 * not allow low priority tasks to preempt other low priority
	 * tasks under the impression that latency for low priority
	 * tasks does not matter (as much as background throughput),
	 * so kiss.
	 */
	return prio >= max(I915_PRIORITY_NORMAL, active);
}

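/*
 * After raising the priority of @rq, check whether it should now preempt
 * the request currently executing on @engine and, if so, kick the
 * execlists tasklet to reevaluate the submission queue.
 */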
static void kick_submission(struct intel_engine_cs *engine,
			    const struct i915_request *rq,
			    int prio)
{
	const struct i915_request *inflight;

	/*
	 * We only need to kick the tasklet once for the high priority
	 * new context we add into the queue.
	 */
	if (prio <= engine->execlists.queue_priority_hint)
		return;

	rcu_read_lock();

	/* Nothing currently active? We're overdue for a submission! */
	inflight = execlists_active(&engine->execlists);
	if (!inflight)
		goto unlock;

	/*
	 * If we are already the currently executing context, don't
	 * bother evaluating if we should preempt ourselves.
	 */
	if (inflight->context == rq->context)
		goto unlock;

	ENGINE_TRACE(engine,
		     "bumping queue-priority-hint:%d for rq:%llx:%lld, inflight:%llx:%lld prio %d\n",
		     prio,
		     rq->fence.context, rq->fence.seqno,
		     inflight->fence.context, inflight->fence.seqno,
		     inflight->sched.attr.priority);

	engine->execlists.queue_priority_hint = prio;
	if (need_preempt(prio, rq_prio(inflight)))
		tasklet_hi_schedule(&engine->execlists.tasklet);

unlock:
	rcu_read_unlock();
}

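/*
 * Bump the priority of @node and, transitively, of every not-yet-started
 * request it depends upon, so that our signalers run at least as urgently
 * as we do. The caller must hold schedule_lock; the engine->active.lock
 * of each affected engine is taken as we walk the flattened dependency
 * list.
 */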
static void __i915_schedule(struct i915_sched_node *node,
			    const struct i915_sched_attr *attr)
{
	const int prio = max(attr->priority, node->attr.priority);
	struct intel_engine_cs *engine;
	struct i915_dependency *dep, *p;
	struct i915_dependency stack;
	struct sched_cache cache;
	LIST_HEAD(dfs);

	/* Needed in order to use the temporary link inside i915_dependency */
	lockdep_assert_held(&schedule_lock);
	GEM_BUG_ON(prio == I915_PRIORITY_INVALID);

	if (node_signaled(node))
		return;

	stack.signaler = node;
	list_add(&stack.dfs_link, &dfs);

	/*
	 * Recursively bump all dependent priorities to match the new request.
	 *
	 * A naive approach would be to use recursion:
	 * static void update_priorities(struct i915_sched_node *node, prio) {
	 *	list_for_each_entry(dep, &node->signalers_list, signal_link)
	 *		update_priorities(dep->signal, prio)
	 *	queue_request(node);
	 * }
	 * but that may have unlimited recursion depth and so runs a very
	 * real risk of overrunning the kernel stack. Instead, we build
	 * a flat list of all dependencies starting with the current request.
	 * As we walk the list of dependencies, we add all of its dependencies
	 * to the end of the list (this may include an already visited
	 * request) and continue to walk onwards onto the new dependencies. The
	 * end result is a topological list of requests in reverse order, the
	 * last element in the list is the request we must execute first.
	 */
	list_for_each_entry(dep, &dfs, dfs_link) {
		struct i915_sched_node *node = dep->signaler;

		/* If we are already flying, we know we have no signalers */
		if (node_started(node))
			continue;

		/*
		 * Within an engine, there can be no cycle, but we may
		 * refer to the same dependency chain multiple times
		 * (redundant dependencies are not eliminated) and across
		 * engines.
		 */
		list_for_each_entry(p, &node->signalers_list, signal_link) {
			GEM_BUG_ON(p == dep); /* no cycles! */

			if (node_signaled(p->signaler))
				continue;

			if (prio > READ_ONCE(p->signaler->attr.priority))
				list_move_tail(&p->dfs_link, &dfs);
		}
	}

	/*
	 * If we didn't need to bump any existing priorities, and we haven't
	 * yet submitted this request (i.e. there is no potential race with
	 * execlists_submit_request()), we can set our own priority and skip
	 * acquiring the engine locks.
	 */
	if (node->attr.priority == I915_PRIORITY_INVALID) {
		GEM_BUG_ON(!list_empty(&node->link));
		node->attr = *attr;

		if (stack.dfs_link.next == stack.dfs_link.prev)
			return;

		__list_del_entry(&stack.dfs_link);
	}

	memset(&cache, 0, sizeof(cache));
	engine = node_to_request(node)->engine;
	spin_lock(&engine->active.lock);

	/* Fifo and depth-first replacement ensure our deps execute before us */
	engine = sched_lock_engine(node, engine, &cache);
	list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
		INIT_LIST_HEAD(&dep->dfs_link);

		node = dep->signaler;
		engine = sched_lock_engine(node, engine, &cache);
		lockdep_assert_held(&engine->active.lock);

		/* Recheck after acquiring the engine->active.lock */
		if (prio <= node->attr.priority || node_signaled(node))
			continue;

		GEM_BUG_ON(node_to_request(node)->engine != engine);

		WRITE_ONCE(node->attr.priority, prio);

		/*
		 * Once the request is ready, it will be placed into the
		 * priority lists and then onto the HW runlist. Before the
		 * request is ready, it does not contribute to our preemption
		 * decisions and we can safely ignore it; both the request and
		 * any preemption it requires will be dealt with upon submission.
		 * See engine->submit_request()
		 */
		if (list_empty(&node->link))
			continue;

		if (i915_request_in_priority_queue(node_to_request(node))) {
			if (!cache.priolist)
				cache.priolist =
					i915_sched_lookup_priolist(engine,
								   prio);
			list_move_tail(&node->link, cache.priolist);
		}

		/* Defer (tasklet) submission until after all of our updates. */
		kick_submission(engine, node_to_request(node), prio);
	}

	spin_unlock(&engine->active.lock);
}

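/*
 * i915_schedule - apply a scheduling attribute (priority) to a request
 *
 * Illustrative usage (the chosen priority value here is just an example):
 *
 *	struct i915_sched_attr attr = { .priority = I915_PRIORITY_NORMAL };
 *
 *	i915_schedule(rq, &attr);
 *
 * Note that __i915_schedule() only ever raises a node's priority, see the
 * max(attr->priority, node->attr.priority) there.
 */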
void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
{
	spin_lock_irq(&schedule_lock);
	__i915_schedule(&rq->sched, attr);
	spin_unlock_irq(&schedule_lock);
}

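/*
 * One-time construction of the scheduling node embedded in each request.
 * i915_sched_node_reinit() is used when the node is reused and expects the
 * dependency lists to have already been emptied by i915_sched_node_fini().
 */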
void i915_sched_node_init(struct i915_sched_node *node)
{
	INIT_LIST_HEAD(&node->signalers_list);
	INIT_LIST_HEAD(&node->waiters_list);
	INIT_LIST_HEAD(&node->link);

	i915_sched_node_reinit(node);
}

void i915_sched_node_reinit(struct i915_sched_node *node)
{
	node->attr.priority = I915_PRIORITY_INVALID;
	node->semaphores = 0;
	node->flags = 0;

	GEM_BUG_ON(!list_empty(&node->signalers_list));
	GEM_BUG_ON(!list_empty(&node->waiters_list));
	GEM_BUG_ON(!list_empty(&node->link));
}

static struct i915_dependency *
i915_dependency_alloc(void)
{
	return kmem_cache_alloc(global.slab_dependencies, GFP_KERNEL);
}

static void
i915_dependency_free(struct i915_dependency *dep)
{
	kmem_cache_free(global.slab_dependencies, dep);
}

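/*
 * Record that @node waits upon @signal, using the caller-provided @dep.
 * Returns true if the dependency was added, or false if @signal has
 * already been signaled, in which case ownership of @dep remains with
 * the caller.
 */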
bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
				      struct i915_sched_node *signal,
				      struct i915_dependency *dep,
				      unsigned long flags)
{
	bool ret = false;

	spin_lock_irq(&schedule_lock);

	if (!node_signaled(signal)) {
		INIT_LIST_HEAD(&dep->dfs_link);
		dep->signaler = signal;
		dep->waiter = node;
		dep->flags = flags;

		/* All set, now publish. Beware the lockless walkers. */
		list_add_rcu(&dep->signal_link, &node->signalers_list);
		list_add_rcu(&dep->wait_link, &signal->waiters_list);

		/* Propagate the chains */
		node->flags |= signal->flags;
		ret = true;
	}

	spin_unlock_irq(&schedule_lock);

	return ret;
}

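/*
 * Allocating wrapper around __i915_sched_node_add_dependency(). Only an
 * allocation failure is reported as an error; waiting upon an already
 * signaled node is a no-op, not a failure.
 */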
int i915_sched_node_add_dependency(struct i915_sched_node *node,
				   struct i915_sched_node *signal,
				   unsigned long flags)
{
	struct i915_dependency *dep;

	dep = i915_dependency_alloc();
	if (!dep)
		return -ENOMEM;

	if (!__i915_sched_node_add_dependency(node, signal, dep,
					      flags | I915_DEPENDENCY_ALLOC))
		i915_dependency_free(dep);

	return 0;
}

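/*
 * Tear down the node's dependency tracking, unlinking it from both its
 * signalers and its waiters and freeing any i915_dependency that was
 * allocated on its behalf.
 */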
void i915_sched_node_fini(struct i915_sched_node *node)
{
	struct i915_dependency *dep, *tmp;

	spin_lock_irq(&schedule_lock);

	/*
	 * Everyone we depended upon (the fences we wait to be signaled)
	 * should retire before us and remove themselves from our list.
	 * However, retirement is run independently on each timeline and
	 * so we may be called out-of-order.
	 */
	list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
		GEM_BUG_ON(!list_empty(&dep->dfs_link));

		list_del_rcu(&dep->wait_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(dep);
	}
	INIT_LIST_HEAD(&node->signalers_list);

	/* Remove ourselves from everyone who depends upon us */
	list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
		GEM_BUG_ON(dep->signaler != node);
		GEM_BUG_ON(!list_empty(&dep->dfs_link));

		list_del_rcu(&dep->signal_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(dep);
	}
	INIT_LIST_HEAD(&node->waiters_list);

	spin_unlock_irq(&schedule_lock);
}

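/*
 * Debug printer: show @rq and, if it is still in flight, each of its
 * incomplete signalers on other timelines (dependencies within the same
 * timeline are implicit and skipped).
 */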
void i915_request_show_with_schedule(struct drm_printer *m,
				     const struct i915_request *rq,
				     const char *prefix,
				     int indent)
{
	struct i915_dependency *dep;

	i915_request_show(m, rq, prefix, indent);
	if (i915_request_completed(rq))
		return;

	rcu_read_lock();
	for_each_signaler(dep, rq) {
		const struct i915_request *signaler =
			node_to_request(dep->signaler);

		/* Dependencies along the same timeline are expected. */
		if (signaler->timeline == rq->timeline)
			continue;

		if (__i915_request_is_complete(signaler))
			continue;

		i915_request_show(m, signaler, prefix, indent + 2);
	}
	rcu_read_unlock();
}

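/*
 * Housekeeping for the two slab caches, hooked into i915_globals via the
 * shrink/exit callbacks below.
 */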
static void i915_global_scheduler_shrink(void)
{
	kmem_cache_shrink(global.slab_dependencies);
	kmem_cache_shrink(global.slab_priorities);
}

static void i915_global_scheduler_exit(void)
{
	kmem_cache_destroy(global.slab_dependencies);
	kmem_cache_destroy(global.slab_priorities);
}

static struct i915_global_scheduler global = { {
	.shrink = i915_global_scheduler_shrink,
	.exit = i915_global_scheduler_exit,
} };

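/*
 * Create the slab caches for i915_dependency and i915_priolist, and
 * register them with i915_globals so they can be shrunk and destroyed via
 * the shrink/exit hooks above.
 */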
int __init i915_global_scheduler_init(void)
{
	global.slab_dependencies = KMEM_CACHE(i915_dependency,
					      SLAB_HWCACHE_ALIGN |
					      SLAB_TYPESAFE_BY_RCU);
	if (!global.slab_dependencies)
		return -ENOMEM;

	global.slab_priorities = KMEM_CACHE(i915_priolist, 0);
	if (!global.slab_priorities)
		goto err_priorities;

	i915_global_register(&global.base);
	return 0;

err_priorities:
	/* Undo the dependencies cache created above, or it would leak. */
	kmem_cache_destroy(global.slab_dependencies);
	return -ENOMEM;
}