xref: /linux/drivers/gpu/drm/scheduler/sched_entity.c (revision 7b1166dee847d5018c1f3cc781218e806078f752)
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/slab.h>
#include <linux/completion.h>

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>

#include "sched_internal.h"

#include "gpu_scheduler_trace.h"

/**
 * drm_sched_entity_init - Initialize a context entity, used by the scheduler
 * when submitting jobs to a HW ring
 *
 * @entity: scheduler entity to init
 * @priority: priority of the entity
 * @sched_list: the list of drm scheds on which jobs from this
 *           entity can be submitted
 * @num_sched_list: number of drm scheds in @sched_list
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found to be guilty of causing a timeout
 *
 * Note that @sched_list must have at least one element to schedule the entity.
 *
 * For changing @priority later on at runtime see
 * drm_sched_entity_set_priority(). For changing the set of schedulers
 * @sched_list at runtime see drm_sched_entity_modify_sched().
 *
 * An entity is cleaned up by calling drm_sched_entity_fini(). See also
 * drm_sched_entity_destroy().
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty)
{
	if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
		return -EINVAL;

	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = NULL;
	entity->guilty = guilty;
	entity->num_sched_list = num_sched_list;
	entity->priority = priority;
	/*
	 * It's perfectly valid to initialize an entity without having a valid
	 * scheduler attached. It's just not valid to use the scheduler before it
	 * is initialized itself.
	 */
	entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
	RCU_INIT_POINTER(entity->last_scheduled, NULL);
	RB_CLEAR_NODE(&entity->rb_tree_node);

	if (num_sched_list && !sched_list[0]->sched_rq) {
		/* Every entry covered by num_sched_list should be non-NULL
		 * and fully initialized, so warn drivers to fix their DRM
		 * calling order.
		 */
		pr_warn("%s: called with uninitialized scheduler\n", __func__);
	} else if (num_sched_list) {
		/* The "priority" of an entity cannot exceed the number of
		 * run-queues of a scheduler. Protect against num_rqs being 0
		 * by converting to signed. Choose the lowest priority
		 * available.
		 */
		if (entity->priority >= sched_list[0]->num_rqs) {
			dev_err(sched_list[0]->dev, "entity has out-of-bounds priority: %u. num_rqs: %u\n",
				entity->priority, sched_list[0]->num_rqs);
			entity->priority = max_t(s32, (s32) sched_list[0]->num_rqs - 1,
						 (s32) DRM_SCHED_PRIORITY_KERNEL);
		}
		entity->rq = sched_list[0]->sched_rq[entity->priority];
	}

	init_completion(&entity->entity_idle);

	/* We start in an idle state. */
	complete_all(&entity->entity_idle);

	spin_lock_init(&entity->lock);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);
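
/*
 * Example usage (illustrative sketch only, not part of this file; "my_sched",
 * "my_entity" and the surrounding driver code are hypothetical). A driver
 * with a single scheduler might initialize an entity like this:
 *
 *	struct drm_gpu_scheduler *sched_list[] = { &my_sched };
 *	int ret;
 *
 *	ret = drm_sched_entity_init(&my_entity, DRM_SCHED_PRIORITY_NORMAL,
 *				    sched_list, ARRAY_SIZE(sched_list), NULL);
 *	if (ret)
 *		return ret;
 *
 * With num_sched_list > 1 the array itself is retained in
 * entity->sched_list, so it must then stay valid for the entity's lifetime.
 */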

/**
 * drm_sched_entity_modify_sched - Modify sched of an entity
 * @entity: scheduler entity to modify
 * @sched_list: the list of new drm scheds which will replace
 *		 existing entity->sched_list
 * @num_sched_list: number of drm scheds in @sched_list
 *
 * Note that this must be called under the same common lock for @entity as
 * drm_sched_job_arm() and drm_sched_entity_push_job(), or the driver needs to
 * guarantee through some other means that this is never called while new jobs
 * can be pushed to @entity.
 */
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				    struct drm_gpu_scheduler **sched_list,
				    unsigned int num_sched_list)
{
	WARN_ON(!num_sched_list || !sched_list);

	spin_lock(&entity->lock);
	entity->sched_list = sched_list;
	entity->num_sched_list = num_sched_list;
	spin_unlock(&entity->lock);
}
EXPORT_SYMBOL(drm_sched_entity_modify_sched);
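
/*
 * Example (hypothetical driver code): switch an entity to a new scheduler
 * set while holding the same lock that serializes job submission for it
 * ("my_ctx->submission_lock" is an assumed driver-side lock, per the locking
 * rule in the kernel-doc above). new_sched_list must stay valid for the
 * entity's lifetime since the pointer is stored, not copied:
 *
 *	mutex_lock(&my_ctx->submission_lock);
 *	drm_sched_entity_modify_sched(&my_ctx->entity, new_sched_list,
 *				      num_new_scheds);
 *	mutex_unlock(&my_ctx->submission_lock);
 */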

static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	rmb(); /* for list_empty to work without lock */

	if (list_empty(&entity->list) ||
	    spsc_queue_count(&entity->job_queue) == 0 ||
	    entity->stopped)
		return true;

	return false;
}

/**
 * drm_sched_entity_error - return error of last scheduled job
 * @entity: scheduler entity to check
 *
 * Opportunistically return the error of the last scheduled job. The result
 * can change at any time when new jobs are pushed to the hw.
 */
int drm_sched_entity_error(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	int r;

	rcu_read_lock();
	fence = rcu_dereference(entity->last_scheduled);
	r = fence ? fence->error : 0;
	rcu_read_unlock();

	return r;
}
EXPORT_SYMBOL(drm_sched_entity_error);
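
/*
 * Example (hypothetical driver-side check): bail out of a submission path
 * early when the entity's last scheduled job already failed:
 *
 *	int err = drm_sched_entity_error(&my_entity);
 *
 *	if (err)
 *		return err;
 */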

static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
{
	struct drm_sched_job *job = container_of(wrk, typeof(*job), work);

	drm_sched_fence_finished(job->s_fence, -ESRCH);
	WARN_ON(job->s_fence->parent);
	job->sched->ops->free_job(job);
}

/* Signal the scheduler finished fence when the entity in question is killed. */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
					  struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);
	unsigned long index;

	dma_fence_put(f);

	/* Wait for all dependencies to avoid data corruption */
	xa_for_each(&job->dependencies, index, f) {
		struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

		if (s_fence && f == &s_fence->scheduled) {
			/* The dependencies array had a reference on the scheduled
			 * fence, and the finished fence refcount might have
			 * dropped to zero. Use dma_fence_get_rcu() so we get
			 * a NULL fence in that case.
			 */
			f = dma_fence_get_rcu(&s_fence->finished);

			/* Now that we have a reference on the finished fence,
			 * we can release the reference the dependencies array
			 * had on the scheduled fence.
			 */
			dma_fence_put(&s_fence->scheduled);
		}

		xa_erase(&job->dependencies, index);
		if (f && !dma_fence_add_callback(f, &job->finish_cb,
						 drm_sched_entity_kill_jobs_cb))
			return;

		dma_fence_put(f);
	}

	INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
	schedule_work(&job->work);
}

/* Remove the entity from the scheduler and kill all pending jobs */
static void drm_sched_entity_kill(struct drm_sched_entity *entity)
{
	struct drm_sched_job *job;
	struct dma_fence *prev;

	if (!entity->rq)
		return;

	spin_lock(&entity->lock);
	entity->stopped = true;
	drm_sched_rq_remove_entity(entity->rq, entity);
	spin_unlock(&entity->lock);

	/* Make sure this entity is not used by the scheduler at the moment */
	wait_for_completion(&entity->entity_idle);

	/* The entity is guaranteed to not be used by the scheduler */
	prev = rcu_dereference_check(entity->last_scheduled, true);
	dma_fence_get(prev);
	while ((job = drm_sched_entity_queue_pop(entity))) {
		struct drm_sched_fence *s_fence = job->s_fence;

		dma_fence_get(&s_fence->finished);
		if (!prev ||
		    dma_fence_add_callback(prev, &job->finish_cb,
					   drm_sched_entity_kill_jobs_cb)) {
			/*
			 * Adding callback above failed.
			 * dma_fence_put() checks for NULL.
			 */
			dma_fence_put(prev);
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
		}

		prev = &s_fence->finished;
	}
	dma_fence_put(prev);
}

/**
 * drm_sched_entity_flush - Flush a context entity
 *
 * @entity: scheduler entity
 * @timeout: time to wait, in jiffies, for the queue to become empty
 *
 * drm_sched_entity_fini() is split into two functions. This first one does
 * the waiting, removes the entity from the runqueue and returns an error
 * when the process was killed.
 *
 * Returns the remaining time in jiffies left from the input @timeout
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
	struct drm_gpu_scheduler *sched;
	struct task_struct *last_user;
	long ret = timeout;

	if (!entity->rq)
		return 0;

	sched = entity->rq->sched;
	/*
	 * The client will not queue more IBs during this fini; consume
	 * existing queued IBs or discard them on SIGKILL.
	 */
	if (current->flags & PF_EXITING) {
		if (timeout)
			ret = wait_event_timeout(
					sched->job_scheduled,
					drm_sched_entity_is_idle(entity),
					timeout);
	} else {
		wait_event_killable(sched->job_scheduled,
				    drm_sched_entity_is_idle(entity));
	}

	/* For a killed process, disable any further IB enqueueing right now */
	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
	if ((!last_user || last_user == current->group_leader) &&
	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
		drm_sched_entity_kill(entity);

	return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);
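
/*
 * Example (hypothetical teardown path): wait up to one second for queued
 * jobs to drain before finishing the entity, as required by the
 * drm_sched_entity_fini() kernel-doc below. drm_sched_entity_destroy() is
 * the convenience wrapper for this pattern with the default timeout:
 *
 *	drm_sched_entity_flush(&my_entity, msecs_to_jiffies(1000));
 *	drm_sched_entity_fini(&my_entity);
 */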

/**
 * drm_sched_entity_fini - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Cleans up @entity which has been initialized by drm_sched_entity_init().
 *
 * If there are potentially jobs still in flight or getting newly queued,
 * drm_sched_entity_flush() must be called first. This function then goes over
 * the entity and signals all jobs with an error code if the process was killed.
 */
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
	/*
	 * If consumption of existing IBs wasn't completed, forcefully remove
	 * them here. This also makes sure that the scheduler won't touch this
	 * entity any more.
	 */
	drm_sched_entity_kill(entity);

	if (entity->dependency) {
		dma_fence_remove_callback(entity->dependency, &entity->cb);
		dma_fence_put(entity->dependency);
		entity->dependency = NULL;
	}

	dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
	RCU_INIT_POINTER(entity->last_scheduled, NULL);
}
EXPORT_SYMBOL(drm_sched_entity_fini);

/**
 * drm_sched_entity_destroy - Destroy a context entity
 * @entity: scheduler entity
 *
 * Calls drm_sched_entity_flush() and drm_sched_entity_fini() as a
 * convenience wrapper.
 */
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
	drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);

/* drm_sched_entity_clear_dep - callback to clear the entity's dependency */
static void drm_sched_entity_clear_dep(struct dma_fence *f,
				       struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	entity->dependency = NULL;
	dma_fence_put(f);
}

/*
 * drm_sched_entity_wakeup - callback to clear the entity's dependency and
 * wake up the scheduler
 */
static void drm_sched_entity_wakeup(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	drm_sched_entity_clear_dep(f, cb);
	drm_sched_wakeup(entity->rq->sched);
}

/**
 * drm_sched_entity_set_priority - Sets priority of the entity
 *
 * @entity: scheduler entity
 * @priority: scheduler priority
 *
 * Update the priority of runqueues used for the entity.
 */
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority)
{
	spin_lock(&entity->lock);
	entity->priority = priority;
	spin_unlock(&entity->lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);
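
/*
 * Example (hypothetical): demote an entity to the lowest user priority at
 * runtime, e.g. in response to a userspace request. The new priority takes
 * effect the next time a run-queue is selected for the entity:
 *
 *	drm_sched_entity_set_priority(&my_entity, DRM_SCHED_PRIORITY_LOW);
 */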

/*
 * Add a callback to the current dependency of the entity to wake up the
 * scheduler when the entity becomes available.
 */
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct dma_fence *fence = entity->dependency;
	struct drm_sched_fence *s_fence;

	if (fence->context == entity->fence_context ||
	    fence->context == entity->fence_context + 1) {
		/*
		 * Fence is a scheduled/finished fence from a job
		 * which belongs to the same entity, we can ignore
		 * fences from ourselves
		 */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_drm_sched_fence(fence);
	if (!fence->error && s_fence && s_fence->sched == sched &&
	    !test_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags)) {

		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    drm_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    drm_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}

static struct dma_fence *
drm_sched_job_dependency(struct drm_sched_job *job,
			 struct drm_sched_entity *entity)
{
	struct dma_fence *f;

	/* We keep the fence around, so we can iterate over all dependencies
	 * in drm_sched_entity_kill_jobs_cb() to ensure all deps are signaled
	 * before killing the job.
	 */
	f = xa_load(&job->dependencies, job->last_dependency);
	if (f) {
		job->last_dependency++;
		return dma_fence_get(f);
	}

	if (job->sched->ops->prepare_job)
		return job->sched->ops->prepare_job(job, entity);

	return NULL;
}

struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
	struct drm_sched_job *sched_job;

	sched_job = drm_sched_entity_queue_peek(entity);
	if (!sched_job)
		return NULL;

	while ((entity->dependency =
			drm_sched_job_dependency(sched_job, entity))) {
		if (drm_sched_entity_add_dependency_cb(entity)) {
			trace_drm_sched_job_unschedulable(sched_job, entity->dependency);
			return NULL;
		}
	}

	/* Skip jobs from an entity that was marked guilty */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

	dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
	rcu_assign_pointer(entity->last_scheduled,
			   dma_fence_get(&sched_job->s_fence->finished));

	/*
	 * If the queue is empty we allow drm_sched_entity_select_rq() to
	 * locklessly access ->last_scheduled. This only works if we set the
	 * pointer before we dequeue and if we add a write barrier here.
	 */
	smp_wmb();

	spsc_queue_pop(&entity->job_queue);

	/*
	 * Update the entity's location in the min heap according to
	 * the timestamp of the next job, if any.
	 */
	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) {
		struct drm_sched_job *next;

		next = drm_sched_entity_queue_peek(entity);
		if (next) {
			struct drm_sched_rq *rq;

			spin_lock(&entity->lock);
			rq = entity->rq;
			spin_lock(&rq->lock);
			drm_sched_rq_update_fifo_locked(entity, rq,
							next->submit_ts);
			spin_unlock(&rq->lock);
			spin_unlock(&entity->lock);
		}
	}

	/* Jobs and entities might have different lifecycles. Since we're
	 * removing the job from the entity's queue, set the job's entity
	 * pointer to NULL to prevent any future access of the entity through
	 * this job.
	 */
	sched_job->entity = NULL;

	return sched_job;
}

void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	struct drm_gpu_scheduler *sched;
	struct drm_sched_rq *rq;

	/* single possible engine and already selected */
	if (!entity->sched_list)
		return;

	/* queue non-empty, stay on the same engine */
	if (spsc_queue_count(&entity->job_queue))
		return;

	/*
	 * Only when the queue is empty are we guaranteed that
	 * drm_sched_run_job_work() cannot change entity->last_scheduled. To
	 * enforce ordering we need a read barrier here. See
	 * drm_sched_entity_pop_job() for the other side.
	 */
	smp_rmb();

	fence = rcu_dereference_check(entity->last_scheduled, true);

	/* stay on the same engine if the previous job hasn't finished */
	if (fence && !dma_fence_is_signaled(fence))
		return;

	spin_lock(&entity->lock);
	sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
	rq = sched ? sched->sched_rq[entity->priority] : NULL;
	if (rq != entity->rq) {
		drm_sched_rq_remove_entity(entity->rq, entity);
		entity->rq = rq;
	}
	spin_unlock(&entity->lock);

	if (entity->num_sched_list == 1)
		entity->sched_list = NULL;
}

/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 * @sched_job: job to submit
 *
 * Note: To guarantee that the order of insertion into the queue matches the
 * job's fence sequence number, this function should be called with
 * drm_sched_job_arm() under a common lock for the struct drm_sched_entity
 * that was set up for @sched_job in drm_sched_job_init().
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
{
	struct drm_sched_entity *entity = sched_job->entity;
	bool first;
	ktime_t submit_ts;

	trace_drm_sched_job_queue(sched_job, entity);

	if (trace_drm_sched_job_add_dep_enabled()) {
		struct dma_fence *entry;
		unsigned long index;

		xa_for_each(&sched_job->dependencies, index, entry)
			trace_drm_sched_job_add_dep(sched_job, entry);
	}
	atomic_inc(entity->rq->sched->score);
	WRITE_ONCE(entity->last_user, current->group_leader);

	/*
	 * After the sched_job is pushed into the entity queue, it may be
	 * completed and freed up at any time. We can no longer access it.
	 * Make sure to set the submit_ts first, to avoid a race.
	 */
	sched_job->submit_ts = submit_ts = ktime_get();
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

	/* first job wakes up scheduler */
	if (first) {
		struct drm_gpu_scheduler *sched;
		struct drm_sched_rq *rq;

		/* Add the entity to the run queue */
		spin_lock(&entity->lock);
		if (entity->stopped) {
			spin_unlock(&entity->lock);

			DRM_ERROR("Trying to push to a killed entity\n");
			return;
		}

		rq = entity->rq;
		sched = rq->sched;

		spin_lock(&rq->lock);
		drm_sched_rq_add_entity(rq, entity);

		if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
			drm_sched_rq_update_fifo_locked(entity, rq, submit_ts);

		spin_unlock(&rq->lock);
		spin_unlock(&entity->lock);

		drm_sched_wakeup(sched);
	}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
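
/*
 * Example (hypothetical driver submission path): arm and push a job under
 * one common lock so the queue insertion order matches the fence sequence
 * numbers, as required by the note in the kernel-doc above
 * ("my_ctx->submission_lock" is an assumed driver-side lock):
 *
 *	mutex_lock(&my_ctx->submission_lock);
 *	drm_sched_job_arm(job);
 *	drm_sched_entity_push_job(job);
 *	mutex_unlock(&my_ctx->submission_lock);
 */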