xref: /linux/drivers/gpu/drm/scheduler/sched_entity.c (revision ce801e5d6c1bac228bf10f75e8bede4285c58282)
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>

#include "sched_internal.h"

#include "gpu_scheduler_trace.h"

/**
 * drm_sched_entity_init - Init a context entity used by the scheduler when
 * submitting to a HW ring.
 *
 * @entity: scheduler entity to init
 * @priority: priority of the entity
 * @sched_list: the list of drm scheds on which jobs from this
 *           entity can be submitted
 * @num_sched_list: number of drm scheds in @sched_list
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found to be guilty of causing a timeout
 *
 * Note that @sched_list must have at least one element to schedule the entity.
 *
 * For changing @priority later on at runtime see
 * drm_sched_entity_set_priority(). For changing the set of schedulers
 * @sched_list at runtime see drm_sched_entity_modify_sched().
 *
 * An entity is cleaned up by calling drm_sched_entity_fini(). See also
 * drm_sched_entity_destroy().
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty)
{
	if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
		return -EINVAL;

	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = NULL;
	entity->guilty = guilty;
	entity->num_sched_list = num_sched_list;
	entity->priority = priority;
	/*
	 * It's perfectly valid to initialize an entity without having a valid
	 * scheduler attached. It's just not valid to use the scheduler before it
	 * is initialized itself.
	 */
	entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
	RCU_INIT_POINTER(entity->last_scheduled, NULL);
	RB_CLEAR_NODE(&entity->rb_tree_node);

	if (num_sched_list && !sched_list[0]->sched_rq) {
		/* Every entry covered by num_sched_list should be non-NULL,
		 * so warn drivers to fix their DRM calling order.
		 */
		pr_warn("%s: called with uninitialized scheduler\n", __func__);
	} else if (num_sched_list) {
		/* The "priority" of an entity cannot exceed the number of
		 * run-queues of a scheduler. Protect against num_rqs being 0
		 * by converting to signed. Choose the lowest priority
		 * available.
		 */
		if (entity->priority >= sched_list[0]->num_rqs) {
			dev_err(sched_list[0]->dev, "entity has out-of-bounds priority: %u. num_rqs: %u\n",
				entity->priority, sched_list[0]->num_rqs);
			entity->priority = max_t(s32, (s32) sched_list[0]->num_rqs - 1,
						 (s32) DRM_SCHED_PRIORITY_KERNEL);
		}
		entity->rq = sched_list[0]->sched_rq[entity->priority];
	}

	init_completion(&entity->entity_idle);

	/* We start in an idle state. */
	complete_all(&entity->entity_idle);

	spin_lock_init(&entity->lock);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);
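
/*
 * A minimal usage sketch for drm_sched_entity_init(), kept as a comment for
 * illustration only; the "my_dev" structure and its "sched" member are
 * hypothetical driver state, not part of this file:
 *
 *	struct drm_gpu_scheduler *sched_list[] = { &my_dev->sched };
 *	int ret;
 *
 *	ret = drm_sched_entity_init(&my_dev->entity,
 *				    DRM_SCHED_PRIORITY_NORMAL,
 *				    sched_list, ARRAY_SIZE(sched_list),
 *				    NULL);
 *	if (ret)
 *		return ret;
 *
 * The entity is later torn down with drm_sched_entity_destroy(), or with
 * drm_sched_entity_flush() followed by drm_sched_entity_fini().
 */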

/**
 * drm_sched_entity_modify_sched - Modify sched of an entity
 * @entity: scheduler entity to modify
 * @sched_list: the list of new drm scheds which will replace
 *		 existing entity->sched_list
 * @num_sched_list: number of drm scheds in @sched_list
 *
 * Note that this must be called under the same common lock for @entity as
 * drm_sched_job_arm() and drm_sched_entity_push_job(), or the driver needs to
 * guarantee through some other means that this is never called while new jobs
 * can be pushed to @entity.
 */
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				    struct drm_gpu_scheduler **sched_list,
				    unsigned int num_sched_list)
{
	WARN_ON(!num_sched_list || !sched_list);

	spin_lock(&entity->lock);
	entity->sched_list = sched_list;
	entity->num_sched_list = num_sched_list;
	spin_unlock(&entity->lock);
}
EXPORT_SYMBOL(drm_sched_entity_modify_sched);
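
/*
 * Sketch of switching an entity to a new scheduler set; "my_ctx" and its
 * "lock" are hypothetical driver state, standing in for whatever common lock
 * the driver already holds around drm_sched_job_arm() and
 * drm_sched_entity_push_job():
 *
 *	mutex_lock(&my_ctx->lock);
 *	drm_sched_entity_modify_sched(&my_ctx->entity, new_sched_list,
 *				      num_new_scheds);
 *	mutex_unlock(&my_ctx->lock);
 *
 * The new list is consulted the next time the entity selects a run-queue,
 * i.e. once its job queue has drained.
 */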

static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	rmb(); /* for list_empty to work without lock */

	if (list_empty(&entity->list) ||
	    spsc_queue_count(&entity->job_queue) == 0 ||
	    entity->stopped)
		return true;

	return false;
}

/**
 * drm_sched_entity_error - return error of last scheduled job
 * @entity: scheduler entity to check
 *
 * Opportunistically return the error of the last scheduled job. Result can
 * change any time when new jobs are pushed to the hw.
 */
int drm_sched_entity_error(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	int r;

	rcu_read_lock();
	fence = rcu_dereference(entity->last_scheduled);
	r = fence ? fence->error : 0;
	rcu_read_unlock();

	return r;
}
EXPORT_SYMBOL(drm_sched_entity_error);
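
/*
 * Typical opportunistic use of drm_sched_entity_error() in a driver's submit
 * path (sketch only, "my_ctx" is hypothetical): reject new submissions on a
 * context whose last job was cancelled or faulted.
 *
 *	r = drm_sched_entity_error(&my_ctx->entity);
 *	if (r)
 *		return r;
 */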

static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
{
	struct drm_sched_job *job = container_of(wrk, typeof(*job), work);

	drm_sched_fence_finished(job->s_fence, -ESRCH);
	WARN_ON(job->s_fence->parent);
	job->sched->ops->free_job(job);
}

/* Signal the scheduler finished fence when the entity in question is killed. */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
					  struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);
	unsigned long index;

	dma_fence_put(f);

	/* Wait for all dependencies to avoid data corruption */
	xa_for_each(&job->dependencies, index, f) {
		struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

		if (s_fence && f == &s_fence->scheduled) {
			/* The dependencies array had a reference on the scheduled
			 * fence, and the finished fence refcount might have
			 * dropped to zero. Use dma_fence_get_rcu() so we get
			 * a NULL fence in that case.
			 */
			f = dma_fence_get_rcu(&s_fence->finished);

			/* Now that we have a reference on the finished fence,
			 * we can release the reference the dependencies array
			 * had on the scheduled fence.
			 */
			dma_fence_put(&s_fence->scheduled);
		}

		xa_erase(&job->dependencies, index);
		if (f && !dma_fence_add_callback(f, &job->finish_cb,
						 drm_sched_entity_kill_jobs_cb))
			return;

		dma_fence_put(f);
	}

	INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
	schedule_work(&job->work);
}

/* Remove the entity from the scheduler and kill all pending jobs */
static void drm_sched_entity_kill(struct drm_sched_entity *entity)
{
	struct drm_sched_job *job;
	struct dma_fence *prev;

	if (!entity->rq)
		return;

	spin_lock(&entity->lock);
	entity->stopped = true;
	drm_sched_rq_remove_entity(entity->rq, entity);
	spin_unlock(&entity->lock);

	/* Make sure this entity is not used by the scheduler at the moment */
	wait_for_completion(&entity->entity_idle);

	/* The entity is guaranteed to not be used by the scheduler */
	prev = rcu_dereference_check(entity->last_scheduled, true);
	dma_fence_get(prev);
	while ((job = drm_sched_entity_queue_pop(entity))) {
		struct drm_sched_fence *s_fence = job->s_fence;

		dma_fence_get(&s_fence->finished);
		if (!prev || dma_fence_add_callback(prev, &job->finish_cb,
					   drm_sched_entity_kill_jobs_cb))
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);

		prev = &s_fence->finished;
	}
	dma_fence_put(prev);
}

/**
 * drm_sched_entity_flush - Flush a context entity
 *
 * @entity: scheduler entity
 * @timeout: time in jiffies to wait for the queue to become empty
 *
 * drm_sched_entity_fini() is split into two functions. This first part does
 * the waiting, removes the entity from the runqueue and returns an error when
 * the process was killed.
 *
 * Returns the remaining time in jiffies left from the input timeout
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
	struct drm_gpu_scheduler *sched;
	struct task_struct *last_user;
	long ret = timeout;

	if (!entity->rq)
		return 0;

	sched = entity->rq->sched;
	/*
	 * The client will not queue more IBs during this fini; consume
	 * existing queued IBs or discard them on SIGKILL.
	 */
	if (current->flags & PF_EXITING) {
		if (timeout)
			ret = wait_event_timeout(
					sched->job_scheduled,
					drm_sched_entity_is_idle(entity),
					timeout);
	} else {
		wait_event_killable(sched->job_scheduled,
				    drm_sched_entity_is_idle(entity));
	}

	/* For a killed process, disable enqueueing of any more IBs right now */
	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
	if ((!last_user || last_user == current->group_leader) &&
	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
		drm_sched_entity_kill(entity);

	return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);

/**
 * drm_sched_entity_fini - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Cleans up @entity which has been initialized by drm_sched_entity_init().
 *
 * If there are potentially jobs still in flight or getting newly queued,
 * drm_sched_entity_flush() must be called first. This function then goes over
 * the entity and signals all jobs with an error code if the process was killed.
 */
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
	/*
	 * If consumption of existing IBs wasn't completed, forcefully remove
	 * them here. This also makes sure that the scheduler won't touch this
	 * entity any more.
	 */
	drm_sched_entity_kill(entity);

	if (entity->dependency) {
		dma_fence_remove_callback(entity->dependency, &entity->cb);
		dma_fence_put(entity->dependency);
		entity->dependency = NULL;
	}

	dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
	RCU_INIT_POINTER(entity->last_scheduled, NULL);
}
EXPORT_SYMBOL(drm_sched_entity_fini);

/**
 * drm_sched_entity_destroy - Destroy a context entity
 * @entity: scheduler entity
 *
 * Calls drm_sched_entity_flush() and drm_sched_entity_fini() as a
 * convenience wrapper.
 */
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
	drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);
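
/*
 * Teardown sketch: a driver that wants its own flush timeout can open-code
 * what drm_sched_entity_destroy() does ("my_ctx" is hypothetical):
 *
 *	drm_sched_entity_flush(&my_ctx->entity, msecs_to_jiffies(500));
 *	drm_sched_entity_fini(&my_ctx->entity);
 *
 * Otherwise drm_sched_entity_destroy() is the convenience wrapper using
 * MAX_WAIT_SCHED_ENTITY_Q_EMPTY as the timeout.
 */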

/* drm_sched_entity_clear_dep - callback to clear the entity's dependency */
static void drm_sched_entity_clear_dep(struct dma_fence *f,
				       struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	entity->dependency = NULL;
	dma_fence_put(f);
}

/*
 * drm_sched_entity_wakeup - callback to clear the entity's dependency and
 * wake up the scheduler
 */
static void drm_sched_entity_wakeup(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	drm_sched_entity_clear_dep(f, cb);
	drm_sched_wakeup(entity->rq->sched);
}

/**
 * drm_sched_entity_set_priority - Sets priority of the entity
 *
 * @entity: scheduler entity
 * @priority: scheduler priority
 *
 * Update the priority of runqueues used for the entity.
 */
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority)
{
	spin_lock(&entity->lock);
	entity->priority = priority;
	spin_unlock(&entity->lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);

/*
 * Add a callback to the current dependency of the entity to wake up the
 * scheduler when the entity becomes available.
 */
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct dma_fence *fence = entity->dependency;
	struct drm_sched_fence *s_fence;

	if (fence->context == entity->fence_context ||
	    fence->context == entity->fence_context + 1) {
		/*
		 * Fence is a scheduled/finished fence from a job
		 * which belongs to the same entity, so we can ignore
		 * fences from ourselves
		 */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_drm_sched_fence(fence);
	if (!fence->error && s_fence && s_fence->sched == sched &&
	    !test_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags)) {

		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    drm_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    drm_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}

static struct dma_fence *
drm_sched_job_dependency(struct drm_sched_job *job,
			 struct drm_sched_entity *entity)
{
	struct dma_fence *f;

	/* We keep the fence around, so we can iterate over all dependencies
	 * in drm_sched_entity_kill_jobs_cb() to ensure all deps are signaled
	 * before killing the job.
	 */
	f = xa_load(&job->dependencies, job->last_dependency);
	if (f) {
		job->last_dependency++;
		return dma_fence_get(f);
	}

	if (job->sched->ops->prepare_job)
		return job->sched->ops->prepare_job(job, entity);

	return NULL;
}

struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
	struct drm_sched_job *sched_job;

	sched_job = drm_sched_entity_queue_peek(entity);
	if (!sched_job)
		return NULL;

	while ((entity->dependency =
			drm_sched_job_dependency(sched_job, entity))) {
		trace_drm_sched_job_wait_dep(sched_job, entity->dependency);

		if (drm_sched_entity_add_dependency_cb(entity))
			return NULL;
	}

	/* Skip jobs from an entity that was marked guilty */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

	dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
	rcu_assign_pointer(entity->last_scheduled,
			   dma_fence_get(&sched_job->s_fence->finished));

	/*
	 * If the queue is empty we allow drm_sched_entity_select_rq() to
	 * locklessly access ->last_scheduled. This only works if we set the
	 * pointer before we dequeue and if we have a write barrier here.
	 */
	smp_wmb();

	spsc_queue_pop(&entity->job_queue);

	/*
	 * Update the entity's location in the min heap according to
	 * the timestamp of the next job, if any.
	 */
	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) {
		struct drm_sched_job *next;

		next = drm_sched_entity_queue_peek(entity);
		if (next) {
			struct drm_sched_rq *rq;

			spin_lock(&entity->lock);
			rq = entity->rq;
			spin_lock(&rq->lock);
			drm_sched_rq_update_fifo_locked(entity, rq,
							next->submit_ts);
			spin_unlock(&rq->lock);
			spin_unlock(&entity->lock);
		}
	}

	/* Jobs and entities might have different lifecycles. Since we're
	 * removing the job from the entity's queue, set the job's entity
	 * pointer to NULL to prevent any future access of the entity through
	 * this job.
	 */
	sched_job->entity = NULL;

	return sched_job;
}

void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	struct drm_gpu_scheduler *sched;
	struct drm_sched_rq *rq;

	/* single possible engine and already selected */
	if (!entity->sched_list)
		return;

	/* queue non-empty, stay on the same engine */
	if (spsc_queue_count(&entity->job_queue))
		return;

	/*
	 * Only when the queue is empty are we guaranteed that the scheduler
	 * thread cannot change ->last_scheduled. To enforce ordering we need
	 * a read barrier here. See drm_sched_entity_pop_job() for the other
	 * side.
	 */
	smp_rmb();

	fence = rcu_dereference_check(entity->last_scheduled, true);

	/* stay on the same engine if the previous job hasn't finished */
	if (fence && !dma_fence_is_signaled(fence))
		return;

	spin_lock(&entity->lock);
	sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
	rq = sched ? sched->sched_rq[entity->priority] : NULL;
	if (rq != entity->rq) {
		drm_sched_rq_remove_entity(entity->rq, entity);
		entity->rq = rq;
	}
	spin_unlock(&entity->lock);

	if (entity->num_sched_list == 1)
		entity->sched_list = NULL;
}

/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 * @sched_job: job to submit
 *
 * Note: To guarantee that the order of insertion into the queue matches the
 * job's fence sequence number, this function should be called with
 * drm_sched_job_arm() under a common lock for the struct drm_sched_entity
 * that was set up for @sched_job in drm_sched_job_init().
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
{
	struct drm_sched_entity *entity = sched_job->entity;
	bool first;
	ktime_t submit_ts;

	trace_drm_sched_job(sched_job, entity);
	atomic_inc(entity->rq->sched->score);
	WRITE_ONCE(entity->last_user, current->group_leader);

	/*
	 * After the sched_job is pushed into the entity queue, it may be
	 * completed and freed up at any time. We can no longer access it.
	 * Make sure to set the submit_ts first, to avoid a race.
	 */
	sched_job->submit_ts = submit_ts = ktime_get();
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

	/* first job wakes up scheduler */
	if (first) {
		struct drm_gpu_scheduler *sched;
		struct drm_sched_rq *rq;

		/* Add the entity to the run queue */
		spin_lock(&entity->lock);
		if (entity->stopped) {
			spin_unlock(&entity->lock);

			DRM_ERROR("Trying to push to a killed entity\n");
			return;
		}

		rq = entity->rq;
		sched = rq->sched;

		spin_lock(&rq->lock);
		drm_sched_rq_add_entity(rq, entity);

		if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
			drm_sched_rq_update_fifo_locked(entity, rq, submit_ts);

		spin_unlock(&rq->lock);
		spin_unlock(&entity->lock);

		drm_sched_wakeup(sched);
	}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
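
/*
 * Submission-flow sketch tying the note above together. "my_ctx", its "lock"
 * and the "job" wrapper embedding struct drm_sched_job as "base" are
 * hypothetical driver state; the point is that drm_sched_job_arm() and
 * drm_sched_entity_push_job() run under the same lock, so fence sequence
 * numbers match the queue insertion order:
 *
 *	mutex_lock(&my_ctx->lock);
 *	drm_sched_job_arm(&job->base);
 *	fence = dma_fence_get(&job->base.s_fence->finished);
 *	drm_sched_entity_push_job(&job->base);
 *	mutex_unlock(&my_ctx->lock);
 *
 * with the job having been set up beforehand via drm_sched_job_init() against
 * my_ctx->entity. The finished fence is grabbed before the push because the
 * job may complete and be freed at any point afterwards.
 */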