/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>

#include "sched_internal.h"

#include "gpu_scheduler_trace.h"

/**
 * drm_sched_entity_init - Init a context entity used by the scheduler when
 * submitting jobs to a HW ring.
 *
 * @entity: scheduler entity to init
 * @priority: priority of the entity
 * @sched_list: the list of drm scheds on which jobs from this
 *           entity can be submitted
 * @num_sched_list: number of drm scheds in @sched_list
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found to be guilty of causing a timeout
 *
 * Note that @sched_list must have at least one element to schedule the entity.
 *
 * For changing @priority later on at runtime see
 * drm_sched_entity_set_priority(). For changing the set of schedulers
 * @sched_list at runtime see drm_sched_entity_modify_sched().
 *
 * An entity is cleaned up by calling drm_sched_entity_fini(). See also
 * drm_sched_entity_destroy().
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty)
{
	if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
		return -EINVAL;

	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = NULL;
	entity->guilty = guilty;
	entity->num_sched_list = num_sched_list;
	entity->priority = priority;
	entity->last_user = current->group_leader;
	/*
	 * It's perfectly valid to initialize an entity without having a valid
	 * scheduler attached. It's just not valid to use the scheduler before it
	 * is initialized itself.
	 */
	entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
	RCU_INIT_POINTER(entity->last_scheduled, NULL);
	RB_CLEAR_NODE(&entity->rb_tree_node);

	if (num_sched_list && !sched_list[0]->sched_rq) {
		/* Every entry covered by num_sched_list should be non-NULL,
		 * so warn drivers who get here to fix their DRM calling
		 * order.
		 */
		pr_warn("%s: called with uninitialized scheduler\n", __func__);
	} else if (num_sched_list) {
		/* The "priority" of an entity cannot exceed the number of
		 * run-queues of a scheduler. Protect against num_rqs being 0
		 * by converting to signed. Choose the lowest priority
		 * available.
		 */
		if (entity->priority >= sched_list[0]->num_rqs) {
			dev_err(sched_list[0]->dev, "entity has out-of-bounds priority: %u. num_rqs: %u\n",
				entity->priority, sched_list[0]->num_rqs);
			entity->priority = max_t(s32, (s32) sched_list[0]->num_rqs - 1,
						 (s32) DRM_SCHED_PRIORITY_KERNEL);
		}
		entity->rq = sched_list[0]->sched_rq[entity->priority];
	}

	init_completion(&entity->entity_idle);

	/* We start in an idle state. */
	complete_all(&entity->entity_idle);

	spin_lock_init(&entity->lock);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);
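
/*
 * A minimal driver-side usage sketch (illustrative only; the "my_ctx"
 * wrapper and the single-scheduler setup are assumptions, not part of this
 * file's API):
 *
 *	struct my_ctx {
 *		struct drm_sched_entity entity;
 *	};
 *
 *	static int my_ctx_init(struct my_ctx *ctx,
 *			       struct drm_gpu_scheduler *sched)
 *	{
 *		struct drm_gpu_scheduler *sched_list[] = { sched };
 *
 *		return drm_sched_entity_init(&ctx->entity,
 *					     DRM_SCHED_PRIORITY_NORMAL,
 *					     sched_list,
 *					     ARRAY_SIZE(sched_list), NULL);
 *	}
 *
 * With a single scheduler the array is not retained, so a stack array like
 * the one above is fine; with more than one scheduler @sched_list is kept
 * by the entity and must outlive it. Tear the entity down again with
 * drm_sched_entity_destroy().
 */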

/**
 * drm_sched_entity_modify_sched - Modify sched of an entity
 * @entity: scheduler entity to modify
 * @sched_list: the list of new drm scheds which will replace
 *		 existing entity->sched_list
 * @num_sched_list: number of drm scheds in @sched_list
 *
 * Note that this must be called under the same common lock for @entity as
 * drm_sched_job_arm() and drm_sched_entity_push_job(), or the driver needs to
 * guarantee through some other means that this is never called while new jobs
 * can be pushed to @entity.
 */
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				    struct drm_gpu_scheduler **sched_list,
				    unsigned int num_sched_list)
{
	WARN_ON(!num_sched_list || !sched_list);

	spin_lock(&entity->lock);
	entity->sched_list = sched_list;
	entity->num_sched_list = num_sched_list;
	spin_unlock(&entity->lock);
}
EXPORT_SYMBOL(drm_sched_entity_modify_sched);
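
/*
 * Illustrative sketch of switching an entity to a new scheduler set at
 * runtime (hypothetical names; assumes "submit_lock" is the same lock the
 * driver already holds around drm_sched_job_arm() and
 * drm_sched_entity_push_job()):
 *
 *	mutex_lock(&ctx->submit_lock);
 *	drm_sched_entity_modify_sched(&ctx->entity, new_sched_list,
 *				      new_num_scheds);
 *	mutex_unlock(&ctx->submit_lock);
 *
 * The new @sched_list is referenced, not copied, so it must stay alive for
 * as long as the entity may still pick a scheduler from it.
 */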

static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	rmb(); /* for list_empty to work without lock */

	if (list_empty(&entity->list) ||
	    spsc_queue_count(&entity->job_queue) == 0 ||
	    entity->stopped)
		return true;

	return false;
}

/**
 * drm_sched_entity_error - return error of last scheduled job
 * @entity: scheduler entity to check
 *
 * Opportunistically return the error of the last scheduled job. The result
 * can change at any time when new jobs are pushed to the hw.
 */
int drm_sched_entity_error(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	int r;

	rcu_read_lock();
	fence = rcu_dereference(entity->last_scheduled);
	r = fence ? fence->error : 0;
	rcu_read_unlock();

	return r;
}
EXPORT_SYMBOL(drm_sched_entity_error);
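
/*
 * A hedged usage sketch (hypothetical names): because the result is
 * opportunistic, treat it as a fail-fast hint rather than a final verdict.
 * For example, refusing new submissions on a context whose last scheduled
 * job carried an error:
 *
 *	int err = drm_sched_entity_error(&ctx->entity);
 *
 *	if (err)
 *		return err;
 */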

static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
{
	struct drm_sched_job *job = container_of(wrk, typeof(*job), work);

	drm_sched_fence_scheduled(job->s_fence, NULL);
	drm_sched_fence_finished(job->s_fence, -ESRCH);
	WARN_ON(job->s_fence->parent);
	job->sched->ops->free_job(job);
}

/* Signal the scheduler finished fence when the entity in question is killed. */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
					  struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);
	unsigned long index;

	dma_fence_put(f);

	/* Wait for all dependencies to avoid data corruption */
	xa_for_each(&job->dependencies, index, f) {
		struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

		if (s_fence && f == &s_fence->scheduled) {
			/* The dependencies array had a reference on the scheduled
			 * fence, and the finished fence refcount might have
			 * dropped to zero. Use dma_fence_get_rcu() so we get
			 * a NULL fence in that case.
			 */
			f = dma_fence_get_rcu(&s_fence->finished);

			/* Now that we have a reference on the finished fence,
			 * we can release the reference the dependencies array
			 * had on the scheduled fence.
			 */
			dma_fence_put(&s_fence->scheduled);
		}

		xa_erase(&job->dependencies, index);
		if (f && !dma_fence_add_callback(f, &job->finish_cb,
						 drm_sched_entity_kill_jobs_cb))
			return;

		dma_fence_put(f);
	}

	INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
	schedule_work(&job->work);
}

/* Remove the entity from the scheduler and kill all pending jobs */
static void drm_sched_entity_kill(struct drm_sched_entity *entity)
{
	struct drm_sched_job *job;
	struct dma_fence *prev;

	if (!entity->rq)
		return;

	spin_lock(&entity->lock);
	entity->stopped = true;
	drm_sched_rq_remove_entity(entity->rq, entity);
	spin_unlock(&entity->lock);

	/* Make sure this entity is not used by the scheduler at the moment */
	wait_for_completion(&entity->entity_idle);

	/* The entity is guaranteed to not be used by the scheduler */
	prev = rcu_dereference_check(entity->last_scheduled, true);
	dma_fence_get(prev);
	while ((job = drm_sched_entity_queue_pop(entity))) {
		struct drm_sched_fence *s_fence = job->s_fence;

		dma_fence_get(&s_fence->finished);
		if (!prev ||
		    dma_fence_add_callback(prev, &job->finish_cb,
					   drm_sched_entity_kill_jobs_cb)) {
			/*
			 * Adding callback above failed.
			 * dma_fence_put() checks for NULL.
			 */
			dma_fence_put(prev);
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
		}

		prev = &s_fence->finished;
	}
	dma_fence_put(prev);
}

/**
 * drm_sched_entity_flush - Flush a context entity
 *
 * @entity: scheduler entity
 * @timeout: time to wait, in jiffies, for the queue to become empty
 *
 * The cleanup in drm_sched_entity_fini() is split into two functions; this
 * first one does the waiting, removes the entity from the runqueue and
 * returns an error when the process was killed.
 *
 * Returns the remaining time in jiffies left from the input timeout
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
	struct drm_gpu_scheduler *sched;
	struct task_struct *last_user;
	long ret = timeout;

	if (!entity->rq)
		return 0;

	sched = entity->rq->sched;
	/*
	 * The client will not queue more jobs during this fini - consume
	 * existing queued ones, or discard them on SIGKILL.
	 */
	if (current->flags & PF_EXITING) {
		if (timeout)
			ret = wait_event_timeout(
					sched->job_scheduled,
					drm_sched_entity_is_idle(entity),
					timeout);
	} else {
		wait_event_killable(sched->job_scheduled,
				    drm_sched_entity_is_idle(entity));
	}

	/* For a killed process disallow further enqueueing of jobs. */
	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
	if (last_user == current->group_leader &&
	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
		drm_sched_entity_kill(entity);

	return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);
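
/*
 * A teardown sketch (hypothetical names): drivers that want an explicit,
 * bounded wait can split cleanup into flush + fini, which is what
 * drm_sched_entity_destroy() below does with a default timeout:
 *
 *	drm_sched_entity_flush(&ctx->entity, msecs_to_jiffies(500));
 *	drm_sched_entity_fini(&ctx->entity);
 */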

/**
 * drm_sched_entity_fini - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Cleans up @entity which has been initialized by drm_sched_entity_init().
 *
 * If there are potentially jobs still in flight or getting newly queued
 * drm_sched_entity_flush() must be called first. This function then goes over
 * the entity and signals all jobs with an error code if the process was killed.
 */
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
	/*
	 * If consumption of existing jobs wasn't completed, forcefully remove
	 * them. This also makes sure that the scheduler won't touch this
	 * entity any more.
	 */
	drm_sched_entity_kill(entity);

	if (entity->dependency) {
		dma_fence_remove_callback(entity->dependency, &entity->cb);
		dma_fence_put(entity->dependency);
		entity->dependency = NULL;
	}

	dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
	RCU_INIT_POINTER(entity->last_scheduled, NULL);
}
EXPORT_SYMBOL(drm_sched_entity_fini);

/**
 * drm_sched_entity_destroy - Destroy a context entity
 * @entity: scheduler entity
 *
 * Calls drm_sched_entity_flush() and drm_sched_entity_fini() as a
 * convenience wrapper.
 */
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
	drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);

/*
 * drm_sched_entity_wakeup - callback to clear the entity's dependency and
 * wake up the scheduler
 */
static void drm_sched_entity_wakeup(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	entity->dependency = NULL;
	dma_fence_put(f);
	drm_sched_wakeup(entity->rq->sched);
}

/**
 * drm_sched_entity_set_priority - Sets priority of the entity
 *
 * @entity: scheduler entity
 * @priority: scheduler priority
 *
 * Update the priority used when selecting run-queues for the entity.
 */
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority)
{
	spin_lock(&entity->lock);
	entity->priority = priority;
	spin_unlock(&entity->lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);
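
/*
 * Illustrative only (hypothetical ioctl handler): raising the priority of
 * an existing context at the user's request:
 *
 *	drm_sched_entity_set_priority(&ctx->entity, DRM_SCHED_PRIORITY_HIGH);
 *
 * The new priority only takes effect the next time the entity selects a
 * run-queue in drm_sched_entity_select_rq(), i.e. for jobs submitted after
 * the entity's queue next drains.
 */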

/*
 * Add a callback to the current dependency of the entity to wake up the
 * scheduler when the entity becomes available.
 */
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity,
					       struct drm_sched_job *sched_job)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct dma_fence *fence = entity->dependency;
	struct drm_sched_fence *s_fence;

	if (fence->context == entity->fence_context ||
	    fence->context == entity->fence_context + 1) {
		/*
		 * Fence is a scheduled/finished fence from a job which
		 * belongs to the same entity; we can ignore fences from
		 * ourselves.
		 */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_drm_sched_fence(fence);
	if (!fence->error && s_fence && s_fence->sched == sched &&
	    !test_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags)) {

		/*
		 * Fence is from the same scheduler, so we only need to wait
		 * for it to be scheduled.
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
	}

	if (trace_drm_sched_job_unschedulable_enabled() &&
	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &entity->dependency->flags))
		trace_drm_sched_job_unschedulable(sched_job, entity->dependency);

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    drm_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}

static struct dma_fence *
drm_sched_job_dependency(struct drm_sched_job *job,
			 struct drm_sched_entity *entity)
{
	struct dma_fence *f;

	/* We keep the fence around, so we can iterate over all dependencies
	 * in drm_sched_entity_kill_jobs_cb() to ensure all deps are signaled
	 * before killing the job.
	 */
	f = xa_load(&job->dependencies, job->last_dependency);
	if (f) {
		job->last_dependency++;
		return dma_fence_get(f);
	}

	if (job->sched->ops->prepare_job)
		return job->sched->ops->prepare_job(job, entity);

	return NULL;
}

struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
	struct drm_sched_job *sched_job;

	sched_job = drm_sched_entity_queue_peek(entity);
	if (!sched_job)
		return NULL;

	while ((entity->dependency =
			drm_sched_job_dependency(sched_job, entity))) {
		if (drm_sched_entity_add_dependency_cb(entity, sched_job))
			return NULL;
	}

	/* Skip jobs from an entity that was marked guilty. */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

	dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
	rcu_assign_pointer(entity->last_scheduled,
			   dma_fence_get(&sched_job->s_fence->finished));

	/*
	 * If the queue is empty we allow drm_sched_entity_select_rq() to
	 * locklessly access ->last_scheduled. This only works if we set the
	 * pointer before we dequeue and if we issue a write barrier here.
	 */
	smp_wmb();

	spsc_queue_pop(&entity->job_queue);

	/*
	 * Update the entity's location in the min heap according to
	 * the timestamp of the next job, if any.
	 */
	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) {
		struct drm_sched_job *next;

		next = drm_sched_entity_queue_peek(entity);
		if (next) {
			struct drm_sched_rq *rq;

			spin_lock(&entity->lock);
			rq = entity->rq;
			spin_lock(&rq->lock);
			drm_sched_rq_update_fifo_locked(entity, rq,
							next->submit_ts);
			spin_unlock(&rq->lock);
			spin_unlock(&entity->lock);
		}
	}

	/* Jobs and entities might have different lifecycles. Since we're
	 * removing the job from the entity's queue, set the job's entity
	 * pointer to NULL to prevent any future access of the entity through
	 * this job.
	 */
	sched_job->entity = NULL;

	return sched_job;
}

void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	struct drm_gpu_scheduler *sched;
	struct drm_sched_rq *rq;

	/* single possible engine and already selected */
	if (!entity->sched_list)
		return;

	/* queue non-empty, stay on the same engine */
	if (spsc_queue_count(&entity->job_queue))
		return;

	/*
	 * Only when the queue is empty are we guaranteed that
	 * drm_sched_run_job_work() cannot change entity->last_scheduled. To
	 * enforce ordering we need a read barrier here. See
	 * drm_sched_entity_pop_job() for the other side.
	 */
	smp_rmb();

	fence = rcu_dereference_check(entity->last_scheduled, true);

	/* stay on the same engine if the previous job hasn't finished */
	if (fence && !dma_fence_is_signaled(fence))
		return;

	spin_lock(&entity->lock);
	sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
	rq = sched ? sched->sched_rq[entity->priority] : NULL;
	if (rq != entity->rq) {
		drm_sched_rq_remove_entity(entity->rq, entity);
		entity->rq = rq;
	}

	if (entity->num_sched_list == 1)
		entity->sched_list = NULL;

	spin_unlock(&entity->lock);
}

/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 * @sched_job: job to submit
 *
 * Note: To guarantee that the order of insertion into the queue matches the
 * job's fence sequence number, this function should be called with
 * drm_sched_job_arm() under a common lock for the struct drm_sched_entity
 * that was set up for @sched_job in drm_sched_job_init().
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
{
	struct drm_sched_entity *entity = sched_job->entity;
	bool first;
	ktime_t submit_ts;

	trace_drm_sched_job_queue(sched_job, entity);

	if (trace_drm_sched_job_add_dep_enabled()) {
		struct dma_fence *entry;
		unsigned long index;

		xa_for_each(&sched_job->dependencies, index, entry)
			trace_drm_sched_job_add_dep(sched_job, entry);
	}
	atomic_inc(entity->rq->sched->score);
	WRITE_ONCE(entity->last_user, current->group_leader);

	/*
	 * After the sched_job is pushed into the entity queue, it may be
	 * completed and freed up at any time. We can no longer access it.
	 * Make sure to set the submit_ts first, to avoid a race.
	 */
	sched_job->submit_ts = submit_ts = ktime_get();
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

	/* first job wakes up scheduler */
	if (first) {
		struct drm_gpu_scheduler *sched;
		struct drm_sched_rq *rq;

		/* Add the entity to the run queue */
		spin_lock(&entity->lock);
		if (entity->stopped) {
			spin_unlock(&entity->lock);

			DRM_ERROR("Trying to push to a killed entity\n");
			return;
		}

		rq = entity->rq;
		sched = rq->sched;

		spin_lock(&rq->lock);
		drm_sched_rq_add_entity(rq, entity);

		if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
			drm_sched_rq_update_fifo_locked(entity, rq, submit_ts);

		spin_unlock(&rq->lock);
		spin_unlock(&entity->lock);

		drm_sched_wakeup(sched);
	}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
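
/*
 * A minimal submission sketch (illustrative; assumes @job was prepared
 * with drm_sched_job_init(), whose exact signature varies across kernel
 * versions, and that "submit_lock" is the driver's common lock mentioned
 * above):
 *
 *	mutex_lock(&ctx->submit_lock);
 *	drm_sched_job_arm(job);
 *	drm_sched_entity_push_job(job);
 *	mutex_unlock(&ctx->submit_lock);
 *
 * Arming and pushing under one lock keeps the fence seqno order and the
 * queue insertion order consistent.
 */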