/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>

#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

/**
 * drm_sched_entity_init - Init a context entity used by the scheduler when
 * submitting jobs to a HW ring.
 *
 * @entity: scheduler entity to init
 * @priority: priority of the entity
 * @sched_list: the list of drm scheds on which jobs from this
 *           entity can be submitted
 * @num_sched_list: number of drm scheds in sched_list
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found to be guilty causing a timeout
 *
 * Note that the &sched_list must have at least one element to schedule the entity.
 *
 * For changing @priority later on at runtime see
 * drm_sched_entity_set_priority(). For changing the set of schedulers
 * @sched_list at runtime see drm_sched_entity_modify_sched().
 *
 * An entity is cleaned up by calling drm_sched_entity_fini(). See also
 * drm_sched_entity_destroy().
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty)
{
	if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
		return -EINVAL;

	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = NULL;
	entity->guilty = guilty;
	entity->num_sched_list = num_sched_list;
	entity->priority = priority;
	entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
	entity->last_scheduled = NULL;
	RB_CLEAR_NODE(&entity->rb_tree_node);

	if (num_sched_list)
		entity->rq = &sched_list[0]->sched_rq[entity->priority];

	init_completion(&entity->entity_idle);

	/* We start in an idle state. */
	complete_all(&entity->entity_idle);

	spin_lock_init(&entity->rq_lock);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);
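
/*
 * A minimal usage sketch for driver authors (hypothetical foo_* names, not
 * taken from any real driver): a per-context entity is typically initialized
 * once against the driver's scheduler(s) and torn down with
 * drm_sched_entity_destroy(). Note that with num_sched_list > 1 the entity
 * keeps the @sched_list pointer, so the array must outlive the entity; with a
 * single scheduler the pointer is not retained and a stack array is fine.
 *
 *	struct foo_ctx {
 *		struct drm_sched_entity entity;
 *	};
 *
 *	int foo_ctx_init(struct foo_device *fdev, struct foo_ctx *ctx)
 *	{
 *		struct drm_gpu_scheduler *sched_list[] = { &fdev->sched };
 *
 *		return drm_sched_entity_init(&ctx->entity,
 *					     DRM_SCHED_PRIORITY_NORMAL,
 *					     sched_list,
 *					     ARRAY_SIZE(sched_list),
 *					     NULL);
 *	}
 */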

/**
 * drm_sched_entity_modify_sched - Modify sched of an entity
 * @entity: scheduler entity to modify
 * @sched_list: the list of new drm scheds which will replace
 *		 existing entity->sched_list
 * @num_sched_list: number of drm scheds in sched_list
 *
 * Note that this must be called under the same common lock for @entity as
 * drm_sched_job_arm() and drm_sched_entity_push_job(), or the driver needs to
 * guarantee through some other means that this is never called while new jobs
 * can be pushed to @entity.
 */
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				    struct drm_gpu_scheduler **sched_list,
				    unsigned int num_sched_list)
{
	WARN_ON(!num_sched_list || !sched_list);

	entity->sched_list = sched_list;
	entity->num_sched_list = num_sched_list;
}
EXPORT_SYMBOL(drm_sched_entity_modify_sched);
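
/*
 * A hedged sketch of how a driver might switch an entity between hardware
 * rings (hypothetical names; the locking scheme is the driver's choice as
 * long as the same lock also covers drm_sched_job_arm() and
 * drm_sched_entity_push_job()):
 *
 *	void foo_ctx_set_rings(struct foo_ctx *ctx,
 *			       struct drm_gpu_scheduler **scheds,
 *			       unsigned int num_scheds)
 *	{
 *		mutex_lock(&ctx->submit_lock);
 *		drm_sched_entity_modify_sched(&ctx->entity, scheds, num_scheds);
 *		mutex_unlock(&ctx->submit_lock);
 *	}
 *
 * As with init, only the @sched_list pointer is stored, so the new array must
 * outlive the entity.
 */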

static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	rmb(); /* for list_empty to work without lock */

	if (list_empty(&entity->list) ||
	    spsc_queue_count(&entity->job_queue) == 0 ||
	    entity->stopped)
		return true;

	return false;
}

/* Return true if entity could provide a job. */
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return false;

	if (READ_ONCE(entity->dependency))
		return false;

	return true;
}

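/*
 * A simplified sketch (not the actual implementation) of how the scheduler
 * core in sched_main.c is expected to consult drm_sched_entity_is_ready()
 * when picking the next entity to run from a run queue:
 *
 *	list_for_each_entry(entity, &rq->entities, list) {
 *		if (drm_sched_entity_is_ready(entity))
 *			return entity;	// found a runnable entity
 *	}
 *	return NULL;			// nothing runnable on this rq
 *
 * An entity with a pending ->dependency fence is skipped until the dependency
 * callback clears it and wakes the scheduler.
 */
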
static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
{
	struct drm_sched_job *job = container_of(wrk, typeof(*job), work);

	drm_sched_fence_finished(job->s_fence);
	WARN_ON(job->s_fence->parent);
	job->sched->ops->free_job(job);
}

/* Signal the scheduler finished fence when the entity in question is killed. */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
					  struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);
	int r;

	dma_fence_put(f);

	/* Wait for all dependencies to avoid data corruptions */
	while (!xa_empty(&job->dependencies)) {
		f = xa_erase(&job->dependencies, job->last_dependency++);
		r = dma_fence_add_callback(f, &job->finish_cb,
					   drm_sched_entity_kill_jobs_cb);
		if (!r)
			return; /* callback installed, runs again when f signals */

		dma_fence_put(f);
	}

	INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
	schedule_work(&job->work);
}

/* Remove the entity from the scheduler and kill all pending jobs */
static void drm_sched_entity_kill(struct drm_sched_entity *entity)
{
	struct drm_sched_job *job;
	struct dma_fence *prev;

	if (!entity->rq)
		return;

	spin_lock(&entity->rq_lock);
	entity->stopped = true;
	drm_sched_rq_remove_entity(entity->rq, entity);
	spin_unlock(&entity->rq_lock);

	/* Make sure this entity is not used by the scheduler at the moment */
	wait_for_completion(&entity->entity_idle);

	/*
	 * Tear down pending jobs in submission order by chaining each job's
	 * cleanup callback to the previous job's finished fence.
	 */
	prev = dma_fence_get(entity->last_scheduled);
	while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
		struct drm_sched_fence *s_fence = job->s_fence;

		dma_fence_set_error(&s_fence->finished, -ESRCH);

		dma_fence_get(&s_fence->finished);
		if (!prev || dma_fence_add_callback(prev, &job->finish_cb,
					   drm_sched_entity_kill_jobs_cb))
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);

		prev = &s_fence->finished;
	}
	dma_fence_put(prev);
}

/**
 * drm_sched_entity_flush - Flush a context entity
 *
 * @entity: scheduler entity
 * @timeout: time to wait in jiffies for the queue to become empty
 *
 * drm_sched_entity_fini() is split into two functions; this first one does
 * the waiting, removes the entity from the runqueue and returns an error when
 * the process was killed.
 *
 * Returns the remaining time in jiffies left from the input timeout
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
	struct drm_gpu_scheduler *sched;
	struct task_struct *last_user;
	long ret = timeout;

	if (!entity->rq)
		return 0;

	sched = entity->rq->sched;
	/*
	 * The client will not queue more IBs during this fini; consume
	 * existing queued IBs or discard them on SIGKILL
	 */
	if (current->flags & PF_EXITING) {
		if (timeout)
			ret = wait_event_timeout(
					sched->job_scheduled,
					drm_sched_entity_is_idle(entity),
					timeout);
	} else {
		wait_event_killable(sched->job_scheduled,
				    drm_sched_entity_is_idle(entity));
	}

	/* For a killed process, disable any further IB enqueues right now */
	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
	if ((!last_user || last_user == current->group_leader) &&
	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
		drm_sched_entity_kill(entity);

	return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);
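
/*
 * A hedged usage sketch (hypothetical driver code): flush with a bounded
 * timeout before tearing the entity down, e.g. on file close:
 *
 *	long ret;
 *
 *	ret = drm_sched_entity_flush(&ctx->entity, msecs_to_jiffies(1000));
 *	if (!ret)
 *		;	// 0 means the wait timed out (or there was no rq)
 *	drm_sched_entity_fini(&ctx->entity);
 *
 * Drivers that don't need a custom timeout can simply use
 * drm_sched_entity_destroy(), which passes MAX_WAIT_SCHED_ENTITY_Q_EMPTY.
 */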

/**
 * drm_sched_entity_fini - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Cleans up @entity which has been initialized by drm_sched_entity_init().
 *
 * If there are potentially jobs still in flight or getting newly queued
 * drm_sched_entity_flush() must be called first. This function then goes over
 * the entity and signals all jobs with an error code if the process was killed.
 */
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
	/*
	 * If consumption of existing IBs wasn't completed, forcefully remove
	 * them here. Also makes sure that the scheduler won't touch this entity
	 * any more.
	 */
	drm_sched_entity_kill(entity);

	if (entity->dependency) {
		dma_fence_remove_callback(entity->dependency, &entity->cb);
		dma_fence_put(entity->dependency);
		entity->dependency = NULL;
	}

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = NULL;
}
EXPORT_SYMBOL(drm_sched_entity_fini);

/**
 * drm_sched_entity_destroy - Destroy a context entity
 * @entity: scheduler entity
 *
 * Calls drm_sched_entity_flush() and drm_sched_entity_fini() as a
 * convenience wrapper.
 */
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
	drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);
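
/*
 * A minimal teardown sketch (hypothetical names), e.g. from a driver's
 * context-destroy or file-close path; the driver must ensure no new jobs can
 * be pushed to the entity concurrently:
 *
 *	void foo_ctx_fini(struct foo_ctx *ctx)
 *	{
 *		drm_sched_entity_destroy(&ctx->entity);
 *	}
 */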

/* drm_sched_entity_clear_dep - callback to clear the entity's dependency */
static void drm_sched_entity_clear_dep(struct dma_fence *f,
				       struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	entity->dependency = NULL;
	dma_fence_put(f);
}

/*
 * drm_sched_entity_wakeup - callback to clear the entity's dependency and
 * wake up the scheduler
 */
static void drm_sched_entity_wakeup(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	drm_sched_entity_clear_dep(f, cb);
	drm_sched_wakeup(entity->rq->sched);
}

/**
 * drm_sched_entity_set_priority - Sets priority of the entity
 *
 * @entity: scheduler entity
 * @priority: scheduler priority
 *
 * Update the priority of run queues used for the entity.
 */
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority)
{
	spin_lock(&entity->rq_lock);
	entity->priority = priority;
	spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);
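
/*
 * A hedged sketch of a priority-change path (hypothetical ioctl handler):
 *
 *	int foo_set_ctx_priority(struct foo_ctx *ctx, bool boost)
 *	{
 *		enum drm_sched_priority prio;
 *
 *		prio = boost ? DRM_SCHED_PRIORITY_HIGH :
 *			       DRM_SCHED_PRIORITY_NORMAL;
 *		drm_sched_entity_set_priority(&ctx->entity, prio);
 *		return 0;
 *	}
 *
 * The new priority takes effect when drm_sched_entity_select_rq() next picks
 * a run queue for the entity, i.e. once the current queue has drained.
 */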

/*
 * Add a callback to the current dependency of the entity to wake up the
 * scheduler when the entity becomes available.
 */
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct dma_fence *fence = entity->dependency;
	struct drm_sched_fence *s_fence;

	if (fence->context == entity->fence_context ||
	    fence->context == entity->fence_context + 1) {
		/*
		 * Fence is a scheduled/finished fence from a job
		 * which belongs to the same entity, we can ignore
		 * fences from ourselves
		 */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched &&
	    !test_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags)) {

		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    drm_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    drm_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}
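
/*
 * An illustrative note on the fence-context check above:
 * drm_sched_entity_init() reserves two fence contexts via
 * dma_fence_context_alloc(2). A job's &drm_sched_fence then uses, roughly:
 *
 *	fence->scheduled.context == entity->fence_context;
 *	fence->finished.context == entity->fence_context + 1;
 *
 * so matching either context identifies a fence from this same entity, which
 * is already ordered by the FIFO and can be dropped as a dependency.
 */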

static struct dma_fence *
drm_sched_job_dependency(struct drm_sched_job *job,
			 struct drm_sched_entity *entity)
{
	if (!xa_empty(&job->dependencies))
		return xa_erase(&job->dependencies, job->last_dependency++);

	if (job->sched->ops->prepare_job)
		return job->sched->ops->prepare_job(job, entity);

	return NULL;
}
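
/*
 * A hedged sketch of a driver-side &drm_sched_backend_ops.prepare_job
 * implementation (hypothetical foo_* names): the callback may return one
 * extra fence per invocation and is called again until it returns NULL,
 * mirroring how the xarray of dependencies is drained one entry at a time.
 * The scheduler consumes the returned reference.
 *
 *	static struct dma_fence *foo_prepare_job(struct drm_sched_job *job,
 *						 struct drm_sched_entity *entity)
 *	{
 *		struct foo_job *fjob = container_of(job, struct foo_job, base);
 *
 *		// hand out a late-bound fence, e.g. from resource pinning
 *		return fjob->pin_fence ? dma_fence_get(fjob->pin_fence) : NULL;
 *	}
 */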

struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
	struct drm_sched_job *sched_job;

	sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
	if (!sched_job)
		return NULL;

	while ((entity->dependency =
			drm_sched_job_dependency(sched_job, entity))) {
		trace_drm_sched_job_wait_dep(sched_job, entity->dependency);

		if (drm_sched_entity_add_dependency_cb(entity))
			return NULL;
	}

	/* skip jobs from an entity that is marked guilty */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

	dma_fence_put(entity->last_scheduled);

	entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished);

	/*
	 * If the queue is empty we allow drm_sched_entity_select_rq() to
	 * locklessly access ->last_scheduled. This only works if we set the
	 * pointer before we dequeue and if we use a write barrier here.
	 */
	smp_wmb();

	spsc_queue_pop(&entity->job_queue);

	/*
	 * Update the entity's location in the min heap according to
	 * the timestamp of the next job, if any.
	 */
	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) {
		struct drm_sched_job *next;

		next = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
		if (next)
			drm_sched_rq_update_fifo(entity, next->submit_ts);
	}

	return sched_job;
}

void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	struct drm_gpu_scheduler *sched;
	struct drm_sched_rq *rq;

	/* single possible engine and already selected */
	if (!entity->sched_list)
		return;

	/* queue non-empty, stay on the same engine */
	if (spsc_queue_count(&entity->job_queue))
		return;

	/*
	 * Only when the queue is empty are we guaranteed that the scheduler
	 * thread cannot change ->last_scheduled. To enforce ordering we need
	 * a read barrier here. See drm_sched_entity_pop_job() for the other
	 * side.
	 */
	smp_rmb();

	fence = entity->last_scheduled;

	/* stay on the same engine if the previous job hasn't finished */
	if (fence && !dma_fence_is_signaled(fence))
		return;

	spin_lock(&entity->rq_lock);
	sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
	rq = sched ? &sched->sched_rq[entity->priority] : NULL;
	if (rq != entity->rq) {
		drm_sched_rq_remove_entity(entity->rq, entity);
		entity->rq = rq;
	}
	spin_unlock(&entity->rq_lock);

	if (entity->num_sched_list == 1)
		entity->sched_list = NULL;
}

/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 * @sched_job: job to submit
 *
 * Note: To guarantee that the order of insertion to queue matches the job's
 * fence sequence number this function should be called with drm_sched_job_arm()
 * under common lock for the struct drm_sched_entity that was set up for
 * @sched_job in drm_sched_job_init().
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
{
	struct drm_sched_entity *entity = sched_job->entity;
	bool first;

	trace_drm_sched_job(sched_job, entity);
	atomic_inc(entity->rq->sched->score);
	WRITE_ONCE(entity->last_user, current->group_leader);
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
	sched_job->submit_ts = ktime_get();

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		spin_lock(&entity->rq_lock);
		if (entity->stopped) {
			spin_unlock(&entity->rq_lock);

			DRM_ERROR("Trying to push to a killed entity\n");
			return;
		}

		drm_sched_rq_add_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);

		if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
			drm_sched_rq_update_fifo(entity, sched_job->submit_ts);

		drm_sched_wakeup(entity->rq->sched);
	}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
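
/*
 * A hedged end-to-end submission sketch (hypothetical foo_* names), showing
 * the arm/push pairing under one lock as required by the kernel-doc above:
 *
 *	int foo_submit(struct foo_ctx *ctx, struct foo_job *fjob)
 *	{
 *		int ret;
 *
 *		ret = drm_sched_job_init(&fjob->base, &ctx->entity, ctx);
 *		if (ret)
 *			return ret;
 *
 *		// ... add dependencies with drm_sched_job_add_dependency() ...
 *
 *		mutex_lock(&ctx->submit_lock);
 *		drm_sched_job_arm(&fjob->base);
 *		drm_sched_entity_push_job(&fjob->base);
 *		mutex_unlock(&ctx->submit_lock);
 *
 *		return 0;
 *	}
 */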