/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>

#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

/**
 * drm_sched_entity_init - Init a context entity used by the scheduler when
 * submitting to the HW ring.
 *
 * @entity: scheduler entity to init
 * @priority: priority of the entity
 * @sched_list: the list of drm scheds on which jobs from this
 *           entity can be submitted
 * @num_sched_list: number of drm scheds in sched_list
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found to be guilty causing a timeout
 *
 * Note that the &sched_list must have at least one element to schedule the entity.
 *
 * For changing @priority later on at runtime see
 * drm_sched_entity_set_priority(). For changing the set of schedulers
 * @sched_list at runtime see drm_sched_entity_modify_sched().
 *
 * An entity is cleaned up by calling drm_sched_entity_fini(). See also
 * drm_sched_entity_destroy().
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty)
{
	if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
		return -EINVAL;

	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = NULL;
	entity->guilty = guilty;
	entity->num_sched_list = num_sched_list;
	entity->priority = priority;
	/*
	 * It's perfectly valid to initialize an entity without having a valid
	 * scheduler attached. It's just not valid to use the scheduler before it
	 * is initialized itself.
	 */
	entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
	RCU_INIT_POINTER(entity->last_scheduled, NULL);
	RB_CLEAR_NODE(&entity->rb_tree_node);

	if (num_sched_list && !sched_list[0]->sched_rq) {
		/* Every entry covered by num_sched_list should be non-NULL,
		 * so warn drivers not to do this and to fix their DRM
		 * calling order.
		 */
		pr_warn("%s: called with uninitialized scheduler\n", __func__);
	} else if (num_sched_list) {
		/* The "priority" of an entity cannot exceed the number of
		 * run-queues of a scheduler. Protect against num_rqs being 0
		 * by converting to signed. Choose the lowest priority
		 * available.
		 */
		if (entity->priority >= sched_list[0]->num_rqs) {
			drm_err(sched_list[0], "entity with out-of-bounds priority:%u num_rqs:%u\n",
				entity->priority, sched_list[0]->num_rqs);
			entity->priority = max_t(s32, (s32) sched_list[0]->num_rqs - 1,
						 (s32) DRM_SCHED_PRIORITY_KERNEL);
		}
		entity->rq = sched_list[0]->sched_rq[entity->priority];
	}

	init_completion(&entity->entity_idle);

	/* We start in an idle state. */
	complete_all(&entity->entity_idle);

	spin_lock_init(&entity->lock);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);
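
/*
 * A minimal usage sketch (illustrative only; "my_sched" and "my_ctx" are
 * hypothetical driver objects, with the entity embedded in the context):
 *
 *	struct drm_gpu_scheduler *sched_list[] = { &my_sched };
 *	int ret;
 *
 *	ret = drm_sched_entity_init(&my_ctx.entity, DRM_SCHED_PRIORITY_NORMAL,
 *				    sched_list, ARRAY_SIZE(sched_list), NULL);
 *	if (ret)
 *		return ret;
 *
 * Passing NULL for @guilty opts out of guilty-context tracking.
 */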

/**
 * drm_sched_entity_modify_sched - Modify sched of an entity
 * @entity: scheduler entity to modify
 * @sched_list: the list of new drm scheds which will replace
 *		 existing entity->sched_list
 * @num_sched_list: number of drm scheds in sched_list
 *
 * Note that this must be called under the same common lock for @entity as
 * drm_sched_job_arm() and drm_sched_entity_push_job(), or the driver needs to
 * guarantee through some other means that this is never called while new jobs
 * can be pushed to @entity.
 */
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				    struct drm_gpu_scheduler **sched_list,
				    unsigned int num_sched_list)
{
	WARN_ON(!num_sched_list || !sched_list);

	spin_lock(&entity->lock);
	entity->sched_list = sched_list;
	entity->num_sched_list = num_sched_list;
	spin_unlock(&entity->lock);
}
EXPORT_SYMBOL(drm_sched_entity_modify_sched);
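
/*
 * A hedged sketch of the locking contract described above (illustrative
 * only; "my_ctx" and its submit_lock are hypothetical):
 *
 *	mutex_lock(&my_ctx.submit_lock);
 *	drm_sched_entity_modify_sched(&my_ctx.entity, new_list, n);
 *	mutex_unlock(&my_ctx.submit_lock);
 *
 * The same submit_lock must also be held around drm_sched_job_arm() and
 * drm_sched_entity_push_job(), so no new job can be pushed concurrently.
 */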

static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	rmb(); /* for list_empty to work without lock */

	if (list_empty(&entity->list) ||
	    spsc_queue_count(&entity->job_queue) == 0 ||
	    entity->stopped)
		return true;

	return false;
}

/* Return true if entity could provide a job. */
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return false;

	if (READ_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * drm_sched_entity_error - return error of last scheduled job
 * @entity: scheduler entity to check
 *
 * Opportunistically return the error of the last scheduled job. The result
 * can change at any time when new jobs are pushed to the hw.
 */
int drm_sched_entity_error(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	int r;

	rcu_read_lock();
	fence = rcu_dereference(entity->last_scheduled);
	r = fence ? fence->error : 0;
	rcu_read_unlock();

	return r;
}
EXPORT_SYMBOL(drm_sched_entity_error);
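
/*
 * Illustrative only: a driver can poll the entity for a persistent error
 * before accepting new submissions ("my_ctx" is hypothetical):
 *
 *	ret = drm_sched_entity_error(&my_ctx.entity);
 *	if (ret)
 *		return ret;
 */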

static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
{
	struct drm_sched_job *job = container_of(wrk, typeof(*job), work);

	drm_sched_fence_finished(job->s_fence, -ESRCH);
	WARN_ON(job->s_fence->parent);
	job->sched->ops->free_job(job);
}

/* Signal the scheduler finished fence when the entity in question is killed. */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
					  struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);
	unsigned long index;

	dma_fence_put(f);

	/* Wait for all dependencies to avoid data corruptions */
	xa_for_each(&job->dependencies, index, f) {
		struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

		if (s_fence && f == &s_fence->scheduled) {
			/* The dependencies array had a reference on the scheduled
			 * fence, and the finished fence refcount might have
			 * dropped to zero. Use dma_fence_get_rcu() so we get
			 * a NULL fence in that case.
			 */
			f = dma_fence_get_rcu(&s_fence->finished);

			/* Now that we have a reference on the finished fence,
			 * we can release the reference the dependencies array
			 * had on the scheduled fence.
			 */
			dma_fence_put(&s_fence->scheduled);
		}

		xa_erase(&job->dependencies, index);
		if (f && !dma_fence_add_callback(f, &job->finish_cb,
						 drm_sched_entity_kill_jobs_cb))
			return;

		dma_fence_put(f);
	}

	INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
	schedule_work(&job->work);
}

/* Remove the entity from the scheduler and kill all pending jobs */
static void drm_sched_entity_kill(struct drm_sched_entity *entity)
{
	struct drm_sched_job *job;
	struct dma_fence *prev;

	if (!entity->rq)
		return;

	spin_lock(&entity->lock);
	entity->stopped = true;
	drm_sched_rq_remove_entity(entity->rq, entity);
	spin_unlock(&entity->lock);

	/* Make sure this entity is not used by the scheduler at the moment */
	wait_for_completion(&entity->entity_idle);

	/* The entity is guaranteed to not be used by the scheduler */
	prev = rcu_dereference_check(entity->last_scheduled, true);
	dma_fence_get(prev);
	while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
		struct drm_sched_fence *s_fence = job->s_fence;

		dma_fence_get(&s_fence->finished);
		if (!prev ||
		    dma_fence_add_callback(prev, &job->finish_cb,
					   drm_sched_entity_kill_jobs_cb)) {
			/*
			 * Adding callback above failed.
			 * dma_fence_put() checks for NULL.
			 */
			dma_fence_put(prev);
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
		}

		prev = &s_fence->finished;
	}
	dma_fence_put(prev);
}

/**
 * drm_sched_entity_flush - Flush a context entity
 *
 * @entity: scheduler entity
 * @timeout: time to wait for the queue to become empty, in jiffies
 *
 * Splitting drm_sched_entity_fini() into two functions, the first one does the
 * waiting, removes the entity from the runqueue and returns an error when the
 * process was killed.
 *
 * Returns the remaining time in jiffies left from the input timeout
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
	struct drm_gpu_scheduler *sched;
	struct task_struct *last_user;
	long ret = timeout;

	if (!entity->rq)
		return 0;

	sched = entity->rq->sched;
	/*
	 * The client will not queue more IBs during this fini, consume existing
	 * queued IBs or discard them on SIGKILL
	 */
	if (current->flags & PF_EXITING) {
		if (timeout)
			ret = wait_event_timeout(
					sched->job_scheduled,
					drm_sched_entity_is_idle(entity),
					timeout);
	} else {
		wait_event_killable(sched->job_scheduled,
				    drm_sched_entity_is_idle(entity));
	}

	/* For a killed process, disable enqueueing any more IBs right now */
	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
	if ((!last_user || last_user == current->group_leader) &&
	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
		drm_sched_entity_kill(entity);

	return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);
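
/*
 * A hedged two-step teardown sketch (illustrative only; "my_ctx" is a
 * hypothetical driver context embedding the entity):
 *
 *	long left;
 *
 *	left = drm_sched_entity_flush(&my_ctx.entity, msecs_to_jiffies(1000));
 *	drm_sched_entity_fini(&my_ctx.entity);
 *
 * The return value is the time left of the timeout, in jiffies; most drivers
 * skip the split and call drm_sched_entity_destroy() below instead.
 */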

/**
 * drm_sched_entity_fini - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Cleans up @entity which has been initialized by drm_sched_entity_init().
 *
 * If there are potentially jobs still in flight or getting newly queued
 * drm_sched_entity_flush() must be called first. This function then goes over
 * the entity and signals all jobs with an error code if the process was killed.
 */
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
	/*
	 * If consumption of existing IBs wasn't completed, forcefully remove
	 * them here. Also makes sure that the scheduler won't touch this entity
	 * any more.
	 */
	drm_sched_entity_kill(entity);

	if (entity->dependency) {
		dma_fence_remove_callback(entity->dependency, &entity->cb);
		dma_fence_put(entity->dependency);
		entity->dependency = NULL;
	}

	dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
	RCU_INIT_POINTER(entity->last_scheduled, NULL);
}
EXPORT_SYMBOL(drm_sched_entity_fini);

/**
 * drm_sched_entity_destroy - Destroy a context entity
 * @entity: scheduler entity
 *
 * Calls drm_sched_entity_flush() and drm_sched_entity_fini() as a
 * convenience wrapper.
 */
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
	drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);
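
/*
 * Illustrative only: with the wrapper above, the whole teardown from e.g. a
 * driver's postclose path collapses to a single call on the hypothetical
 * "my_ctx":
 *
 *	drm_sched_entity_destroy(&my_ctx.entity);
 */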

/* drm_sched_entity_clear_dep - callback to clear the entity's dependency */
static void drm_sched_entity_clear_dep(struct dma_fence *f,
				       struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	entity->dependency = NULL;
	dma_fence_put(f);
}

/*
 * drm_sched_entity_wakeup - callback to clear the entity's dependency and
 * wake up the scheduler
 */
static void drm_sched_entity_wakeup(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	drm_sched_entity_clear_dep(f, cb);
	drm_sched_wakeup(entity->rq->sched);
}

/**
 * drm_sched_entity_set_priority - Sets priority of the entity
 *
 * @entity: scheduler entity
 * @priority: scheduler priority
 *
 * Update the priority of runqueues used for the entity.
 */
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority)
{
	spin_lock(&entity->lock);
	entity->priority = priority;
	spin_unlock(&entity->lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);
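
/*
 * Illustrative only: a driver exposing a per-context priority control could
 * forward the new value directly ("my_ctx" is hypothetical):
 *
 *	drm_sched_entity_set_priority(&my_ctx.entity, DRM_SCHED_PRIORITY_LOW);
 *
 * The new priority is applied when drm_sched_entity_select_rq() next picks a
 * run queue for the entity, i.e. once its job queue has drained.
 */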

/*
 * Add a callback to the current dependency of the entity to wake up the
 * scheduler when the entity becomes available.
 */
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct dma_fence *fence = entity->dependency;
	struct drm_sched_fence *s_fence;

	if (fence->context == entity->fence_context ||
	    fence->context == entity->fence_context + 1) {
		/*
		 * Fence is a scheduled/finished fence from a job
		 * which belongs to the same entity, we can ignore
		 * fences from ourselves
		 */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_drm_sched_fence(fence);
	if (!fence->error && s_fence && s_fence->sched == sched &&
	    !test_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags)) {

		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    drm_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    drm_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}

static struct dma_fence *
drm_sched_job_dependency(struct drm_sched_job *job,
			 struct drm_sched_entity *entity)
{
	struct dma_fence *f;

	/* We keep the fence around, so we can iterate over all dependencies
	 * in drm_sched_entity_kill_jobs_cb() to ensure all deps are signaled
	 * before killing the job.
	 */
	f = xa_load(&job->dependencies, job->last_dependency);
	if (f) {
		job->last_dependency++;
		return dma_fence_get(f);
	}

	if (job->sched->ops->prepare_job)
		return job->sched->ops->prepare_job(job, entity);

	return NULL;
}

struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
	struct drm_sched_job *sched_job;

	sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
	if (!sched_job)
		return NULL;

	while ((entity->dependency =
			drm_sched_job_dependency(sched_job, entity))) {
		trace_drm_sched_job_wait_dep(sched_job, entity->dependency);

		if (drm_sched_entity_add_dependency_cb(entity))
			return NULL;
	}

	/* skip jobs from an entity that was marked guilty */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

	dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
	rcu_assign_pointer(entity->last_scheduled,
			   dma_fence_get(&sched_job->s_fence->finished));

	/*
	 * If the queue is empty we allow drm_sched_entity_select_rq() to
	 * locklessly access ->last_scheduled. This only works if we set the
	 * pointer before we dequeue and if we have a write barrier here.
	 */
	smp_wmb();

	spsc_queue_pop(&entity->job_queue);

	/*
	 * Update the entity's location in the min heap according to
	 * the timestamp of the next job, if any.
	 */
	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) {
		struct drm_sched_job *next;

		next = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
		if (next) {
			struct drm_sched_rq *rq;

			spin_lock(&entity->lock);
			rq = entity->rq;
			spin_lock(&rq->lock);
			drm_sched_rq_update_fifo_locked(entity, rq,
							next->submit_ts);
			spin_unlock(&rq->lock);
			spin_unlock(&entity->lock);
		}
	}

	/* Jobs and entities might have different lifecycles. Since we're
	 * removing the job from the entity's queue, set the job's entity
	 * pointer to NULL to prevent any future access of the entity through
	 * this job.
	 */
	sched_job->entity = NULL;

	return sched_job;
}

void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	struct drm_gpu_scheduler *sched;
	struct drm_sched_rq *rq;

	/* single possible engine and already selected */
	if (!entity->sched_list)
		return;

	/* queue non-empty, stay on the same engine */
	if (spsc_queue_count(&entity->job_queue))
		return;

	/*
	 * Only when the queue is empty are we guaranteed that the scheduler
	 * thread cannot change ->last_scheduled. To enforce ordering we need
	 * a read barrier here. See drm_sched_entity_pop_job() for the other
	 * side.
	 */
	smp_rmb();

	fence = rcu_dereference_check(entity->last_scheduled, true);

	/* stay on the same engine if the previous job hasn't finished */
	if (fence && !dma_fence_is_signaled(fence))
		return;

	spin_lock(&entity->lock);
	sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
	rq = sched ? sched->sched_rq[entity->priority] : NULL;
	if (rq != entity->rq) {
		drm_sched_rq_remove_entity(entity->rq, entity);
		entity->rq = rq;
	}
	spin_unlock(&entity->lock);

	if (entity->num_sched_list == 1)
		entity->sched_list = NULL;
}

/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 * @sched_job: job to submit
 *
 * Note: To guarantee that the order of insertion to the queue matches the
 * job's fence sequence number, this function should be called with
 * drm_sched_job_arm() under a common lock for the struct drm_sched_entity
 * that was set up for @sched_job in drm_sched_job_init().
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
{
	struct drm_sched_entity *entity = sched_job->entity;
	bool first;
	ktime_t submit_ts;

	trace_drm_sched_job(sched_job, entity);
	atomic_inc(entity->rq->sched->score);
	WRITE_ONCE(entity->last_user, current->group_leader);

	/*
	 * After the sched_job is pushed into the entity queue, it may be
	 * completed and freed up at any time. We can no longer access it.
	 * Make sure to set the submit_ts first, to avoid a race.
	 */
	sched_job->submit_ts = submit_ts = ktime_get();
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

	/* first job wakes up scheduler */
	if (first) {
		struct drm_gpu_scheduler *sched;
		struct drm_sched_rq *rq;

		/* Add the entity to the run queue */
		spin_lock(&entity->lock);
		if (entity->stopped) {
			spin_unlock(&entity->lock);

			DRM_ERROR("Trying to push to a killed entity\n");
			return;
		}

		rq = entity->rq;
		sched = rq->sched;

		spin_lock(&rq->lock);
		drm_sched_rq_add_entity(rq, entity);

		if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
			drm_sched_rq_update_fifo_locked(entity, rq, submit_ts);

		spin_unlock(&rq->lock);
		spin_unlock(&entity->lock);

		drm_sched_wakeup(sched);
	}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
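
/*
 * A hedged submission sketch (illustrative only; "my_ctx", its submit_lock
 * and a driver job wrapping struct drm_sched_job as "base" are hypothetical,
 * and the job is assumed to have been set up earlier with
 * drm_sched_job_init()):
 *
 *	mutex_lock(&my_ctx.submit_lock);
 *	drm_sched_job_arm(&job->base);
 *	drm_sched_entity_push_job(&job->base);
 *	mutex_unlock(&my_ctx.submit_lock);
 *
 * Holding one lock across drm_sched_job_arm() and drm_sched_entity_push_job()
 * keeps fence sequence numbers in queue-insertion order, as the note above
 * requires.
 */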