1 /* 2 * Copyright 2015 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24 #include <linux/kthread.h> 25 #include <linux/slab.h> 26 #include <linux/completion.h> 27 28 #include <drm/drm_print.h> 29 #include <drm/gpu_scheduler.h> 30 31 #include "sched_internal.h" 32 33 #include "gpu_scheduler_trace.h" 34 35 /** 36 * drm_sched_entity_init - Init a context entity used by scheduler when 37 * submit to HW ring. 38 * 39 * @entity: scheduler entity to init 40 * @priority: priority of the entity 41 * @sched_list: the list of drm scheds on which jobs from this 42 * entity can be submitted 43 * @num_sched_list: number of drm sched in sched_list 44 * @guilty: atomic_t set to 1 when a job on this queue 45 * is found to be guilty causing a timeout 46 * 47 * Note that the &sched_list must have at least one element to schedule the entity. 48 * 49 * For changing @priority later on at runtime see 50 * drm_sched_entity_set_priority(). For changing the set of schedulers 51 * @sched_list at runtime see drm_sched_entity_modify_sched(). 52 * 53 * An entity is cleaned up by calling drm_sched_entity_fini(). See also 54 * drm_sched_entity_destroy(). 55 * 56 * Returns 0 on success or a negative error code on failure. 57 */ 58 int drm_sched_entity_init(struct drm_sched_entity *entity, 59 enum drm_sched_priority priority, 60 struct drm_gpu_scheduler **sched_list, 61 unsigned int num_sched_list, 62 atomic_t *guilty) 63 { 64 if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0]))) 65 return -EINVAL; 66 67 memset(entity, 0, sizeof(struct drm_sched_entity)); 68 INIT_LIST_HEAD(&entity->list); 69 entity->rq = NULL; 70 entity->guilty = guilty; 71 entity->num_sched_list = num_sched_list; 72 entity->priority = priority; 73 /* 74 * It's perfectly valid to initialize an entity without having a valid 75 * scheduler attached. It's just not valid to use the scheduler before it 76 * is initialized itself. 77 */ 78 entity->sched_list = num_sched_list > 1 ? sched_list : NULL; 79 RCU_INIT_POINTER(entity->last_scheduled, NULL); 80 RB_CLEAR_NODE(&entity->rb_tree_node); 81 82 if (num_sched_list && !sched_list[0]->sched_rq) { 83 /* Since every entry covered by num_sched_list 84 * should be non-NULL and therefore we warn drivers 85 * not to do this and to fix their DRM calling order. 86 */ 87 pr_warn("%s: called with uninitialized scheduler\n", __func__); 88 } else if (num_sched_list) { 89 /* The "priority" of an entity cannot exceed the number of run-queues of a 90 * scheduler. Protect against num_rqs being 0, by converting to signed. Choose 91 * the lowest priority available. 92 */ 93 if (entity->priority >= sched_list[0]->num_rqs) { 94 dev_err(sched_list[0]->dev, "entity has out-of-bounds priority: %u. num_rqs: %u\n", 95 entity->priority, sched_list[0]->num_rqs); 96 entity->priority = max_t(s32, (s32) sched_list[0]->num_rqs - 1, 97 (s32) DRM_SCHED_PRIORITY_KERNEL); 98 } 99 entity->rq = sched_list[0]->sched_rq[entity->priority]; 100 } 101 102 init_completion(&entity->entity_idle); 103 104 /* We start in an idle state. */ 105 complete_all(&entity->entity_idle); 106 107 spin_lock_init(&entity->lock); 108 spsc_queue_init(&entity->job_queue); 109 110 atomic_set(&entity->fence_seq, 0); 111 entity->fence_context = dma_fence_context_alloc(2); 112 113 return 0; 114 } 115 EXPORT_SYMBOL(drm_sched_entity_init); 116 117 /** 118 * drm_sched_entity_modify_sched - Modify sched of an entity 119 * @entity: scheduler entity to init 120 * @sched_list: the list of new drm scheds which will replace 121 * existing entity->sched_list 122 * @num_sched_list: number of drm sched in sched_list 123 * 124 * Note that this must be called under the same common lock for @entity as 125 * drm_sched_job_arm() and drm_sched_entity_push_job(), or the driver needs to 126 * guarantee through some other means that this is never called while new jobs 127 * can be pushed to @entity. 128 */ 129 void drm_sched_entity_modify_sched(struct drm_sched_entity *entity, 130 struct drm_gpu_scheduler **sched_list, 131 unsigned int num_sched_list) 132 { 133 WARN_ON(!num_sched_list || !sched_list); 134 135 spin_lock(&entity->lock); 136 entity->sched_list = sched_list; 137 entity->num_sched_list = num_sched_list; 138 spin_unlock(&entity->lock); 139 } 140 EXPORT_SYMBOL(drm_sched_entity_modify_sched); 141 142 static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity) 143 { 144 rmb(); /* for list_empty to work without lock */ 145 146 if (list_empty(&entity->list) || 147 spsc_queue_count(&entity->job_queue) == 0 || 148 entity->stopped) 149 return true; 150 151 return false; 152 } 153 154 /** 155 * drm_sched_entity_error - return error of last scheduled job 156 * @entity: scheduler entity to check 157 * 158 * Opportunistically return the error of the last scheduled job. Result can 159 * change any time when new jobs are pushed to the hw. 160 */ 161 int drm_sched_entity_error(struct drm_sched_entity *entity) 162 { 163 struct dma_fence *fence; 164 int r; 165 166 rcu_read_lock(); 167 fence = rcu_dereference(entity->last_scheduled); 168 r = fence ? fence->error : 0; 169 rcu_read_unlock(); 170 171 return r; 172 } 173 EXPORT_SYMBOL(drm_sched_entity_error); 174 175 static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk) 176 { 177 struct drm_sched_job *job = container_of(wrk, typeof(*job), work); 178 179 drm_sched_fence_scheduled(job->s_fence, NULL); 180 drm_sched_fence_finished(job->s_fence, -ESRCH); 181 WARN_ON(job->s_fence->parent); 182 job->sched->ops->free_job(job); 183 } 184 185 /* Signal the scheduler finished fence when the entity in question is killed. */ 186 static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, 187 struct dma_fence_cb *cb) 188 { 189 struct drm_sched_job *job = container_of(cb, struct drm_sched_job, 190 finish_cb); 191 unsigned long index; 192 193 dma_fence_put(f); 194 195 /* Wait for all dependencies to avoid data corruptions */ 196 xa_for_each(&job->dependencies, index, f) { 197 struct drm_sched_fence *s_fence = to_drm_sched_fence(f); 198 199 if (s_fence && f == &s_fence->scheduled) { 200 /* The dependencies array had a reference on the scheduled 201 * fence, and the finished fence refcount might have 202 * dropped to zero. Use dma_fence_get_rcu() so we get 203 * a NULL fence in that case. 204 */ 205 f = dma_fence_get_rcu(&s_fence->finished); 206 207 /* Now that we have a reference on the finished fence, 208 * we can release the reference the dependencies array 209 * had on the scheduled fence. 210 */ 211 dma_fence_put(&s_fence->scheduled); 212 } 213 214 xa_erase(&job->dependencies, index); 215 if (f && !dma_fence_add_callback(f, &job->finish_cb, 216 drm_sched_entity_kill_jobs_cb)) 217 return; 218 219 dma_fence_put(f); 220 } 221 222 INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work); 223 schedule_work(&job->work); 224 } 225 226 /* Remove the entity from the scheduler and kill all pending jobs */ 227 static void drm_sched_entity_kill(struct drm_sched_entity *entity) 228 { 229 struct drm_sched_job *job; 230 struct dma_fence *prev; 231 232 if (!entity->rq) 233 return; 234 235 spin_lock(&entity->lock); 236 entity->stopped = true; 237 drm_sched_rq_remove_entity(entity->rq, entity); 238 spin_unlock(&entity->lock); 239 240 /* Make sure this entity is not used by the scheduler at the moment */ 241 wait_for_completion(&entity->entity_idle); 242 243 /* The entity is guaranteed to not be used by the scheduler */ 244 prev = rcu_dereference_check(entity->last_scheduled, true); 245 dma_fence_get(prev); 246 while ((job = drm_sched_entity_queue_pop(entity))) { 247 struct drm_sched_fence *s_fence = job->s_fence; 248 249 dma_fence_get(&s_fence->finished); 250 if (!prev || 251 dma_fence_add_callback(prev, &job->finish_cb, 252 drm_sched_entity_kill_jobs_cb)) { 253 /* 254 * Adding callback above failed. 255 * dma_fence_put() checks for NULL. 256 */ 257 dma_fence_put(prev); 258 drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb); 259 } 260 261 prev = &s_fence->finished; 262 } 263 dma_fence_put(prev); 264 } 265 266 /** 267 * drm_sched_entity_flush - Flush a context entity 268 * 269 * @entity: scheduler entity 270 * @timeout: time to wait in for Q to become empty in jiffies. 271 * 272 * Splitting drm_sched_entity_fini() into two functions, The first one does the 273 * waiting, removes the entity from the runqueue and returns an error when the 274 * process was killed. 275 * 276 * Returns the remaining time in jiffies left from the input timeout 277 */ 278 long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout) 279 { 280 struct drm_gpu_scheduler *sched; 281 struct task_struct *last_user; 282 long ret = timeout; 283 284 if (!entity->rq) 285 return 0; 286 287 sched = entity->rq->sched; 288 /** 289 * The client will not queue more IBs during this fini, consume existing 290 * queued IBs or discard them on SIGKILL 291 */ 292 if (current->flags & PF_EXITING) { 293 if (timeout) 294 ret = wait_event_timeout( 295 sched->job_scheduled, 296 drm_sched_entity_is_idle(entity), 297 timeout); 298 } else { 299 wait_event_killable(sched->job_scheduled, 300 drm_sched_entity_is_idle(entity)); 301 } 302 303 /* For killed process disable any more IBs enqueue right now */ 304 last_user = cmpxchg(&entity->last_user, current->group_leader, NULL); 305 if ((!last_user || last_user == current->group_leader) && 306 (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) 307 drm_sched_entity_kill(entity); 308 309 return ret; 310 } 311 EXPORT_SYMBOL(drm_sched_entity_flush); 312 313 /** 314 * drm_sched_entity_fini - Destroy a context entity 315 * 316 * @entity: scheduler entity 317 * 318 * Cleanups up @entity which has been initialized by drm_sched_entity_init(). 319 * 320 * If there are potentially job still in flight or getting newly queued 321 * drm_sched_entity_flush() must be called first. This function then goes over 322 * the entity and signals all jobs with an error code if the process was killed. 323 */ 324 void drm_sched_entity_fini(struct drm_sched_entity *entity) 325 { 326 /* 327 * If consumption of existing IBs wasn't completed. Forcefully remove 328 * them here. Also makes sure that the scheduler won't touch this entity 329 * any more. 330 */ 331 drm_sched_entity_kill(entity); 332 333 if (entity->dependency) { 334 dma_fence_remove_callback(entity->dependency, &entity->cb); 335 dma_fence_put(entity->dependency); 336 entity->dependency = NULL; 337 } 338 339 dma_fence_put(rcu_dereference_check(entity->last_scheduled, true)); 340 RCU_INIT_POINTER(entity->last_scheduled, NULL); 341 } 342 EXPORT_SYMBOL(drm_sched_entity_fini); 343 344 /** 345 * drm_sched_entity_destroy - Destroy a context entity 346 * @entity: scheduler entity 347 * 348 * Calls drm_sched_entity_flush() and drm_sched_entity_fini() as a 349 * convenience wrapper. 350 */ 351 void drm_sched_entity_destroy(struct drm_sched_entity *entity) 352 { 353 drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY); 354 drm_sched_entity_fini(entity); 355 } 356 EXPORT_SYMBOL(drm_sched_entity_destroy); 357 358 /* drm_sched_entity_clear_dep - callback to clear the entities dependency */ 359 static void drm_sched_entity_clear_dep(struct dma_fence *f, 360 struct dma_fence_cb *cb) 361 { 362 struct drm_sched_entity *entity = 363 container_of(cb, struct drm_sched_entity, cb); 364 365 entity->dependency = NULL; 366 dma_fence_put(f); 367 } 368 369 /* 370 * drm_sched_entity_wakeup - callback to clear the entity's dependency and 371 * wake up the scheduler 372 */ 373 static void drm_sched_entity_wakeup(struct dma_fence *f, 374 struct dma_fence_cb *cb) 375 { 376 struct drm_sched_entity *entity = 377 container_of(cb, struct drm_sched_entity, cb); 378 379 drm_sched_entity_clear_dep(f, cb); 380 drm_sched_wakeup(entity->rq->sched); 381 } 382 383 /** 384 * drm_sched_entity_set_priority - Sets priority of the entity 385 * 386 * @entity: scheduler entity 387 * @priority: scheduler priority 388 * 389 * Update the priority of runqueues used for the entity. 390 */ 391 void drm_sched_entity_set_priority(struct drm_sched_entity *entity, 392 enum drm_sched_priority priority) 393 { 394 spin_lock(&entity->lock); 395 entity->priority = priority; 396 spin_unlock(&entity->lock); 397 } 398 EXPORT_SYMBOL(drm_sched_entity_set_priority); 399 400 /* 401 * Add a callback to the current dependency of the entity to wake up the 402 * scheduler when the entity becomes available. 403 */ 404 static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity) 405 { 406 struct drm_gpu_scheduler *sched = entity->rq->sched; 407 struct dma_fence *fence = entity->dependency; 408 struct drm_sched_fence *s_fence; 409 410 if (fence->context == entity->fence_context || 411 fence->context == entity->fence_context + 1) { 412 /* 413 * Fence is a scheduled/finished fence from a job 414 * which belongs to the same entity, we can ignore 415 * fences from ourself 416 */ 417 dma_fence_put(entity->dependency); 418 return false; 419 } 420 421 s_fence = to_drm_sched_fence(fence); 422 if (!fence->error && s_fence && s_fence->sched == sched && 423 !test_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags)) { 424 425 /* 426 * Fence is from the same scheduler, only need to wait for 427 * it to be scheduled 428 */ 429 fence = dma_fence_get(&s_fence->scheduled); 430 dma_fence_put(entity->dependency); 431 entity->dependency = fence; 432 if (!dma_fence_add_callback(fence, &entity->cb, 433 drm_sched_entity_clear_dep)) 434 return true; 435 436 /* Ignore it when it is already scheduled */ 437 dma_fence_put(fence); 438 return false; 439 } 440 441 if (!dma_fence_add_callback(entity->dependency, &entity->cb, 442 drm_sched_entity_wakeup)) 443 return true; 444 445 dma_fence_put(entity->dependency); 446 return false; 447 } 448 449 static struct dma_fence * 450 drm_sched_job_dependency(struct drm_sched_job *job, 451 struct drm_sched_entity *entity) 452 { 453 struct dma_fence *f; 454 455 /* We keep the fence around, so we can iterate over all dependencies 456 * in drm_sched_entity_kill_jobs_cb() to ensure all deps are signaled 457 * before killing the job. 458 */ 459 f = xa_load(&job->dependencies, job->last_dependency); 460 if (f) { 461 job->last_dependency++; 462 return dma_fence_get(f); 463 } 464 465 if (job->sched->ops->prepare_job) 466 return job->sched->ops->prepare_job(job, entity); 467 468 return NULL; 469 } 470 471 struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity) 472 { 473 struct drm_sched_job *sched_job; 474 475 sched_job = drm_sched_entity_queue_peek(entity); 476 if (!sched_job) 477 return NULL; 478 479 while ((entity->dependency = 480 drm_sched_job_dependency(sched_job, entity))) { 481 trace_drm_sched_job_wait_dep(sched_job, entity->dependency); 482 483 if (drm_sched_entity_add_dependency_cb(entity)) 484 return NULL; 485 } 486 487 /* skip jobs from entity that marked guilty */ 488 if (entity->guilty && atomic_read(entity->guilty)) 489 dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED); 490 491 dma_fence_put(rcu_dereference_check(entity->last_scheduled, true)); 492 rcu_assign_pointer(entity->last_scheduled, 493 dma_fence_get(&sched_job->s_fence->finished)); 494 495 /* 496 * If the queue is empty we allow drm_sched_entity_select_rq() to 497 * locklessly access ->last_scheduled. This only works if we set the 498 * pointer before we dequeue and if we a write barrier here. 499 */ 500 smp_wmb(); 501 502 spsc_queue_pop(&entity->job_queue); 503 504 /* 505 * Update the entity's location in the min heap according to 506 * the timestamp of the next job, if any. 507 */ 508 if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) { 509 struct drm_sched_job *next; 510 511 next = drm_sched_entity_queue_peek(entity); 512 if (next) { 513 struct drm_sched_rq *rq; 514 515 spin_lock(&entity->lock); 516 rq = entity->rq; 517 spin_lock(&rq->lock); 518 drm_sched_rq_update_fifo_locked(entity, rq, 519 next->submit_ts); 520 spin_unlock(&rq->lock); 521 spin_unlock(&entity->lock); 522 } 523 } 524 525 /* Jobs and entities might have different lifecycles. Since we're 526 * removing the job from the entities queue, set the jobs entity pointer 527 * to NULL to prevent any future access of the entity through this job. 528 */ 529 sched_job->entity = NULL; 530 531 return sched_job; 532 } 533 534 void drm_sched_entity_select_rq(struct drm_sched_entity *entity) 535 { 536 struct dma_fence *fence; 537 struct drm_gpu_scheduler *sched; 538 struct drm_sched_rq *rq; 539 540 /* single possible engine and already selected */ 541 if (!entity->sched_list) 542 return; 543 544 /* queue non-empty, stay on the same engine */ 545 if (spsc_queue_count(&entity->job_queue)) 546 return; 547 548 /* 549 * Only when the queue is empty are we guaranteed that the scheduler 550 * thread cannot change ->last_scheduled. To enforce ordering we need 551 * a read barrier here. See drm_sched_entity_pop_job() for the other 552 * side. 553 */ 554 smp_rmb(); 555 556 fence = rcu_dereference_check(entity->last_scheduled, true); 557 558 /* stay on the same engine if the previous job hasn't finished */ 559 if (fence && !dma_fence_is_signaled(fence)) 560 return; 561 562 spin_lock(&entity->lock); 563 sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list); 564 rq = sched ? sched->sched_rq[entity->priority] : NULL; 565 if (rq != entity->rq) { 566 drm_sched_rq_remove_entity(entity->rq, entity); 567 entity->rq = rq; 568 } 569 spin_unlock(&entity->lock); 570 571 if (entity->num_sched_list == 1) 572 entity->sched_list = NULL; 573 } 574 575 /** 576 * drm_sched_entity_push_job - Submit a job to the entity's job queue 577 * @sched_job: job to submit 578 * 579 * Note: To guarantee that the order of insertion to queue matches the job's 580 * fence sequence number this function should be called with drm_sched_job_arm() 581 * under common lock for the struct drm_sched_entity that was set up for 582 * @sched_job in drm_sched_job_init(). 583 */ 584 void drm_sched_entity_push_job(struct drm_sched_job *sched_job) 585 { 586 struct drm_sched_entity *entity = sched_job->entity; 587 bool first; 588 ktime_t submit_ts; 589 590 trace_drm_sched_job(sched_job, entity); 591 atomic_inc(entity->rq->sched->score); 592 WRITE_ONCE(entity->last_user, current->group_leader); 593 594 /* 595 * After the sched_job is pushed into the entity queue, it may be 596 * completed and freed up at any time. We can no longer access it. 597 * Make sure to set the submit_ts first, to avoid a race. 598 */ 599 sched_job->submit_ts = submit_ts = ktime_get(); 600 first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node); 601 602 /* first job wakes up scheduler */ 603 if (first) { 604 struct drm_gpu_scheduler *sched; 605 struct drm_sched_rq *rq; 606 607 /* Add the entity to the run queue */ 608 spin_lock(&entity->lock); 609 if (entity->stopped) { 610 spin_unlock(&entity->lock); 611 612 DRM_ERROR("Trying to push to a killed entity\n"); 613 return; 614 } 615 616 rq = entity->rq; 617 sched = rq->sched; 618 619 spin_lock(&rq->lock); 620 drm_sched_rq_add_entity(rq, entity); 621 622 if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) 623 drm_sched_rq_update_fifo_locked(entity, rq, submit_ts); 624 625 spin_unlock(&rq->lock); 626 spin_unlock(&entity->lock); 627 628 drm_sched_wakeup(sched); 629 } 630 } 631 EXPORT_SYMBOL(drm_sched_entity_push_job); 632