/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/**
 * DOC: Overview
 *
 * The GPU scheduler provides entities which allow userspace to push jobs
 * into software queues which are then scheduled on a hardware run queue.
 * The software queues have a priority among them. The scheduler picks
 * entities from a run queue either round-robin or, by default, in FIFO order
 * of the oldest waiting job (selectable via the sched_policy module
 * parameter). The scheduler provides dependency handling features among
 * jobs. The driver is supposed to provide callback functions for backend
 * operations to the scheduler, like submitting a job to the hardware run
 * queue, returning the dependencies of a job, etc. (a minimal, illustrative
 * sketch of such driver-side callbacks follows the include block below).
 *
 * The organisation of the scheduler is the following:
 *
 * 1. Each hw run queue has one scheduler
 * 2. Each scheduler has multiple run queues with different priorities
 *    (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
 * 3. Each scheduler run queue has a queue of entities to schedule
 * 4. Entities themselves maintain a queue of jobs that will be scheduled on
 *    the hardware.
 *
 * The jobs in an entity are always scheduled in the order in which they were
 * pushed.
 *
 * Note that once a job has been taken from the entity's queue and pushed to
 * the hardware, i.e. the pending queue, the entity must not be referenced
 * any more through the job's entity pointer.
 */

#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/dma-resv.h>
#include <uapi/linux/sched/types.h>

#include <drm/drm_print.h>
#include <drm/drm_gem.h>
#include <drm/drm_syncobj.h>
#include <drm/gpu_scheduler.h>
#include <drm/spsc_queue.h>

#define CREATE_TRACE_POINTS
#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)
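
/*
 * Illustrative sketch (example only, never compiled): the minimal set of
 * driver-side callbacks the overview above refers to. All "example_*" names
 * are hypothetical; a real driver would kick its ring in run_job() and return
 * the hardware fence instead of NULL, and would need <linux/slab.h> for
 * kfree().
 */
#if 0
struct example_job {
	struct drm_sched_job base;
	/* driver-private ring/command data would live here */
};

static struct dma_fence *example_run_job(struct drm_sched_job *sched_job)
{
	/* Push the job to the hardware and return its hardware fence. */
	return NULL;	/* NULL means "no fence", an error pointer means failure */
}

static enum drm_gpu_sched_stat example_timedout_job(struct drm_sched_job *sched_job)
{
	/* Reset the hardware here, see drm_sched_stop()/drm_sched_start(). */
	return DRM_GPU_SCHED_STAT_NOMINAL;
}

static void example_free_job(struct drm_sched_job *sched_job)
{
	struct example_job *job = container_of(sched_job, struct example_job, base);

	drm_sched_job_cleanup(sched_job);
	kfree(job);
}

static const struct drm_sched_backend_ops example_sched_ops = {
	.run_job	= example_run_job,
	.timedout_job	= example_timedout_job,
	.free_job	= example_free_job,
};
#endif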

int drm_sched_policy = DRM_SCHED_POLICY_FIFO;

/**
 * DOC: sched_policy (int)
 * Used to override the default entity scheduling policy in a run queue.
 */
MODULE_PARM_DESC(sched_policy, "Specify the scheduling policy for entities on a run-queue, "
		 __stringify(DRM_SCHED_POLICY_RR) " = Round Robin, "
		 __stringify(DRM_SCHED_POLICY_FIFO) " = FIFO (default).");
module_param_named(sched_policy, drm_sched_policy, int, 0444);

static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a,
							     const struct rb_node *b)
{
	struct drm_sched_entity *ent_a = rb_entry((a), struct drm_sched_entity, rb_tree_node);
	struct drm_sched_entity *ent_b = rb_entry((b), struct drm_sched_entity, rb_tree_node);

	return ktime_before(ent_a->oldest_job_waiting, ent_b->oldest_job_waiting);
}

static inline void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity)
{
	struct drm_sched_rq *rq = entity->rq;

	if (!RB_EMPTY_NODE(&entity->rb_tree_node)) {
		rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root);
		RB_CLEAR_NODE(&entity->rb_tree_node);
	}
}

void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts)
{
	/*
	 * Both locks need to be grabbed, one to protect from entity->rq change
	 * for entity from within concurrent drm_sched_entity_select_rq and the
	 * other to update the rb tree structure.
	 */
	spin_lock(&entity->rq_lock);
	spin_lock(&entity->rq->lock);

	drm_sched_rq_remove_fifo_locked(entity);

	entity->oldest_job_waiting = ts;

	rb_add_cached(&entity->rb_tree_node, &entity->rq->rb_tree_root,
		      drm_sched_entity_compare_before);

	spin_unlock(&entity->rq->lock);
	spin_unlock(&entity->rq_lock);
}

/**
 * drm_sched_rq_init - initialize a given run queue struct
 *
 * @sched: scheduler instance to associate with this run queue
 * @rq: scheduler run queue
 *
 * Initializes a scheduler runqueue.
 */
static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
			      struct drm_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->rb_tree_root = RB_ROOT_CACHED;
	rq->current_entity = NULL;
	rq->sched = sched;
}

/**
 * drm_sched_rq_add_entity - add an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Adds a scheduler entity to the run queue.
 */
void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;

	spin_lock(&rq->lock);

	atomic_inc(rq->sched->score);
	list_add_tail(&entity->list, &rq->entities);

	spin_unlock(&rq->lock);
}

/**
 * drm_sched_rq_remove_entity - remove an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Removes a scheduler entity from the run queue.
 */
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;

	spin_lock(&rq->lock);

	atomic_dec(rq->sched->score);
	list_del_init(&entity->list);

	if (rq->current_entity == entity)
		rq->current_entity = NULL;

	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
		drm_sched_rq_remove_fifo_locked(entity);

	spin_unlock(&rq->lock);
}

/**
 * drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run
 *
 * @rq: scheduler run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct drm_sched_entity *
drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq)
{
	struct drm_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (drm_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				reinit_completion(&entity->entity_idle);
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {

		if (drm_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			reinit_completion(&entity->entity_idle);
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run
 *
 * @rq: scheduler run queue to check.
 *
 * Find oldest waiting ready entity, returns NULL if none found.
 */
static struct drm_sched_entity *
drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
{
	struct rb_node *rb;

	spin_lock(&rq->lock);
	for (rb = rb_first_cached(&rq->rb_tree_root); rb; rb = rb_next(rb)) {
		struct drm_sched_entity *entity;

		entity = rb_entry(rb, struct drm_sched_entity, rb_tree_node);
		if (drm_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			reinit_completion(&entity->entity_idle);
			break;
		}
	}
	spin_unlock(&rq->lock);

	return rb ? rb_entry(rb, struct drm_sched_entity, rb_tree_node) : NULL;
}

/**
 * drm_sched_job_done - complete a job
 * @s_job: pointer to the job which is done
 * @result: the result of the job's execution, passed on to the finished fence
 *
 * Finish the job's fence and wake up the worker thread.
 */
static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
{
	struct drm_sched_fence *s_fence = s_job->s_fence;
	struct drm_gpu_scheduler *sched = s_fence->sched;

	atomic_dec(&sched->hw_rq_count);
	atomic_dec(sched->score);

	trace_drm_sched_process_job(s_fence);

	dma_fence_get(&s_fence->finished);
	drm_sched_fence_finished(s_fence, result);
	dma_fence_put(&s_fence->finished);
	wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * drm_sched_job_done_cb - the callback for a done job
 * @f: fence
 * @cb: fence callback
 */
static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);

	drm_sched_job_done(s_job, f->error);
}

/**
 * drm_sched_start_timeout - start timeout for reset worker
 *
 * @sched: scheduler instance to start the worker for
 *
 * Start the timeout for the given scheduler.
 */
static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
{
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    !list_empty(&sched->pending_list))
		queue_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout);
}

/**
 * drm_sched_fault - immediately start timeout handler
 *
 * @sched: scheduler where the timeout handling should be started.
 *
 * Start timeout handling immediately when the driver detects a hardware fault.
 */
void drm_sched_fault(struct drm_gpu_scheduler *sched)
{
	if (sched->timeout_wq)
		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0);
}
EXPORT_SYMBOL(drm_sched_fault);
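
/*
 * Illustrative sketch (example only, never compiled): drm_sched_fault() is
 * meant to be called from the driver's fault/error interrupt path so timeout
 * handling does not have to wait for the full timeout period. The "example_*"
 * name is hypothetical and <linux/interrupt.h> would be needed.
 */
#if 0
static irqreturn_t example_fault_irq(int irq, void *arg)
{
	struct drm_gpu_scheduler *sched = arg;

	/* Kick the timeout worker immediately instead of waiting for it. */
	drm_sched_fault(sched);

	return IRQ_HANDLED;
}
#endif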

/**
 * drm_sched_suspend_timeout - Suspend scheduler job timeout
 *
 * @sched: scheduler instance for which to suspend the timeout
 *
 * Suspend the delayed work timeout for the scheduler. This is done by
 * modifying the delayed work timeout to an arbitrarily large value,
 * MAX_SCHEDULE_TIMEOUT in this case.
 *
 * Returns the remaining timeout in jiffies.
 */
unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
{
	unsigned long sched_timeout, now = jiffies;

	sched_timeout = sched->work_tdr.timer.expires;

	/*
	 * Modify the timeout to an arbitrarily large value. This also prevents
	 * the timeout from being restarted when new submissions arrive.
	 */
	if (mod_delayed_work(sched->timeout_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
			&& time_after(sched_timeout, now))
		return sched_timeout - now;
	else
		return sched->timeout;
}
EXPORT_SYMBOL(drm_sched_suspend_timeout);

/**
 * drm_sched_resume_timeout - Resume scheduler job timeout
 *
 * @sched: scheduler instance for which to resume the timeout
 * @remaining: remaining timeout
 *
 * Resume the delayed work timeout for the scheduler.
 */
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
			      unsigned long remaining)
{
	spin_lock(&sched->job_list_lock);

	if (list_empty(&sched->pending_list))
		cancel_delayed_work(&sched->work_tdr);
	else
		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, remaining);

	spin_unlock(&sched->job_list_lock);
}
EXPORT_SYMBOL(drm_sched_resume_timeout);
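
/*
 * Illustrative sketch (example only, never compiled): how a driver pairs
 * drm_sched_suspend_timeout() and drm_sched_resume_timeout() around a window
 * in which jobs legitimately make no progress (e.g. while the ring is
 * preempted). "example_preempt_ring()" and "example_restore_ring()" are
 * hypothetical driver helpers.
 */
#if 0
static void example_world_switch(struct drm_gpu_scheduler *sched)
{
	unsigned long remaining;

	/* Stop the TDR clock and remember how much time was left. */
	remaining = drm_sched_suspend_timeout(sched);

	example_preempt_ring();
	example_restore_ring();

	/* Re-arm the timeout with the time that was still remaining. */
	drm_sched_resume_timeout(sched, remaining);
}
#endif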

static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
	struct drm_gpu_scheduler *sched = s_job->sched;

	spin_lock(&sched->job_list_lock);
	list_add_tail(&s_job->list, &sched->pending_list);
	drm_sched_start_timeout(sched);
	spin_unlock(&sched->job_list_lock);
}

static void drm_sched_job_timedout(struct work_struct *work)
{
	struct drm_gpu_scheduler *sched;
	struct drm_sched_job *job;
	enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_NOMINAL;

	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);

	/* Protects against concurrent deletion in drm_sched_get_cleanup_job */
	spin_lock(&sched->job_list_lock);
	job = list_first_entry_or_null(&sched->pending_list,
				       struct drm_sched_job, list);

	if (job) {
		/*
		 * Remove the bad job so it cannot be freed by a concurrent
		 * drm_sched_cleanup_jobs. It will be reinserted after
		 * sched->thread is parked, at which point it's safe.
		 */
		list_del_init(&job->list);
		spin_unlock(&sched->job_list_lock);

		status = job->sched->ops->timedout_job(job);

		/*
		 * The guilty job did complete and hence needs to be removed
		 * manually. See the drm_sched_stop() documentation.
		 */
		if (sched->free_guilty) {
			job->sched->ops->free_job(job);
			sched->free_guilty = false;
		}
	} else {
		spin_unlock(&sched->job_list_lock);
	}

	if (status != DRM_GPU_SCHED_STAT_ENODEV) {
		spin_lock(&sched->job_list_lock);
		drm_sched_start_timeout(sched);
		spin_unlock(&sched->job_list_lock);
	}
}

/**
 * drm_sched_stop - stop the scheduler
 *
 * @sched: scheduler instance
 * @bad: job which caused the time out
 *
 * Stop the scheduler, and also remove and free all completed jobs.
 * Note: the bad job will not be freed as it might be used later, so it is the
 * caller's responsibility to release it manually if it is no longer part of
 * the pending list.
 *
 */
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
	struct drm_sched_job *s_job, *tmp;

	kthread_park(sched->thread);

	/*
	 * Reinsert back the bad job here - now it's safe as
	 * drm_sched_get_cleanup_job cannot race against us and release the
	 * bad job at this point - we parked (waited for) any in progress
	 * (earlier) cleanups and drm_sched_get_cleanup_job will not be called
	 * now until the scheduler thread is unparked.
	 */
	if (bad && bad->sched == sched)
		/*
		 * Add at the head of the queue to reflect it was the earliest
		 * job extracted.
		 */
		list_add(&bad->list, &sched->pending_list);

	/*
	 * Iterate the job list from the later to the earlier jobs and either
	 * deactivate their HW callbacks or remove them from the pending list
	 * if they have already signaled.
	 * This iteration is thread safe as the sched thread is stopped.
	 */
	list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
					 list) {
		if (s_job->s_fence->parent &&
		    dma_fence_remove_callback(s_job->s_fence->parent,
					      &s_job->cb)) {
			dma_fence_put(s_job->s_fence->parent);
			s_job->s_fence->parent = NULL;
			atomic_dec(&sched->hw_rq_count);
		} else {
			/*
			 * Remove job from pending_list.
			 * Locking here is for concurrent resume timeout.
			 */
			spin_lock(&sched->job_list_lock);
			list_del_init(&s_job->list);
			spin_unlock(&sched->job_list_lock);

			/*
			 * Wait for job's HW fence callback to finish using s_job
			 * before releasing it.
			 *
			 * Job is still alive so fence refcount at least 1
			 */
			dma_fence_wait(&s_job->s_fence->finished, false);

			/*
			 * We must keep bad job alive for later use during
			 * recovery by some of the drivers but leave a hint
			 * that the guilty job must be released.
			 */
			if (bad != s_job)
				sched->ops->free_job(s_job);
			else
				sched->free_guilty = true;
		}
	}

	/*
	 * Stop the pending timer in flight as we rearm it in drm_sched_start.
	 * This prevents the timeout work in progress from firing right away
	 * after this TDR finished and before the newly restarted jobs had a
	 * chance to complete.
	 */
	cancel_delayed_work(&sched->work_tdr);
}

EXPORT_SYMBOL(drm_sched_stop);
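
/*
 * Illustrative sketch (example only, never compiled): the usual recovery
 * sequence a driver runs from its &drm_sched_backend_ops.timedout_job
 * callback. "example_reset_hw()" is a hypothetical driver helper; whether a
 * driver resubmits jobs at all is its own choice, see the
 * drm_sched_resubmit_jobs() documentation further down.
 */
#if 0
static enum drm_gpu_sched_stat example_timedout_job(struct drm_sched_job *bad)
{
	struct drm_gpu_scheduler *sched = bad->sched;

	drm_sched_stop(sched, bad);	/* park the scheduler, prune done jobs */
	drm_sched_increase_karma(bad);	/* blame the offending entity */

	example_reset_hw();		/* driver-specific engine reset */

	drm_sched_start(sched, true);	/* re-arm fences and unpark */

	return DRM_GPU_SCHED_STAT_NOMINAL;
}
#endif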

/**
 * drm_sched_start - recover jobs after a reset
 *
 * @sched: scheduler instance
 * @full_recovery: proceed with complete sched restart
 *
 * Restart job processing after a reset: re-arm the hardware fence callbacks
 * of the jobs on the pending list (for a full recovery) and unpark the
 * scheduler thread.
 */
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
{
	struct drm_sched_job *s_job, *tmp;
	int r;

	/*
	 * Locking the list is not required here as the sched thread is parked
	 * so no new jobs are being inserted or removed. Also, concurrent
	 * GPU recoveries can't run in parallel.
	 */
	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
		struct dma_fence *fence = s_job->s_fence->parent;

		atomic_inc(&sched->hw_rq_count);

		if (!full_recovery)
			continue;

		if (fence) {
			r = dma_fence_add_callback(fence, &s_job->cb,
						   drm_sched_job_done_cb);
			if (r == -ENOENT)
				drm_sched_job_done(s_job, fence->error);
			else if (r)
				DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
					      r);
		} else
			drm_sched_job_done(s_job, -ECANCELED);
	}

	if (full_recovery) {
		spin_lock(&sched->job_list_lock);
		drm_sched_start_timeout(sched);
		spin_unlock(&sched->job_list_lock);
	}

	kthread_unpark(sched->thread);
}
EXPORT_SYMBOL(drm_sched_start);

/**
 * drm_sched_resubmit_jobs - Deprecated, don't use in new code!
 *
 * @sched: scheduler instance
 *
 * Re-submitting jobs was a concept AMD came up with as a cheap way to
 * implement recovery after a job timeout.
 *
 * This turned out to not work very well. First of all, there are many
 * problems with the dma_fence implementation and requirements. Either the
 * implementation is risking deadlocks with core memory management or violating
 * documented implementation details of the dma_fence object.
 *
 * Drivers can still save and restore their state for recovery operations, but
 * we shouldn't make this a general scheduler feature around the dma_fence
 * interface.
 */
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job, *tmp;
	uint64_t guilty_context;
	bool found_guilty = false;
	struct dma_fence *fence;

	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
		struct drm_sched_fence *s_fence = s_job->s_fence;

		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
			found_guilty = true;
			guilty_context = s_job->s_fence->scheduled.context;
		}

		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
			dma_fence_set_error(&s_fence->finished, -ECANCELED);

		fence = sched->ops->run_job(s_job);

		if (IS_ERR_OR_NULL(fence)) {
			if (IS_ERR(fence))
				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));

			s_job->s_fence->parent = NULL;
		} else {

			s_job->s_fence->parent = dma_fence_get(fence);

			/* Drop for original kref_init */
			dma_fence_put(fence);
		}
	}
}
EXPORT_SYMBOL(drm_sched_resubmit_jobs);

/**
 * drm_sched_job_init - init a scheduler job
 * @job: scheduler job to init
 * @entity: scheduler entity to use
 * @owner: job owner for debugging
 *
 * Refer to drm_sched_entity_push_job() documentation
 * for locking considerations.
 *
 * Drivers must make sure to call drm_sched_job_cleanup() if this function
 * returns successfully, even when @job is aborted before drm_sched_job_arm()
 * is called.
 *
 * WARNING: amdgpu abuses &drm_sched.ready to signal when the hardware
 * has died, which can mean that there's no valid runqueue for a @entity.
 * This function returns -ENOENT in this case (which probably should be -EIO as
 * a more meaningful return value).
 *
 * Returns 0 for success, negative error code otherwise.
 */
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner)
{
	if (!entity->rq) {
		/* This will most likely be followed by missing frames
		 * or worse--a blank screen--leave a trail in the
		 * logs, so this can be debugged easier.
		 */
		drm_err(job->sched, "%s: entity has no rq!\n", __func__);
		return -ENOENT;
	}

	job->entity = entity;
	job->s_fence = drm_sched_fence_alloc(entity, owner);
	if (!job->s_fence)
		return -ENOMEM;

	INIT_LIST_HEAD(&job->list);

	xa_init_flags(&job->dependencies, XA_FLAGS_ALLOC);

	return 0;
}
EXPORT_SYMBOL(drm_sched_job_init);

/**
 * drm_sched_job_arm - arm a scheduler job for execution
 * @job: scheduler job to arm
 *
 * This arms a scheduler job for execution. Specifically it initializes the
 * &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv
 * or other places that need to track the completion of this job.
 *
 * Refer to drm_sched_entity_push_job() documentation for locking
 * considerations.
 *
 * This can only be called if drm_sched_job_init() succeeded.
 */
void drm_sched_job_arm(struct drm_sched_job *job)
{
	struct drm_gpu_scheduler *sched;
	struct drm_sched_entity *entity = job->entity;

	BUG_ON(!entity);
	drm_sched_entity_select_rq(entity);
	sched = entity->rq->sched;

	job->sched = sched;
	job->s_priority = entity->priority;
	job->id = atomic64_inc_return(&sched->job_id_count);

	drm_sched_fence_init(job->s_fence, job->entity);
}
EXPORT_SYMBOL(drm_sched_job_arm);

/**
 * drm_sched_job_add_dependency - adds the fence as a job dependency
 * @job: scheduler job to add the dependencies to
 * @fence: the dma_fence to add to the list of dependencies.
 *
 * Note that @fence is consumed in both the success and error cases.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_dependency(struct drm_sched_job *job,
				 struct dma_fence *fence)
{
	struct dma_fence *entry;
	unsigned long index;
	u32 id = 0;
	int ret;

	if (!fence)
		return 0;

	/* Deduplicate if we already depend on a fence from the same context.
	 * This lets the size of the array of deps scale with the number of
	 * engines involved, rather than the number of BOs.
	 */
	xa_for_each(&job->dependencies, index, entry) {
		if (entry->context != fence->context)
			continue;

		if (dma_fence_is_later(fence, entry)) {
			dma_fence_put(entry);
			xa_store(&job->dependencies, index, fence, GFP_KERNEL);
		} else {
			dma_fence_put(fence);
		}
		return 0;
	}

	ret = xa_alloc(&job->dependencies, &id, fence, xa_limit_32b, GFP_KERNEL);
	if (ret != 0)
		dma_fence_put(fence);

	return ret;
}
EXPORT_SYMBOL(drm_sched_job_add_dependency);

/**
 * drm_sched_job_add_syncobj_dependency - adds a syncobj's fence as a job dependency
 * @job: scheduler job to add the dependencies to
 * @file: drm file private pointer
 * @handle: syncobj handle to lookup
 * @point: timeline point
 *
 * This adds the fence matching the given syncobj to @job.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
					 struct drm_file *file,
					 u32 handle,
					 u32 point)
{
	struct dma_fence *fence;
	int ret;

	ret = drm_syncobj_find_fence(file, handle, point, 0, &fence);
	if (ret)
		return ret;

	return drm_sched_job_add_dependency(job, fence);
}
EXPORT_SYMBOL(drm_sched_job_add_syncobj_dependency);
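
/*
 * Illustrative sketch (example only, never compiled): the order of the job
 * API calls a driver typically makes for one submission. struct example_job
 * and example_copy_deps_from_userspace() are hypothetical; the important part
 * is init -> add dependencies -> arm -> push, with drm_sched_job_cleanup() on
 * the error path before the job was armed.
 */
#if 0
static int example_submit(struct drm_sched_entity *entity,
			  struct example_job *job, void *owner)
{
	int ret;

	ret = drm_sched_job_init(&job->base, entity, owner);
	if (ret)
		return ret;

	/* Dependencies may be added any time between init and arm. */
	ret = example_copy_deps_from_userspace(&job->base);
	if (ret)
		goto err_cleanup;

	drm_sched_job_arm(&job->base);		/* point of no return */
	drm_sched_entity_push_job(&job->base);

	return 0;

err_cleanup:
	drm_sched_job_cleanup(&job->base);
	return ret;
}
#endif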

/**
 * drm_sched_job_add_resv_dependencies - add all fences from the resv to the job
 * @job: scheduler job to add the dependencies to
 * @resv: the dma_resv object to get the fences from
 * @usage: the dma_resv_usage to use to filter the fences
 *
 * This adds all fences matching the given usage from @resv to @job.
 * Must be called with the @resv lock held.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
					struct dma_resv *resv,
					enum dma_resv_usage usage)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	int ret;

	dma_resv_assert_held(resv);

	dma_resv_for_each_fence(&cursor, resv, usage, fence) {
		/* Make sure to grab an additional ref on the added fence */
		dma_fence_get(fence);
		ret = drm_sched_job_add_dependency(job, fence);
		if (ret) {
			dma_fence_put(fence);
			return ret;
		}
	}
	return 0;
}
EXPORT_SYMBOL(drm_sched_job_add_resv_dependencies);

/**
 * drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job
 *   dependencies
 * @job: scheduler job to add the dependencies to
 * @obj: the gem object to add new dependencies from.
 * @write: whether the job might write the object (so we need to depend on
 * shared fences in the reservation object).
 *
 * This should be called after drm_gem_lock_reservations() on your array of
 * GEM objects used in the job but before updating the reservations with your
 * own fences.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
					    struct drm_gem_object *obj,
					    bool write)
{
	return drm_sched_job_add_resv_dependencies(job, obj->resv,
						   dma_resv_usage_rw(write));
}
EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
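
/*
 * Illustrative sketch (example only, never compiled): per the documentation
 * above, the reservation object must be locked while implicit dependencies
 * are collected. Locking a single BO with dma_resv_lock() is an assumption
 * made for brevity; drivers with multiple BOs typically use
 * drm_gem_lock_reservations() or drm_exec instead.
 */
#if 0
static int example_add_implicit_deps(struct drm_sched_job *job,
				     struct drm_gem_object *obj, bool write)
{
	int ret;

	ret = dma_resv_lock(obj->resv, NULL);
	if (ret)
		return ret;

	/* Depend on everything previously queued against this BO. */
	ret = drm_sched_job_add_implicit_dependencies(job, obj, write);

	/*
	 * After drm_sched_job_arm() the driver would publish its own
	 * &job->s_fence->finished fence to obj->resv, still under this lock.
	 */
	dma_resv_unlock(obj->resv);
	return ret;
}
#endif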

/**
 * drm_sched_job_cleanup - clean up scheduler job resources
 * @job: scheduler job to clean up
 *
 * Cleans up the resources allocated with drm_sched_job_init().
 *
 * Drivers should call this from their error unwind code if @job is aborted
 * before drm_sched_job_arm() is called.
 *
 * After that point of no return @job is committed to be executed by the
 * scheduler, and this function should be called from the
 * &drm_sched_backend_ops.free_job callback.
 */
void drm_sched_job_cleanup(struct drm_sched_job *job)
{
	struct dma_fence *fence;
	unsigned long index;

	if (kref_read(&job->s_fence->finished.refcount)) {
		/* drm_sched_job_arm() has been called */
		dma_fence_put(&job->s_fence->finished);
	} else {
		/* aborted job before committing to run it */
		drm_sched_fence_free(job->s_fence);
	}

	job->s_fence = NULL;

	xa_for_each(&job->dependencies, index, fence) {
		dma_fence_put(fence);
	}
	xa_destroy(&job->dependencies);
}
EXPORT_SYMBOL(drm_sched_job_cleanup);

/**
 * drm_sched_can_queue -- Can we queue more to the hardware?
 * @sched: scheduler instance
 *
 * Return true if we can push more jobs to the hw, otherwise false.
 */
static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * drm_sched_wakeup_if_can_queue - Wake up the scheduler
 * @sched: scheduler instance
 *
 * Wake up the scheduler if we can queue jobs.
 */
void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched)
{
	if (drm_sched_can_queue(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * drm_sched_select_entity - Select next entity to process
 *
 * @sched: scheduler instance
 *
 * Returns the entity to process or NULL if none are found.
 */
static struct drm_sched_entity *
drm_sched_select_entity(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_entity *entity;
	int i;

	if (!drm_sched_can_queue(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
			drm_sched_rq_select_entity_fifo(sched->sched_rq[i]) :
			drm_sched_rq_select_entity_rr(sched->sched_rq[i]);
		if (entity)
			break;
	}

	return entity;
}

/**
 * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
 *
 * @sched: scheduler instance
 *
 * Returns the next finished job from the pending list (if there is one)
 * ready for it to be destroyed.
 */
static struct drm_sched_job *
drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *job, *next;

	spin_lock(&sched->job_list_lock);

	job = list_first_entry_or_null(&sched->pending_list,
				       struct drm_sched_job, list);

	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
		/* remove job from pending_list */
		list_del_init(&job->list);

		/* cancel this job's TO timer */
		cancel_delayed_work(&sched->work_tdr);
		/* make the scheduled timestamp more accurate */
		next = list_first_entry_or_null(&sched->pending_list,
						typeof(*next), list);

		if (next) {
			next->s_fence->scheduled.timestamp =
				dma_fence_timestamp(&job->s_fence->finished);
			/* start TO timer for next job */
			drm_sched_start_timeout(sched);
		}
	} else {
		job = NULL;
	}

	spin_unlock(&sched->job_list_lock);

	return job;
}

/**
 * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
 * @sched_list: list of drm_gpu_schedulers
 * @num_sched_list: number of drm_gpu_schedulers in the sched_list
 *
 * Returns a pointer to the scheduler with the least load, or NULL if none of
 * the drm_gpu_schedulers are ready.
 */
struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
		    unsigned int num_sched_list)
{
	struct drm_gpu_scheduler *sched, *picked_sched = NULL;
	int i;
	unsigned int min_score = UINT_MAX, num_score;

	for (i = 0; i < num_sched_list; ++i) {
		sched = sched_list[i];

		if (!sched->ready) {
			DRM_WARN("scheduler %s is not ready, skipping",
				 sched->name);
			continue;
		}

		num_score = atomic_read(sched->score);
		if (num_score < min_score) {
			min_score = num_score;
			picked_sched = sched;
		}
	}

	return picked_sched;
}
EXPORT_SYMBOL(drm_sched_pick_best);
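
/*
 * Illustrative sketch (example only, never compiled): drm_sched_pick_best()
 * lets a driver spread entities across several schedulers of the same engine
 * type by picking the least loaded one at entity creation time. The
 * "example_*" array and count are hypothetical.
 */
#if 0
static int example_create_entity(struct drm_sched_entity *entity,
				 struct drm_gpu_scheduler **example_scheds,
				 unsigned int example_num_scheds)
{
	struct drm_gpu_scheduler *sched;

	sched = drm_sched_pick_best(example_scheds, example_num_scheds);
	if (!sched)
		return -ENODEV;	/* no scheduler ready, e.g. all rings dead */

	return drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_NORMAL,
				     &sched, 1, NULL);
}
#endif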

/**
 * drm_sched_blocked - check if the scheduler is blocked
 *
 * @sched: scheduler instance
 *
 * Returns true if blocked, otherwise false.
 */
static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
{
	if (kthread_should_park()) {
		kthread_parkme();
		return true;
	}

	return false;
}

/**
 * drm_sched_main - main scheduler thread
 *
 * @param: scheduler instance
 *
 * Returns 0.
 */
static int drm_sched_main(void *param)
{
	struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
	int r;

	sched_set_fifo_low(current);

	while (!kthread_should_stop()) {
		struct drm_sched_entity *entity = NULL;
		struct drm_sched_fence *s_fence;
		struct drm_sched_job *sched_job;
		struct dma_fence *fence;
		struct drm_sched_job *cleanup_job = NULL;

		wait_event_interruptible(sched->wake_up_worker,
					 (cleanup_job = drm_sched_get_cleanup_job(sched)) ||
					 (!drm_sched_blocked(sched) &&
					  (entity = drm_sched_select_entity(sched))) ||
					 kthread_should_stop());

		if (cleanup_job)
			sched->ops->free_job(cleanup_job);

		if (!entity)
			continue;

		sched_job = drm_sched_entity_pop_job(entity);

		if (!sched_job) {
			complete_all(&entity->entity_idle);
			continue;
		}

		s_fence = sched_job->s_fence;

		atomic_inc(&sched->hw_rq_count);
		drm_sched_job_begin(sched_job);

		trace_drm_run_job(sched_job, entity);
		fence = sched->ops->run_job(sched_job);
		complete_all(&entity->entity_idle);
		drm_sched_fence_scheduled(s_fence, fence);

		if (!IS_ERR_OR_NULL(fence)) {
			/* Drop for original kref_init of the fence */
			dma_fence_put(fence);

			r = dma_fence_add_callback(fence, &sched_job->cb,
						   drm_sched_job_done_cb);
			if (r == -ENOENT)
				drm_sched_job_done(sched_job, fence->error);
			else if (r)
				DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
					      r);
		} else {
			drm_sched_job_done(sched_job, IS_ERR(fence) ?
					   PTR_ERR(fence) : 0);
		}

		wake_up(&sched->job_scheduled);
	}
	return 0;
}

/**
 * drm_sched_init - Init a gpu scheduler instance
 *
 * @sched: scheduler instance
 * @ops: backend operations for this scheduler
 * @num_rqs: number of runqueues, one for each priority, up to DRM_SCHED_PRIORITY_COUNT
 * @hw_submission: number of hw submissions that can be in flight
 * @hang_limit: number of times to allow a job to hang before dropping it
 * @timeout: timeout value in jiffies for the scheduler
 * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is
 *		used
 * @score: optional score atomic shared with other schedulers
 * @name: name used for debugging
 * @dev: target &struct device
 *
 * Return 0 on success, otherwise error code.
 */
int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   u32 num_rqs, uint32_t hw_submission, unsigned int hang_limit,
		   long timeout, struct workqueue_struct *timeout_wq,
		   atomic_t *score, const char *name, struct device *dev)
{
	int i, ret;

	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	sched->timeout_wq = timeout_wq ? : system_wq;
	sched->hang_limit = hang_limit;
	sched->score = score ? score : &sched->_score;
	sched->dev = dev;

	if (num_rqs > DRM_SCHED_PRIORITY_COUNT) {
		/* This is a gross violation--tell drivers what the problem is.
		 */
		drm_err(sched, "%s: num_rqs cannot be greater than DRM_SCHED_PRIORITY_COUNT\n",
			__func__);
		return -EINVAL;
	} else if (sched->sched_rq) {
		/* Not an error, but warn anyway so drivers can
		 * fine-tune their DRM calling order, and return all
		 * is good.
		 */
		drm_warn(sched, "%s: scheduler already initialized!\n", __func__);
		return 0;
	}

	sched->sched_rq = kmalloc_array(num_rqs, sizeof(*sched->sched_rq),
					GFP_KERNEL | __GFP_ZERO);
	if (!sched->sched_rq) {
		drm_err(sched, "%s: out of memory for sched_rq\n", __func__);
		return -ENOMEM;
	}
	sched->num_rqs = num_rqs;
	ret = -ENOMEM;
	for (i = DRM_SCHED_PRIORITY_MIN; i < sched->num_rqs; i++) {
		sched->sched_rq[i] = kzalloc(sizeof(*sched->sched_rq[i]), GFP_KERNEL);
		if (!sched->sched_rq[i])
			goto Out_unroll;
		drm_sched_rq_init(sched, sched->sched_rq[i]);
	}

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	INIT_LIST_HEAD(&sched->pending_list);
	spin_lock_init(&sched->job_list_lock);
	atomic_set(&sched->hw_rq_count, 0);
	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
	atomic_set(&sched->_score, 0);
	atomic64_set(&sched->job_id_count, 0);

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		ret = PTR_ERR(sched->thread);
		sched->thread = NULL;
		DRM_DEV_ERROR(sched->dev, "Failed to create scheduler for %s.\n", name);
		goto Out_unroll;
	}

	sched->ready = true;
	return 0;
Out_unroll:
	for (--i ; i >= DRM_SCHED_PRIORITY_MIN; i--)
		kfree(sched->sched_rq[i]);
	kfree(sched->sched_rq);
	sched->sched_rq = NULL;
	drm_err(sched, "%s: Failed to setup GPU scheduler--out of memory\n", __func__);
	return ret;
}
EXPORT_SYMBOL(drm_sched_init);

/**
 * drm_sched_fini - Destroy a gpu scheduler
 *
 * @sched: scheduler instance
 *
 * Tears down and cleans up the scheduler.
 */
void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_entity *s_entity;
	int i;

	if (sched->thread)
		kthread_stop(sched->thread);

	for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		struct drm_sched_rq *rq = sched->sched_rq[i];

		spin_lock(&rq->lock);
		list_for_each_entry(s_entity, &rq->entities, list)
			/*
			 * Prevents reinsertion and marks job_queue as idle,
			 * it will be removed from the rq in
			 * drm_sched_entity_fini() eventually.
			 */
			s_entity->stopped = true;
		spin_unlock(&rq->lock);
		kfree(sched->sched_rq[i]);
	}

	/* Wakeup everyone stuck in drm_sched_entity_flush for this scheduler */
	wake_up_all(&sched->job_scheduled);

	/* Confirm no work left behind accessing device structures */
	cancel_delayed_work_sync(&sched->work_tdr);

	sched->ready = false;
	kfree(sched->sched_rq);
	sched->sched_rq = NULL;
}
EXPORT_SYMBOL(drm_sched_fini);
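
/*
 * Illustrative sketch (example only, never compiled): scheduler lifecycle for
 * one ring. The ops structure would be the driver's &drm_sched_backend_ops
 * (see the sketch near the top of this file); the submission depth, hang
 * limit and timeout values below are arbitrary example numbers.
 */
#if 0
static int example_ring_init(struct drm_gpu_scheduler *sched,
			     const struct drm_sched_backend_ops *ops,
			     struct device *dev)
{
	return drm_sched_init(sched, ops,
			      DRM_SCHED_PRIORITY_COUNT,	/* one rq per priority */
			      64,			/* hw_submission limit */
			      3,			/* hang_limit */
			      msecs_to_jiffies(10000),	/* 10s job timeout */
			      NULL,			/* use system_wq for TDR */
			      NULL,			/* private score atomic */
			      "example-ring", dev);
}

static void example_ring_fini(struct drm_gpu_scheduler *sched)
{
	drm_sched_fini(sched);
}
#endif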

/**
 * drm_sched_increase_karma - Update sched_entity guilty flag
 *
 * @bad: The job guilty of time out
 *
 * Increment on every hang caused by the 'bad' job. If this exceeds the hang
 * limit of the scheduler then the respective sched entity is marked guilty
 * and jobs from it will not be scheduled further.
 */
void drm_sched_increase_karma(struct drm_sched_job *bad)
{
	int i;
	struct drm_sched_entity *tmp;
	struct drm_sched_entity *entity;
	struct drm_gpu_scheduler *sched = bad->sched;

	/* Don't change @bad's karma if it's from the KERNEL RQ, because a GPU
	 * hang can sometimes corrupt kernel jobs (like VM updating jobs), but
	 * keep in mind that kernel jobs are always considered good.
	 */
	if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
		atomic_inc(&bad->karma);

		for (i = DRM_SCHED_PRIORITY_MIN;
		     i < min_t(typeof(sched->num_rqs), sched->num_rqs, DRM_SCHED_PRIORITY_KERNEL);
		     i++) {
			struct drm_sched_rq *rq = sched->sched_rq[i];

			spin_lock(&rq->lock);
			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
				if (bad->s_fence->scheduled.context ==
				    entity->fence_context) {
					if (entity->guilty)
						atomic_set(entity->guilty, 1);
					break;
				}
			}
			spin_unlock(&rq->lock);
			if (&entity->list != &rq->entities)
				break;
		}
	}
}
EXPORT_SYMBOL(drm_sched_increase_karma);