/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/**
 * DOC: Overview
 *
 * The GPU scheduler provides entities which allow userspace to push jobs
 * into software queues which are then scheduled on a hardware run queue.
 * The software queues have a priority among them. The scheduler selects
 * entities from the run queue using a FIFO. The scheduler provides dependency
 * handling features among jobs. The driver is supposed to provide callback
 * functions for backend operations to the scheduler, like submitting a job to
 * the hardware run queue, returning the dependencies of a job, etc.
 *
 * The organisation of the scheduler is the following:
 *
 * 1. Each hw run queue has one scheduler
 * 2. Each scheduler has multiple run queues with different priorities
 *    (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
 * 3. Each scheduler run queue has a queue of entities to schedule
 * 4. Entities themselves maintain a queue of jobs that will be scheduled on
 *    the hardware.
 *
 * The jobs in an entity are always scheduled in the order in which they were
 * pushed.
 *
 * Note that once a job was taken from the entity's queue and pushed to the
 * hardware, i.e. the pending queue, the entity must not be referenced anymore
 * through the job's entity pointer.
 */

/**
 * DOC: Flow Control
 *
 * The DRM GPU scheduler provides a flow control mechanism to regulate the rate
 * at which the jobs fetched from scheduler entities are executed.
 *
 * In this context the &drm_gpu_scheduler keeps track of a driver specified
 * credit limit representing the capacity of this scheduler and a credit count;
 * every &drm_sched_job carries a driver specified number of credits.
 *
 * Once a job is executed (but not yet finished), the job's credits contribute
 * to the scheduler's credit count until the job is finished. If by executing
 * one more job the scheduler's credit count would exceed the scheduler's
 * credit limit, the job won't be executed. Instead, the scheduler will wait
 * until the credit count has decreased enough to not overflow its credit
 * limit. This implies waiting for previously executed jobs.
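 *
 * As a rough sketch of how a driver might use this (the names and values
 * below are purely illustrative and not taken from any particular driver),
 * the credit limit could be the number of slots in the ring buffer and each
 * job could carry the number of slots it occupies::
 *
 *	struct drm_sched_init_args args = {
 *		...
 *		.credit_limit = MY_RING_SLOTS,
 *	};
 *
 *	ret = drm_sched_init(&my_dev->sched, &args);
 *	...
 *	ret = drm_sched_job_init(&my_job->base, &my_ctx->entity,
 *				 my_job->num_slots, my_file, my_client_id);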
 */

#include <linux/export.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/dma-resv.h>
#include <uapi/linux/sched/types.h>

#include <drm/drm_print.h>
#include <drm/drm_gem.h>
#include <drm/drm_syncobj.h>
#include <drm/gpu_scheduler.h>
#include <drm/spsc_queue.h>

#include "sched_internal.h"

#define CREATE_TRACE_POINTS
#include "gpu_scheduler_trace.h"

int drm_sched_policy = DRM_SCHED_POLICY_FIFO;

/**
 * DOC: sched_policy (int)
 * Used to override the default scheduling policy for entities in a run queue.
 */
MODULE_PARM_DESC(sched_policy, "Specify the scheduling policy for entities on a run-queue, " __stringify(DRM_SCHED_POLICY_RR) " = Round Robin, " __stringify(DRM_SCHED_POLICY_FIFO) " = FIFO (default).");
module_param_named(sched_policy, drm_sched_policy, int, 0444);

static u32 drm_sched_available_credits(struct drm_gpu_scheduler *sched)
{
	u32 credits;

	WARN_ON(check_sub_overflow(sched->credit_limit,
				   atomic_read(&sched->credit_count),
				   &credits));

	return credits;
}

/**
 * drm_sched_can_queue - Can we queue more to the hardware?
 * @sched: scheduler instance
 * @entity: the scheduler entity
 *
 * Return true if we can push at least one more job from @entity, false
 * otherwise.
 */
static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched,
				struct drm_sched_entity *entity)
{
	struct drm_sched_job *s_job;

	s_job = drm_sched_entity_queue_peek(entity);
	if (!s_job)
		return false;

	/* If a job exceeds the credit limit, truncate it to the credit limit
	 * itself to guarantee forward progress.
	 */
	if (s_job->credits > sched->credit_limit) {
		dev_WARN(sched->dev,
			 "Jobs may not exceed the credit limit, truncate.\n");
		s_job->credits = sched->credit_limit;
	}

	return drm_sched_available_credits(sched) >= s_job->credits;
}

static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a,
							    const struct rb_node *b)
{
	struct drm_sched_entity *ent_a = rb_entry((a), struct drm_sched_entity, rb_tree_node);
	struct drm_sched_entity *ent_b = rb_entry((b), struct drm_sched_entity, rb_tree_node);

	return ktime_before(ent_a->oldest_job_waiting, ent_b->oldest_job_waiting);
}

static void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity,
					    struct drm_sched_rq *rq)
{
	if (!RB_EMPTY_NODE(&entity->rb_tree_node)) {
		rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root);
		RB_CLEAR_NODE(&entity->rb_tree_node);
	}
}

void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity,
				     struct drm_sched_rq *rq,
				     ktime_t ts)
{
	/*
	 * Both locks need to be grabbed: one to protect against entity->rq
	 * changing from within a concurrent drm_sched_entity_select_rq(), and
	 * the other to update the rb tree structure.
	 */
	lockdep_assert_held(&entity->lock);
	lockdep_assert_held(&rq->lock);

	drm_sched_rq_remove_fifo_locked(entity, rq);

	entity->oldest_job_waiting = ts;

	rb_add_cached(&entity->rb_tree_node, &rq->rb_tree_root,
		      drm_sched_entity_compare_before);
}

/**
 * drm_sched_rq_init - initialize a given run queue struct
 *
 * @sched: scheduler instance to associate with this run queue
 * @rq: scheduler run queue
 *
 * Initializes a scheduler runqueue.
 */
static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
			      struct drm_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->rb_tree_root = RB_ROOT_CACHED;
	rq->current_entity = NULL;
	rq->sched = sched;
}

/**
 * drm_sched_rq_add_entity - add an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Adds a scheduler entity to the run queue.
 */
void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity)
{
	lockdep_assert_held(&entity->lock);
	lockdep_assert_held(&rq->lock);

	if (!list_empty(&entity->list))
		return;

	atomic_inc(rq->sched->score);
	list_add_tail(&entity->list, &rq->entities);
}

/**
 * drm_sched_rq_remove_entity - remove an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Removes a scheduler entity from the run queue.
 */
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity)
{
	lockdep_assert_held(&entity->lock);

	if (list_empty(&entity->list))
		return;

	spin_lock(&rq->lock);

	atomic_dec(rq->sched->score);
	list_del_init(&entity->list);

	if (rq->current_entity == entity)
		rq->current_entity = NULL;

	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
		drm_sched_rq_remove_fifo_locked(entity, rq);

	spin_unlock(&rq->lock);
}

/**
 * drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run
 *
 * @sched: the gpu scheduler
 * @rq: scheduler run queue to check.
 *
 * Try to find the next ready entity.
 *
 * Return an entity if one is found; return an error-pointer (!NULL) if an
 * entity was ready, but the scheduler had insufficient credits to accommodate
 * its job; return NULL, if no ready entity was found.
 */
static struct drm_sched_entity *
drm_sched_rq_select_entity_rr(struct drm_gpu_scheduler *sched,
			      struct drm_sched_rq *rq)
{
	struct drm_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (drm_sched_entity_is_ready(entity))
				goto found;
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {
		if (drm_sched_entity_is_ready(entity))
			goto found;

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;

found:
	if (!drm_sched_can_queue(sched, entity)) {
		/*
		 * If the scheduler cannot take more jobs, signal the caller
		 * not to consider lower priority queues.
		 */
		entity = ERR_PTR(-ENOSPC);
	} else {
		rq->current_entity = entity;
		reinit_completion(&entity->entity_idle);
	}

	spin_unlock(&rq->lock);

	return entity;
}

/**
 * drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run
 *
 * @sched: the gpu scheduler
 * @rq: scheduler run queue to check.
 *
 * Find the oldest waiting ready entity.
 *
 * Return an entity if one is found; return an error-pointer (!NULL) if an
 * entity was ready, but the scheduler had insufficient credits to accommodate
 * its job; return NULL, if no ready entity was found.
 */
static struct drm_sched_entity *
drm_sched_rq_select_entity_fifo(struct drm_gpu_scheduler *sched,
				struct drm_sched_rq *rq)
{
	struct rb_node *rb;

	spin_lock(&rq->lock);
	for (rb = rb_first_cached(&rq->rb_tree_root); rb; rb = rb_next(rb)) {
		struct drm_sched_entity *entity;

		entity = rb_entry(rb, struct drm_sched_entity, rb_tree_node);
		if (drm_sched_entity_is_ready(entity)) {
			/* If we can't queue yet, preserve the current entity in
			 * terms of fairness.
			 */
			if (!drm_sched_can_queue(sched, entity)) {
				spin_unlock(&rq->lock);
				return ERR_PTR(-ENOSPC);
			}

			reinit_completion(&entity->entity_idle);
			break;
		}
	}
	spin_unlock(&rq->lock);

	return rb ? rb_entry(rb, struct drm_sched_entity, rb_tree_node) : NULL;
}

/**
 * drm_sched_run_job_queue - enqueue run-job work
 * @sched: scheduler instance
 */
static void drm_sched_run_job_queue(struct drm_gpu_scheduler *sched)
{
	if (!READ_ONCE(sched->pause_submit))
		queue_work(sched->submit_wq, &sched->work_run_job);
}

/**
 * __drm_sched_run_free_queue - enqueue free-job work
 * @sched: scheduler instance
 */
static void __drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
{
	if (!READ_ONCE(sched->pause_submit))
		queue_work(sched->submit_wq, &sched->work_free_job);
}

/**
 * drm_sched_run_free_queue - enqueue free-job work if ready
 * @sched: scheduler instance
 */
static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *job;

	spin_lock(&sched->job_list_lock);
	job = list_first_entry_or_null(&sched->pending_list,
				       struct drm_sched_job, list);
	if (job && dma_fence_is_signaled(&job->s_fence->finished))
		__drm_sched_run_free_queue(sched);
	spin_unlock(&sched->job_list_lock);
}

/**
 * drm_sched_job_done - complete a job
 * @s_job: pointer to the job which is done
 * @result: the job's result (0 or a negative error code)
 *
 * Finish the job's fence and resubmit the work items.
 */
static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
{
	struct drm_sched_fence *s_fence = s_job->s_fence;
	struct drm_gpu_scheduler *sched = s_fence->sched;

	atomic_sub(s_job->credits, &sched->credit_count);
	atomic_dec(sched->score);

	trace_drm_sched_job_done(s_fence);

	dma_fence_get(&s_fence->finished);
	drm_sched_fence_finished(s_fence, result);
	dma_fence_put(&s_fence->finished);
	__drm_sched_run_free_queue(sched);
}

/**
 * drm_sched_job_done_cb - the callback for a done job
 * @f: fence
 * @cb: fence callbacks
 */
static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);

	drm_sched_job_done(s_job, f->error);
}

/**
 * drm_sched_start_timeout - start timeout for reset worker
 *
 * @sched: scheduler instance to start the worker for
 *
 * Start the timeout for the given scheduler.
 */
static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
{
	lockdep_assert_held(&sched->job_list_lock);

	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    !list_empty(&sched->pending_list))
		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout);
}

static void drm_sched_start_timeout_unlocked(struct drm_gpu_scheduler *sched)
{
	spin_lock(&sched->job_list_lock);
	drm_sched_start_timeout(sched);
	spin_unlock(&sched->job_list_lock);
}

/**
 * drm_sched_tdr_queue_imm - immediately start job timeout handler
 *
 * @sched: scheduler for which the timeout handling should be started.
 *
 * Start timeout handling immediately for the named scheduler.
 */
void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched)
{
	spin_lock(&sched->job_list_lock);
	sched->timeout = 0;
	drm_sched_start_timeout(sched);
	spin_unlock(&sched->job_list_lock);
}
EXPORT_SYMBOL(drm_sched_tdr_queue_imm);

/**
 * drm_sched_fault - immediately start timeout handler
 *
 * @sched: scheduler where the timeout handling should be started.
 *
 * Start timeout handling immediately when the driver detects a hardware fault.
 */
void drm_sched_fault(struct drm_gpu_scheduler *sched)
{
	if (sched->timeout_wq)
		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0);
}
EXPORT_SYMBOL(drm_sched_fault);

/**
 * drm_sched_suspend_timeout - Suspend scheduler job timeout
 *
 * @sched: scheduler instance for which to suspend the timeout
 *
 * Suspend the delayed work timeout for the scheduler. This is done by
 * modifying the delayed work timeout to an arbitrarily large value,
 * MAX_SCHEDULE_TIMEOUT in this case.
 *
 * Returns the timeout remaining.
 */
unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
{
	unsigned long sched_timeout, now = jiffies;

	sched_timeout = sched->work_tdr.timer.expires;

	/*
	 * Modify the timeout to an arbitrarily large value. This also prevents
	 * the timeout from being restarted when new submissions arrive.
	 */
	if (mod_delayed_work(sched->timeout_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
			&& time_after(sched_timeout, now))
		return sched_timeout - now;
	else
		return sched->timeout;
}
EXPORT_SYMBOL(drm_sched_suspend_timeout);

/**
 * drm_sched_resume_timeout - Resume scheduler job timeout
 *
 * @sched: scheduler instance for which to resume the timeout
 * @remaining: remaining timeout
 *
 * Resume the delayed work timeout for the scheduler.
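 *
 * Typically paired with drm_sched_suspend_timeout() around a driver specific
 * reset sequence; a rough sketch (my_driver_reset_hw() is illustrative only,
 * not an existing helper)::
 *
 *	unsigned long remaining;
 *
 *	remaining = drm_sched_suspend_timeout(sched);
 *	my_driver_reset_hw(my_dev);
 *	drm_sched_resume_timeout(sched, remaining);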
 */
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
			      unsigned long remaining)
{
	spin_lock(&sched->job_list_lock);

	if (list_empty(&sched->pending_list))
		cancel_delayed_work(&sched->work_tdr);
	else
		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, remaining);

	spin_unlock(&sched->job_list_lock);
}
EXPORT_SYMBOL(drm_sched_resume_timeout);

static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
	struct drm_gpu_scheduler *sched = s_job->sched;

	spin_lock(&sched->job_list_lock);
	list_add_tail(&s_job->list, &sched->pending_list);
	drm_sched_start_timeout(sched);
	spin_unlock(&sched->job_list_lock);
}

static void drm_sched_job_timedout(struct work_struct *work)
{
	struct drm_gpu_scheduler *sched;
	struct drm_sched_job *job;
	enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_NOMINAL;

	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);

	/* Protects against concurrent deletion in drm_sched_get_finished_job */
	spin_lock(&sched->job_list_lock);
	job = list_first_entry_or_null(&sched->pending_list,
				       struct drm_sched_job, list);

	if (job) {
		/*
		 * Remove the bad job so it cannot be freed by a concurrent
		 * &struct drm_sched_backend_ops.free_job. It will be
		 * reinserted after the scheduler's work items have been
		 * cancelled, at which point it's safe.
		 */
		list_del_init(&job->list);
		spin_unlock(&sched->job_list_lock);

		status = job->sched->ops->timedout_job(job);

		/*
		 * The guilty job did complete and hence needs to be manually
		 * removed. See the documentation of drm_sched_stop().
		 */
		if (sched->free_guilty) {
			job->sched->ops->free_job(job);
			sched->free_guilty = false;
		}
	} else {
		spin_unlock(&sched->job_list_lock);
	}

	if (status != DRM_GPU_SCHED_STAT_ENODEV)
		drm_sched_start_timeout_unlocked(sched);
}

/**
 * drm_sched_stop - stop the scheduler
 *
 * @sched: scheduler instance
 * @bad: job which caused the time out
 *
 * Stop the scheduler and also remove and free all completed jobs.
 * Note: the bad job will not be freed as it might be used later, so it is the
 * caller's responsibility to release it manually if it is not part of the
 * pending list any more.
 *
 * This function is typically used for reset recovery (see the documentation of
 * drm_sched_backend_ops.timedout_job() for details). Do not call it for
 * scheduler teardown, i.e., before calling drm_sched_fini().
 */
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
	struct drm_sched_job *s_job, *tmp;

	drm_sched_wqueue_stop(sched);

	/*
	 * Reinsert back the bad job here - now it's safe as
	 * drm_sched_get_finished_job() cannot race against us and release the
	 * bad job at this point - we parked (waited for) any in progress
	 * (earlier) cleanups and drm_sched_get_finished_job() will not be
	 * called now until the scheduler's work items are submitted again.
	 */
	if (bad && bad->sched == sched)
		/*
		 * Add at the head of the queue to reflect it was the earliest
		 * job extracted.
		 */
		list_add(&bad->list, &sched->pending_list);

	/*
	 * Iterate the job list from later to earlier one and either deactivate
	 * their HW callbacks or remove them from the pending list if they have
	 * already signaled.
	 * This iteration is thread safe as the scheduler's work items have been
	 * cancelled.
	 */
	list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
					 list) {
		if (s_job->s_fence->parent &&
		    dma_fence_remove_callback(s_job->s_fence->parent,
					      &s_job->cb)) {
			dma_fence_put(s_job->s_fence->parent);
			s_job->s_fence->parent = NULL;
			atomic_sub(s_job->credits, &sched->credit_count);
		} else {
			/*
			 * Remove the job from the pending_list.
			 * Locking here is for concurrent resume timeout.
			 */
			spin_lock(&sched->job_list_lock);
			list_del_init(&s_job->list);
			spin_unlock(&sched->job_list_lock);

			/*
			 * Wait for the job's HW fence callback to finish using
			 * s_job before releasing it.
			 *
			 * The job is still alive, so the fence refcount is at
			 * least 1.
			 */
			dma_fence_wait(&s_job->s_fence->finished, false);

			/*
			 * We must keep the bad job alive for later use during
			 * recovery by some of the drivers but leave a hint
			 * that the guilty job must be released.
			 */
			if (bad != s_job)
				sched->ops->free_job(s_job);
			else
				sched->free_guilty = true;
		}
	}

	/*
	 * Stop the pending timer in flight as we rearm it in drm_sched_start().
	 * This prevents the pending timeout work in progress from firing right
	 * after this TDR finished and before the newly restarted jobs had a
	 * chance to complete.
	 */
	cancel_delayed_work(&sched->work_tdr);
}
EXPORT_SYMBOL(drm_sched_stop);

/**
 * drm_sched_start - recover jobs after a reset
 *
 * @sched: scheduler instance
 * @errno: error to set on the pending fences
 *
 * This function is typically used for reset recovery (see the documentation of
 * drm_sched_backend_ops.timedout_job() for details). Do not call it for
 * scheduler startup. The scheduler itself is fully operational after
 * drm_sched_init() succeeded.
 */
void drm_sched_start(struct drm_gpu_scheduler *sched, int errno)
{
	struct drm_sched_job *s_job, *tmp;

	/*
	 * Locking the list is not required here as the scheduler's work items
	 * are currently not running, so no new jobs are being inserted or
	 * removed. Also concurrent GPU recoveries can't run in parallel.
	 */
	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
		struct dma_fence *fence = s_job->s_fence->parent;

		atomic_add(s_job->credits, &sched->credit_count);

		if (!fence) {
			drm_sched_job_done(s_job, errno ?: -ECANCELED);
			continue;
		}

		if (dma_fence_add_callback(fence, &s_job->cb,
					   drm_sched_job_done_cb))
			drm_sched_job_done(s_job, fence->error ?: errno);
	}

	drm_sched_start_timeout_unlocked(sched);
	drm_sched_wqueue_start(sched);
}
EXPORT_SYMBOL(drm_sched_start);

/**
 * drm_sched_resubmit_jobs - Deprecated, don't use in new code!
 *
 * @sched: scheduler instance
 *
 * Re-submitting jobs was a concept AMD came up with as a cheap way to
 * implement recovery after a job timeout.
 *
 * This turned out to not work very well. First of all there are many problems
 * with the dma_fence implementation and requirements. Either the
 * implementation is risking deadlocks with core memory management or violating
 * documented implementation details of the dma_fence object.
 *
 * Drivers can still save and restore their state for recovery operations, but
 * we shouldn't make this a general scheduler feature around the dma_fence
 * interface.
 */
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job, *tmp;
	uint64_t guilty_context;
	bool found_guilty = false;
	struct dma_fence *fence;

	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
		struct drm_sched_fence *s_fence = s_job->s_fence;

		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
			found_guilty = true;
			guilty_context = s_job->s_fence->scheduled.context;
		}

		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
			dma_fence_set_error(&s_fence->finished, -ECANCELED);

		fence = sched->ops->run_job(s_job);

		if (IS_ERR_OR_NULL(fence)) {
			if (IS_ERR(fence))
				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));

			s_job->s_fence->parent = NULL;
		} else {
			s_job->s_fence->parent = dma_fence_get(fence);

			/* Drop for original kref_init */
			dma_fence_put(fence);
		}
	}
}
EXPORT_SYMBOL(drm_sched_resubmit_jobs);

/**
 * drm_sched_job_init - init a scheduler job
 * @job: scheduler job to init
 * @entity: scheduler entity to use
 * @credits: the number of credits this job contributes to the scheduler's
 *	credit limit
 * @owner: job owner for debugging
 * @drm_client_id: &struct drm_file.client_id of the owner (used by trace
 *	events)
 *
 * Refer to drm_sched_entity_push_job() documentation
 * for locking considerations.
 *
 * Drivers must make sure to call drm_sched_job_cleanup() if this function
 * returns successfully, even when @job is aborted before drm_sched_job_arm()
 * is called.
 *
 * Note that this function does not assign a valid value to each struct member
 * of struct drm_sched_job. Take a look at that struct's documentation to see
 * who sets which struct member with what lifetime.
 *
 * WARNING: amdgpu abuses &drm_sched.ready to signal when the hardware
 * has died, which can mean that there's no valid runqueue for an @entity.
 * This function returns -ENOENT in this case (which probably should be -EIO as
 * a more meaningful return value).
 *
 * Returns 0 for success, negative error code otherwise.
 */
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       u32 credits, void *owner,
		       uint64_t drm_client_id)
{
	if (!entity->rq) {
		/* This will most likely be followed by missing frames
		 * or worse--a blank screen--leave a trail in the
		 * logs, so this can be debugged more easily.
		 */
		dev_err(job->sched->dev, "%s: entity has no rq!\n", __func__);
		return -ENOENT;
	}

	if (unlikely(!credits)) {
		pr_err("*ERROR* %s: credits cannot be 0!\n", __func__);
		return -EINVAL;
	}

	/*
	 * We don't know for sure how the user has allocated. Thus, zero the
	 * struct so that unallowed (i.e., too early) usage of pointers that
	 * this function does not set is guaranteed to lead to a NULL pointer
	 * exception instead of UB.
	 */
	memset(job, 0, sizeof(*job));

	job->entity = entity;
	job->credits = credits;
	job->s_fence = drm_sched_fence_alloc(entity, owner, drm_client_id);
	if (!job->s_fence)
		return -ENOMEM;

	INIT_LIST_HEAD(&job->list);

	xa_init_flags(&job->dependencies, XA_FLAGS_ALLOC);

	return 0;
}
EXPORT_SYMBOL(drm_sched_job_init);

/**
 * drm_sched_job_arm - arm a scheduler job for execution
 * @job: scheduler job to arm
 *
 * This arms a scheduler job for execution. Specifically it initializes the
 * &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv
 * or other places that need to track the completion of this job. It also
 * initializes sequence numbers, which are fundamental for fence ordering.
 *
 * Refer to drm_sched_entity_push_job() documentation for locking
 * considerations.
 *
 * Once this function was called, you *must* submit @job with
 * drm_sched_entity_push_job().
 *
 * This can only be called if drm_sched_job_init() succeeded.
 */
void drm_sched_job_arm(struct drm_sched_job *job)
{
	struct drm_gpu_scheduler *sched;
	struct drm_sched_entity *entity = job->entity;

	BUG_ON(!entity);
	drm_sched_entity_select_rq(entity);
	sched = entity->rq->sched;

	job->sched = sched;
	job->s_priority = entity->priority;

	drm_sched_fence_init(job->s_fence, job->entity);
}
EXPORT_SYMBOL(drm_sched_job_arm);

/**
 * drm_sched_job_add_dependency - adds the fence as a job dependency
 * @job: scheduler job to add the dependencies to
 * @fence: the dma_fence to add to the list of dependencies.
 *
 * Note that @fence is consumed in both the success and error cases.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_dependency(struct drm_sched_job *job,
				 struct dma_fence *fence)
{
	struct dma_fence *entry;
	unsigned long index;
	u32 id = 0;
	int ret;

	if (!fence)
		return 0;

	/* Deduplicate if we already depend on a fence from the same context.
	 * This lets the size of the array of deps scale with the number of
	 * engines involved, rather than the number of BOs.
	 */
	xa_for_each(&job->dependencies, index, entry) {
		if (entry->context != fence->context)
			continue;

		if (dma_fence_is_later(fence, entry)) {
			dma_fence_put(entry);
			xa_store(&job->dependencies, index, fence, GFP_KERNEL);
		} else {
			dma_fence_put(fence);
		}
		return 0;
	}

	ret = xa_alloc(&job->dependencies, &id, fence, xa_limit_32b, GFP_KERNEL);
	if (ret != 0)
		dma_fence_put(fence);

	return ret;
}
EXPORT_SYMBOL(drm_sched_job_add_dependency);

/**
 * drm_sched_job_add_syncobj_dependency - adds a syncobj's fence as a job dependency
 * @job: scheduler job to add the dependencies to
 * @file: drm file private pointer
 * @handle: syncobj handle to lookup
 * @point: timeline point
 *
 * This adds the fence matching the given syncobj to @job.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
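 *
 * For instance, a driver's submit ioctl might pull in a user supplied "wait"
 * syncobj like this (the uAPI field names below are illustrative only)::
 *
 *	ret = drm_sched_job_add_syncobj_dependency(&submit->base, file,
 *						   args->in_syncobj,
 *						   args->in_syncobj_point);
 *	if (ret)
 *		goto err_free_job;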
 */
int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
					 struct drm_file *file,
					 u32 handle,
					 u32 point)
{
	struct dma_fence *fence;
	int ret;

	ret = drm_syncobj_find_fence(file, handle, point, 0, &fence);
	if (ret)
		return ret;

	return drm_sched_job_add_dependency(job, fence);
}
EXPORT_SYMBOL(drm_sched_job_add_syncobj_dependency);

/**
 * drm_sched_job_add_resv_dependencies - add all fences from the resv to the job
 * @job: scheduler job to add the dependencies to
 * @resv: the dma_resv object to get the fences from
 * @usage: the dma_resv_usage to use to filter the fences
 *
 * This adds all fences matching the given usage from @resv to @job.
 * Must be called with the @resv lock held.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
					struct dma_resv *resv,
					enum dma_resv_usage usage)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	int ret;

	dma_resv_assert_held(resv);

	dma_resv_for_each_fence(&cursor, resv, usage, fence) {
		/* Make sure to grab an additional ref on the added fence */
		dma_fence_get(fence);
		ret = drm_sched_job_add_dependency(job, fence);
		if (ret) {
			dma_fence_put(fence);
			return ret;
		}
	}
	return 0;
}
EXPORT_SYMBOL(drm_sched_job_add_resv_dependencies);

/**
 * drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job
 *	dependencies
 * @job: scheduler job to add the dependencies to
 * @obj: the gem object to add new dependencies from.
 * @write: whether the job might write the object (so we need to depend on
 *	shared fences in the reservation object).
 *
 * This should be called after drm_gem_lock_reservations() on your array of
 * GEM objects used in the job but before updating the reservations with your
 * own fences.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
					    struct drm_gem_object *obj,
					    bool write)
{
	return drm_sched_job_add_resv_dependencies(job, obj->resv,
						   dma_resv_usage_rw(write));
}
EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);

/**
 * drm_sched_job_has_dependency - check whether fence is the job's dependency
 * @job: scheduler job to check
 * @fence: fence to look for
 *
 * Returns:
 * True if @fence is found within the job's dependencies, or otherwise false.
 */
bool drm_sched_job_has_dependency(struct drm_sched_job *job,
				  struct dma_fence *fence)
{
	struct dma_fence *f;
	unsigned long index;

	xa_for_each(&job->dependencies, index, f) {
		if (f == fence)
			return true;
	}

	return false;
}
EXPORT_SYMBOL(drm_sched_job_has_dependency);

/**
 * drm_sched_job_cleanup - clean up scheduler job resources
 * @job: scheduler job to clean up
 *
 * Cleans up the resources allocated with drm_sched_job_init().
 *
 * Drivers should call this from their error unwind code if @job is aborted
 * before drm_sched_job_arm() is called.
 *
 * drm_sched_job_arm() is a point of no return since it initializes the fences
 * and their sequence numbers. Once it has been called, you *must* submit the
 * job with drm_sched_entity_push_job() and cannot simply abort it by calling
 * drm_sched_job_cleanup().
 *
 * This function should be called in the &drm_sched_backend_ops.free_job callback.
 */
void drm_sched_job_cleanup(struct drm_sched_job *job)
{
	struct dma_fence *fence;
	unsigned long index;

	if (kref_read(&job->s_fence->finished.refcount)) {
		/* The job has been processed by the scheduler, i.e.,
		 * drm_sched_job_arm() and drm_sched_entity_push_job() have
		 * been called.
		 */
		dma_fence_put(&job->s_fence->finished);
	} else {
		/* The job was aborted before it has been committed to be run;
		 * notably, drm_sched_job_arm() has not been called.
		 */
		drm_sched_fence_free(job->s_fence);
	}

	job->s_fence = NULL;

	xa_for_each(&job->dependencies, index, fence) {
		dma_fence_put(fence);
	}
	xa_destroy(&job->dependencies);
}
EXPORT_SYMBOL(drm_sched_job_cleanup);

/**
 * drm_sched_wakeup - Wake up the scheduler if it is ready to queue
 * @sched: scheduler instance
 *
 * Wake up the scheduler if we can queue jobs.
 */
void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
{
	drm_sched_run_job_queue(sched);
}

/**
 * drm_sched_select_entity - Select next entity to process
 *
 * @sched: scheduler instance
 *
 * Return an entity to process or NULL if none are found.
 *
 * Note that we break out of the for-loop when "entity" is non-NULL, which can
 * also be an error pointer; this ensures we don't process lower priority
 * run-queues. See comments in the respectively called functions.
 */
static struct drm_sched_entity *
drm_sched_select_entity(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_entity *entity;
	int i;

	/* Start with the highest priority. */
	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
		entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
			drm_sched_rq_select_entity_fifo(sched, sched->sched_rq[i]) :
			drm_sched_rq_select_entity_rr(sched, sched->sched_rq[i]);
		if (entity)
			break;
	}

	return IS_ERR(entity) ? NULL : entity;
}

/**
 * drm_sched_get_finished_job - fetch the next finished job to be destroyed
 *
 * @sched: scheduler instance
 *
 * Returns the next finished job from the pending list (if there is one)
 * ready for it to be destroyed.
 */
static struct drm_sched_job *
drm_sched_get_finished_job(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *job, *next;

	spin_lock(&sched->job_list_lock);

	job = list_first_entry_or_null(&sched->pending_list,
				       struct drm_sched_job, list);

	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
		/* remove job from pending_list */
		list_del_init(&job->list);

		/* cancel this job's TO timer */
		cancel_delayed_work(&sched->work_tdr);
		/* make the scheduled timestamp more accurate */
		next = list_first_entry_or_null(&sched->pending_list,
						typeof(*next), list);

		if (next) {
			if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT,
				     &next->s_fence->scheduled.flags))
				next->s_fence->scheduled.timestamp =
					dma_fence_timestamp(&job->s_fence->finished);
			/* start TO timer for next job */
			drm_sched_start_timeout(sched);
		}
	} else {
		job = NULL;
	}

	spin_unlock(&sched->job_list_lock);

	return job;
}

/**
 * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
 * @sched_list: list of drm_gpu_schedulers
 * @num_sched_list: number of drm_gpu_schedulers in the sched_list
 *
 * Returns a pointer to the sched with the least load, or NULL if none of the
 * drm_gpu_schedulers are ready.
 */
struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
		    unsigned int num_sched_list)
{
	struct drm_gpu_scheduler *sched, *picked_sched = NULL;
	int i;
	unsigned int min_score = UINT_MAX, num_score;

	for (i = 0; i < num_sched_list; ++i) {
		sched = sched_list[i];

		if (!sched->ready) {
			DRM_WARN("scheduler %s is not ready, skipping",
				 sched->name);
			continue;
		}

		num_score = atomic_read(sched->score);
		if (num_score < min_score) {
			min_score = num_score;
			picked_sched = sched;
		}
	}

	return picked_sched;
}
EXPORT_SYMBOL(drm_sched_pick_best);

/**
 * drm_sched_free_job_work - worker to call free_job
 *
 * @w: free job work
 */
static void drm_sched_free_job_work(struct work_struct *w)
{
	struct drm_gpu_scheduler *sched =
		container_of(w, struct drm_gpu_scheduler, work_free_job);
	struct drm_sched_job *job;

	job = drm_sched_get_finished_job(sched);
	if (job)
		sched->ops->free_job(job);

	drm_sched_run_free_queue(sched);
	drm_sched_run_job_queue(sched);
}

/**
 * drm_sched_run_job_work - worker to call run_job
 *
 * @w: run job work
 */
static void drm_sched_run_job_work(struct work_struct *w)
{
	struct drm_gpu_scheduler *sched =
		container_of(w, struct drm_gpu_scheduler, work_run_job);
	struct drm_sched_entity *entity;
	struct dma_fence *fence;
	struct drm_sched_fence *s_fence;
	struct drm_sched_job *sched_job;
	int r;

	/* Find entity with a ready job */
	entity = drm_sched_select_entity(sched);
	if (!entity)
		return;	/* No more work */

	sched_job = drm_sched_entity_pop_job(entity);
	if (!sched_job) {
		complete_all(&entity->entity_idle);
		drm_sched_run_job_queue(sched);
		return;
	}

	s_fence = sched_job->s_fence;

	atomic_add(sched_job->credits, &sched->credit_count);
	drm_sched_job_begin(sched_job);

	trace_drm_sched_job_run(sched_job, entity);

	/*
	 * The run_job() callback must by definition return a fence whose
	 * refcount has been incremented for the scheduler already.
	 */
	fence = sched->ops->run_job(sched_job);
	complete_all(&entity->entity_idle);
	drm_sched_fence_scheduled(s_fence, fence);

	if (!IS_ERR_OR_NULL(fence)) {
		r = dma_fence_add_callback(fence, &sched_job->cb,
					   drm_sched_job_done_cb);
		if (r == -ENOENT)
			drm_sched_job_done(sched_job, fence->error);
		else if (r)
			DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n", r);

		dma_fence_put(fence);
	} else {
		drm_sched_job_done(sched_job, IS_ERR(fence) ?
				   PTR_ERR(fence) : 0);
	}

	wake_up(&sched->job_scheduled);
	drm_sched_run_job_queue(sched);
}

static struct workqueue_struct *drm_sched_alloc_wq(const char *name)
{
#if (IS_ENABLED(CONFIG_LOCKDEP))
	static struct lockdep_map map = {
		.name = "drm_sched_lockdep_map"
	};

	/*
	 * Avoid leaking a lockdep map on each drm sched creation and
	 * destruction by using a single lockdep map for all drm sched
	 * allocated submit_wq.
	 */

	return alloc_ordered_workqueue_lockdep_map(name, WQ_MEM_RECLAIM, &map);
#else
	return alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
#endif
}

/**
 * drm_sched_init - Init a gpu scheduler instance
 *
 * @sched: scheduler instance
 * @args: scheduler initialization arguments
 *
 * Return 0 on success, otherwise error code.
 */
int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_init_args *args)
{
	int i;

	sched->ops = args->ops;
	sched->credit_limit = args->credit_limit;
	sched->name = args->name;
	sched->timeout = args->timeout;
	sched->hang_limit = args->hang_limit;
	sched->timeout_wq = args->timeout_wq ? args->timeout_wq : system_wq;
	sched->score = args->score ? args->score : &sched->_score;
	sched->dev = args->dev;

	if (args->num_rqs > DRM_SCHED_PRIORITY_COUNT) {
		/* This is a gross violation--tell drivers what the problem is.
		 */
		dev_err(sched->dev, "%s: num_rqs cannot be greater than DRM_SCHED_PRIORITY_COUNT\n",
			__func__);
		return -EINVAL;
	} else if (sched->sched_rq) {
		/* Not an error, but warn anyway so drivers can
		 * fine-tune their DRM calling order, and return all
		 * is good.
		 */
		dev_warn(sched->dev, "%s: scheduler already initialized!\n", __func__);
		return 0;
	}

	if (args->submit_wq) {
		sched->submit_wq = args->submit_wq;
		sched->own_submit_wq = false;
	} else {
		sched->submit_wq = drm_sched_alloc_wq(args->name);
		if (!sched->submit_wq)
			return -ENOMEM;

		sched->own_submit_wq = true;
	}

	sched->sched_rq = kmalloc_array(args->num_rqs, sizeof(*sched->sched_rq),
					GFP_KERNEL | __GFP_ZERO);
	if (!sched->sched_rq)
		goto Out_check_own;
	sched->num_rqs = args->num_rqs;
	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
		sched->sched_rq[i] = kzalloc(sizeof(*sched->sched_rq[i]), GFP_KERNEL);
		if (!sched->sched_rq[i])
			goto Out_unroll;
		drm_sched_rq_init(sched, sched->sched_rq[i]);
	}

	init_waitqueue_head(&sched->job_scheduled);
	INIT_LIST_HEAD(&sched->pending_list);
	spin_lock_init(&sched->job_list_lock);
	atomic_set(&sched->credit_count, 0);
	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
	INIT_WORK(&sched->work_run_job, drm_sched_run_job_work);
	INIT_WORK(&sched->work_free_job, drm_sched_free_job_work);
	atomic_set(&sched->_score, 0);
	atomic64_set(&sched->job_id_count, 0);
	sched->pause_submit = false;

	sched->ready = true;
	return 0;
Out_unroll:
	for (--i ; i >= DRM_SCHED_PRIORITY_KERNEL; i--)
		kfree(sched->sched_rq[i]);

	kfree(sched->sched_rq);
	sched->sched_rq = NULL;
Out_check_own:
	if (sched->own_submit_wq)
		destroy_workqueue(sched->submit_wq);
	dev_err(sched->dev, "%s: Failed to setup GPU scheduler--out of memory\n", __func__);
	return -ENOMEM;
}
EXPORT_SYMBOL(drm_sched_init);

/**
 * drm_sched_fini - Destroy a gpu scheduler
 *
 * @sched: scheduler instance
 *
 * Tears down and cleans up the scheduler.
 *
 * This stops submission of new jobs to the hardware through
 * drm_sched_backend_ops.run_job(). Consequently, drm_sched_backend_ops.free_job()
 * will not be called for all jobs still in drm_gpu_scheduler.pending_list.
 * There is no solution for this currently. Thus, it is up to the driver to make
 * sure that:
 *
 * a) drm_sched_fini() is only called after drm_sched_backend_ops.free_job() has
 *    been called for all submitted jobs, or that
 * b) the jobs for which drm_sched_backend_ops.free_job() has not been called
 *    after drm_sched_fini() ran are freed manually.
 *
 * FIXME: Take care of the above problem and prevent this function from leaking
 * the jobs in drm_gpu_scheduler.pending_list under any circumstances.
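 *
 * One way a driver can satisfy a) is to count its jobs in flight itself and
 * only call drm_sched_fini() once that count has dropped to zero. A rough
 * sketch (jobs_in_flight and fini_wait are hypothetical driver members, not
 * scheduler API)::
 *
 *	// in the driver's &drm_sched_backend_ops.free_job callback
 *	if (atomic_dec_and_test(&my_dev->jobs_in_flight))
 *		wake_up(&my_dev->fini_wait);
 *
 *	// at teardown time
 *	wait_event(my_dev->fini_wait,
 *		   !atomic_read(&my_dev->jobs_in_flight));
 *	drm_sched_fini(&my_dev->sched);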
 */
void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_entity *s_entity;
	int i;

	drm_sched_wqueue_stop(sched);

	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
		struct drm_sched_rq *rq = sched->sched_rq[i];

		spin_lock(&rq->lock);
		list_for_each_entry(s_entity, &rq->entities, list)
			/*
			 * Prevents reinsertion and marks job_queue as idle;
			 * it will be removed from the rq in
			 * drm_sched_entity_fini() eventually.
			 */
			s_entity->stopped = true;
		spin_unlock(&rq->lock);
		kfree(sched->sched_rq[i]);
	}

	/* Wakeup everyone stuck in drm_sched_entity_flush for this scheduler */
	wake_up_all(&sched->job_scheduled);

	/* Confirm no work left behind accessing device structures */
	cancel_delayed_work_sync(&sched->work_tdr);

	if (sched->own_submit_wq)
		destroy_workqueue(sched->submit_wq);
	sched->ready = false;
	kfree(sched->sched_rq);
	sched->sched_rq = NULL;
}
EXPORT_SYMBOL(drm_sched_fini);

/**
 * drm_sched_increase_karma - Update sched_entity guilty flag
 *
 * @bad: The job guilty of time out
 *
 * Increment on every hang caused by the 'bad' job. If this exceeds the hang
 * limit of the scheduler then the respective sched entity is marked guilty and
 * jobs from it will not be scheduled further.
 */
void drm_sched_increase_karma(struct drm_sched_job *bad)
{
	int i;
	struct drm_sched_entity *tmp;
	struct drm_sched_entity *entity;
	struct drm_gpu_scheduler *sched = bad->sched;

	/*
	 * Don't change @bad's karma if it's from the KERNEL RQ, because a GPU
	 * hang can sometimes cause kernel jobs (like VM updating jobs) to get
	 * corrupted, but keep in mind that kernel jobs are always considered
	 * good.
	 */
	if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
		atomic_inc(&bad->karma);

		for (i = DRM_SCHED_PRIORITY_HIGH; i < sched->num_rqs; i++) {
			struct drm_sched_rq *rq = sched->sched_rq[i];

			spin_lock(&rq->lock);
			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
				if (bad->s_fence->scheduled.context ==
				    entity->fence_context) {
					if (entity->guilty)
						atomic_set(entity->guilty, 1);
					break;
				}
			}
			spin_unlock(&rq->lock);
			if (&entity->list != &rq->entities)
				break;
		}
	}
}
EXPORT_SYMBOL(drm_sched_increase_karma);

/**
 * drm_sched_wqueue_ready - Is the scheduler ready for submission
 *
 * @sched: scheduler instance
 *
 * Returns true if submission is ready
 */
bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched)
{
	return sched->ready;
}
EXPORT_SYMBOL(drm_sched_wqueue_ready);

/**
 * drm_sched_wqueue_stop - stop scheduler submission
 * @sched: scheduler instance
 *
 * Stops the scheduler from pulling new jobs from entities. It also stops
 * freeing jobs automatically through drm_sched_backend_ops.free_job().
 */
void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched)
{
	WRITE_ONCE(sched->pause_submit, true);
	cancel_work_sync(&sched->work_run_job);
	cancel_work_sync(&sched->work_free_job);
}
EXPORT_SYMBOL(drm_sched_wqueue_stop);

/**
 * drm_sched_wqueue_start - start scheduler submission
 * @sched: scheduler instance
 *
 * Restarts the scheduler after drm_sched_wqueue_stop() has stopped it.
 *
 * This function is not necessary for 'conventional' startup. The scheduler is
 * fully operational after drm_sched_init() succeeded.
 */
void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched)
{
	WRITE_ONCE(sched->pause_submit, false);
	queue_work(sched->submit_wq, &sched->work_run_job);
	queue_work(sched->submit_wq, &sched->work_free_job);
}
EXPORT_SYMBOL(drm_sched_wqueue_start);