1 /* 2 * Copyright 2015 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24 /** 25 * DOC: Overview 26 * 27 * The GPU scheduler provides entities which allow userspace to push jobs 28 * into software queues which are then scheduled on a hardware run queue. 29 * The software queues have a priority among them. The scheduler selects the entities 30 * from the run queue using a FIFO. The scheduler provides dependency handling 31 * features among jobs. The driver is supposed to provide callback functions for 32 * backend operations to the scheduler like submitting a job to hardware run queue, 33 * returning the dependencies of a job etc. 34 * 35 * The organisation of the scheduler is the following: 36 * 37 * 1. Each hw run queue has one scheduler 38 * 2. Each scheduler has multiple run queues with different priorities 39 * (e.g., HIGH_HW,HIGH_SW, KERNEL, NORMAL) 40 * 3. Each scheduler run queue has a queue of entities to schedule 41 * 4. Entities themselves maintain a queue of jobs that will be scheduled on 42 * the hardware. 43 * 44 * The jobs in an entity are always scheduled in the order in which they were pushed. 45 * 46 * Note that once a job was taken from the entities queue and pushed to the 47 * hardware, i.e. the pending queue, the entity must not be referenced anymore 48 * through the jobs entity pointer. 49 */ 50 51 /** 52 * DOC: Flow Control 53 * 54 * The DRM GPU scheduler provides a flow control mechanism to regulate the rate 55 * in which the jobs fetched from scheduler entities are executed. 56 * 57 * In this context the &drm_gpu_scheduler keeps track of a driver specified 58 * credit limit representing the capacity of this scheduler and a credit count; 59 * every &drm_sched_job carries a driver specified number of credits. 60 * 61 * Once a job is executed (but not yet finished), the job's credits contribute 62 * to the scheduler's credit count until the job is finished. If by executing 63 * one more job the scheduler's credit count would exceed the scheduler's 64 * credit limit, the job won't be executed. Instead, the scheduler will wait 65 * until the credit count has decreased enough to not overflow its credit limit. 66 * This implies waiting for previously executed jobs. 67 */ 68 69 #include <linux/export.h> 70 #include <linux/wait.h> 71 #include <linux/sched.h> 72 #include <linux/completion.h> 73 #include <linux/dma-resv.h> 74 #include <uapi/linux/sched/types.h> 75 76 #include <drm/drm_print.h> 77 #include <drm/drm_gem.h> 78 #include <drm/drm_syncobj.h> 79 #include <drm/gpu_scheduler.h> 80 #include <drm/spsc_queue.h> 81 82 #include "sched_internal.h" 83 84 #define CREATE_TRACE_POINTS 85 #include "gpu_scheduler_trace.h" 86 87 int drm_sched_policy = DRM_SCHED_POLICY_FIFO; 88 89 /** 90 * DOC: sched_policy (int) 91 * Used to override default entities scheduling policy in a run queue. 92 */ 93 MODULE_PARM_DESC(sched_policy, "Specify the scheduling policy for entities on a run-queue, " __stringify(DRM_SCHED_POLICY_RR) " = Round Robin, " __stringify(DRM_SCHED_POLICY_FIFO) " = FIFO (default)."); 94 module_param_named(sched_policy, drm_sched_policy, int, 0444); 95 96 static u32 drm_sched_available_credits(struct drm_gpu_scheduler *sched) 97 { 98 u32 credits; 99 100 WARN_ON(check_sub_overflow(sched->credit_limit, 101 atomic_read(&sched->credit_count), 102 &credits)); 103 104 return credits; 105 } 106 107 /** 108 * drm_sched_can_queue -- Can we queue more to the hardware? 109 * @sched: scheduler instance 110 * @entity: the scheduler entity 111 * 112 * Return true if we can push at least one more job from @entity, false 113 * otherwise. 114 */ 115 static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched, 116 struct drm_sched_entity *entity) 117 { 118 struct drm_sched_job *s_job; 119 120 s_job = drm_sched_entity_queue_peek(entity); 121 if (!s_job) 122 return false; 123 124 /* If a job exceeds the credit limit, truncate it to the credit limit 125 * itself to guarantee forward progress. 126 */ 127 if (s_job->credits > sched->credit_limit) { 128 dev_WARN(sched->dev, 129 "Jobs may not exceed the credit limit, truncate.\n"); 130 s_job->credits = sched->credit_limit; 131 } 132 133 return drm_sched_available_credits(sched) >= s_job->credits; 134 } 135 136 static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a, 137 const struct rb_node *b) 138 { 139 struct drm_sched_entity *ent_a = rb_entry((a), struct drm_sched_entity, rb_tree_node); 140 struct drm_sched_entity *ent_b = rb_entry((b), struct drm_sched_entity, rb_tree_node); 141 142 return ktime_before(ent_a->oldest_job_waiting, ent_b->oldest_job_waiting); 143 } 144 145 static void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity, 146 struct drm_sched_rq *rq) 147 { 148 if (!RB_EMPTY_NODE(&entity->rb_tree_node)) { 149 rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root); 150 RB_CLEAR_NODE(&entity->rb_tree_node); 151 } 152 } 153 154 void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity, 155 struct drm_sched_rq *rq, 156 ktime_t ts) 157 { 158 /* 159 * Both locks need to be grabbed, one to protect from entity->rq change 160 * for entity from within concurrent drm_sched_entity_select_rq and the 161 * other to update the rb tree structure. 162 */ 163 lockdep_assert_held(&entity->lock); 164 lockdep_assert_held(&rq->lock); 165 166 drm_sched_rq_remove_fifo_locked(entity, rq); 167 168 entity->oldest_job_waiting = ts; 169 170 rb_add_cached(&entity->rb_tree_node, &rq->rb_tree_root, 171 drm_sched_entity_compare_before); 172 } 173 174 /** 175 * drm_sched_rq_init - initialize a given run queue struct 176 * 177 * @sched: scheduler instance to associate with this run queue 178 * @rq: scheduler run queue 179 * 180 * Initializes a scheduler runqueue. 181 */ 182 static void drm_sched_rq_init(struct drm_gpu_scheduler *sched, 183 struct drm_sched_rq *rq) 184 { 185 spin_lock_init(&rq->lock); 186 INIT_LIST_HEAD(&rq->entities); 187 rq->rb_tree_root = RB_ROOT_CACHED; 188 rq->current_entity = NULL; 189 rq->sched = sched; 190 } 191 192 /** 193 * drm_sched_rq_add_entity - add an entity 194 * 195 * @rq: scheduler run queue 196 * @entity: scheduler entity 197 * 198 * Adds a scheduler entity to the run queue. 199 */ 200 void drm_sched_rq_add_entity(struct drm_sched_rq *rq, 201 struct drm_sched_entity *entity) 202 { 203 lockdep_assert_held(&entity->lock); 204 lockdep_assert_held(&rq->lock); 205 206 if (!list_empty(&entity->list)) 207 return; 208 209 atomic_inc(rq->sched->score); 210 list_add_tail(&entity->list, &rq->entities); 211 } 212 213 /** 214 * drm_sched_rq_remove_entity - remove an entity 215 * 216 * @rq: scheduler run queue 217 * @entity: scheduler entity 218 * 219 * Removes a scheduler entity from the run queue. 220 */ 221 void drm_sched_rq_remove_entity(struct drm_sched_rq *rq, 222 struct drm_sched_entity *entity) 223 { 224 lockdep_assert_held(&entity->lock); 225 226 if (list_empty(&entity->list)) 227 return; 228 229 spin_lock(&rq->lock); 230 231 atomic_dec(rq->sched->score); 232 list_del_init(&entity->list); 233 234 if (rq->current_entity == entity) 235 rq->current_entity = NULL; 236 237 if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) 238 drm_sched_rq_remove_fifo_locked(entity, rq); 239 240 spin_unlock(&rq->lock); 241 } 242 243 /** 244 * drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run 245 * 246 * @sched: the gpu scheduler 247 * @rq: scheduler run queue to check. 248 * 249 * Try to find the next ready entity. 250 * 251 * Return an entity if one is found; return an error-pointer (!NULL) if an 252 * entity was ready, but the scheduler had insufficient credits to accommodate 253 * its job; return NULL, if no ready entity was found. 254 */ 255 static struct drm_sched_entity * 256 drm_sched_rq_select_entity_rr(struct drm_gpu_scheduler *sched, 257 struct drm_sched_rq *rq) 258 { 259 struct drm_sched_entity *entity; 260 261 spin_lock(&rq->lock); 262 263 entity = rq->current_entity; 264 if (entity) { 265 list_for_each_entry_continue(entity, &rq->entities, list) { 266 if (drm_sched_entity_is_ready(entity)) 267 goto found; 268 } 269 } 270 271 list_for_each_entry(entity, &rq->entities, list) { 272 if (drm_sched_entity_is_ready(entity)) 273 goto found; 274 275 if (entity == rq->current_entity) 276 break; 277 } 278 279 spin_unlock(&rq->lock); 280 281 return NULL; 282 283 found: 284 if (!drm_sched_can_queue(sched, entity)) { 285 /* 286 * If scheduler cannot take more jobs signal the caller to not 287 * consider lower priority queues. 288 */ 289 entity = ERR_PTR(-ENOSPC); 290 } else { 291 rq->current_entity = entity; 292 reinit_completion(&entity->entity_idle); 293 } 294 295 spin_unlock(&rq->lock); 296 297 return entity; 298 } 299 300 /** 301 * drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run 302 * 303 * @sched: the gpu scheduler 304 * @rq: scheduler run queue to check. 305 * 306 * Find oldest waiting ready entity. 307 * 308 * Return an entity if one is found; return an error-pointer (!NULL) if an 309 * entity was ready, but the scheduler had insufficient credits to accommodate 310 * its job; return NULL, if no ready entity was found. 311 */ 312 static struct drm_sched_entity * 313 drm_sched_rq_select_entity_fifo(struct drm_gpu_scheduler *sched, 314 struct drm_sched_rq *rq) 315 { 316 struct rb_node *rb; 317 318 spin_lock(&rq->lock); 319 for (rb = rb_first_cached(&rq->rb_tree_root); rb; rb = rb_next(rb)) { 320 struct drm_sched_entity *entity; 321 322 entity = rb_entry(rb, struct drm_sched_entity, rb_tree_node); 323 if (drm_sched_entity_is_ready(entity)) { 324 /* If we can't queue yet, preserve the current entity in 325 * terms of fairness. 326 */ 327 if (!drm_sched_can_queue(sched, entity)) { 328 spin_unlock(&rq->lock); 329 return ERR_PTR(-ENOSPC); 330 } 331 332 reinit_completion(&entity->entity_idle); 333 break; 334 } 335 } 336 spin_unlock(&rq->lock); 337 338 return rb ? rb_entry(rb, struct drm_sched_entity, rb_tree_node) : NULL; 339 } 340 341 /** 342 * drm_sched_run_job_queue - enqueue run-job work 343 * @sched: scheduler instance 344 */ 345 static void drm_sched_run_job_queue(struct drm_gpu_scheduler *sched) 346 { 347 if (!READ_ONCE(sched->pause_submit)) 348 queue_work(sched->submit_wq, &sched->work_run_job); 349 } 350 351 /** 352 * drm_sched_run_free_queue - enqueue free-job work 353 * @sched: scheduler instance 354 */ 355 static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched) 356 { 357 if (!READ_ONCE(sched->pause_submit)) 358 queue_work(sched->submit_wq, &sched->work_free_job); 359 } 360 361 /** 362 * drm_sched_job_done - complete a job 363 * @s_job: pointer to the job which is done 364 * 365 * Finish the job's fence and resubmit the work items. 366 */ 367 static void drm_sched_job_done(struct drm_sched_job *s_job, int result) 368 { 369 struct drm_sched_fence *s_fence = s_job->s_fence; 370 struct drm_gpu_scheduler *sched = s_fence->sched; 371 372 atomic_sub(s_job->credits, &sched->credit_count); 373 atomic_dec(sched->score); 374 375 trace_drm_sched_job_done(s_fence); 376 377 dma_fence_get(&s_fence->finished); 378 drm_sched_fence_finished(s_fence, result); 379 dma_fence_put(&s_fence->finished); 380 drm_sched_run_free_queue(sched); 381 } 382 383 /** 384 * drm_sched_job_done_cb - the callback for a done job 385 * @f: fence 386 * @cb: fence callbacks 387 */ 388 static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb) 389 { 390 struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb); 391 392 drm_sched_job_done(s_job, f->error); 393 } 394 395 /** 396 * drm_sched_start_timeout - start timeout for reset worker 397 * 398 * @sched: scheduler instance to start the worker for 399 * 400 * Start the timeout for the given scheduler. 401 */ 402 static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched) 403 { 404 lockdep_assert_held(&sched->job_list_lock); 405 406 if (sched->timeout != MAX_SCHEDULE_TIMEOUT && 407 !list_empty(&sched->pending_list)) 408 mod_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout); 409 } 410 411 static void drm_sched_start_timeout_unlocked(struct drm_gpu_scheduler *sched) 412 { 413 spin_lock(&sched->job_list_lock); 414 drm_sched_start_timeout(sched); 415 spin_unlock(&sched->job_list_lock); 416 } 417 418 /** 419 * drm_sched_tdr_queue_imm: - immediately start job timeout handler 420 * 421 * @sched: scheduler for which the timeout handling should be started. 422 * 423 * Start timeout handling immediately for the named scheduler. 424 */ 425 void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched) 426 { 427 spin_lock(&sched->job_list_lock); 428 sched->timeout = 0; 429 drm_sched_start_timeout(sched); 430 spin_unlock(&sched->job_list_lock); 431 } 432 EXPORT_SYMBOL(drm_sched_tdr_queue_imm); 433 434 /** 435 * drm_sched_fault - immediately start timeout handler 436 * 437 * @sched: scheduler where the timeout handling should be started. 438 * 439 * Start timeout handling immediately when the driver detects a hardware fault. 440 */ 441 void drm_sched_fault(struct drm_gpu_scheduler *sched) 442 { 443 if (sched->timeout_wq) 444 mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0); 445 } 446 EXPORT_SYMBOL(drm_sched_fault); 447 448 /** 449 * drm_sched_suspend_timeout - Suspend scheduler job timeout 450 * 451 * @sched: scheduler instance for which to suspend the timeout 452 * 453 * Suspend the delayed work timeout for the scheduler. This is done by 454 * modifying the delayed work timeout to an arbitrary large value, 455 * MAX_SCHEDULE_TIMEOUT in this case. 456 * 457 * Returns the timeout remaining 458 * 459 */ 460 unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched) 461 { 462 unsigned long sched_timeout, now = jiffies; 463 464 sched_timeout = sched->work_tdr.timer.expires; 465 466 /* 467 * Modify the timeout to an arbitrarily large value. This also prevents 468 * the timeout to be restarted when new submissions arrive 469 */ 470 if (mod_delayed_work(sched->timeout_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT) 471 && time_after(sched_timeout, now)) 472 return sched_timeout - now; 473 else 474 return sched->timeout; 475 } 476 EXPORT_SYMBOL(drm_sched_suspend_timeout); 477 478 /** 479 * drm_sched_resume_timeout - Resume scheduler job timeout 480 * 481 * @sched: scheduler instance for which to resume the timeout 482 * @remaining: remaining timeout 483 * 484 * Resume the delayed work timeout for the scheduler. 485 */ 486 void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched, 487 unsigned long remaining) 488 { 489 spin_lock(&sched->job_list_lock); 490 491 if (list_empty(&sched->pending_list)) 492 cancel_delayed_work(&sched->work_tdr); 493 else 494 mod_delayed_work(sched->timeout_wq, &sched->work_tdr, remaining); 495 496 spin_unlock(&sched->job_list_lock); 497 } 498 EXPORT_SYMBOL(drm_sched_resume_timeout); 499 500 static void drm_sched_job_begin(struct drm_sched_job *s_job) 501 { 502 struct drm_gpu_scheduler *sched = s_job->sched; 503 504 spin_lock(&sched->job_list_lock); 505 list_add_tail(&s_job->list, &sched->pending_list); 506 drm_sched_start_timeout(sched); 507 spin_unlock(&sched->job_list_lock); 508 } 509 510 /** 511 * drm_sched_job_reinsert_on_false_timeout - reinsert the job on a false timeout 512 * @sched: scheduler instance 513 * @job: job to be reinserted on the pending list 514 * 515 * In the case of a "false timeout" - when a timeout occurs but the GPU isn't 516 * hung and is making progress, the scheduler must reinsert the job back into 517 * @sched->pending_list. Otherwise, the job and its resources won't be freed 518 * through the &struct drm_sched_backend_ops.free_job callback. 519 * 520 * This function must be used in "false timeout" cases only. 521 */ 522 static void drm_sched_job_reinsert_on_false_timeout(struct drm_gpu_scheduler *sched, 523 struct drm_sched_job *job) 524 { 525 spin_lock(&sched->job_list_lock); 526 list_add(&job->list, &sched->pending_list); 527 528 /* After reinserting the job, the scheduler enqueues the free-job work 529 * again if ready. Otherwise, a signaled job could be added to the 530 * pending list, but never freed. 531 */ 532 drm_sched_run_free_queue(sched); 533 spin_unlock(&sched->job_list_lock); 534 } 535 536 static void drm_sched_job_timedout(struct work_struct *work) 537 { 538 struct drm_gpu_scheduler *sched; 539 struct drm_sched_job *job; 540 enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_RESET; 541 542 sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work); 543 544 /* Protects against concurrent deletion in drm_sched_get_finished_job */ 545 spin_lock(&sched->job_list_lock); 546 job = list_first_entry_or_null(&sched->pending_list, 547 struct drm_sched_job, list); 548 549 if (job) { 550 /* 551 * Remove the bad job so it cannot be freed by a concurrent 552 * &struct drm_sched_backend_ops.free_job. It will be 553 * reinserted after the scheduler's work items have been 554 * cancelled, at which point it's safe. 555 */ 556 list_del_init(&job->list); 557 spin_unlock(&sched->job_list_lock); 558 559 status = job->sched->ops->timedout_job(job); 560 561 /* 562 * Guilty job did complete and hence needs to be manually removed 563 * See drm_sched_stop doc. 564 */ 565 if (sched->free_guilty) { 566 job->sched->ops->free_job(job); 567 sched->free_guilty = false; 568 } 569 570 if (status == DRM_GPU_SCHED_STAT_NO_HANG) 571 drm_sched_job_reinsert_on_false_timeout(sched, job); 572 } else { 573 spin_unlock(&sched->job_list_lock); 574 } 575 576 if (status != DRM_GPU_SCHED_STAT_ENODEV) 577 drm_sched_start_timeout_unlocked(sched); 578 } 579 580 /** 581 * drm_sched_stop - stop the scheduler 582 * 583 * @sched: scheduler instance 584 * @bad: job which caused the time out 585 * 586 * Stop the scheduler and also removes and frees all completed jobs. 587 * Note: bad job will not be freed as it might be used later and so it's 588 * callers responsibility to release it manually if it's not part of the 589 * pending list any more. 590 * 591 * This function is typically used for reset recovery (see the docu of 592 * drm_sched_backend_ops.timedout_job() for details). Do not call it for 593 * scheduler teardown, i.e., before calling drm_sched_fini(). 594 * 595 * As it's only used for reset recovery, drivers must not call this function 596 * in their &struct drm_sched_backend_ops.timedout_job callback when they 597 * skip a reset using &enum drm_gpu_sched_stat.DRM_GPU_SCHED_STAT_NO_HANG. 598 */ 599 void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad) 600 { 601 struct drm_sched_job *s_job, *tmp; 602 603 drm_sched_wqueue_stop(sched); 604 605 /* 606 * Reinsert back the bad job here - now it's safe as 607 * drm_sched_get_finished_job() cannot race against us and release the 608 * bad job at this point - we parked (waited for) any in progress 609 * (earlier) cleanups and drm_sched_get_finished_job() will not be 610 * called now until the scheduler's work items are submitted again. 611 */ 612 if (bad && bad->sched == sched) 613 /* 614 * Add at the head of the queue to reflect it was the earliest 615 * job extracted. 616 */ 617 list_add(&bad->list, &sched->pending_list); 618 619 /* 620 * Iterate the job list from later to earlier one and either deactive 621 * their HW callbacks or remove them from pending list if they already 622 * signaled. 623 * This iteration is thread safe as the scheduler's work items have been 624 * cancelled. 625 */ 626 list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list, 627 list) { 628 if (s_job->s_fence->parent && 629 dma_fence_remove_callback(s_job->s_fence->parent, 630 &s_job->cb)) { 631 dma_fence_put(s_job->s_fence->parent); 632 s_job->s_fence->parent = NULL; 633 atomic_sub(s_job->credits, &sched->credit_count); 634 } else { 635 /* 636 * remove job from pending_list. 637 * Locking here is for concurrent resume timeout 638 */ 639 spin_lock(&sched->job_list_lock); 640 list_del_init(&s_job->list); 641 spin_unlock(&sched->job_list_lock); 642 643 /* 644 * Wait for job's HW fence callback to finish using s_job 645 * before releasing it. 646 * 647 * Job is still alive so fence refcount at least 1 648 */ 649 dma_fence_wait(&s_job->s_fence->finished, false); 650 651 /* 652 * We must keep bad job alive for later use during 653 * recovery by some of the drivers but leave a hint 654 * that the guilty job must be released. 655 */ 656 if (bad != s_job) 657 sched->ops->free_job(s_job); 658 else 659 sched->free_guilty = true; 660 } 661 } 662 663 /* 664 * Stop pending timer in flight as we rearm it in drm_sched_start. This 665 * avoids the pending timeout work in progress to fire right away after 666 * this TDR finished and before the newly restarted jobs had a 667 * chance to complete. 668 */ 669 cancel_delayed_work(&sched->work_tdr); 670 } 671 EXPORT_SYMBOL(drm_sched_stop); 672 673 /** 674 * drm_sched_start - recover jobs after a reset 675 * 676 * @sched: scheduler instance 677 * @errno: error to set on the pending fences 678 * 679 * This function is typically used for reset recovery (see the docu of 680 * drm_sched_backend_ops.timedout_job() for details). Do not call it for 681 * scheduler startup. The scheduler itself is fully operational after 682 * drm_sched_init() succeeded. 683 * 684 * As it's only used for reset recovery, drivers must not call this function 685 * in their &struct drm_sched_backend_ops.timedout_job callback when they 686 * skip a reset using &enum drm_gpu_sched_stat.DRM_GPU_SCHED_STAT_NO_HANG. 687 */ 688 void drm_sched_start(struct drm_gpu_scheduler *sched, int errno) 689 { 690 struct drm_sched_job *s_job, *tmp; 691 692 /* 693 * Locking the list is not required here as the scheduler's work items 694 * are currently not running, so no new jobs are being inserted or 695 * removed. Also concurrent GPU recovers can't run in parallel. 696 */ 697 list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) { 698 struct dma_fence *fence = s_job->s_fence->parent; 699 700 atomic_add(s_job->credits, &sched->credit_count); 701 702 if (!fence) { 703 drm_sched_job_done(s_job, errno ?: -ECANCELED); 704 continue; 705 } 706 707 if (dma_fence_add_callback(fence, &s_job->cb, 708 drm_sched_job_done_cb)) 709 drm_sched_job_done(s_job, fence->error ?: errno); 710 } 711 712 drm_sched_start_timeout_unlocked(sched); 713 drm_sched_wqueue_start(sched); 714 } 715 EXPORT_SYMBOL(drm_sched_start); 716 717 /** 718 * drm_sched_resubmit_jobs - Deprecated, don't use in new code! 719 * 720 * @sched: scheduler instance 721 * 722 * Re-submitting jobs was a concept AMD came up as cheap way to implement 723 * recovery after a job timeout. 724 * 725 * This turned out to be not working very well. First of all there are many 726 * problem with the dma_fence implementation and requirements. Either the 727 * implementation is risking deadlocks with core memory management or violating 728 * documented implementation details of the dma_fence object. 729 * 730 * Drivers can still save and restore their state for recovery operations, but 731 * we shouldn't make this a general scheduler feature around the dma_fence 732 * interface. 733 */ 734 void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched) 735 { 736 struct drm_sched_job *s_job, *tmp; 737 uint64_t guilty_context; 738 bool found_guilty = false; 739 struct dma_fence *fence; 740 741 list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) { 742 struct drm_sched_fence *s_fence = s_job->s_fence; 743 744 if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) { 745 found_guilty = true; 746 guilty_context = s_job->s_fence->scheduled.context; 747 } 748 749 if (found_guilty && s_job->s_fence->scheduled.context == guilty_context) 750 dma_fence_set_error(&s_fence->finished, -ECANCELED); 751 752 fence = sched->ops->run_job(s_job); 753 754 if (IS_ERR_OR_NULL(fence)) { 755 if (IS_ERR(fence)) 756 dma_fence_set_error(&s_fence->finished, PTR_ERR(fence)); 757 758 s_job->s_fence->parent = NULL; 759 } else { 760 761 s_job->s_fence->parent = dma_fence_get(fence); 762 763 /* Drop for orignal kref_init */ 764 dma_fence_put(fence); 765 } 766 } 767 } 768 EXPORT_SYMBOL(drm_sched_resubmit_jobs); 769 770 /** 771 * drm_sched_job_init - init a scheduler job 772 * @job: scheduler job to init 773 * @entity: scheduler entity to use 774 * @credits: the number of credits this job contributes to the schedulers 775 * credit limit 776 * @owner: job owner for debugging 777 * @drm_client_id: &struct drm_file.client_id of the owner (used by trace 778 * events) 779 * 780 * Refer to drm_sched_entity_push_job() documentation 781 * for locking considerations. 782 * 783 * Drivers must make sure drm_sched_job_cleanup() if this function returns 784 * successfully, even when @job is aborted before drm_sched_job_arm() is called. 785 * 786 * Note that this function does not assign a valid value to each struct member 787 * of struct drm_sched_job. Take a look at that struct's documentation to see 788 * who sets which struct member with what lifetime. 789 * 790 * WARNING: amdgpu abuses &drm_sched.ready to signal when the hardware 791 * has died, which can mean that there's no valid runqueue for a @entity. 792 * This function returns -ENOENT in this case (which probably should be -EIO as 793 * a more meanigful return value). 794 * 795 * Returns 0 for success, negative error code otherwise. 796 */ 797 int drm_sched_job_init(struct drm_sched_job *job, 798 struct drm_sched_entity *entity, 799 u32 credits, void *owner, 800 uint64_t drm_client_id) 801 { 802 if (!entity->rq) { 803 /* This will most likely be followed by missing frames 804 * or worse--a blank screen--leave a trail in the 805 * logs, so this can be debugged easier. 806 */ 807 dev_err(job->sched->dev, "%s: entity has no rq!\n", __func__); 808 return -ENOENT; 809 } 810 811 if (unlikely(!credits)) { 812 pr_err("*ERROR* %s: credits cannot be 0!\n", __func__); 813 return -EINVAL; 814 } 815 816 /* 817 * We don't know for sure how the user has allocated. Thus, zero the 818 * struct so that unallowed (i.e., too early) usage of pointers that 819 * this function does not set is guaranteed to lead to a NULL pointer 820 * exception instead of UB. 821 */ 822 memset(job, 0, sizeof(*job)); 823 824 job->entity = entity; 825 job->credits = credits; 826 job->s_fence = drm_sched_fence_alloc(entity, owner, drm_client_id); 827 if (!job->s_fence) 828 return -ENOMEM; 829 830 INIT_LIST_HEAD(&job->list); 831 832 xa_init_flags(&job->dependencies, XA_FLAGS_ALLOC); 833 834 return 0; 835 } 836 EXPORT_SYMBOL(drm_sched_job_init); 837 838 /** 839 * drm_sched_job_arm - arm a scheduler job for execution 840 * @job: scheduler job to arm 841 * 842 * This arms a scheduler job for execution. Specifically it initializes the 843 * &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv 844 * or other places that need to track the completion of this job. It also 845 * initializes sequence numbers, which are fundamental for fence ordering. 846 * 847 * Refer to drm_sched_entity_push_job() documentation for locking 848 * considerations. 849 * 850 * Once this function was called, you *must* submit @job with 851 * drm_sched_entity_push_job(). 852 * 853 * This can only be called if drm_sched_job_init() succeeded. 854 */ 855 void drm_sched_job_arm(struct drm_sched_job *job) 856 { 857 struct drm_gpu_scheduler *sched; 858 struct drm_sched_entity *entity = job->entity; 859 860 BUG_ON(!entity); 861 drm_sched_entity_select_rq(entity); 862 sched = entity->rq->sched; 863 864 job->sched = sched; 865 job->s_priority = entity->priority; 866 867 drm_sched_fence_init(job->s_fence, job->entity); 868 } 869 EXPORT_SYMBOL(drm_sched_job_arm); 870 871 /** 872 * drm_sched_job_add_dependency - adds the fence as a job dependency 873 * @job: scheduler job to add the dependencies to 874 * @fence: the dma_fence to add to the list of dependencies. 875 * 876 * Note that @fence is consumed in both the success and error cases. 877 * 878 * Returns: 879 * 0 on success, or an error on failing to expand the array. 880 */ 881 int drm_sched_job_add_dependency(struct drm_sched_job *job, 882 struct dma_fence *fence) 883 { 884 struct dma_fence *entry; 885 unsigned long index; 886 u32 id = 0; 887 int ret; 888 889 if (!fence) 890 return 0; 891 892 /* Deduplicate if we already depend on a fence from the same context. 893 * This lets the size of the array of deps scale with the number of 894 * engines involved, rather than the number of BOs. 895 */ 896 xa_for_each(&job->dependencies, index, entry) { 897 if (entry->context != fence->context) 898 continue; 899 900 if (dma_fence_is_later(fence, entry)) { 901 dma_fence_put(entry); 902 xa_store(&job->dependencies, index, fence, GFP_KERNEL); 903 } else { 904 dma_fence_put(fence); 905 } 906 return 0; 907 } 908 909 ret = xa_alloc(&job->dependencies, &id, fence, xa_limit_32b, GFP_KERNEL); 910 if (ret != 0) 911 dma_fence_put(fence); 912 913 return ret; 914 } 915 EXPORT_SYMBOL(drm_sched_job_add_dependency); 916 917 /** 918 * drm_sched_job_add_syncobj_dependency - adds a syncobj's fence as a job dependency 919 * @job: scheduler job to add the dependencies to 920 * @file: drm file private pointer 921 * @handle: syncobj handle to lookup 922 * @point: timeline point 923 * 924 * This adds the fence matching the given syncobj to @job. 925 * 926 * Returns: 927 * 0 on success, or an error on failing to expand the array. 928 */ 929 int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job, 930 struct drm_file *file, 931 u32 handle, 932 u32 point) 933 { 934 struct dma_fence *fence; 935 int ret; 936 937 ret = drm_syncobj_find_fence(file, handle, point, 0, &fence); 938 if (ret) 939 return ret; 940 941 return drm_sched_job_add_dependency(job, fence); 942 } 943 EXPORT_SYMBOL(drm_sched_job_add_syncobj_dependency); 944 945 /** 946 * drm_sched_job_add_resv_dependencies - add all fences from the resv to the job 947 * @job: scheduler job to add the dependencies to 948 * @resv: the dma_resv object to get the fences from 949 * @usage: the dma_resv_usage to use to filter the fences 950 * 951 * This adds all fences matching the given usage from @resv to @job. 952 * Must be called with the @resv lock held. 953 * 954 * Returns: 955 * 0 on success, or an error on failing to expand the array. 956 */ 957 int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job, 958 struct dma_resv *resv, 959 enum dma_resv_usage usage) 960 { 961 struct dma_resv_iter cursor; 962 struct dma_fence *fence; 963 int ret; 964 965 dma_resv_assert_held(resv); 966 967 dma_resv_for_each_fence(&cursor, resv, usage, fence) { 968 /* Make sure to grab an additional ref on the added fence */ 969 dma_fence_get(fence); 970 ret = drm_sched_job_add_dependency(job, fence); 971 if (ret) { 972 dma_fence_put(fence); 973 return ret; 974 } 975 } 976 return 0; 977 } 978 EXPORT_SYMBOL(drm_sched_job_add_resv_dependencies); 979 980 /** 981 * drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job 982 * dependencies 983 * @job: scheduler job to add the dependencies to 984 * @obj: the gem object to add new dependencies from. 985 * @write: whether the job might write the object (so we need to depend on 986 * shared fences in the reservation object). 987 * 988 * This should be called after drm_gem_lock_reservations() on your array of 989 * GEM objects used in the job but before updating the reservations with your 990 * own fences. 991 * 992 * Returns: 993 * 0 on success, or an error on failing to expand the array. 994 */ 995 int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job, 996 struct drm_gem_object *obj, 997 bool write) 998 { 999 return drm_sched_job_add_resv_dependencies(job, obj->resv, 1000 dma_resv_usage_rw(write)); 1001 } 1002 EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies); 1003 1004 /** 1005 * drm_sched_job_has_dependency - check whether fence is the job's dependency 1006 * @job: scheduler job to check 1007 * @fence: fence to look for 1008 * 1009 * Returns: 1010 * True if @fence is found within the job's dependencies, or otherwise false. 1011 */ 1012 bool drm_sched_job_has_dependency(struct drm_sched_job *job, 1013 struct dma_fence *fence) 1014 { 1015 struct dma_fence *f; 1016 unsigned long index; 1017 1018 xa_for_each(&job->dependencies, index, f) { 1019 if (f == fence) 1020 return true; 1021 } 1022 1023 return false; 1024 } 1025 EXPORT_SYMBOL(drm_sched_job_has_dependency); 1026 1027 /** 1028 * drm_sched_job_cleanup - clean up scheduler job resources 1029 * @job: scheduler job to clean up 1030 * 1031 * Cleans up the resources allocated with drm_sched_job_init(). 1032 * 1033 * Drivers should call this from their error unwind code if @job is aborted 1034 * before drm_sched_job_arm() is called. 1035 * 1036 * drm_sched_job_arm() is a point of no return since it initializes the fences 1037 * and their sequence number etc. Once that function has been called, you *must* 1038 * submit it with drm_sched_entity_push_job() and cannot simply abort it by 1039 * calling drm_sched_job_cleanup(). 1040 * 1041 * This function should be called in the &drm_sched_backend_ops.free_job callback. 1042 */ 1043 void drm_sched_job_cleanup(struct drm_sched_job *job) 1044 { 1045 struct dma_fence *fence; 1046 unsigned long index; 1047 1048 if (kref_read(&job->s_fence->finished.refcount)) { 1049 /* The job has been processed by the scheduler, i.e., 1050 * drm_sched_job_arm() and drm_sched_entity_push_job() have 1051 * been called. 1052 */ 1053 dma_fence_put(&job->s_fence->finished); 1054 } else { 1055 /* The job was aborted before it has been committed to be run; 1056 * notably, drm_sched_job_arm() has not been called. 1057 */ 1058 drm_sched_fence_free(job->s_fence); 1059 } 1060 1061 job->s_fence = NULL; 1062 1063 xa_for_each(&job->dependencies, index, fence) { 1064 dma_fence_put(fence); 1065 } 1066 xa_destroy(&job->dependencies); 1067 1068 } 1069 EXPORT_SYMBOL(drm_sched_job_cleanup); 1070 1071 /** 1072 * drm_sched_wakeup - Wake up the scheduler if it is ready to queue 1073 * @sched: scheduler instance 1074 * 1075 * Wake up the scheduler if we can queue jobs. 1076 */ 1077 void drm_sched_wakeup(struct drm_gpu_scheduler *sched) 1078 { 1079 drm_sched_run_job_queue(sched); 1080 } 1081 1082 /** 1083 * drm_sched_select_entity - Select next entity to process 1084 * 1085 * @sched: scheduler instance 1086 * 1087 * Return an entity to process or NULL if none are found. 1088 * 1089 * Note, that we break out of the for-loop when "entity" is non-null, which can 1090 * also be an error-pointer--this assures we don't process lower priority 1091 * run-queues. See comments in the respectively called functions. 1092 */ 1093 static struct drm_sched_entity * 1094 drm_sched_select_entity(struct drm_gpu_scheduler *sched) 1095 { 1096 struct drm_sched_entity *entity; 1097 int i; 1098 1099 /* Start with the highest priority. 1100 */ 1101 for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) { 1102 entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ? 1103 drm_sched_rq_select_entity_fifo(sched, sched->sched_rq[i]) : 1104 drm_sched_rq_select_entity_rr(sched, sched->sched_rq[i]); 1105 if (entity) 1106 break; 1107 } 1108 1109 return IS_ERR(entity) ? NULL : entity; 1110 } 1111 1112 /** 1113 * drm_sched_get_finished_job - fetch the next finished job to be destroyed 1114 * 1115 * @sched: scheduler instance 1116 * @have_more: are there more finished jobs on the list 1117 * 1118 * Informs the caller through @have_more whether there are more finished jobs 1119 * besides the returned one. 1120 * 1121 * Returns the next finished job from the pending list (if there is one) 1122 * ready for it to be destroyed. 1123 */ 1124 static struct drm_sched_job * 1125 drm_sched_get_finished_job(struct drm_gpu_scheduler *sched, bool *have_more) 1126 { 1127 struct drm_sched_job *job, *next; 1128 1129 spin_lock(&sched->job_list_lock); 1130 1131 job = list_first_entry_or_null(&sched->pending_list, 1132 struct drm_sched_job, list); 1133 if (job && dma_fence_is_signaled(&job->s_fence->finished)) { 1134 /* remove job from pending_list */ 1135 list_del_init(&job->list); 1136 1137 /* cancel this job's TO timer */ 1138 cancel_delayed_work(&sched->work_tdr); 1139 1140 *have_more = false; 1141 next = list_first_entry_or_null(&sched->pending_list, 1142 typeof(*next), list); 1143 if (next) { 1144 /* make the scheduled timestamp more accurate */ 1145 if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, 1146 &next->s_fence->scheduled.flags)) 1147 next->s_fence->scheduled.timestamp = 1148 dma_fence_timestamp(&job->s_fence->finished); 1149 1150 *have_more = dma_fence_is_signaled(&next->s_fence->finished); 1151 1152 /* start TO timer for next job */ 1153 drm_sched_start_timeout(sched); 1154 } 1155 } else { 1156 job = NULL; 1157 } 1158 1159 spin_unlock(&sched->job_list_lock); 1160 1161 return job; 1162 } 1163 1164 /** 1165 * drm_sched_pick_best - Get a drm sched from a sched_list with the least load 1166 * @sched_list: list of drm_gpu_schedulers 1167 * @num_sched_list: number of drm_gpu_schedulers in the sched_list 1168 * 1169 * Returns pointer of the sched with the least load or NULL if none of the 1170 * drm_gpu_schedulers are ready 1171 */ 1172 struct drm_gpu_scheduler * 1173 drm_sched_pick_best(struct drm_gpu_scheduler **sched_list, 1174 unsigned int num_sched_list) 1175 { 1176 struct drm_gpu_scheduler *sched, *picked_sched = NULL; 1177 int i; 1178 unsigned int min_score = UINT_MAX, num_score; 1179 1180 for (i = 0; i < num_sched_list; ++i) { 1181 sched = sched_list[i]; 1182 1183 if (!sched->ready) { 1184 DRM_WARN("scheduler %s is not ready, skipping", 1185 sched->name); 1186 continue; 1187 } 1188 1189 num_score = atomic_read(sched->score); 1190 if (num_score < min_score) { 1191 min_score = num_score; 1192 picked_sched = sched; 1193 } 1194 } 1195 1196 return picked_sched; 1197 } 1198 EXPORT_SYMBOL(drm_sched_pick_best); 1199 1200 /** 1201 * drm_sched_free_job_work - worker to call free_job 1202 * 1203 * @w: free job work 1204 */ 1205 static void drm_sched_free_job_work(struct work_struct *w) 1206 { 1207 struct drm_gpu_scheduler *sched = 1208 container_of(w, struct drm_gpu_scheduler, work_free_job); 1209 struct drm_sched_job *job; 1210 bool have_more; 1211 1212 job = drm_sched_get_finished_job(sched, &have_more); 1213 if (job) { 1214 sched->ops->free_job(job); 1215 if (have_more) 1216 drm_sched_run_free_queue(sched); 1217 } 1218 1219 drm_sched_run_job_queue(sched); 1220 } 1221 1222 /** 1223 * drm_sched_run_job_work - worker to call run_job 1224 * 1225 * @w: run job work 1226 */ 1227 static void drm_sched_run_job_work(struct work_struct *w) 1228 { 1229 struct drm_gpu_scheduler *sched = 1230 container_of(w, struct drm_gpu_scheduler, work_run_job); 1231 struct drm_sched_entity *entity; 1232 struct dma_fence *fence; 1233 struct drm_sched_fence *s_fence; 1234 struct drm_sched_job *sched_job; 1235 int r; 1236 1237 /* Find entity with a ready job */ 1238 entity = drm_sched_select_entity(sched); 1239 if (!entity) 1240 return; /* No more work */ 1241 1242 sched_job = drm_sched_entity_pop_job(entity); 1243 if (!sched_job) { 1244 complete_all(&entity->entity_idle); 1245 drm_sched_run_job_queue(sched); 1246 return; 1247 } 1248 1249 s_fence = sched_job->s_fence; 1250 1251 atomic_add(sched_job->credits, &sched->credit_count); 1252 drm_sched_job_begin(sched_job); 1253 1254 trace_drm_sched_job_run(sched_job, entity); 1255 /* 1256 * The run_job() callback must by definition return a fence whose 1257 * refcount has been incremented for the scheduler already. 1258 */ 1259 fence = sched->ops->run_job(sched_job); 1260 complete_all(&entity->entity_idle); 1261 drm_sched_fence_scheduled(s_fence, fence); 1262 1263 if (!IS_ERR_OR_NULL(fence)) { 1264 r = dma_fence_add_callback(fence, &sched_job->cb, 1265 drm_sched_job_done_cb); 1266 if (r == -ENOENT) 1267 drm_sched_job_done(sched_job, fence->error); 1268 else if (r) 1269 DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n", r); 1270 1271 dma_fence_put(fence); 1272 } else { 1273 drm_sched_job_done(sched_job, IS_ERR(fence) ? 1274 PTR_ERR(fence) : 0); 1275 } 1276 1277 wake_up(&sched->job_scheduled); 1278 drm_sched_run_job_queue(sched); 1279 } 1280 1281 static struct workqueue_struct *drm_sched_alloc_wq(const char *name) 1282 { 1283 #if (IS_ENABLED(CONFIG_LOCKDEP)) 1284 static struct lockdep_map map = { 1285 .name = "drm_sched_lockdep_map" 1286 }; 1287 1288 /* 1289 * Avoid leaking a lockdep map on each drm sched creation and 1290 * destruction by using a single lockdep map for all drm sched 1291 * allocated submit_wq. 1292 */ 1293 1294 return alloc_ordered_workqueue_lockdep_map(name, WQ_MEM_RECLAIM, &map); 1295 #else 1296 return alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); 1297 #endif 1298 } 1299 1300 /** 1301 * drm_sched_init - Init a gpu scheduler instance 1302 * 1303 * @sched: scheduler instance 1304 * @args: scheduler initialization arguments 1305 * 1306 * Return 0 on success, otherwise error code. 1307 */ 1308 int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_init_args *args) 1309 { 1310 int i; 1311 1312 sched->ops = args->ops; 1313 sched->credit_limit = args->credit_limit; 1314 sched->name = args->name; 1315 sched->timeout = args->timeout; 1316 sched->hang_limit = args->hang_limit; 1317 sched->timeout_wq = args->timeout_wq ? args->timeout_wq : system_wq; 1318 sched->score = args->score ? args->score : &sched->_score; 1319 sched->dev = args->dev; 1320 1321 if (args->num_rqs > DRM_SCHED_PRIORITY_COUNT) { 1322 /* This is a gross violation--tell drivers what the problem is. 1323 */ 1324 dev_err(sched->dev, "%s: num_rqs cannot be greater than DRM_SCHED_PRIORITY_COUNT\n", 1325 __func__); 1326 return -EINVAL; 1327 } else if (sched->sched_rq) { 1328 /* Not an error, but warn anyway so drivers can 1329 * fine-tune their DRM calling order, and return all 1330 * is good. 1331 */ 1332 dev_warn(sched->dev, "%s: scheduler already initialized!\n", __func__); 1333 return 0; 1334 } 1335 1336 if (args->submit_wq) { 1337 sched->submit_wq = args->submit_wq; 1338 sched->own_submit_wq = false; 1339 } else { 1340 sched->submit_wq = drm_sched_alloc_wq(args->name); 1341 if (!sched->submit_wq) 1342 return -ENOMEM; 1343 1344 sched->own_submit_wq = true; 1345 } 1346 1347 sched->sched_rq = kmalloc_array(args->num_rqs, sizeof(*sched->sched_rq), 1348 GFP_KERNEL | __GFP_ZERO); 1349 if (!sched->sched_rq) 1350 goto Out_check_own; 1351 sched->num_rqs = args->num_rqs; 1352 for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) { 1353 sched->sched_rq[i] = kzalloc(sizeof(*sched->sched_rq[i]), GFP_KERNEL); 1354 if (!sched->sched_rq[i]) 1355 goto Out_unroll; 1356 drm_sched_rq_init(sched, sched->sched_rq[i]); 1357 } 1358 1359 init_waitqueue_head(&sched->job_scheduled); 1360 INIT_LIST_HEAD(&sched->pending_list); 1361 spin_lock_init(&sched->job_list_lock); 1362 atomic_set(&sched->credit_count, 0); 1363 INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout); 1364 INIT_WORK(&sched->work_run_job, drm_sched_run_job_work); 1365 INIT_WORK(&sched->work_free_job, drm_sched_free_job_work); 1366 atomic_set(&sched->_score, 0); 1367 atomic64_set(&sched->job_id_count, 0); 1368 sched->pause_submit = false; 1369 1370 sched->ready = true; 1371 return 0; 1372 Out_unroll: 1373 for (--i ; i >= DRM_SCHED_PRIORITY_KERNEL; i--) 1374 kfree(sched->sched_rq[i]); 1375 1376 kfree(sched->sched_rq); 1377 sched->sched_rq = NULL; 1378 Out_check_own: 1379 if (sched->own_submit_wq) 1380 destroy_workqueue(sched->submit_wq); 1381 dev_err(sched->dev, "%s: Failed to setup GPU scheduler--out of memory\n", __func__); 1382 return -ENOMEM; 1383 } 1384 EXPORT_SYMBOL(drm_sched_init); 1385 1386 static void drm_sched_cancel_remaining_jobs(struct drm_gpu_scheduler *sched) 1387 { 1388 struct drm_sched_job *job, *tmp; 1389 1390 /* All other accessors are stopped. No locking necessary. */ 1391 list_for_each_entry_safe_reverse(job, tmp, &sched->pending_list, list) { 1392 sched->ops->cancel_job(job); 1393 list_del(&job->list); 1394 sched->ops->free_job(job); 1395 } 1396 } 1397 1398 /** 1399 * drm_sched_fini - Destroy a gpu scheduler 1400 * 1401 * @sched: scheduler instance 1402 * 1403 * Tears down and cleans up the scheduler. 1404 * 1405 * This stops submission of new jobs to the hardware through &struct 1406 * drm_sched_backend_ops.run_job. If &struct drm_sched_backend_ops.cancel_job 1407 * is implemented, all jobs will be canceled through it and afterwards cleaned 1408 * up through &struct drm_sched_backend_ops.free_job. If cancel_job is not 1409 * implemented, memory could leak. 1410 */ 1411 void drm_sched_fini(struct drm_gpu_scheduler *sched) 1412 { 1413 struct drm_sched_entity *s_entity; 1414 int i; 1415 1416 drm_sched_wqueue_stop(sched); 1417 1418 for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) { 1419 struct drm_sched_rq *rq = sched->sched_rq[i]; 1420 1421 spin_lock(&rq->lock); 1422 list_for_each_entry(s_entity, &rq->entities, list) 1423 /* 1424 * Prevents reinsertion and marks job_queue as idle, 1425 * it will be removed from the rq in drm_sched_entity_fini() 1426 * eventually 1427 * 1428 * FIXME: 1429 * This lacks the proper spin_lock(&s_entity->lock) and 1430 * is, therefore, a race condition. Most notably, it 1431 * can race with drm_sched_entity_push_job(). The lock 1432 * cannot be taken here, however, because this would 1433 * lead to lock inversion -> deadlock. 1434 * 1435 * The best solution probably is to enforce the life 1436 * time rule of all entities having to be torn down 1437 * before their scheduler. Then, however, locking could 1438 * be dropped alltogether from this function. 1439 * 1440 * For now, this remains a potential race in all 1441 * drivers that keep entities alive for longer than 1442 * the scheduler. 1443 */ 1444 s_entity->stopped = true; 1445 spin_unlock(&rq->lock); 1446 kfree(sched->sched_rq[i]); 1447 } 1448 1449 /* Wakeup everyone stuck in drm_sched_entity_flush for this scheduler */ 1450 wake_up_all(&sched->job_scheduled); 1451 1452 /* Confirm no work left behind accessing device structures */ 1453 cancel_delayed_work_sync(&sched->work_tdr); 1454 1455 /* Avoid memory leaks if supported by the driver. */ 1456 if (sched->ops->cancel_job) 1457 drm_sched_cancel_remaining_jobs(sched); 1458 1459 if (sched->own_submit_wq) 1460 destroy_workqueue(sched->submit_wq); 1461 sched->ready = false; 1462 kfree(sched->sched_rq); 1463 sched->sched_rq = NULL; 1464 1465 if (!list_empty(&sched->pending_list)) 1466 dev_warn(sched->dev, "Tearing down scheduler while jobs are pending!\n"); 1467 } 1468 EXPORT_SYMBOL(drm_sched_fini); 1469 1470 /** 1471 * drm_sched_increase_karma - Update sched_entity guilty flag 1472 * 1473 * @bad: The job guilty of time out 1474 * 1475 * Increment on every hang caused by the 'bad' job. If this exceeds the hang 1476 * limit of the scheduler then the respective sched entity is marked guilty and 1477 * jobs from it will not be scheduled further 1478 */ 1479 void drm_sched_increase_karma(struct drm_sched_job *bad) 1480 { 1481 int i; 1482 struct drm_sched_entity *tmp; 1483 struct drm_sched_entity *entity; 1484 struct drm_gpu_scheduler *sched = bad->sched; 1485 1486 /* don't change @bad's karma if it's from KERNEL RQ, 1487 * because sometimes GPU hang would cause kernel jobs (like VM updating jobs) 1488 * corrupt but keep in mind that kernel jobs always considered good. 1489 */ 1490 if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) { 1491 atomic_inc(&bad->karma); 1492 1493 for (i = DRM_SCHED_PRIORITY_HIGH; i < sched->num_rqs; i++) { 1494 struct drm_sched_rq *rq = sched->sched_rq[i]; 1495 1496 spin_lock(&rq->lock); 1497 list_for_each_entry_safe(entity, tmp, &rq->entities, list) { 1498 if (bad->s_fence->scheduled.context == 1499 entity->fence_context) { 1500 if (entity->guilty) 1501 atomic_set(entity->guilty, 1); 1502 break; 1503 } 1504 } 1505 spin_unlock(&rq->lock); 1506 if (&entity->list != &rq->entities) 1507 break; 1508 } 1509 } 1510 } 1511 EXPORT_SYMBOL(drm_sched_increase_karma); 1512 1513 /** 1514 * drm_sched_wqueue_ready - Is the scheduler ready for submission 1515 * 1516 * @sched: scheduler instance 1517 * 1518 * Returns true if submission is ready 1519 */ 1520 bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched) 1521 { 1522 return sched->ready; 1523 } 1524 EXPORT_SYMBOL(drm_sched_wqueue_ready); 1525 1526 /** 1527 * drm_sched_wqueue_stop - stop scheduler submission 1528 * @sched: scheduler instance 1529 * 1530 * Stops the scheduler from pulling new jobs from entities. It also stops 1531 * freeing jobs automatically through drm_sched_backend_ops.free_job(). 1532 */ 1533 void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched) 1534 { 1535 WRITE_ONCE(sched->pause_submit, true); 1536 cancel_work_sync(&sched->work_run_job); 1537 cancel_work_sync(&sched->work_free_job); 1538 } 1539 EXPORT_SYMBOL(drm_sched_wqueue_stop); 1540 1541 /** 1542 * drm_sched_wqueue_start - start scheduler submission 1543 * @sched: scheduler instance 1544 * 1545 * Restarts the scheduler after drm_sched_wqueue_stop() has stopped it. 1546 * 1547 * This function is not necessary for 'conventional' startup. The scheduler is 1548 * fully operational after drm_sched_init() succeeded. 1549 */ 1550 void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched) 1551 { 1552 WRITE_ONCE(sched->pause_submit, false); 1553 queue_work(sched->submit_wq, &sched->work_run_job); 1554 queue_work(sched->submit_wq, &sched->work_free_job); 1555 } 1556 EXPORT_SYMBOL(drm_sched_wqueue_start); 1557