// SPDX-License-Identifier: GPL-2.0 or MIT
/* Copyright 2023 Collabora ltd. */

#include <drm/drm_drv.h>
#include <drm/drm_exec.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
#include <drm/gpu_scheduler.h>
#include <drm/panthor_drm.h>

#include <linux/build_bug.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dma-resv.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/iosys-map.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include "panthor_devfreq.h"
#include "panthor_device.h"
#include "panthor_fw.h"
#include "panthor_gem.h"
#include "panthor_gpu.h"
#include "panthor_heap.h"
#include "panthor_mmu.h"
#include "panthor_regs.h"
#include "panthor_sched.h"

/**
 * DOC: Scheduler
 *
 * Mali CSF hardware adopts a firmware-assisted scheduling model, where
 * the firmware takes care of scheduling aspects, to some extent.
 *
 * Scheduling happens at the scheduling-group level; each group contains
 * 1 to N queues (N is FW/hardware dependent, and exposed through the
 * firmware interface). Each queue is assigned a command stream ring
 * buffer, which serves as a way to get jobs submitted to the GPU, among
 * other things.
 *
 * The firmware can schedule a maximum of M groups (M is FW/hardware
 * dependent, and exposed through the firmware interface). Past this
 * maximum number of groups, the kernel must take care of rotating the
 * groups passed to the firmware so every group gets a chance to have its
 * queues scheduled for execution.
 *
 * The current implementation only supports kernel-mode queues. In other
 * terms, userspace doesn't have access to the ring-buffer. Instead,
 * userspace passes indirect command stream buffers that are called from
 * the queue ring-buffer by the kernel using a pre-defined sequence of
 * command stream instructions to ensure the userspace driver always gets
 * consistent results (cache maintenance, synchronization, ...).
 *
 * We rely on the drm_gpu_scheduler framework to deal with job
 * dependencies and submission. Like any other driver dealing with a
 * FW-scheduler, we use the 1:1 entity:scheduler mode, such that each
 * entity has its own job scheduler. When a job is ready to be executed
 * (all its dependencies are met), it is pushed to the appropriate
 * queue ring-buffer, and the group is scheduled for execution if it
 * wasn't already active.
 *
 * Kernel-side group scheduling is timeslice-based. When there are fewer
 * groups than slots, the periodic tick is disabled and we just let the
 * FW schedule the active groups. When there are more groups than slots,
 * we give each group a chance to execute stuff for a given amount of
 * time, and then re-evaluate and pick new groups to schedule. The group
 * selection algorithm is based on priority+round-robin.
 *
 * Even though user-mode queues are out of scope right now, the current
 * design takes them into account by avoiding any guess on the
 * group/queue state that would be based on information we wouldn't have
 * if userspace was in charge of the ring-buffer. That's also one of the
 * reasons we don't do 'cooperative' scheduling (encoding FW group slot
 * reservation as dma_fence that would be returned from the
 * drm_gpu_scheduler::prepare_job() hook, and treating group rotation as
 * a queue of waiters, ordered by job submission order). This approach
 * would work for kernel-mode queues, but would make user-mode queues a
 * lot more complicated to retrofit.
 */
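/*
 * For illustration only (editorial sketch, not driver code, with made-up
 * names): the priority+round-robin selection mentioned above boils down to
 * walking per-priority runnable lists from highest to lowest priority, and
 * rotating the list a picked group came from so its siblings get a turn on
 * the next tick:
 *
 *	struct toy_group { struct list_head node; };
 *	static struct list_head runnable[4]; // index 3 = highest priority
 *
 *	static struct toy_group *pick_next_group(void)
 *	{
 *		for (int prio = 3; prio >= 0; prio--) {
 *			struct toy_group *grp;
 *
 *			if (list_empty(&runnable[prio]))
 *				continue;
 *
 *			grp = list_first_entry(&runnable[prio],
 *					       struct toy_group, node);
 *			// Rotate: requeue at the tail so the next pick
 *			// favors the other groups of this priority.
 *			list_move_tail(&grp->node, &runnable[prio]);
 *			return grp;
 *		}
 *
 *		return NULL;
 *	}
 */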
#define JOB_TIMEOUT_MS 5000

#define MAX_CSG_PRIO 0xf

#define NUM_INSTRS_PER_CACHE_LINE (64 / sizeof(u64))
#define MAX_INSTRS_PER_JOB 24

struct panthor_group;

/**
 * struct panthor_csg_slot - Command stream group slot
 *
 * This represents a FW slot for a scheduling group.
 */
struct panthor_csg_slot {
	/** @group: Scheduling group bound to this slot. */
	struct panthor_group *group;

	/** @priority: Group priority. */
	u8 priority;

	/**
	 * @idle: True if the group bound to this slot is idle.
	 *
	 * A group is idle when it has nothing waiting for execution on
	 * all its queues, or when queues are blocked waiting for something
	 * to happen (synchronization object).
	 */
	bool idle;
};

/**
 * enum panthor_csg_priority - Group priority
 */
enum panthor_csg_priority {
	/** @PANTHOR_CSG_PRIORITY_LOW: Low priority group. */
	PANTHOR_CSG_PRIORITY_LOW = 0,

	/** @PANTHOR_CSG_PRIORITY_MEDIUM: Medium priority group. */
	PANTHOR_CSG_PRIORITY_MEDIUM,

	/** @PANTHOR_CSG_PRIORITY_HIGH: High priority group. */
	PANTHOR_CSG_PRIORITY_HIGH,

	/**
	 * @PANTHOR_CSG_PRIORITY_RT: Real-time priority group.
	 *
	 * Real-time priority allows one to preempt scheduling of other
	 * non-real-time groups. When such a group becomes executable,
	 * it will evict the group with the lowest non-rt priority if
	 * there's no free group slot available.
	 */
	PANTHOR_CSG_PRIORITY_RT,

	/** @PANTHOR_CSG_PRIORITY_COUNT: Number of priority levels. */
	PANTHOR_CSG_PRIORITY_COUNT,
};

/**
 * struct panthor_scheduler - Object used to manage the scheduler
 */
struct panthor_scheduler {
	/** @ptdev: Device. */
	struct panthor_device *ptdev;

	/**
	 * @wq: Workqueue used by our internal scheduler logic and
	 * drm_gpu_scheduler.
	 *
	 * Used for the scheduler tick, group updates or other kinds of FW
	 * event processing that can't be handled in the threaded interrupt
	 * path. Also passed to the drm_gpu_scheduler instances embedded
	 * in panthor_queue.
	 */
	struct workqueue_struct *wq;

	/**
	 * @heap_alloc_wq: Workqueue used to schedule tiler_oom works.
	 *
	 * We have a queue dedicated to heap chunk allocation works to avoid
	 * blocking the rest of the scheduler if the allocation tries to
	 * reclaim memory.
	 */
	struct workqueue_struct *heap_alloc_wq;

	/** @tick_work: Work executed on a scheduling tick. */
	struct delayed_work tick_work;

	/**
	 * @sync_upd_work: Work used to process synchronization object updates.
	 *
	 * We use this work to unblock queues/groups that were waiting on a
	 * synchronization object.
	 */
	struct work_struct sync_upd_work;
	/**
	 * @fw_events_work: Work used to process FW events outside the
	 * interrupt path.
	 *
	 * Even if the interrupt is threaded, we need any event processing
	 * that requires taking the panthor_scheduler::lock to be processed
	 * outside the interrupt path so we don't block the tick logic when
	 * it calls panthor_fw_{csg,wait}_wait_acks(). Since most of the
	 * event processing requires taking this lock, we just delegate all
	 * FW event processing to the scheduler workqueue.
	 */
	struct work_struct fw_events_work;

	/**
	 * @fw_events: Bitmask encoding pending FW events.
	 */
	atomic_t fw_events;

	/**
	 * @resched_target: When the next tick should occur.
	 *
	 * Expressed in jiffies.
	 */
	u64 resched_target;

	/**
	 * @last_tick: When the last tick occurred.
	 *
	 * Expressed in jiffies.
	 */
	u64 last_tick;

	/** @tick_period: Tick period in jiffies. */
	u64 tick_period;

	/**
	 * @lock: Lock protecting access to all the scheduler fields.
	 *
	 * Should be taken in the tick work, the irq handler, and anywhere
	 * the @groups fields are touched.
	 */
	struct mutex lock;

	/** @groups: Various lists used to classify groups. */
	struct {
		/**
		 * @runnable: Runnable group lists.
		 *
		 * When a group has queues that want to execute something,
		 * its panthor_group::run_node should be inserted here.
		 *
		 * One list per-priority.
		 */
		struct list_head runnable[PANTHOR_CSG_PRIORITY_COUNT];

		/**
		 * @idle: Idle group lists.
		 *
		 * When all queues of a group are idle (either because they
		 * have nothing to execute, or because they are blocked), the
		 * panthor_group::run_node field should be inserted here.
		 *
		 * One list per-priority.
		 */
		struct list_head idle[PANTHOR_CSG_PRIORITY_COUNT];

		/**
		 * @waiting: List of groups whose queues are blocked on a
		 * synchronization object.
		 *
		 * Insert panthor_group::wait_node here when a group is
		 * waiting for synchronization objects to be signaled.
		 *
		 * This list is evaluated in the @sync_upd_work work.
		 */
		struct list_head waiting;
	} groups;

	/**
	 * @csg_slots: FW command stream group slots.
	 */
	struct panthor_csg_slot csg_slots[MAX_CSGS];

	/** @csg_slot_count: Number of command stream group slots exposed by the FW. */
	u32 csg_slot_count;

	/** @cs_slot_count: Number of command stream slots per group slot exposed by the FW. */
	u32 cs_slot_count;

	/** @as_slot_count: Number of address space slots supported by the MMU. */
	u32 as_slot_count;

	/** @used_csg_slot_count: Number of command stream group slots currently used. */
	u32 used_csg_slot_count;

	/** @sb_slot_count: Number of scoreboard slots. */
	u32 sb_slot_count;

	/**
	 * @might_have_idle_groups: True if an active group might have become idle.
	 *
	 * This will force a tick, so other runnable groups can be scheduled if one
	 * or more active groups became idle.
	 */
	bool might_have_idle_groups;

	/** @pm: Power management related fields. */
	struct {
		/** @has_ref: True if the scheduler owns a runtime PM reference. */
		bool has_ref;
	} pm;

	/** @reset: Reset related fields. */
	struct {
		/** @lock: Lock protecting the other reset fields. */
		struct mutex lock;

		/**
		 * @in_progress: True if a reset is in progress.
		 *
		 * Set to true in panthor_sched_pre_reset() and back to false in
		 * panthor_sched_post_reset().
		 */
		atomic_t in_progress;

		/**
		 * @stopped_groups: List containing all groups that were stopped
		 * before a reset.
		 *
		 * Insert panthor_group::run_node in the pre_reset path.
		 */
		struct list_head stopped_groups;
	} reset;
};

/**
 * struct panthor_syncobj_32b - 32-bit FW synchronization object
 */
struct panthor_syncobj_32b {
	/** @seqno: Sequence number. */
	u32 seqno;

	/**
	 * @status: Status.
	 *
	 * Not zero on failure.
	 */
	u32 status;
};

/**
 * struct panthor_syncobj_64b - 64-bit FW synchronization object
 */
struct panthor_syncobj_64b {
	/** @seqno: Sequence number. */
	u64 seqno;

	/**
	 * @status: Status.
	 *
	 * Not zero on failure.
	 */
	u32 status;

	/** @pad: MBZ. */
	u32 pad;
};

/**
 * struct panthor_queue - Execution queue
 */
struct panthor_queue {
	/** @scheduler: DRM scheduler used for this queue. */
	struct drm_gpu_scheduler scheduler;

	/** @entity: DRM scheduling entity used for this queue. */
	struct drm_sched_entity entity;

	/**
	 * @remaining_time: Time remaining before the job timeout expires.
	 *
	 * The job timeout is suspended when the queue is not scheduled by the
	 * FW. Every time we suspend the timer, we need to save the remaining
	 * time so we can restore it later on.
	 */
	unsigned long remaining_time;

	/** @timeout_suspended: True if the job timeout was suspended. */
	bool timeout_suspended;

	/**
	 * @doorbell_id: Doorbell assigned to this queue.
	 *
	 * Right now, all queues of a group share the same doorbell, whose ID
	 * is set to group_slot + 1 when the group is assigned a slot. But we
	 * might decide to provide fine-grained doorbell assignment at some
	 * point, so we don't have to wake up all queues in a group every
	 * time one of them is updated.
	 */
	u8 doorbell_id;

	/**
	 * @priority: Priority of the queue inside the group.
	 *
	 * Must be less than 16 (only 4 bits available).
	 */
	u8 priority;
#define CSF_MAX_QUEUE_PRIO	GENMASK(3, 0)

	/** @ringbuf: Command stream ring-buffer. */
	struct panthor_kernel_bo *ringbuf;

	/** @iface: Firmware interface. */
	struct {
		/** @mem: FW memory allocated for this interface. */
		struct panthor_kernel_bo *mem;

		/** @input: Input interface. */
		struct panthor_fw_ringbuf_input_iface *input;

		/** @output: Output interface. */
		const struct panthor_fw_ringbuf_output_iface *output;

		/** @input_fw_va: FW virtual address of the input interface buffer. */
		u32 input_fw_va;

		/** @output_fw_va: FW virtual address of the output interface buffer. */
		u32 output_fw_va;
	} iface;

	/**
	 * @syncwait: Stores information about the synchronization object this
	 * queue is waiting on.
	 */
	struct {
		/** @gpu_va: GPU address of the synchronization object. */
		u64 gpu_va;

		/** @ref: Reference value to compare against. */
		u64 ref;

		/** @gt: True if this is a greater-than test. */
		bool gt;

		/** @sync64: True if this is a 64-bit sync object. */
		bool sync64;

		/** @obj: Buffer object holding the synchronization object. */
		struct drm_gem_object *obj;

		/** @offset: Offset of the synchronization object inside @obj. */
		u64 offset;

		/**
		 * @kmap: Kernel mapping of the buffer object holding the
		 * synchronization object.
		 */
		void *kmap;
	} syncwait;

	/** @fence_ctx: Fence context fields. */
	struct {
		/** @lock: Used to protect access to all fences allocated by this context. */
		spinlock_t lock;

		/**
		 * @id: Fence context ID.
		 *
		 * Allocated with dma_fence_context_alloc().
		 */
		u64 id;

		/** @seqno: Sequence number of the last initialized fence. */
		atomic64_t seqno;

		/**
		 * @last_fence: Fence of the last submitted job.
		 *
		 * We return this fence when we get an empty command stream.
		 * This way, we are guaranteed that all earlier jobs have
		 * completed when drm_sched_job::s_fence::finished signals,
		 * without having to feed the CS ring buffer with a dummy job
		 * that only signals the fence.
		 */
		struct dma_fence *last_fence;

		/**
		 * @in_flight_jobs: List containing all in-flight jobs.
		 *
		 * Used to keep track of and signal panthor_job::done_fence
		 * when the synchronization object attached to the queue is
		 * signaled.
		 */
		struct list_head in_flight_jobs;
	} fence_ctx;

	/** @profiling: Job profiling data slots and access information. */
	struct {
		/** @slots: Kernel BO holding the slots. */
		struct panthor_kernel_bo *slots;

		/** @slot_count: Number of jobs the ringbuffer can hold at once. */
		u32 slot_count;

		/** @seqno: Index of the next available profiling information slot. */
		u32 seqno;
	} profiling;
};

/**
 * enum panthor_group_state - Scheduling group state.
 */
enum panthor_group_state {
	/** @PANTHOR_CS_GROUP_CREATED: Group was created, but not scheduled yet. */
	PANTHOR_CS_GROUP_CREATED,

	/** @PANTHOR_CS_GROUP_ACTIVE: Group is currently scheduled. */
	PANTHOR_CS_GROUP_ACTIVE,

	/**
	 * @PANTHOR_CS_GROUP_SUSPENDED: Group was scheduled at least once, but is
	 * inactive/suspended right now.
	 */
	PANTHOR_CS_GROUP_SUSPENDED,

	/**
	 * @PANTHOR_CS_GROUP_TERMINATED: Group was terminated.
	 *
	 * Can no longer be scheduled. The only allowed action is destruction.
	 */
	PANTHOR_CS_GROUP_TERMINATED,

	/**
	 * @PANTHOR_CS_GROUP_UNKNOWN_STATE: Group is in an unknown state.
	 *
	 * The FW returned an inconsistent state. The group is flagged unusable
	 * and can no longer be scheduled. The only allowed action is
	 * destruction.
	 *
	 * When that happens, we also schedule a FW reset, to start from a fresh
	 * state.
	 */
	PANTHOR_CS_GROUP_UNKNOWN_STATE,
};

/**
 * struct panthor_group - Scheduling group object
 */
struct panthor_group {
	/** @refcount: Reference count. */
	struct kref refcount;

	/** @ptdev: Device. */
	struct panthor_device *ptdev;

	/** @vm: VM bound to the group. */
	struct panthor_vm *vm;

	/** @compute_core_mask: Mask of shader cores that can be used for compute jobs. */
	u64 compute_core_mask;

	/** @fragment_core_mask: Mask of shader cores that can be used for fragment jobs. */
	u64 fragment_core_mask;

	/** @tiler_core_mask: Mask of tiler cores that can be used for tiler jobs. */
	u64 tiler_core_mask;

	/** @max_compute_cores: Maximum number of shader cores used for compute jobs. */
	u8 max_compute_cores;

	/** @max_fragment_cores: Maximum number of shader cores used for fragment jobs. */
	u8 max_fragment_cores;

	/** @max_tiler_cores: Maximum number of tiler cores used for tiler jobs. */
	u8 max_tiler_cores;

	/** @priority: Group priority (see enum panthor_csg_priority). */
	u8 priority;

	/** @blocked_queues: Bitmask reflecting the blocked queues. */
	u32 blocked_queues;

	/** @idle_queues: Bitmask reflecting the idle queues. */
	u32 idle_queues;

	/** @fatal_lock: Lock used to protect access to fatal fields. */
	spinlock_t fatal_lock;

	/** @fatal_queues: Bitmask reflecting the queues that hit a fatal exception. */
	u32 fatal_queues;

	/** @tiler_oom: Mask of queues that have a tiler OOM event to process. */
	atomic_t tiler_oom;

	/** @queue_count: Number of queues in this group. */
	u32 queue_count;

	/** @queues: Queues owned by this group. */
	struct panthor_queue *queues[MAX_CS_PER_CSG];

	/**
	 * @csg_id: ID of the FW group slot.
	 *
	 * -1 when the group is not scheduled/active.
	 */
	int csg_id;

	/**
	 * @destroyed: True when the group has been destroyed.
	 *
	 * If a group is destroyed it becomes useless: no further jobs can be submitted
	 * to its queues. We simply wait for all references to be dropped so we can
	 * release the group object.
	 */
	bool destroyed;

	/**
	 * @timedout: True when a timeout occurred on any of the queues owned by
	 * this group.
	 *
	 * Timeouts can be reported by drm_sched or by the FW. If a reset is required,
	 * and the group can't be suspended, this also leads to a timeout. In any case,
	 * any timeout situation is unrecoverable, and the group becomes useless. We
	 * simply wait for all references to be dropped so we can release the group
	 * object.
	 */
	bool timedout;

	/**
	 * @innocent: True when the group becomes unusable because the group suspension
	 * failed during a reset.
	 *
	 * Sometimes the FW was put in a bad state by other groups, causing the group
	 * suspension happening in the reset path to fail. In that case, we consider
	 * the group innocent.
	 */
	bool innocent;

	/**
	 * @syncobjs: Pool of per-queue synchronization objects.
	 *
	 * One sync object per queue. The position of the sync object is
	 * determined by the queue index.
	 */
	struct panthor_kernel_bo *syncobjs;

	/** @fdinfo: Per-file info exposed through /proc/<process>/fdinfo. */
	struct {
		/** @fdinfo.data: Total sampled values for jobs in queues from this group. */
		struct panthor_gpu_usage data;

		/**
		 * @fdinfo.lock: Spinlock to govern concurrent access from drm file's fdinfo
		 * callback and job post-completion processing function.
		 */
		spinlock_t lock;

		/** @fdinfo.kbo_sizes: Aggregate size of private kernel BOs held by the group. */
		size_t kbo_sizes;
	} fdinfo;

	/** @task_info: Info of current->group_leader that created the group. */
	struct {
		/** @task_info.pid: pid of current->group_leader. */
		pid_t pid;

		/** @task_info.comm: comm of current->group_leader. */
		char comm[TASK_COMM_LEN];
	} task_info;

	/** @state: Group state. */
	enum panthor_group_state state;

	/**
	 * @suspend_buf: Suspend buffer.
	 *
	 * Stores the state of the group and its queues when a group is suspended.
	 * Used at resume time to restore the group in its previous state.
	 *
	 * The size of the suspend buffer is exposed through the FW interface.
	 */
	struct panthor_kernel_bo *suspend_buf;

	/**
	 * @protm_suspend_buf: Protection mode suspend buffer.
	 *
	 * Stores the state of the group and its queues when a group that's in
	 * protection mode is suspended.
	 *
	 * Used at resume time to restore the group in its previous state.
	 *
	 * The size of the protection mode suspend buffer is exposed through the
	 * FW interface.
	 */
	struct panthor_kernel_bo *protm_suspend_buf;

	/** @sync_upd_work: Work used to check/signal job fences. */
	struct work_struct sync_upd_work;

	/** @tiler_oom_work: Work used to process tiler OOM events happening on this group. */
	struct work_struct tiler_oom_work;

	/** @term_work: Work used to finish the group termination procedure. */
	struct work_struct term_work;

	/**
	 * @release_work: Work used to release group resources.
	 *
	 * We need to postpone the group release to avoid a deadlock when
	 * the last ref is released in the tick work.
	 */
	struct work_struct release_work;

	/**
	 * @run_node: Node used to insert the group in the
	 * panthor_scheduler::groups::{runnable,idle} and
	 * panthor_scheduler::reset::stopped_groups lists.
	 */
	struct list_head run_node;

	/**
	 * @wait_node: Node used to insert the group in the
	 * panthor_scheduler::groups::waiting list.
	 */
	struct list_head wait_node;
};

struct panthor_job_profiling_data {
	struct {
		u64 before;
		u64 after;
	} cycles;

	struct {
		u64 before;
		u64 after;
	} time;
};

/**
 * group_queue_work() - Queue a group work
 * @group: Group to queue the work for.
 * @wname: Work name.
 *
 * Grabs a ref and queues a work item to the scheduler workqueue. If
 * the work was already queued, we release the reference we grabbed.
 *
 * Work callbacks must release the reference we grabbed here.
 */
#define group_queue_work(group, wname) \
	do { \
		group_get(group); \
		if (!queue_work((group)->ptdev->scheduler->wq, &(group)->wname ## _work)) \
			group_put(group); \
	} while (0)

/**
 * sched_queue_work() - Queue a scheduler work.
 * @sched: Scheduler object.
 * @wname: Work name.
 *
 * Conditionally queues a scheduler work if no reset is pending/in-progress.
 */
#define sched_queue_work(sched, wname) \
	do { \
		if (!atomic_read(&(sched)->reset.in_progress) && \
		    !panthor_device_reset_is_pending((sched)->ptdev)) \
			queue_work((sched)->wq, &(sched)->wname ## _work); \
	} while (0)

/**
 * sched_queue_delayed_work() - Queue a scheduler delayed work.
 * @sched: Scheduler object.
 * @wname: Work name.
 * @delay: Work delay in jiffies.
 *
 * Conditionally queues a scheduler delayed work if no reset is
 * pending/in-progress.
 */
#define sched_queue_delayed_work(sched, wname, delay) \
	do { \
		if (!atomic_read(&(sched)->reset.in_progress) && \
		    !panthor_device_reset_is_pending((sched)->ptdev)) \
			mod_delayed_work((sched)->wq, &(sched)->wname ## _work, delay); \
	} while (0)
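/*
 * Usage illustration for the helpers above (hypothetical call sites that
 * mirror how this file invokes them): @wname is the work field name without
 * the "_work" suffix, which the token pasting in the macros relies on.
 *
 *	// Queues group->sync_upd_work on the scheduler workqueue, with the
 *	// extra group ref released by the work callback.
 *	group_queue_work(group, sync_upd);
 *
 *	// Schedules an immediate tick (sched->tick_work), unless a reset
 *	// is pending or in progress.
 *	sched_queue_delayed_work(sched, tick, 0);
 */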
/*
 * We currently set the maximum number of groups per file to an arbitrarily
 * low value, but this can be updated if we need more.
 */
#define MAX_GROUPS_PER_POOL 128

/**
 * struct panthor_group_pool - Group pool
 *
 * Each file gets assigned a group pool.
 */
struct panthor_group_pool {
	/** @xa: Xarray used to manage group handles. */
	struct xarray xa;
};

/**
 * struct panthor_job - Used to manage a GPU job
 */
struct panthor_job {
	/** @base: Inherit from drm_sched_job. */
	struct drm_sched_job base;

	/** @refcount: Reference count. */
	struct kref refcount;

	/** @group: Group of the queue this job will be pushed to. */
	struct panthor_group *group;

	/** @queue_idx: Index of the queue inside @group. */
	u32 queue_idx;

	/** @call_info: Information about the userspace command stream call. */
	struct {
		/** @start: GPU address of the userspace command stream. */
		u64 start;

		/** @size: Size of the userspace command stream. */
		u32 size;

		/**
		 * @latest_flush: Flush ID at the time the userspace command
		 * stream was built.
		 *
		 * Needed for the flush reduction mechanism.
		 */
		u32 latest_flush;
	} call_info;

	/** @ringbuf: Position of this job in the ring buffer. */
	struct {
		/** @start: Start offset. */
		u64 start;

		/** @end: End offset. */
		u64 end;
	} ringbuf;

	/**
	 * @node: Used to insert the job in the panthor_queue::fence_ctx::in_flight_jobs
	 * list.
	 */
	struct list_head node;

	/** @done_fence: Fence signaled when the job is finished or cancelled. */
	struct dma_fence *done_fence;

	/** @profiling: Job profiling information. */
	struct {
		/** @mask: Current device job profiling enablement bitmask. */
		u32 mask;

		/** @slot: Job index in the profiling slots BO. */
		u32 slot;
	} profiling;
};

static void
panthor_queue_put_syncwait_obj(struct panthor_queue *queue)
{
	if (queue->syncwait.kmap) {
		struct iosys_map map = IOSYS_MAP_INIT_VADDR(queue->syncwait.kmap);

		drm_gem_vunmap(queue->syncwait.obj, &map);
		queue->syncwait.kmap = NULL;
	}

	drm_gem_object_put(queue->syncwait.obj);
	queue->syncwait.obj = NULL;
}

static void *
panthor_queue_get_syncwait_obj(struct panthor_group *group, struct panthor_queue *queue)
{
	struct panthor_device *ptdev = group->ptdev;
	struct panthor_gem_object *bo;
	struct iosys_map map;
	int ret;

	if (queue->syncwait.kmap)
		return queue->syncwait.kmap + queue->syncwait.offset;

	bo = panthor_vm_get_bo_for_va(group->vm,
				      queue->syncwait.gpu_va,
				      &queue->syncwait.offset);
	if (drm_WARN_ON(&ptdev->base, IS_ERR_OR_NULL(bo)))
		goto err_put_syncwait_obj;

	queue->syncwait.obj = &bo->base.base;
	ret = drm_gem_vmap(queue->syncwait.obj, &map);
	if (drm_WARN_ON(&ptdev->base, ret))
		goto err_put_syncwait_obj;

	queue->syncwait.kmap = map.vaddr;
	if (drm_WARN_ON(&ptdev->base, !queue->syncwait.kmap))
		goto err_put_syncwait_obj;

	return queue->syncwait.kmap + queue->syncwait.offset;

err_put_syncwait_obj:
	panthor_queue_put_syncwait_obj(queue);
	return NULL;
}
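/*
 * For reference, a minimal sketch (editorial, not the driver's actual
 * helper) of how the syncwait condition captured in panthor_queue::syncwait
 * can be evaluated against the CPU pointer returned by
 * panthor_queue_get_syncwait_obj(). It assumes the non-GT condition is a
 * less-or-equal test, per the CS_STATUS_WAIT_SYNC_COND_* encoding:
 *
 *	static bool queue_syncwait_is_signaled(struct panthor_queue *queue,
 *					       const void *sync_ptr)
 *	{
 *		u64 val;
 *
 *		if (queue->syncwait.sync64) {
 *			const struct panthor_syncobj_64b *obj = sync_ptr;
 *
 *			val = obj->seqno;
 *		} else {
 *			const struct panthor_syncobj_32b *obj = sync_ptr;
 *
 *			val = obj->seqno;
 *		}
 *
 *		return queue->syncwait.gt ? val > queue->syncwait.ref :
 *					    val <= queue->syncwait.ref;
 *	}
 */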
static void group_free_queue(struct panthor_group *group, struct panthor_queue *queue)
{
	if (IS_ERR_OR_NULL(queue))
		return;

	drm_sched_entity_destroy(&queue->entity);

	if (queue->scheduler.ops)
		drm_sched_fini(&queue->scheduler);

	panthor_queue_put_syncwait_obj(queue);

	panthor_kernel_bo_destroy(queue->ringbuf);
	panthor_kernel_bo_destroy(queue->iface.mem);
	panthor_kernel_bo_destroy(queue->profiling.slots);

	/* Release the last_fence we were holding, if any. */
	dma_fence_put(queue->fence_ctx.last_fence);

	kfree(queue);
}

static void group_release_work(struct work_struct *work)
{
	struct panthor_group *group = container_of(work,
						   struct panthor_group,
						   release_work);
	u32 i;

	for (i = 0; i < group->queue_count; i++)
		group_free_queue(group, group->queues[i]);

	panthor_kernel_bo_destroy(group->suspend_buf);
	panthor_kernel_bo_destroy(group->protm_suspend_buf);
	panthor_kernel_bo_destroy(group->syncobjs);

	panthor_vm_put(group->vm);
	kfree(group);
}

static void group_release(struct kref *kref)
{
	struct panthor_group *group = container_of(kref,
						   struct panthor_group,
						   refcount);
	struct panthor_device *ptdev = group->ptdev;

	drm_WARN_ON(&ptdev->base, group->csg_id >= 0);
	drm_WARN_ON(&ptdev->base, !list_empty(&group->run_node));
	drm_WARN_ON(&ptdev->base, !list_empty(&group->wait_node));

	queue_work(panthor_cleanup_wq, &group->release_work);
}

static void group_put(struct panthor_group *group)
{
	if (group)
		kref_put(&group->refcount, group_release);
}

static struct panthor_group *
group_get(struct panthor_group *group)
{
	if (group)
		kref_get(&group->refcount);

	return group;
}

/**
 * group_bind_locked() - Bind a group to a group slot
 * @group: Group.
 * @csg_id: Slot.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int
group_bind_locked(struct panthor_group *group, u32 csg_id)
{
	struct panthor_device *ptdev = group->ptdev;
	struct panthor_csg_slot *csg_slot;
	int ret;

	lockdep_assert_held(&ptdev->scheduler->lock);

	if (drm_WARN_ON(&ptdev->base, group->csg_id != -1 || csg_id >= MAX_CSGS ||
			ptdev->scheduler->csg_slots[csg_id].group))
		return -EINVAL;

	ret = panthor_vm_active(group->vm);
	if (ret)
		return ret;

	csg_slot = &ptdev->scheduler->csg_slots[csg_id];
	group_get(group);
	group->csg_id = csg_id;

	/* Dummy doorbell allocation: doorbell is assigned to the group and
	 * all queues use the same doorbell.
	 *
	 * TODO: Implement LRU-based doorbell assignment, so the most often
	 * updated queues get their own doorbell, thus avoiding useless checks
	 * on queues belonging to the same group that are rarely updated.
	 */
	for (u32 i = 0; i < group->queue_count; i++)
		group->queues[i]->doorbell_id = csg_id + 1;

	csg_slot->group = group;

	return 0;
}

/**
 * group_unbind_locked() - Unbind a group from a slot.
 * @group: Group to unbind.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int
group_unbind_locked(struct panthor_group *group)
{
	struct panthor_device *ptdev = group->ptdev;
	struct panthor_csg_slot *slot;

	lockdep_assert_held(&ptdev->scheduler->lock);

	if (drm_WARN_ON(&ptdev->base, group->csg_id < 0 || group->csg_id >= MAX_CSGS))
		return -EINVAL;

	if (drm_WARN_ON(&ptdev->base, group->state == PANTHOR_CS_GROUP_ACTIVE))
		return -EINVAL;

	slot = &ptdev->scheduler->csg_slots[group->csg_id];
	panthor_vm_idle(group->vm);
	group->csg_id = -1;

	/* Tiler OOM events will be re-issued next time the group is scheduled. */
	atomic_set(&group->tiler_oom, 0);
	cancel_work(&group->tiler_oom_work);

	for (u32 i = 0; i < group->queue_count; i++)
		group->queues[i]->doorbell_id = -1;

	slot->group = NULL;

	group_put(group);
	return 0;
}

/**
 * cs_slot_prog_locked() - Program a queue slot
 * @ptdev: Device.
 * @csg_id: Group slot ID.
 * @cs_id: Queue slot ID.
 *
 * Program a queue slot with the queue information so things can start being
 * executed on this queue.
 *
 * The group slot must have a group bound to it already (group_bind_locked()).
 */
static void
cs_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
{
	struct panthor_queue *queue = ptdev->scheduler->csg_slots[csg_id].group->queues[cs_id];
	struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);

	lockdep_assert_held(&ptdev->scheduler->lock);

	queue->iface.input->extract = queue->iface.output->extract;
	drm_WARN_ON(&ptdev->base, queue->iface.input->insert < queue->iface.input->extract);

	cs_iface->input->ringbuf_base = panthor_kernel_bo_gpuva(queue->ringbuf);
	cs_iface->input->ringbuf_size = panthor_kernel_bo_size(queue->ringbuf);
	cs_iface->input->ringbuf_input = queue->iface.input_fw_va;
	cs_iface->input->ringbuf_output = queue->iface.output_fw_va;
	cs_iface->input->config = CS_CONFIG_PRIORITY(queue->priority) |
				  CS_CONFIG_DOORBELL(queue->doorbell_id);
	cs_iface->input->ack_irq_mask = ~0;
	panthor_fw_update_reqs(cs_iface, req,
			       CS_IDLE_SYNC_WAIT |
			       CS_IDLE_EMPTY |
			       CS_STATE_START |
			       CS_EXTRACT_EVENT,
			       CS_IDLE_SYNC_WAIT |
			       CS_IDLE_EMPTY |
			       CS_STATE_MASK |
			       CS_EXTRACT_EVENT);
	if (queue->iface.input->insert != queue->iface.input->extract && queue->timeout_suspended) {
		drm_sched_resume_timeout(&queue->scheduler, queue->remaining_time);
		queue->timeout_suspended = false;
	}
}

/**
 * cs_slot_reset_locked() - Reset a queue slot
 * @ptdev: Device.
 * @csg_id: Group slot.
 * @cs_id: Queue slot.
 *
 * Change the queue slot state to STOP and suspend the queue timeout if
 * the queue is not blocked.
 *
 * The group slot must have a group bound to it (group_bind_locked()).
 */
static int
cs_slot_reset_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
{
	struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
	struct panthor_group *group = ptdev->scheduler->csg_slots[csg_id].group;
	struct panthor_queue *queue = group->queues[cs_id];

	lockdep_assert_held(&ptdev->scheduler->lock);

	panthor_fw_update_reqs(cs_iface, req,
			       CS_STATE_STOP,
			       CS_STATE_MASK);

	/* If the queue is blocked, we want to keep the timeout running, so
	 * we can detect unbounded waits and kill the group when that happens.
	 */
	if (!(group->blocked_queues & BIT(cs_id)) && !queue->timeout_suspended) {
		queue->remaining_time = drm_sched_suspend_timeout(&queue->scheduler);
		queue->timeout_suspended = true;
		WARN_ON(queue->remaining_time > msecs_to_jiffies(JOB_TIMEOUT_MS));
	}

	return 0;
}

/**
 * csg_slot_sync_priority_locked() - Synchronize the group slot priority
 * @ptdev: Device.
 * @csg_id: Group slot ID.
 *
 * Group slot priority update happens asynchronously.
 * When we receive a %CSG_ENDPOINT_CONFIG ack, we know the update is
 * effective, and can reflect it to our panthor_csg_slot object.
 */
static void
csg_slot_sync_priority_locked(struct panthor_device *ptdev, u32 csg_id)
{
	struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
	struct panthor_fw_csg_iface *csg_iface;

	lockdep_assert_held(&ptdev->scheduler->lock);

	csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
	csg_slot->priority = (csg_iface->input->endpoint_req & CSG_EP_REQ_PRIORITY_MASK) >> 28;
}

/**
 * cs_slot_sync_queue_state_locked() - Synchronize the queue slot state
 * @ptdev: Device.
 * @csg_id: Group slot.
 * @cs_id: Queue slot.
 *
 * Queue state is updated on group suspend or STATUS_UPDATE event.
 */
static void
cs_slot_sync_queue_state_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
{
	struct panthor_group *group = ptdev->scheduler->csg_slots[csg_id].group;
	struct panthor_queue *queue = group->queues[cs_id];
	struct panthor_fw_cs_iface *cs_iface =
		panthor_fw_get_cs_iface(group->ptdev, csg_id, cs_id);
	u32 status_wait_cond;

	switch (cs_iface->output->status_blocked_reason) {
	case CS_STATUS_BLOCKED_REASON_UNBLOCKED:
		if (queue->iface.input->insert == queue->iface.output->extract &&
		    cs_iface->output->status_scoreboards == 0)
			group->idle_queues |= BIT(cs_id);
		break;

	case CS_STATUS_BLOCKED_REASON_SYNC_WAIT:
		if (list_empty(&group->wait_node)) {
			list_move_tail(&group->wait_node,
				       &group->ptdev->scheduler->groups.waiting);
		}

		/* The queue is only blocked if there's no deferred operation
		 * pending, which can be checked through the scoreboard status.
		 */
		if (!cs_iface->output->status_scoreboards)
			group->blocked_queues |= BIT(cs_id);

		queue->syncwait.gpu_va = cs_iface->output->status_wait_sync_ptr;
		queue->syncwait.ref = cs_iface->output->status_wait_sync_value;
		status_wait_cond = cs_iface->output->status_wait & CS_STATUS_WAIT_SYNC_COND_MASK;
		queue->syncwait.gt = status_wait_cond == CS_STATUS_WAIT_SYNC_COND_GT;
		if (cs_iface->output->status_wait & CS_STATUS_WAIT_SYNC_64B) {
			u64 sync_val_hi = cs_iface->output->status_wait_sync_value_hi;

			queue->syncwait.sync64 = true;
			queue->syncwait.ref |= sync_val_hi << 32;
		} else {
			queue->syncwait.sync64 = false;
		}
		break;

	default:
		/* Other reasons are not blocking. Consider the queue as runnable
		 * in those cases.
		 */
		break;
	}
}

static void
csg_slot_sync_queues_state_locked(struct panthor_device *ptdev, u32 csg_id)
{
	struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
	struct panthor_group *group = csg_slot->group;
	u32 i;

	lockdep_assert_held(&ptdev->scheduler->lock);

	group->idle_queues = 0;
	group->blocked_queues = 0;

	for (i = 0; i < group->queue_count; i++) {
		if (group->queues[i])
			cs_slot_sync_queue_state_locked(ptdev, csg_id, i);
	}
}

static void
csg_slot_sync_state_locked(struct panthor_device *ptdev, u32 csg_id)
{
	struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
	struct panthor_fw_csg_iface *csg_iface;
	struct panthor_group *group;
	enum panthor_group_state new_state, old_state;
	u32 csg_state;

	lockdep_assert_held(&ptdev->scheduler->lock);

	csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
	group = csg_slot->group;

	if (!group)
		return;

	old_state = group->state;
	csg_state = csg_iface->output->ack & CSG_STATE_MASK;
	switch (csg_state) {
	case CSG_STATE_START:
	case CSG_STATE_RESUME:
		new_state = PANTHOR_CS_GROUP_ACTIVE;
		break;
	case CSG_STATE_TERMINATE:
		new_state = PANTHOR_CS_GROUP_TERMINATED;
		break;
	case CSG_STATE_SUSPEND:
		new_state = PANTHOR_CS_GROUP_SUSPENDED;
		break;
	default:
		/* The unknown state might be caused by a FW state corruption,
		 * which means the group metadata can't be trusted anymore, and
		 * the SUSPEND operation might propagate the corruption to the
		 * suspend buffers. Flag the group state as unknown to make
		 * sure it's unusable after that point.
		 */
		drm_err(&ptdev->base, "Invalid state on CSG %d (state=%d)",
			csg_id, csg_state);
		new_state = PANTHOR_CS_GROUP_UNKNOWN_STATE;
		break;
	}

	if (old_state == new_state)
		return;

	/* The unknown state might be caused by a FW issue, reset the FW to
	 * take a fresh start.
	 */
	if (new_state == PANTHOR_CS_GROUP_UNKNOWN_STATE)
		panthor_device_schedule_reset(ptdev);

	if (new_state == PANTHOR_CS_GROUP_SUSPENDED)
		csg_slot_sync_queues_state_locked(ptdev, csg_id);

	if (old_state == PANTHOR_CS_GROUP_ACTIVE) {
		u32 i;

		/* Reset the queue slots so we start from a clean
		 * state when starting/resuming a new group on this
		 * CSG slot. No wait needed here, and no doorbell
		 * ring either, since the CS slot will only be re-used
		 * on the next CSG start operation.
		 */
1283 */ 1284 for (i = 0; i < group->queue_count; i++) { 1285 if (group->queues[i]) 1286 cs_slot_reset_locked(ptdev, csg_id, i); 1287 } 1288 } 1289 1290 group->state = new_state; 1291 } 1292 1293 static int 1294 csg_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 priority) 1295 { 1296 struct panthor_fw_csg_iface *csg_iface; 1297 struct panthor_csg_slot *csg_slot; 1298 struct panthor_group *group; 1299 u32 queue_mask = 0, i; 1300 1301 lockdep_assert_held(&ptdev->scheduler->lock); 1302 1303 if (priority > MAX_CSG_PRIO) 1304 return -EINVAL; 1305 1306 if (drm_WARN_ON(&ptdev->base, csg_id >= MAX_CSGS)) 1307 return -EINVAL; 1308 1309 csg_slot = &ptdev->scheduler->csg_slots[csg_id]; 1310 group = csg_slot->group; 1311 if (!group || group->state == PANTHOR_CS_GROUP_ACTIVE) 1312 return 0; 1313 1314 csg_iface = panthor_fw_get_csg_iface(group->ptdev, csg_id); 1315 1316 for (i = 0; i < group->queue_count; i++) { 1317 if (group->queues[i]) { 1318 cs_slot_prog_locked(ptdev, csg_id, i); 1319 queue_mask |= BIT(i); 1320 } 1321 } 1322 1323 csg_iface->input->allow_compute = group->compute_core_mask; 1324 csg_iface->input->allow_fragment = group->fragment_core_mask; 1325 csg_iface->input->allow_other = group->tiler_core_mask; 1326 csg_iface->input->endpoint_req = CSG_EP_REQ_COMPUTE(group->max_compute_cores) | 1327 CSG_EP_REQ_FRAGMENT(group->max_fragment_cores) | 1328 CSG_EP_REQ_TILER(group->max_tiler_cores) | 1329 CSG_EP_REQ_PRIORITY(priority); 1330 csg_iface->input->config = panthor_vm_as(group->vm); 1331 1332 if (group->suspend_buf) 1333 csg_iface->input->suspend_buf = panthor_kernel_bo_gpuva(group->suspend_buf); 1334 else 1335 csg_iface->input->suspend_buf = 0; 1336 1337 if (group->protm_suspend_buf) { 1338 csg_iface->input->protm_suspend_buf = 1339 panthor_kernel_bo_gpuva(group->protm_suspend_buf); 1340 } else { 1341 csg_iface->input->protm_suspend_buf = 0; 1342 } 1343 1344 csg_iface->input->ack_irq_mask = ~0; 1345 panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, queue_mask); 1346 return 0; 1347 } 1348 1349 static void 1350 cs_slot_process_fatal_event_locked(struct panthor_device *ptdev, 1351 u32 csg_id, u32 cs_id) 1352 { 1353 struct panthor_scheduler *sched = ptdev->scheduler; 1354 struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id]; 1355 struct panthor_group *group = csg_slot->group; 1356 struct panthor_fw_cs_iface *cs_iface; 1357 u32 fatal; 1358 u64 info; 1359 1360 lockdep_assert_held(&sched->lock); 1361 1362 cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id); 1363 fatal = cs_iface->output->fatal; 1364 info = cs_iface->output->fatal_info; 1365 1366 if (group) { 1367 drm_warn(&ptdev->base, "CS_FATAL: pid=%d, comm=%s\n", 1368 group->task_info.pid, group->task_info.comm); 1369 1370 group->fatal_queues |= BIT(cs_id); 1371 } 1372 1373 if (CS_EXCEPTION_TYPE(fatal) == DRM_PANTHOR_EXCEPTION_CS_UNRECOVERABLE) { 1374 /* If this exception is unrecoverable, queue a reset, and make 1375 * sure we stop scheduling groups until the reset has happened. 
		panthor_device_schedule_reset(ptdev);
		cancel_delayed_work(&sched->tick_work);
	} else {
		sched_queue_delayed_work(sched, tick, 0);
	}

	drm_warn(&ptdev->base,
		 "CSG slot %d CS slot: %d\n"
		 "CS_FATAL.EXCEPTION_TYPE: 0x%x (%s)\n"
		 "CS_FATAL.EXCEPTION_DATA: 0x%x\n"
		 "CS_FATAL_INFO.EXCEPTION_DATA: 0x%llx\n",
		 csg_id, cs_id,
		 (unsigned int)CS_EXCEPTION_TYPE(fatal),
		 panthor_exception_name(ptdev, CS_EXCEPTION_TYPE(fatal)),
		 (unsigned int)CS_EXCEPTION_DATA(fatal),
		 info);
}

static void
cs_slot_process_fault_event_locked(struct panthor_device *ptdev,
				   u32 csg_id, u32 cs_id)
{
	struct panthor_scheduler *sched = ptdev->scheduler;
	struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
	struct panthor_group *group = csg_slot->group;
	struct panthor_queue *queue = group && cs_id < group->queue_count ?
				      group->queues[cs_id] : NULL;
	struct panthor_fw_cs_iface *cs_iface;
	u32 fault;
	u64 info;

	lockdep_assert_held(&sched->lock);

	cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
	fault = cs_iface->output->fault;
	info = cs_iface->output->fault_info;

	if (queue && CS_EXCEPTION_TYPE(fault) == DRM_PANTHOR_EXCEPTION_CS_INHERIT_FAULT) {
		u64 cs_extract = queue->iface.output->extract;
		struct panthor_job *job;

		spin_lock(&queue->fence_ctx.lock);
		list_for_each_entry(job, &queue->fence_ctx.in_flight_jobs, node) {
			if (cs_extract >= job->ringbuf.end)
				continue;

			if (cs_extract < job->ringbuf.start)
				break;

			dma_fence_set_error(job->done_fence, -EINVAL);
		}
		spin_unlock(&queue->fence_ctx.lock);
	}

	if (group) {
		drm_warn(&ptdev->base, "CS_FAULT: pid=%d, comm=%s\n",
			 group->task_info.pid, group->task_info.comm);
	}

	drm_warn(&ptdev->base,
		 "CSG slot %d CS slot: %d\n"
		 "CS_FAULT.EXCEPTION_TYPE: 0x%x (%s)\n"
		 "CS_FAULT.EXCEPTION_DATA: 0x%x\n"
		 "CS_FAULT_INFO.EXCEPTION_DATA: 0x%llx\n",
		 csg_id, cs_id,
		 (unsigned int)CS_EXCEPTION_TYPE(fault),
		 panthor_exception_name(ptdev, CS_EXCEPTION_TYPE(fault)),
		 (unsigned int)CS_EXCEPTION_DATA(fault),
		 info);
}

static int group_process_tiler_oom(struct panthor_group *group, u32 cs_id)
{
	struct panthor_device *ptdev = group->ptdev;
	struct panthor_scheduler *sched = ptdev->scheduler;
	u32 renderpasses_in_flight, pending_frag_count;
	struct panthor_heap_pool *heaps = NULL;
	u64 heap_address, new_chunk_va = 0;
	u32 vt_start, vt_end, frag_end;
	int ret, csg_id;

	mutex_lock(&sched->lock);
	csg_id = group->csg_id;
	if (csg_id >= 0) {
		struct panthor_fw_cs_iface *cs_iface;

		cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
		heaps = panthor_vm_get_heap_pool(group->vm, false);
		heap_address = cs_iface->output->heap_address;
		vt_start = cs_iface->output->heap_vt_start;
		vt_end = cs_iface->output->heap_vt_end;
		frag_end = cs_iface->output->heap_frag_end;
		renderpasses_in_flight = vt_start - frag_end;
		pending_frag_count = vt_end - frag_end;
	}
	mutex_unlock(&sched->lock);

	/* The group got scheduled out, we stop here. We will get a new tiler
	 * OOM event when it's scheduled again.
	 */
	if (unlikely(csg_id < 0))
		return 0;

	if (IS_ERR(heaps) || frag_end > vt_end || vt_end >= vt_start) {
		ret = -EINVAL;
	} else {
		/* We do the allocation without holding the scheduler lock to
		 * avoid blocking the scheduling.
		 */
		ret = panthor_heap_grow(heaps, heap_address,
					renderpasses_in_flight,
					pending_frag_count, &new_chunk_va);
	}

	/* If the heap context doesn't have memory for us, we want to let the
	 * FW try to reclaim memory by waiting for fragment jobs to land or by
	 * executing the tiler OOM exception handler, which is supposed to
	 * implement incremental rendering.
	 */
	if (ret && ret != -ENOMEM) {
		drm_warn(&ptdev->base, "Failed to extend the tiler heap\n");
		group->fatal_queues |= BIT(cs_id);
		sched_queue_delayed_work(sched, tick, 0);
		goto out_put_heap_pool;
	}

	mutex_lock(&sched->lock);
	csg_id = group->csg_id;
	if (csg_id >= 0) {
		struct panthor_fw_csg_iface *csg_iface;
		struct panthor_fw_cs_iface *cs_iface;

		csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
		cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);

		cs_iface->input->heap_start = new_chunk_va;
		cs_iface->input->heap_end = new_chunk_va;
		panthor_fw_update_reqs(cs_iface, req, cs_iface->output->ack, CS_TILER_OOM);
		panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, BIT(cs_id));
		panthor_fw_ring_csg_doorbells(ptdev, BIT(csg_id));
	}
	mutex_unlock(&sched->lock);

	/* We allocated a chunk, but couldn't link it to the heap
	 * context because the group was scheduled out while we were
	 * allocating memory. We need to return this chunk to the heap.
	 */
	if (unlikely(csg_id < 0 && new_chunk_va))
		panthor_heap_return_chunk(heaps, heap_address, new_chunk_va);

	ret = 0;

out_put_heap_pool:
	panthor_heap_pool_put(heaps);
	return ret;
}

static void group_tiler_oom_work(struct work_struct *work)
{
	struct panthor_group *group =
		container_of(work, struct panthor_group, tiler_oom_work);
	u32 tiler_oom = atomic_xchg(&group->tiler_oom, 0);

	while (tiler_oom) {
		u32 cs_id = ffs(tiler_oom) - 1;

		group_process_tiler_oom(group, cs_id);
		tiler_oom &= ~BIT(cs_id);
	}

	group_put(group);
}

static void
cs_slot_process_tiler_oom_event_locked(struct panthor_device *ptdev,
				       u32 csg_id, u32 cs_id)
{
	struct panthor_scheduler *sched = ptdev->scheduler;
	struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
	struct panthor_group *group = csg_slot->group;

	lockdep_assert_held(&sched->lock);

	if (drm_WARN_ON(&ptdev->base, !group))
		return;

	atomic_or(BIT(cs_id), &group->tiler_oom);

	/* We don't use group_queue_work() here because we want to queue the
	 * work item to the heap_alloc_wq.
	 */
	group_get(group);
	if (!queue_work(sched->heap_alloc_wq, &group->tiler_oom_work))
		group_put(group);
}

static bool cs_slot_process_irq_locked(struct panthor_device *ptdev,
				       u32 csg_id, u32 cs_id)
{
	struct panthor_fw_cs_iface *cs_iface;
	u32 req, ack, events;

	lockdep_assert_held(&ptdev->scheduler->lock);

	cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
	req = cs_iface->input->req;
	ack = cs_iface->output->ack;
	events = (req ^ ack) & CS_EVT_MASK;

	if (events & CS_FATAL)
		cs_slot_process_fatal_event_locked(ptdev, csg_id, cs_id);

	if (events & CS_FAULT)
		cs_slot_process_fault_event_locked(ptdev, csg_id, cs_id);

	if (events & CS_TILER_OOM)
		cs_slot_process_tiler_oom_event_locked(ptdev, csg_id, cs_id);

	/* We don't acknowledge the TILER_OOM event since its handling is
	 * deferred to a separate work.
	 */
	panthor_fw_update_reqs(cs_iface, req, ack, CS_FATAL | CS_FAULT);

	return (events & (CS_FAULT | CS_TILER_OOM)) != 0;
}

static void csg_slot_sync_idle_state_locked(struct panthor_device *ptdev, u32 csg_id)
{
	struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
	struct panthor_fw_csg_iface *csg_iface;

	lockdep_assert_held(&ptdev->scheduler->lock);

	csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
	csg_slot->idle = csg_iface->output->status_state & CSG_STATUS_STATE_IS_IDLE;
}

static void csg_slot_process_idle_event_locked(struct panthor_device *ptdev, u32 csg_id)
{
	struct panthor_scheduler *sched = ptdev->scheduler;

	lockdep_assert_held(&sched->lock);

	sched->might_have_idle_groups = true;

	/* Schedule a tick so we can evict idle groups and schedule non-idle
	 * ones. This will also update runtime PM and devfreq busy/idle states,
	 * so the device can lower its frequency or get suspended.
	 */
	sched_queue_delayed_work(sched, tick, 0);
}

static void csg_slot_sync_update_locked(struct panthor_device *ptdev,
					u32 csg_id)
{
	struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
	struct panthor_group *group = csg_slot->group;

	lockdep_assert_held(&ptdev->scheduler->lock);

	if (group)
		group_queue_work(group, sync_upd);

	sched_queue_work(ptdev->scheduler, sync_upd);
}

static void
csg_slot_process_progress_timer_event_locked(struct panthor_device *ptdev, u32 csg_id)
{
	struct panthor_scheduler *sched = ptdev->scheduler;
	struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
	struct panthor_group *group = csg_slot->group;

	lockdep_assert_held(&sched->lock);

	if (!drm_WARN_ON(&ptdev->base, !group)) {
		drm_warn(&ptdev->base, "CSG_PROGRESS_TIMER_EVENT: pid=%d, comm=%s\n",
			 group->task_info.pid, group->task_info.comm);

		group->timedout = true;
	}

	drm_warn(&ptdev->base, "CSG slot %d progress timeout\n", csg_id);

	sched_queue_delayed_work(sched, tick, 0);
}

static void sched_process_csg_irq_locked(struct panthor_device *ptdev, u32 csg_id)
{
	u32 req, ack, cs_irq_req, cs_irq_ack, cs_irqs, csg_events;
	struct panthor_fw_csg_iface *csg_iface;
	u32 ring_cs_db_mask = 0;

	lockdep_assert_held(&ptdev->scheduler->lock);

	if (drm_WARN_ON(&ptdev->base, csg_id >= ptdev->scheduler->csg_slot_count))
		return;

	csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
	req = READ_ONCE(csg_iface->input->req);
	ack = READ_ONCE(csg_iface->output->ack);
	cs_irq_req = READ_ONCE(csg_iface->output->cs_irq_req);
	cs_irq_ack = READ_ONCE(csg_iface->input->cs_irq_ack);
	csg_events = (req ^ ack) & CSG_EVT_MASK;

	/* There may not be any pending CSG/CS interrupts to process. */
	if (req == ack && cs_irq_req == cs_irq_ack)
		return;

	/* Immediately set the IRQ_ACK bits to be the same as the IRQ_REQ bits
	 * before examining the CS_ACK & CS_REQ bits. This ensures the host
	 * doesn't miss an interrupt for a CS in the race scenario where,
	 * whilst the host is servicing an interrupt for that CS, the firmware
	 * sends another interrupt for it.
	 */
	csg_iface->input->cs_irq_ack = cs_irq_req;

	panthor_fw_update_reqs(csg_iface, req, ack,
			       CSG_SYNC_UPDATE |
			       CSG_IDLE |
			       CSG_PROGRESS_TIMER_EVENT);

	if (csg_events & CSG_IDLE)
		csg_slot_process_idle_event_locked(ptdev, csg_id);

	if (csg_events & CSG_PROGRESS_TIMER_EVENT)
		csg_slot_process_progress_timer_event_locked(ptdev, csg_id);

	cs_irqs = cs_irq_req ^ cs_irq_ack;
	while (cs_irqs) {
		u32 cs_id = ffs(cs_irqs) - 1;

		if (cs_slot_process_irq_locked(ptdev, csg_id, cs_id))
			ring_cs_db_mask |= BIT(cs_id);

		cs_irqs &= ~BIT(cs_id);
	}

	if (csg_events & CSG_SYNC_UPDATE)
		csg_slot_sync_update_locked(ptdev, csg_id);

	if (ring_cs_db_mask)
		panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, ring_cs_db_mask);

	panthor_fw_ring_csg_doorbells(ptdev, BIT(csg_id));
}

static void sched_process_idle_event_locked(struct panthor_device *ptdev)
{
	struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);

	lockdep_assert_held(&ptdev->scheduler->lock);

	/* Acknowledge the idle event and schedule a tick. */
	panthor_fw_update_reqs(glb_iface, req, glb_iface->output->ack, GLB_IDLE);
	sched_queue_delayed_work(ptdev->scheduler, tick, 0);
}

/**
 * sched_process_global_irq_locked() - Process the scheduling part of a global IRQ
 * @ptdev: Device.
 */
static void sched_process_global_irq_locked(struct panthor_device *ptdev)
{
	struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
	u32 req, ack, evts;

	lockdep_assert_held(&ptdev->scheduler->lock);

	req = READ_ONCE(glb_iface->input->req);
	ack = READ_ONCE(glb_iface->output->ack);
	evts = (req ^ ack) & GLB_EVT_MASK;

	if (evts & GLB_IDLE)
		sched_process_idle_event_locked(ptdev);
}

static void process_fw_events_work(struct work_struct *work)
{
	struct panthor_scheduler *sched = container_of(work, struct panthor_scheduler,
						       fw_events_work);
	u32 events = atomic_xchg(&sched->fw_events, 0);
	struct panthor_device *ptdev = sched->ptdev;

	mutex_lock(&sched->lock);

	if (events & JOB_INT_GLOBAL_IF) {
		sched_process_global_irq_locked(ptdev);
		events &= ~JOB_INT_GLOBAL_IF;
	}

	while (events) {
		u32 csg_id = ffs(events) - 1;

		sched_process_csg_irq_locked(ptdev, csg_id);
		events &= ~BIT(csg_id);
	}

	mutex_unlock(&sched->lock);
}
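/*
 * A note on the req/ack protocol used by all the IRQ processing above:
 * events are bits the FW toggles in its output page (ack), while the host
 * owns the matching bits in the input page (req). A pending event is a bit
 * that differs between the two, and acknowledging means copying the FW bit
 * back into the host bit. Minimal sketch (made-up iface names):
 *
 *	u32 req = READ_ONCE(iface->input->req);
 *	u32 ack = READ_ONCE(iface->output->ack);
 *	u32 pending = (req ^ ack) & EVT_MASK; // set bits = events to handle
 *
 *	// ... handle events ...
 *
 *	// Ack by aligning our req bits with the FW's ack bits over the
 *	// handled mask, which is what the panthor_fw_update_reqs(iface,
 *	// req, ack, handled) calls above achieve.
 */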
/**
 * panthor_sched_report_fw_events() - Report FW events to the scheduler.
 * @ptdev: Device.
 * @events: Bitmask of FW events to report.
 */
void panthor_sched_report_fw_events(struct panthor_device *ptdev, u32 events)
{
	if (!ptdev->scheduler)
		return;

	atomic_or(events, &ptdev->scheduler->fw_events);
	sched_queue_work(ptdev->scheduler, fw_events);
}

static const char *fence_get_driver_name(struct dma_fence *fence)
{
	return "panthor";
}

static const char *queue_fence_get_timeline_name(struct dma_fence *fence)
{
	return "queue-fence";
}

static const struct dma_fence_ops panthor_queue_fence_ops = {
	.get_driver_name = fence_get_driver_name,
	.get_timeline_name = queue_fence_get_timeline_name,
};

struct panthor_csg_slots_upd_ctx {
	u32 update_mask;
	u32 timedout_mask;
	struct {
		u32 value;
		u32 mask;
	} requests[MAX_CSGS];
};

static void csgs_upd_ctx_init(struct panthor_csg_slots_upd_ctx *ctx)
{
	memset(ctx, 0, sizeof(*ctx));
}

static void csgs_upd_ctx_queue_reqs(struct panthor_device *ptdev,
				    struct panthor_csg_slots_upd_ctx *ctx,
				    u32 csg_id, u32 value, u32 mask)
{
	if (drm_WARN_ON(&ptdev->base, !mask) ||
	    drm_WARN_ON(&ptdev->base, csg_id >= ptdev->scheduler->csg_slot_count))
		return;

	ctx->requests[csg_id].value = (ctx->requests[csg_id].value & ~mask) | (value & mask);
	ctx->requests[csg_id].mask |= mask;
	ctx->update_mask |= BIT(csg_id);
}

static int csgs_upd_ctx_apply_locked(struct panthor_device *ptdev,
				     struct panthor_csg_slots_upd_ctx *ctx)
{
	struct panthor_scheduler *sched = ptdev->scheduler;
	u32 update_slots = ctx->update_mask;

	lockdep_assert_held(&sched->lock);

	if (!ctx->update_mask)
		return 0;

	while (update_slots) {
		struct panthor_fw_csg_iface *csg_iface;
		u32 csg_id = ffs(update_slots) - 1;

		update_slots &= ~BIT(csg_id);
		csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
		panthor_fw_update_reqs(csg_iface, req,
				       ctx->requests[csg_id].value,
				       ctx->requests[csg_id].mask);
	}

	panthor_fw_ring_csg_doorbells(ptdev, ctx->update_mask);

	update_slots = ctx->update_mask;
	while (update_slots) {
		struct panthor_fw_csg_iface *csg_iface;
		u32 csg_id = ffs(update_slots) - 1;
		u32 req_mask = ctx->requests[csg_id].mask, acked;
		int ret;

		update_slots &= ~BIT(csg_id);
		csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);

		ret = panthor_fw_csg_wait_acks(ptdev, csg_id, req_mask, &acked, 100);

		if (acked & CSG_ENDPOINT_CONFIG)
			csg_slot_sync_priority_locked(ptdev, csg_id);

		if (acked & CSG_STATE_MASK)
			csg_slot_sync_state_locked(ptdev, csg_id);

		if (acked & CSG_STATUS_UPDATE) {
			csg_slot_sync_queues_state_locked(ptdev, csg_id);
			csg_slot_sync_idle_state_locked(ptdev, csg_id);
		}

		if (ret && acked != req_mask &&
		    ((csg_iface->input->req ^ csg_iface->output->ack) & req_mask) != 0) {
			drm_err(&ptdev->base, "CSG %d update request timed out", csg_id);
			ctx->timedout_mask |= BIT(csg_id);
		}
	}

	if (ctx->timedout_mask)
		return -ETIMEDOUT;

	return 0;
}
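/*
 * Typical usage of the CSG slots update context (see tick_ctx_init() below
 * for a real call site; the CSG_STATE_SUSPEND request here is illustrative):
 * batch per-slot request updates, then apply them all at once, which rings
 * the doorbells and waits for every ack.
 *
 *	struct panthor_csg_slots_upd_ctx upd_ctx;
 *
 *	csgs_upd_ctx_init(&upd_ctx);
 *	csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
 *				CSG_STATE_SUSPEND, CSG_STATE_MASK);
 *	// ... queue requests for more slots ...
 *	if (csgs_upd_ctx_apply_locked(ptdev, &upd_ctx))
 *		panthor_device_schedule_reset(ptdev); // some slot timed out
 */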
u32 as_count; 1901 bool immediate_tick; 1902 u32 csg_upd_failed_mask; 1903 }; 1904 1905 static bool 1906 tick_ctx_is_full(const struct panthor_scheduler *sched, 1907 const struct panthor_sched_tick_ctx *ctx) 1908 { 1909 return ctx->group_count == sched->csg_slot_count; 1910 } 1911 1912 static bool 1913 group_is_idle(struct panthor_group *group) 1914 { 1915 struct panthor_device *ptdev = group->ptdev; 1916 u32 inactive_queues; 1917 1918 if (group->csg_id >= 0) 1919 return ptdev->scheduler->csg_slots[group->csg_id].idle; 1920 1921 inactive_queues = group->idle_queues | group->blocked_queues; 1922 return hweight32(inactive_queues) == group->queue_count; 1923 } 1924 1925 static bool 1926 group_can_run(struct panthor_group *group) 1927 { 1928 return group->state != PANTHOR_CS_GROUP_TERMINATED && 1929 group->state != PANTHOR_CS_GROUP_UNKNOWN_STATE && 1930 !group->destroyed && group->fatal_queues == 0 && 1931 !group->timedout; 1932 } 1933 1934 static void 1935 tick_ctx_pick_groups_from_list(const struct panthor_scheduler *sched, 1936 struct panthor_sched_tick_ctx *ctx, 1937 struct list_head *queue, 1938 bool skip_idle_groups, 1939 bool owned_by_tick_ctx) 1940 { 1941 struct panthor_group *group, *tmp; 1942 1943 if (tick_ctx_is_full(sched, ctx)) 1944 return; 1945 1946 list_for_each_entry_safe(group, tmp, queue, run_node) { 1947 u32 i; 1948 1949 if (!group_can_run(group)) 1950 continue; 1951 1952 if (skip_idle_groups && group_is_idle(group)) 1953 continue; 1954 1955 for (i = 0; i < ctx->as_count; i++) { 1956 if (ctx->vms[i] == group->vm) 1957 break; 1958 } 1959 1960 if (i == ctx->as_count && ctx->as_count == sched->as_slot_count) 1961 continue; 1962 1963 if (!owned_by_tick_ctx) 1964 group_get(group); 1965 1966 list_move_tail(&group->run_node, &ctx->groups[group->priority]); 1967 ctx->group_count++; 1968 if (group_is_idle(group)) 1969 ctx->idle_group_count++; 1970 1971 if (i == ctx->as_count) 1972 ctx->vms[ctx->as_count++] = group->vm; 1973 1974 if (ctx->min_priority > group->priority) 1975 ctx->min_priority = group->priority; 1976 1977 if (tick_ctx_is_full(sched, ctx)) 1978 return; 1979 } 1980 } 1981 1982 static void 1983 tick_ctx_insert_old_group(struct panthor_scheduler *sched, 1984 struct panthor_sched_tick_ctx *ctx, 1985 struct panthor_group *group, 1986 bool full_tick) 1987 { 1988 struct panthor_csg_slot *csg_slot = &sched->csg_slots[group->csg_id]; 1989 struct panthor_group *other_group; 1990 1991 if (!full_tick) { 1992 list_add_tail(&group->run_node, &ctx->old_groups[group->priority]); 1993 return; 1994 } 1995 1996 /* Rotate to make sure groups with lower CSG slot 1997 * priorities have a chance to get a higher CSG slot 1998 * priority next time they get picked. This priority 1999 * has an impact on resource request ordering, so it's 2000 * important to make sure we don't let one group starve 2001 * all other groups with the same group priority. 
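	 * For example (illustrative numbers), if groups A, B and C share the
	 * same group priority and held CSG slot priorities 5, 4 and 3, this
	 * insertion sorts the evicted list as C, B, A. Slot priorities are
	 * later handed out in list order, highest first, so the group that
	 * was at the bottom gets bumped up on the next apply.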
	 */
	list_for_each_entry(other_group,
			    &ctx->old_groups[csg_slot->group->priority],
			    run_node) {
		struct panthor_csg_slot *other_csg_slot = &sched->csg_slots[other_group->csg_id];

		if (other_csg_slot->priority > csg_slot->priority) {
			list_add_tail(&csg_slot->group->run_node, &other_group->run_node);
			return;
		}
	}

	list_add_tail(&group->run_node, &ctx->old_groups[group->priority]);
}

static void
tick_ctx_init(struct panthor_scheduler *sched,
	      struct panthor_sched_tick_ctx *ctx,
	      bool full_tick)
{
	struct panthor_device *ptdev = sched->ptdev;
	struct panthor_csg_slots_upd_ctx upd_ctx;
	int ret;
	u32 i;

	memset(ctx, 0, sizeof(*ctx));
	csgs_upd_ctx_init(&upd_ctx);

	ctx->min_priority = PANTHOR_CSG_PRIORITY_COUNT;
	for (i = 0; i < ARRAY_SIZE(ctx->groups); i++) {
		INIT_LIST_HEAD(&ctx->groups[i]);
		INIT_LIST_HEAD(&ctx->old_groups[i]);
	}

	for (i = 0; i < sched->csg_slot_count; i++) {
		struct panthor_csg_slot *csg_slot = &sched->csg_slots[i];
		struct panthor_group *group = csg_slot->group;
		struct panthor_fw_csg_iface *csg_iface;

		if (!group)
			continue;

		csg_iface = panthor_fw_get_csg_iface(ptdev, i);
		group_get(group);

		/* If there were unhandled faults on the VM, force processing of
		 * CSG IRQs, so we can flag the faulty queue.
		 */
		if (panthor_vm_has_unhandled_faults(group->vm)) {
			sched_process_csg_irq_locked(ptdev, i);

			/* No fatal fault reported, flag all queues as faulty. */
			if (!group->fatal_queues)
				group->fatal_queues |= GENMASK(group->queue_count - 1, 0);
		}

		tick_ctx_insert_old_group(sched, ctx, group, full_tick);
		csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, i,
					csg_iface->output->ack ^ CSG_STATUS_UPDATE,
					CSG_STATUS_UPDATE);
	}

	ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
	if (ret) {
		panthor_device_schedule_reset(ptdev);
		ctx->csg_upd_failed_mask |= upd_ctx.timedout_mask;
	}
}

static void
group_term_post_processing(struct panthor_group *group)
{
	struct panthor_job *job, *tmp;
	LIST_HEAD(faulty_jobs);
	bool cookie;
	u32 i = 0;

	if (drm_WARN_ON(&group->ptdev->base, group_can_run(group)))
		return;

	cookie = dma_fence_begin_signalling();
	for (i = 0; i < group->queue_count; i++) {
		struct panthor_queue *queue = group->queues[i];
		struct panthor_syncobj_64b *syncobj;
		int err;

		if (group->fatal_queues & BIT(i))
			err = -EINVAL;
		else if (group->timedout)
			err = -ETIMEDOUT;
		else
			err = -ECANCELED;

		if (!queue)
			continue;

		spin_lock(&queue->fence_ctx.lock);
		list_for_each_entry_safe(job, tmp, &queue->fence_ctx.in_flight_jobs, node) {
			list_move_tail(&job->node, &faulty_jobs);
			dma_fence_set_error(job->done_fence, err);
			dma_fence_signal_locked(job->done_fence);
		}
		spin_unlock(&queue->fence_ctx.lock);

		/* Manually update the syncobj seqno to unblock waiters.
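		 * Queues blocked on a SYNC_WAIT against this object would
		 * otherwise never wake up: bumping the seqno to the last
		 * in-flight fence value mimics what the cancelled jobs would
		 * have signalled, and the sync_upd work queued right after
		 * re-evaluates the pending waits.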
*/ 2107 syncobj = group->syncobjs->kmap + (i * sizeof(*syncobj)); 2108 syncobj->status = ~0; 2109 syncobj->seqno = atomic64_read(&queue->fence_ctx.seqno); 2110 sched_queue_work(group->ptdev->scheduler, sync_upd); 2111 } 2112 dma_fence_end_signalling(cookie); 2113 2114 list_for_each_entry_safe(job, tmp, &faulty_jobs, node) { 2115 list_del_init(&job->node); 2116 panthor_job_put(&job->base); 2117 } 2118 } 2119 2120 static void group_term_work(struct work_struct *work) 2121 { 2122 struct panthor_group *group = 2123 container_of(work, struct panthor_group, term_work); 2124 2125 group_term_post_processing(group); 2126 group_put(group); 2127 } 2128 2129 static void 2130 tick_ctx_cleanup(struct panthor_scheduler *sched, 2131 struct panthor_sched_tick_ctx *ctx) 2132 { 2133 struct panthor_device *ptdev = sched->ptdev; 2134 struct panthor_group *group, *tmp; 2135 u32 i; 2136 2137 for (i = 0; i < ARRAY_SIZE(ctx->old_groups); i++) { 2138 list_for_each_entry_safe(group, tmp, &ctx->old_groups[i], run_node) { 2139 /* If everything went fine, we should only have groups 2140 * to be terminated in the old_groups lists. 2141 */ 2142 drm_WARN_ON(&ptdev->base, !ctx->csg_upd_failed_mask && 2143 group_can_run(group)); 2144 2145 if (!group_can_run(group)) { 2146 list_del_init(&group->run_node); 2147 list_del_init(&group->wait_node); 2148 group_queue_work(group, term); 2149 } else if (group->csg_id >= 0) { 2150 list_del_init(&group->run_node); 2151 } else { 2152 list_move(&group->run_node, 2153 group_is_idle(group) ? 2154 &sched->groups.idle[group->priority] : 2155 &sched->groups.runnable[group->priority]); 2156 } 2157 group_put(group); 2158 } 2159 } 2160 2161 for (i = 0; i < ARRAY_SIZE(ctx->groups); i++) { 2162 /* If everything went fine, the groups to schedule lists should 2163 * be empty. 2164 */ 2165 drm_WARN_ON(&ptdev->base, 2166 !ctx->csg_upd_failed_mask && !list_empty(&ctx->groups[i])); 2167 2168 list_for_each_entry_safe(group, tmp, &ctx->groups[i], run_node) { 2169 if (group->csg_id >= 0) { 2170 list_del_init(&group->run_node); 2171 } else { 2172 list_move(&group->run_node, 2173 group_is_idle(group) ? 2174 &sched->groups.idle[group->priority] : 2175 &sched->groups.runnable[group->priority]); 2176 } 2177 group_put(group); 2178 } 2179 } 2180 } 2181 2182 static void 2183 tick_ctx_apply(struct panthor_scheduler *sched, struct panthor_sched_tick_ctx *ctx) 2184 { 2185 struct panthor_group *group, *tmp; 2186 struct panthor_device *ptdev = sched->ptdev; 2187 struct panthor_csg_slot *csg_slot; 2188 int prio, new_csg_prio = MAX_CSG_PRIO, i; 2189 u32 free_csg_slots = 0; 2190 struct panthor_csg_slots_upd_ctx upd_ctx; 2191 int ret; 2192 2193 csgs_upd_ctx_init(&upd_ctx); 2194 2195 for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) { 2196 /* Suspend or terminate evicted groups. */ 2197 list_for_each_entry(group, &ctx->old_groups[prio], run_node) { 2198 bool term = !group_can_run(group); 2199 int csg_id = group->csg_id; 2200 2201 if (drm_WARN_ON(&ptdev->base, csg_id < 0)) 2202 continue; 2203 2204 csg_slot = &sched->csg_slots[csg_id]; 2205 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id, 2206 term ? CSG_STATE_TERMINATE : CSG_STATE_SUSPEND, 2207 CSG_STATE_MASK); 2208 } 2209 2210 /* Update priorities on already running groups. 
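		 * Slot priorities are handed out in list order, with
		 * new_csg_prio counting down from MAX_CSG_PRIO, so a group
		 * that keeps its slot may still need a CSG_ENDPOINT_CONFIG
		 * request to move to its new slot priority.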
*/ 2211 list_for_each_entry(group, &ctx->groups[prio], run_node) { 2212 struct panthor_fw_csg_iface *csg_iface; 2213 int csg_id = group->csg_id; 2214 2215 if (csg_id < 0) { 2216 new_csg_prio--; 2217 continue; 2218 } 2219 2220 csg_slot = &sched->csg_slots[csg_id]; 2221 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); 2222 if (csg_slot->priority == new_csg_prio) { 2223 new_csg_prio--; 2224 continue; 2225 } 2226 2227 panthor_fw_update_reqs(csg_iface, endpoint_req, 2228 CSG_EP_REQ_PRIORITY(new_csg_prio), 2229 CSG_EP_REQ_PRIORITY_MASK); 2230 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id, 2231 csg_iface->output->ack ^ CSG_ENDPOINT_CONFIG, 2232 CSG_ENDPOINT_CONFIG); 2233 new_csg_prio--; 2234 } 2235 } 2236 2237 ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx); 2238 if (ret) { 2239 panthor_device_schedule_reset(ptdev); 2240 ctx->csg_upd_failed_mask |= upd_ctx.timedout_mask; 2241 return; 2242 } 2243 2244 /* Unbind evicted groups. */ 2245 for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) { 2246 list_for_each_entry(group, &ctx->old_groups[prio], run_node) { 2247 /* This group is gone. Process interrupts to clear 2248 * any pending interrupts before we start the new 2249 * group. 2250 */ 2251 if (group->csg_id >= 0) 2252 sched_process_csg_irq_locked(ptdev, group->csg_id); 2253 2254 group_unbind_locked(group); 2255 } 2256 } 2257 2258 for (i = 0; i < sched->csg_slot_count; i++) { 2259 if (!sched->csg_slots[i].group) 2260 free_csg_slots |= BIT(i); 2261 } 2262 2263 csgs_upd_ctx_init(&upd_ctx); 2264 new_csg_prio = MAX_CSG_PRIO; 2265 2266 /* Start new groups. */ 2267 for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) { 2268 list_for_each_entry(group, &ctx->groups[prio], run_node) { 2269 int csg_id = group->csg_id; 2270 struct panthor_fw_csg_iface *csg_iface; 2271 2272 if (csg_id >= 0) { 2273 new_csg_prio--; 2274 continue; 2275 } 2276 2277 csg_id = ffs(free_csg_slots) - 1; 2278 if (drm_WARN_ON(&ptdev->base, csg_id < 0)) 2279 break; 2280 2281 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); 2282 csg_slot = &sched->csg_slots[csg_id]; 2283 group_bind_locked(group, csg_id); 2284 csg_slot_prog_locked(ptdev, csg_id, new_csg_prio--); 2285 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id, 2286 group->state == PANTHOR_CS_GROUP_SUSPENDED ? 2287 CSG_STATE_RESUME : CSG_STATE_START, 2288 CSG_STATE_MASK); 2289 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id, 2290 csg_iface->output->ack ^ CSG_ENDPOINT_CONFIG, 2291 CSG_ENDPOINT_CONFIG); 2292 free_csg_slots &= ~BIT(csg_id); 2293 } 2294 } 2295 2296 ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx); 2297 if (ret) { 2298 panthor_device_schedule_reset(ptdev); 2299 ctx->csg_upd_failed_mask |= upd_ctx.timedout_mask; 2300 return; 2301 } 2302 2303 for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) { 2304 list_for_each_entry_safe(group, tmp, &ctx->groups[prio], run_node) { 2305 list_del_init(&group->run_node); 2306 2307 /* If the group has been destroyed while we were 2308 * scheduling, ask for an immediate tick to 2309 * re-evaluate as soon as possible and get rid of 2310 * this dangling group. 2311 */ 2312 if (group->destroyed) 2313 ctx->immediate_tick = true; 2314 group_put(group); 2315 } 2316 2317 /* Return evicted groups to the idle or run queues. Groups 2318 * that can no longer be run (because they've been destroyed 2319 * or experienced an unrecoverable error) will be scheduled 2320 * for destruction in tick_ctx_cleanup(). 
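		 * Groups that are still runnable are requeued below and
		 * dropped from the ctx; the doomed ones stay on the
		 * old_groups lists until tick_ctx_cleanup() runs.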
2321 */ 2322 list_for_each_entry_safe(group, tmp, &ctx->old_groups[prio], run_node) { 2323 if (!group_can_run(group)) 2324 continue; 2325 2326 if (group_is_idle(group)) 2327 list_move_tail(&group->run_node, &sched->groups.idle[prio]); 2328 else 2329 list_move_tail(&group->run_node, &sched->groups.runnable[prio]); 2330 group_put(group); 2331 } 2332 } 2333 2334 sched->used_csg_slot_count = ctx->group_count; 2335 sched->might_have_idle_groups = ctx->idle_group_count > 0; 2336 } 2337 2338 static u64 2339 tick_ctx_update_resched_target(struct panthor_scheduler *sched, 2340 const struct panthor_sched_tick_ctx *ctx) 2341 { 2342 /* We had space left, no need to reschedule until some external event happens. */ 2343 if (!tick_ctx_is_full(sched, ctx)) 2344 goto no_tick; 2345 2346 /* If idle groups were scheduled, no need to wake up until some external 2347 * event happens (group unblocked, new job submitted, ...). 2348 */ 2349 if (ctx->idle_group_count) 2350 goto no_tick; 2351 2352 if (drm_WARN_ON(&sched->ptdev->base, ctx->min_priority >= PANTHOR_CSG_PRIORITY_COUNT)) 2353 goto no_tick; 2354 2355 /* If there are groups of the same priority waiting, we need to 2356 * keep the scheduler ticking, otherwise, we'll just wait for 2357 * new groups with higher priority to be queued. 2358 */ 2359 if (!list_empty(&sched->groups.runnable[ctx->min_priority])) { 2360 u64 resched_target = sched->last_tick + sched->tick_period; 2361 2362 if (time_before64(sched->resched_target, sched->last_tick) || 2363 time_before64(resched_target, sched->resched_target)) 2364 sched->resched_target = resched_target; 2365 2366 return sched->resched_target - sched->last_tick; 2367 } 2368 2369 no_tick: 2370 sched->resched_target = U64_MAX; 2371 return U64_MAX; 2372 } 2373 2374 static void tick_work(struct work_struct *work) 2375 { 2376 struct panthor_scheduler *sched = container_of(work, struct panthor_scheduler, 2377 tick_work.work); 2378 struct panthor_device *ptdev = sched->ptdev; 2379 struct panthor_sched_tick_ctx ctx; 2380 u64 remaining_jiffies = 0, resched_delay; 2381 u64 now = get_jiffies_64(); 2382 int prio, ret, cookie; 2383 2384 if (!drm_dev_enter(&ptdev->base, &cookie)) 2385 return; 2386 2387 ret = panthor_device_resume_and_get(ptdev); 2388 if (drm_WARN_ON(&ptdev->base, ret)) 2389 goto out_dev_exit; 2390 2391 if (time_before64(now, sched->resched_target)) 2392 remaining_jiffies = sched->resched_target - now; 2393 2394 mutex_lock(&sched->lock); 2395 if (panthor_device_reset_is_pending(sched->ptdev)) 2396 goto out_unlock; 2397 2398 tick_ctx_init(sched, &ctx, remaining_jiffies != 0); 2399 if (ctx.csg_upd_failed_mask) 2400 goto out_cleanup_ctx; 2401 2402 if (remaining_jiffies) { 2403 /* Scheduling forced in the middle of a tick. Only RT groups 2404 * can preempt non-RT ones. Currently running RT groups can't be 2405 * preempted. 
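		 * That's why this block only considers the RT runnable list
		 * on top of the groups that already hold a slot: lower
		 * priority runnable groups have to wait for the next full
		 * tick.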
2406 */ 2407 for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; 2408 prio >= 0 && !tick_ctx_is_full(sched, &ctx); 2409 prio--) { 2410 tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], 2411 true, true); 2412 if (prio == PANTHOR_CSG_PRIORITY_RT) { 2413 tick_ctx_pick_groups_from_list(sched, &ctx, 2414 &sched->groups.runnable[prio], 2415 true, false); 2416 } 2417 } 2418 } 2419 2420 /* First pick non-idle groups */ 2421 for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; 2422 prio >= 0 && !tick_ctx_is_full(sched, &ctx); 2423 prio--) { 2424 tick_ctx_pick_groups_from_list(sched, &ctx, &sched->groups.runnable[prio], 2425 true, false); 2426 tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], true, true); 2427 } 2428 2429 /* If we have free CSG slots left, pick idle groups */ 2430 for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; 2431 prio >= 0 && !tick_ctx_is_full(sched, &ctx); 2432 prio--) { 2433 /* Check the old_group queue first to avoid reprogramming the slots */ 2434 tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], false, true); 2435 tick_ctx_pick_groups_from_list(sched, &ctx, &sched->groups.idle[prio], 2436 false, false); 2437 } 2438 2439 tick_ctx_apply(sched, &ctx); 2440 if (ctx.csg_upd_failed_mask) 2441 goto out_cleanup_ctx; 2442 2443 if (ctx.idle_group_count == ctx.group_count) { 2444 panthor_devfreq_record_idle(sched->ptdev); 2445 if (sched->pm.has_ref) { 2446 pm_runtime_put_autosuspend(ptdev->base.dev); 2447 sched->pm.has_ref = false; 2448 } 2449 } else { 2450 panthor_devfreq_record_busy(sched->ptdev); 2451 if (!sched->pm.has_ref) { 2452 pm_runtime_get(ptdev->base.dev); 2453 sched->pm.has_ref = true; 2454 } 2455 } 2456 2457 sched->last_tick = now; 2458 resched_delay = tick_ctx_update_resched_target(sched, &ctx); 2459 if (ctx.immediate_tick) 2460 resched_delay = 0; 2461 2462 if (resched_delay != U64_MAX) 2463 sched_queue_delayed_work(sched, tick, resched_delay); 2464 2465 out_cleanup_ctx: 2466 tick_ctx_cleanup(sched, &ctx); 2467 2468 out_unlock: 2469 mutex_unlock(&sched->lock); 2470 pm_runtime_mark_last_busy(ptdev->base.dev); 2471 pm_runtime_put_autosuspend(ptdev->base.dev); 2472 2473 out_dev_exit: 2474 drm_dev_exit(cookie); 2475 } 2476 2477 static int panthor_queue_eval_syncwait(struct panthor_group *group, u8 queue_idx) 2478 { 2479 struct panthor_queue *queue = group->queues[queue_idx]; 2480 union { 2481 struct panthor_syncobj_64b sync64; 2482 struct panthor_syncobj_32b sync32; 2483 } *syncobj; 2484 bool result; 2485 u64 value; 2486 2487 syncobj = panthor_queue_get_syncwait_obj(group, queue); 2488 if (!syncobj) 2489 return -EINVAL; 2490 2491 value = queue->syncwait.sync64 ? 
		syncobj->sync64.seqno :
		syncobj->sync32.seqno;

	if (queue->syncwait.gt)
		result = value > queue->syncwait.ref;
	else
		result = value <= queue->syncwait.ref;

	if (result)
		panthor_queue_put_syncwait_obj(queue);

	return result;
}

static void sync_upd_work(struct work_struct *work)
{
	struct panthor_scheduler *sched = container_of(work,
						       struct panthor_scheduler,
						       sync_upd_work);
	struct panthor_group *group, *tmp;
	bool immediate_tick = false;

	mutex_lock(&sched->lock);
	list_for_each_entry_safe(group, tmp, &sched->groups.waiting, wait_node) {
		u32 tested_queues = group->blocked_queues;
		u32 unblocked_queues = 0;

		while (tested_queues) {
			u32 cs_id = ffs(tested_queues) - 1;
			int ret;

			ret = panthor_queue_eval_syncwait(group, cs_id);
			drm_WARN_ON(&group->ptdev->base, ret < 0);
			if (ret)
				unblocked_queues |= BIT(cs_id);

			tested_queues &= ~BIT(cs_id);
		}

		if (unblocked_queues) {
			group->blocked_queues &= ~unblocked_queues;

			if (group->csg_id < 0) {
				list_move(&group->run_node,
					  &sched->groups.runnable[group->priority]);
				if (group->priority == PANTHOR_CSG_PRIORITY_RT)
					immediate_tick = true;
			}
		}

		if (!group->blocked_queues)
			list_del_init(&group->wait_node);
	}
	mutex_unlock(&sched->lock);

	if (immediate_tick)
		sched_queue_delayed_work(sched, tick, 0);
}

static void group_schedule_locked(struct panthor_group *group, u32 queue_mask)
{
	struct panthor_device *ptdev = group->ptdev;
	struct panthor_scheduler *sched = ptdev->scheduler;
	struct list_head *queue = &sched->groups.runnable[group->priority];
	u64 delay_jiffies = 0;
	bool was_idle;
	u64 now;

	if (!group_can_run(group))
		return;

	/* All updated queues are blocked, no need to wake up the scheduler. */
	if ((queue_mask & group->blocked_queues) == queue_mask)
		return;

	was_idle = group_is_idle(group);
	group->idle_queues &= ~queue_mask;

	/* Don't mess with the lists if we're in the middle of a reset. */
	if (atomic_read(&sched->reset.in_progress))
		return;

	if (was_idle && !group_is_idle(group))
		list_move_tail(&group->run_node, queue);

	/* RT groups are preemptive. */
	if (group->priority == PANTHOR_CSG_PRIORITY_RT) {
		sched_queue_delayed_work(sched, tick, 0);
		return;
	}

	/* Some groups might be idle, force an immediate tick to
	 * re-evaluate.
	 */
	if (sched->might_have_idle_groups) {
		sched_queue_delayed_work(sched, tick, 0);
		return;
	}

	/* Scheduler is ticking, nothing to do. */
	if (sched->resched_target != U64_MAX) {
		/* If there are free slots, force immediate ticking. */
		if (sched->used_csg_slot_count < sched->csg_slot_count)
			sched_queue_delayed_work(sched, tick, 0);

		return;
	}

	/* Scheduler tick was off, recalculate the resched_target based on the
	 * last tick event, and queue the scheduler work.
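	 * E.g. with a tick_period of 0.5s (hypothetical value), a last_tick
	 * at time T and a wakeup at T + 0.2s: if all CSG slots are in use,
	 * the tick is scheduled 0.3s out rather than a full period, so groups
	 * waiting for a slot aren't delayed by the early wakeup.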
2602 */ 2603 now = get_jiffies_64(); 2604 sched->resched_target = sched->last_tick + sched->tick_period; 2605 if (sched->used_csg_slot_count == sched->csg_slot_count && 2606 time_before64(now, sched->resched_target)) 2607 delay_jiffies = min_t(unsigned long, sched->resched_target - now, ULONG_MAX); 2608 2609 sched_queue_delayed_work(sched, tick, delay_jiffies); 2610 } 2611 2612 static void queue_stop(struct panthor_queue *queue, 2613 struct panthor_job *bad_job) 2614 { 2615 drm_sched_stop(&queue->scheduler, bad_job ? &bad_job->base : NULL); 2616 } 2617 2618 static void queue_start(struct panthor_queue *queue) 2619 { 2620 struct panthor_job *job; 2621 2622 /* Re-assign the parent fences. */ 2623 list_for_each_entry(job, &queue->scheduler.pending_list, base.list) 2624 job->base.s_fence->parent = dma_fence_get(job->done_fence); 2625 2626 drm_sched_start(&queue->scheduler, 0); 2627 } 2628 2629 static void panthor_group_stop(struct panthor_group *group) 2630 { 2631 struct panthor_scheduler *sched = group->ptdev->scheduler; 2632 2633 lockdep_assert_held(&sched->reset.lock); 2634 2635 for (u32 i = 0; i < group->queue_count; i++) 2636 queue_stop(group->queues[i], NULL); 2637 2638 group_get(group); 2639 list_move_tail(&group->run_node, &sched->reset.stopped_groups); 2640 } 2641 2642 static void panthor_group_start(struct panthor_group *group) 2643 { 2644 struct panthor_scheduler *sched = group->ptdev->scheduler; 2645 2646 lockdep_assert_held(&group->ptdev->scheduler->reset.lock); 2647 2648 for (u32 i = 0; i < group->queue_count; i++) 2649 queue_start(group->queues[i]); 2650 2651 if (group_can_run(group)) { 2652 list_move_tail(&group->run_node, 2653 group_is_idle(group) ? 2654 &sched->groups.idle[group->priority] : 2655 &sched->groups.runnable[group->priority]); 2656 } else { 2657 list_del_init(&group->run_node); 2658 list_del_init(&group->wait_node); 2659 group_queue_work(group, term); 2660 } 2661 2662 group_put(group); 2663 } 2664 2665 static void panthor_sched_immediate_tick(struct panthor_device *ptdev) 2666 { 2667 struct panthor_scheduler *sched = ptdev->scheduler; 2668 2669 sched_queue_delayed_work(sched, tick, 0); 2670 } 2671 2672 /** 2673 * panthor_sched_report_mmu_fault() - Report MMU faults to the scheduler. 2674 */ 2675 void panthor_sched_report_mmu_fault(struct panthor_device *ptdev) 2676 { 2677 /* Force a tick to immediately kill faulty groups. */ 2678 if (ptdev->scheduler) 2679 panthor_sched_immediate_tick(ptdev); 2680 } 2681 2682 void panthor_sched_resume(struct panthor_device *ptdev) 2683 { 2684 /* Force a tick to re-evaluate after a resume. */ 2685 panthor_sched_immediate_tick(ptdev); 2686 } 2687 2688 void panthor_sched_suspend(struct panthor_device *ptdev) 2689 { 2690 struct panthor_scheduler *sched = ptdev->scheduler; 2691 struct panthor_csg_slots_upd_ctx upd_ctx; 2692 struct panthor_group *group; 2693 u32 suspended_slots; 2694 u32 i; 2695 2696 mutex_lock(&sched->lock); 2697 csgs_upd_ctx_init(&upd_ctx); 2698 for (i = 0; i < sched->csg_slot_count; i++) { 2699 struct panthor_csg_slot *csg_slot = &sched->csg_slots[i]; 2700 2701 if (csg_slot->group) { 2702 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, i, 2703 group_can_run(csg_slot->group) ? 
						CSG_STATE_SUSPEND : CSG_STATE_TERMINATE,
						CSG_STATE_MASK);
		}
	}

	suspended_slots = upd_ctx.update_mask;

	csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
	suspended_slots &= ~upd_ctx.timedout_mask;

	if (upd_ctx.timedout_mask) {
		u32 slot_mask = upd_ctx.timedout_mask;

		drm_err(&ptdev->base, "CSG suspend failed, escalating to termination");
		csgs_upd_ctx_init(&upd_ctx);
		while (slot_mask) {
			u32 csg_id = ffs(slot_mask) - 1;
			struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];

			/* If the group was still usable before that point, we
			 * consider it innocent.
			 */
			if (group_can_run(csg_slot->group))
				csg_slot->group->innocent = true;

			/* We consider group suspension failures as fatal and flag the
			 * group as unusable by setting timedout=true.
			 */
			csg_slot->group->timedout = true;

			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
						CSG_STATE_TERMINATE,
						CSG_STATE_MASK);
			slot_mask &= ~BIT(csg_id);
		}

		csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);

		slot_mask = upd_ctx.timedout_mask;
		while (slot_mask) {
			u32 csg_id = ffs(slot_mask) - 1;
			struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];

			/* The terminate command timed out, but the soft-reset will
			 * automatically terminate all active groups, so let's force
			 * the state to terminated here.
			 */
			if (csg_slot->group->state != PANTHOR_CS_GROUP_TERMINATED)
				csg_slot->group->state = PANTHOR_CS_GROUP_TERMINATED;
			slot_mask &= ~BIT(csg_id);
		}
	}

	/* Flush L2 and LSC caches to make sure suspend state is up-to-date.
	 * If the flush fails, flag all queues for termination.
	 */
	if (suspended_slots) {
		bool flush_caches_failed = false;
		u32 slot_mask = suspended_slots;

		if (panthor_gpu_flush_caches(ptdev, CACHE_CLEAN, CACHE_CLEAN, 0))
			flush_caches_failed = true;

		while (slot_mask) {
			u32 csg_id = ffs(slot_mask) - 1;
			struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];

			if (flush_caches_failed)
				csg_slot->group->state = PANTHOR_CS_GROUP_TERMINATED;
			else
				csg_slot_sync_update_locked(ptdev, csg_id);

			slot_mask &= ~BIT(csg_id);
		}
	}

	for (i = 0; i < sched->csg_slot_count; i++) {
		struct panthor_csg_slot *csg_slot = &sched->csg_slots[i];

		group = csg_slot->group;
		if (!group)
			continue;

		group_get(group);

		if (group->csg_id >= 0)
			sched_process_csg_irq_locked(ptdev, group->csg_id);

		group_unbind_locked(group);

		drm_WARN_ON(&group->ptdev->base, !list_empty(&group->run_node));

		if (group_can_run(group)) {
			list_add(&group->run_node,
				 &sched->groups.idle[group->priority]);
		} else {
			/* We don't bother stopping the scheduler if the group is
			 * faulty, the group termination work will finish the job.
			 */
			list_del_init(&group->wait_node);
			group_queue_work(group, term);
		}
		group_put(group);
	}
	mutex_unlock(&sched->lock);
}

void panthor_sched_pre_reset(struct panthor_device *ptdev)
{
	struct panthor_scheduler *sched = ptdev->scheduler;
	struct panthor_group *group, *group_tmp;
	u32 i;

	mutex_lock(&sched->reset.lock);
	atomic_set(&sched->reset.in_progress, true);

	/* Cancel all scheduler works.
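	 * That is, the delayed tick work and the sync-update work cancelled
	 * just below.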
	 * Once this is done, these works can't be scheduled again until the
	 * reset operation is complete.
	 */
	cancel_work_sync(&sched->sync_upd_work);
	cancel_delayed_work_sync(&sched->tick_work);

	panthor_sched_suspend(ptdev);

	/* Stop all groups that might still accept jobs, so no new jobs get
	 * pushed to us while we're resetting.
	 */
	for (i = 0; i < ARRAY_SIZE(sched->groups.runnable); i++) {
		/* All groups should be in the idle lists. */
		drm_WARN_ON(&ptdev->base, !list_empty(&sched->groups.runnable[i]));
		list_for_each_entry_safe(group, group_tmp, &sched->groups.runnable[i], run_node)
			panthor_group_stop(group);
	}

	for (i = 0; i < ARRAY_SIZE(sched->groups.idle); i++) {
		list_for_each_entry_safe(group, group_tmp, &sched->groups.idle[i], run_node)
			panthor_group_stop(group);
	}

	mutex_unlock(&sched->reset.lock);
}

void panthor_sched_post_reset(struct panthor_device *ptdev, bool reset_failed)
{
	struct panthor_scheduler *sched = ptdev->scheduler;
	struct panthor_group *group, *group_tmp;

	mutex_lock(&sched->reset.lock);

	list_for_each_entry_safe(group, group_tmp, &sched->reset.stopped_groups, run_node) {
		/* Consider all previously running groups as terminated if the
		 * reset failed.
		 */
		if (reset_failed)
			group->state = PANTHOR_CS_GROUP_TERMINATED;

		panthor_group_start(group);
	}

	/* We're done resetting the GPU, clear the reset.in_progress bit so we can
	 * kick the scheduler.
	 */
	atomic_set(&sched->reset.in_progress, false);
	mutex_unlock(&sched->reset.lock);

	/* No need to queue a tick and update syncs if the reset failed. */
	if (!reset_failed) {
		sched_queue_delayed_work(sched, tick, 0);
		sched_queue_work(sched, sync_upd);
	}
}

static void update_fdinfo_stats(struct panthor_job *job)
{
	struct panthor_group *group = job->group;
	struct panthor_queue *queue = group->queues[job->queue_idx];
	struct panthor_gpu_usage *fdinfo = &group->fdinfo.data;
	struct panthor_job_profiling_data *slots = queue->profiling.slots->kmap;
	struct panthor_job_profiling_data *data = &slots[job->profiling.slot];

	scoped_guard(spinlock, &group->fdinfo.lock) {
		if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_CYCLES)
			fdinfo->cycles += data->cycles.after - data->cycles.before;
		if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_TIMESTAMP)
			fdinfo->time += data->time.after - data->time.before;
	}
}

void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile)
{
	struct panthor_group_pool *gpool = pfile->groups;
	struct panthor_group *group;
	unsigned long i;

	if (IS_ERR_OR_NULL(gpool))
		return;

	xa_lock(&gpool->xa);
	xa_for_each(&gpool->xa, i, group) {
		guard(spinlock)(&group->fdinfo.lock);
		pfile->stats.cycles += group->fdinfo.data.cycles;
		pfile->stats.time += group->fdinfo.data.time;
		group->fdinfo.data.cycles = 0;
		group->fdinfo.data.time = 0;
	}
	xa_unlock(&gpool->xa);
}

static void group_sync_upd_work(struct work_struct *work)
{
	struct panthor_group *group =
		container_of(work, struct panthor_group, sync_upd_work);
	struct panthor_job *job, *job_tmp;
	LIST_HEAD(done_jobs);
	u32 queue_idx;
	bool cookie;

	cookie = dma_fence_begin_signalling();
	for (queue_idx = 0;
queue_idx < group->queue_count; queue_idx++) { 2923 struct panthor_queue *queue = group->queues[queue_idx]; 2924 struct panthor_syncobj_64b *syncobj; 2925 2926 if (!queue) 2927 continue; 2928 2929 syncobj = group->syncobjs->kmap + (queue_idx * sizeof(*syncobj)); 2930 2931 spin_lock(&queue->fence_ctx.lock); 2932 list_for_each_entry_safe(job, job_tmp, &queue->fence_ctx.in_flight_jobs, node) { 2933 if (syncobj->seqno < job->done_fence->seqno) 2934 break; 2935 2936 list_move_tail(&job->node, &done_jobs); 2937 dma_fence_signal_locked(job->done_fence); 2938 } 2939 spin_unlock(&queue->fence_ctx.lock); 2940 } 2941 dma_fence_end_signalling(cookie); 2942 2943 list_for_each_entry_safe(job, job_tmp, &done_jobs, node) { 2944 if (job->profiling.mask) 2945 update_fdinfo_stats(job); 2946 list_del_init(&job->node); 2947 panthor_job_put(&job->base); 2948 } 2949 2950 group_put(group); 2951 } 2952 2953 struct panthor_job_ringbuf_instrs { 2954 u64 buffer[MAX_INSTRS_PER_JOB]; 2955 u32 count; 2956 }; 2957 2958 struct panthor_job_instr { 2959 u32 profile_mask; 2960 u64 instr; 2961 }; 2962 2963 #define JOB_INSTR(__prof, __instr) \ 2964 { \ 2965 .profile_mask = __prof, \ 2966 .instr = __instr, \ 2967 } 2968 2969 static void 2970 copy_instrs_to_ringbuf(struct panthor_queue *queue, 2971 struct panthor_job *job, 2972 struct panthor_job_ringbuf_instrs *instrs) 2973 { 2974 u64 ringbuf_size = panthor_kernel_bo_size(queue->ringbuf); 2975 u64 start = job->ringbuf.start & (ringbuf_size - 1); 2976 u64 size, written; 2977 2978 /* 2979 * We need to write a whole slot, including any trailing zeroes 2980 * that may come at the end of it. Also, because instrs.buffer has 2981 * been zero-initialised, there's no need to pad it with 0's 2982 */ 2983 instrs->count = ALIGN(instrs->count, NUM_INSTRS_PER_CACHE_LINE); 2984 size = instrs->count * sizeof(u64); 2985 WARN_ON(size > ringbuf_size); 2986 written = min(ringbuf_size - start, size); 2987 2988 memcpy(queue->ringbuf->kmap + start, instrs->buffer, written); 2989 2990 if (written < size) 2991 memcpy(queue->ringbuf->kmap, 2992 &instrs->buffer[written / sizeof(u64)], 2993 size - written); 2994 } 2995 2996 struct panthor_job_cs_params { 2997 u32 profile_mask; 2998 u64 addr_reg; u64 val_reg; 2999 u64 cycle_reg; u64 time_reg; 3000 u64 sync_addr; u64 times_addr; 3001 u64 cs_start; u64 cs_size; 3002 u32 last_flush; u32 waitall_mask; 3003 }; 3004 3005 static void 3006 get_job_cs_params(struct panthor_job *job, struct panthor_job_cs_params *params) 3007 { 3008 struct panthor_group *group = job->group; 3009 struct panthor_queue *queue = group->queues[job->queue_idx]; 3010 struct panthor_device *ptdev = group->ptdev; 3011 struct panthor_scheduler *sched = ptdev->scheduler; 3012 3013 params->addr_reg = ptdev->csif_info.cs_reg_count - 3014 ptdev->csif_info.unpreserved_cs_reg_count; 3015 params->val_reg = params->addr_reg + 2; 3016 params->cycle_reg = params->addr_reg; 3017 params->time_reg = params->val_reg; 3018 3019 params->sync_addr = panthor_kernel_bo_gpuva(group->syncobjs) + 3020 job->queue_idx * sizeof(struct panthor_syncobj_64b); 3021 params->times_addr = panthor_kernel_bo_gpuva(queue->profiling.slots) + 3022 (job->profiling.slot * sizeof(struct panthor_job_profiling_data)); 3023 params->waitall_mask = GENMASK(sched->sb_slot_count - 1, 0); 3024 3025 params->cs_start = job->call_info.start; 3026 params->cs_size = job->call_info.size; 3027 params->last_flush = job->call_info.latest_flush; 3028 3029 params->profile_mask = job->profiling.mask; 3030 } 3031 3032 #define JOB_INSTR_ALWAYS(instr) \ 
3033 JOB_INSTR(PANTHOR_DEVICE_PROFILING_DISABLED, (instr)) 3034 #define JOB_INSTR_TIMESTAMP(instr) \ 3035 JOB_INSTR(PANTHOR_DEVICE_PROFILING_TIMESTAMP, (instr)) 3036 #define JOB_INSTR_CYCLES(instr) \ 3037 JOB_INSTR(PANTHOR_DEVICE_PROFILING_CYCLES, (instr)) 3038 3039 static void 3040 prepare_job_instrs(const struct panthor_job_cs_params *params, 3041 struct panthor_job_ringbuf_instrs *instrs) 3042 { 3043 const struct panthor_job_instr instr_seq[] = { 3044 /* MOV32 rX+2, cs.latest_flush */ 3045 JOB_INSTR_ALWAYS((2ull << 56) | (params->val_reg << 48) | params->last_flush), 3046 /* FLUSH_CACHE2.clean_inv_all.no_wait.signal(0) rX+2 */ 3047 JOB_INSTR_ALWAYS((36ull << 56) | (0ull << 48) | (params->val_reg << 40) | 3048 (0 << 16) | 0x233), 3049 /* MOV48 rX:rX+1, cycles_offset */ 3050 JOB_INSTR_CYCLES((1ull << 56) | (params->cycle_reg << 48) | 3051 (params->times_addr + 3052 offsetof(struct panthor_job_profiling_data, cycles.before))), 3053 /* STORE_STATE cycles */ 3054 JOB_INSTR_CYCLES((40ull << 56) | (params->cycle_reg << 40) | (1ll << 32)), 3055 /* MOV48 rX:rX+1, time_offset */ 3056 JOB_INSTR_TIMESTAMP((1ull << 56) | (params->time_reg << 48) | 3057 (params->times_addr + 3058 offsetof(struct panthor_job_profiling_data, time.before))), 3059 /* STORE_STATE timer */ 3060 JOB_INSTR_TIMESTAMP((40ull << 56) | (params->time_reg << 40) | (0ll << 32)), 3061 /* MOV48 rX:rX+1, cs.start */ 3062 JOB_INSTR_ALWAYS((1ull << 56) | (params->addr_reg << 48) | params->cs_start), 3063 /* MOV32 rX+2, cs.size */ 3064 JOB_INSTR_ALWAYS((2ull << 56) | (params->val_reg << 48) | params->cs_size), 3065 /* WAIT(0) => waits for FLUSH_CACHE2 instruction */ 3066 JOB_INSTR_ALWAYS((3ull << 56) | (1 << 16)), 3067 /* CALL rX:rX+1, rX+2 */ 3068 JOB_INSTR_ALWAYS((32ull << 56) | (params->addr_reg << 40) | 3069 (params->val_reg << 32)), 3070 /* MOV48 rX:rX+1, cycles_offset */ 3071 JOB_INSTR_CYCLES((1ull << 56) | (params->cycle_reg << 48) | 3072 (params->times_addr + 3073 offsetof(struct panthor_job_profiling_data, cycles.after))), 3074 /* STORE_STATE cycles */ 3075 JOB_INSTR_CYCLES((40ull << 56) | (params->cycle_reg << 40) | (1ll << 32)), 3076 /* MOV48 rX:rX+1, time_offset */ 3077 JOB_INSTR_TIMESTAMP((1ull << 56) | (params->time_reg << 48) | 3078 (params->times_addr + 3079 offsetof(struct panthor_job_profiling_data, time.after))), 3080 /* STORE_STATE timer */ 3081 JOB_INSTR_TIMESTAMP((40ull << 56) | (params->time_reg << 40) | (0ll << 32)), 3082 /* MOV48 rX:rX+1, sync_addr */ 3083 JOB_INSTR_ALWAYS((1ull << 56) | (params->addr_reg << 48) | params->sync_addr), 3084 /* MOV48 rX+2, #1 */ 3085 JOB_INSTR_ALWAYS((1ull << 56) | (params->val_reg << 48) | 1), 3086 /* WAIT(all) */ 3087 JOB_INSTR_ALWAYS((3ull << 56) | (params->waitall_mask << 16)), 3088 /* SYNC_ADD64.system_scope.propage_err.nowait rX:rX+1, rX+2*/ 3089 JOB_INSTR_ALWAYS((51ull << 56) | (0ull << 48) | (params->addr_reg << 40) | 3090 (params->val_reg << 32) | (0 << 16) | 1), 3091 /* ERROR_BARRIER, so we can recover from faults at job boundaries. */ 3092 JOB_INSTR_ALWAYS((47ull << 56)), 3093 }; 3094 u32 pad; 3095 3096 instrs->count = 0; 3097 3098 /* NEED to be cacheline aligned to please the prefetcher. */ 3099 static_assert(sizeof(instrs->buffer) % 64 == 0, 3100 "panthor_job_ringbuf_instrs::buffer is not aligned on a cacheline"); 3101 3102 /* Make sure we have enough storage to store the whole sequence. 
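	 * In other words, instr_seq rounded up to a whole number of cache
	 * lines must exactly fill instrs->buffer, which is
	 * MAX_INSTRS_PER_JOB entries long (currently 19 instructions padded
	 * up to 24, i.e. three cache lines of 8 u64 instructions each).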
*/ 3103 static_assert(ALIGN(ARRAY_SIZE(instr_seq), NUM_INSTRS_PER_CACHE_LINE) == 3104 ARRAY_SIZE(instrs->buffer), 3105 "instr_seq vs panthor_job_ringbuf_instrs::buffer size mismatch"); 3106 3107 for (u32 i = 0; i < ARRAY_SIZE(instr_seq); i++) { 3108 /* If the profile mask of this instruction is not enabled, skip it. */ 3109 if (instr_seq[i].profile_mask && 3110 !(instr_seq[i].profile_mask & params->profile_mask)) 3111 continue; 3112 3113 instrs->buffer[instrs->count++] = instr_seq[i].instr; 3114 } 3115 3116 pad = ALIGN(instrs->count, NUM_INSTRS_PER_CACHE_LINE); 3117 memset(&instrs->buffer[instrs->count], 0, 3118 (pad - instrs->count) * sizeof(instrs->buffer[0])); 3119 instrs->count = pad; 3120 } 3121 3122 static u32 calc_job_credits(u32 profile_mask) 3123 { 3124 struct panthor_job_ringbuf_instrs instrs; 3125 struct panthor_job_cs_params params = { 3126 .profile_mask = profile_mask, 3127 }; 3128 3129 prepare_job_instrs(¶ms, &instrs); 3130 return instrs.count; 3131 } 3132 3133 static struct dma_fence * 3134 queue_run_job(struct drm_sched_job *sched_job) 3135 { 3136 struct panthor_job *job = container_of(sched_job, struct panthor_job, base); 3137 struct panthor_group *group = job->group; 3138 struct panthor_queue *queue = group->queues[job->queue_idx]; 3139 struct panthor_device *ptdev = group->ptdev; 3140 struct panthor_scheduler *sched = ptdev->scheduler; 3141 struct panthor_job_ringbuf_instrs instrs; 3142 struct panthor_job_cs_params cs_params; 3143 struct dma_fence *done_fence; 3144 int ret; 3145 3146 /* Stream size is zero, nothing to do except making sure all previously 3147 * submitted jobs are done before we signal the 3148 * drm_sched_job::s_fence::finished fence. 3149 */ 3150 if (!job->call_info.size) { 3151 job->done_fence = dma_fence_get(queue->fence_ctx.last_fence); 3152 return dma_fence_get(job->done_fence); 3153 } 3154 3155 ret = panthor_device_resume_and_get(ptdev); 3156 if (drm_WARN_ON(&ptdev->base, ret)) 3157 return ERR_PTR(ret); 3158 3159 mutex_lock(&sched->lock); 3160 if (!group_can_run(group)) { 3161 done_fence = ERR_PTR(-ECANCELED); 3162 goto out_unlock; 3163 } 3164 3165 dma_fence_init(job->done_fence, 3166 &panthor_queue_fence_ops, 3167 &queue->fence_ctx.lock, 3168 queue->fence_ctx.id, 3169 atomic64_inc_return(&queue->fence_ctx.seqno)); 3170 3171 job->profiling.slot = queue->profiling.seqno++; 3172 if (queue->profiling.seqno == queue->profiling.slot_count) 3173 queue->profiling.seqno = 0; 3174 3175 job->ringbuf.start = queue->iface.input->insert; 3176 3177 get_job_cs_params(job, &cs_params); 3178 prepare_job_instrs(&cs_params, &instrs); 3179 copy_instrs_to_ringbuf(queue, job, &instrs); 3180 3181 job->ringbuf.end = job->ringbuf.start + (instrs.count * sizeof(u64)); 3182 3183 panthor_job_get(&job->base); 3184 spin_lock(&queue->fence_ctx.lock); 3185 list_add_tail(&job->node, &queue->fence_ctx.in_flight_jobs); 3186 spin_unlock(&queue->fence_ctx.lock); 3187 3188 /* Make sure the ring buffer is updated before the INSERT 3189 * register. 3190 */ 3191 wmb(); 3192 3193 queue->iface.input->extract = queue->iface.output->extract; 3194 queue->iface.input->insert = job->ringbuf.end; 3195 3196 if (group->csg_id < 0) { 3197 /* If the queue is blocked, we want to keep the timeout running, so we 3198 * can detect unbounded waits and kill the group when that happens. 3199 * Otherwise, we suspend the timeout so the time we spend waiting for 3200 * a CSG slot is not counted. 
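		 * drm_sched_suspend_timeout() returns the time left on the
		 * timeout; stashing it in queue->remaining_time lets the
		 * countdown resume from the same point later instead of
		 * restarting from a full JOB_TIMEOUT_MS.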
		 */
		if (!(group->blocked_queues & BIT(job->queue_idx)) &&
		    !queue->timeout_suspended) {
			queue->remaining_time = drm_sched_suspend_timeout(&queue->scheduler);
			queue->timeout_suspended = true;
		}

		group_schedule_locked(group, BIT(job->queue_idx));
	} else {
		gpu_write(ptdev, CSF_DOORBELL(queue->doorbell_id), 1);
		if (!sched->pm.has_ref &&
		    !(group->blocked_queues & BIT(job->queue_idx))) {
			pm_runtime_get(ptdev->base.dev);
			sched->pm.has_ref = true;
		}
		panthor_devfreq_record_busy(sched->ptdev);
	}

	/* Update the last fence. */
	dma_fence_put(queue->fence_ctx.last_fence);
	queue->fence_ctx.last_fence = dma_fence_get(job->done_fence);

	done_fence = dma_fence_get(job->done_fence);

out_unlock:
	mutex_unlock(&sched->lock);
	pm_runtime_mark_last_busy(ptdev->base.dev);
	pm_runtime_put_autosuspend(ptdev->base.dev);

	return done_fence;
}

static enum drm_gpu_sched_stat
queue_timedout_job(struct drm_sched_job *sched_job)
{
	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
	struct panthor_group *group = job->group;
	struct panthor_device *ptdev = group->ptdev;
	struct panthor_scheduler *sched = ptdev->scheduler;
	struct panthor_queue *queue = group->queues[job->queue_idx];

	drm_warn(&ptdev->base, "job timeout: pid=%d, comm=%s, seqno=%llu\n",
		 group->task_info.pid, group->task_info.comm, job->done_fence->seqno);

	drm_WARN_ON(&ptdev->base, atomic_read(&sched->reset.in_progress));

	queue_stop(queue, job);

	mutex_lock(&sched->lock);
	group->timedout = true;
	if (group->csg_id >= 0) {
		sched_queue_delayed_work(ptdev->scheduler, tick, 0);
	} else {
		/* Remove from the run queues, so the scheduler can't
		 * pick the group on the next tick.
		 */
		list_del_init(&group->run_node);
		list_del_init(&group->wait_node);

		group_queue_work(group, term);
	}
	mutex_unlock(&sched->lock);

	queue_start(queue);

	return DRM_GPU_SCHED_STAT_RESET;
}

static void queue_free_job(struct drm_sched_job *sched_job)
{
	drm_sched_job_cleanup(sched_job);
	panthor_job_put(sched_job);
}

static const struct drm_sched_backend_ops panthor_queue_sched_ops = {
	.run_job = queue_run_job,
	.timedout_job = queue_timedout_job,
	.free_job = queue_free_job,
};

static u32 calc_profiling_ringbuf_num_slots(struct panthor_device *ptdev,
					    u32 cs_ringbuf_size)
{
	u32 min_profiled_job_instrs = U32_MAX;
	u32 last_flag = fls(PANTHOR_DEVICE_PROFILING_ALL);

	/* We want to calculate the minimum number of instructions a profiled
	 * job can take, because the shorter a profiled job's CS is, the more
	 * profiled jobs fit in the ring buffer simultaneously, and the more
	 * profiling slots we need. The maximum number of profiling slots is
	 * thus the ring buffer size divided by the minimum profiled job size.
	 * That minimum has to be evaluated for every single profiling flag,
	 * but not for the profiling-disabled case, since unprofiled jobs
	 * don't consume profiling slots at all.
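	 * For instance (with the current instruction sequence), a job
	 * profiled with a single flag takes 11 always-present instructions
	 * plus 4 sampling ones, padded to 16 (two cache lines); a 4K ring
	 * buffer then yields DIV_ROUND_UP(4096, 16 * 8) = 32 profiling slots.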
	 */
	for (u32 i = 0; i < last_flag; i++) {
		min_profiled_job_instrs =
			min(min_profiled_job_instrs, calc_job_credits(BIT(i)));
	}

	return DIV_ROUND_UP(cs_ringbuf_size, min_profiled_job_instrs * sizeof(u64));
}

static struct panthor_queue *
group_create_queue(struct panthor_group *group,
		   const struct drm_panthor_queue_create *args)
{
	const struct drm_sched_init_args sched_args = {
		.ops = &panthor_queue_sched_ops,
		.submit_wq = group->ptdev->scheduler->wq,
		.num_rqs = 1,
		/*
		 * The credit limit argument tells us the total number of
		 * instructions across all CS slots in the ringbuffer, with
		 * some jobs requiring twice as many as others, depending on
		 * their profiling status.
		 */
		.credit_limit = args->ringbuf_size / sizeof(u64),
		.timeout = msecs_to_jiffies(JOB_TIMEOUT_MS),
		.timeout_wq = group->ptdev->reset.wq,
		.name = "panthor-queue",
		.dev = group->ptdev->base.dev,
	};
	struct drm_gpu_scheduler *drm_sched;
	struct panthor_queue *queue;
	int ret;

	if (args->pad[0] || args->pad[1] || args->pad[2])
		return ERR_PTR(-EINVAL);

	if (args->ringbuf_size < SZ_4K || args->ringbuf_size > SZ_64K ||
	    !is_power_of_2(args->ringbuf_size))
		return ERR_PTR(-EINVAL);

	if (args->priority > CSF_MAX_QUEUE_PRIO)
		return ERR_PTR(-EINVAL);

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return ERR_PTR(-ENOMEM);

	queue->fence_ctx.id = dma_fence_context_alloc(1);
	spin_lock_init(&queue->fence_ctx.lock);
	INIT_LIST_HEAD(&queue->fence_ctx.in_flight_jobs);

	queue->priority = args->priority;

	queue->ringbuf = panthor_kernel_bo_create(group->ptdev, group->vm,
						  args->ringbuf_size,
						  DRM_PANTHOR_BO_NO_MMAP,
						  DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
						  DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
						  PANTHOR_VM_KERNEL_AUTO_VA,
						  "CS ring buffer");
	if (IS_ERR(queue->ringbuf)) {
		ret = PTR_ERR(queue->ringbuf);
		goto err_free_queue;
	}

	ret = panthor_kernel_bo_vmap(queue->ringbuf);
	if (ret)
		goto err_free_queue;

	queue->iface.mem = panthor_fw_alloc_queue_iface_mem(group->ptdev,
							    &queue->iface.input,
							    &queue->iface.output,
							    &queue->iface.input_fw_va,
							    &queue->iface.output_fw_va);
	if (IS_ERR(queue->iface.mem)) {
		ret = PTR_ERR(queue->iface.mem);
		goto err_free_queue;
	}

	queue->profiling.slot_count =
		calc_profiling_ringbuf_num_slots(group->ptdev, args->ringbuf_size);

	queue->profiling.slots =
		panthor_kernel_bo_create(group->ptdev, group->vm,
					 queue->profiling.slot_count *
					 sizeof(struct panthor_job_profiling_data),
					 DRM_PANTHOR_BO_NO_MMAP,
					 DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
					 DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
					 PANTHOR_VM_KERNEL_AUTO_VA,
					 "Group job stats");

	if (IS_ERR(queue->profiling.slots)) {
		ret = PTR_ERR(queue->profiling.slots);
		goto err_free_queue;
	}

	ret = panthor_kernel_bo_vmap(queue->profiling.slots);
	if (ret)
		goto err_free_queue;

	ret = drm_sched_init(&queue->scheduler, &sched_args);
	if (ret)
		goto err_free_queue;

	drm_sched = &queue->scheduler;
	ret = drm_sched_entity_init(&queue->entity, 0, &drm_sched, 1, NULL);
	if (ret)
		goto err_free_queue;

	return queue;

err_free_queue:
	group_free_queue(group, queue);
	return ERR_PTR(ret);
}

static void
group_init_task_info(struct panthor_group *group) 3415 { 3416 struct task_struct *task = current->group_leader; 3417 3418 group->task_info.pid = task->pid; 3419 get_task_comm(group->task_info.comm, task); 3420 } 3421 3422 static void add_group_kbo_sizes(struct panthor_device *ptdev, 3423 struct panthor_group *group) 3424 { 3425 struct panthor_queue *queue; 3426 int i; 3427 3428 if (drm_WARN_ON(&ptdev->base, IS_ERR_OR_NULL(group))) 3429 return; 3430 if (drm_WARN_ON(&ptdev->base, ptdev != group->ptdev)) 3431 return; 3432 3433 group->fdinfo.kbo_sizes += group->suspend_buf->obj->size; 3434 group->fdinfo.kbo_sizes += group->protm_suspend_buf->obj->size; 3435 group->fdinfo.kbo_sizes += group->syncobjs->obj->size; 3436 3437 for (i = 0; i < group->queue_count; i++) { 3438 queue = group->queues[i]; 3439 group->fdinfo.kbo_sizes += queue->ringbuf->obj->size; 3440 group->fdinfo.kbo_sizes += queue->iface.mem->obj->size; 3441 group->fdinfo.kbo_sizes += queue->profiling.slots->obj->size; 3442 } 3443 } 3444 3445 #define MAX_GROUPS_PER_POOL 128 3446 3447 int panthor_group_create(struct panthor_file *pfile, 3448 const struct drm_panthor_group_create *group_args, 3449 const struct drm_panthor_queue_create *queue_args) 3450 { 3451 struct panthor_device *ptdev = pfile->ptdev; 3452 struct panthor_group_pool *gpool = pfile->groups; 3453 struct panthor_scheduler *sched = ptdev->scheduler; 3454 struct panthor_fw_csg_iface *csg_iface = panthor_fw_get_csg_iface(ptdev, 0); 3455 struct panthor_group *group = NULL; 3456 u32 gid, i, suspend_size; 3457 int ret; 3458 3459 if (group_args->pad) 3460 return -EINVAL; 3461 3462 if (group_args->priority >= PANTHOR_CSG_PRIORITY_COUNT) 3463 return -EINVAL; 3464 3465 if ((group_args->compute_core_mask & ~ptdev->gpu_info.shader_present) || 3466 (group_args->fragment_core_mask & ~ptdev->gpu_info.shader_present) || 3467 (group_args->tiler_core_mask & ~ptdev->gpu_info.tiler_present)) 3468 return -EINVAL; 3469 3470 if (hweight64(group_args->compute_core_mask) < group_args->max_compute_cores || 3471 hweight64(group_args->fragment_core_mask) < group_args->max_fragment_cores || 3472 hweight64(group_args->tiler_core_mask) < group_args->max_tiler_cores) 3473 return -EINVAL; 3474 3475 group = kzalloc(sizeof(*group), GFP_KERNEL); 3476 if (!group) 3477 return -ENOMEM; 3478 3479 spin_lock_init(&group->fatal_lock); 3480 kref_init(&group->refcount); 3481 group->state = PANTHOR_CS_GROUP_CREATED; 3482 group->csg_id = -1; 3483 3484 group->ptdev = ptdev; 3485 group->max_compute_cores = group_args->max_compute_cores; 3486 group->compute_core_mask = group_args->compute_core_mask; 3487 group->max_fragment_cores = group_args->max_fragment_cores; 3488 group->fragment_core_mask = group_args->fragment_core_mask; 3489 group->max_tiler_cores = group_args->max_tiler_cores; 3490 group->tiler_core_mask = group_args->tiler_core_mask; 3491 group->priority = group_args->priority; 3492 3493 INIT_LIST_HEAD(&group->wait_node); 3494 INIT_LIST_HEAD(&group->run_node); 3495 INIT_WORK(&group->term_work, group_term_work); 3496 INIT_WORK(&group->sync_upd_work, group_sync_upd_work); 3497 INIT_WORK(&group->tiler_oom_work, group_tiler_oom_work); 3498 INIT_WORK(&group->release_work, group_release_work); 3499 3500 group->vm = panthor_vm_pool_get_vm(pfile->vms, group_args->vm_id); 3501 if (!group->vm) { 3502 ret = -EINVAL; 3503 goto err_put_group; 3504 } 3505 3506 suspend_size = csg_iface->control->suspend_size; 3507 group->suspend_buf = panthor_fw_alloc_suspend_buf_mem(ptdev, suspend_size); 3508 if (IS_ERR(group->suspend_buf)) { 
3509 ret = PTR_ERR(group->suspend_buf); 3510 group->suspend_buf = NULL; 3511 goto err_put_group; 3512 } 3513 3514 suspend_size = csg_iface->control->protm_suspend_size; 3515 group->protm_suspend_buf = panthor_fw_alloc_suspend_buf_mem(ptdev, suspend_size); 3516 if (IS_ERR(group->protm_suspend_buf)) { 3517 ret = PTR_ERR(group->protm_suspend_buf); 3518 group->protm_suspend_buf = NULL; 3519 goto err_put_group; 3520 } 3521 3522 group->syncobjs = panthor_kernel_bo_create(ptdev, group->vm, 3523 group_args->queues.count * 3524 sizeof(struct panthor_syncobj_64b), 3525 DRM_PANTHOR_BO_NO_MMAP, 3526 DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC | 3527 DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED, 3528 PANTHOR_VM_KERNEL_AUTO_VA, 3529 "Group sync objects"); 3530 if (IS_ERR(group->syncobjs)) { 3531 ret = PTR_ERR(group->syncobjs); 3532 goto err_put_group; 3533 } 3534 3535 ret = panthor_kernel_bo_vmap(group->syncobjs); 3536 if (ret) 3537 goto err_put_group; 3538 3539 memset(group->syncobjs->kmap, 0, 3540 group_args->queues.count * sizeof(struct panthor_syncobj_64b)); 3541 3542 for (i = 0; i < group_args->queues.count; i++) { 3543 group->queues[i] = group_create_queue(group, &queue_args[i]); 3544 if (IS_ERR(group->queues[i])) { 3545 ret = PTR_ERR(group->queues[i]); 3546 group->queues[i] = NULL; 3547 goto err_put_group; 3548 } 3549 3550 group->queue_count++; 3551 } 3552 3553 group->idle_queues = GENMASK(group->queue_count - 1, 0); 3554 3555 ret = xa_alloc(&gpool->xa, &gid, group, XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL); 3556 if (ret) 3557 goto err_put_group; 3558 3559 mutex_lock(&sched->reset.lock); 3560 if (atomic_read(&sched->reset.in_progress)) { 3561 panthor_group_stop(group); 3562 } else { 3563 mutex_lock(&sched->lock); 3564 list_add_tail(&group->run_node, 3565 &sched->groups.idle[group->priority]); 3566 mutex_unlock(&sched->lock); 3567 } 3568 mutex_unlock(&sched->reset.lock); 3569 3570 add_group_kbo_sizes(group->ptdev, group); 3571 spin_lock_init(&group->fdinfo.lock); 3572 3573 group_init_task_info(group); 3574 3575 return gid; 3576 3577 err_put_group: 3578 group_put(group); 3579 return ret; 3580 } 3581 3582 int panthor_group_destroy(struct panthor_file *pfile, u32 group_handle) 3583 { 3584 struct panthor_group_pool *gpool = pfile->groups; 3585 struct panthor_device *ptdev = pfile->ptdev; 3586 struct panthor_scheduler *sched = ptdev->scheduler; 3587 struct panthor_group *group; 3588 3589 group = xa_erase(&gpool->xa, group_handle); 3590 if (!group) 3591 return -EINVAL; 3592 3593 mutex_lock(&sched->reset.lock); 3594 mutex_lock(&sched->lock); 3595 group->destroyed = true; 3596 if (group->csg_id >= 0) { 3597 sched_queue_delayed_work(sched, tick, 0); 3598 } else if (!atomic_read(&sched->reset.in_progress)) { 3599 /* Remove from the run queues, so the scheduler can't 3600 * pick the group on the next tick. 
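		 * The group isn't resident on a CSG slot here, so there is no
		 * FW state to tear down; the termination work finishes the
		 * remaining cleanup once the group is off the lists.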
3601 */ 3602 list_del_init(&group->run_node); 3603 list_del_init(&group->wait_node); 3604 group_queue_work(group, term); 3605 } 3606 mutex_unlock(&sched->lock); 3607 mutex_unlock(&sched->reset.lock); 3608 3609 group_put(group); 3610 return 0; 3611 } 3612 3613 static struct panthor_group *group_from_handle(struct panthor_group_pool *pool, 3614 u32 group_handle) 3615 { 3616 struct panthor_group *group; 3617 3618 xa_lock(&pool->xa); 3619 group = group_get(xa_load(&pool->xa, group_handle)); 3620 xa_unlock(&pool->xa); 3621 3622 return group; 3623 } 3624 3625 int panthor_group_get_state(struct panthor_file *pfile, 3626 struct drm_panthor_group_get_state *get_state) 3627 { 3628 struct panthor_group_pool *gpool = pfile->groups; 3629 struct panthor_device *ptdev = pfile->ptdev; 3630 struct panthor_scheduler *sched = ptdev->scheduler; 3631 struct panthor_group *group; 3632 3633 if (get_state->pad) 3634 return -EINVAL; 3635 3636 group = group_from_handle(gpool, get_state->group_handle); 3637 if (!group) 3638 return -EINVAL; 3639 3640 memset(get_state, 0, sizeof(*get_state)); 3641 3642 mutex_lock(&sched->lock); 3643 if (group->timedout) 3644 get_state->state |= DRM_PANTHOR_GROUP_STATE_TIMEDOUT; 3645 if (group->fatal_queues) { 3646 get_state->state |= DRM_PANTHOR_GROUP_STATE_FATAL_FAULT; 3647 get_state->fatal_queues = group->fatal_queues; 3648 } 3649 if (group->innocent) 3650 get_state->state |= DRM_PANTHOR_GROUP_STATE_INNOCENT; 3651 mutex_unlock(&sched->lock); 3652 3653 group_put(group); 3654 return 0; 3655 } 3656 3657 int panthor_group_pool_create(struct panthor_file *pfile) 3658 { 3659 struct panthor_group_pool *gpool; 3660 3661 gpool = kzalloc(sizeof(*gpool), GFP_KERNEL); 3662 if (!gpool) 3663 return -ENOMEM; 3664 3665 xa_init_flags(&gpool->xa, XA_FLAGS_ALLOC1); 3666 pfile->groups = gpool; 3667 return 0; 3668 } 3669 3670 void panthor_group_pool_destroy(struct panthor_file *pfile) 3671 { 3672 struct panthor_group_pool *gpool = pfile->groups; 3673 struct panthor_group *group; 3674 unsigned long i; 3675 3676 if (IS_ERR_OR_NULL(gpool)) 3677 return; 3678 3679 xa_for_each(&gpool->xa, i, group) 3680 panthor_group_destroy(pfile, i); 3681 3682 xa_destroy(&gpool->xa); 3683 kfree(gpool); 3684 pfile->groups = NULL; 3685 } 3686 3687 /** 3688 * panthor_fdinfo_gather_group_mem_info() - Retrieve aggregate size of all private kernel BO's 3689 * belonging to all the groups owned by an open Panthor file 3690 * @pfile: File. 3691 * @stats: Memory statistics to be updated. 
void panthor_group_pool_destroy(struct panthor_file *pfile)
{
	struct panthor_group_pool *gpool = pfile->groups;
	struct panthor_group *group;
	unsigned long i;

	if (IS_ERR_OR_NULL(gpool))
		return;

	xa_for_each(&gpool->xa, i, group)
		panthor_group_destroy(pfile, i);

	xa_destroy(&gpool->xa);
	kfree(gpool);
	pfile->groups = NULL;
}

/**
 * panthor_fdinfo_gather_group_mem_info() - Retrieve aggregate size of all private kernel BOs
 * belonging to all the groups owned by an open Panthor file
 * @pfile: File.
 * @stats: Memory statistics to be updated.
 */
void
panthor_fdinfo_gather_group_mem_info(struct panthor_file *pfile,
				     struct drm_memory_stats *stats)
{
	struct panthor_group_pool *gpool = pfile->groups;
	struct panthor_group *group;
	unsigned long i;

	if (IS_ERR_OR_NULL(gpool))
		return;

	xa_lock(&gpool->xa);
	xa_for_each(&gpool->xa, i, group) {
		stats->resident += group->fdinfo.kbo_sizes;
		if (group->csg_id >= 0)
			stats->active += group->fdinfo.kbo_sizes;
	}
	xa_unlock(&gpool->xa);
}

static void job_release(struct kref *ref)
{
	struct panthor_job *job = container_of(ref, struct panthor_job, refcount);

	drm_WARN_ON(&job->group->ptdev->base, !list_empty(&job->node));

	if (job->base.s_fence)
		drm_sched_job_cleanup(&job->base);

	/* done_fence is allocated in panthor_job_create() but only
	 * initialized later; if ->ops was never set, free the bare
	 * allocation instead of dropping a reference.
	 */
	if (job->done_fence && job->done_fence->ops)
		dma_fence_put(job->done_fence);
	else
		dma_fence_free(job->done_fence);

	group_put(job->group);

	kfree(job);
}

struct drm_sched_job *panthor_job_get(struct drm_sched_job *sched_job)
{
	if (sched_job) {
		struct panthor_job *job = container_of(sched_job, struct panthor_job, base);

		kref_get(&job->refcount);
	}

	return sched_job;
}

void panthor_job_put(struct drm_sched_job *sched_job)
{
	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);

	if (sched_job)
		kref_put(&job->refcount, job_release);
}

struct panthor_vm *panthor_job_vm(struct drm_sched_job *sched_job)
{
	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);

	return job->group->vm;
}
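/**
 * panthor_job_create() - Create a job from a userspace submission
 * @pfile: File the job is submitted on.
 * @group_handle: Handle of the group to queue the job to.
 * @qsubmit: Submission description passed by userspace.
 * @drm_client_id: ID of the DRM client submitting the job.
 *
 * Validates the submission (padding, command stream alignment, latest_flush
 * encoding), takes a reference on the target group and initializes a
 * drm_sched_job on the entity of the targeted queue.
 *
 * Return: A valid drm_sched_job pointer on success, an ERR_PTR() otherwise.
 */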
struct drm_sched_job *
panthor_job_create(struct panthor_file *pfile,
		   u16 group_handle,
		   const struct drm_panthor_queue_submit *qsubmit,
		   u64 drm_client_id)
{
	struct panthor_group_pool *gpool = pfile->groups;
	struct panthor_job *job;
	u32 credits;
	int ret;

	if (qsubmit->pad)
		return ERR_PTR(-EINVAL);

	/* If stream_addr is zero, stream_size must be zero too. */
	if ((qsubmit->stream_size == 0) != (qsubmit->stream_addr == 0))
		return ERR_PTR(-EINVAL);

	/* Make sure the address is aligned on a 64-byte (cache line) boundary,
	 * and the size on an 8-byte (instruction size) boundary.
	 */
	if ((qsubmit->stream_addr & 63) || (qsubmit->stream_size & 7))
		return ERR_PTR(-EINVAL);

	/* Bits 24:30 of latest_flush must be zero. */
	if (qsubmit->latest_flush & GENMASK(30, 24))
		return ERR_PTR(-EINVAL);

	job = kzalloc(sizeof(*job), GFP_KERNEL);
	if (!job)
		return ERR_PTR(-ENOMEM);

	kref_init(&job->refcount);
	job->queue_idx = qsubmit->queue_index;
	job->call_info.size = qsubmit->stream_size;
	job->call_info.start = qsubmit->stream_addr;
	job->call_info.latest_flush = qsubmit->latest_flush;
	INIT_LIST_HEAD(&job->node);

	job->group = group_from_handle(gpool, group_handle);
	if (!job->group) {
		ret = -EINVAL;
		goto err_put_job;
	}

	if (!group_can_run(job->group)) {
		ret = -EINVAL;
		goto err_put_job;
	}

	if (job->queue_idx >= job->group->queue_count ||
	    !job->group->queues[job->queue_idx]) {
		ret = -EINVAL;
		goto err_put_job;
	}

	/* Empty command streams don't need a fence, they'll pick the one from
	 * the previously submitted job.
	 */
	if (job->call_info.size) {
		job->done_fence = kzalloc(sizeof(*job->done_fence), GFP_KERNEL);
		if (!job->done_fence) {
			ret = -ENOMEM;
			goto err_put_job;
		}
	}

	job->profiling.mask = pfile->ptdev->profile_mask;
	credits = calc_job_credits(job->profiling.mask);
	if (credits == 0) {
		ret = -EINVAL;
		goto err_put_job;
	}

	ret = drm_sched_job_init(&job->base,
				 &job->group->queues[job->queue_idx]->entity,
				 credits, job->group, drm_client_id);
	if (ret)
		goto err_put_job;

	return &job->base;

err_put_job:
	panthor_job_put(&job->base);
	return ERR_PTR(ret);
}

void panthor_job_update_resvs(struct drm_exec *exec, struct drm_sched_job *sched_job)
{
	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);

	panthor_vm_update_resvs(job->group->vm, exec, &sched_job->s_fence->finished,
				DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
}
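/**
 * panthor_sched_unplug() - Stop the scheduler
 * @ptdev: Device.
 *
 * Cancels the scheduler tick and drops the runtime PM reference the
 * scheduler may still hold.
 */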
void panthor_sched_unplug(struct panthor_device *ptdev)
{
	struct panthor_scheduler *sched = ptdev->scheduler;

	cancel_delayed_work_sync(&sched->tick_work);

	mutex_lock(&sched->lock);
	if (sched->pm.has_ref) {
		pm_runtime_put(ptdev->base.dev);
		sched->pm.has_ref = false;
	}
	mutex_unlock(&sched->lock);
}

static void panthor_sched_fini(struct drm_device *ddev, void *res)
{
	struct panthor_scheduler *sched = res;
	int prio;

	if (!sched || !sched->csg_slot_count)
		return;

	cancel_delayed_work_sync(&sched->tick_work);

	if (sched->wq)
		destroy_workqueue(sched->wq);

	if (sched->heap_alloc_wq)
		destroy_workqueue(sched->heap_alloc_wq);

	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
		drm_WARN_ON(ddev, !list_empty(&sched->groups.runnable[prio]));
		drm_WARN_ON(ddev, !list_empty(&sched->groups.idle[prio]));
	}

	drm_WARN_ON(ddev, !list_empty(&sched->groups.waiting));
}

int panthor_sched_init(struct panthor_device *ptdev)
{
	struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
	struct panthor_fw_csg_iface *csg_iface = panthor_fw_get_csg_iface(ptdev, 0);
	struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, 0, 0);
	struct panthor_scheduler *sched;
	u32 gpu_as_count, num_groups;
	int prio, ret;

	sched = drmm_kzalloc(&ptdev->base, sizeof(*sched), GFP_KERNEL);
	if (!sched)
		return -ENOMEM;

	/* The highest bit in JOB_INT_* is reserved for global IRQs. That
	 * leaves 31 bits for CSG IRQs, hence the MAX_CSGS clamp here.
	 */
	num_groups = min_t(u32, MAX_CSGS, glb_iface->control->group_num);

	/* The FW-side scheduler might deadlock if two groups with the same
	 * priority try to access a set of resources that overlaps, with part
	 * of the resources being allocated to one group and the other part to
	 * the other group, both groups waiting for the remaining resources to
	 * be allocated. To avoid that, it is recommended to assign each CSG a
	 * different priority. In theory we could allow several groups to have
	 * the same CSG priority if they don't request the same resources, but
	 * that makes the scheduling logic more complicated, so let's clamp
	 * the number of CSG slots to MAX_CSG_PRIO + 1 for now.
	 */
	num_groups = min_t(u32, MAX_CSG_PRIO + 1, num_groups);

	/* We need at least one AS for the MCU and one for the GPU contexts. */
	gpu_as_count = hweight32(ptdev->gpu_info.as_present & GENMASK(31, 1));
	if (!gpu_as_count) {
		drm_err(&ptdev->base, "Not enough AS (%d, expected at least 2)",
			gpu_as_count + 1);
		return -EINVAL;
	}

	sched->ptdev = ptdev;
	sched->sb_slot_count = CS_FEATURES_SCOREBOARDS(cs_iface->control->features);
	sched->csg_slot_count = num_groups;
	sched->cs_slot_count = csg_iface->control->stream_num;
	sched->as_slot_count = gpu_as_count;
	ptdev->csif_info.csg_slot_count = sched->csg_slot_count;
	ptdev->csif_info.cs_slot_count = sched->cs_slot_count;
	ptdev->csif_info.scoreboard_slot_count = sched->sb_slot_count;

	sched->last_tick = 0;
	sched->resched_target = U64_MAX;
	sched->tick_period = msecs_to_jiffies(10);
	INIT_DELAYED_WORK(&sched->tick_work, tick_work);
	INIT_WORK(&sched->sync_upd_work, sync_upd_work);
	INIT_WORK(&sched->fw_events_work, process_fw_events_work);

	ret = drmm_mutex_init(&ptdev->base, &sched->lock);
	if (ret)
		return ret;

	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
		INIT_LIST_HEAD(&sched->groups.runnable[prio]);
		INIT_LIST_HEAD(&sched->groups.idle[prio]);
	}
	INIT_LIST_HEAD(&sched->groups.waiting);

	ret = drmm_mutex_init(&ptdev->base, &sched->reset.lock);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&sched->reset.stopped_groups);

	/* sched->heap_alloc_wq will be used for heap chunk allocation on
	 * tiler OOM events, which means we can't use the same workqueue for
	 * the scheduler because works queued by the scheduler are in
	 * the dma-signalling path. Allocate a dedicated heap_alloc_wq to
	 * work around this limitation.
	 *
	 * FIXME: Ultimately, what we need is a failable/non-blocking GEM
	 * allocation path that we can call when a heap OOM is reported. The
	 * FW is smart enough to fall back on other methods if the kernel can't
	 * allocate memory, and fail the tiling job if none of these
	 * countermeasures worked.
	 *
	 * Set WQ_MEM_RECLAIM on sched->wq to unblock the situation when the
	 * system is running out of memory.
	 */
	sched->heap_alloc_wq = alloc_workqueue("panthor-heap-alloc", WQ_UNBOUND, 0);
	sched->wq = alloc_workqueue("panthor-csf-sched", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	if (!sched->wq || !sched->heap_alloc_wq) {
		panthor_sched_fini(&ptdev->base, sched);
		drm_err(&ptdev->base, "Failed to allocate the workqueues");
		return -ENOMEM;
	}

	ret = drmm_add_action_or_reset(&ptdev->base, panthor_sched_fini, sched);
	if (ret)
		return ret;

	ptdev->scheduler = sched;
	return 0;
}
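/*
 * Teardown note (informational): panthor_sched_fini() is registered as a
 * drmm action in panthor_sched_init(), so scheduler teardown runs
 * automatically when the drm_device is released; the workqueue-allocation
 * error path above calls it directly only because the action hasn't been
 * registered yet at that point. panthor_sched_unplug() is the one step
 * that has to be called explicitly, presumably from the device-removal
 * path, so the runtime PM reference is dropped before the managed cleanup
 * runs.
 */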