// SPDX-License-Identifier: GPL-2.0 or MIT
/* Copyright 2023 Collabora ltd. */

#include <drm/drm_drv.h>
#include <drm/drm_exec.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>
#include <drm/panthor_drm.h>

#include <linux/build_bug.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dma-resv.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/iosys-map.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include "panthor_devfreq.h"
#include "panthor_device.h"
#include "panthor_fw.h"
#include "panthor_gem.h"
#include "panthor_gpu.h"
#include "panthor_heap.h"
#include "panthor_mmu.h"
#include "panthor_regs.h"
#include "panthor_sched.h"

/**
 * DOC: Scheduler
 *
 * Mali CSF hardware adopts a firmware-assisted scheduling model, where
 * the firmware takes care of scheduling aspects, to some extent.
 *
 * Scheduling happens at the scheduling group level. Each group contains
 * 1 to N queues (N is FW/hardware dependent, and exposed through the
 * firmware interface). Each queue is assigned a command stream ring
 * buffer, which serves as a way to get jobs submitted to the GPU, among
 * other things.
 *
 * The firmware can schedule a maximum of M groups (M is FW/hardware
 * dependent, and exposed through the firmware interface). Past this
 * maximum number of groups, the kernel must take care of rotating the
 * groups passed to the firmware so every group gets a chance to have
 * its queues scheduled for execution.
 *
 * The current implementation only supports kernel-mode queues. In other
 * words, userspace doesn't have access to the ring-buffer. Instead,
 * userspace passes indirect command stream buffers that are called from
 * the queue ring-buffer by the kernel using a pre-defined sequence of
 * command stream instructions to ensure the userspace driver always gets
 * consistent results (cache maintenance, synchronization, ...).
 *
 * We rely on the drm_gpu_scheduler framework to deal with job
 * dependencies and submission. As any other driver dealing with a
 * FW-scheduler, we use the 1:1 entity:scheduler mode, such that each
 * entity has its own job scheduler. When a job is ready to be executed
 * (all its dependencies are met), it is pushed to the appropriate
 * queue ring-buffer, and the group is scheduled for execution if it
 * wasn't already active.
 *
 * Kernel-side group scheduling is timeslice-based. When there are fewer
 * groups than slots, the periodic tick is disabled and we just let the FW
 * schedule the active groups. When there are more groups than slots, we
 * give each group a chance to execute for a given amount of time, and
 * then re-evaluate and pick new groups to schedule. The group selection
 * algorithm is based on priority+round-robin.
 *
 * Even though user-mode queues are out of scope right now, the current
 * design takes them into account by avoiding any guess on the group/queue
 * state that would be based on information we wouldn't have if userspace
 * was in charge of the ring-buffer. That's also one of the reasons we
 * don't do 'cooperative' scheduling (encoding FW group slot reservation
 * as dma_fence that would be returned from the
 * drm_gpu_scheduler::prepare_job() hook, and treating group rotation as
 * a queue of waiters, ordered by job submission order). This approach
 * would work for kernel-mode queues, but would make user-mode queues a
 * lot more complicated to retrofit.
 */
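
/*
 * Illustrative sketch only (not part of the driver, pick() is a hypothetical
 * helper): the priority+round-robin selection described above conceptually
 * walks the runnable lists from highest to lowest priority until all FW
 * group slots are filled, so that groups within a priority level take turns
 * across ticks:
 *
 *	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
 *		list_for_each_entry(group, &sched->groups.runnable[prio], run_node) {
 *			if (picked == sched->csg_slot_count)
 *				return;
 *			pick(group);
 *			picked++;
 *		}
 *	}
 *
 * The real selection lives in the tick work further down in this file, which
 * additionally deals with idle groups, RT preemption and eviction of groups
 * that exhausted their timeslice.
 */
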
#define JOB_TIMEOUT_MS 5000

#define MAX_CSG_PRIO 0xf

#define NUM_INSTRS_PER_CACHE_LINE (64 / sizeof(u64))
#define MAX_INSTRS_PER_JOB 24

struct panthor_group;

/**
 * struct panthor_csg_slot - Command stream group slot
 *
 * This represents a FW slot for a scheduling group.
 */
struct panthor_csg_slot {
	/** @group: Scheduling group bound to this slot. */
	struct panthor_group *group;

	/** @priority: Group priority. */
	u8 priority;

	/**
	 * @idle: True if the group bound to this slot is idle.
	 *
	 * A group is idle when it has nothing waiting for execution on
	 * all its queues, or when queues are blocked waiting for something
	 * to happen (synchronization object).
	 */
	bool idle;
};

/**
 * enum panthor_csg_priority - Group priority
 */
enum panthor_csg_priority {
	/** @PANTHOR_CSG_PRIORITY_LOW: Low priority group. */
	PANTHOR_CSG_PRIORITY_LOW = 0,

	/** @PANTHOR_CSG_PRIORITY_MEDIUM: Medium priority group. */
	PANTHOR_CSG_PRIORITY_MEDIUM,

	/** @PANTHOR_CSG_PRIORITY_HIGH: High priority group. */
	PANTHOR_CSG_PRIORITY_HIGH,

	/**
	 * @PANTHOR_CSG_PRIORITY_RT: Real-time priority group.
	 *
	 * Real-time priority allows one to preempt scheduling of other
	 * non-real-time groups. When such a group becomes executable,
	 * it will evict the group with the lowest non-rt priority if
	 * there's no free group slot available.
	 */
	PANTHOR_CSG_PRIORITY_RT,

	/** @PANTHOR_CSG_PRIORITY_COUNT: Number of priority levels. */
	PANTHOR_CSG_PRIORITY_COUNT,
};

/**
 * struct panthor_scheduler - Object used to manage the scheduler
 */
struct panthor_scheduler {
	/** @ptdev: Device. */
	struct panthor_device *ptdev;

	/**
	 * @wq: Workqueue used by our internal scheduler logic and
	 * drm_gpu_scheduler.
	 *
	 * Used for the scheduler tick, group update or other kinds of FW
	 * event processing that can't be handled in the threaded interrupt
	 * path. Also passed to the drm_gpu_scheduler instances embedded
	 * in panthor_queue.
	 */
	struct workqueue_struct *wq;

	/**
	 * @heap_alloc_wq: Workqueue used to schedule tiler_oom works.
	 *
	 * We have a queue dedicated to heap chunk allocation works to avoid
	 * blocking the rest of the scheduler if the allocation tries to
	 * reclaim memory.
	 */
	struct workqueue_struct *heap_alloc_wq;

	/** @tick_work: Work executed on a scheduling tick. */
	struct delayed_work tick_work;

	/**
	 * @sync_upd_work: Work used to process synchronization object updates.
	 *
	 * We use this work to unblock queues/groups that were waiting on a
	 * synchronization object.
	 */
	struct work_struct sync_upd_work;

	/**
	 * @fw_events_work: Work used to process FW events outside the interrupt path.
	 *
	 * Even if the interrupt is threaded, we need any event processing
	 * that requires taking the panthor_scheduler::lock to be processed
	 * outside the interrupt path so we don't block the tick logic when
	 * it calls panthor_fw_{csg,wait}_wait_acks(). Since most of the
	 * event processing requires taking this lock, we just delegate all
	 * FW event processing to the scheduler workqueue.
	 */
	struct work_struct fw_events_work;

	/**
	 * @fw_events: Bitmask encoding pending FW events.
	 */
	atomic_t fw_events;

	/**
	 * @resched_target: When the next tick should occur.
	 *
	 * Expressed in jiffies.
	 */
	u64 resched_target;

	/**
	 * @last_tick: When the last tick occurred.
	 *
	 * Expressed in jiffies.
	 */
	u64 last_tick;

	/** @tick_period: Tick period in jiffies. */
	u64 tick_period;

	/**
	 * @lock: Lock protecting access to all the scheduler fields.
	 *
	 * Should be taken in the tick work, the irq handler, and anywhere the @groups
	 * fields are touched.
	 */
	struct mutex lock;

	/** @groups: Various lists used to classify groups. */
	struct {
		/**
		 * @runnable: Runnable group lists.
		 *
		 * When a group has queues that want to execute something,
		 * its panthor_group::run_node should be inserted here.
		 *
		 * One list per-priority.
		 */
		struct list_head runnable[PANTHOR_CSG_PRIORITY_COUNT];

		/**
		 * @idle: Idle group lists.
		 *
		 * When all queues of a group are idle (either because they
		 * have nothing to execute, or because they are blocked), the
		 * panthor_group::run_node field should be inserted here.
		 *
		 * One list per-priority.
		 */
		struct list_head idle[PANTHOR_CSG_PRIORITY_COUNT];

		/**
		 * @waiting: List of groups whose queues are blocked on a
		 * synchronization object.
		 *
		 * Insert panthor_group::wait_node here when a group is waiting
		 * for synchronization objects to be signaled.
		 *
		 * This list is evaluated in the @sync_upd_work work.
		 */
		struct list_head waiting;
	} groups;

	/**
	 * @csg_slots: FW command stream group slots.
	 */
	struct panthor_csg_slot csg_slots[MAX_CSGS];

	/** @csg_slot_count: Number of command stream group slots exposed by the FW. */
	u32 csg_slot_count;

	/** @cs_slot_count: Number of command stream slots per group slot exposed by the FW. */
	u32 cs_slot_count;

	/** @as_slot_count: Number of address space slots supported by the MMU. */
	u32 as_slot_count;

	/** @used_csg_slot_count: Number of command stream group slots currently used. */
	u32 used_csg_slot_count;

	/** @sb_slot_count: Number of scoreboard slots. */
	u32 sb_slot_count;

	/**
	 * @might_have_idle_groups: True if an active group might have become idle.
	 *
	 * This will force a tick, so other runnable groups can be scheduled if one
	 * or more active groups became idle.
	 */
	bool might_have_idle_groups;

	/** @pm: Power management related fields. */
	struct {
		/** @has_ref: True if the scheduler owns a runtime PM reference. */
		bool has_ref;
	} pm;

	/** @reset: Reset related fields. */
	struct {
		/** @lock: Lock protecting the other reset fields. */
		struct mutex lock;

		/**
		 * @in_progress: True if a reset is in progress.
		 *
		 * Set to true in panthor_sched_pre_reset() and back to false in
		 * panthor_sched_post_reset().
		 */
		atomic_t in_progress;

		/**
		 * @stopped_groups: List containing all groups that were stopped
		 * before a reset.
		 *
		 * Insert panthor_group::run_node in the pre_reset path.
		 */
		struct list_head stopped_groups;
	} reset;
};

/**
 * struct panthor_syncobj_32b - 32-bit FW synchronization object
 */
struct panthor_syncobj_32b {
	/** @seqno: Sequence number. */
	u32 seqno;

	/**
	 * @status: Status.
	 *
	 * Not zero on failure.
	 */
	u32 status;
};

/**
 * struct panthor_syncobj_64b - 64-bit FW synchronization object
 */
struct panthor_syncobj_64b {
	/** @seqno: Sequence number. */
	u64 seqno;

	/**
	 * @status: Status.
	 *
	 * Not zero on failure.
	 */
	u32 status;

	/** @pad: MBZ. */
	u32 pad;
};

/**
 * struct panthor_queue - Execution queue
 */
struct panthor_queue {
	/** @scheduler: DRM scheduler used for this queue. */
	struct drm_gpu_scheduler scheduler;

	/** @entity: DRM scheduling entity used for this queue. */
	struct drm_sched_entity entity;

	/** @name: DRM scheduler name for this queue. */
	char *name;

	/** @timeout: Queue timeout related fields. */
	struct {
		/** @timeout.work: Work executed when a queue timeout occurs. */
		struct delayed_work work;

		/**
		 * @timeout.remaining: Time remaining before a queue timeout.
		 *
		 * When the timer is running, this value is set to MAX_SCHEDULE_TIMEOUT.
		 * When the timer is suspended, it's set to the time remaining when the
		 * timer was suspended.
		 */
		unsigned long remaining;
	} timeout;

	/**
	 * @doorbell_id: Doorbell assigned to this queue.
	 *
	 * Right now, all groups share the same doorbell, and the doorbell ID
	 * is assigned to group_slot + 1 when the group is assigned a slot. But
	 * we might decide to provide fine grained doorbell assignment at some
	 * point, so we don't have to wake up all queues in a group every time
	 * one of them is updated.
	 */
	u8 doorbell_id;

	/**
	 * @priority: Priority of the queue inside the group.
	 *
	 * Must be less than 16 (only 4 bits available).
	 */
	u8 priority;
#define CSF_MAX_QUEUE_PRIO GENMASK(3, 0)

	/** @ringbuf: Command stream ring-buffer. */
	struct panthor_kernel_bo *ringbuf;

	/** @iface: Firmware interface. */
	struct {
		/** @mem: FW memory allocated for this interface. */
		struct panthor_kernel_bo *mem;

		/** @input: Input interface. */
		struct panthor_fw_ringbuf_input_iface *input;

		/** @output: Output interface. */
		const struct panthor_fw_ringbuf_output_iface *output;

		/** @input_fw_va: FW virtual address of the input interface buffer. */
		u32 input_fw_va;

		/** @output_fw_va: FW virtual address of the output interface buffer. */
		u32 output_fw_va;
	} iface;

	/**
	 * @syncwait: Stores information about the synchronization object this
	 * queue is waiting on.
	 */
	struct {
		/** @gpu_va: GPU address of the synchronization object. */
		u64 gpu_va;

		/** @ref: Reference value to compare against. */
		u64 ref;

		/** @gt: True if this is a greater-than test. */
		bool gt;

		/** @sync64: True if this is a 64-bit sync object. */
		bool sync64;

		/** @obj: Buffer object holding the synchronization object. */
		struct drm_gem_object *obj;

		/** @offset: Offset of the synchronization object inside @obj. */
		u64 offset;

		/**
		 * @kmap: Kernel mapping of the buffer object holding the
		 * synchronization object.
		 */
		void *kmap;
	} syncwait;

	/** @fence_ctx: Fence context fields. */
	struct {
		/** @lock: Used to protect access to all fences allocated by this context. */
		spinlock_t lock;

		/**
		 * @id: Fence context ID.
		 *
		 * Allocated with dma_fence_context_alloc().
		 */
		u64 id;

		/** @seqno: Sequence number of the last initialized fence. */
		atomic64_t seqno;

		/**
		 * @last_fence: Fence of the last submitted job.
		 *
		 * We return this fence when we get an empty command stream.
		 * This way, we are guaranteed that all earlier jobs have completed
		 * when drm_sched_job::s_fence::finished signals, without having to
		 * feed the CS ring buffer with a dummy job that only signals the
		 * fence.
		 */
		struct dma_fence *last_fence;

		/**
		 * @in_flight_jobs: List containing all in-flight jobs.
		 *
		 * Used to keep track and signal panthor_job::done_fence when the
		 * synchronization object attached to the queue is signaled.
		 */
		struct list_head in_flight_jobs;
	} fence_ctx;

	/** @profiling: Job profiling data slots and access information. */
	struct {
		/** @slots: Kernel BO holding the slots. */
		struct panthor_kernel_bo *slots;

		/** @slot_count: Number of jobs the ringbuffer can hold at once. */
		u32 slot_count;

		/** @seqno: Index of the next available profiling information slot. */
		u32 seqno;
	} profiling;
};

/**
 * enum panthor_group_state - Scheduling group state.
 */
enum panthor_group_state {
	/** @PANTHOR_CS_GROUP_CREATED: Group was created, but not scheduled yet. */
	PANTHOR_CS_GROUP_CREATED,

	/** @PANTHOR_CS_GROUP_ACTIVE: Group is currently scheduled. */
	PANTHOR_CS_GROUP_ACTIVE,

	/**
	 * @PANTHOR_CS_GROUP_SUSPENDED: Group was scheduled at least once, but is
	 * inactive/suspended right now.
	 */
	PANTHOR_CS_GROUP_SUSPENDED,

	/**
	 * @PANTHOR_CS_GROUP_TERMINATED: Group was terminated.
	 *
	 * Can no longer be scheduled. The only allowed action is destruction.
	 */
	PANTHOR_CS_GROUP_TERMINATED,

	/**
	 * @PANTHOR_CS_GROUP_UNKNOWN_STATE: Group is in an unknown state.
	 *
	 * The FW returned an inconsistent state. The group is flagged unusable
	 * and can no longer be scheduled. The only allowed action is
	 * destruction.
	 *
	 * When that happens, we also schedule a FW reset, to start from a fresh
	 * state.
	 */
	PANTHOR_CS_GROUP_UNKNOWN_STATE,
};

/**
 * struct panthor_group - Scheduling group object
 */
struct panthor_group {
	/** @refcount: Reference count */
	struct kref refcount;

	/** @ptdev: Device. */
	struct panthor_device *ptdev;

	/** @vm: VM bound to the group. */
	struct panthor_vm *vm;

	/** @compute_core_mask: Mask of shader cores that can be used for compute jobs. */
	u64 compute_core_mask;

	/** @fragment_core_mask: Mask of shader cores that can be used for fragment jobs. */
	u64 fragment_core_mask;

	/** @tiler_core_mask: Mask of tiler cores that can be used for tiler jobs. */
	u64 tiler_core_mask;

	/** @max_compute_cores: Maximum number of shader cores used for compute jobs. */
	u8 max_compute_cores;

	/** @max_fragment_cores: Maximum number of shader cores used for fragment jobs. */
	u8 max_fragment_cores;

	/** @max_tiler_cores: Maximum number of tiler cores used for tiler jobs. */
	u8 max_tiler_cores;

	/** @priority: Group priority (check panthor_csg_priority). */
	u8 priority;

	/** @blocked_queues: Bitmask reflecting the blocked queues. */
	u32 blocked_queues;

	/** @idle_queues: Bitmask reflecting the idle queues. */
	u32 idle_queues;

	/** @fatal_lock: Lock used to protect access to fatal fields. */
	spinlock_t fatal_lock;

	/** @fatal_queues: Bitmask reflecting the queues that hit a fatal exception. */
	u32 fatal_queues;

	/** @tiler_oom: Mask of queues that have a tiler OOM event to process. */
	atomic_t tiler_oom;

	/** @queue_count: Number of queues in this group. */
	u32 queue_count;

	/** @queues: Queues owned by this group. */
	struct panthor_queue *queues[MAX_CS_PER_CSG];

	/**
	 * @csg_id: ID of the FW group slot.
	 *
	 * -1 when the group is not scheduled/active.
	 */
	int csg_id;

	/**
	 * @destroyed: True when the group has been destroyed.
	 *
	 * If a group is destroyed it becomes useless: no further jobs can be submitted
	 * to its queues. We simply wait for all references to be dropped so we can
	 * release the group object.
	 */
	bool destroyed;

	/**
	 * @timedout: True when a timeout occurred on any of the queues owned by
	 * this group.
	 *
	 * Timeouts can be reported by drm_sched or by the FW. If a reset is required,
	 * and the group can't be suspended, this also leads to a timeout. In any case,
	 * any timeout situation is unrecoverable, and the group becomes useless. We
	 * simply wait for all references to be dropped so we can release the group
	 * object.
	 */
	bool timedout;

	/**
	 * @innocent: True when the group becomes unusable because the group suspension
	 * failed during a reset.
	 *
	 * Sometimes the FW was put in a bad state by other groups, causing the group
	 * suspension happening in the reset path to fail. In that case, we consider the
	 * group innocent.
	 */
	bool innocent;

	/**
	 * @syncobjs: Pool of per-queue synchronization objects.
	 *
	 * One sync object per queue. The position of the sync object is
	 * determined by the queue index.
	 */
	struct panthor_kernel_bo *syncobjs;

	/** @fdinfo: Per-file info exposed through /proc/<process>/fdinfo */
	struct {
		/** @data: Total sampled values for jobs in queues from this group. */
		struct panthor_gpu_usage data;

		/**
		 * @fdinfo.lock: Spinlock to govern concurrent access from drm file's fdinfo
		 * callback and job post-completion processing function.
		 */
		spinlock_t lock;

		/** @fdinfo.kbo_sizes: Aggregate size of private kernel BO's held by the group. */
		size_t kbo_sizes;
	} fdinfo;

	/** @task_info: Info of current->group_leader that created the group. */
	struct {
		/** @task_info.pid: pid of current->group_leader */
		pid_t pid;

		/** @task_info.comm: comm of current->group_leader */
		char comm[TASK_COMM_LEN];
	} task_info;

	/** @state: Group state. */
	enum panthor_group_state state;

	/**
	 * @suspend_buf: Suspend buffer.
	 *
	 * Stores the state of the group and its queues when a group is suspended.
	 * Used at resume time to restore the group in its previous state.
	 *
	 * The size of the suspend buffer is exposed through the FW interface.
	 */
	struct panthor_kernel_bo *suspend_buf;

	/**
	 * @protm_suspend_buf: Protection mode suspend buffer.
	 *
	 * Stores the state of the group and its queues when a group that's in
	 * protection mode is suspended.
	 *
	 * Used at resume time to restore the group in its previous state.
	 *
	 * The size of the protection mode suspend buffer is exposed through the
	 * FW interface.
	 */
	struct panthor_kernel_bo *protm_suspend_buf;

	/** @sync_upd_work: Work used to check/signal job fences. */
	struct work_struct sync_upd_work;

	/** @tiler_oom_work: Work used to process tiler OOM events happening on this group. */
	struct work_struct tiler_oom_work;

	/** @term_work: Work used to finish the group termination procedure. */
	struct work_struct term_work;

	/**
	 * @release_work: Work used to release group resources.
	 *
	 * We need to postpone the group release to avoid a deadlock when
	 * the last ref is released in the tick work.
	 */
	struct work_struct release_work;

	/**
	 * @run_node: Node used to insert the group in the
	 * panthor_scheduler::groups::{runnable,idle} and
	 * panthor_scheduler::reset::stopped_groups lists.
	 */
	struct list_head run_node;

	/**
	 * @wait_node: Node used to insert the group in the
	 * panthor_scheduler::groups::waiting list.
	 */
	struct list_head wait_node;
};

struct panthor_job_profiling_data {
	struct {
		u64 before;
		u64 after;
	} cycles;

	struct {
		u64 before;
		u64 after;
	} time;
};

/**
 * group_queue_work() - Queue a group work
 * @group: Group to queue the work for.
 * @wname: Work name.
 *
 * Grabs a ref and queues a work item to the scheduler workqueue. If
 * the work was already queued, we release the reference we grabbed.
 *
 * Work callbacks must release the reference we grabbed here.
 */
#define group_queue_work(group, wname) \
	do { \
		group_get(group); \
		if (!queue_work((group)->ptdev->scheduler->wq, &(group)->wname ## _work)) \
			group_put(group); \
	} while (0)

/**
 * sched_queue_work() - Queue a scheduler work.
 * @sched: Scheduler object.
 * @wname: Work name.
 *
 * Conditionally queues a scheduler work if no reset is pending/in-progress.
 */
#define sched_queue_work(sched, wname) \
	do { \
		if (!atomic_read(&(sched)->reset.in_progress) && \
		    !panthor_device_reset_is_pending((sched)->ptdev)) \
			queue_work((sched)->wq, &(sched)->wname ## _work); \
	} while (0)

/**
 * sched_queue_delayed_work() - Queue a scheduler delayed work.
 * @sched: Scheduler object.
 * @wname: Work name.
 * @delay: Work delay in jiffies.
 *
 * Conditionally queues a scheduler delayed work if no reset is
 * pending/in-progress.
 */
#define sched_queue_delayed_work(sched, wname, delay) \
	do { \
		if (!atomic_read(&sched->reset.in_progress) && \
		    !panthor_device_reset_is_pending((sched)->ptdev)) \
			mod_delayed_work((sched)->wq, &(sched)->wname ## _work, delay); \
	} while (0)
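
/*
 * Example usage of the helpers above, as found in the event handlers later
 * in this file. The reference taken by group_queue_work() is dropped either
 * immediately (when the work was already queued) or at the end of the work
 * handler:
 *
 *	group_queue_work(group, sync_upd);
 *	sched_queue_delayed_work(sched, tick, 0);
 */
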
/*
 * We currently set the maximum number of groups per file to an arbitrarily
 * low value. But this can be updated if we need more.
 */
#define MAX_GROUPS_PER_POOL 128

/**
 * struct panthor_group_pool - Group pool
 *
 * Each file gets assigned a group pool.
 */
struct panthor_group_pool {
	/** @xa: Xarray used to manage group handles. */
	struct xarray xa;
};

/**
 * struct panthor_job - Used to manage GPU job
 */
struct panthor_job {
	/** @base: Inherit from drm_sched_job. */
	struct drm_sched_job base;

	/** @refcount: Reference count. */
	struct kref refcount;

	/** @group: Group of the queue this job will be pushed to. */
	struct panthor_group *group;

	/** @queue_idx: Index of the queue inside @group. */
	u32 queue_idx;

	/** @call_info: Information about the userspace command stream call. */
	struct {
		/** @start: GPU address of the userspace command stream. */
		u64 start;

		/** @size: Size of the userspace command stream. */
		u32 size;

		/**
		 * @latest_flush: Flush ID at the time the userspace command
		 * stream was built.
		 *
		 * Needed for the flush reduction mechanism.
		 */
		u32 latest_flush;
	} call_info;

	/** @ringbuf: Position of this job in the ring buffer. */
	struct {
		/** @start: Start offset. */
		u64 start;

		/** @end: End offset. */
		u64 end;
	} ringbuf;

	/**
	 * @node: Used to insert the job in the panthor_queue::fence_ctx::in_flight_jobs
	 * list.
	 */
	struct list_head node;

	/** @done_fence: Fence signaled when the job is finished or cancelled. */
	struct dma_fence *done_fence;

	/** @profiling: Job profiling information. */
	struct {
		/** @mask: Current device job profiling enablement bitmask. */
		u32 mask;

		/** @slot: Job index in the profiling slots BO. */
		u32 slot;
	} profiling;
};

static void
panthor_queue_put_syncwait_obj(struct panthor_queue *queue)
{
	if (queue->syncwait.kmap) {
		struct iosys_map map = IOSYS_MAP_INIT_VADDR(queue->syncwait.kmap);

		drm_gem_vunmap(queue->syncwait.obj, &map);
		queue->syncwait.kmap = NULL;
	}

	drm_gem_object_put(queue->syncwait.obj);
	queue->syncwait.obj = NULL;
}

static void *
panthor_queue_get_syncwait_obj(struct panthor_group *group, struct panthor_queue *queue)
{
	struct panthor_device *ptdev = group->ptdev;
	struct panthor_gem_object *bo;
	struct iosys_map map;
	int ret;

	if (queue->syncwait.kmap)
		return queue->syncwait.kmap + queue->syncwait.offset;

	bo = panthor_vm_get_bo_for_va(group->vm,
				      queue->syncwait.gpu_va,
				      &queue->syncwait.offset);
	if (drm_WARN_ON(&ptdev->base, IS_ERR_OR_NULL(bo)))
		goto err_put_syncwait_obj;

	queue->syncwait.obj = &bo->base.base;
	ret = drm_gem_vmap(queue->syncwait.obj, &map);
	if (drm_WARN_ON(&ptdev->base, ret))
		goto err_put_syncwait_obj;

	queue->syncwait.kmap = map.vaddr;
	if (drm_WARN_ON(&ptdev->base, !queue->syncwait.kmap))
		goto err_put_syncwait_obj;

	return queue->syncwait.kmap + queue->syncwait.offset;

err_put_syncwait_obj:
	panthor_queue_put_syncwait_obj(queue);
	return NULL;
}

static void group_free_queue(struct panthor_group *group, struct panthor_queue *queue)
{
	if (IS_ERR_OR_NULL(queue))
		return;

	/* This should have been disabled before that point. */
	drm_WARN_ON(&group->ptdev->base,
		    disable_delayed_work_sync(&queue->timeout.work));

	if (queue->entity.fence_context)
		drm_sched_entity_destroy(&queue->entity);

	if (queue->scheduler.ops)
		drm_sched_fini(&queue->scheduler);

	kfree(queue->name);

	panthor_queue_put_syncwait_obj(queue);

	panthor_kernel_bo_destroy(queue->ringbuf);
	panthor_kernel_bo_destroy(queue->iface.mem);
	panthor_kernel_bo_destroy(queue->profiling.slots);

	/* Release the last_fence we were holding, if any. */
	dma_fence_put(queue->fence_ctx.last_fence);

	kfree(queue);
}

static void group_release_work(struct work_struct *work)
{
	struct panthor_group *group = container_of(work,
						   struct panthor_group,
						   release_work);
	u32 i;

	for (i = 0; i < group->queue_count; i++)
		group_free_queue(group, group->queues[i]);

	panthor_kernel_bo_destroy(group->suspend_buf);
	panthor_kernel_bo_destroy(group->protm_suspend_buf);
	panthor_kernel_bo_destroy(group->syncobjs);

	panthor_vm_put(group->vm);
	kfree(group);
}

static void group_release(struct kref *kref)
{
	struct panthor_group *group = container_of(kref,
						   struct panthor_group,
						   refcount);
	struct panthor_device *ptdev = group->ptdev;

	drm_WARN_ON(&ptdev->base, group->csg_id >= 0);
	drm_WARN_ON(&ptdev->base, !list_empty(&group->run_node));
	drm_WARN_ON(&ptdev->base, !list_empty(&group->wait_node));

	queue_work(panthor_cleanup_wq, &group->release_work);
}

static void group_put(struct panthor_group *group)
{
	if (group)
		kref_put(&group->refcount, group_release);
}

static struct panthor_group *
group_get(struct panthor_group *group)
{
	if (group)
		kref_get(&group->refcount);

	return group;
}

/**
 * group_bind_locked() - Bind a group to a group slot
 * @group: Group.
 * @csg_id: Slot.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int
group_bind_locked(struct panthor_group *group, u32 csg_id)
{
	struct panthor_device *ptdev = group->ptdev;
	struct panthor_csg_slot *csg_slot;
	int ret;

	lockdep_assert_held(&ptdev->scheduler->lock);

	if (drm_WARN_ON(&ptdev->base, group->csg_id != -1 || csg_id >= MAX_CSGS ||
			ptdev->scheduler->csg_slots[csg_id].group))
		return -EINVAL;

	ret = panthor_vm_active(group->vm);
	if (ret)
		return ret;

	csg_slot = &ptdev->scheduler->csg_slots[csg_id];
	group_get(group);
	group->csg_id = csg_id;

	/* Dummy doorbell allocation: doorbell is assigned to the group and
	 * all queues use the same doorbell.
	 *
	 * TODO: Implement LRU-based doorbell assignment, so the most often
	 * updated queues get their own doorbell, thus avoiding useless checks
	 * on queues belonging to the same group that are rarely updated.
	 */
	for (u32 i = 0; i < group->queue_count; i++)
		group->queues[i]->doorbell_id = csg_id + 1;

	csg_slot->group = group;

	return 0;
}

/**
 * group_unbind_locked() - Unbind a group from a slot.
 * @group: Group to unbind.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int
group_unbind_locked(struct panthor_group *group)
{
	struct panthor_device *ptdev = group->ptdev;
	struct panthor_csg_slot *slot;

	lockdep_assert_held(&ptdev->scheduler->lock);

	if (drm_WARN_ON(&ptdev->base, group->csg_id < 0 || group->csg_id >= MAX_CSGS))
		return -EINVAL;

	if (drm_WARN_ON(&ptdev->base, group->state == PANTHOR_CS_GROUP_ACTIVE))
		return -EINVAL;

	slot = &ptdev->scheduler->csg_slots[group->csg_id];
	panthor_vm_idle(group->vm);
	group->csg_id = -1;

	/* Tiler OOM events will be re-issued next time the group is scheduled. */
	atomic_set(&group->tiler_oom, 0);
	cancel_work(&group->tiler_oom_work);

	for (u32 i = 0; i < group->queue_count; i++)
		group->queues[i]->doorbell_id = -1;

	slot->group = NULL;

	group_put(group);
	return 0;
}

static bool
group_is_idle(struct panthor_group *group)
{
	struct panthor_device *ptdev = group->ptdev;
	u32 inactive_queues;

	if (group->csg_id >= 0)
		return ptdev->scheduler->csg_slots[group->csg_id].idle;

	inactive_queues = group->idle_queues | group->blocked_queues;
	return hweight32(inactive_queues) == group->queue_count;
}

static void
queue_reset_timeout_locked(struct panthor_queue *queue)
{
	lockdep_assert_held(&queue->fence_ctx.lock);

	if (queue->timeout.remaining != MAX_SCHEDULE_TIMEOUT) {
		mod_delayed_work(queue->scheduler.timeout_wq,
				 &queue->timeout.work,
				 msecs_to_jiffies(JOB_TIMEOUT_MS));
	}
}

static bool
group_can_run(struct panthor_group *group)
{
	return group->state != PANTHOR_CS_GROUP_TERMINATED &&
	       group->state != PANTHOR_CS_GROUP_UNKNOWN_STATE &&
	       !group->destroyed && group->fatal_queues == 0 &&
	       !group->timedout;
}

static bool
queue_timeout_is_suspended(struct panthor_queue *queue)
{
	/* When running, the remaining time is set to MAX_SCHEDULE_TIMEOUT. */
	return queue->timeout.remaining != MAX_SCHEDULE_TIMEOUT;
}

static void
queue_suspend_timeout_locked(struct panthor_queue *queue)
{
	unsigned long qtimeout, now;
	struct panthor_group *group;
	struct panthor_job *job;
	bool timer_was_active;

	lockdep_assert_held(&queue->fence_ctx.lock);

	/* Already suspended, nothing to do. */
	if (queue_timeout_is_suspended(queue))
		return;

	job = list_first_entry_or_null(&queue->fence_ctx.in_flight_jobs,
				       struct panthor_job, node);
	group = job ? job->group : NULL;

	/* If the queue is blocked and the group is idle, we want the timer to
	 * keep running because the group can't be unblocked by other queues,
	 * so it has to come from an external source, and we want to timebox
	 * this external signalling.
	 */
	if (group && group_can_run(group) &&
	    (group->blocked_queues & BIT(job->queue_idx)) &&
	    group_is_idle(group))
		return;

	now = jiffies;
	qtimeout = queue->timeout.work.timer.expires;

	/* Cancel the timer. */
	timer_was_active = cancel_delayed_work(&queue->timeout.work);
	if (!timer_was_active || !job)
		queue->timeout.remaining = msecs_to_jiffies(JOB_TIMEOUT_MS);
	else if (time_after(qtimeout, now))
		queue->timeout.remaining = qtimeout - now;
	else
		queue->timeout.remaining = 0;

	if (WARN_ON_ONCE(queue->timeout.remaining > msecs_to_jiffies(JOB_TIMEOUT_MS)))
		queue->timeout.remaining = msecs_to_jiffies(JOB_TIMEOUT_MS);
}

static void
queue_suspend_timeout(struct panthor_queue *queue)
{
	spin_lock(&queue->fence_ctx.lock);
	queue_suspend_timeout_locked(queue);
	spin_unlock(&queue->fence_ctx.lock);
}

static void
queue_resume_timeout(struct panthor_queue *queue)
{
	spin_lock(&queue->fence_ctx.lock);

	if (queue_timeout_is_suspended(queue)) {
		mod_delayed_work(queue->scheduler.timeout_wq,
				 &queue->timeout.work,
				 queue->timeout.remaining);

		queue->timeout.remaining = MAX_SCHEDULE_TIMEOUT;
	}

	spin_unlock(&queue->fence_ctx.lock);
}

/**
 * cs_slot_prog_locked() - Program a queue slot
 * @ptdev: Device.
 * @csg_id: Group slot ID.
 * @cs_id: Queue slot ID.
 *
 * Program a queue slot with the queue information so things can start being
 * executed on this queue.
 *
 * The group slot must have a group bound to it already (group_bind_locked()).
 */
static void
cs_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
{
	struct panthor_queue *queue = ptdev->scheduler->csg_slots[csg_id].group->queues[cs_id];
	struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);

	lockdep_assert_held(&ptdev->scheduler->lock);

	queue->iface.input->extract = queue->iface.output->extract;
	drm_WARN_ON(&ptdev->base, queue->iface.input->insert < queue->iface.input->extract);

	cs_iface->input->ringbuf_base = panthor_kernel_bo_gpuva(queue->ringbuf);
	cs_iface->input->ringbuf_size = panthor_kernel_bo_size(queue->ringbuf);
	cs_iface->input->ringbuf_input = queue->iface.input_fw_va;
	cs_iface->input->ringbuf_output = queue->iface.output_fw_va;
	cs_iface->input->config = CS_CONFIG_PRIORITY(queue->priority) |
				  CS_CONFIG_DOORBELL(queue->doorbell_id);
	cs_iface->input->ack_irq_mask = ~0;
	panthor_fw_update_reqs(cs_iface, req,
			       CS_IDLE_SYNC_WAIT |
			       CS_IDLE_EMPTY |
			       CS_STATE_START |
			       CS_EXTRACT_EVENT,
			       CS_IDLE_SYNC_WAIT |
			       CS_IDLE_EMPTY |
			       CS_STATE_MASK |
			       CS_EXTRACT_EVENT);
	if (queue->iface.input->insert != queue->iface.input->extract)
		queue_resume_timeout(queue);
}

/**
 * cs_slot_reset_locked() - Reset a queue slot
 * @ptdev: Device.
 * @csg_id: Group slot.
 * @cs_id: Queue slot.
 *
 * Change the queue slot state to STOP and suspend the queue timeout if
 * the queue is not blocked.
 *
 * The group slot must have a group bound to it (group_bind_locked()).
 */
static int
cs_slot_reset_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
{
	struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
	struct panthor_group *group = ptdev->scheduler->csg_slots[csg_id].group;
	struct panthor_queue *queue = group->queues[cs_id];

	lockdep_assert_held(&ptdev->scheduler->lock);

	panthor_fw_update_reqs(cs_iface, req,
			       CS_STATE_STOP,
			       CS_STATE_MASK);

	queue_suspend_timeout(queue);

	return 0;
}

/**
 * csg_slot_sync_priority_locked() - Synchronize the group slot priority
 * @ptdev: Device.
 * @csg_id: Group slot ID.
 *
 * Group slot priority update happens asynchronously. When we receive a
 * %CSG_ENDPOINT_CONFIG, we know the update is effective, and can
 * reflect it to our panthor_csg_slot object.
 */
static void
csg_slot_sync_priority_locked(struct panthor_device *ptdev, u32 csg_id)
{
	struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
	struct panthor_fw_csg_iface *csg_iface;
	u64 endpoint_req;

	lockdep_assert_held(&ptdev->scheduler->lock);

	csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
	endpoint_req = panthor_fw_csg_endpoint_req_get(ptdev, csg_iface);
	csg_slot->priority = CSG_EP_REQ_PRIORITY_GET(endpoint_req);
}

/**
 * cs_slot_sync_queue_state_locked() - Synchronize the queue slot state
 * @ptdev: Device.
 * @csg_id: Group slot.
 * @cs_id: Queue slot.
 *
 * Queue state is updated on group suspend or STATUS_UPDATE event.
 */
static void
cs_slot_sync_queue_state_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
{
	struct panthor_group *group = ptdev->scheduler->csg_slots[csg_id].group;
	struct panthor_queue *queue = group->queues[cs_id];
	struct panthor_fw_cs_iface *cs_iface =
		panthor_fw_get_cs_iface(group->ptdev, csg_id, cs_id);

	u32 status_wait_cond;

	switch (cs_iface->output->status_blocked_reason) {
	case CS_STATUS_BLOCKED_REASON_UNBLOCKED:
		if (queue->iface.input->insert == queue->iface.output->extract &&
		    cs_iface->output->status_scoreboards == 0)
			group->idle_queues |= BIT(cs_id);
		break;

	case CS_STATUS_BLOCKED_REASON_SYNC_WAIT:
		if (list_empty(&group->wait_node)) {
			list_move_tail(&group->wait_node,
				       &group->ptdev->scheduler->groups.waiting);
		}

		/* The queue is only blocked if there's no deferred operation
		 * pending, which can be checked through the scoreboard status.
		 */
		if (!cs_iface->output->status_scoreboards)
			group->blocked_queues |= BIT(cs_id);

		queue->syncwait.gpu_va = cs_iface->output->status_wait_sync_ptr;
		queue->syncwait.ref = cs_iface->output->status_wait_sync_value;
		status_wait_cond = cs_iface->output->status_wait & CS_STATUS_WAIT_SYNC_COND_MASK;
		queue->syncwait.gt = status_wait_cond == CS_STATUS_WAIT_SYNC_COND_GT;
		if (cs_iface->output->status_wait & CS_STATUS_WAIT_SYNC_64B) {
			u64 sync_val_hi = cs_iface->output->status_wait_sync_value_hi;

			queue->syncwait.sync64 = true;
			queue->syncwait.ref |= sync_val_hi << 32;
		} else {
			queue->syncwait.sync64 = false;
		}
		break;

	default:
		/* Other reasons are not blocking. Consider the queue as runnable
		 * in those cases.
		 */
		break;
	}
}

static void
csg_slot_sync_queues_state_locked(struct panthor_device *ptdev, u32 csg_id)
{
	struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
	struct panthor_group *group = csg_slot->group;
	u32 i;

	lockdep_assert_held(&ptdev->scheduler->lock);

	group->idle_queues = 0;
	group->blocked_queues = 0;

	for (i = 0; i < group->queue_count; i++) {
		if (group->queues[i])
			cs_slot_sync_queue_state_locked(ptdev, csg_id, i);
	}
}

static void
csg_slot_sync_state_locked(struct panthor_device *ptdev, u32 csg_id)
{
	struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
	struct panthor_fw_csg_iface *csg_iface;
	struct panthor_group *group;
	enum panthor_group_state new_state, old_state;
	u32 csg_state;

	lockdep_assert_held(&ptdev->scheduler->lock);

	csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
	group = csg_slot->group;

	if (!group)
		return;

	old_state = group->state;
	csg_state = csg_iface->output->ack & CSG_STATE_MASK;
	switch (csg_state) {
	case CSG_STATE_START:
	case CSG_STATE_RESUME:
		new_state = PANTHOR_CS_GROUP_ACTIVE;
		break;
	case CSG_STATE_TERMINATE:
		new_state = PANTHOR_CS_GROUP_TERMINATED;
		break;
	case CSG_STATE_SUSPEND:
		new_state = PANTHOR_CS_GROUP_SUSPENDED;
		break;
	default:
		/* The unknown state might be caused by a FW state corruption,
		 * which means the group metadata can't be trusted anymore, and
		 * the SUSPEND operation might propagate the corruption to the
		 * suspend buffers. Flag the group state as unknown to make
		 * sure it's unusable after that point.
		 */
		drm_err(&ptdev->base, "Invalid state on CSG %d (state=%d)",
			csg_id, csg_state);
		new_state = PANTHOR_CS_GROUP_UNKNOWN_STATE;
		break;
	}

	if (old_state == new_state)
		return;

	/* The unknown state might be caused by a FW issue, reset the FW to
	 * take a fresh start.
	 */
	if (new_state == PANTHOR_CS_GROUP_UNKNOWN_STATE)
		panthor_device_schedule_reset(ptdev);

	if (new_state == PANTHOR_CS_GROUP_SUSPENDED)
		csg_slot_sync_queues_state_locked(ptdev, csg_id);

	if (old_state == PANTHOR_CS_GROUP_ACTIVE) {
		u32 i;

		/* Reset the queue slots so we start from a clean
		 * state when starting/resuming a new group on this
		 * CSG slot. No wait needed here, and no doorbell ring
		 * either, since the CS slot will only be re-used
		 * on the next CSG start operation.
		 */
		for (i = 0; i < group->queue_count; i++) {
			if (group->queues[i])
				cs_slot_reset_locked(ptdev, csg_id, i);
		}
	}

	group->state = new_state;
}

static int
csg_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 priority)
{
	struct panthor_fw_csg_iface *csg_iface;
	struct panthor_csg_slot *csg_slot;
	struct panthor_group *group;
	u32 queue_mask = 0, i;
	u64 endpoint_req;

	lockdep_assert_held(&ptdev->scheduler->lock);

	if (priority > MAX_CSG_PRIO)
		return -EINVAL;

	if (drm_WARN_ON(&ptdev->base, csg_id >= MAX_CSGS))
		return -EINVAL;

	csg_slot = &ptdev->scheduler->csg_slots[csg_id];
	group = csg_slot->group;
	if (!group || group->state == PANTHOR_CS_GROUP_ACTIVE)
		return 0;

	csg_iface = panthor_fw_get_csg_iface(group->ptdev, csg_id);

	for (i = 0; i < group->queue_count; i++) {
		if (group->queues[i]) {
			cs_slot_prog_locked(ptdev, csg_id, i);
			queue_mask |= BIT(i);
		}
	}

	csg_iface->input->allow_compute = group->compute_core_mask;
	csg_iface->input->allow_fragment = group->fragment_core_mask;
	csg_iface->input->allow_other = group->tiler_core_mask;
	endpoint_req = CSG_EP_REQ_COMPUTE(group->max_compute_cores) |
		       CSG_EP_REQ_FRAGMENT(group->max_fragment_cores) |
		       CSG_EP_REQ_TILER(group->max_tiler_cores) |
		       CSG_EP_REQ_PRIORITY(priority);
	panthor_fw_csg_endpoint_req_set(ptdev, csg_iface, endpoint_req);

	csg_iface->input->config = panthor_vm_as(group->vm);

	if (group->suspend_buf)
		csg_iface->input->suspend_buf = panthor_kernel_bo_gpuva(group->suspend_buf);
	else
		csg_iface->input->suspend_buf = 0;

	if (group->protm_suspend_buf) {
		csg_iface->input->protm_suspend_buf =
			panthor_kernel_bo_gpuva(group->protm_suspend_buf);
	} else {
		csg_iface->input->protm_suspend_buf = 0;
	}

	csg_iface->input->ack_irq_mask = ~0;
	panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, queue_mask);
	return 0;
}

static void
cs_slot_process_fatal_event_locked(struct panthor_device *ptdev,
				   u32 csg_id, u32 cs_id)
{
	struct panthor_scheduler *sched = ptdev->scheduler;
	struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
	struct panthor_group *group = csg_slot->group;
	struct panthor_fw_cs_iface *cs_iface;
	u32 fatal;
	u64 info;

	lockdep_assert_held(&sched->lock);

	cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
	fatal = cs_iface->output->fatal;
	info = cs_iface->output->fatal_info;

	if (group) {
		drm_warn(&ptdev->base, "CS_FATAL: pid=%d, comm=%s\n",
			 group->task_info.pid, group->task_info.comm);

		group->fatal_queues |= BIT(cs_id);
	}

	if (CS_EXCEPTION_TYPE(fatal) == DRM_PANTHOR_EXCEPTION_CS_UNRECOVERABLE) {
		/* If this exception is unrecoverable, queue a reset, and make
		 * sure we stop scheduling groups until the reset has happened.
		 */
		panthor_device_schedule_reset(ptdev);
		cancel_delayed_work(&sched->tick_work);
	} else {
		sched_queue_delayed_work(sched, tick, 0);
	}

	drm_warn(&ptdev->base,
		 "CSG slot %d CS slot: %d\n"
		 "CS_FATAL.EXCEPTION_TYPE: 0x%x (%s)\n"
		 "CS_FATAL.EXCEPTION_DATA: 0x%x\n"
		 "CS_FATAL_INFO.EXCEPTION_DATA: 0x%llx\n",
		 csg_id, cs_id,
		 (unsigned int)CS_EXCEPTION_TYPE(fatal),
		 panthor_exception_name(ptdev, CS_EXCEPTION_TYPE(fatal)),
		 (unsigned int)CS_EXCEPTION_DATA(fatal),
		 info);
}

static void
cs_slot_process_fault_event_locked(struct panthor_device *ptdev,
				   u32 csg_id, u32 cs_id)
{
	struct panthor_scheduler *sched = ptdev->scheduler;
	struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
	struct panthor_group *group = csg_slot->group;
	struct panthor_queue *queue = group && cs_id < group->queue_count ?
				      group->queues[cs_id] : NULL;
	struct panthor_fw_cs_iface *cs_iface;
	u32 fault;
	u64 info;

	lockdep_assert_held(&sched->lock);

	cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
	fault = cs_iface->output->fault;
	info = cs_iface->output->fault_info;

	if (queue) {
		u64 cs_extract = queue->iface.output->extract;
		struct panthor_job *job;

		spin_lock(&queue->fence_ctx.lock);
		list_for_each_entry(job, &queue->fence_ctx.in_flight_jobs, node) {
			if (cs_extract >= job->ringbuf.end)
				continue;

			if (cs_extract < job->ringbuf.start)
				break;

			dma_fence_set_error(job->done_fence, -EINVAL);
		}
		spin_unlock(&queue->fence_ctx.lock);
	}

	if (group) {
		drm_warn(&ptdev->base, "CS_FAULT: pid=%d, comm=%s\n",
			 group->task_info.pid, group->task_info.comm);
	}

	drm_warn(&ptdev->base,
		 "CSG slot %d CS slot: %d\n"
		 "CS_FAULT.EXCEPTION_TYPE: 0x%x (%s)\n"
		 "CS_FAULT.EXCEPTION_DATA: 0x%x\n"
		 "CS_FAULT_INFO.EXCEPTION_DATA: 0x%llx\n",
		 csg_id, cs_id,
		 (unsigned int)CS_EXCEPTION_TYPE(fault),
		 panthor_exception_name(ptdev, CS_EXCEPTION_TYPE(fault)),
		 (unsigned int)CS_EXCEPTION_DATA(fault),
		 info);
}

static int group_process_tiler_oom(struct panthor_group *group, u32 cs_id)
{
	struct panthor_device *ptdev = group->ptdev;
	struct panthor_scheduler *sched = ptdev->scheduler;
	u32 renderpasses_in_flight, pending_frag_count;
	struct panthor_heap_pool *heaps = NULL;
	u64 heap_address, new_chunk_va = 0;
	u32 vt_start, vt_end, frag_end;
	int ret, csg_id;

	mutex_lock(&sched->lock);
	csg_id = group->csg_id;
	if (csg_id >= 0) {
		struct panthor_fw_cs_iface *cs_iface;

		cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
		heaps = panthor_vm_get_heap_pool(group->vm, false);
		heap_address = cs_iface->output->heap_address;
		vt_start = cs_iface->output->heap_vt_start;
		vt_end = cs_iface->output->heap_vt_end;
		frag_end = cs_iface->output->heap_frag_end;
		renderpasses_in_flight = vt_start - frag_end;
		pending_frag_count = vt_end - frag_end;
	}
	mutex_unlock(&sched->lock);

	/* The group got scheduled out, we stop here. We will get a new tiler OOM event
	 * when it's scheduled again.
	 */
	if (unlikely(csg_id < 0))
		return 0;

	if (IS_ERR(heaps) || frag_end > vt_end || vt_end >= vt_start) {
		ret = -EINVAL;
	} else {
		/* We do the allocation without holding the scheduler lock to avoid
		 * blocking the scheduling.
		 */
		ret = panthor_heap_grow(heaps, heap_address,
					renderpasses_in_flight,
					pending_frag_count, &new_chunk_va);
	}

	/* If the heap context doesn't have memory for us, we want to let the
	 * FW try to reclaim memory by waiting for fragment jobs to land or by
	 * executing the tiler OOM exception handler, which is supposed to
	 * implement incremental rendering.
	 */
	if (ret && ret != -ENOMEM) {
		drm_warn(&ptdev->base, "Failed to extend the tiler heap\n");
		group->fatal_queues |= BIT(cs_id);
		sched_queue_delayed_work(sched, tick, 0);
		goto out_put_heap_pool;
	}

	mutex_lock(&sched->lock);
	csg_id = group->csg_id;
	if (csg_id >= 0) {
		struct panthor_fw_csg_iface *csg_iface;
		struct panthor_fw_cs_iface *cs_iface;

		csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
		cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);

		cs_iface->input->heap_start = new_chunk_va;
		cs_iface->input->heap_end = new_chunk_va;
		panthor_fw_update_reqs(cs_iface, req, cs_iface->output->ack, CS_TILER_OOM);
		panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, BIT(cs_id));
		panthor_fw_ring_csg_doorbells(ptdev, BIT(csg_id));
	}
	mutex_unlock(&sched->lock);

	/* We allocated a chunk, but couldn't link it to the heap
	 * context because the group was scheduled out while we were
	 * allocating memory. We need to return this chunk to the heap.
	 */
	if (unlikely(csg_id < 0 && new_chunk_va))
		panthor_heap_return_chunk(heaps, heap_address, new_chunk_va);

	ret = 0;

out_put_heap_pool:
	panthor_heap_pool_put(heaps);
	return ret;
}

static void group_tiler_oom_work(struct work_struct *work)
{
	struct panthor_group *group =
		container_of(work, struct panthor_group, tiler_oom_work);
	u32 tiler_oom = atomic_xchg(&group->tiler_oom, 0);

	while (tiler_oom) {
		u32 cs_id = ffs(tiler_oom) - 1;

		group_process_tiler_oom(group, cs_id);
		tiler_oom &= ~BIT(cs_id);
	}

	group_put(group);
}

static void
cs_slot_process_tiler_oom_event_locked(struct panthor_device *ptdev,
				       u32 csg_id, u32 cs_id)
{
	struct panthor_scheduler *sched = ptdev->scheduler;
	struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
	struct panthor_group *group = csg_slot->group;

	lockdep_assert_held(&sched->lock);

	if (drm_WARN_ON(&ptdev->base, !group))
		return;

	atomic_or(BIT(cs_id), &group->tiler_oom);

	/* We don't use group_queue_work() here because we want to queue the
	 * work item to the heap_alloc_wq.
	 */
	group_get(group);
	if (!queue_work(sched->heap_alloc_wq, &group->tiler_oom_work))
		group_put(group);
}

static bool cs_slot_process_irq_locked(struct panthor_device *ptdev,
				       u32 csg_id, u32 cs_id)
{
	struct panthor_fw_cs_iface *cs_iface;
	u32 req, ack, events;

	lockdep_assert_held(&ptdev->scheduler->lock);

	cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
	req = cs_iface->input->req;
	ack = cs_iface->output->ack;
	events = (req ^ ack) & CS_EVT_MASK;

	if (events & CS_FATAL)
		cs_slot_process_fatal_event_locked(ptdev, csg_id, cs_id);

	if (events & CS_FAULT)
		cs_slot_process_fault_event_locked(ptdev, csg_id, cs_id);

	if (events & CS_TILER_OOM)
		cs_slot_process_tiler_oom_event_locked(ptdev, csg_id, cs_id);

	/* We don't acknowledge the TILER_OOM event since its handling is
	 * deferred to a separate work.
	 */
	panthor_fw_update_reqs(cs_iface, req, ack, CS_FATAL | CS_FAULT);

	return (events & (CS_FAULT | CS_TILER_OOM)) != 0;
}

static void csg_slot_sync_idle_state_locked(struct panthor_device *ptdev, u32 csg_id)
{
	struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
	struct panthor_fw_csg_iface *csg_iface;

	lockdep_assert_held(&ptdev->scheduler->lock);

	csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
	csg_slot->idle = csg_iface->output->status_state & CSG_STATUS_STATE_IS_IDLE;
}

static void csg_slot_process_idle_event_locked(struct panthor_device *ptdev, u32 csg_id)
{
	struct panthor_scheduler *sched = ptdev->scheduler;

	lockdep_assert_held(&sched->lock);

	sched->might_have_idle_groups = true;

	/* Schedule a tick so we can evict idle groups and schedule non-idle
	 * ones. This will also update runtime PM and devfreq busy/idle states,
	 * so the device can lower its frequency or get suspended.
	 */
	sched_queue_delayed_work(sched, tick, 0);
}

static void csg_slot_sync_update_locked(struct panthor_device *ptdev,
					u32 csg_id)
{
	struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
	struct panthor_group *group = csg_slot->group;

	lockdep_assert_held(&ptdev->scheduler->lock);

	if (group)
		group_queue_work(group, sync_upd);

	sched_queue_work(ptdev->scheduler, sync_upd);
}

static void
csg_slot_process_progress_timer_event_locked(struct panthor_device *ptdev, u32 csg_id)
{
	struct panthor_scheduler *sched = ptdev->scheduler;
	struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
	struct panthor_group *group = csg_slot->group;

	lockdep_assert_held(&sched->lock);

	group = csg_slot->group;
	if (!drm_WARN_ON(&ptdev->base, !group)) {
		drm_warn(&ptdev->base, "CSG_PROGRESS_TIMER_EVENT: pid=%d, comm=%s\n",
			 group->task_info.pid, group->task_info.comm);

		group->timedout = true;
	}

	drm_warn(&ptdev->base, "CSG slot %d progress timeout\n", csg_id);

	sched_queue_delayed_work(sched, tick, 0);
}

static void sched_process_csg_irq_locked(struct panthor_device *ptdev, u32 csg_id)
{
	u32 req, ack, cs_irq_req, cs_irq_ack, cs_irqs, csg_events;
	struct panthor_fw_csg_iface *csg_iface;
	u32 ring_cs_db_mask = 0;

	lockdep_assert_held(&ptdev->scheduler->lock);

	if (drm_WARN_ON(&ptdev->base, csg_id >= ptdev->scheduler->csg_slot_count))
		return;

	csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
	req = READ_ONCE(csg_iface->input->req);
	ack = READ_ONCE(csg_iface->output->ack);
	cs_irq_req = READ_ONCE(csg_iface->output->cs_irq_req);
	cs_irq_ack = READ_ONCE(csg_iface->input->cs_irq_ack);
	csg_events = (req ^ ack) & CSG_EVT_MASK;

	/* There may not be any pending CSG/CS interrupts to process */
	if (req == ack && cs_irq_req == cs_irq_ack)
		return;

	/* Immediately set IRQ_ACK bits to be same as the IRQ_REQ bits before
	 * examining the CS_ACK & CS_REQ bits. This would ensure that Host
	 * doesn't miss an interrupt for the CS in the race scenario where
	 * whilst Host is servicing an interrupt for the CS, firmware sends
	 * another interrupt for that CS.
1811 */ 1812 csg_iface->input->cs_irq_ack = cs_irq_req; 1813 1814 panthor_fw_update_reqs(csg_iface, req, ack, 1815 CSG_SYNC_UPDATE | 1816 CSG_IDLE | 1817 CSG_PROGRESS_TIMER_EVENT); 1818 1819 if (csg_events & CSG_IDLE) 1820 csg_slot_process_idle_event_locked(ptdev, csg_id); 1821 1822 if (csg_events & CSG_PROGRESS_TIMER_EVENT) 1823 csg_slot_process_progress_timer_event_locked(ptdev, csg_id); 1824 1825 cs_irqs = cs_irq_req ^ cs_irq_ack; 1826 while (cs_irqs) { 1827 u32 cs_id = ffs(cs_irqs) - 1; 1828 1829 if (cs_slot_process_irq_locked(ptdev, csg_id, cs_id)) 1830 ring_cs_db_mask |= BIT(cs_id); 1831 1832 cs_irqs &= ~BIT(cs_id); 1833 } 1834 1835 if (csg_events & CSG_SYNC_UPDATE) 1836 csg_slot_sync_update_locked(ptdev, csg_id); 1837 1838 if (ring_cs_db_mask) 1839 panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, ring_cs_db_mask); 1840 1841 panthor_fw_ring_csg_doorbells(ptdev, BIT(csg_id)); 1842 } 1843 1844 static void sched_process_idle_event_locked(struct panthor_device *ptdev) 1845 { 1846 struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev); 1847 1848 lockdep_assert_held(&ptdev->scheduler->lock); 1849 1850 /* Acknowledge the idle event and schedule a tick. */ 1851 panthor_fw_update_reqs(glb_iface, req, glb_iface->output->ack, GLB_IDLE); 1852 sched_queue_delayed_work(ptdev->scheduler, tick, 0); 1853 } 1854 1855 /** 1856 * sched_process_global_irq_locked() - Process the scheduling part of a global IRQ 1857 * @ptdev: Device. 1858 */ 1859 static void sched_process_global_irq_locked(struct panthor_device *ptdev) 1860 { 1861 struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev); 1862 u32 req, ack, evts; 1863 1864 lockdep_assert_held(&ptdev->scheduler->lock); 1865 1866 req = READ_ONCE(glb_iface->input->req); 1867 ack = READ_ONCE(glb_iface->output->ack); 1868 evts = (req ^ ack) & GLB_EVT_MASK; 1869 1870 if (evts & GLB_IDLE) 1871 sched_process_idle_event_locked(ptdev); 1872 } 1873 1874 static void process_fw_events_work(struct work_struct *work) 1875 { 1876 struct panthor_scheduler *sched = container_of(work, struct panthor_scheduler, 1877 fw_events_work); 1878 u32 events = atomic_xchg(&sched->fw_events, 0); 1879 struct panthor_device *ptdev = sched->ptdev; 1880 1881 mutex_lock(&sched->lock); 1882 1883 if (events & JOB_INT_GLOBAL_IF) { 1884 sched_process_global_irq_locked(ptdev); 1885 events &= ~JOB_INT_GLOBAL_IF; 1886 } 1887 1888 while (events) { 1889 u32 csg_id = ffs(events) - 1; 1890 1891 sched_process_csg_irq_locked(ptdev, csg_id); 1892 events &= ~BIT(csg_id); 1893 } 1894 1895 mutex_unlock(&sched->lock); 1896 } 1897 1898 /** 1899 * panthor_sched_report_fw_events() - Report FW events to the scheduler. 
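 * @ptdev: Device.
 * @events: Events to report. JOB_INT_GLOBAL_IF flags a global interface
 * event; the remaining bits identify the CSG slots whose interface raised
 * an event.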
1900 */ 1901 void panthor_sched_report_fw_events(struct panthor_device *ptdev, u32 events) 1902 { 1903 if (!ptdev->scheduler) 1904 return; 1905 1906 atomic_or(events, &ptdev->scheduler->fw_events); 1907 sched_queue_work(ptdev->scheduler, fw_events); 1908 } 1909 1910 static const char *fence_get_driver_name(struct dma_fence *fence) 1911 { 1912 return "panthor"; 1913 } 1914 1915 static const char *queue_fence_get_timeline_name(struct dma_fence *fence) 1916 { 1917 return "queue-fence"; 1918 } 1919 1920 static const struct dma_fence_ops panthor_queue_fence_ops = { 1921 .get_driver_name = fence_get_driver_name, 1922 .get_timeline_name = queue_fence_get_timeline_name, 1923 }; 1924 1925 struct panthor_csg_slots_upd_ctx { 1926 u32 update_mask; 1927 u32 timedout_mask; 1928 struct { 1929 u32 value; 1930 u32 mask; 1931 } requests[MAX_CSGS]; 1932 }; 1933 1934 static void csgs_upd_ctx_init(struct panthor_csg_slots_upd_ctx *ctx) 1935 { 1936 memset(ctx, 0, sizeof(*ctx)); 1937 } 1938 1939 static void csgs_upd_ctx_queue_reqs(struct panthor_device *ptdev, 1940 struct panthor_csg_slots_upd_ctx *ctx, 1941 u32 csg_id, u32 value, u32 mask) 1942 { 1943 if (drm_WARN_ON(&ptdev->base, !mask) || 1944 drm_WARN_ON(&ptdev->base, csg_id >= ptdev->scheduler->csg_slot_count)) 1945 return; 1946 1947 ctx->requests[csg_id].value = (ctx->requests[csg_id].value & ~mask) | (value & mask); 1948 ctx->requests[csg_id].mask |= mask; 1949 ctx->update_mask |= BIT(csg_id); 1950 } 1951 1952 static int csgs_upd_ctx_apply_locked(struct panthor_device *ptdev, 1953 struct panthor_csg_slots_upd_ctx *ctx) 1954 { 1955 struct panthor_scheduler *sched = ptdev->scheduler; 1956 u32 update_slots = ctx->update_mask; 1957 1958 lockdep_assert_held(&sched->lock); 1959 1960 if (!ctx->update_mask) 1961 return 0; 1962 1963 while (update_slots) { 1964 struct panthor_fw_csg_iface *csg_iface; 1965 u32 csg_id = ffs(update_slots) - 1; 1966 1967 update_slots &= ~BIT(csg_id); 1968 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); 1969 panthor_fw_update_reqs(csg_iface, req, 1970 ctx->requests[csg_id].value, 1971 ctx->requests[csg_id].mask); 1972 } 1973 1974 panthor_fw_ring_csg_doorbells(ptdev, ctx->update_mask); 1975 1976 update_slots = ctx->update_mask; 1977 while (update_slots) { 1978 struct panthor_fw_csg_iface *csg_iface; 1979 u32 csg_id = ffs(update_slots) - 1; 1980 u32 req_mask = ctx->requests[csg_id].mask, acked; 1981 int ret; 1982 1983 update_slots &= ~BIT(csg_id); 1984 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); 1985 1986 ret = panthor_fw_csg_wait_acks(ptdev, csg_id, req_mask, &acked, 100); 1987 1988 if (acked & CSG_ENDPOINT_CONFIG) 1989 csg_slot_sync_priority_locked(ptdev, csg_id); 1990 1991 if (acked & CSG_STATE_MASK) 1992 csg_slot_sync_state_locked(ptdev, csg_id); 1993 1994 if (acked & CSG_STATUS_UPDATE) { 1995 csg_slot_sync_queues_state_locked(ptdev, csg_id); 1996 csg_slot_sync_idle_state_locked(ptdev, csg_id); 1997 } 1998 1999 if (ret && acked != req_mask && 2000 ((csg_iface->input->req ^ csg_iface->output->ack) & req_mask) != 0) { 2001 drm_err(&ptdev->base, "CSG %d update request timedout", csg_id); 2002 ctx->timedout_mask |= BIT(csg_id); 2003 } 2004 } 2005 2006 if (ctx->timedout_mask) 2007 return -ETIMEDOUT; 2008 2009 return 0; 2010 } 2011 2012 struct panthor_sched_tick_ctx { 2013 struct list_head old_groups[PANTHOR_CSG_PRIORITY_COUNT]; 2014 struct list_head groups[PANTHOR_CSG_PRIORITY_COUNT]; 2015 u32 idle_group_count; 2016 u32 group_count; 2017 enum panthor_csg_priority min_priority; 2018 struct panthor_vm *vms[MAX_CS_PER_CSG]; 2019 
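/* Number of entries used in vms[]: one per distinct VM (GPU address
 * space) referenced by the groups picked so far.
 */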
u32 as_count; 2020 bool immediate_tick; 2021 u32 csg_upd_failed_mask; 2022 }; 2023 2024 static bool 2025 tick_ctx_is_full(const struct panthor_scheduler *sched, 2026 const struct panthor_sched_tick_ctx *ctx) 2027 { 2028 return ctx->group_count == sched->csg_slot_count; 2029 } 2030 2031 static void 2032 tick_ctx_pick_groups_from_list(const struct panthor_scheduler *sched, 2033 struct panthor_sched_tick_ctx *ctx, 2034 struct list_head *queue, 2035 bool skip_idle_groups, 2036 bool owned_by_tick_ctx) 2037 { 2038 struct panthor_group *group, *tmp; 2039 2040 if (tick_ctx_is_full(sched, ctx)) 2041 return; 2042 2043 list_for_each_entry_safe(group, tmp, queue, run_node) { 2044 u32 i; 2045 2046 if (!group_can_run(group)) 2047 continue; 2048 2049 if (skip_idle_groups && group_is_idle(group)) 2050 continue; 2051 2052 for (i = 0; i < ctx->as_count; i++) { 2053 if (ctx->vms[i] == group->vm) 2054 break; 2055 } 2056 2057 if (i == ctx->as_count && ctx->as_count == sched->as_slot_count) 2058 continue; 2059 2060 if (!owned_by_tick_ctx) 2061 group_get(group); 2062 2063 list_move_tail(&group->run_node, &ctx->groups[group->priority]); 2064 ctx->group_count++; 2065 if (group_is_idle(group)) 2066 ctx->idle_group_count++; 2067 2068 if (i == ctx->as_count) 2069 ctx->vms[ctx->as_count++] = group->vm; 2070 2071 if (ctx->min_priority > group->priority) 2072 ctx->min_priority = group->priority; 2073 2074 if (tick_ctx_is_full(sched, ctx)) 2075 return; 2076 } 2077 } 2078 2079 static void 2080 tick_ctx_insert_old_group(struct panthor_scheduler *sched, 2081 struct panthor_sched_tick_ctx *ctx, 2082 struct panthor_group *group, 2083 bool full_tick) 2084 { 2085 struct panthor_csg_slot *csg_slot = &sched->csg_slots[group->csg_id]; 2086 struct panthor_group *other_group; 2087 2088 if (!full_tick) { 2089 list_add_tail(&group->run_node, &ctx->old_groups[group->priority]); 2090 return; 2091 } 2092 2093 /* Rotate to make sure groups with lower CSG slot 2094 * priorities have a chance to get a higher CSG slot 2095 * priority next time they get picked. This priority 2096 * has an impact on resource request ordering, so it's 2097 * important to make sure we don't let one group starve 2098 * all other groups with the same group priority. 
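 *
 * Concretely, the group is inserted before the first entry that held a
 * strictly higher CSG slot priority, which keeps the list ordered by
 * ascending slot priority. Slots are re-assigned in list order on the
 * next tick, so groups that previously held a low slot priority get a
 * high one, and vice versa.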
2099 */ 2100 list_for_each_entry(other_group, 2101 &ctx->old_groups[csg_slot->group->priority], 2102 run_node) { 2103 struct panthor_csg_slot *other_csg_slot = &sched->csg_slots[other_group->csg_id]; 2104 2105 if (other_csg_slot->priority > csg_slot->priority) { 2106 list_add_tail(&csg_slot->group->run_node, &other_group->run_node); 2107 return; 2108 } 2109 } 2110 2111 list_add_tail(&group->run_node, &ctx->old_groups[group->priority]); 2112 } 2113 2114 static void 2115 tick_ctx_init(struct panthor_scheduler *sched, 2116 struct panthor_sched_tick_ctx *ctx, 2117 bool full_tick) 2118 { 2119 struct panthor_device *ptdev = sched->ptdev; 2120 struct panthor_csg_slots_upd_ctx upd_ctx; 2121 int ret; 2122 u32 i; 2123 2124 memset(ctx, 0, sizeof(*ctx)); 2125 csgs_upd_ctx_init(&upd_ctx); 2126 2127 ctx->min_priority = PANTHOR_CSG_PRIORITY_COUNT; 2128 for (i = 0; i < ARRAY_SIZE(ctx->groups); i++) { 2129 INIT_LIST_HEAD(&ctx->groups[i]); 2130 INIT_LIST_HEAD(&ctx->old_groups[i]); 2131 } 2132 2133 for (i = 0; i < sched->csg_slot_count; i++) { 2134 struct panthor_csg_slot *csg_slot = &sched->csg_slots[i]; 2135 struct panthor_group *group = csg_slot->group; 2136 struct panthor_fw_csg_iface *csg_iface; 2137 2138 if (!group) 2139 continue; 2140 2141 csg_iface = panthor_fw_get_csg_iface(ptdev, i); 2142 group_get(group); 2143 2144 /* If there was unhandled faults on the VM, force processing of 2145 * CSG IRQs, so we can flag the faulty queue. 2146 */ 2147 if (panthor_vm_has_unhandled_faults(group->vm)) { 2148 sched_process_csg_irq_locked(ptdev, i); 2149 2150 /* No fatal fault reported, flag all queues as faulty. */ 2151 if (!group->fatal_queues) 2152 group->fatal_queues |= GENMASK(group->queue_count - 1, 0); 2153 } 2154 2155 tick_ctx_insert_old_group(sched, ctx, group, full_tick); 2156 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, i, 2157 csg_iface->output->ack ^ CSG_STATUS_UPDATE, 2158 CSG_STATUS_UPDATE); 2159 } 2160 2161 ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx); 2162 if (ret) { 2163 panthor_device_schedule_reset(ptdev); 2164 ctx->csg_upd_failed_mask |= upd_ctx.timedout_mask; 2165 } 2166 } 2167 2168 static void 2169 group_term_post_processing(struct panthor_group *group) 2170 { 2171 struct panthor_job *job, *tmp; 2172 LIST_HEAD(faulty_jobs); 2173 bool cookie; 2174 u32 i = 0; 2175 2176 if (drm_WARN_ON(&group->ptdev->base, group_can_run(group))) 2177 return; 2178 2179 cookie = dma_fence_begin_signalling(); 2180 for (i = 0; i < group->queue_count; i++) { 2181 struct panthor_queue *queue = group->queues[i]; 2182 struct panthor_syncobj_64b *syncobj; 2183 int err; 2184 2185 if (group->fatal_queues & BIT(i)) 2186 err = -EINVAL; 2187 else if (group->timedout) 2188 err = -ETIMEDOUT; 2189 else 2190 err = -ECANCELED; 2191 2192 if (!queue) 2193 continue; 2194 2195 spin_lock(&queue->fence_ctx.lock); 2196 list_for_each_entry_safe(job, tmp, &queue->fence_ctx.in_flight_jobs, node) { 2197 list_move_tail(&job->node, &faulty_jobs); 2198 dma_fence_set_error(job->done_fence, err); 2199 dma_fence_signal_locked(job->done_fence); 2200 } 2201 spin_unlock(&queue->fence_ctx.lock); 2202 2203 /* Manually update the syncobj seqno to unblock waiters. 
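 * The seqno is forced to the last allocated fence seqno, so anything
 * still waiting on this syncobj sees its wait condition satisfied.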
*/ 2204 syncobj = group->syncobjs->kmap + (i * sizeof(*syncobj)); 2205 syncobj->status = ~0; 2206 syncobj->seqno = atomic64_read(&queue->fence_ctx.seqno); 2207 sched_queue_work(group->ptdev->scheduler, sync_upd); 2208 } 2209 dma_fence_end_signalling(cookie); 2210 2211 list_for_each_entry_safe(job, tmp, &faulty_jobs, node) { 2212 list_del_init(&job->node); 2213 panthor_job_put(&job->base); 2214 } 2215 } 2216 2217 static void group_term_work(struct work_struct *work) 2218 { 2219 struct panthor_group *group = 2220 container_of(work, struct panthor_group, term_work); 2221 2222 group_term_post_processing(group); 2223 group_put(group); 2224 } 2225 2226 static void 2227 tick_ctx_cleanup(struct panthor_scheduler *sched, 2228 struct panthor_sched_tick_ctx *ctx) 2229 { 2230 struct panthor_device *ptdev = sched->ptdev; 2231 struct panthor_group *group, *tmp; 2232 u32 i; 2233 2234 for (i = 0; i < ARRAY_SIZE(ctx->old_groups); i++) { 2235 list_for_each_entry_safe(group, tmp, &ctx->old_groups[i], run_node) { 2236 /* If everything went fine, we should only have groups 2237 * to be terminated in the old_groups lists. 2238 */ 2239 drm_WARN_ON(&ptdev->base, !ctx->csg_upd_failed_mask && 2240 group_can_run(group)); 2241 2242 if (!group_can_run(group)) { 2243 list_del_init(&group->run_node); 2244 list_del_init(&group->wait_node); 2245 group_queue_work(group, term); 2246 } else if (group->csg_id >= 0) { 2247 list_del_init(&group->run_node); 2248 } else { 2249 list_move(&group->run_node, 2250 group_is_idle(group) ? 2251 &sched->groups.idle[group->priority] : 2252 &sched->groups.runnable[group->priority]); 2253 } 2254 group_put(group); 2255 } 2256 } 2257 2258 for (i = 0; i < ARRAY_SIZE(ctx->groups); i++) { 2259 /* If everything went fine, the groups to schedule lists should 2260 * be empty. 2261 */ 2262 drm_WARN_ON(&ptdev->base, 2263 !ctx->csg_upd_failed_mask && !list_empty(&ctx->groups[i])); 2264 2265 list_for_each_entry_safe(group, tmp, &ctx->groups[i], run_node) { 2266 if (group->csg_id >= 0) { 2267 list_del_init(&group->run_node); 2268 } else { 2269 list_move(&group->run_node, 2270 group_is_idle(group) ? 2271 &sched->groups.idle[group->priority] : 2272 &sched->groups.runnable[group->priority]); 2273 } 2274 group_put(group); 2275 } 2276 } 2277 } 2278 2279 static void 2280 tick_ctx_apply(struct panthor_scheduler *sched, struct panthor_sched_tick_ctx *ctx) 2281 { 2282 struct panthor_group *group, *tmp; 2283 struct panthor_device *ptdev = sched->ptdev; 2284 struct panthor_csg_slot *csg_slot; 2285 int prio, new_csg_prio = MAX_CSG_PRIO, i; 2286 u32 free_csg_slots = 0; 2287 struct panthor_csg_slots_upd_ctx upd_ctx; 2288 int ret; 2289 2290 csgs_upd_ctx_init(&upd_ctx); 2291 2292 for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) { 2293 /* Suspend or terminate evicted groups. */ 2294 list_for_each_entry(group, &ctx->old_groups[prio], run_node) { 2295 bool term = !group_can_run(group); 2296 int csg_id = group->csg_id; 2297 2298 if (drm_WARN_ON(&ptdev->base, csg_id < 0)) 2299 continue; 2300 2301 csg_slot = &sched->csg_slots[csg_id]; 2302 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id, 2303 term ? CSG_STATE_TERMINATE : CSG_STATE_SUSPEND, 2304 CSG_STATE_MASK); 2305 } 2306 2307 /* Update priorities on already running groups. 
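 * Slot priorities are handed out in descending order, starting at
 * MAX_CSG_PRIO and walking the picked groups from the highest group
 * priority down.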
*/ 2308 list_for_each_entry(group, &ctx->groups[prio], run_node) { 2309 struct panthor_fw_csg_iface *csg_iface; 2310 int csg_id = group->csg_id; 2311 2312 if (csg_id < 0) { 2313 new_csg_prio--; 2314 continue; 2315 } 2316 2317 csg_slot = &sched->csg_slots[csg_id]; 2318 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); 2319 if (csg_slot->priority == new_csg_prio) { 2320 new_csg_prio--; 2321 continue; 2322 } 2323 2324 panthor_fw_csg_endpoint_req_update(ptdev, csg_iface, 2325 CSG_EP_REQ_PRIORITY(new_csg_prio), 2326 CSG_EP_REQ_PRIORITY_MASK); 2327 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id, 2328 csg_iface->output->ack ^ CSG_ENDPOINT_CONFIG, 2329 CSG_ENDPOINT_CONFIG); 2330 new_csg_prio--; 2331 } 2332 } 2333 2334 ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx); 2335 if (ret) { 2336 panthor_device_schedule_reset(ptdev); 2337 ctx->csg_upd_failed_mask |= upd_ctx.timedout_mask; 2338 return; 2339 } 2340 2341 /* Unbind evicted groups. */ 2342 for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) { 2343 list_for_each_entry(group, &ctx->old_groups[prio], run_node) { 2344 /* This group is gone. Process interrupts to clear 2345 * any pending interrupts before we start the new 2346 * group. 2347 */ 2348 if (group->csg_id >= 0) 2349 sched_process_csg_irq_locked(ptdev, group->csg_id); 2350 2351 group_unbind_locked(group); 2352 } 2353 } 2354 2355 for (i = 0; i < sched->csg_slot_count; i++) { 2356 if (!sched->csg_slots[i].group) 2357 free_csg_slots |= BIT(i); 2358 } 2359 2360 csgs_upd_ctx_init(&upd_ctx); 2361 new_csg_prio = MAX_CSG_PRIO; 2362 2363 /* Start new groups. */ 2364 for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) { 2365 list_for_each_entry(group, &ctx->groups[prio], run_node) { 2366 int csg_id = group->csg_id; 2367 struct panthor_fw_csg_iface *csg_iface; 2368 2369 if (csg_id >= 0) { 2370 new_csg_prio--; 2371 continue; 2372 } 2373 2374 csg_id = ffs(free_csg_slots) - 1; 2375 if (drm_WARN_ON(&ptdev->base, csg_id < 0)) 2376 break; 2377 2378 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); 2379 csg_slot = &sched->csg_slots[csg_id]; 2380 group_bind_locked(group, csg_id); 2381 csg_slot_prog_locked(ptdev, csg_id, new_csg_prio--); 2382 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id, 2383 group->state == PANTHOR_CS_GROUP_SUSPENDED ? 2384 CSG_STATE_RESUME : CSG_STATE_START, 2385 CSG_STATE_MASK); 2386 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id, 2387 csg_iface->output->ack ^ CSG_ENDPOINT_CONFIG, 2388 CSG_ENDPOINT_CONFIG); 2389 free_csg_slots &= ~BIT(csg_id); 2390 } 2391 } 2392 2393 ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx); 2394 if (ret) { 2395 panthor_device_schedule_reset(ptdev); 2396 ctx->csg_upd_failed_mask |= upd_ctx.timedout_mask; 2397 return; 2398 } 2399 2400 for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) { 2401 list_for_each_entry_safe(group, tmp, &ctx->groups[prio], run_node) { 2402 list_del_init(&group->run_node); 2403 2404 /* If the group has been destroyed while we were 2405 * scheduling, ask for an immediate tick to 2406 * re-evaluate as soon as possible and get rid of 2407 * this dangling group. 2408 */ 2409 if (group->destroyed) 2410 ctx->immediate_tick = true; 2411 group_put(group); 2412 } 2413 2414 /* Return evicted groups to the idle or run queues. Groups 2415 * that can no longer be run (because they've been destroyed 2416 * or experienced an unrecoverable error) will be scheduled 2417 * for destruction in tick_ctx_cleanup(). 
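 *
 * The group_put() in the loop below drops the reference that was taken
 * in tick_ctx_init() when the group was added to the old_groups list.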
2418 */ 2419 list_for_each_entry_safe(group, tmp, &ctx->old_groups[prio], run_node) { 2420 if (!group_can_run(group)) 2421 continue; 2422 2423 if (group_is_idle(group)) 2424 list_move_tail(&group->run_node, &sched->groups.idle[prio]); 2425 else 2426 list_move_tail(&group->run_node, &sched->groups.runnable[prio]); 2427 group_put(group); 2428 } 2429 } 2430 2431 sched->used_csg_slot_count = ctx->group_count; 2432 sched->might_have_idle_groups = ctx->idle_group_count > 0; 2433 } 2434 2435 static u64 2436 tick_ctx_update_resched_target(struct panthor_scheduler *sched, 2437 const struct panthor_sched_tick_ctx *ctx) 2438 { 2439 /* We had space left, no need to reschedule until some external event happens. */ 2440 if (!tick_ctx_is_full(sched, ctx)) 2441 goto no_tick; 2442 2443 /* If idle groups were scheduled, no need to wake up until some external 2444 * event happens (group unblocked, new job submitted, ...). 2445 */ 2446 if (ctx->idle_group_count) 2447 goto no_tick; 2448 2449 if (drm_WARN_ON(&sched->ptdev->base, ctx->min_priority >= PANTHOR_CSG_PRIORITY_COUNT)) 2450 goto no_tick; 2451 2452 /* If there are groups of the same priority waiting, we need to 2453 * keep the scheduler ticking, otherwise, we'll just wait for 2454 * new groups with higher priority to be queued. 2455 */ 2456 if (!list_empty(&sched->groups.runnable[ctx->min_priority])) { 2457 u64 resched_target = sched->last_tick + sched->tick_period; 2458 2459 if (time_before64(sched->resched_target, sched->last_tick) || 2460 time_before64(resched_target, sched->resched_target)) 2461 sched->resched_target = resched_target; 2462 2463 return sched->resched_target - sched->last_tick; 2464 } 2465 2466 no_tick: 2467 sched->resched_target = U64_MAX; 2468 return U64_MAX; 2469 } 2470 2471 static void tick_work(struct work_struct *work) 2472 { 2473 struct panthor_scheduler *sched = container_of(work, struct panthor_scheduler, 2474 tick_work.work); 2475 struct panthor_device *ptdev = sched->ptdev; 2476 struct panthor_sched_tick_ctx ctx; 2477 u64 remaining_jiffies = 0, resched_delay; 2478 u64 now = get_jiffies_64(); 2479 int prio, ret, cookie; 2480 2481 if (!drm_dev_enter(&ptdev->base, &cookie)) 2482 return; 2483 2484 ret = panthor_device_resume_and_get(ptdev); 2485 if (drm_WARN_ON(&ptdev->base, ret)) 2486 goto out_dev_exit; 2487 2488 if (time_before64(now, sched->resched_target)) 2489 remaining_jiffies = sched->resched_target - now; 2490 2491 mutex_lock(&sched->lock); 2492 if (panthor_device_reset_is_pending(sched->ptdev)) 2493 goto out_unlock; 2494 2495 tick_ctx_init(sched, &ctx, remaining_jiffies != 0); 2496 if (ctx.csg_upd_failed_mask) 2497 goto out_cleanup_ctx; 2498 2499 if (remaining_jiffies) { 2500 /* Scheduling forced in the middle of a tick. Only RT groups 2501 * can preempt non-RT ones. Currently running RT groups can't be 2502 * preempted. 
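 *
 * That's why only the PANTHOR_CSG_PRIORITY_RT level pulls new groups
 * from the runnable list in the loop below; the other levels simply
 * re-pick groups that were already resident on a CSG slot.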
2503 */ 2504 for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; 2505 prio >= 0 && !tick_ctx_is_full(sched, &ctx); 2506 prio--) { 2507 tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], 2508 true, true); 2509 if (prio == PANTHOR_CSG_PRIORITY_RT) { 2510 tick_ctx_pick_groups_from_list(sched, &ctx, 2511 &sched->groups.runnable[prio], 2512 true, false); 2513 } 2514 } 2515 } 2516 2517 /* First pick non-idle groups */ 2518 for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; 2519 prio >= 0 && !tick_ctx_is_full(sched, &ctx); 2520 prio--) { 2521 tick_ctx_pick_groups_from_list(sched, &ctx, &sched->groups.runnable[prio], 2522 true, false); 2523 tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], true, true); 2524 } 2525 2526 /* If we have free CSG slots left, pick idle groups */ 2527 for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; 2528 prio >= 0 && !tick_ctx_is_full(sched, &ctx); 2529 prio--) { 2530 /* Check the old_group queue first to avoid reprogramming the slots */ 2531 tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], false, true); 2532 tick_ctx_pick_groups_from_list(sched, &ctx, &sched->groups.idle[prio], 2533 false, false); 2534 } 2535 2536 tick_ctx_apply(sched, &ctx); 2537 if (ctx.csg_upd_failed_mask) 2538 goto out_cleanup_ctx; 2539 2540 if (ctx.idle_group_count == ctx.group_count) { 2541 panthor_devfreq_record_idle(sched->ptdev); 2542 if (sched->pm.has_ref) { 2543 pm_runtime_put_autosuspend(ptdev->base.dev); 2544 sched->pm.has_ref = false; 2545 } 2546 } else { 2547 panthor_devfreq_record_busy(sched->ptdev); 2548 if (!sched->pm.has_ref) { 2549 pm_runtime_get(ptdev->base.dev); 2550 sched->pm.has_ref = true; 2551 } 2552 } 2553 2554 sched->last_tick = now; 2555 resched_delay = tick_ctx_update_resched_target(sched, &ctx); 2556 if (ctx.immediate_tick) 2557 resched_delay = 0; 2558 2559 if (resched_delay != U64_MAX) 2560 sched_queue_delayed_work(sched, tick, resched_delay); 2561 2562 out_cleanup_ctx: 2563 tick_ctx_cleanup(sched, &ctx); 2564 2565 out_unlock: 2566 mutex_unlock(&sched->lock); 2567 pm_runtime_mark_last_busy(ptdev->base.dev); 2568 pm_runtime_put_autosuspend(ptdev->base.dev); 2569 2570 out_dev_exit: 2571 drm_dev_exit(cookie); 2572 } 2573 2574 static int panthor_queue_eval_syncwait(struct panthor_group *group, u8 queue_idx) 2575 { 2576 struct panthor_queue *queue = group->queues[queue_idx]; 2577 union { 2578 struct panthor_syncobj_64b sync64; 2579 struct panthor_syncobj_32b sync32; 2580 } *syncobj; 2581 bool result; 2582 u64 value; 2583 2584 syncobj = panthor_queue_get_syncwait_obj(group, queue); 2585 if (!syncobj) 2586 return -EINVAL; 2587 2588 value = queue->syncwait.sync64 ? 
2589 syncobj->sync64.seqno : 2590 syncobj->sync32.seqno; 2591 2592 if (queue->syncwait.gt) 2593 result = value > queue->syncwait.ref; 2594 else 2595 result = value <= queue->syncwait.ref; 2596 2597 if (result) 2598 panthor_queue_put_syncwait_obj(queue); 2599 2600 return result; 2601 } 2602 2603 static void sync_upd_work(struct work_struct *work) 2604 { 2605 struct panthor_scheduler *sched = container_of(work, 2606 struct panthor_scheduler, 2607 sync_upd_work); 2608 struct panthor_group *group, *tmp; 2609 bool immediate_tick = false; 2610 2611 mutex_lock(&sched->lock); 2612 list_for_each_entry_safe(group, tmp, &sched->groups.waiting, wait_node) { 2613 u32 tested_queues = group->blocked_queues; 2614 u32 unblocked_queues = 0; 2615 2616 while (tested_queues) { 2617 u32 cs_id = ffs(tested_queues) - 1; 2618 int ret; 2619 2620 ret = panthor_queue_eval_syncwait(group, cs_id); 2621 drm_WARN_ON(&group->ptdev->base, ret < 0); 2622 if (ret) 2623 unblocked_queues |= BIT(cs_id); 2624 2625 tested_queues &= ~BIT(cs_id); 2626 } 2627 2628 if (unblocked_queues) { 2629 group->blocked_queues &= ~unblocked_queues; 2630 2631 if (group->csg_id < 0) { 2632 list_move(&group->run_node, 2633 &sched->groups.runnable[group->priority]); 2634 if (group->priority == PANTHOR_CSG_PRIORITY_RT) 2635 immediate_tick = true; 2636 } 2637 } 2638 2639 if (!group->blocked_queues) 2640 list_del_init(&group->wait_node); 2641 } 2642 mutex_unlock(&sched->lock); 2643 2644 if (immediate_tick) 2645 sched_queue_delayed_work(sched, tick, 0); 2646 } 2647 2648 static void group_schedule_locked(struct panthor_group *group, u32 queue_mask) 2649 { 2650 struct panthor_device *ptdev = group->ptdev; 2651 struct panthor_scheduler *sched = ptdev->scheduler; 2652 struct list_head *queue = &sched->groups.runnable[group->priority]; 2653 u64 delay_jiffies = 0; 2654 bool was_idle; 2655 u64 now; 2656 2657 if (!group_can_run(group)) 2658 return; 2659 2660 /* All updated queues are blocked, no need to wake up the scheduler. */ 2661 if ((queue_mask & group->blocked_queues) == queue_mask) 2662 return; 2663 2664 was_idle = group_is_idle(group); 2665 group->idle_queues &= ~queue_mask; 2666 2667 /* Don't mess up with the lists if we're in a middle of a reset. */ 2668 if (atomic_read(&sched->reset.in_progress)) 2669 return; 2670 2671 if (was_idle && !group_is_idle(group)) 2672 list_move_tail(&group->run_node, queue); 2673 2674 /* RT groups are preemptive. */ 2675 if (group->priority == PANTHOR_CSG_PRIORITY_RT) { 2676 sched_queue_delayed_work(sched, tick, 0); 2677 return; 2678 } 2679 2680 /* Some groups might be idle, force an immediate tick to 2681 * re-evaluate. 2682 */ 2683 if (sched->might_have_idle_groups) { 2684 sched_queue_delayed_work(sched, tick, 0); 2685 return; 2686 } 2687 2688 /* Scheduler is ticking, nothing to do. */ 2689 if (sched->resched_target != U64_MAX) { 2690 /* If there are free slots, force immediating ticking. */ 2691 if (sched->used_csg_slot_count < sched->csg_slot_count) 2692 sched_queue_delayed_work(sched, tick, 0); 2693 2694 return; 2695 } 2696 2697 /* Scheduler tick was off, recalculate the resched_target based on the 2698 * last tick event, and queue the scheduler work. 
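 *
 * If all CSG slots are in use and the current timeslice hasn't elapsed
 * yet, we wait for the remainder of that timeslice; otherwise
 * delay_jiffies stays zero and the tick runs immediately so the new job
 * can be considered right away.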
2699 */ 2700 now = get_jiffies_64(); 2701 sched->resched_target = sched->last_tick + sched->tick_period; 2702 if (sched->used_csg_slot_count == sched->csg_slot_count && 2703 time_before64(now, sched->resched_target)) 2704 delay_jiffies = min_t(unsigned long, sched->resched_target - now, ULONG_MAX); 2705 2706 sched_queue_delayed_work(sched, tick, delay_jiffies); 2707 } 2708 2709 static void queue_stop(struct panthor_queue *queue, 2710 struct panthor_job *bad_job) 2711 { 2712 disable_delayed_work_sync(&queue->timeout.work); 2713 drm_sched_stop(&queue->scheduler, bad_job ? &bad_job->base : NULL); 2714 } 2715 2716 static void queue_start(struct panthor_queue *queue) 2717 { 2718 struct panthor_job *job; 2719 2720 /* Re-assign the parent fences. */ 2721 list_for_each_entry(job, &queue->scheduler.pending_list, base.list) 2722 job->base.s_fence->parent = dma_fence_get(job->done_fence); 2723 2724 enable_delayed_work(&queue->timeout.work); 2725 drm_sched_start(&queue->scheduler, 0); 2726 } 2727 2728 static void panthor_group_stop(struct panthor_group *group) 2729 { 2730 struct panthor_scheduler *sched = group->ptdev->scheduler; 2731 2732 lockdep_assert_held(&sched->reset.lock); 2733 2734 for (u32 i = 0; i < group->queue_count; i++) 2735 queue_stop(group->queues[i], NULL); 2736 2737 group_get(group); 2738 list_move_tail(&group->run_node, &sched->reset.stopped_groups); 2739 } 2740 2741 static void panthor_group_start(struct panthor_group *group) 2742 { 2743 struct panthor_scheduler *sched = group->ptdev->scheduler; 2744 2745 lockdep_assert_held(&group->ptdev->scheduler->reset.lock); 2746 2747 for (u32 i = 0; i < group->queue_count; i++) 2748 queue_start(group->queues[i]); 2749 2750 if (group_can_run(group)) { 2751 list_move_tail(&group->run_node, 2752 group_is_idle(group) ? 2753 &sched->groups.idle[group->priority] : 2754 &sched->groups.runnable[group->priority]); 2755 } else { 2756 list_del_init(&group->run_node); 2757 list_del_init(&group->wait_node); 2758 group_queue_work(group, term); 2759 } 2760 2761 group_put(group); 2762 } 2763 2764 static void panthor_sched_immediate_tick(struct panthor_device *ptdev) 2765 { 2766 struct panthor_scheduler *sched = ptdev->scheduler; 2767 2768 sched_queue_delayed_work(sched, tick, 0); 2769 } 2770 2771 /** 2772 * panthor_sched_report_mmu_fault() - Report MMU faults to the scheduler. 2773 */ 2774 void panthor_sched_report_mmu_fault(struct panthor_device *ptdev) 2775 { 2776 /* Force a tick to immediately kill faulty groups. */ 2777 if (ptdev->scheduler) 2778 panthor_sched_immediate_tick(ptdev); 2779 } 2780 2781 void panthor_sched_resume(struct panthor_device *ptdev) 2782 { 2783 /* Force a tick to re-evaluate after a resume. */ 2784 panthor_sched_immediate_tick(ptdev); 2785 } 2786 2787 void panthor_sched_suspend(struct panthor_device *ptdev) 2788 { 2789 struct panthor_scheduler *sched = ptdev->scheduler; 2790 struct panthor_csg_slots_upd_ctx upd_ctx; 2791 u32 suspended_slots; 2792 u32 i; 2793 2794 mutex_lock(&sched->lock); 2795 csgs_upd_ctx_init(&upd_ctx); 2796 for (i = 0; i < sched->csg_slot_count; i++) { 2797 struct panthor_csg_slot *csg_slot = &sched->csg_slots[i]; 2798 2799 if (csg_slot->group) { 2800 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, i, 2801 group_can_run(csg_slot->group) ? 
2802 CSG_STATE_SUSPEND : CSG_STATE_TERMINATE, 2803 CSG_STATE_MASK); 2804 } 2805 } 2806 2807 suspended_slots = upd_ctx.update_mask; 2808 2809 csgs_upd_ctx_apply_locked(ptdev, &upd_ctx); 2810 suspended_slots &= ~upd_ctx.timedout_mask; 2811 2812 if (upd_ctx.timedout_mask) { 2813 u32 slot_mask = upd_ctx.timedout_mask; 2814 2815 drm_err(&ptdev->base, "CSG suspend failed, escalating to termination"); 2816 csgs_upd_ctx_init(&upd_ctx); 2817 while (slot_mask) { 2818 u32 csg_id = ffs(slot_mask) - 1; 2819 struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id]; 2820 2821 /* If the group was still usable before that point, we consider 2822 * it innocent. 2823 */ 2824 if (group_can_run(csg_slot->group)) 2825 csg_slot->group->innocent = true; 2826 2827 /* We consider group suspension failures as fatal and flag the 2828 * group as unusable by setting timedout=true. 2829 */ 2830 csg_slot->group->timedout = true; 2831 2832 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id, 2833 CSG_STATE_TERMINATE, 2834 CSG_STATE_MASK); 2835 slot_mask &= ~BIT(csg_id); 2836 } 2837 2838 csgs_upd_ctx_apply_locked(ptdev, &upd_ctx); 2839 2840 slot_mask = upd_ctx.timedout_mask; 2841 while (slot_mask) { 2842 u32 csg_id = ffs(slot_mask) - 1; 2843 struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id]; 2844 struct panthor_group *group = csg_slot->group; 2845 2846 /* Terminate command timedout, but the soft-reset will 2847 * automatically terminate all active groups, so let's 2848 * force the state to halted here. 2849 */ 2850 if (group->state != PANTHOR_CS_GROUP_TERMINATED) { 2851 group->state = PANTHOR_CS_GROUP_TERMINATED; 2852 2853 /* Reset the queue slots manually if the termination 2854 * request failed. 2855 */ 2856 for (i = 0; i < group->queue_count; i++) { 2857 if (group->queues[i]) 2858 cs_slot_reset_locked(ptdev, csg_id, i); 2859 } 2860 } 2861 slot_mask &= ~BIT(csg_id); 2862 } 2863 } 2864 2865 /* Flush L2 and LSC caches to make sure suspend state is up-to-date. 2866 * If the flush fails, flag all queues for termination. 2867 */ 2868 if (suspended_slots) { 2869 bool flush_caches_failed = false; 2870 u32 slot_mask = suspended_slots; 2871 2872 if (panthor_gpu_flush_caches(ptdev, CACHE_CLEAN, CACHE_CLEAN, 0)) 2873 flush_caches_failed = true; 2874 2875 while (slot_mask) { 2876 u32 csg_id = ffs(slot_mask) - 1; 2877 struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id]; 2878 2879 if (flush_caches_failed) 2880 csg_slot->group->state = PANTHOR_CS_GROUP_TERMINATED; 2881 else 2882 csg_slot_sync_update_locked(ptdev, csg_id); 2883 2884 slot_mask &= ~BIT(csg_id); 2885 } 2886 } 2887 2888 for (i = 0; i < sched->csg_slot_count; i++) { 2889 struct panthor_csg_slot *csg_slot = &sched->csg_slots[i]; 2890 struct panthor_group *group = csg_slot->group; 2891 2892 if (!group) 2893 continue; 2894 2895 group_get(group); 2896 2897 if (group->csg_id >= 0) 2898 sched_process_csg_irq_locked(ptdev, group->csg_id); 2899 2900 group_unbind_locked(group); 2901 2902 drm_WARN_ON(&group->ptdev->base, !list_empty(&group->run_node)); 2903 2904 if (group_can_run(group)) { 2905 list_add(&group->run_node, 2906 &sched->groups.idle[group->priority]); 2907 } else { 2908 /* We don't bother stopping the scheduler if the group is 2909 * faulty, the group termination work will finish the job. 
2910 */ 2911 list_del_init(&group->wait_node); 2912 group_queue_work(group, term); 2913 } 2914 group_put(group); 2915 } 2916 mutex_unlock(&sched->lock); 2917 } 2918 2919 void panthor_sched_pre_reset(struct panthor_device *ptdev) 2920 { 2921 struct panthor_scheduler *sched = ptdev->scheduler; 2922 struct panthor_group *group, *group_tmp; 2923 u32 i; 2924 2925 mutex_lock(&sched->reset.lock); 2926 atomic_set(&sched->reset.in_progress, true); 2927 2928 /* Cancel all scheduler works. Once this is done, these works can't be 2929 * scheduled again until the reset operation is complete. 2930 */ 2931 cancel_work_sync(&sched->sync_upd_work); 2932 cancel_delayed_work_sync(&sched->tick_work); 2933 2934 panthor_sched_suspend(ptdev); 2935 2936 /* Stop all groups that might still accept jobs, so we don't get passed 2937 * new jobs while we're resetting. 2938 */ 2939 for (i = 0; i < ARRAY_SIZE(sched->groups.runnable); i++) { 2940 /* All groups should be in the idle lists. */ 2941 drm_WARN_ON(&ptdev->base, !list_empty(&sched->groups.runnable[i])); 2942 list_for_each_entry_safe(group, group_tmp, &sched->groups.runnable[i], run_node) 2943 panthor_group_stop(group); 2944 } 2945 2946 for (i = 0; i < ARRAY_SIZE(sched->groups.idle); i++) { 2947 list_for_each_entry_safe(group, group_tmp, &sched->groups.idle[i], run_node) 2948 panthor_group_stop(group); 2949 } 2950 2951 mutex_unlock(&sched->reset.lock); 2952 } 2953 2954 void panthor_sched_post_reset(struct panthor_device *ptdev, bool reset_failed) 2955 { 2956 struct panthor_scheduler *sched = ptdev->scheduler; 2957 struct panthor_group *group, *group_tmp; 2958 2959 mutex_lock(&sched->reset.lock); 2960 2961 list_for_each_entry_safe(group, group_tmp, &sched->reset.stopped_groups, run_node) { 2962 /* Consider all previously running group as terminated if the 2963 * reset failed. 2964 */ 2965 if (reset_failed) 2966 group->state = PANTHOR_CS_GROUP_TERMINATED; 2967 2968 panthor_group_start(group); 2969 } 2970 2971 /* We're done resetting the GPU, clear the reset.in_progress bit so we can 2972 * kick the scheduler. 2973 */ 2974 atomic_set(&sched->reset.in_progress, false); 2975 mutex_unlock(&sched->reset.lock); 2976 2977 /* No need to queue a tick and update syncs if the reset failed. 
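 * If it failed, all previously running groups have been flagged as
 * terminated above, so there is nothing left to schedule or synchronize.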
*/ 2978 if (!reset_failed) { 2979 sched_queue_delayed_work(sched, tick, 0); 2980 sched_queue_work(sched, sync_upd); 2981 } 2982 } 2983 2984 static void update_fdinfo_stats(struct panthor_job *job) 2985 { 2986 struct panthor_group *group = job->group; 2987 struct panthor_queue *queue = group->queues[job->queue_idx]; 2988 struct panthor_gpu_usage *fdinfo = &group->fdinfo.data; 2989 struct panthor_job_profiling_data *slots = queue->profiling.slots->kmap; 2990 struct panthor_job_profiling_data *data = &slots[job->profiling.slot]; 2991 2992 scoped_guard(spinlock, &group->fdinfo.lock) { 2993 if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_CYCLES) 2994 fdinfo->cycles += data->cycles.after - data->cycles.before; 2995 if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_TIMESTAMP) 2996 fdinfo->time += data->time.after - data->time.before; 2997 } 2998 } 2999 3000 void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile) 3001 { 3002 struct panthor_group_pool *gpool = pfile->groups; 3003 struct panthor_group *group; 3004 unsigned long i; 3005 3006 if (IS_ERR_OR_NULL(gpool)) 3007 return; 3008 3009 xa_lock(&gpool->xa); 3010 xa_for_each(&gpool->xa, i, group) { 3011 guard(spinlock)(&group->fdinfo.lock); 3012 pfile->stats.cycles += group->fdinfo.data.cycles; 3013 pfile->stats.time += group->fdinfo.data.time; 3014 group->fdinfo.data.cycles = 0; 3015 group->fdinfo.data.time = 0; 3016 } 3017 xa_unlock(&gpool->xa); 3018 } 3019 3020 static bool queue_check_job_completion(struct panthor_queue *queue) 3021 { 3022 struct panthor_syncobj_64b *syncobj = NULL; 3023 struct panthor_job *job, *job_tmp; 3024 bool cookie, progress = false; 3025 LIST_HEAD(done_jobs); 3026 3027 cookie = dma_fence_begin_signalling(); 3028 spin_lock(&queue->fence_ctx.lock); 3029 list_for_each_entry_safe(job, job_tmp, &queue->fence_ctx.in_flight_jobs, node) { 3030 if (!syncobj) { 3031 struct panthor_group *group = job->group; 3032 3033 syncobj = group->syncobjs->kmap + 3034 (job->queue_idx * sizeof(*syncobj)); 3035 } 3036 3037 if (syncobj->seqno < job->done_fence->seqno) 3038 break; 3039 3040 list_move_tail(&job->node, &done_jobs); 3041 dma_fence_signal_locked(job->done_fence); 3042 } 3043 3044 if (list_empty(&queue->fence_ctx.in_flight_jobs)) { 3045 /* If we have no job left, we cancel the timer, and reset remaining 3046 * time to its default so it can be restarted next time 3047 * queue_resume_timeout() is called. 3048 */ 3049 queue_suspend_timeout_locked(queue); 3050 3051 /* If there's no job pending, we consider it progress to avoid a 3052 * spurious timeout if the timeout handler and the sync update 3053 * handler raced. 
3054 */ 3055 progress = true; 3056 } else if (!list_empty(&done_jobs)) { 3057 queue_reset_timeout_locked(queue); 3058 progress = true; 3059 } 3060 spin_unlock(&queue->fence_ctx.lock); 3061 dma_fence_end_signalling(cookie); 3062 3063 list_for_each_entry_safe(job, job_tmp, &done_jobs, node) { 3064 if (job->profiling.mask) 3065 update_fdinfo_stats(job); 3066 list_del_init(&job->node); 3067 panthor_job_put(&job->base); 3068 } 3069 3070 return progress; 3071 } 3072 3073 static void group_sync_upd_work(struct work_struct *work) 3074 { 3075 struct panthor_group *group = 3076 container_of(work, struct panthor_group, sync_upd_work); 3077 u32 queue_idx; 3078 bool cookie; 3079 3080 cookie = dma_fence_begin_signalling(); 3081 for (queue_idx = 0; queue_idx < group->queue_count; queue_idx++) { 3082 struct panthor_queue *queue = group->queues[queue_idx]; 3083 3084 if (!queue) 3085 continue; 3086 3087 queue_check_job_completion(queue); 3088 } 3089 dma_fence_end_signalling(cookie); 3090 3091 group_put(group); 3092 } 3093 3094 struct panthor_job_ringbuf_instrs { 3095 u64 buffer[MAX_INSTRS_PER_JOB]; 3096 u32 count; 3097 }; 3098 3099 struct panthor_job_instr { 3100 u32 profile_mask; 3101 u64 instr; 3102 }; 3103 3104 #define JOB_INSTR(__prof, __instr) \ 3105 { \ 3106 .profile_mask = __prof, \ 3107 .instr = __instr, \ 3108 } 3109 3110 static void 3111 copy_instrs_to_ringbuf(struct panthor_queue *queue, 3112 struct panthor_job *job, 3113 struct panthor_job_ringbuf_instrs *instrs) 3114 { 3115 u64 ringbuf_size = panthor_kernel_bo_size(queue->ringbuf); 3116 u64 start = job->ringbuf.start & (ringbuf_size - 1); 3117 u64 size, written; 3118 3119 /* 3120 * We need to write a whole slot, including any trailing zeroes 3121 * that may come at the end of it. Also, because instrs.buffer has 3122 * been zero-initialised, there's no need to pad it with 0's 3123 */ 3124 instrs->count = ALIGN(instrs->count, NUM_INSTRS_PER_CACHE_LINE); 3125 size = instrs->count * sizeof(u64); 3126 WARN_ON(size > ringbuf_size); 3127 written = min(ringbuf_size - start, size); 3128 3129 memcpy(queue->ringbuf->kmap + start, instrs->buffer, written); 3130 3131 if (written < size) 3132 memcpy(queue->ringbuf->kmap, 3133 &instrs->buffer[written / sizeof(u64)], 3134 size - written); 3135 } 3136 3137 struct panthor_job_cs_params { 3138 u32 profile_mask; 3139 u64 addr_reg; u64 val_reg; 3140 u64 cycle_reg; u64 time_reg; 3141 u64 sync_addr; u64 times_addr; 3142 u64 cs_start; u64 cs_size; 3143 u32 last_flush; u32 waitall_mask; 3144 }; 3145 3146 static void 3147 get_job_cs_params(struct panthor_job *job, struct panthor_job_cs_params *params) 3148 { 3149 struct panthor_group *group = job->group; 3150 struct panthor_queue *queue = group->queues[job->queue_idx]; 3151 struct panthor_device *ptdev = group->ptdev; 3152 struct panthor_scheduler *sched = ptdev->scheduler; 3153 3154 params->addr_reg = ptdev->csif_info.cs_reg_count - 3155 ptdev->csif_info.unpreserved_cs_reg_count; 3156 params->val_reg = params->addr_reg + 2; 3157 params->cycle_reg = params->addr_reg; 3158 params->time_reg = params->val_reg; 3159 3160 params->sync_addr = panthor_kernel_bo_gpuva(group->syncobjs) + 3161 job->queue_idx * sizeof(struct panthor_syncobj_64b); 3162 params->times_addr = panthor_kernel_bo_gpuva(queue->profiling.slots) + 3163 (job->profiling.slot * sizeof(struct panthor_job_profiling_data)); 3164 params->waitall_mask = GENMASK(sched->sb_slot_count - 1, 0); 3165 3166 params->cs_start = job->call_info.start; 3167 params->cs_size = job->call_info.size; 3168 params->last_flush = 
job->call_info.latest_flush; 3169 3170 params->profile_mask = job->profiling.mask; 3171 } 3172 3173 #define JOB_INSTR_ALWAYS(instr) \ 3174 JOB_INSTR(PANTHOR_DEVICE_PROFILING_DISABLED, (instr)) 3175 #define JOB_INSTR_TIMESTAMP(instr) \ 3176 JOB_INSTR(PANTHOR_DEVICE_PROFILING_TIMESTAMP, (instr)) 3177 #define JOB_INSTR_CYCLES(instr) \ 3178 JOB_INSTR(PANTHOR_DEVICE_PROFILING_CYCLES, (instr)) 3179 3180 static void 3181 prepare_job_instrs(const struct panthor_job_cs_params *params, 3182 struct panthor_job_ringbuf_instrs *instrs) 3183 { 3184 const struct panthor_job_instr instr_seq[] = { 3185 /* MOV32 rX+2, cs.latest_flush */ 3186 JOB_INSTR_ALWAYS((2ull << 56) | (params->val_reg << 48) | params->last_flush), 3187 /* FLUSH_CACHE2.clean_inv_all.no_wait.signal(0) rX+2 */ 3188 JOB_INSTR_ALWAYS((36ull << 56) | (0ull << 48) | (params->val_reg << 40) | 3189 (0 << 16) | 0x233), 3190 /* MOV48 rX:rX+1, cycles_offset */ 3191 JOB_INSTR_CYCLES((1ull << 56) | (params->cycle_reg << 48) | 3192 (params->times_addr + 3193 offsetof(struct panthor_job_profiling_data, cycles.before))), 3194 /* STORE_STATE cycles */ 3195 JOB_INSTR_CYCLES((40ull << 56) | (params->cycle_reg << 40) | (1ll << 32)), 3196 /* MOV48 rX:rX+1, time_offset */ 3197 JOB_INSTR_TIMESTAMP((1ull << 56) | (params->time_reg << 48) | 3198 (params->times_addr + 3199 offsetof(struct panthor_job_profiling_data, time.before))), 3200 /* STORE_STATE timer */ 3201 JOB_INSTR_TIMESTAMP((40ull << 56) | (params->time_reg << 40) | (0ll << 32)), 3202 /* MOV48 rX:rX+1, cs.start */ 3203 JOB_INSTR_ALWAYS((1ull << 56) | (params->addr_reg << 48) | params->cs_start), 3204 /* MOV32 rX+2, cs.size */ 3205 JOB_INSTR_ALWAYS((2ull << 56) | (params->val_reg << 48) | params->cs_size), 3206 /* WAIT(0) => waits for FLUSH_CACHE2 instruction */ 3207 JOB_INSTR_ALWAYS((3ull << 56) | (1 << 16)), 3208 /* CALL rX:rX+1, rX+2 */ 3209 JOB_INSTR_ALWAYS((32ull << 56) | (params->addr_reg << 40) | 3210 (params->val_reg << 32)), 3211 /* MOV48 rX:rX+1, cycles_offset */ 3212 JOB_INSTR_CYCLES((1ull << 56) | (params->cycle_reg << 48) | 3213 (params->times_addr + 3214 offsetof(struct panthor_job_profiling_data, cycles.after))), 3215 /* STORE_STATE cycles */ 3216 JOB_INSTR_CYCLES((40ull << 56) | (params->cycle_reg << 40) | (1ll << 32)), 3217 /* MOV48 rX:rX+1, time_offset */ 3218 JOB_INSTR_TIMESTAMP((1ull << 56) | (params->time_reg << 48) | 3219 (params->times_addr + 3220 offsetof(struct panthor_job_profiling_data, time.after))), 3221 /* STORE_STATE timer */ 3222 JOB_INSTR_TIMESTAMP((40ull << 56) | (params->time_reg << 40) | (0ll << 32)), 3223 /* MOV48 rX:rX+1, sync_addr */ 3224 JOB_INSTR_ALWAYS((1ull << 56) | (params->addr_reg << 48) | params->sync_addr), 3225 /* MOV48 rX+2, #1 */ 3226 JOB_INSTR_ALWAYS((1ull << 56) | (params->val_reg << 48) | 1), 3227 /* WAIT(all) */ 3228 JOB_INSTR_ALWAYS((3ull << 56) | (params->waitall_mask << 16)), 3229 /* SYNC_ADD64.system_scope.propage_err.nowait rX:rX+1, rX+2*/ 3230 JOB_INSTR_ALWAYS((51ull << 56) | (0ull << 48) | (params->addr_reg << 40) | 3231 (params->val_reg << 32) | (0 << 16) | 1), 3232 /* ERROR_BARRIER, so we can recover from faults at job boundaries. */ 3233 JOB_INSTR_ALWAYS((47ull << 56)), 3234 }; 3235 u32 pad; 3236 3237 instrs->count = 0; 3238 3239 /* NEED to be cacheline aligned to please the prefetcher. */ 3240 static_assert(sizeof(instrs->buffer) % 64 == 0, 3241 "panthor_job_ringbuf_instrs::buffer is not aligned on a cacheline"); 3242 3243 /* Make sure we have enough storage to store the whole sequence. 
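 * In other words, the sequence length rounded up to a full cache line
 * must match the per-job instruction budget (MAX_INSTRS_PER_JOB).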
*/ 3244 static_assert(ALIGN(ARRAY_SIZE(instr_seq), NUM_INSTRS_PER_CACHE_LINE) == 3245 ARRAY_SIZE(instrs->buffer), 3246 "instr_seq vs panthor_job_ringbuf_instrs::buffer size mismatch"); 3247 3248 for (u32 i = 0; i < ARRAY_SIZE(instr_seq); i++) { 3249 /* If the profile mask of this instruction is not enabled, skip it. */ 3250 if (instr_seq[i].profile_mask && 3251 !(instr_seq[i].profile_mask & params->profile_mask)) 3252 continue; 3253 3254 instrs->buffer[instrs->count++] = instr_seq[i].instr; 3255 } 3256 3257 pad = ALIGN(instrs->count, NUM_INSTRS_PER_CACHE_LINE); 3258 memset(&instrs->buffer[instrs->count], 0, 3259 (pad - instrs->count) * sizeof(instrs->buffer[0])); 3260 instrs->count = pad; 3261 } 3262 3263 static u32 calc_job_credits(u32 profile_mask) 3264 { 3265 struct panthor_job_ringbuf_instrs instrs; 3266 struct panthor_job_cs_params params = { 3267 .profile_mask = profile_mask, 3268 }; 3269 3270 prepare_job_instrs(¶ms, &instrs); 3271 return instrs.count; 3272 } 3273 3274 static struct dma_fence * 3275 queue_run_job(struct drm_sched_job *sched_job) 3276 { 3277 struct panthor_job *job = container_of(sched_job, struct panthor_job, base); 3278 struct panthor_group *group = job->group; 3279 struct panthor_queue *queue = group->queues[job->queue_idx]; 3280 struct panthor_device *ptdev = group->ptdev; 3281 struct panthor_scheduler *sched = ptdev->scheduler; 3282 struct panthor_job_ringbuf_instrs instrs; 3283 struct panthor_job_cs_params cs_params; 3284 struct dma_fence *done_fence; 3285 int ret; 3286 3287 /* Stream size is zero, nothing to do except making sure all previously 3288 * submitted jobs are done before we signal the 3289 * drm_sched_job::s_fence::finished fence. 3290 */ 3291 if (!job->call_info.size) { 3292 job->done_fence = dma_fence_get(queue->fence_ctx.last_fence); 3293 return dma_fence_get(job->done_fence); 3294 } 3295 3296 ret = panthor_device_resume_and_get(ptdev); 3297 if (drm_WARN_ON(&ptdev->base, ret)) 3298 return ERR_PTR(ret); 3299 3300 mutex_lock(&sched->lock); 3301 if (!group_can_run(group)) { 3302 done_fence = ERR_PTR(-ECANCELED); 3303 goto out_unlock; 3304 } 3305 3306 dma_fence_init(job->done_fence, 3307 &panthor_queue_fence_ops, 3308 &queue->fence_ctx.lock, 3309 queue->fence_ctx.id, 3310 atomic64_inc_return(&queue->fence_ctx.seqno)); 3311 3312 job->profiling.slot = queue->profiling.seqno++; 3313 if (queue->profiling.seqno == queue->profiling.slot_count) 3314 queue->profiling.seqno = 0; 3315 3316 job->ringbuf.start = queue->iface.input->insert; 3317 3318 get_job_cs_params(job, &cs_params); 3319 prepare_job_instrs(&cs_params, &instrs); 3320 copy_instrs_to_ringbuf(queue, job, &instrs); 3321 3322 job->ringbuf.end = job->ringbuf.start + (instrs.count * sizeof(u64)); 3323 3324 panthor_job_get(&job->base); 3325 spin_lock(&queue->fence_ctx.lock); 3326 list_add_tail(&job->node, &queue->fence_ctx.in_flight_jobs); 3327 spin_unlock(&queue->fence_ctx.lock); 3328 3329 /* Make sure the ring buffer is updated before the INSERT 3330 * register. 
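 * The wmb() orders the ring buffer stores against the insert pointer
 * update below: the FW only consumes ring buffer contents up to the
 * insert point, so it must observe the instructions before it observes
 * the new insert value.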
3331 */ 3332 wmb(); 3333 3334 queue->iface.input->extract = queue->iface.output->extract; 3335 queue->iface.input->insert = job->ringbuf.end; 3336 3337 if (group->csg_id < 0) { 3338 group_schedule_locked(group, BIT(job->queue_idx)); 3339 } else { 3340 gpu_write(ptdev, CSF_DOORBELL(queue->doorbell_id), 1); 3341 if (!sched->pm.has_ref && 3342 !(group->blocked_queues & BIT(job->queue_idx))) { 3343 pm_runtime_get(ptdev->base.dev); 3344 sched->pm.has_ref = true; 3345 } 3346 queue_resume_timeout(queue); 3347 panthor_devfreq_record_busy(sched->ptdev); 3348 } 3349 3350 /* Update the last fence. */ 3351 dma_fence_put(queue->fence_ctx.last_fence); 3352 queue->fence_ctx.last_fence = dma_fence_get(job->done_fence); 3353 3354 done_fence = dma_fence_get(job->done_fence); 3355 3356 out_unlock: 3357 mutex_unlock(&sched->lock); 3358 pm_runtime_mark_last_busy(ptdev->base.dev); 3359 pm_runtime_put_autosuspend(ptdev->base.dev); 3360 3361 return done_fence; 3362 } 3363 3364 static enum drm_gpu_sched_stat 3365 queue_timedout_job(struct drm_sched_job *sched_job) 3366 { 3367 struct panthor_job *job = container_of(sched_job, struct panthor_job, base); 3368 struct panthor_group *group = job->group; 3369 struct panthor_device *ptdev = group->ptdev; 3370 struct panthor_scheduler *sched = ptdev->scheduler; 3371 struct panthor_queue *queue = group->queues[job->queue_idx]; 3372 3373 drm_warn(&ptdev->base, "job timeout: pid=%d, comm=%s, seqno=%llu\n", 3374 group->task_info.pid, group->task_info.comm, job->done_fence->seqno); 3375 3376 drm_WARN_ON(&ptdev->base, atomic_read(&sched->reset.in_progress)); 3377 3378 queue_stop(queue, job); 3379 3380 mutex_lock(&sched->lock); 3381 group->timedout = true; 3382 if (group->csg_id >= 0) { 3383 sched_queue_delayed_work(ptdev->scheduler, tick, 0); 3384 } else { 3385 /* Remove from the run queues, so the scheduler can't 3386 * pick the group on the next tick. 3387 */ 3388 list_del_init(&group->run_node); 3389 list_del_init(&group->wait_node); 3390 3391 group_queue_work(group, term); 3392 } 3393 mutex_unlock(&sched->lock); 3394 3395 queue_start(queue); 3396 return DRM_GPU_SCHED_STAT_RESET; 3397 } 3398 3399 static void queue_free_job(struct drm_sched_job *sched_job) 3400 { 3401 drm_sched_job_cleanup(sched_job); 3402 panthor_job_put(sched_job); 3403 } 3404 3405 static const struct drm_sched_backend_ops panthor_queue_sched_ops = { 3406 .run_job = queue_run_job, 3407 .timedout_job = queue_timedout_job, 3408 .free_job = queue_free_job, 3409 }; 3410 3411 static u32 calc_profiling_ringbuf_num_slots(struct panthor_device *ptdev, 3412 u32 cs_ringbuf_size) 3413 { 3414 u32 min_profiled_job_instrs = U32_MAX; 3415 u32 last_flag = fls(PANTHOR_DEVICE_PROFILING_ALL); 3416 3417 /* 3418 * We want to calculate the minimum size of a profiled job's CS, 3419 * because since they need additional instructions for the sampling 3420 * of performance metrics, they might take up further slots in 3421 * the queue's ringbuffer. This means we might not need as many job 3422 * slots for keeping track of their profiling information. What we 3423 * need is the maximum number of slots we should allocate to this end, 3424 * which matches the maximum number of profiled jobs we can place 3425 * simultaneously in the queue's ring buffer. 3426 * That has to be calculated separately for every single job profiling 3427 * flag, but not in the case job profiling is disabled, since unprofiled 3428 * jobs don't need to keep track of this at all. 
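 *
 * As a purely illustrative example: with a 4K ring buffer and a minimum
 * profiled-job footprint of 32 instructions (256 bytes), this would give
 * DIV_ROUND_UP(4096, 256) = 16 profiling slots.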
3429 */ 3430 for (u32 i = 0; i < last_flag; i++) { 3431 min_profiled_job_instrs = 3432 min(min_profiled_job_instrs, calc_job_credits(BIT(i))); 3433 } 3434 3435 return DIV_ROUND_UP(cs_ringbuf_size, min_profiled_job_instrs * sizeof(u64)); 3436 } 3437 3438 static void queue_timeout_work(struct work_struct *work) 3439 { 3440 struct panthor_queue *queue = container_of(work, struct panthor_queue, 3441 timeout.work.work); 3442 bool progress; 3443 3444 progress = queue_check_job_completion(queue); 3445 if (!progress) 3446 drm_sched_fault(&queue->scheduler); 3447 } 3448 3449 static struct panthor_queue * 3450 group_create_queue(struct panthor_group *group, 3451 const struct drm_panthor_queue_create *args, 3452 u64 drm_client_id, u32 gid, u32 qid) 3453 { 3454 struct drm_sched_init_args sched_args = { 3455 .ops = &panthor_queue_sched_ops, 3456 .submit_wq = group->ptdev->scheduler->wq, 3457 .num_rqs = 1, 3458 /* 3459 * The credit limit argument tells us the total number of 3460 * instructions across all CS slots in the ringbuffer, with 3461 * some jobs requiring twice as many as others, depending on 3462 * their profiling status. 3463 */ 3464 .credit_limit = args->ringbuf_size / sizeof(u64), 3465 .timeout = MAX_SCHEDULE_TIMEOUT, 3466 .timeout_wq = group->ptdev->reset.wq, 3467 .dev = group->ptdev->base.dev, 3468 }; 3469 struct drm_gpu_scheduler *drm_sched; 3470 struct panthor_queue *queue; 3471 int ret; 3472 3473 if (args->pad[0] || args->pad[1] || args->pad[2]) 3474 return ERR_PTR(-EINVAL); 3475 3476 if (args->ringbuf_size < SZ_4K || args->ringbuf_size > SZ_64K || 3477 !is_power_of_2(args->ringbuf_size)) 3478 return ERR_PTR(-EINVAL); 3479 3480 if (args->priority > CSF_MAX_QUEUE_PRIO) 3481 return ERR_PTR(-EINVAL); 3482 3483 queue = kzalloc(sizeof(*queue), GFP_KERNEL); 3484 if (!queue) 3485 return ERR_PTR(-ENOMEM); 3486 3487 queue->timeout.remaining = msecs_to_jiffies(JOB_TIMEOUT_MS); 3488 INIT_DELAYED_WORK(&queue->timeout.work, queue_timeout_work); 3489 queue->fence_ctx.id = dma_fence_context_alloc(1); 3490 spin_lock_init(&queue->fence_ctx.lock); 3491 INIT_LIST_HEAD(&queue->fence_ctx.in_flight_jobs); 3492 3493 queue->priority = args->priority; 3494 3495 queue->ringbuf = panthor_kernel_bo_create(group->ptdev, group->vm, 3496 args->ringbuf_size, 3497 DRM_PANTHOR_BO_NO_MMAP, 3498 DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC | 3499 DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED, 3500 PANTHOR_VM_KERNEL_AUTO_VA, 3501 "CS ring buffer"); 3502 if (IS_ERR(queue->ringbuf)) { 3503 ret = PTR_ERR(queue->ringbuf); 3504 goto err_free_queue; 3505 } 3506 3507 ret = panthor_kernel_bo_vmap(queue->ringbuf); 3508 if (ret) 3509 goto err_free_queue; 3510 3511 queue->iface.mem = panthor_fw_alloc_queue_iface_mem(group->ptdev, 3512 &queue->iface.input, 3513 &queue->iface.output, 3514 &queue->iface.input_fw_va, 3515 &queue->iface.output_fw_va); 3516 if (IS_ERR(queue->iface.mem)) { 3517 ret = PTR_ERR(queue->iface.mem); 3518 goto err_free_queue; 3519 } 3520 3521 queue->profiling.slot_count = 3522 calc_profiling_ringbuf_num_slots(group->ptdev, args->ringbuf_size); 3523 3524 queue->profiling.slots = 3525 panthor_kernel_bo_create(group->ptdev, group->vm, 3526 queue->profiling.slot_count * 3527 sizeof(struct panthor_job_profiling_data), 3528 DRM_PANTHOR_BO_NO_MMAP, 3529 DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC | 3530 DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED, 3531 PANTHOR_VM_KERNEL_AUTO_VA, 3532 "Group job stats"); 3533 3534 if (IS_ERR(queue->profiling.slots)) { 3535 ret = PTR_ERR(queue->profiling.slots); 3536 goto err_free_queue; 3537 } 3538 3539 ret = 
panthor_kernel_bo_vmap(queue->profiling.slots); 3540 if (ret) 3541 goto err_free_queue; 3542 3543 /* assign a unique name */ 3544 queue->name = kasprintf(GFP_KERNEL, "panthor-queue-%llu-%u-%u", drm_client_id, gid, qid); 3545 if (!queue->name) { 3546 ret = -ENOMEM; 3547 goto err_free_queue; 3548 } 3549 3550 sched_args.name = queue->name; 3551 3552 ret = drm_sched_init(&queue->scheduler, &sched_args); 3553 if (ret) 3554 goto err_free_queue; 3555 3556 drm_sched = &queue->scheduler; 3557 ret = drm_sched_entity_init(&queue->entity, 0, &drm_sched, 1, NULL); 3558 if (ret) 3559 goto err_free_queue; 3560 3561 return queue; 3562 3563 err_free_queue: 3564 group_free_queue(group, queue); 3565 return ERR_PTR(ret); 3566 } 3567 3568 static void group_init_task_info(struct panthor_group *group) 3569 { 3570 struct task_struct *task = current->group_leader; 3571 3572 group->task_info.pid = task->pid; 3573 get_task_comm(group->task_info.comm, task); 3574 } 3575 3576 static void add_group_kbo_sizes(struct panthor_device *ptdev, 3577 struct panthor_group *group) 3578 { 3579 struct panthor_queue *queue; 3580 int i; 3581 3582 if (drm_WARN_ON(&ptdev->base, IS_ERR_OR_NULL(group))) 3583 return; 3584 if (drm_WARN_ON(&ptdev->base, ptdev != group->ptdev)) 3585 return; 3586 3587 group->fdinfo.kbo_sizes += group->suspend_buf->obj->size; 3588 group->fdinfo.kbo_sizes += group->protm_suspend_buf->obj->size; 3589 group->fdinfo.kbo_sizes += group->syncobjs->obj->size; 3590 3591 for (i = 0; i < group->queue_count; i++) { 3592 queue = group->queues[i]; 3593 group->fdinfo.kbo_sizes += queue->ringbuf->obj->size; 3594 group->fdinfo.kbo_sizes += queue->iface.mem->obj->size; 3595 group->fdinfo.kbo_sizes += queue->profiling.slots->obj->size; 3596 } 3597 } 3598 3599 #define MAX_GROUPS_PER_POOL 128 3600 3601 int panthor_group_create(struct panthor_file *pfile, 3602 const struct drm_panthor_group_create *group_args, 3603 const struct drm_panthor_queue_create *queue_args, 3604 u64 drm_client_id) 3605 { 3606 struct panthor_device *ptdev = pfile->ptdev; 3607 struct panthor_group_pool *gpool = pfile->groups; 3608 struct panthor_scheduler *sched = ptdev->scheduler; 3609 struct panthor_fw_csg_iface *csg_iface = panthor_fw_get_csg_iface(ptdev, 0); 3610 struct panthor_group *group = NULL; 3611 u32 gid, i, suspend_size; 3612 int ret; 3613 3614 if (group_args->pad) 3615 return -EINVAL; 3616 3617 if (group_args->priority >= PANTHOR_CSG_PRIORITY_COUNT) 3618 return -EINVAL; 3619 3620 if ((group_args->compute_core_mask & ~ptdev->gpu_info.shader_present) || 3621 (group_args->fragment_core_mask & ~ptdev->gpu_info.shader_present) || 3622 (group_args->tiler_core_mask & ~ptdev->gpu_info.tiler_present)) 3623 return -EINVAL; 3624 3625 if (hweight64(group_args->compute_core_mask) < group_args->max_compute_cores || 3626 hweight64(group_args->fragment_core_mask) < group_args->max_fragment_cores || 3627 hweight64(group_args->tiler_core_mask) < group_args->max_tiler_cores) 3628 return -EINVAL; 3629 3630 group = kzalloc(sizeof(*group), GFP_KERNEL); 3631 if (!group) 3632 return -ENOMEM; 3633 3634 spin_lock_init(&group->fatal_lock); 3635 kref_init(&group->refcount); 3636 group->state = PANTHOR_CS_GROUP_CREATED; 3637 group->csg_id = -1; 3638 3639 group->ptdev = ptdev; 3640 group->max_compute_cores = group_args->max_compute_cores; 3641 group->compute_core_mask = group_args->compute_core_mask; 3642 group->max_fragment_cores = group_args->max_fragment_cores; 3643 group->fragment_core_mask = group_args->fragment_core_mask; 3644 group->max_tiler_cores = 
static void group_init_task_info(struct panthor_group *group)
{
	struct task_struct *task = current->group_leader;

	group->task_info.pid = task->pid;
	get_task_comm(group->task_info.comm, task);
}

static void add_group_kbo_sizes(struct panthor_device *ptdev,
				struct panthor_group *group)
{
	struct panthor_queue *queue;
	int i;

	if (drm_WARN_ON(&ptdev->base, IS_ERR_OR_NULL(group)))
		return;
	if (drm_WARN_ON(&ptdev->base, ptdev != group->ptdev))
		return;

	group->fdinfo.kbo_sizes += group->suspend_buf->obj->size;
	group->fdinfo.kbo_sizes += group->protm_suspend_buf->obj->size;
	group->fdinfo.kbo_sizes += group->syncobjs->obj->size;

	for (i = 0; i < group->queue_count; i++) {
		queue = group->queues[i];
		group->fdinfo.kbo_sizes += queue->ringbuf->obj->size;
		group->fdinfo.kbo_sizes += queue->iface.mem->obj->size;
		group->fdinfo.kbo_sizes += queue->profiling.slots->obj->size;
	}
}

#define MAX_GROUPS_PER_POOL 128

int panthor_group_create(struct panthor_file *pfile,
			 const struct drm_panthor_group_create *group_args,
			 const struct drm_panthor_queue_create *queue_args,
			 u64 drm_client_id)
{
	struct panthor_device *ptdev = pfile->ptdev;
	struct panthor_group_pool *gpool = pfile->groups;
	struct panthor_scheduler *sched = ptdev->scheduler;
	struct panthor_fw_csg_iface *csg_iface = panthor_fw_get_csg_iface(ptdev, 0);
	struct panthor_group *group = NULL;
	u32 gid, i, suspend_size;
	int ret;

	if (group_args->pad)
		return -EINVAL;

	if (group_args->priority >= PANTHOR_CSG_PRIORITY_COUNT)
		return -EINVAL;

	if ((group_args->compute_core_mask & ~ptdev->gpu_info.shader_present) ||
	    (group_args->fragment_core_mask & ~ptdev->gpu_info.shader_present) ||
	    (group_args->tiler_core_mask & ~ptdev->gpu_info.tiler_present))
		return -EINVAL;

	if (hweight64(group_args->compute_core_mask) < group_args->max_compute_cores ||
	    hweight64(group_args->fragment_core_mask) < group_args->max_fragment_cores ||
	    hweight64(group_args->tiler_core_mask) < group_args->max_tiler_cores)
		return -EINVAL;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return -ENOMEM;

	spin_lock_init(&group->fatal_lock);
	kref_init(&group->refcount);
	group->state = PANTHOR_CS_GROUP_CREATED;
	group->csg_id = -1;

	group->ptdev = ptdev;
	group->max_compute_cores = group_args->max_compute_cores;
	group->compute_core_mask = group_args->compute_core_mask;
	group->max_fragment_cores = group_args->max_fragment_cores;
	group->fragment_core_mask = group_args->fragment_core_mask;
	group->max_tiler_cores = group_args->max_tiler_cores;
	group->tiler_core_mask = group_args->tiler_core_mask;
	group->priority = group_args->priority;

	INIT_LIST_HEAD(&group->wait_node);
	INIT_LIST_HEAD(&group->run_node);
	INIT_WORK(&group->term_work, group_term_work);
	INIT_WORK(&group->sync_upd_work, group_sync_upd_work);
	INIT_WORK(&group->tiler_oom_work, group_tiler_oom_work);
	INIT_WORK(&group->release_work, group_release_work);

	group->vm = panthor_vm_pool_get_vm(pfile->vms, group_args->vm_id);
	if (!group->vm) {
		ret = -EINVAL;
		goto err_put_group;
	}

	suspend_size = csg_iface->control->suspend_size;
	group->suspend_buf = panthor_fw_alloc_suspend_buf_mem(ptdev, suspend_size);
	if (IS_ERR(group->suspend_buf)) {
		ret = PTR_ERR(group->suspend_buf);
		group->suspend_buf = NULL;
		goto err_put_group;
	}

	suspend_size = csg_iface->control->protm_suspend_size;
	group->protm_suspend_buf = panthor_fw_alloc_suspend_buf_mem(ptdev, suspend_size);
	if (IS_ERR(group->protm_suspend_buf)) {
		ret = PTR_ERR(group->protm_suspend_buf);
		group->protm_suspend_buf = NULL;
		goto err_put_group;
	}

	group->syncobjs = panthor_kernel_bo_create(ptdev, group->vm,
						   group_args->queues.count *
						   sizeof(struct panthor_syncobj_64b),
						   DRM_PANTHOR_BO_NO_MMAP,
						   DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
						   DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
						   PANTHOR_VM_KERNEL_AUTO_VA,
						   "Group sync objects");
	if (IS_ERR(group->syncobjs)) {
		ret = PTR_ERR(group->syncobjs);
		goto err_put_group;
	}

	ret = panthor_kernel_bo_vmap(group->syncobjs);
	if (ret)
		goto err_put_group;

	memset(group->syncobjs->kmap, 0,
	       group_args->queues.count * sizeof(struct panthor_syncobj_64b));

	ret = xa_alloc(&gpool->xa, &gid, group, XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL);
	if (ret)
		goto err_put_group;

	for (i = 0; i < group_args->queues.count; i++) {
		group->queues[i] = group_create_queue(group, &queue_args[i], drm_client_id, gid, i);
		if (IS_ERR(group->queues[i])) {
			ret = PTR_ERR(group->queues[i]);
			group->queues[i] = NULL;
			goto err_erase_gid;
		}

		group->queue_count++;
	}

	group->idle_queues = GENMASK(group->queue_count - 1, 0);

	mutex_lock(&sched->reset.lock);
	if (atomic_read(&sched->reset.in_progress)) {
		panthor_group_stop(group);
	} else {
		mutex_lock(&sched->lock);
		list_add_tail(&group->run_node,
			      &sched->groups.idle[group->priority]);
		mutex_unlock(&sched->lock);
	}
	mutex_unlock(&sched->reset.lock);

	add_group_kbo_sizes(group->ptdev, group);
	spin_lock_init(&group->fdinfo.lock);

	group_init_task_info(group);

	return gid;

err_erase_gid:
	xa_erase(&gpool->xa, gid);

err_put_group:
	group_put(group);
	return ret;
}
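/*
 * Core-mask validation example (illustrative values): on a GPU with
 * shader_present == 0x3ff (10 shader cores), a compute_core_mask of 0x0f
 * selects 4 of those cores, so hweight64(0x0f) == 4 caps
 * max_compute_cores at 4. A mask with bits outside shader_present, or a
 * max_compute_cores larger than the number of bits set in the mask, is
 * rejected with -EINVAL by panthor_group_create() above.
 */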
int panthor_group_destroy(struct panthor_file *pfile, u32 group_handle)
{
	struct panthor_group_pool *gpool = pfile->groups;
	struct panthor_device *ptdev = pfile->ptdev;
	struct panthor_scheduler *sched = ptdev->scheduler;
	struct panthor_group *group;

	group = xa_erase(&gpool->xa, group_handle);
	if (!group)
		return -EINVAL;

	mutex_lock(&sched->reset.lock);
	mutex_lock(&sched->lock);
	group->destroyed = true;
	if (group->csg_id >= 0) {
		sched_queue_delayed_work(sched, tick, 0);
	} else if (!atomic_read(&sched->reset.in_progress)) {
		/* Remove from the run queues, so the scheduler can't
		 * pick the group on the next tick.
		 */
		list_del_init(&group->run_node);
		list_del_init(&group->wait_node);
		group_queue_work(group, term);
	}
	mutex_unlock(&sched->lock);
	mutex_unlock(&sched->reset.lock);

	group_put(group);
	return 0;
}

static struct panthor_group *group_from_handle(struct panthor_group_pool *pool,
					       u32 group_handle)
{
	struct panthor_group *group;

	xa_lock(&pool->xa);
	group = group_get(xa_load(&pool->xa, group_handle));
	xa_unlock(&pool->xa);

	return group;
}

int panthor_group_get_state(struct panthor_file *pfile,
			    struct drm_panthor_group_get_state *get_state)
{
	struct panthor_group_pool *gpool = pfile->groups;
	struct panthor_device *ptdev = pfile->ptdev;
	struct panthor_scheduler *sched = ptdev->scheduler;
	struct panthor_group *group;

	if (get_state->pad)
		return -EINVAL;

	group = group_from_handle(gpool, get_state->group_handle);
	if (!group)
		return -EINVAL;

	memset(get_state, 0, sizeof(*get_state));

	mutex_lock(&sched->lock);
	if (group->timedout)
		get_state->state |= DRM_PANTHOR_GROUP_STATE_TIMEDOUT;
	if (group->fatal_queues) {
		get_state->state |= DRM_PANTHOR_GROUP_STATE_FATAL_FAULT;
		get_state->fatal_queues = group->fatal_queues;
	}
	if (group->innocent)
		get_state->state |= DRM_PANTHOR_GROUP_STATE_INNOCENT;
	mutex_unlock(&sched->lock);

	group_put(group);
	return 0;
}
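/*
 * Group handles follow a lookup-then-put pattern (sketch of what
 * panthor_group_get_state() above and panthor_job_create() below do):
 * group_from_handle() takes a reference under the XArray lock, the
 * caller uses the group, and then drops its reference with group_put().
 * Holding that reference keeps the group object alive even if another
 * thread destroys the handle concurrently.
 */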
int panthor_group_pool_create(struct panthor_file *pfile)
{
	struct panthor_group_pool *gpool;

	gpool = kzalloc(sizeof(*gpool), GFP_KERNEL);
	if (!gpool)
		return -ENOMEM;

	xa_init_flags(&gpool->xa, XA_FLAGS_ALLOC1);
	pfile->groups = gpool;
	return 0;
}

void panthor_group_pool_destroy(struct panthor_file *pfile)
{
	struct panthor_group_pool *gpool = pfile->groups;
	struct panthor_group *group;
	unsigned long i;

	if (IS_ERR_OR_NULL(gpool))
		return;

	xa_for_each(&gpool->xa, i, group)
		panthor_group_destroy(pfile, i);

	xa_destroy(&gpool->xa);
	kfree(gpool);
	pfile->groups = NULL;
}

/**
 * panthor_fdinfo_gather_group_mem_info() - Retrieve aggregate size of all private kernel BOs
 * belonging to all the groups owned by an open Panthor file
 * @pfile: File.
 * @stats: Memory statistics to be updated.
 */
void
panthor_fdinfo_gather_group_mem_info(struct panthor_file *pfile,
				     struct drm_memory_stats *stats)
{
	struct panthor_group_pool *gpool = pfile->groups;
	struct panthor_group *group;
	unsigned long i;

	if (IS_ERR_OR_NULL(gpool))
		return;

	xa_lock(&gpool->xa);
	xa_for_each(&gpool->xa, i, group) {
		stats->resident += group->fdinfo.kbo_sizes;
		if (group->csg_id >= 0)
			stats->active += group->fdinfo.kbo_sizes;
	}
	xa_unlock(&gpool->xa);
}

static void job_release(struct kref *ref)
{
	struct panthor_job *job = container_of(ref, struct panthor_job, refcount);

	drm_WARN_ON(&job->group->ptdev->base, !list_empty(&job->node));

	if (job->base.s_fence)
		drm_sched_job_cleanup(&job->base);

	if (job->done_fence && job->done_fence->ops)
		dma_fence_put(job->done_fence);
	else
		dma_fence_free(job->done_fence);

	group_put(job->group);

	kfree(job);
}

struct drm_sched_job *panthor_job_get(struct drm_sched_job *sched_job)
{
	if (sched_job) {
		struct panthor_job *job = container_of(sched_job, struct panthor_job, base);

		kref_get(&job->refcount);
	}

	return sched_job;
}

void panthor_job_put(struct drm_sched_job *sched_job)
{
	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);

	if (sched_job)
		kref_put(&job->refcount, job_release);
}

struct panthor_vm *panthor_job_vm(struct drm_sched_job *sched_job)
{
	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);

	return job->group->vm;
}
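/*
 * Submission validation example (illustrative values) for
 * panthor_job_create() below: a stream_addr of 0x800040 is accepted
 * (64-byte aligned) while 0x800048 is not; a stream_size of 256 is
 * accepted (multiple of the 8-byte instruction size) while 260 is not;
 * and latest_flush must have bits 24:30 clear, i.e.
 * latest_flush & GENMASK(30, 24) (0x7f000000) must be zero.
 */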
struct drm_sched_job *
panthor_job_create(struct panthor_file *pfile,
		   u16 group_handle,
		   const struct drm_panthor_queue_submit *qsubmit,
		   u64 drm_client_id)
{
	struct panthor_group_pool *gpool = pfile->groups;
	struct panthor_job *job;
	u32 credits;
	int ret;

	if (qsubmit->pad)
		return ERR_PTR(-EINVAL);

	/* If stream_addr is zero, stream_size must be zero too. */
	if ((qsubmit->stream_size == 0) != (qsubmit->stream_addr == 0))
		return ERR_PTR(-EINVAL);

	/* Make sure the address is aligned on a 64-byte boundary (cache line)
	 * and the size is a multiple of 8 bytes (instruction size).
	 */
	if ((qsubmit->stream_addr & 63) || (qsubmit->stream_size & 7))
		return ERR_PTR(-EINVAL);

	/* Bits 24:30 of latest_flush must be zero. */
	if (qsubmit->latest_flush & GENMASK(30, 24))
		return ERR_PTR(-EINVAL);

	job = kzalloc(sizeof(*job), GFP_KERNEL);
	if (!job)
		return ERR_PTR(-ENOMEM);

	kref_init(&job->refcount);
	job->queue_idx = qsubmit->queue_index;
	job->call_info.size = qsubmit->stream_size;
	job->call_info.start = qsubmit->stream_addr;
	job->call_info.latest_flush = qsubmit->latest_flush;
	INIT_LIST_HEAD(&job->node);

	job->group = group_from_handle(gpool, group_handle);
	if (!job->group) {
		ret = -EINVAL;
		goto err_put_job;
	}

	if (!group_can_run(job->group)) {
		ret = -EINVAL;
		goto err_put_job;
	}

	if (job->queue_idx >= job->group->queue_count ||
	    !job->group->queues[job->queue_idx]) {
		ret = -EINVAL;
		goto err_put_job;
	}

	/* Empty command streams don't need a fence; they'll pick up the one
	 * from the previously submitted job.
	 */
	if (job->call_info.size) {
		job->done_fence = kzalloc(sizeof(*job->done_fence), GFP_KERNEL);
		if (!job->done_fence) {
			ret = -ENOMEM;
			goto err_put_job;
		}
	}

	job->profiling.mask = pfile->ptdev->profile_mask;
	credits = calc_job_credits(job->profiling.mask);
	if (credits == 0) {
		ret = -EINVAL;
		goto err_put_job;
	}

	ret = drm_sched_job_init(&job->base,
				 &job->group->queues[job->queue_idx]->entity,
				 credits, job->group, drm_client_id);
	if (ret)
		goto err_put_job;

	return &job->base;

err_put_job:
	panthor_job_put(&job->base);
	return ERR_PTR(ret);
}

void panthor_job_update_resvs(struct drm_exec *exec, struct drm_sched_job *sched_job)
{
	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);

	panthor_vm_update_resvs(job->group->vm, exec, &sched_job->s_fence->finished,
				DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
}

void panthor_sched_unplug(struct panthor_device *ptdev)
{
	struct panthor_scheduler *sched = ptdev->scheduler;

	disable_delayed_work_sync(&sched->tick_work);
	disable_work_sync(&sched->fw_events_work);
	disable_work_sync(&sched->sync_upd_work);

	mutex_lock(&sched->lock);
	if (sched->pm.has_ref) {
		pm_runtime_put(ptdev->base.dev);
		sched->pm.has_ref = false;
	}
	mutex_unlock(&sched->lock);
}

static void panthor_sched_fini(struct drm_device *ddev, void *res)
{
	struct panthor_scheduler *sched = res;
	int prio;

	if (!sched || !sched->csg_slot_count)
		return;

	if (sched->wq)
		destroy_workqueue(sched->wq);

	if (sched->heap_alloc_wq)
		destroy_workqueue(sched->heap_alloc_wq);

	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
		drm_WARN_ON(ddev, !list_empty(&sched->groups.runnable[prio]));
		drm_WARN_ON(ddev, !list_empty(&sched->groups.idle[prio]));
	}

	drm_WARN_ON(ddev, !list_empty(&sched->groups.waiting));
}
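/*
 * Sizing example for panthor_sched_init() below (illustrative values):
 * with a MAX_CSG_PRIO of 0xf, at most 16 group slots are used even if
 * the FW reports more, so every resident group can get a distinct CSG
 * priority. Likewise, an as_present mask of 0xff (8 address spaces)
 * yields gpu_as_count = hweight32(0xff & GENMASK(31, 1)) = 7, since
 * AS 0 is left to the MCU.
 */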
int panthor_sched_init(struct panthor_device *ptdev)
{
	struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
	struct panthor_fw_csg_iface *csg_iface = panthor_fw_get_csg_iface(ptdev, 0);
	struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, 0, 0);
	struct panthor_scheduler *sched;
	u32 gpu_as_count, num_groups;
	int prio, ret;

	sched = drmm_kzalloc(&ptdev->base, sizeof(*sched), GFP_KERNEL);
	if (!sched)
		return -ENOMEM;

	/* The highest bit in JOB_INT_* is reserved for global IRQs. That
	 * leaves 31 bits for CSG IRQs, hence the MAX_CSGS clamp here.
	 */
	num_groups = min_t(u32, MAX_CSGS, glb_iface->control->group_num);

	/* The FW-side scheduler might deadlock if two groups with the same
	 * priority try to access a set of resources that overlaps, with part
	 * of the resources being allocated to one group and the other part to
	 * the other group, both groups waiting for the remaining resources to
	 * be allocated. To avoid that, it is recommended to assign each CSG a
	 * different priority. In theory we could allow several groups to have
	 * the same CSG priority if they don't request the same resources, but
	 * that makes the scheduling logic more complicated, so let's clamp
	 * the number of CSG slots to MAX_CSG_PRIO + 1 for now.
	 */
	num_groups = min_t(u32, MAX_CSG_PRIO + 1, num_groups);

	/* We need at least one AS for the MCU and one for the GPU contexts. */
	gpu_as_count = hweight32(ptdev->gpu_info.as_present & GENMASK(31, 1));
	if (!gpu_as_count) {
		drm_err(&ptdev->base, "Not enough AS (%d, expected at least 2)",
			gpu_as_count + 1);
		return -EINVAL;
	}

	sched->ptdev = ptdev;
	sched->sb_slot_count = CS_FEATURES_SCOREBOARDS(cs_iface->control->features);
	sched->csg_slot_count = num_groups;
	sched->cs_slot_count = csg_iface->control->stream_num;
	sched->as_slot_count = gpu_as_count;
	ptdev->csif_info.csg_slot_count = sched->csg_slot_count;
	ptdev->csif_info.cs_slot_count = sched->cs_slot_count;
	ptdev->csif_info.scoreboard_slot_count = sched->sb_slot_count;

	sched->last_tick = 0;
	sched->resched_target = U64_MAX;
	sched->tick_period = msecs_to_jiffies(10);
	INIT_DELAYED_WORK(&sched->tick_work, tick_work);
	INIT_WORK(&sched->sync_upd_work, sync_upd_work);
	INIT_WORK(&sched->fw_events_work, process_fw_events_work);

	ret = drmm_mutex_init(&ptdev->base, &sched->lock);
	if (ret)
		return ret;

	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
		INIT_LIST_HEAD(&sched->groups.runnable[prio]);
		INIT_LIST_HEAD(&sched->groups.idle[prio]);
	}
	INIT_LIST_HEAD(&sched->groups.waiting);

	ret = drmm_mutex_init(&ptdev->base, &sched->reset.lock);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&sched->reset.stopped_groups);

	/* sched->heap_alloc_wq will be used for heap chunk allocation on
	 * tiler OOM events, which means we can't use the same workqueue for
	 * the scheduler because works queued by the scheduler are in
	 * the dma-signalling path. Allocate a dedicated heap_alloc_wq to
	 * work around this limitation.
	 *
	 * FIXME: Ultimately, what we need is a failable/non-blocking GEM
	 * allocation path that we can call when a heap OOM is reported. The
	 * FW is smart enough to fall back on other methods if the kernel can't
	 * allocate memory, and fail the tiling job if none of these
	 * countermeasures worked.
	 *
	 * Set WQ_MEM_RECLAIM on sched->wq to unblock the situation when the
	 * system is running out of memory.
	 */
	sched->heap_alloc_wq = alloc_workqueue("panthor-heap-alloc", WQ_UNBOUND, 0);
	sched->wq = alloc_workqueue("panthor-csf-sched", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	if (!sched->wq || !sched->heap_alloc_wq) {
		panthor_sched_fini(&ptdev->base, sched);
		drm_err(&ptdev->base, "Failed to allocate the workqueues");
		return -ENOMEM;
	}

	ret = drmm_add_action_or_reset(&ptdev->base, panthor_sched_fini, sched);
	if (ret)
		return ret;

	ptdev->scheduler = sched;
	return 0;
}