1 // SPDX-License-Identifier: GPL-2.0 or MIT 2 /* Copyright 2023 Collabora ltd. */ 3 4 #include <drm/drm_drv.h> 5 #include <drm/drm_exec.h> 6 #include <drm/drm_gem_shmem_helper.h> 7 #include <drm/drm_managed.h> 8 #include <drm/drm_print.h> 9 #include <drm/gpu_scheduler.h> 10 #include <drm/panthor_drm.h> 11 12 #include <linux/build_bug.h> 13 #include <linux/cleanup.h> 14 #include <linux/clk.h> 15 #include <linux/delay.h> 16 #include <linux/dma-mapping.h> 17 #include <linux/dma-resv.h> 18 #include <linux/firmware.h> 19 #include <linux/interrupt.h> 20 #include <linux/io.h> 21 #include <linux/iopoll.h> 22 #include <linux/iosys-map.h> 23 #include <linux/module.h> 24 #include <linux/platform_device.h> 25 #include <linux/pm_runtime.h> 26 27 #include "panthor_devfreq.h" 28 #include "panthor_device.h" 29 #include "panthor_fw.h" 30 #include "panthor_gem.h" 31 #include "panthor_gpu.h" 32 #include "panthor_heap.h" 33 #include "panthor_mmu.h" 34 #include "panthor_regs.h" 35 #include "panthor_sched.h" 36 37 /** 38 * DOC: Scheduler 39 * 40 * Mali CSF hardware adopts a firmware-assisted scheduling model, where 41 * the firmware takes care of scheduling aspects, to some extent. 42 * 43 * The scheduling happens at the scheduling group level, each group 44 * contains 1 to N queues (N is FW/hardware dependent, and exposed 45 * through the firmware interface). Each queue is assigned a command 46 * stream ring buffer, which serves as a way to get jobs submitted to 47 * the GPU, among other things. 48 * 49 * The firmware can schedule a maximum of M groups (M is FW/hardware 50 * dependent, and exposed through the firmware interface). Passed 51 * this maximum number of groups, the kernel must take care of 52 * rotating the groups passed to the firmware so every group gets 53 * a chance to have his queues scheduled for execution. 54 * 55 * The current implementation only supports with kernel-mode queues. 56 * In other terms, userspace doesn't have access to the ring-buffer. 57 * Instead, userspace passes indirect command stream buffers that are 58 * called from the queue ring-buffer by the kernel using a pre-defined 59 * sequence of command stream instructions to ensure the userspace driver 60 * always gets consistent results (cache maintenance, 61 * synchronization, ...). 62 * 63 * We rely on the drm_gpu_scheduler framework to deal with job 64 * dependencies and submission. As any other driver dealing with a 65 * FW-scheduler, we use the 1:1 entity:scheduler mode, such that each 66 * entity has its own job scheduler. When a job is ready to be executed 67 * (all its dependencies are met), it is pushed to the appropriate 68 * queue ring-buffer, and the group is scheduled for execution if it 69 * wasn't already active. 70 * 71 * Kernel-side group scheduling is timeslice-based. When we have less 72 * groups than there are slots, the periodic tick is disabled and we 73 * just let the FW schedule the active groups. When there are more 74 * groups than slots, we let each group a chance to execute stuff for 75 * a given amount of time, and then re-evaluate and pick new groups 76 * to schedule. The group selection algorithm is based on 77 * priority+round-robin. 78 * 79 * Even though user-mode queues is out of the scope right now, the 80 * current design takes them into account by avoiding any guess on the 81 * group/queue state that would be based on information we wouldn't have 82 * if userspace was in charge of the ring-buffer. 
That's also one of the 83 * reason we don't do 'cooperative' scheduling (encoding FW group slot 84 * reservation as dma_fence that would be returned from the 85 * drm_gpu_scheduler::prepare_job() hook, and treating group rotation as 86 * a queue of waiters, ordered by job submission order). This approach 87 * would work for kernel-mode queues, but would make user-mode queues a 88 * lot more complicated to retrofit. 89 */ 90 91 #define JOB_TIMEOUT_MS 5000 92 93 #define MAX_CSG_PRIO 0xf 94 95 #define NUM_INSTRS_PER_CACHE_LINE (64 / sizeof(u64)) 96 #define MAX_INSTRS_PER_JOB 24 97 98 struct panthor_group; 99 100 /** 101 * struct panthor_csg_slot - Command stream group slot 102 * 103 * This represents a FW slot for a scheduling group. 104 */ 105 struct panthor_csg_slot { 106 /** @group: Scheduling group bound to this slot. */ 107 struct panthor_group *group; 108 109 /** @priority: Group priority. */ 110 u8 priority; 111 112 /** 113 * @idle: True if the group bound to this slot is idle. 114 * 115 * A group is idle when it has nothing waiting for execution on 116 * all its queues, or when queues are blocked waiting for something 117 * to happen (synchronization object). 118 */ 119 bool idle; 120 }; 121 122 /** 123 * enum panthor_csg_priority - Group priority 124 */ 125 enum panthor_csg_priority { 126 /** @PANTHOR_CSG_PRIORITY_LOW: Low priority group. */ 127 PANTHOR_CSG_PRIORITY_LOW = 0, 128 129 /** @PANTHOR_CSG_PRIORITY_MEDIUM: Medium priority group. */ 130 PANTHOR_CSG_PRIORITY_MEDIUM, 131 132 /** @PANTHOR_CSG_PRIORITY_HIGH: High priority group. */ 133 PANTHOR_CSG_PRIORITY_HIGH, 134 135 /** 136 * @PANTHOR_CSG_PRIORITY_RT: Real-time priority group. 137 * 138 * Real-time priority allows one to preempt scheduling of other 139 * non-real-time groups. When such a group becomes executable, 140 * it will evict the group with the lowest non-rt priority if 141 * there's no free group slot available. 142 */ 143 PANTHOR_CSG_PRIORITY_RT, 144 145 /** @PANTHOR_CSG_PRIORITY_COUNT: Number of priority levels. */ 146 PANTHOR_CSG_PRIORITY_COUNT, 147 }; 148 149 /** 150 * struct panthor_scheduler - Object used to manage the scheduler 151 */ 152 struct panthor_scheduler { 153 /** @ptdev: Device. */ 154 struct panthor_device *ptdev; 155 156 /** 157 * @wq: Workqueue used by our internal scheduler logic and 158 * drm_gpu_scheduler. 159 * 160 * Used for the scheduler tick, group update or other kind of FW 161 * event processing that can't be handled in the threaded interrupt 162 * path. Also passed to the drm_gpu_scheduler instances embedded 163 * in panthor_queue. 164 */ 165 struct workqueue_struct *wq; 166 167 /** 168 * @heap_alloc_wq: Workqueue used to schedule tiler_oom works. 169 * 170 * We have a queue dedicated to heap chunk allocation works to avoid 171 * blocking the rest of the scheduler if the allocation tries to 172 * reclaim memory. 173 */ 174 struct workqueue_struct *heap_alloc_wq; 175 176 /** @tick_work: Work executed on a scheduling tick. */ 177 struct delayed_work tick_work; 178 179 /** 180 * @sync_upd_work: Work used to process synchronization object updates. 181 * 182 * We use this work to unblock queues/groups that were waiting on a 183 * synchronization object. 184 */ 185 struct work_struct sync_upd_work; 186 187 /** 188 * @fw_events_work: Work used to process FW events outside the interrupt path. 
189 * 190 * Even if the interrupt is threaded, we need any event processing 191 * that require taking the panthor_scheduler::lock to be processed 192 * outside the interrupt path so we don't block the tick logic when 193 * it calls panthor_fw_{csg,wait}_wait_acks(). Since most of the 194 * event processing requires taking this lock, we just delegate all 195 * FW event processing to the scheduler workqueue. 196 */ 197 struct work_struct fw_events_work; 198 199 /** 200 * @fw_events: Bitmask encoding pending FW events. 201 */ 202 atomic_t fw_events; 203 204 /** 205 * @resched_target: When the next tick should occur. 206 * 207 * Expressed in jiffies. 208 */ 209 u64 resched_target; 210 211 /** 212 * @last_tick: When the last tick occurred. 213 * 214 * Expressed in jiffies. 215 */ 216 u64 last_tick; 217 218 /** @tick_period: Tick period in jiffies. */ 219 u64 tick_period; 220 221 /** 222 * @lock: Lock protecting access to all the scheduler fields. 223 * 224 * Should be taken in the tick work, the irq handler, and anywhere the @groups 225 * fields are touched. 226 */ 227 struct mutex lock; 228 229 /** @groups: Various lists used to classify groups. */ 230 struct { 231 /** 232 * @runnable: Runnable group lists. 233 * 234 * When a group has queues that want to execute something, 235 * its panthor_group::run_node should be inserted here. 236 * 237 * One list per-priority. 238 */ 239 struct list_head runnable[PANTHOR_CSG_PRIORITY_COUNT]; 240 241 /** 242 * @idle: Idle group lists. 243 * 244 * When all queues of a group are idle (either because they 245 * have nothing to execute, or because they are blocked), the 246 * panthor_group::run_node field should be inserted here. 247 * 248 * One list per-priority. 249 */ 250 struct list_head idle[PANTHOR_CSG_PRIORITY_COUNT]; 251 252 /** 253 * @waiting: List of groups whose queues are blocked on a 254 * synchronization object. 255 * 256 * Insert panthor_group::wait_node here when a group is waiting 257 * for synchronization objects to be signaled. 258 * 259 * This list is evaluated in the @sync_upd_work work. 260 */ 261 struct list_head waiting; 262 } groups; 263 264 /** 265 * @csg_slots: FW command stream group slots. 266 */ 267 struct panthor_csg_slot csg_slots[MAX_CSGS]; 268 269 /** @csg_slot_count: Number of command stream group slots exposed by the FW. */ 270 u32 csg_slot_count; 271 272 /** @cs_slot_count: Number of command stream slot per group slot exposed by the FW. */ 273 u32 cs_slot_count; 274 275 /** @as_slot_count: Number of address space slots supported by the MMU. */ 276 u32 as_slot_count; 277 278 /** @used_csg_slot_count: Number of command stream group slot currently used. */ 279 u32 used_csg_slot_count; 280 281 /** @sb_slot_count: Number of scoreboard slots. */ 282 u32 sb_slot_count; 283 284 /** 285 * @might_have_idle_groups: True if an active group might have become idle. 286 * 287 * This will force a tick, so other runnable groups can be scheduled if one 288 * or more active groups became idle. 289 */ 290 bool might_have_idle_groups; 291 292 /** @pm: Power management related fields. */ 293 struct { 294 /** @has_ref: True if the scheduler owns a runtime PM reference. */ 295 bool has_ref; 296 } pm; 297 298 /** @reset: Reset related fields. */ 299 struct { 300 /** @lock: Lock protecting the other reset fields. */ 301 struct mutex lock; 302 303 /** 304 * @in_progress: True if a reset is in progress. 305 * 306 * Set to true in panthor_sched_pre_reset() and back to false in 307 * panthor_sched_post_reset(). 
308 */ 309 atomic_t in_progress; 310 311 /** 312 * @stopped_groups: List containing all groups that were stopped 313 * before a reset. 314 * 315 * Insert panthor_group::run_node in the pre_reset path. 316 */ 317 struct list_head stopped_groups; 318 } reset; 319 }; 320 321 /** 322 * struct panthor_syncobj_32b - 32-bit FW synchronization object 323 */ 324 struct panthor_syncobj_32b { 325 /** @seqno: Sequence number. */ 326 u32 seqno; 327 328 /** 329 * @status: Status. 330 * 331 * Not zero on failure. 332 */ 333 u32 status; 334 }; 335 336 /** 337 * struct panthor_syncobj_64b - 64-bit FW synchronization object 338 */ 339 struct panthor_syncobj_64b { 340 /** @seqno: Sequence number. */ 341 u64 seqno; 342 343 /** 344 * @status: Status. 345 * 346 * Not zero on failure. 347 */ 348 u32 status; 349 350 /** @pad: MBZ. */ 351 u32 pad; 352 }; 353 354 /** 355 * struct panthor_queue - Execution queue 356 */ 357 struct panthor_queue { 358 /** @scheduler: DRM scheduler used for this queue. */ 359 struct drm_gpu_scheduler scheduler; 360 361 /** @entity: DRM scheduling entity used for this queue. */ 362 struct drm_sched_entity entity; 363 364 /** @name: DRM scheduler name for this queue. */ 365 char *name; 366 367 /** 368 * @remaining_time: Time remaining before the job timeout expires. 369 * 370 * The job timeout is suspended when the queue is not scheduled by the 371 * FW. Every time we suspend the timer, we need to save the remaining 372 * time so we can restore it later on. 373 */ 374 unsigned long remaining_time; 375 376 /** @timeout_suspended: True if the job timeout was suspended. */ 377 bool timeout_suspended; 378 379 /** 380 * @doorbell_id: Doorbell assigned to this queue. 381 * 382 * Right now, all groups share the same doorbell, and the doorbell ID 383 * is assigned to group_slot + 1 when the group is assigned a slot. But 384 * we might decide to provide fine grained doorbell assignment at some 385 * point, so don't have to wake up all queues in a group every time one 386 * of them is updated. 387 */ 388 u8 doorbell_id; 389 390 /** 391 * @priority: Priority of the queue inside the group. 392 * 393 * Must be less than 16 (Only 4 bits available). 394 */ 395 u8 priority; 396 #define CSF_MAX_QUEUE_PRIO GENMASK(3, 0) 397 398 /** @ringbuf: Command stream ring-buffer. */ 399 struct panthor_kernel_bo *ringbuf; 400 401 /** @iface: Firmware interface. */ 402 struct { 403 /** @mem: FW memory allocated for this interface. */ 404 struct panthor_kernel_bo *mem; 405 406 /** @input: Input interface. */ 407 struct panthor_fw_ringbuf_input_iface *input; 408 409 /** @output: Output interface. */ 410 const struct panthor_fw_ringbuf_output_iface *output; 411 412 /** @input_fw_va: FW virtual address of the input interface buffer. */ 413 u32 input_fw_va; 414 415 /** @output_fw_va: FW virtual address of the output interface buffer. */ 416 u32 output_fw_va; 417 } iface; 418 419 /** 420 * @syncwait: Stores information about the synchronization object this 421 * queue is waiting on. 422 */ 423 struct { 424 /** @gpu_va: GPU address of the synchronization object. */ 425 u64 gpu_va; 426 427 /** @ref: Reference value to compare against. */ 428 u64 ref; 429 430 /** @gt: True if this is a greater-than test. */ 431 bool gt; 432 433 /** @sync64: True if this is a 64-bit sync object. */ 434 bool sync64; 435 436 /** @bo: Buffer object holding the synchronization object. */ 437 struct drm_gem_object *obj; 438 439 /** @offset: Offset of the synchronization object inside @bo. 
*/ 440 u64 offset; 441 442 /** 443 * @kmap: Kernel mapping of the buffer object holding the 444 * synchronization object. 445 */ 446 void *kmap; 447 } syncwait; 448 449 /** @fence_ctx: Fence context fields. */ 450 struct { 451 /** @lock: Used to protect access to all fences allocated by this context. */ 452 spinlock_t lock; 453 454 /** 455 * @id: Fence context ID. 456 * 457 * Allocated with dma_fence_context_alloc(). 458 */ 459 u64 id; 460 461 /** @seqno: Sequence number of the last initialized fence. */ 462 atomic64_t seqno; 463 464 /** 465 * @last_fence: Fence of the last submitted job. 466 * 467 * We return this fence when we get an empty command stream. 468 * This way, we are guaranteed that all earlier jobs have completed 469 * when drm_sched_job::s_fence::finished without having to feed 470 * the CS ring buffer with a dummy job that only signals the fence. 471 */ 472 struct dma_fence *last_fence; 473 474 /** 475 * @in_flight_jobs: List containing all in-flight jobs. 476 * 477 * Used to keep track and signal panthor_job::done_fence when the 478 * synchronization object attached to the queue is signaled. 479 */ 480 struct list_head in_flight_jobs; 481 } fence_ctx; 482 483 /** @profiling: Job profiling data slots and access information. */ 484 struct { 485 /** @slots: Kernel BO holding the slots. */ 486 struct panthor_kernel_bo *slots; 487 488 /** @slot_count: Number of jobs ringbuffer can hold at once. */ 489 u32 slot_count; 490 491 /** @seqno: Index of the next available profiling information slot. */ 492 u32 seqno; 493 } profiling; 494 }; 495 496 /** 497 * enum panthor_group_state - Scheduling group state. 498 */ 499 enum panthor_group_state { 500 /** @PANTHOR_CS_GROUP_CREATED: Group was created, but not scheduled yet. */ 501 PANTHOR_CS_GROUP_CREATED, 502 503 /** @PANTHOR_CS_GROUP_ACTIVE: Group is currently scheduled. */ 504 PANTHOR_CS_GROUP_ACTIVE, 505 506 /** 507 * @PANTHOR_CS_GROUP_SUSPENDED: Group was scheduled at least once, but is 508 * inactive/suspended right now. 509 */ 510 PANTHOR_CS_GROUP_SUSPENDED, 511 512 /** 513 * @PANTHOR_CS_GROUP_TERMINATED: Group was terminated. 514 * 515 * Can no longer be scheduled. The only allowed action is a destruction. 516 */ 517 PANTHOR_CS_GROUP_TERMINATED, 518 519 /** 520 * @PANTHOR_CS_GROUP_UNKNOWN_STATE: Group is an unknown state. 521 * 522 * The FW returned an inconsistent state. The group is flagged unusable 523 * and can no longer be scheduled. The only allowed action is a 524 * destruction. 525 * 526 * When that happens, we also schedule a FW reset, to start from a fresh 527 * state. 528 */ 529 PANTHOR_CS_GROUP_UNKNOWN_STATE, 530 }; 531 532 /** 533 * struct panthor_group - Scheduling group object 534 */ 535 struct panthor_group { 536 /** @refcount: Reference count */ 537 struct kref refcount; 538 539 /** @ptdev: Device. */ 540 struct panthor_device *ptdev; 541 542 /** @vm: VM bound to the group. */ 543 struct panthor_vm *vm; 544 545 /** @compute_core_mask: Mask of shader cores that can be used for compute jobs. */ 546 u64 compute_core_mask; 547 548 /** @fragment_core_mask: Mask of shader cores that can be used for fragment jobs. */ 549 u64 fragment_core_mask; 550 551 /** @tiler_core_mask: Mask of tiler cores that can be used for tiler jobs. */ 552 u64 tiler_core_mask; 553 554 /** @max_compute_cores: Maximum number of shader cores used for compute jobs. */ 555 u8 max_compute_cores; 556 557 /** @max_fragment_cores: Maximum number of shader cores used for fragment jobs. 
*/ 558 u8 max_fragment_cores; 559 560 /** @max_tiler_cores: Maximum number of tiler cores used for tiler jobs. */ 561 u8 max_tiler_cores; 562 563 /** @priority: Group priority (check panthor_csg_priority). */ 564 u8 priority; 565 566 /** @blocked_queues: Bitmask reflecting the blocked queues. */ 567 u32 blocked_queues; 568 569 /** @idle_queues: Bitmask reflecting the idle queues. */ 570 u32 idle_queues; 571 572 /** @fatal_lock: Lock used to protect access to fatal fields. */ 573 spinlock_t fatal_lock; 574 575 /** @fatal_queues: Bitmask reflecting the queues that hit a fatal exception. */ 576 u32 fatal_queues; 577 578 /** @tiler_oom: Mask of queues that have a tiler OOM event to process. */ 579 atomic_t tiler_oom; 580 581 /** @queue_count: Number of queues in this group. */ 582 u32 queue_count; 583 584 /** @queues: Queues owned by this group. */ 585 struct panthor_queue *queues[MAX_CS_PER_CSG]; 586 587 /** 588 * @csg_id: ID of the FW group slot. 589 * 590 * -1 when the group is not scheduled/active. 591 */ 592 int csg_id; 593 594 /** 595 * @destroyed: True when the group has been destroyed. 596 * 597 * If a group is destroyed it becomes useless: no further jobs can be submitted 598 * to its queues. We simply wait for all references to be dropped so we can 599 * release the group object. 600 */ 601 bool destroyed; 602 603 /** 604 * @timedout: True when a timeout occurred on any of the queues owned by 605 * this group. 606 * 607 * Timeouts can be reported by drm_sched or by the FW. If a reset is required, 608 * and the group can't be suspended, this also leads to a timeout. In any case, 609 * any timeout situation is unrecoverable, and the group becomes useless. We 610 * simply wait for all references to be dropped so we can release the group 611 * object. 612 */ 613 bool timedout; 614 615 /** 616 * @innocent: True when the group becomes unusable because the group suspension 617 * failed during a reset. 618 * 619 * Sometimes the FW was put in a bad state by other groups, causing the group 620 * suspension happening in the reset path to fail. In that case, we consider the 621 * group innocent. 622 */ 623 bool innocent; 624 625 /** 626 * @syncobjs: Pool of per-queue synchronization objects. 627 * 628 * One sync object per queue. The position of the sync object is 629 * determined by the queue index. 630 */ 631 struct panthor_kernel_bo *syncobjs; 632 633 /** @fdinfo: Per-file info exposed through /proc/<process>/fdinfo */ 634 struct { 635 /** @data: Total sampled values for jobs in queues from this group. */ 636 struct panthor_gpu_usage data; 637 638 /** 639 * @fdinfo.lock: Spinlock to govern concurrent access from drm file's fdinfo 640 * callback and job post-completion processing function 641 */ 642 spinlock_t lock; 643 644 /** @fdinfo.kbo_sizes: Aggregate size of private kernel BO's held by the group. */ 645 size_t kbo_sizes; 646 } fdinfo; 647 648 /** @task_info: Info of current->group_leader that created the group. */ 649 struct { 650 /** @task_info.pid: pid of current->group_leader */ 651 pid_t pid; 652 653 /** @task_info.comm: comm of current->group_leader */ 654 char comm[TASK_COMM_LEN]; 655 } task_info; 656 657 /** @state: Group state. */ 658 enum panthor_group_state state; 659 660 /** 661 * @suspend_buf: Suspend buffer. 662 * 663 * Stores the state of the group and its queues when a group is suspended. 664 * Used at resume time to restore the group in its previous state. 665 * 666 * The size of the suspend buffer is exposed through the FW interface. 
667 */ 668 struct panthor_kernel_bo *suspend_buf; 669 670 /** 671 * @protm_suspend_buf: Protection mode suspend buffer. 672 * 673 * Stores the state of the group and its queues when a group that's in 674 * protection mode is suspended. 675 * 676 * Used at resume time to restore the group in its previous state. 677 * 678 * The size of the protection mode suspend buffer is exposed through the 679 * FW interface. 680 */ 681 struct panthor_kernel_bo *protm_suspend_buf; 682 683 /** @sync_upd_work: Work used to check/signal job fences. */ 684 struct work_struct sync_upd_work; 685 686 /** @tiler_oom_work: Work used to process tiler OOM events happening on this group. */ 687 struct work_struct tiler_oom_work; 688 689 /** @term_work: Work used to finish the group termination procedure. */ 690 struct work_struct term_work; 691 692 /** 693 * @release_work: Work used to release group resources. 694 * 695 * We need to postpone the group release to avoid a deadlock when 696 * the last ref is released in the tick work. 697 */ 698 struct work_struct release_work; 699 700 /** 701 * @run_node: Node used to insert the group in the 702 * panthor_group::groups::{runnable,idle} and 703 * panthor_group::reset.stopped_groups lists. 704 */ 705 struct list_head run_node; 706 707 /** 708 * @wait_node: Node used to insert the group in the 709 * panthor_group::groups::waiting list. 710 */ 711 struct list_head wait_node; 712 }; 713 714 struct panthor_job_profiling_data { 715 struct { 716 u64 before; 717 u64 after; 718 } cycles; 719 720 struct { 721 u64 before; 722 u64 after; 723 } time; 724 }; 725 726 /** 727 * group_queue_work() - Queue a group work 728 * @group: Group to queue the work for. 729 * @wname: Work name. 730 * 731 * Grabs a ref and queue a work item to the scheduler workqueue. If 732 * the work was already queued, we release the reference we grabbed. 733 * 734 * Work callbacks must release the reference we grabbed here. 735 */ 736 #define group_queue_work(group, wname) \ 737 do { \ 738 group_get(group); \ 739 if (!queue_work((group)->ptdev->scheduler->wq, &(group)->wname ## _work)) \ 740 group_put(group); \ 741 } while (0) 742 743 /** 744 * sched_queue_work() - Queue a scheduler work. 745 * @sched: Scheduler object. 746 * @wname: Work name. 747 * 748 * Conditionally queues a scheduler work if no reset is pending/in-progress. 749 */ 750 #define sched_queue_work(sched, wname) \ 751 do { \ 752 if (!atomic_read(&(sched)->reset.in_progress) && \ 753 !panthor_device_reset_is_pending((sched)->ptdev)) \ 754 queue_work((sched)->wq, &(sched)->wname ## _work); \ 755 } while (0) 756 757 /** 758 * sched_queue_delayed_work() - Queue a scheduler delayed work. 759 * @sched: Scheduler object. 760 * @wname: Work name. 761 * @delay: Work delay in jiffies. 762 * 763 * Conditionally queues a scheduler delayed work if no reset is 764 * pending/in-progress. 765 */ 766 #define sched_queue_delayed_work(sched, wname, delay) \ 767 do { \ 768 if (!atomic_read(&sched->reset.in_progress) && \ 769 !panthor_device_reset_is_pending((sched)->ptdev)) \ 770 mod_delayed_work((sched)->wq, &(sched)->wname ## _work, delay); \ 771 } while (0) 772 773 /* 774 * We currently set the maximum of groups per file to an arbitrary low value. 775 * But this can be updated if we need more. 776 */ 777 #define MAX_GROUPS_PER_POOL 128 778 779 /** 780 * struct panthor_group_pool - Group pool 781 * 782 * Each file get assigned a group pool. 783 */ 784 struct panthor_group_pool { 785 /** @xa: Xarray used to manage group handles. 
*/ 786 struct xarray xa; 787 }; 788 789 /** 790 * struct panthor_job - Used to manage GPU job 791 */ 792 struct panthor_job { 793 /** @base: Inherit from drm_sched_job. */ 794 struct drm_sched_job base; 795 796 /** @refcount: Reference count. */ 797 struct kref refcount; 798 799 /** @group: Group of the queue this job will be pushed to. */ 800 struct panthor_group *group; 801 802 /** @queue_idx: Index of the queue inside @group. */ 803 u32 queue_idx; 804 805 /** @call_info: Information about the userspace command stream call. */ 806 struct { 807 /** @start: GPU address of the userspace command stream. */ 808 u64 start; 809 810 /** @size: Size of the userspace command stream. */ 811 u32 size; 812 813 /** 814 * @latest_flush: Flush ID at the time the userspace command 815 * stream was built. 816 * 817 * Needed for the flush reduction mechanism. 818 */ 819 u32 latest_flush; 820 } call_info; 821 822 /** @ringbuf: Position of this job is in the ring buffer. */ 823 struct { 824 /** @start: Start offset. */ 825 u64 start; 826 827 /** @end: End offset. */ 828 u64 end; 829 } ringbuf; 830 831 /** 832 * @node: Used to insert the job in the panthor_queue::fence_ctx::in_flight_jobs 833 * list. 834 */ 835 struct list_head node; 836 837 /** @done_fence: Fence signaled when the job is finished or cancelled. */ 838 struct dma_fence *done_fence; 839 840 /** @profiling: Job profiling information. */ 841 struct { 842 /** @mask: Current device job profiling enablement bitmask. */ 843 u32 mask; 844 845 /** @slot: Job index in the profiling slots BO. */ 846 u32 slot; 847 } profiling; 848 }; 849 850 static void 851 panthor_queue_put_syncwait_obj(struct panthor_queue *queue) 852 { 853 if (queue->syncwait.kmap) { 854 struct iosys_map map = IOSYS_MAP_INIT_VADDR(queue->syncwait.kmap); 855 856 drm_gem_vunmap(queue->syncwait.obj, &map); 857 queue->syncwait.kmap = NULL; 858 } 859 860 drm_gem_object_put(queue->syncwait.obj); 861 queue->syncwait.obj = NULL; 862 } 863 864 static void * 865 panthor_queue_get_syncwait_obj(struct panthor_group *group, struct panthor_queue *queue) 866 { 867 struct panthor_device *ptdev = group->ptdev; 868 struct panthor_gem_object *bo; 869 struct iosys_map map; 870 int ret; 871 872 if (queue->syncwait.kmap) 873 return queue->syncwait.kmap + queue->syncwait.offset; 874 875 bo = panthor_vm_get_bo_for_va(group->vm, 876 queue->syncwait.gpu_va, 877 &queue->syncwait.offset); 878 if (drm_WARN_ON(&ptdev->base, IS_ERR_OR_NULL(bo))) 879 goto err_put_syncwait_obj; 880 881 queue->syncwait.obj = &bo->base.base; 882 ret = drm_gem_vmap(queue->syncwait.obj, &map); 883 if (drm_WARN_ON(&ptdev->base, ret)) 884 goto err_put_syncwait_obj; 885 886 queue->syncwait.kmap = map.vaddr; 887 if (drm_WARN_ON(&ptdev->base, !queue->syncwait.kmap)) 888 goto err_put_syncwait_obj; 889 890 return queue->syncwait.kmap + queue->syncwait.offset; 891 892 err_put_syncwait_obj: 893 panthor_queue_put_syncwait_obj(queue); 894 return NULL; 895 } 896 897 static void group_free_queue(struct panthor_group *group, struct panthor_queue *queue) 898 { 899 if (IS_ERR_OR_NULL(queue)) 900 return; 901 902 if (queue->entity.fence_context) 903 drm_sched_entity_destroy(&queue->entity); 904 905 if (queue->scheduler.ops) 906 drm_sched_fini(&queue->scheduler); 907 908 kfree(queue->name); 909 910 panthor_queue_put_syncwait_obj(queue); 911 912 panthor_kernel_bo_destroy(queue->ringbuf); 913 panthor_kernel_bo_destroy(queue->iface.mem); 914 panthor_kernel_bo_destroy(queue->profiling.slots); 915 916 /* Release the last_fence we were holding, if any. 
*/ 917 dma_fence_put(queue->fence_ctx.last_fence); 918 919 kfree(queue); 920 } 921 922 static void group_release_work(struct work_struct *work) 923 { 924 struct panthor_group *group = container_of(work, 925 struct panthor_group, 926 release_work); 927 u32 i; 928 929 for (i = 0; i < group->queue_count; i++) 930 group_free_queue(group, group->queues[i]); 931 932 panthor_kernel_bo_destroy(group->suspend_buf); 933 panthor_kernel_bo_destroy(group->protm_suspend_buf); 934 panthor_kernel_bo_destroy(group->syncobjs); 935 936 panthor_vm_put(group->vm); 937 kfree(group); 938 } 939 940 static void group_release(struct kref *kref) 941 { 942 struct panthor_group *group = container_of(kref, 943 struct panthor_group, 944 refcount); 945 struct panthor_device *ptdev = group->ptdev; 946 947 drm_WARN_ON(&ptdev->base, group->csg_id >= 0); 948 drm_WARN_ON(&ptdev->base, !list_empty(&group->run_node)); 949 drm_WARN_ON(&ptdev->base, !list_empty(&group->wait_node)); 950 951 queue_work(panthor_cleanup_wq, &group->release_work); 952 } 953 954 static void group_put(struct panthor_group *group) 955 { 956 if (group) 957 kref_put(&group->refcount, group_release); 958 } 959 960 static struct panthor_group * 961 group_get(struct panthor_group *group) 962 { 963 if (group) 964 kref_get(&group->refcount); 965 966 return group; 967 } 968 969 /** 970 * group_bind_locked() - Bind a group to a group slot 971 * @group: Group. 972 * @csg_id: Slot. 973 * 974 * Return: 0 on success, a negative error code otherwise. 975 */ 976 static int 977 group_bind_locked(struct panthor_group *group, u32 csg_id) 978 { 979 struct panthor_device *ptdev = group->ptdev; 980 struct panthor_csg_slot *csg_slot; 981 int ret; 982 983 lockdep_assert_held(&ptdev->scheduler->lock); 984 985 if (drm_WARN_ON(&ptdev->base, group->csg_id != -1 || csg_id >= MAX_CSGS || 986 ptdev->scheduler->csg_slots[csg_id].group)) 987 return -EINVAL; 988 989 ret = panthor_vm_active(group->vm); 990 if (ret) 991 return ret; 992 993 csg_slot = &ptdev->scheduler->csg_slots[csg_id]; 994 group_get(group); 995 group->csg_id = csg_id; 996 997 /* Dummy doorbell allocation: doorbell is assigned to the group and 998 * all queues use the same doorbell. 999 * 1000 * TODO: Implement LRU-based doorbell assignment, so the most often 1001 * updated queues get their own doorbell, thus avoiding useless checks 1002 * on queues belonging to the same group that are rarely updated. 1003 */ 1004 for (u32 i = 0; i < group->queue_count; i++) 1005 group->queues[i]->doorbell_id = csg_id + 1; 1006 1007 csg_slot->group = group; 1008 1009 return 0; 1010 } 1011 1012 /** 1013 * group_unbind_locked() - Unbind a group from a slot. 1014 * @group: Group to unbind. 1015 * 1016 * Return: 0 on success, a negative error code otherwise. 1017 */ 1018 static int 1019 group_unbind_locked(struct panthor_group *group) 1020 { 1021 struct panthor_device *ptdev = group->ptdev; 1022 struct panthor_csg_slot *slot; 1023 1024 lockdep_assert_held(&ptdev->scheduler->lock); 1025 1026 if (drm_WARN_ON(&ptdev->base, group->csg_id < 0 || group->csg_id >= MAX_CSGS)) 1027 return -EINVAL; 1028 1029 if (drm_WARN_ON(&ptdev->base, group->state == PANTHOR_CS_GROUP_ACTIVE)) 1030 return -EINVAL; 1031 1032 slot = &ptdev->scheduler->csg_slots[group->csg_id]; 1033 panthor_vm_idle(group->vm); 1034 group->csg_id = -1; 1035 1036 /* Tiler OOM events will be re-issued next time the group is scheduled. 
*/ 1037 atomic_set(&group->tiler_oom, 0); 1038 cancel_work(&group->tiler_oom_work); 1039 1040 for (u32 i = 0; i < group->queue_count; i++) 1041 group->queues[i]->doorbell_id = -1; 1042 1043 slot->group = NULL; 1044 1045 group_put(group); 1046 return 0; 1047 } 1048 1049 /** 1050 * cs_slot_prog_locked() - Program a queue slot 1051 * @ptdev: Device. 1052 * @csg_id: Group slot ID. 1053 * @cs_id: Queue slot ID. 1054 * 1055 * Program a queue slot with the queue information so things can start being 1056 * executed on this queue. 1057 * 1058 * The group slot must have a group bound to it already (group_bind_locked()). 1059 */ 1060 static void 1061 cs_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id) 1062 { 1063 struct panthor_queue *queue = ptdev->scheduler->csg_slots[csg_id].group->queues[cs_id]; 1064 struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id); 1065 1066 lockdep_assert_held(&ptdev->scheduler->lock); 1067 1068 queue->iface.input->extract = queue->iface.output->extract; 1069 drm_WARN_ON(&ptdev->base, queue->iface.input->insert < queue->iface.input->extract); 1070 1071 cs_iface->input->ringbuf_base = panthor_kernel_bo_gpuva(queue->ringbuf); 1072 cs_iface->input->ringbuf_size = panthor_kernel_bo_size(queue->ringbuf); 1073 cs_iface->input->ringbuf_input = queue->iface.input_fw_va; 1074 cs_iface->input->ringbuf_output = queue->iface.output_fw_va; 1075 cs_iface->input->config = CS_CONFIG_PRIORITY(queue->priority) | 1076 CS_CONFIG_DOORBELL(queue->doorbell_id); 1077 cs_iface->input->ack_irq_mask = ~0; 1078 panthor_fw_update_reqs(cs_iface, req, 1079 CS_IDLE_SYNC_WAIT | 1080 CS_IDLE_EMPTY | 1081 CS_STATE_START | 1082 CS_EXTRACT_EVENT, 1083 CS_IDLE_SYNC_WAIT | 1084 CS_IDLE_EMPTY | 1085 CS_STATE_MASK | 1086 CS_EXTRACT_EVENT); 1087 if (queue->iface.input->insert != queue->iface.input->extract && queue->timeout_suspended) { 1088 drm_sched_resume_timeout(&queue->scheduler, queue->remaining_time); 1089 queue->timeout_suspended = false; 1090 } 1091 } 1092 1093 /** 1094 * cs_slot_reset_locked() - Reset a queue slot 1095 * @ptdev: Device. 1096 * @csg_id: Group slot. 1097 * @cs_id: Queue slot. 1098 * 1099 * Change the queue slot state to STOP and suspend the queue timeout if 1100 * the queue is not blocked. 1101 * 1102 * The group slot must have a group bound to it (group_bind_locked()). 1103 */ 1104 static int 1105 cs_slot_reset_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id) 1106 { 1107 struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id); 1108 struct panthor_group *group = ptdev->scheduler->csg_slots[csg_id].group; 1109 struct panthor_queue *queue = group->queues[cs_id]; 1110 1111 lockdep_assert_held(&ptdev->scheduler->lock); 1112 1113 panthor_fw_update_reqs(cs_iface, req, 1114 CS_STATE_STOP, 1115 CS_STATE_MASK); 1116 1117 /* If the queue is blocked, we want to keep the timeout running, so 1118 * we can detect unbounded waits and kill the group when that happens. 1119 */ 1120 if (!(group->blocked_queues & BIT(cs_id)) && !queue->timeout_suspended) { 1121 queue->remaining_time = drm_sched_suspend_timeout(&queue->scheduler); 1122 queue->timeout_suspended = true; 1123 WARN_ON(queue->remaining_time > msecs_to_jiffies(JOB_TIMEOUT_MS)); 1124 } 1125 1126 return 0; 1127 } 1128 1129 /** 1130 * csg_slot_sync_priority_locked() - Synchronize the group slot priority 1131 * @ptdev: Device. 1132 * @csg_id: Group slot ID. 1133 * 1134 * Group slot priority update happens asynchronously. 
When we receive a 1135 * %CSG_ENDPOINT_CONFIG, we know the update is effective, and can 1136 * reflect it to our panthor_csg_slot object. 1137 */ 1138 static void 1139 csg_slot_sync_priority_locked(struct panthor_device *ptdev, u32 csg_id) 1140 { 1141 struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id]; 1142 struct panthor_fw_csg_iface *csg_iface; 1143 1144 lockdep_assert_held(&ptdev->scheduler->lock); 1145 1146 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); 1147 csg_slot->priority = (csg_iface->input->endpoint_req & CSG_EP_REQ_PRIORITY_MASK) >> 28; 1148 } 1149 1150 /** 1151 * cs_slot_sync_queue_state_locked() - Synchronize the queue slot priority 1152 * @ptdev: Device. 1153 * @csg_id: Group slot. 1154 * @cs_id: Queue slot. 1155 * 1156 * Queue state is updated on group suspend or STATUS_UPDATE event. 1157 */ 1158 static void 1159 cs_slot_sync_queue_state_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id) 1160 { 1161 struct panthor_group *group = ptdev->scheduler->csg_slots[csg_id].group; 1162 struct panthor_queue *queue = group->queues[cs_id]; 1163 struct panthor_fw_cs_iface *cs_iface = 1164 panthor_fw_get_cs_iface(group->ptdev, csg_id, cs_id); 1165 1166 u32 status_wait_cond; 1167 1168 switch (cs_iface->output->status_blocked_reason) { 1169 case CS_STATUS_BLOCKED_REASON_UNBLOCKED: 1170 if (queue->iface.input->insert == queue->iface.output->extract && 1171 cs_iface->output->status_scoreboards == 0) 1172 group->idle_queues |= BIT(cs_id); 1173 break; 1174 1175 case CS_STATUS_BLOCKED_REASON_SYNC_WAIT: 1176 if (list_empty(&group->wait_node)) { 1177 list_move_tail(&group->wait_node, 1178 &group->ptdev->scheduler->groups.waiting); 1179 } 1180 1181 /* The queue is only blocked if there's no deferred operation 1182 * pending, which can be checked through the scoreboard status. 1183 */ 1184 if (!cs_iface->output->status_scoreboards) 1185 group->blocked_queues |= BIT(cs_id); 1186 1187 queue->syncwait.gpu_va = cs_iface->output->status_wait_sync_ptr; 1188 queue->syncwait.ref = cs_iface->output->status_wait_sync_value; 1189 status_wait_cond = cs_iface->output->status_wait & CS_STATUS_WAIT_SYNC_COND_MASK; 1190 queue->syncwait.gt = status_wait_cond == CS_STATUS_WAIT_SYNC_COND_GT; 1191 if (cs_iface->output->status_wait & CS_STATUS_WAIT_SYNC_64B) { 1192 u64 sync_val_hi = cs_iface->output->status_wait_sync_value_hi; 1193 1194 queue->syncwait.sync64 = true; 1195 queue->syncwait.ref |= sync_val_hi << 32; 1196 } else { 1197 queue->syncwait.sync64 = false; 1198 } 1199 break; 1200 1201 default: 1202 /* Other reasons are not blocking. Consider the queue as runnable 1203 * in those cases. 
1204 */ 1205 break; 1206 } 1207 } 1208 1209 static void 1210 csg_slot_sync_queues_state_locked(struct panthor_device *ptdev, u32 csg_id) 1211 { 1212 struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id]; 1213 struct panthor_group *group = csg_slot->group; 1214 u32 i; 1215 1216 lockdep_assert_held(&ptdev->scheduler->lock); 1217 1218 group->idle_queues = 0; 1219 group->blocked_queues = 0; 1220 1221 for (i = 0; i < group->queue_count; i++) { 1222 if (group->queues[i]) 1223 cs_slot_sync_queue_state_locked(ptdev, csg_id, i); 1224 } 1225 } 1226 1227 static void 1228 csg_slot_sync_state_locked(struct panthor_device *ptdev, u32 csg_id) 1229 { 1230 struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id]; 1231 struct panthor_fw_csg_iface *csg_iface; 1232 struct panthor_group *group; 1233 enum panthor_group_state new_state, old_state; 1234 u32 csg_state; 1235 1236 lockdep_assert_held(&ptdev->scheduler->lock); 1237 1238 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); 1239 group = csg_slot->group; 1240 1241 if (!group) 1242 return; 1243 1244 old_state = group->state; 1245 csg_state = csg_iface->output->ack & CSG_STATE_MASK; 1246 switch (csg_state) { 1247 case CSG_STATE_START: 1248 case CSG_STATE_RESUME: 1249 new_state = PANTHOR_CS_GROUP_ACTIVE; 1250 break; 1251 case CSG_STATE_TERMINATE: 1252 new_state = PANTHOR_CS_GROUP_TERMINATED; 1253 break; 1254 case CSG_STATE_SUSPEND: 1255 new_state = PANTHOR_CS_GROUP_SUSPENDED; 1256 break; 1257 default: 1258 /* The unknown state might be caused by a FW state corruption, 1259 * which means the group metadata can't be trusted anymore, and 1260 * the SUSPEND operation might propagate the corruption to the 1261 * suspend buffers. Flag the group state as unknown to make 1262 * sure it's unusable after that point. 1263 */ 1264 drm_err(&ptdev->base, "Invalid state on CSG %d (state=%d)", 1265 csg_id, csg_state); 1266 new_state = PANTHOR_CS_GROUP_UNKNOWN_STATE; 1267 break; 1268 } 1269 1270 if (old_state == new_state) 1271 return; 1272 1273 /* The unknown state might be caused by a FW issue, reset the FW to 1274 * take a fresh start. 1275 */ 1276 if (new_state == PANTHOR_CS_GROUP_UNKNOWN_STATE) 1277 panthor_device_schedule_reset(ptdev); 1278 1279 if (new_state == PANTHOR_CS_GROUP_SUSPENDED) 1280 csg_slot_sync_queues_state_locked(ptdev, csg_id); 1281 1282 if (old_state == PANTHOR_CS_GROUP_ACTIVE) { 1283 u32 i; 1284 1285 /* Reset the queue slots so we start from a clean 1286 * state when starting/resuming a new group on this 1287 * CSG slot. No wait needed here, and no ringbell 1288 * either, since the CS slot will only be re-used 1289 * on the next CSG start operation. 
1290 */ 1291 for (i = 0; i < group->queue_count; i++) { 1292 if (group->queues[i]) 1293 cs_slot_reset_locked(ptdev, csg_id, i); 1294 } 1295 } 1296 1297 group->state = new_state; 1298 } 1299 1300 static int 1301 csg_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 priority) 1302 { 1303 struct panthor_fw_csg_iface *csg_iface; 1304 struct panthor_csg_slot *csg_slot; 1305 struct panthor_group *group; 1306 u32 queue_mask = 0, i; 1307 1308 lockdep_assert_held(&ptdev->scheduler->lock); 1309 1310 if (priority > MAX_CSG_PRIO) 1311 return -EINVAL; 1312 1313 if (drm_WARN_ON(&ptdev->base, csg_id >= MAX_CSGS)) 1314 return -EINVAL; 1315 1316 csg_slot = &ptdev->scheduler->csg_slots[csg_id]; 1317 group = csg_slot->group; 1318 if (!group || group->state == PANTHOR_CS_GROUP_ACTIVE) 1319 return 0; 1320 1321 csg_iface = panthor_fw_get_csg_iface(group->ptdev, csg_id); 1322 1323 for (i = 0; i < group->queue_count; i++) { 1324 if (group->queues[i]) { 1325 cs_slot_prog_locked(ptdev, csg_id, i); 1326 queue_mask |= BIT(i); 1327 } 1328 } 1329 1330 csg_iface->input->allow_compute = group->compute_core_mask; 1331 csg_iface->input->allow_fragment = group->fragment_core_mask; 1332 csg_iface->input->allow_other = group->tiler_core_mask; 1333 csg_iface->input->endpoint_req = CSG_EP_REQ_COMPUTE(group->max_compute_cores) | 1334 CSG_EP_REQ_FRAGMENT(group->max_fragment_cores) | 1335 CSG_EP_REQ_TILER(group->max_tiler_cores) | 1336 CSG_EP_REQ_PRIORITY(priority); 1337 csg_iface->input->config = panthor_vm_as(group->vm); 1338 1339 if (group->suspend_buf) 1340 csg_iface->input->suspend_buf = panthor_kernel_bo_gpuva(group->suspend_buf); 1341 else 1342 csg_iface->input->suspend_buf = 0; 1343 1344 if (group->protm_suspend_buf) { 1345 csg_iface->input->protm_suspend_buf = 1346 panthor_kernel_bo_gpuva(group->protm_suspend_buf); 1347 } else { 1348 csg_iface->input->protm_suspend_buf = 0; 1349 } 1350 1351 csg_iface->input->ack_irq_mask = ~0; 1352 panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, queue_mask); 1353 return 0; 1354 } 1355 1356 static void 1357 cs_slot_process_fatal_event_locked(struct panthor_device *ptdev, 1358 u32 csg_id, u32 cs_id) 1359 { 1360 struct panthor_scheduler *sched = ptdev->scheduler; 1361 struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id]; 1362 struct panthor_group *group = csg_slot->group; 1363 struct panthor_fw_cs_iface *cs_iface; 1364 u32 fatal; 1365 u64 info; 1366 1367 lockdep_assert_held(&sched->lock); 1368 1369 cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id); 1370 fatal = cs_iface->output->fatal; 1371 info = cs_iface->output->fatal_info; 1372 1373 if (group) { 1374 drm_warn(&ptdev->base, "CS_FATAL: pid=%d, comm=%s\n", 1375 group->task_info.pid, group->task_info.comm); 1376 1377 group->fatal_queues |= BIT(cs_id); 1378 } 1379 1380 if (CS_EXCEPTION_TYPE(fatal) == DRM_PANTHOR_EXCEPTION_CS_UNRECOVERABLE) { 1381 /* If this exception is unrecoverable, queue a reset, and make 1382 * sure we stop scheduling groups until the reset has happened. 
1383 */ 1384 panthor_device_schedule_reset(ptdev); 1385 cancel_delayed_work(&sched->tick_work); 1386 } else { 1387 sched_queue_delayed_work(sched, tick, 0); 1388 } 1389 1390 drm_warn(&ptdev->base, 1391 "CSG slot %d CS slot: %d\n" 1392 "CS_FATAL.EXCEPTION_TYPE: 0x%x (%s)\n" 1393 "CS_FATAL.EXCEPTION_DATA: 0x%x\n" 1394 "CS_FATAL_INFO.EXCEPTION_DATA: 0x%llx\n", 1395 csg_id, cs_id, 1396 (unsigned int)CS_EXCEPTION_TYPE(fatal), 1397 panthor_exception_name(ptdev, CS_EXCEPTION_TYPE(fatal)), 1398 (unsigned int)CS_EXCEPTION_DATA(fatal), 1399 info); 1400 } 1401 1402 static void 1403 cs_slot_process_fault_event_locked(struct panthor_device *ptdev, 1404 u32 csg_id, u32 cs_id) 1405 { 1406 struct panthor_scheduler *sched = ptdev->scheduler; 1407 struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id]; 1408 struct panthor_group *group = csg_slot->group; 1409 struct panthor_queue *queue = group && cs_id < group->queue_count ? 1410 group->queues[cs_id] : NULL; 1411 struct panthor_fw_cs_iface *cs_iface; 1412 u32 fault; 1413 u64 info; 1414 1415 lockdep_assert_held(&sched->lock); 1416 1417 cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id); 1418 fault = cs_iface->output->fault; 1419 info = cs_iface->output->fault_info; 1420 1421 if (queue) { 1422 u64 cs_extract = queue->iface.output->extract; 1423 struct panthor_job *job; 1424 1425 spin_lock(&queue->fence_ctx.lock); 1426 list_for_each_entry(job, &queue->fence_ctx.in_flight_jobs, node) { 1427 if (cs_extract >= job->ringbuf.end) 1428 continue; 1429 1430 if (cs_extract < job->ringbuf.start) 1431 break; 1432 1433 dma_fence_set_error(job->done_fence, -EINVAL); 1434 } 1435 spin_unlock(&queue->fence_ctx.lock); 1436 } 1437 1438 if (group) { 1439 drm_warn(&ptdev->base, "CS_FAULT: pid=%d, comm=%s\n", 1440 group->task_info.pid, group->task_info.comm); 1441 } 1442 1443 drm_warn(&ptdev->base, 1444 "CSG slot %d CS slot: %d\n" 1445 "CS_FAULT.EXCEPTION_TYPE: 0x%x (%s)\n" 1446 "CS_FAULT.EXCEPTION_DATA: 0x%x\n" 1447 "CS_FAULT_INFO.EXCEPTION_DATA: 0x%llx\n", 1448 csg_id, cs_id, 1449 (unsigned int)CS_EXCEPTION_TYPE(fault), 1450 panthor_exception_name(ptdev, CS_EXCEPTION_TYPE(fault)), 1451 (unsigned int)CS_EXCEPTION_DATA(fault), 1452 info); 1453 } 1454 1455 static int group_process_tiler_oom(struct panthor_group *group, u32 cs_id) 1456 { 1457 struct panthor_device *ptdev = group->ptdev; 1458 struct panthor_scheduler *sched = ptdev->scheduler; 1459 u32 renderpasses_in_flight, pending_frag_count; 1460 struct panthor_heap_pool *heaps = NULL; 1461 u64 heap_address, new_chunk_va = 0; 1462 u32 vt_start, vt_end, frag_end; 1463 int ret, csg_id; 1464 1465 mutex_lock(&sched->lock); 1466 csg_id = group->csg_id; 1467 if (csg_id >= 0) { 1468 struct panthor_fw_cs_iface *cs_iface; 1469 1470 cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id); 1471 heaps = panthor_vm_get_heap_pool(group->vm, false); 1472 heap_address = cs_iface->output->heap_address; 1473 vt_start = cs_iface->output->heap_vt_start; 1474 vt_end = cs_iface->output->heap_vt_end; 1475 frag_end = cs_iface->output->heap_frag_end; 1476 renderpasses_in_flight = vt_start - frag_end; 1477 pending_frag_count = vt_end - frag_end; 1478 } 1479 mutex_unlock(&sched->lock); 1480 1481 /* The group got scheduled out, we stop here. We will get a new tiler OOM event 1482 * when it's scheduled again. 
1483 */ 1484 if (unlikely(csg_id < 0)) 1485 return 0; 1486 1487 if (IS_ERR(heaps) || frag_end > vt_end || vt_end >= vt_start) { 1488 ret = -EINVAL; 1489 } else { 1490 /* We do the allocation without holding the scheduler lock to avoid 1491 * blocking the scheduling. 1492 */ 1493 ret = panthor_heap_grow(heaps, heap_address, 1494 renderpasses_in_flight, 1495 pending_frag_count, &new_chunk_va); 1496 } 1497 1498 /* If the heap context doesn't have memory for us, we want to let the 1499 * FW try to reclaim memory by waiting for fragment jobs to land or by 1500 * executing the tiler OOM exception handler, which is supposed to 1501 * implement incremental rendering. 1502 */ 1503 if (ret && ret != -ENOMEM) { 1504 drm_warn(&ptdev->base, "Failed to extend the tiler heap\n"); 1505 group->fatal_queues |= BIT(cs_id); 1506 sched_queue_delayed_work(sched, tick, 0); 1507 goto out_put_heap_pool; 1508 } 1509 1510 mutex_lock(&sched->lock); 1511 csg_id = group->csg_id; 1512 if (csg_id >= 0) { 1513 struct panthor_fw_csg_iface *csg_iface; 1514 struct panthor_fw_cs_iface *cs_iface; 1515 1516 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); 1517 cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id); 1518 1519 cs_iface->input->heap_start = new_chunk_va; 1520 cs_iface->input->heap_end = new_chunk_va; 1521 panthor_fw_update_reqs(cs_iface, req, cs_iface->output->ack, CS_TILER_OOM); 1522 panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, BIT(cs_id)); 1523 panthor_fw_ring_csg_doorbells(ptdev, BIT(csg_id)); 1524 } 1525 mutex_unlock(&sched->lock); 1526 1527 /* We allocated a chunck, but couldn't link it to the heap 1528 * context because the group was scheduled out while we were 1529 * allocating memory. We need to return this chunk to the heap. 1530 */ 1531 if (unlikely(csg_id < 0 && new_chunk_va)) 1532 panthor_heap_return_chunk(heaps, heap_address, new_chunk_va); 1533 1534 ret = 0; 1535 1536 out_put_heap_pool: 1537 panthor_heap_pool_put(heaps); 1538 return ret; 1539 } 1540 1541 static void group_tiler_oom_work(struct work_struct *work) 1542 { 1543 struct panthor_group *group = 1544 container_of(work, struct panthor_group, tiler_oom_work); 1545 u32 tiler_oom = atomic_xchg(&group->tiler_oom, 0); 1546 1547 while (tiler_oom) { 1548 u32 cs_id = ffs(tiler_oom) - 1; 1549 1550 group_process_tiler_oom(group, cs_id); 1551 tiler_oom &= ~BIT(cs_id); 1552 } 1553 1554 group_put(group); 1555 } 1556 1557 static void 1558 cs_slot_process_tiler_oom_event_locked(struct panthor_device *ptdev, 1559 u32 csg_id, u32 cs_id) 1560 { 1561 struct panthor_scheduler *sched = ptdev->scheduler; 1562 struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id]; 1563 struct panthor_group *group = csg_slot->group; 1564 1565 lockdep_assert_held(&sched->lock); 1566 1567 if (drm_WARN_ON(&ptdev->base, !group)) 1568 return; 1569 1570 atomic_or(BIT(cs_id), &group->tiler_oom); 1571 1572 /* We don't use group_queue_work() here because we want to queue the 1573 * work item to the heap_alloc_wq. 
1574 */ 1575 group_get(group); 1576 if (!queue_work(sched->heap_alloc_wq, &group->tiler_oom_work)) 1577 group_put(group); 1578 } 1579 1580 static bool cs_slot_process_irq_locked(struct panthor_device *ptdev, 1581 u32 csg_id, u32 cs_id) 1582 { 1583 struct panthor_fw_cs_iface *cs_iface; 1584 u32 req, ack, events; 1585 1586 lockdep_assert_held(&ptdev->scheduler->lock); 1587 1588 cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id); 1589 req = cs_iface->input->req; 1590 ack = cs_iface->output->ack; 1591 events = (req ^ ack) & CS_EVT_MASK; 1592 1593 if (events & CS_FATAL) 1594 cs_slot_process_fatal_event_locked(ptdev, csg_id, cs_id); 1595 1596 if (events & CS_FAULT) 1597 cs_slot_process_fault_event_locked(ptdev, csg_id, cs_id); 1598 1599 if (events & CS_TILER_OOM) 1600 cs_slot_process_tiler_oom_event_locked(ptdev, csg_id, cs_id); 1601 1602 /* We don't acknowledge the TILER_OOM event since its handling is 1603 * deferred to a separate work. 1604 */ 1605 panthor_fw_update_reqs(cs_iface, req, ack, CS_FATAL | CS_FAULT); 1606 1607 return (events & (CS_FAULT | CS_TILER_OOM)) != 0; 1608 } 1609 1610 static void csg_slot_sync_idle_state_locked(struct panthor_device *ptdev, u32 csg_id) 1611 { 1612 struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id]; 1613 struct panthor_fw_csg_iface *csg_iface; 1614 1615 lockdep_assert_held(&ptdev->scheduler->lock); 1616 1617 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); 1618 csg_slot->idle = csg_iface->output->status_state & CSG_STATUS_STATE_IS_IDLE; 1619 } 1620 1621 static void csg_slot_process_idle_event_locked(struct panthor_device *ptdev, u32 csg_id) 1622 { 1623 struct panthor_scheduler *sched = ptdev->scheduler; 1624 1625 lockdep_assert_held(&sched->lock); 1626 1627 sched->might_have_idle_groups = true; 1628 1629 /* Schedule a tick so we can evict idle groups and schedule non-idle 1630 * ones. This will also update runtime PM and devfreq busy/idle states, 1631 * so the device can lower its frequency or get suspended. 
1632 */ 1633 sched_queue_delayed_work(sched, tick, 0); 1634 } 1635 1636 static void csg_slot_sync_update_locked(struct panthor_device *ptdev, 1637 u32 csg_id) 1638 { 1639 struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id]; 1640 struct panthor_group *group = csg_slot->group; 1641 1642 lockdep_assert_held(&ptdev->scheduler->lock); 1643 1644 if (group) 1645 group_queue_work(group, sync_upd); 1646 1647 sched_queue_work(ptdev->scheduler, sync_upd); 1648 } 1649 1650 static void 1651 csg_slot_process_progress_timer_event_locked(struct panthor_device *ptdev, u32 csg_id) 1652 { 1653 struct panthor_scheduler *sched = ptdev->scheduler; 1654 struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id]; 1655 struct panthor_group *group = csg_slot->group; 1656 1657 lockdep_assert_held(&sched->lock); 1658 1659 group = csg_slot->group; 1660 if (!drm_WARN_ON(&ptdev->base, !group)) { 1661 drm_warn(&ptdev->base, "CSG_PROGRESS_TIMER_EVENT: pid=%d, comm=%s\n", 1662 group->task_info.pid, group->task_info.comm); 1663 1664 group->timedout = true; 1665 } 1666 1667 drm_warn(&ptdev->base, "CSG slot %d progress timeout\n", csg_id); 1668 1669 sched_queue_delayed_work(sched, tick, 0); 1670 } 1671 1672 static void sched_process_csg_irq_locked(struct panthor_device *ptdev, u32 csg_id) 1673 { 1674 u32 req, ack, cs_irq_req, cs_irq_ack, cs_irqs, csg_events; 1675 struct panthor_fw_csg_iface *csg_iface; 1676 u32 ring_cs_db_mask = 0; 1677 1678 lockdep_assert_held(&ptdev->scheduler->lock); 1679 1680 if (drm_WARN_ON(&ptdev->base, csg_id >= ptdev->scheduler->csg_slot_count)) 1681 return; 1682 1683 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); 1684 req = READ_ONCE(csg_iface->input->req); 1685 ack = READ_ONCE(csg_iface->output->ack); 1686 cs_irq_req = READ_ONCE(csg_iface->output->cs_irq_req); 1687 cs_irq_ack = READ_ONCE(csg_iface->input->cs_irq_ack); 1688 csg_events = (req ^ ack) & CSG_EVT_MASK; 1689 1690 /* There may not be any pending CSG/CS interrupts to process */ 1691 if (req == ack && cs_irq_req == cs_irq_ack) 1692 return; 1693 1694 /* Immediately set IRQ_ACK bits to be same as the IRQ_REQ bits before 1695 * examining the CS_ACK & CS_REQ bits. This would ensure that Host 1696 * doesn't miss an interrupt for the CS in the race scenario where 1697 * whilst Host is servicing an interrupt for the CS, firmware sends 1698 * another interrupt for that CS. 
1699 */ 1700 csg_iface->input->cs_irq_ack = cs_irq_req; 1701 1702 panthor_fw_update_reqs(csg_iface, req, ack, 1703 CSG_SYNC_UPDATE | 1704 CSG_IDLE | 1705 CSG_PROGRESS_TIMER_EVENT); 1706 1707 if (csg_events & CSG_IDLE) 1708 csg_slot_process_idle_event_locked(ptdev, csg_id); 1709 1710 if (csg_events & CSG_PROGRESS_TIMER_EVENT) 1711 csg_slot_process_progress_timer_event_locked(ptdev, csg_id); 1712 1713 cs_irqs = cs_irq_req ^ cs_irq_ack; 1714 while (cs_irqs) { 1715 u32 cs_id = ffs(cs_irqs) - 1; 1716 1717 if (cs_slot_process_irq_locked(ptdev, csg_id, cs_id)) 1718 ring_cs_db_mask |= BIT(cs_id); 1719 1720 cs_irqs &= ~BIT(cs_id); 1721 } 1722 1723 if (csg_events & CSG_SYNC_UPDATE) 1724 csg_slot_sync_update_locked(ptdev, csg_id); 1725 1726 if (ring_cs_db_mask) 1727 panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, ring_cs_db_mask); 1728 1729 panthor_fw_ring_csg_doorbells(ptdev, BIT(csg_id)); 1730 } 1731 1732 static void sched_process_idle_event_locked(struct panthor_device *ptdev) 1733 { 1734 struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev); 1735 1736 lockdep_assert_held(&ptdev->scheduler->lock); 1737 1738 /* Acknowledge the idle event and schedule a tick. */ 1739 panthor_fw_update_reqs(glb_iface, req, glb_iface->output->ack, GLB_IDLE); 1740 sched_queue_delayed_work(ptdev->scheduler, tick, 0); 1741 } 1742 1743 /** 1744 * sched_process_global_irq_locked() - Process the scheduling part of a global IRQ 1745 * @ptdev: Device. 1746 */ 1747 static void sched_process_global_irq_locked(struct panthor_device *ptdev) 1748 { 1749 struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev); 1750 u32 req, ack, evts; 1751 1752 lockdep_assert_held(&ptdev->scheduler->lock); 1753 1754 req = READ_ONCE(glb_iface->input->req); 1755 ack = READ_ONCE(glb_iface->output->ack); 1756 evts = (req ^ ack) & GLB_EVT_MASK; 1757 1758 if (evts & GLB_IDLE) 1759 sched_process_idle_event_locked(ptdev); 1760 } 1761 1762 static void process_fw_events_work(struct work_struct *work) 1763 { 1764 struct panthor_scheduler *sched = container_of(work, struct panthor_scheduler, 1765 fw_events_work); 1766 u32 events = atomic_xchg(&sched->fw_events, 0); 1767 struct panthor_device *ptdev = sched->ptdev; 1768 1769 mutex_lock(&sched->lock); 1770 1771 if (events & JOB_INT_GLOBAL_IF) { 1772 sched_process_global_irq_locked(ptdev); 1773 events &= ~JOB_INT_GLOBAL_IF; 1774 } 1775 1776 while (events) { 1777 u32 csg_id = ffs(events) - 1; 1778 1779 sched_process_csg_irq_locked(ptdev, csg_id); 1780 events &= ~BIT(csg_id); 1781 } 1782 1783 mutex_unlock(&sched->lock); 1784 } 1785 1786 /** 1787 * panthor_sched_report_fw_events() - Report FW events to the scheduler. 
1788 */ 1789 void panthor_sched_report_fw_events(struct panthor_device *ptdev, u32 events) 1790 { 1791 if (!ptdev->scheduler) 1792 return; 1793 1794 atomic_or(events, &ptdev->scheduler->fw_events); 1795 sched_queue_work(ptdev->scheduler, fw_events); 1796 } 1797 1798 static const char *fence_get_driver_name(struct dma_fence *fence) 1799 { 1800 return "panthor"; 1801 } 1802 1803 static const char *queue_fence_get_timeline_name(struct dma_fence *fence) 1804 { 1805 return "queue-fence"; 1806 } 1807 1808 static const struct dma_fence_ops panthor_queue_fence_ops = { 1809 .get_driver_name = fence_get_driver_name, 1810 .get_timeline_name = queue_fence_get_timeline_name, 1811 }; 1812 1813 struct panthor_csg_slots_upd_ctx { 1814 u32 update_mask; 1815 u32 timedout_mask; 1816 struct { 1817 u32 value; 1818 u32 mask; 1819 } requests[MAX_CSGS]; 1820 }; 1821 1822 static void csgs_upd_ctx_init(struct panthor_csg_slots_upd_ctx *ctx) 1823 { 1824 memset(ctx, 0, sizeof(*ctx)); 1825 } 1826 1827 static void csgs_upd_ctx_queue_reqs(struct panthor_device *ptdev, 1828 struct panthor_csg_slots_upd_ctx *ctx, 1829 u32 csg_id, u32 value, u32 mask) 1830 { 1831 if (drm_WARN_ON(&ptdev->base, !mask) || 1832 drm_WARN_ON(&ptdev->base, csg_id >= ptdev->scheduler->csg_slot_count)) 1833 return; 1834 1835 ctx->requests[csg_id].value = (ctx->requests[csg_id].value & ~mask) | (value & mask); 1836 ctx->requests[csg_id].mask |= mask; 1837 ctx->update_mask |= BIT(csg_id); 1838 } 1839 1840 static int csgs_upd_ctx_apply_locked(struct panthor_device *ptdev, 1841 struct panthor_csg_slots_upd_ctx *ctx) 1842 { 1843 struct panthor_scheduler *sched = ptdev->scheduler; 1844 u32 update_slots = ctx->update_mask; 1845 1846 lockdep_assert_held(&sched->lock); 1847 1848 if (!ctx->update_mask) 1849 return 0; 1850 1851 while (update_slots) { 1852 struct panthor_fw_csg_iface *csg_iface; 1853 u32 csg_id = ffs(update_slots) - 1; 1854 1855 update_slots &= ~BIT(csg_id); 1856 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); 1857 panthor_fw_update_reqs(csg_iface, req, 1858 ctx->requests[csg_id].value, 1859 ctx->requests[csg_id].mask); 1860 } 1861 1862 panthor_fw_ring_csg_doorbells(ptdev, ctx->update_mask); 1863 1864 update_slots = ctx->update_mask; 1865 while (update_slots) { 1866 struct panthor_fw_csg_iface *csg_iface; 1867 u32 csg_id = ffs(update_slots) - 1; 1868 u32 req_mask = ctx->requests[csg_id].mask, acked; 1869 int ret; 1870 1871 update_slots &= ~BIT(csg_id); 1872 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); 1873 1874 ret = panthor_fw_csg_wait_acks(ptdev, csg_id, req_mask, &acked, 100); 1875 1876 if (acked & CSG_ENDPOINT_CONFIG) 1877 csg_slot_sync_priority_locked(ptdev, csg_id); 1878 1879 if (acked & CSG_STATE_MASK) 1880 csg_slot_sync_state_locked(ptdev, csg_id); 1881 1882 if (acked & CSG_STATUS_UPDATE) { 1883 csg_slot_sync_queues_state_locked(ptdev, csg_id); 1884 csg_slot_sync_idle_state_locked(ptdev, csg_id); 1885 } 1886 1887 if (ret && acked != req_mask && 1888 ((csg_iface->input->req ^ csg_iface->output->ack) & req_mask) != 0) { 1889 drm_err(&ptdev->base, "CSG %d update request timedout", csg_id); 1890 ctx->timedout_mask |= BIT(csg_id); 1891 } 1892 } 1893 1894 if (ctx->timedout_mask) 1895 return -ETIMEDOUT; 1896 1897 return 0; 1898 } 1899 1900 struct panthor_sched_tick_ctx { 1901 struct list_head old_groups[PANTHOR_CSG_PRIORITY_COUNT]; 1902 struct list_head groups[PANTHOR_CSG_PRIORITY_COUNT]; 1903 u32 idle_group_count; 1904 u32 group_count; 1905 enum panthor_csg_priority min_priority; 1906 struct panthor_vm *vms[MAX_CS_PER_CSG]; 1907 
u32 as_count; 1908 bool immediate_tick; 1909 u32 csg_upd_failed_mask; 1910 }; 1911 1912 static bool 1913 tick_ctx_is_full(const struct panthor_scheduler *sched, 1914 const struct panthor_sched_tick_ctx *ctx) 1915 { 1916 return ctx->group_count == sched->csg_slot_count; 1917 } 1918 1919 static bool 1920 group_is_idle(struct panthor_group *group) 1921 { 1922 struct panthor_device *ptdev = group->ptdev; 1923 u32 inactive_queues; 1924 1925 if (group->csg_id >= 0) 1926 return ptdev->scheduler->csg_slots[group->csg_id].idle; 1927 1928 inactive_queues = group->idle_queues | group->blocked_queues; 1929 return hweight32(inactive_queues) == group->queue_count; 1930 } 1931 1932 static bool 1933 group_can_run(struct panthor_group *group) 1934 { 1935 return group->state != PANTHOR_CS_GROUP_TERMINATED && 1936 group->state != PANTHOR_CS_GROUP_UNKNOWN_STATE && 1937 !group->destroyed && group->fatal_queues == 0 && 1938 !group->timedout; 1939 } 1940 1941 static void 1942 tick_ctx_pick_groups_from_list(const struct panthor_scheduler *sched, 1943 struct panthor_sched_tick_ctx *ctx, 1944 struct list_head *queue, 1945 bool skip_idle_groups, 1946 bool owned_by_tick_ctx) 1947 { 1948 struct panthor_group *group, *tmp; 1949 1950 if (tick_ctx_is_full(sched, ctx)) 1951 return; 1952 1953 list_for_each_entry_safe(group, tmp, queue, run_node) { 1954 u32 i; 1955 1956 if (!group_can_run(group)) 1957 continue; 1958 1959 if (skip_idle_groups && group_is_idle(group)) 1960 continue; 1961 1962 for (i = 0; i < ctx->as_count; i++) { 1963 if (ctx->vms[i] == group->vm) 1964 break; 1965 } 1966 1967 if (i == ctx->as_count && ctx->as_count == sched->as_slot_count) 1968 continue; 1969 1970 if (!owned_by_tick_ctx) 1971 group_get(group); 1972 1973 list_move_tail(&group->run_node, &ctx->groups[group->priority]); 1974 ctx->group_count++; 1975 if (group_is_idle(group)) 1976 ctx->idle_group_count++; 1977 1978 if (i == ctx->as_count) 1979 ctx->vms[ctx->as_count++] = group->vm; 1980 1981 if (ctx->min_priority > group->priority) 1982 ctx->min_priority = group->priority; 1983 1984 if (tick_ctx_is_full(sched, ctx)) 1985 return; 1986 } 1987 } 1988 1989 static void 1990 tick_ctx_insert_old_group(struct panthor_scheduler *sched, 1991 struct panthor_sched_tick_ctx *ctx, 1992 struct panthor_group *group, 1993 bool full_tick) 1994 { 1995 struct panthor_csg_slot *csg_slot = &sched->csg_slots[group->csg_id]; 1996 struct panthor_group *other_group; 1997 1998 if (!full_tick) { 1999 list_add_tail(&group->run_node, &ctx->old_groups[group->priority]); 2000 return; 2001 } 2002 2003 /* Rotate to make sure groups with lower CSG slot 2004 * priorities have a chance to get a higher CSG slot 2005 * priority next time they get picked. This priority 2006 * has an impact on resource request ordering, so it's 2007 * important to make sure we don't let one group starve 2008 * all other groups with the same group priority. 
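* For example, if groups A and B share the same group priority but A
* currently holds the higher CSG slot priority, A is inserted after B here,
* so B is picked first and inherits the higher slot priority on the next tick.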
2009 */ 2010 list_for_each_entry(other_group, 2011 &ctx->old_groups[csg_slot->group->priority], 2012 run_node) { 2013 struct panthor_csg_slot *other_csg_slot = &sched->csg_slots[other_group->csg_id]; 2014 2015 if (other_csg_slot->priority > csg_slot->priority) { 2016 list_add_tail(&csg_slot->group->run_node, &other_group->run_node); 2017 return; 2018 } 2019 } 2020 2021 list_add_tail(&group->run_node, &ctx->old_groups[group->priority]); 2022 } 2023 2024 static void 2025 tick_ctx_init(struct panthor_scheduler *sched, 2026 struct panthor_sched_tick_ctx *ctx, 2027 bool full_tick) 2028 { 2029 struct panthor_device *ptdev = sched->ptdev; 2030 struct panthor_csg_slots_upd_ctx upd_ctx; 2031 int ret; 2032 u32 i; 2033 2034 memset(ctx, 0, sizeof(*ctx)); 2035 csgs_upd_ctx_init(&upd_ctx); 2036 2037 ctx->min_priority = PANTHOR_CSG_PRIORITY_COUNT; 2038 for (i = 0; i < ARRAY_SIZE(ctx->groups); i++) { 2039 INIT_LIST_HEAD(&ctx->groups[i]); 2040 INIT_LIST_HEAD(&ctx->old_groups[i]); 2041 } 2042 2043 for (i = 0; i < sched->csg_slot_count; i++) { 2044 struct panthor_csg_slot *csg_slot = &sched->csg_slots[i]; 2045 struct panthor_group *group = csg_slot->group; 2046 struct panthor_fw_csg_iface *csg_iface; 2047 2048 if (!group) 2049 continue; 2050 2051 csg_iface = panthor_fw_get_csg_iface(ptdev, i); 2052 group_get(group); 2053 2054 /* If there was unhandled faults on the VM, force processing of 2055 * CSG IRQs, so we can flag the faulty queue. 2056 */ 2057 if (panthor_vm_has_unhandled_faults(group->vm)) { 2058 sched_process_csg_irq_locked(ptdev, i); 2059 2060 /* No fatal fault reported, flag all queues as faulty. */ 2061 if (!group->fatal_queues) 2062 group->fatal_queues |= GENMASK(group->queue_count - 1, 0); 2063 } 2064 2065 tick_ctx_insert_old_group(sched, ctx, group, full_tick); 2066 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, i, 2067 csg_iface->output->ack ^ CSG_STATUS_UPDATE, 2068 CSG_STATUS_UPDATE); 2069 } 2070 2071 ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx); 2072 if (ret) { 2073 panthor_device_schedule_reset(ptdev); 2074 ctx->csg_upd_failed_mask |= upd_ctx.timedout_mask; 2075 } 2076 } 2077 2078 static void 2079 group_term_post_processing(struct panthor_group *group) 2080 { 2081 struct panthor_job *job, *tmp; 2082 LIST_HEAD(faulty_jobs); 2083 bool cookie; 2084 u32 i = 0; 2085 2086 if (drm_WARN_ON(&group->ptdev->base, group_can_run(group))) 2087 return; 2088 2089 cookie = dma_fence_begin_signalling(); 2090 for (i = 0; i < group->queue_count; i++) { 2091 struct panthor_queue *queue = group->queues[i]; 2092 struct panthor_syncobj_64b *syncobj; 2093 int err; 2094 2095 if (group->fatal_queues & BIT(i)) 2096 err = -EINVAL; 2097 else if (group->timedout) 2098 err = -ETIMEDOUT; 2099 else 2100 err = -ECANCELED; 2101 2102 if (!queue) 2103 continue; 2104 2105 spin_lock(&queue->fence_ctx.lock); 2106 list_for_each_entry_safe(job, tmp, &queue->fence_ctx.in_flight_jobs, node) { 2107 list_move_tail(&job->node, &faulty_jobs); 2108 dma_fence_set_error(job->done_fence, err); 2109 dma_fence_signal_locked(job->done_fence); 2110 } 2111 spin_unlock(&queue->fence_ctx.lock); 2112 2113 /* Manually update the syncobj seqno to unblock waiters. 
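* We force an error status (~0) and jump the seqno to the last fence issued
* on this queue, then queue the sync_upd work below so groups waiting on
* this syncobj get re-evaluated and unblocked.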
*/ 2114 syncobj = group->syncobjs->kmap + (i * sizeof(*syncobj)); 2115 syncobj->status = ~0; 2116 syncobj->seqno = atomic64_read(&queue->fence_ctx.seqno); 2117 sched_queue_work(group->ptdev->scheduler, sync_upd); 2118 } 2119 dma_fence_end_signalling(cookie); 2120 2121 list_for_each_entry_safe(job, tmp, &faulty_jobs, node) { 2122 list_del_init(&job->node); 2123 panthor_job_put(&job->base); 2124 } 2125 } 2126 2127 static void group_term_work(struct work_struct *work) 2128 { 2129 struct panthor_group *group = 2130 container_of(work, struct panthor_group, term_work); 2131 2132 group_term_post_processing(group); 2133 group_put(group); 2134 } 2135 2136 static void 2137 tick_ctx_cleanup(struct panthor_scheduler *sched, 2138 struct panthor_sched_tick_ctx *ctx) 2139 { 2140 struct panthor_device *ptdev = sched->ptdev; 2141 struct panthor_group *group, *tmp; 2142 u32 i; 2143 2144 for (i = 0; i < ARRAY_SIZE(ctx->old_groups); i++) { 2145 list_for_each_entry_safe(group, tmp, &ctx->old_groups[i], run_node) { 2146 /* If everything went fine, we should only have groups 2147 * to be terminated in the old_groups lists. 2148 */ 2149 drm_WARN_ON(&ptdev->base, !ctx->csg_upd_failed_mask && 2150 group_can_run(group)); 2151 2152 if (!group_can_run(group)) { 2153 list_del_init(&group->run_node); 2154 list_del_init(&group->wait_node); 2155 group_queue_work(group, term); 2156 } else if (group->csg_id >= 0) { 2157 list_del_init(&group->run_node); 2158 } else { 2159 list_move(&group->run_node, 2160 group_is_idle(group) ? 2161 &sched->groups.idle[group->priority] : 2162 &sched->groups.runnable[group->priority]); 2163 } 2164 group_put(group); 2165 } 2166 } 2167 2168 for (i = 0; i < ARRAY_SIZE(ctx->groups); i++) { 2169 /* If everything went fine, the groups to schedule lists should 2170 * be empty. 2171 */ 2172 drm_WARN_ON(&ptdev->base, 2173 !ctx->csg_upd_failed_mask && !list_empty(&ctx->groups[i])); 2174 2175 list_for_each_entry_safe(group, tmp, &ctx->groups[i], run_node) { 2176 if (group->csg_id >= 0) { 2177 list_del_init(&group->run_node); 2178 } else { 2179 list_move(&group->run_node, 2180 group_is_idle(group) ? 2181 &sched->groups.idle[group->priority] : 2182 &sched->groups.runnable[group->priority]); 2183 } 2184 group_put(group); 2185 } 2186 } 2187 } 2188 2189 static void 2190 tick_ctx_apply(struct panthor_scheduler *sched, struct panthor_sched_tick_ctx *ctx) 2191 { 2192 struct panthor_group *group, *tmp; 2193 struct panthor_device *ptdev = sched->ptdev; 2194 struct panthor_csg_slot *csg_slot; 2195 int prio, new_csg_prio = MAX_CSG_PRIO, i; 2196 u32 free_csg_slots = 0; 2197 struct panthor_csg_slots_upd_ctx upd_ctx; 2198 int ret; 2199 2200 csgs_upd_ctx_init(&upd_ctx); 2201 2202 for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) { 2203 /* Suspend or terminate evicted groups. */ 2204 list_for_each_entry(group, &ctx->old_groups[prio], run_node) { 2205 bool term = !group_can_run(group); 2206 int csg_id = group->csg_id; 2207 2208 if (drm_WARN_ON(&ptdev->base, csg_id < 0)) 2209 continue; 2210 2211 csg_slot = &sched->csg_slots[csg_id]; 2212 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id, 2213 term ? CSG_STATE_TERMINATE : CSG_STATE_SUSPEND, 2214 CSG_STATE_MASK); 2215 } 2216 2217 /* Update priorities on already running groups. 
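* CSG slot priorities are re-derived from the pick order: new_csg_prio starts
* at MAX_CSG_PRIO and is decremented per group, and only slots whose priority
* actually changes get a CSG_ENDPOINT_CONFIG request queued.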
*/ 2218 list_for_each_entry(group, &ctx->groups[prio], run_node) { 2219 struct panthor_fw_csg_iface *csg_iface; 2220 int csg_id = group->csg_id; 2221 2222 if (csg_id < 0) { 2223 new_csg_prio--; 2224 continue; 2225 } 2226 2227 csg_slot = &sched->csg_slots[csg_id]; 2228 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); 2229 if (csg_slot->priority == new_csg_prio) { 2230 new_csg_prio--; 2231 continue; 2232 } 2233 2234 panthor_fw_update_reqs(csg_iface, endpoint_req, 2235 CSG_EP_REQ_PRIORITY(new_csg_prio), 2236 CSG_EP_REQ_PRIORITY_MASK); 2237 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id, 2238 csg_iface->output->ack ^ CSG_ENDPOINT_CONFIG, 2239 CSG_ENDPOINT_CONFIG); 2240 new_csg_prio--; 2241 } 2242 } 2243 2244 ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx); 2245 if (ret) { 2246 panthor_device_schedule_reset(ptdev); 2247 ctx->csg_upd_failed_mask |= upd_ctx.timedout_mask; 2248 return; 2249 } 2250 2251 /* Unbind evicted groups. */ 2252 for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) { 2253 list_for_each_entry(group, &ctx->old_groups[prio], run_node) { 2254 /* This group is gone. Process interrupts to clear 2255 * any pending interrupts before we start the new 2256 * group. 2257 */ 2258 if (group->csg_id >= 0) 2259 sched_process_csg_irq_locked(ptdev, group->csg_id); 2260 2261 group_unbind_locked(group); 2262 } 2263 } 2264 2265 for (i = 0; i < sched->csg_slot_count; i++) { 2266 if (!sched->csg_slots[i].group) 2267 free_csg_slots |= BIT(i); 2268 } 2269 2270 csgs_upd_ctx_init(&upd_ctx); 2271 new_csg_prio = MAX_CSG_PRIO; 2272 2273 /* Start new groups. */ 2274 for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) { 2275 list_for_each_entry(group, &ctx->groups[prio], run_node) { 2276 int csg_id = group->csg_id; 2277 struct panthor_fw_csg_iface *csg_iface; 2278 2279 if (csg_id >= 0) { 2280 new_csg_prio--; 2281 continue; 2282 } 2283 2284 csg_id = ffs(free_csg_slots) - 1; 2285 if (drm_WARN_ON(&ptdev->base, csg_id < 0)) 2286 break; 2287 2288 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); 2289 csg_slot = &sched->csg_slots[csg_id]; 2290 group_bind_locked(group, csg_id); 2291 csg_slot_prog_locked(ptdev, csg_id, new_csg_prio--); 2292 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id, 2293 group->state == PANTHOR_CS_GROUP_SUSPENDED ? 2294 CSG_STATE_RESUME : CSG_STATE_START, 2295 CSG_STATE_MASK); 2296 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id, 2297 csg_iface->output->ack ^ CSG_ENDPOINT_CONFIG, 2298 CSG_ENDPOINT_CONFIG); 2299 free_csg_slots &= ~BIT(csg_id); 2300 } 2301 } 2302 2303 ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx); 2304 if (ret) { 2305 panthor_device_schedule_reset(ptdev); 2306 ctx->csg_upd_failed_mask |= upd_ctx.timedout_mask; 2307 return; 2308 } 2309 2310 for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) { 2311 list_for_each_entry_safe(group, tmp, &ctx->groups[prio], run_node) { 2312 list_del_init(&group->run_node); 2313 2314 /* If the group has been destroyed while we were 2315 * scheduling, ask for an immediate tick to 2316 * re-evaluate as soon as possible and get rid of 2317 * this dangling group. 2318 */ 2319 if (group->destroyed) 2320 ctx->immediate_tick = true; 2321 group_put(group); 2322 } 2323 2324 /* Return evicted groups to the idle or run queues. Groups 2325 * that can no longer be run (because they've been destroyed 2326 * or experienced an unrecoverable error) will be scheduled 2327 * for destruction in tick_ctx_cleanup(). 
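* Such groups are skipped here and left on ctx->old_groups, so
* tick_ctx_cleanup() can pick them up and queue the termination work.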
2328 */ 2329 list_for_each_entry_safe(group, tmp, &ctx->old_groups[prio], run_node) { 2330 if (!group_can_run(group)) 2331 continue; 2332 2333 if (group_is_idle(group)) 2334 list_move_tail(&group->run_node, &sched->groups.idle[prio]); 2335 else 2336 list_move_tail(&group->run_node, &sched->groups.runnable[prio]); 2337 group_put(group); 2338 } 2339 } 2340 2341 sched->used_csg_slot_count = ctx->group_count; 2342 sched->might_have_idle_groups = ctx->idle_group_count > 0; 2343 } 2344 2345 static u64 2346 tick_ctx_update_resched_target(struct panthor_scheduler *sched, 2347 const struct panthor_sched_tick_ctx *ctx) 2348 { 2349 /* We had space left, no need to reschedule until some external event happens. */ 2350 if (!tick_ctx_is_full(sched, ctx)) 2351 goto no_tick; 2352 2353 /* If idle groups were scheduled, no need to wake up until some external 2354 * event happens (group unblocked, new job submitted, ...). 2355 */ 2356 if (ctx->idle_group_count) 2357 goto no_tick; 2358 2359 if (drm_WARN_ON(&sched->ptdev->base, ctx->min_priority >= PANTHOR_CSG_PRIORITY_COUNT)) 2360 goto no_tick; 2361 2362 /* If there are groups of the same priority waiting, we need to 2363 * keep the scheduler ticking, otherwise, we'll just wait for 2364 * new groups with higher priority to be queued. 2365 */ 2366 if (!list_empty(&sched->groups.runnable[ctx->min_priority])) { 2367 u64 resched_target = sched->last_tick + sched->tick_period; 2368 2369 if (time_before64(sched->resched_target, sched->last_tick) || 2370 time_before64(resched_target, sched->resched_target)) 2371 sched->resched_target = resched_target; 2372 2373 return sched->resched_target - sched->last_tick; 2374 } 2375 2376 no_tick: 2377 sched->resched_target = U64_MAX; 2378 return U64_MAX; 2379 } 2380 2381 static void tick_work(struct work_struct *work) 2382 { 2383 struct panthor_scheduler *sched = container_of(work, struct panthor_scheduler, 2384 tick_work.work); 2385 struct panthor_device *ptdev = sched->ptdev; 2386 struct panthor_sched_tick_ctx ctx; 2387 u64 remaining_jiffies = 0, resched_delay; 2388 u64 now = get_jiffies_64(); 2389 int prio, ret, cookie; 2390 2391 if (!drm_dev_enter(&ptdev->base, &cookie)) 2392 return; 2393 2394 ret = panthor_device_resume_and_get(ptdev); 2395 if (drm_WARN_ON(&ptdev->base, ret)) 2396 goto out_dev_exit; 2397 2398 if (time_before64(now, sched->resched_target)) 2399 remaining_jiffies = sched->resched_target - now; 2400 2401 mutex_lock(&sched->lock); 2402 if (panthor_device_reset_is_pending(sched->ptdev)) 2403 goto out_unlock; 2404 2405 tick_ctx_init(sched, &ctx, remaining_jiffies != 0); 2406 if (ctx.csg_upd_failed_mask) 2407 goto out_cleanup_ctx; 2408 2409 if (remaining_jiffies) { 2410 /* Scheduling forced in the middle of a tick. Only RT groups 2411 * can preempt non-RT ones. Currently running RT groups can't be 2412 * preempted. 
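* Concretely, we re-pick from the groups already owning a slot
* (ctx.old_groups) at every priority, and only pull new groups from the
* RT runnable list.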
2413 */ 2414 for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; 2415 prio >= 0 && !tick_ctx_is_full(sched, &ctx); 2416 prio--) { 2417 tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], 2418 true, true); 2419 if (prio == PANTHOR_CSG_PRIORITY_RT) { 2420 tick_ctx_pick_groups_from_list(sched, &ctx, 2421 &sched->groups.runnable[prio], 2422 true, false); 2423 } 2424 } 2425 } 2426 2427 /* First pick non-idle groups */ 2428 for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; 2429 prio >= 0 && !tick_ctx_is_full(sched, &ctx); 2430 prio--) { 2431 tick_ctx_pick_groups_from_list(sched, &ctx, &sched->groups.runnable[prio], 2432 true, false); 2433 tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], true, true); 2434 } 2435 2436 /* If we have free CSG slots left, pick idle groups */ 2437 for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; 2438 prio >= 0 && !tick_ctx_is_full(sched, &ctx); 2439 prio--) { 2440 /* Check the old_group queue first to avoid reprogramming the slots */ 2441 tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], false, true); 2442 tick_ctx_pick_groups_from_list(sched, &ctx, &sched->groups.idle[prio], 2443 false, false); 2444 } 2445 2446 tick_ctx_apply(sched, &ctx); 2447 if (ctx.csg_upd_failed_mask) 2448 goto out_cleanup_ctx; 2449 2450 if (ctx.idle_group_count == ctx.group_count) { 2451 panthor_devfreq_record_idle(sched->ptdev); 2452 if (sched->pm.has_ref) { 2453 pm_runtime_put_autosuspend(ptdev->base.dev); 2454 sched->pm.has_ref = false; 2455 } 2456 } else { 2457 panthor_devfreq_record_busy(sched->ptdev); 2458 if (!sched->pm.has_ref) { 2459 pm_runtime_get(ptdev->base.dev); 2460 sched->pm.has_ref = true; 2461 } 2462 } 2463 2464 sched->last_tick = now; 2465 resched_delay = tick_ctx_update_resched_target(sched, &ctx); 2466 if (ctx.immediate_tick) 2467 resched_delay = 0; 2468 2469 if (resched_delay != U64_MAX) 2470 sched_queue_delayed_work(sched, tick, resched_delay); 2471 2472 out_cleanup_ctx: 2473 tick_ctx_cleanup(sched, &ctx); 2474 2475 out_unlock: 2476 mutex_unlock(&sched->lock); 2477 pm_runtime_mark_last_busy(ptdev->base.dev); 2478 pm_runtime_put_autosuspend(ptdev->base.dev); 2479 2480 out_dev_exit: 2481 drm_dev_exit(cookie); 2482 } 2483 2484 static int panthor_queue_eval_syncwait(struct panthor_group *group, u8 queue_idx) 2485 { 2486 struct panthor_queue *queue = group->queues[queue_idx]; 2487 union { 2488 struct panthor_syncobj_64b sync64; 2489 struct panthor_syncobj_32b sync32; 2490 } *syncobj; 2491 bool result; 2492 u64 value; 2493 2494 syncobj = panthor_queue_get_syncwait_obj(group, queue); 2495 if (!syncobj) 2496 return -EINVAL; 2497 2498 value = queue->syncwait.sync64 ? 
2499 syncobj->sync64.seqno : 2500 syncobj->sync32.seqno; 2501 2502 if (queue->syncwait.gt) 2503 result = value > queue->syncwait.ref; 2504 else 2505 result = value <= queue->syncwait.ref; 2506 2507 if (result) 2508 panthor_queue_put_syncwait_obj(queue); 2509 2510 return result; 2511 } 2512 2513 static void sync_upd_work(struct work_struct *work) 2514 { 2515 struct panthor_scheduler *sched = container_of(work, 2516 struct panthor_scheduler, 2517 sync_upd_work); 2518 struct panthor_group *group, *tmp; 2519 bool immediate_tick = false; 2520 2521 mutex_lock(&sched->lock); 2522 list_for_each_entry_safe(group, tmp, &sched->groups.waiting, wait_node) { 2523 u32 tested_queues = group->blocked_queues; 2524 u32 unblocked_queues = 0; 2525 2526 while (tested_queues) { 2527 u32 cs_id = ffs(tested_queues) - 1; 2528 int ret; 2529 2530 ret = panthor_queue_eval_syncwait(group, cs_id); 2531 drm_WARN_ON(&group->ptdev->base, ret < 0); 2532 if (ret) 2533 unblocked_queues |= BIT(cs_id); 2534 2535 tested_queues &= ~BIT(cs_id); 2536 } 2537 2538 if (unblocked_queues) { 2539 group->blocked_queues &= ~unblocked_queues; 2540 2541 if (group->csg_id < 0) { 2542 list_move(&group->run_node, 2543 &sched->groups.runnable[group->priority]); 2544 if (group->priority == PANTHOR_CSG_PRIORITY_RT) 2545 immediate_tick = true; 2546 } 2547 } 2548 2549 if (!group->blocked_queues) 2550 list_del_init(&group->wait_node); 2551 } 2552 mutex_unlock(&sched->lock); 2553 2554 if (immediate_tick) 2555 sched_queue_delayed_work(sched, tick, 0); 2556 } 2557 2558 static void group_schedule_locked(struct panthor_group *group, u32 queue_mask) 2559 { 2560 struct panthor_device *ptdev = group->ptdev; 2561 struct panthor_scheduler *sched = ptdev->scheduler; 2562 struct list_head *queue = &sched->groups.runnable[group->priority]; 2563 u64 delay_jiffies = 0; 2564 bool was_idle; 2565 u64 now; 2566 2567 if (!group_can_run(group)) 2568 return; 2569 2570 /* All updated queues are blocked, no need to wake up the scheduler. */ 2571 if ((queue_mask & group->blocked_queues) == queue_mask) 2572 return; 2573 2574 was_idle = group_is_idle(group); 2575 group->idle_queues &= ~queue_mask; 2576 2577 /* Don't mess up with the lists if we're in a middle of a reset. */ 2578 if (atomic_read(&sched->reset.in_progress)) 2579 return; 2580 2581 if (was_idle && !group_is_idle(group)) 2582 list_move_tail(&group->run_node, queue); 2583 2584 /* RT groups are preemptive. */ 2585 if (group->priority == PANTHOR_CSG_PRIORITY_RT) { 2586 sched_queue_delayed_work(sched, tick, 0); 2587 return; 2588 } 2589 2590 /* Some groups might be idle, force an immediate tick to 2591 * re-evaluate. 2592 */ 2593 if (sched->might_have_idle_groups) { 2594 sched_queue_delayed_work(sched, tick, 0); 2595 return; 2596 } 2597 2598 /* Scheduler is ticking, nothing to do. */ 2599 if (sched->resched_target != U64_MAX) { 2600 /* If there are free slots, force immediating ticking. */ 2601 if (sched->used_csg_slot_count < sched->csg_slot_count) 2602 sched_queue_delayed_work(sched, tick, 0); 2603 2604 return; 2605 } 2606 2607 /* Scheduler tick was off, recalculate the resched_target based on the 2608 * last tick event, and queue the scheduler work. 
2609 */ 2610 now = get_jiffies_64(); 2611 sched->resched_target = sched->last_tick + sched->tick_period; 2612 if (sched->used_csg_slot_count == sched->csg_slot_count && 2613 time_before64(now, sched->resched_target)) 2614 delay_jiffies = min_t(unsigned long, sched->resched_target - now, ULONG_MAX); 2615 2616 sched_queue_delayed_work(sched, tick, delay_jiffies); 2617 } 2618 2619 static void queue_stop(struct panthor_queue *queue, 2620 struct panthor_job *bad_job) 2621 { 2622 drm_sched_stop(&queue->scheduler, bad_job ? &bad_job->base : NULL); 2623 } 2624 2625 static void queue_start(struct panthor_queue *queue) 2626 { 2627 struct panthor_job *job; 2628 2629 /* Re-assign the parent fences. */ 2630 list_for_each_entry(job, &queue->scheduler.pending_list, base.list) 2631 job->base.s_fence->parent = dma_fence_get(job->done_fence); 2632 2633 drm_sched_start(&queue->scheduler, 0); 2634 } 2635 2636 static void panthor_group_stop(struct panthor_group *group) 2637 { 2638 struct panthor_scheduler *sched = group->ptdev->scheduler; 2639 2640 lockdep_assert_held(&sched->reset.lock); 2641 2642 for (u32 i = 0; i < group->queue_count; i++) 2643 queue_stop(group->queues[i], NULL); 2644 2645 group_get(group); 2646 list_move_tail(&group->run_node, &sched->reset.stopped_groups); 2647 } 2648 2649 static void panthor_group_start(struct panthor_group *group) 2650 { 2651 struct panthor_scheduler *sched = group->ptdev->scheduler; 2652 2653 lockdep_assert_held(&group->ptdev->scheduler->reset.lock); 2654 2655 for (u32 i = 0; i < group->queue_count; i++) 2656 queue_start(group->queues[i]); 2657 2658 if (group_can_run(group)) { 2659 list_move_tail(&group->run_node, 2660 group_is_idle(group) ? 2661 &sched->groups.idle[group->priority] : 2662 &sched->groups.runnable[group->priority]); 2663 } else { 2664 list_del_init(&group->run_node); 2665 list_del_init(&group->wait_node); 2666 group_queue_work(group, term); 2667 } 2668 2669 group_put(group); 2670 } 2671 2672 static void panthor_sched_immediate_tick(struct panthor_device *ptdev) 2673 { 2674 struct panthor_scheduler *sched = ptdev->scheduler; 2675 2676 sched_queue_delayed_work(sched, tick, 0); 2677 } 2678 2679 /** 2680 * panthor_sched_report_mmu_fault() - Report MMU faults to the scheduler. 2681 */ 2682 void panthor_sched_report_mmu_fault(struct panthor_device *ptdev) 2683 { 2684 /* Force a tick to immediately kill faulty groups. */ 2685 if (ptdev->scheduler) 2686 panthor_sched_immediate_tick(ptdev); 2687 } 2688 2689 void panthor_sched_resume(struct panthor_device *ptdev) 2690 { 2691 /* Force a tick to re-evaluate after a resume. */ 2692 panthor_sched_immediate_tick(ptdev); 2693 } 2694 2695 void panthor_sched_suspend(struct panthor_device *ptdev) 2696 { 2697 struct panthor_scheduler *sched = ptdev->scheduler; 2698 struct panthor_csg_slots_upd_ctx upd_ctx; 2699 struct panthor_group *group; 2700 u32 suspended_slots; 2701 u32 i; 2702 2703 mutex_lock(&sched->lock); 2704 csgs_upd_ctx_init(&upd_ctx); 2705 for (i = 0; i < sched->csg_slot_count; i++) { 2706 struct panthor_csg_slot *csg_slot = &sched->csg_slots[i]; 2707 2708 if (csg_slot->group) { 2709 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, i, 2710 group_can_run(csg_slot->group) ? 
2711 CSG_STATE_SUSPEND : CSG_STATE_TERMINATE, 2712 CSG_STATE_MASK); 2713 } 2714 } 2715 2716 suspended_slots = upd_ctx.update_mask; 2717 2718 csgs_upd_ctx_apply_locked(ptdev, &upd_ctx); 2719 suspended_slots &= ~upd_ctx.timedout_mask; 2720 2721 if (upd_ctx.timedout_mask) { 2722 u32 slot_mask = upd_ctx.timedout_mask; 2723 2724 drm_err(&ptdev->base, "CSG suspend failed, escalating to termination"); 2725 csgs_upd_ctx_init(&upd_ctx); 2726 while (slot_mask) { 2727 u32 csg_id = ffs(slot_mask) - 1; 2728 struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id]; 2729 2730 /* If the group was still usable before that point, we consider 2731 * it innocent. 2732 */ 2733 if (group_can_run(csg_slot->group)) 2734 csg_slot->group->innocent = true; 2735 2736 /* We consider group suspension failures as fatal and flag the 2737 * group as unusable by setting timedout=true. 2738 */ 2739 csg_slot->group->timedout = true; 2740 2741 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id, 2742 CSG_STATE_TERMINATE, 2743 CSG_STATE_MASK); 2744 slot_mask &= ~BIT(csg_id); 2745 } 2746 2747 csgs_upd_ctx_apply_locked(ptdev, &upd_ctx); 2748 2749 slot_mask = upd_ctx.timedout_mask; 2750 while (slot_mask) { 2751 u32 csg_id = ffs(slot_mask) - 1; 2752 struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id]; 2753 2754 /* Terminate command timedout, but the soft-reset will 2755 * automatically terminate all active groups, so let's 2756 * force the state to halted here. 2757 */ 2758 if (csg_slot->group->state != PANTHOR_CS_GROUP_TERMINATED) 2759 csg_slot->group->state = PANTHOR_CS_GROUP_TERMINATED; 2760 slot_mask &= ~BIT(csg_id); 2761 } 2762 } 2763 2764 /* Flush L2 and LSC caches to make sure suspend state is up-to-date. 2765 * If the flush fails, flag all queues for termination. 2766 */ 2767 if (suspended_slots) { 2768 bool flush_caches_failed = false; 2769 u32 slot_mask = suspended_slots; 2770 2771 if (panthor_gpu_flush_caches(ptdev, CACHE_CLEAN, CACHE_CLEAN, 0)) 2772 flush_caches_failed = true; 2773 2774 while (slot_mask) { 2775 u32 csg_id = ffs(slot_mask) - 1; 2776 struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id]; 2777 2778 if (flush_caches_failed) 2779 csg_slot->group->state = PANTHOR_CS_GROUP_TERMINATED; 2780 else 2781 csg_slot_sync_update_locked(ptdev, csg_id); 2782 2783 slot_mask &= ~BIT(csg_id); 2784 } 2785 } 2786 2787 for (i = 0; i < sched->csg_slot_count; i++) { 2788 struct panthor_csg_slot *csg_slot = &sched->csg_slots[i]; 2789 2790 group = csg_slot->group; 2791 if (!group) 2792 continue; 2793 2794 group_get(group); 2795 2796 if (group->csg_id >= 0) 2797 sched_process_csg_irq_locked(ptdev, group->csg_id); 2798 2799 group_unbind_locked(group); 2800 2801 drm_WARN_ON(&group->ptdev->base, !list_empty(&group->run_node)); 2802 2803 if (group_can_run(group)) { 2804 list_add(&group->run_node, 2805 &sched->groups.idle[group->priority]); 2806 } else { 2807 /* We don't bother stopping the scheduler if the group is 2808 * faulty, the group termination work will finish the job. 2809 */ 2810 list_del_init(&group->wait_node); 2811 group_queue_work(group, term); 2812 } 2813 group_put(group); 2814 } 2815 mutex_unlock(&sched->lock); 2816 } 2817 2818 void panthor_sched_pre_reset(struct panthor_device *ptdev) 2819 { 2820 struct panthor_scheduler *sched = ptdev->scheduler; 2821 struct panthor_group *group, *group_tmp; 2822 u32 i; 2823 2824 mutex_lock(&sched->reset.lock); 2825 atomic_set(&sched->reset.in_progress, true); 2826 2827 /* Cancel all scheduler works. 
Once this is done, these works can't be 2828 * scheduled again until the reset operation is complete. 2829 */ 2830 cancel_work_sync(&sched->sync_upd_work); 2831 cancel_delayed_work_sync(&sched->tick_work); 2832 2833 panthor_sched_suspend(ptdev); 2834 2835 /* Stop all groups that might still accept jobs, so we don't get passed 2836 * new jobs while we're resetting. 2837 */ 2838 for (i = 0; i < ARRAY_SIZE(sched->groups.runnable); i++) { 2839 /* All groups should be in the idle lists. */ 2840 drm_WARN_ON(&ptdev->base, !list_empty(&sched->groups.runnable[i])); 2841 list_for_each_entry_safe(group, group_tmp, &sched->groups.runnable[i], run_node) 2842 panthor_group_stop(group); 2843 } 2844 2845 for (i = 0; i < ARRAY_SIZE(sched->groups.idle); i++) { 2846 list_for_each_entry_safe(group, group_tmp, &sched->groups.idle[i], run_node) 2847 panthor_group_stop(group); 2848 } 2849 2850 mutex_unlock(&sched->reset.lock); 2851 } 2852 2853 void panthor_sched_post_reset(struct panthor_device *ptdev, bool reset_failed) 2854 { 2855 struct panthor_scheduler *sched = ptdev->scheduler; 2856 struct panthor_group *group, *group_tmp; 2857 2858 mutex_lock(&sched->reset.lock); 2859 2860 list_for_each_entry_safe(group, group_tmp, &sched->reset.stopped_groups, run_node) { 2861 /* Consider all previously running group as terminated if the 2862 * reset failed. 2863 */ 2864 if (reset_failed) 2865 group->state = PANTHOR_CS_GROUP_TERMINATED; 2866 2867 panthor_group_start(group); 2868 } 2869 2870 /* We're done resetting the GPU, clear the reset.in_progress bit so we can 2871 * kick the scheduler. 2872 */ 2873 atomic_set(&sched->reset.in_progress, false); 2874 mutex_unlock(&sched->reset.lock); 2875 2876 /* No need to queue a tick and update syncs if the reset failed. */ 2877 if (!reset_failed) { 2878 sched_queue_delayed_work(sched, tick, 0); 2879 sched_queue_work(sched, sync_upd); 2880 } 2881 } 2882 2883 static void update_fdinfo_stats(struct panthor_job *job) 2884 { 2885 struct panthor_group *group = job->group; 2886 struct panthor_queue *queue = group->queues[job->queue_idx]; 2887 struct panthor_gpu_usage *fdinfo = &group->fdinfo.data; 2888 struct panthor_job_profiling_data *slots = queue->profiling.slots->kmap; 2889 struct panthor_job_profiling_data *data = &slots[job->profiling.slot]; 2890 2891 scoped_guard(spinlock, &group->fdinfo.lock) { 2892 if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_CYCLES) 2893 fdinfo->cycles += data->cycles.after - data->cycles.before; 2894 if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_TIMESTAMP) 2895 fdinfo->time += data->time.after - data->time.before; 2896 } 2897 } 2898 2899 void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile) 2900 { 2901 struct panthor_group_pool *gpool = pfile->groups; 2902 struct panthor_group *group; 2903 unsigned long i; 2904 2905 if (IS_ERR_OR_NULL(gpool)) 2906 return; 2907 2908 xa_lock(&gpool->xa); 2909 xa_for_each(&gpool->xa, i, group) { 2910 guard(spinlock)(&group->fdinfo.lock); 2911 pfile->stats.cycles += group->fdinfo.data.cycles; 2912 pfile->stats.time += group->fdinfo.data.time; 2913 group->fdinfo.data.cycles = 0; 2914 group->fdinfo.data.time = 0; 2915 } 2916 xa_unlock(&gpool->xa); 2917 } 2918 2919 static void group_sync_upd_work(struct work_struct *work) 2920 { 2921 struct panthor_group *group = 2922 container_of(work, struct panthor_group, sync_upd_work); 2923 struct panthor_job *job, *job_tmp; 2924 LIST_HEAD(done_jobs); 2925 u32 queue_idx; 2926 bool cookie; 2927 2928 cookie = dma_fence_begin_signalling(); 2929 for (queue_idx = 0; 
queue_idx < group->queue_count; queue_idx++) { 2930 struct panthor_queue *queue = group->queues[queue_idx]; 2931 struct panthor_syncobj_64b *syncobj; 2932 2933 if (!queue) 2934 continue; 2935 2936 syncobj = group->syncobjs->kmap + (queue_idx * sizeof(*syncobj)); 2937 2938 spin_lock(&queue->fence_ctx.lock); 2939 list_for_each_entry_safe(job, job_tmp, &queue->fence_ctx.in_flight_jobs, node) { 2940 if (syncobj->seqno < job->done_fence->seqno) 2941 break; 2942 2943 list_move_tail(&job->node, &done_jobs); 2944 dma_fence_signal_locked(job->done_fence); 2945 } 2946 spin_unlock(&queue->fence_ctx.lock); 2947 } 2948 dma_fence_end_signalling(cookie); 2949 2950 list_for_each_entry_safe(job, job_tmp, &done_jobs, node) { 2951 if (job->profiling.mask) 2952 update_fdinfo_stats(job); 2953 list_del_init(&job->node); 2954 panthor_job_put(&job->base); 2955 } 2956 2957 group_put(group); 2958 } 2959 2960 struct panthor_job_ringbuf_instrs { 2961 u64 buffer[MAX_INSTRS_PER_JOB]; 2962 u32 count; 2963 }; 2964 2965 struct panthor_job_instr { 2966 u32 profile_mask; 2967 u64 instr; 2968 }; 2969 2970 #define JOB_INSTR(__prof, __instr) \ 2971 { \ 2972 .profile_mask = __prof, \ 2973 .instr = __instr, \ 2974 } 2975 2976 static void 2977 copy_instrs_to_ringbuf(struct panthor_queue *queue, 2978 struct panthor_job *job, 2979 struct panthor_job_ringbuf_instrs *instrs) 2980 { 2981 u64 ringbuf_size = panthor_kernel_bo_size(queue->ringbuf); 2982 u64 start = job->ringbuf.start & (ringbuf_size - 1); 2983 u64 size, written; 2984 2985 /* 2986 * We need to write a whole slot, including any trailing zeroes 2987 * that may come at the end of it. Also, because instrs.buffer has 2988 * been zero-initialised, there's no need to pad it with 0's 2989 */ 2990 instrs->count = ALIGN(instrs->count, NUM_INSTRS_PER_CACHE_LINE); 2991 size = instrs->count * sizeof(u64); 2992 WARN_ON(size > ringbuf_size); 2993 written = min(ringbuf_size - start, size); 2994 2995 memcpy(queue->ringbuf->kmap + start, instrs->buffer, written); 2996 2997 if (written < size) 2998 memcpy(queue->ringbuf->kmap, 2999 &instrs->buffer[written / sizeof(u64)], 3000 size - written); 3001 } 3002 3003 struct panthor_job_cs_params { 3004 u32 profile_mask; 3005 u64 addr_reg; u64 val_reg; 3006 u64 cycle_reg; u64 time_reg; 3007 u64 sync_addr; u64 times_addr; 3008 u64 cs_start; u64 cs_size; 3009 u32 last_flush; u32 waitall_mask; 3010 }; 3011 3012 static void 3013 get_job_cs_params(struct panthor_job *job, struct panthor_job_cs_params *params) 3014 { 3015 struct panthor_group *group = job->group; 3016 struct panthor_queue *queue = group->queues[job->queue_idx]; 3017 struct panthor_device *ptdev = group->ptdev; 3018 struct panthor_scheduler *sched = ptdev->scheduler; 3019 3020 params->addr_reg = ptdev->csif_info.cs_reg_count - 3021 ptdev->csif_info.unpreserved_cs_reg_count; 3022 params->val_reg = params->addr_reg + 2; 3023 params->cycle_reg = params->addr_reg; 3024 params->time_reg = params->val_reg; 3025 3026 params->sync_addr = panthor_kernel_bo_gpuva(group->syncobjs) + 3027 job->queue_idx * sizeof(struct panthor_syncobj_64b); 3028 params->times_addr = panthor_kernel_bo_gpuva(queue->profiling.slots) + 3029 (job->profiling.slot * sizeof(struct panthor_job_profiling_data)); 3030 params->waitall_mask = GENMASK(sched->sb_slot_count - 1, 0); 3031 3032 params->cs_start = job->call_info.start; 3033 params->cs_size = job->call_info.size; 3034 params->last_flush = job->call_info.latest_flush; 3035 3036 params->profile_mask = job->profiling.mask; 3037 } 3038 3039 #define JOB_INSTR_ALWAYS(instr) \ 
3040 JOB_INSTR(PANTHOR_DEVICE_PROFILING_DISABLED, (instr)) 3041 #define JOB_INSTR_TIMESTAMP(instr) \ 3042 JOB_INSTR(PANTHOR_DEVICE_PROFILING_TIMESTAMP, (instr)) 3043 #define JOB_INSTR_CYCLES(instr) \ 3044 JOB_INSTR(PANTHOR_DEVICE_PROFILING_CYCLES, (instr)) 3045 3046 static void 3047 prepare_job_instrs(const struct panthor_job_cs_params *params, 3048 struct panthor_job_ringbuf_instrs *instrs) 3049 { 3050 const struct panthor_job_instr instr_seq[] = { 3051 /* MOV32 rX+2, cs.latest_flush */ 3052 JOB_INSTR_ALWAYS((2ull << 56) | (params->val_reg << 48) | params->last_flush), 3053 /* FLUSH_CACHE2.clean_inv_all.no_wait.signal(0) rX+2 */ 3054 JOB_INSTR_ALWAYS((36ull << 56) | (0ull << 48) | (params->val_reg << 40) | 3055 (0 << 16) | 0x233), 3056 /* MOV48 rX:rX+1, cycles_offset */ 3057 JOB_INSTR_CYCLES((1ull << 56) | (params->cycle_reg << 48) | 3058 (params->times_addr + 3059 offsetof(struct panthor_job_profiling_data, cycles.before))), 3060 /* STORE_STATE cycles */ 3061 JOB_INSTR_CYCLES((40ull << 56) | (params->cycle_reg << 40) | (1ll << 32)), 3062 /* MOV48 rX:rX+1, time_offset */ 3063 JOB_INSTR_TIMESTAMP((1ull << 56) | (params->time_reg << 48) | 3064 (params->times_addr + 3065 offsetof(struct panthor_job_profiling_data, time.before))), 3066 /* STORE_STATE timer */ 3067 JOB_INSTR_TIMESTAMP((40ull << 56) | (params->time_reg << 40) | (0ll << 32)), 3068 /* MOV48 rX:rX+1, cs.start */ 3069 JOB_INSTR_ALWAYS((1ull << 56) | (params->addr_reg << 48) | params->cs_start), 3070 /* MOV32 rX+2, cs.size */ 3071 JOB_INSTR_ALWAYS((2ull << 56) | (params->val_reg << 48) | params->cs_size), 3072 /* WAIT(0) => waits for FLUSH_CACHE2 instruction */ 3073 JOB_INSTR_ALWAYS((3ull << 56) | (1 << 16)), 3074 /* CALL rX:rX+1, rX+2 */ 3075 JOB_INSTR_ALWAYS((32ull << 56) | (params->addr_reg << 40) | 3076 (params->val_reg << 32)), 3077 /* MOV48 rX:rX+1, cycles_offset */ 3078 JOB_INSTR_CYCLES((1ull << 56) | (params->cycle_reg << 48) | 3079 (params->times_addr + 3080 offsetof(struct panthor_job_profiling_data, cycles.after))), 3081 /* STORE_STATE cycles */ 3082 JOB_INSTR_CYCLES((40ull << 56) | (params->cycle_reg << 40) | (1ll << 32)), 3083 /* MOV48 rX:rX+1, time_offset */ 3084 JOB_INSTR_TIMESTAMP((1ull << 56) | (params->time_reg << 48) | 3085 (params->times_addr + 3086 offsetof(struct panthor_job_profiling_data, time.after))), 3087 /* STORE_STATE timer */ 3088 JOB_INSTR_TIMESTAMP((40ull << 56) | (params->time_reg << 40) | (0ll << 32)), 3089 /* MOV48 rX:rX+1, sync_addr */ 3090 JOB_INSTR_ALWAYS((1ull << 56) | (params->addr_reg << 48) | params->sync_addr), 3091 /* MOV48 rX+2, #1 */ 3092 JOB_INSTR_ALWAYS((1ull << 56) | (params->val_reg << 48) | 1), 3093 /* WAIT(all) */ 3094 JOB_INSTR_ALWAYS((3ull << 56) | (params->waitall_mask << 16)), 3095 /* SYNC_ADD64.system_scope.propage_err.nowait rX:rX+1, rX+2*/ 3096 JOB_INSTR_ALWAYS((51ull << 56) | (0ull << 48) | (params->addr_reg << 40) | 3097 (params->val_reg << 32) | (0 << 16) | 1), 3098 /* ERROR_BARRIER, so we can recover from faults at job boundaries. */ 3099 JOB_INSTR_ALWAYS((47ull << 56)), 3100 }; 3101 u32 pad; 3102 3103 instrs->count = 0; 3104 3105 /* NEED to be cacheline aligned to please the prefetcher. */ 3106 static_assert(sizeof(instrs->buffer) % 64 == 0, 3107 "panthor_job_ringbuf_instrs::buffer is not aligned on a cacheline"); 3108 3109 /* Make sure we have enough storage to store the whole sequence. 
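* With the 19-entry sequence above this pads to 24 instructions, i.e. exactly
* MAX_INSTRS_PER_JOB, which is what the static_assert below checks.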
*/ 3110 static_assert(ALIGN(ARRAY_SIZE(instr_seq), NUM_INSTRS_PER_CACHE_LINE) == 3111 ARRAY_SIZE(instrs->buffer), 3112 "instr_seq vs panthor_job_ringbuf_instrs::buffer size mismatch"); 3113 3114 for (u32 i = 0; i < ARRAY_SIZE(instr_seq); i++) { 3115 /* If the profile mask of this instruction is not enabled, skip it. */ 3116 if (instr_seq[i].profile_mask && 3117 !(instr_seq[i].profile_mask & params->profile_mask)) 3118 continue; 3119 3120 instrs->buffer[instrs->count++] = instr_seq[i].instr; 3121 } 3122 3123 pad = ALIGN(instrs->count, NUM_INSTRS_PER_CACHE_LINE); 3124 memset(&instrs->buffer[instrs->count], 0, 3125 (pad - instrs->count) * sizeof(instrs->buffer[0])); 3126 instrs->count = pad; 3127 } 3128 3129 static u32 calc_job_credits(u32 profile_mask) 3130 { 3131 struct panthor_job_ringbuf_instrs instrs; 3132 struct panthor_job_cs_params params = { 3133 .profile_mask = profile_mask, 3134 }; 3135 3136 prepare_job_instrs(¶ms, &instrs); 3137 return instrs.count; 3138 } 3139 3140 static struct dma_fence * 3141 queue_run_job(struct drm_sched_job *sched_job) 3142 { 3143 struct panthor_job *job = container_of(sched_job, struct panthor_job, base); 3144 struct panthor_group *group = job->group; 3145 struct panthor_queue *queue = group->queues[job->queue_idx]; 3146 struct panthor_device *ptdev = group->ptdev; 3147 struct panthor_scheduler *sched = ptdev->scheduler; 3148 struct panthor_job_ringbuf_instrs instrs; 3149 struct panthor_job_cs_params cs_params; 3150 struct dma_fence *done_fence; 3151 int ret; 3152 3153 /* Stream size is zero, nothing to do except making sure all previously 3154 * submitted jobs are done before we signal the 3155 * drm_sched_job::s_fence::finished fence. 3156 */ 3157 if (!job->call_info.size) { 3158 job->done_fence = dma_fence_get(queue->fence_ctx.last_fence); 3159 return dma_fence_get(job->done_fence); 3160 } 3161 3162 ret = panthor_device_resume_and_get(ptdev); 3163 if (drm_WARN_ON(&ptdev->base, ret)) 3164 return ERR_PTR(ret); 3165 3166 mutex_lock(&sched->lock); 3167 if (!group_can_run(group)) { 3168 done_fence = ERR_PTR(-ECANCELED); 3169 goto out_unlock; 3170 } 3171 3172 dma_fence_init(job->done_fence, 3173 &panthor_queue_fence_ops, 3174 &queue->fence_ctx.lock, 3175 queue->fence_ctx.id, 3176 atomic64_inc_return(&queue->fence_ctx.seqno)); 3177 3178 job->profiling.slot = queue->profiling.seqno++; 3179 if (queue->profiling.seqno == queue->profiling.slot_count) 3180 queue->profiling.seqno = 0; 3181 3182 job->ringbuf.start = queue->iface.input->insert; 3183 3184 get_job_cs_params(job, &cs_params); 3185 prepare_job_instrs(&cs_params, &instrs); 3186 copy_instrs_to_ringbuf(queue, job, &instrs); 3187 3188 job->ringbuf.end = job->ringbuf.start + (instrs.count * sizeof(u64)); 3189 3190 panthor_job_get(&job->base); 3191 spin_lock(&queue->fence_ctx.lock); 3192 list_add_tail(&job->node, &queue->fence_ctx.in_flight_jobs); 3193 spin_unlock(&queue->fence_ctx.lock); 3194 3195 /* Make sure the ring buffer is updated before the INSERT 3196 * register. 3197 */ 3198 wmb(); 3199 3200 queue->iface.input->extract = queue->iface.output->extract; 3201 queue->iface.input->insert = job->ringbuf.end; 3202 3203 if (group->csg_id < 0) { 3204 /* If the queue is blocked, we want to keep the timeout running, so we 3205 * can detect unbounded waits and kill the group when that happens. 3206 * Otherwise, we suspend the timeout so the time we spend waiting for 3207 * a CSG slot is not counted. 
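* The time left is stashed in queue->remaining_time so the drm_sched timeout
* can be resumed from where it stopped once the queue is programmed on a slot.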
3208 */ 3209 if (!(group->blocked_queues & BIT(job->queue_idx)) && 3210 !queue->timeout_suspended) { 3211 queue->remaining_time = drm_sched_suspend_timeout(&queue->scheduler); 3212 queue->timeout_suspended = true; 3213 } 3214 3215 group_schedule_locked(group, BIT(job->queue_idx)); 3216 } else { 3217 gpu_write(ptdev, CSF_DOORBELL(queue->doorbell_id), 1); 3218 if (!sched->pm.has_ref && 3219 !(group->blocked_queues & BIT(job->queue_idx))) { 3220 pm_runtime_get(ptdev->base.dev); 3221 sched->pm.has_ref = true; 3222 } 3223 panthor_devfreq_record_busy(sched->ptdev); 3224 } 3225 3226 /* Update the last fence. */ 3227 dma_fence_put(queue->fence_ctx.last_fence); 3228 queue->fence_ctx.last_fence = dma_fence_get(job->done_fence); 3229 3230 done_fence = dma_fence_get(job->done_fence); 3231 3232 out_unlock: 3233 mutex_unlock(&sched->lock); 3234 pm_runtime_mark_last_busy(ptdev->base.dev); 3235 pm_runtime_put_autosuspend(ptdev->base.dev); 3236 3237 return done_fence; 3238 } 3239 3240 static enum drm_gpu_sched_stat 3241 queue_timedout_job(struct drm_sched_job *sched_job) 3242 { 3243 struct panthor_job *job = container_of(sched_job, struct panthor_job, base); 3244 struct panthor_group *group = job->group; 3245 struct panthor_device *ptdev = group->ptdev; 3246 struct panthor_scheduler *sched = ptdev->scheduler; 3247 struct panthor_queue *queue = group->queues[job->queue_idx]; 3248 3249 drm_warn(&ptdev->base, "job timeout: pid=%d, comm=%s, seqno=%llu\n", 3250 group->task_info.pid, group->task_info.comm, job->done_fence->seqno); 3251 3252 drm_WARN_ON(&ptdev->base, atomic_read(&sched->reset.in_progress)); 3253 3254 queue_stop(queue, job); 3255 3256 mutex_lock(&sched->lock); 3257 group->timedout = true; 3258 if (group->csg_id >= 0) { 3259 sched_queue_delayed_work(ptdev->scheduler, tick, 0); 3260 } else { 3261 /* Remove from the run queues, so the scheduler can't 3262 * pick the group on the next tick. 3263 */ 3264 list_del_init(&group->run_node); 3265 list_del_init(&group->wait_node); 3266 3267 group_queue_work(group, term); 3268 } 3269 mutex_unlock(&sched->lock); 3270 3271 queue_start(queue); 3272 3273 return DRM_GPU_SCHED_STAT_RESET; 3274 } 3275 3276 static void queue_free_job(struct drm_sched_job *sched_job) 3277 { 3278 drm_sched_job_cleanup(sched_job); 3279 panthor_job_put(sched_job); 3280 } 3281 3282 static const struct drm_sched_backend_ops panthor_queue_sched_ops = { 3283 .run_job = queue_run_job, 3284 .timedout_job = queue_timedout_job, 3285 .free_job = queue_free_job, 3286 }; 3287 3288 static u32 calc_profiling_ringbuf_num_slots(struct panthor_device *ptdev, 3289 u32 cs_ringbuf_size) 3290 { 3291 u32 min_profiled_job_instrs = U32_MAX; 3292 u32 last_flag = fls(PANTHOR_DEVICE_PROFILING_ALL); 3293 3294 /* 3295 * We want to calculate the minimum size of a profiled job's CS, 3296 * because since they need additional instructions for the sampling 3297 * of performance metrics, they might take up further slots in 3298 * the queue's ringbuffer. This means we might not need as many job 3299 * slots for keeping track of their profiling information. What we 3300 * need is the maximum number of slots we should allocate to this end, 3301 * which matches the maximum number of profiled jobs we can place 3302 * simultaneously in the queue's ring buffer. 3303 * That has to be calculated separately for every single job profiling 3304 * flag, but not in the case job profiling is disabled, since unprofiled 3305 * jobs don't need to keep track of this at all. 
3306 */ 3307 for (u32 i = 0; i < last_flag; i++) { 3308 min_profiled_job_instrs = 3309 min(min_profiled_job_instrs, calc_job_credits(BIT(i))); 3310 } 3311 3312 return DIV_ROUND_UP(cs_ringbuf_size, min_profiled_job_instrs * sizeof(u64)); 3313 } 3314 3315 static struct panthor_queue * 3316 group_create_queue(struct panthor_group *group, 3317 const struct drm_panthor_queue_create *args, 3318 u64 drm_client_id, u32 gid, u32 qid) 3319 { 3320 struct drm_sched_init_args sched_args = { 3321 .ops = &panthor_queue_sched_ops, 3322 .submit_wq = group->ptdev->scheduler->wq, 3323 .num_rqs = 1, 3324 /* 3325 * The credit limit argument tells us the total number of 3326 * instructions across all CS slots in the ringbuffer, with 3327 * some jobs requiring twice as many as others, depending on 3328 * their profiling status. 3329 */ 3330 .credit_limit = args->ringbuf_size / sizeof(u64), 3331 .timeout = msecs_to_jiffies(JOB_TIMEOUT_MS), 3332 .timeout_wq = group->ptdev->reset.wq, 3333 .dev = group->ptdev->base.dev, 3334 }; 3335 struct drm_gpu_scheduler *drm_sched; 3336 struct panthor_queue *queue; 3337 int ret; 3338 3339 if (args->pad[0] || args->pad[1] || args->pad[2]) 3340 return ERR_PTR(-EINVAL); 3341 3342 if (args->ringbuf_size < SZ_4K || args->ringbuf_size > SZ_64K || 3343 !is_power_of_2(args->ringbuf_size)) 3344 return ERR_PTR(-EINVAL); 3345 3346 if (args->priority > CSF_MAX_QUEUE_PRIO) 3347 return ERR_PTR(-EINVAL); 3348 3349 queue = kzalloc(sizeof(*queue), GFP_KERNEL); 3350 if (!queue) 3351 return ERR_PTR(-ENOMEM); 3352 3353 queue->fence_ctx.id = dma_fence_context_alloc(1); 3354 spin_lock_init(&queue->fence_ctx.lock); 3355 INIT_LIST_HEAD(&queue->fence_ctx.in_flight_jobs); 3356 3357 queue->priority = args->priority; 3358 3359 queue->ringbuf = panthor_kernel_bo_create(group->ptdev, group->vm, 3360 args->ringbuf_size, 3361 DRM_PANTHOR_BO_NO_MMAP, 3362 DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC | 3363 DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED, 3364 PANTHOR_VM_KERNEL_AUTO_VA, 3365 "CS ring buffer"); 3366 if (IS_ERR(queue->ringbuf)) { 3367 ret = PTR_ERR(queue->ringbuf); 3368 goto err_free_queue; 3369 } 3370 3371 ret = panthor_kernel_bo_vmap(queue->ringbuf); 3372 if (ret) 3373 goto err_free_queue; 3374 3375 queue->iface.mem = panthor_fw_alloc_queue_iface_mem(group->ptdev, 3376 &queue->iface.input, 3377 &queue->iface.output, 3378 &queue->iface.input_fw_va, 3379 &queue->iface.output_fw_va); 3380 if (IS_ERR(queue->iface.mem)) { 3381 ret = PTR_ERR(queue->iface.mem); 3382 goto err_free_queue; 3383 } 3384 3385 queue->profiling.slot_count = 3386 calc_profiling_ringbuf_num_slots(group->ptdev, args->ringbuf_size); 3387 3388 queue->profiling.slots = 3389 panthor_kernel_bo_create(group->ptdev, group->vm, 3390 queue->profiling.slot_count * 3391 sizeof(struct panthor_job_profiling_data), 3392 DRM_PANTHOR_BO_NO_MMAP, 3393 DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC | 3394 DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED, 3395 PANTHOR_VM_KERNEL_AUTO_VA, 3396 "Group job stats"); 3397 3398 if (IS_ERR(queue->profiling.slots)) { 3399 ret = PTR_ERR(queue->profiling.slots); 3400 goto err_free_queue; 3401 } 3402 3403 ret = panthor_kernel_bo_vmap(queue->profiling.slots); 3404 if (ret) 3405 goto err_free_queue; 3406 3407 /* assign a unique name */ 3408 queue->name = kasprintf(GFP_KERNEL, "panthor-queue-%llu-%u-%u", drm_client_id, gid, qid); 3409 if (!queue->name) { 3410 ret = -ENOMEM; 3411 goto err_free_queue; 3412 } 3413 3414 sched_args.name = queue->name; 3415 3416 ret = drm_sched_init(&queue->scheduler, &sched_args); 3417 if (ret) 3418 goto err_free_queue; 3419 3420 
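/* drm_sched_entity_init() expects an array of scheduler pointers, so hand it
 * a one-entry list containing this queue's scheduler.
 */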
drm_sched = &queue->scheduler; 3421 ret = drm_sched_entity_init(&queue->entity, 0, &drm_sched, 1, NULL); 3422 if (ret) 3423 goto err_free_queue; 3424 3425 return queue; 3426 3427 err_free_queue: 3428 group_free_queue(group, queue); 3429 return ERR_PTR(ret); 3430 } 3431 3432 static void group_init_task_info(struct panthor_group *group) 3433 { 3434 struct task_struct *task = current->group_leader; 3435 3436 group->task_info.pid = task->pid; 3437 get_task_comm(group->task_info.comm, task); 3438 } 3439 3440 static void add_group_kbo_sizes(struct panthor_device *ptdev, 3441 struct panthor_group *group) 3442 { 3443 struct panthor_queue *queue; 3444 int i; 3445 3446 if (drm_WARN_ON(&ptdev->base, IS_ERR_OR_NULL(group))) 3447 return; 3448 if (drm_WARN_ON(&ptdev->base, ptdev != group->ptdev)) 3449 return; 3450 3451 group->fdinfo.kbo_sizes += group->suspend_buf->obj->size; 3452 group->fdinfo.kbo_sizes += group->protm_suspend_buf->obj->size; 3453 group->fdinfo.kbo_sizes += group->syncobjs->obj->size; 3454 3455 for (i = 0; i < group->queue_count; i++) { 3456 queue = group->queues[i]; 3457 group->fdinfo.kbo_sizes += queue->ringbuf->obj->size; 3458 group->fdinfo.kbo_sizes += queue->iface.mem->obj->size; 3459 group->fdinfo.kbo_sizes += queue->profiling.slots->obj->size; 3460 } 3461 } 3462 3463 #define MAX_GROUPS_PER_POOL 128 3464 3465 int panthor_group_create(struct panthor_file *pfile, 3466 const struct drm_panthor_group_create *group_args, 3467 const struct drm_panthor_queue_create *queue_args, 3468 u64 drm_client_id) 3469 { 3470 struct panthor_device *ptdev = pfile->ptdev; 3471 struct panthor_group_pool *gpool = pfile->groups; 3472 struct panthor_scheduler *sched = ptdev->scheduler; 3473 struct panthor_fw_csg_iface *csg_iface = panthor_fw_get_csg_iface(ptdev, 0); 3474 struct panthor_group *group = NULL; 3475 u32 gid, i, suspend_size; 3476 int ret; 3477 3478 if (group_args->pad) 3479 return -EINVAL; 3480 3481 if (group_args->priority >= PANTHOR_CSG_PRIORITY_COUNT) 3482 return -EINVAL; 3483 3484 if ((group_args->compute_core_mask & ~ptdev->gpu_info.shader_present) || 3485 (group_args->fragment_core_mask & ~ptdev->gpu_info.shader_present) || 3486 (group_args->tiler_core_mask & ~ptdev->gpu_info.tiler_present)) 3487 return -EINVAL; 3488 3489 if (hweight64(group_args->compute_core_mask) < group_args->max_compute_cores || 3490 hweight64(group_args->fragment_core_mask) < group_args->max_fragment_cores || 3491 hweight64(group_args->tiler_core_mask) < group_args->max_tiler_cores) 3492 return -EINVAL; 3493 3494 group = kzalloc(sizeof(*group), GFP_KERNEL); 3495 if (!group) 3496 return -ENOMEM; 3497 3498 spin_lock_init(&group->fatal_lock); 3499 kref_init(&group->refcount); 3500 group->state = PANTHOR_CS_GROUP_CREATED; 3501 group->csg_id = -1; 3502 3503 group->ptdev = ptdev; 3504 group->max_compute_cores = group_args->max_compute_cores; 3505 group->compute_core_mask = group_args->compute_core_mask; 3506 group->max_fragment_cores = group_args->max_fragment_cores; 3507 group->fragment_core_mask = group_args->fragment_core_mask; 3508 group->max_tiler_cores = group_args->max_tiler_cores; 3509 group->tiler_core_mask = group_args->tiler_core_mask; 3510 group->priority = group_args->priority; 3511 3512 INIT_LIST_HEAD(&group->wait_node); 3513 INIT_LIST_HEAD(&group->run_node); 3514 INIT_WORK(&group->term_work, group_term_work); 3515 INIT_WORK(&group->sync_upd_work, group_sync_upd_work); 3516 INIT_WORK(&group->tiler_oom_work, group_tiler_oom_work); 3517 INIT_WORK(&group->release_work, group_release_work); 3518 3519 group->vm 
= panthor_vm_pool_get_vm(pfile->vms, group_args->vm_id); 3520 if (!group->vm) { 3521 ret = -EINVAL; 3522 goto err_put_group; 3523 } 3524 3525 suspend_size = csg_iface->control->suspend_size; 3526 group->suspend_buf = panthor_fw_alloc_suspend_buf_mem(ptdev, suspend_size); 3527 if (IS_ERR(group->suspend_buf)) { 3528 ret = PTR_ERR(group->suspend_buf); 3529 group->suspend_buf = NULL; 3530 goto err_put_group; 3531 } 3532 3533 suspend_size = csg_iface->control->protm_suspend_size; 3534 group->protm_suspend_buf = panthor_fw_alloc_suspend_buf_mem(ptdev, suspend_size); 3535 if (IS_ERR(group->protm_suspend_buf)) { 3536 ret = PTR_ERR(group->protm_suspend_buf); 3537 group->protm_suspend_buf = NULL; 3538 goto err_put_group; 3539 } 3540 3541 group->syncobjs = panthor_kernel_bo_create(ptdev, group->vm, 3542 group_args->queues.count * 3543 sizeof(struct panthor_syncobj_64b), 3544 DRM_PANTHOR_BO_NO_MMAP, 3545 DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC | 3546 DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED, 3547 PANTHOR_VM_KERNEL_AUTO_VA, 3548 "Group sync objects"); 3549 if (IS_ERR(group->syncobjs)) { 3550 ret = PTR_ERR(group->syncobjs); 3551 goto err_put_group; 3552 } 3553 3554 ret = panthor_kernel_bo_vmap(group->syncobjs); 3555 if (ret) 3556 goto err_put_group; 3557 3558 memset(group->syncobjs->kmap, 0, 3559 group_args->queues.count * sizeof(struct panthor_syncobj_64b)); 3560 3561 ret = xa_alloc(&gpool->xa, &gid, group, XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL); 3562 if (ret) 3563 goto err_put_group; 3564 3565 for (i = 0; i < group_args->queues.count; i++) { 3566 group->queues[i] = group_create_queue(group, &queue_args[i], drm_client_id, gid, i); 3567 if (IS_ERR(group->queues[i])) { 3568 ret = PTR_ERR(group->queues[i]); 3569 group->queues[i] = NULL; 3570 goto err_erase_gid; 3571 } 3572 3573 group->queue_count++; 3574 } 3575 3576 group->idle_queues = GENMASK(group->queue_count - 1, 0); 3577 3578 mutex_lock(&sched->reset.lock); 3579 if (atomic_read(&sched->reset.in_progress)) { 3580 panthor_group_stop(group); 3581 } else { 3582 mutex_lock(&sched->lock); 3583 list_add_tail(&group->run_node, 3584 &sched->groups.idle[group->priority]); 3585 mutex_unlock(&sched->lock); 3586 } 3587 mutex_unlock(&sched->reset.lock); 3588 3589 add_group_kbo_sizes(group->ptdev, group); 3590 spin_lock_init(&group->fdinfo.lock); 3591 3592 group_init_task_info(group); 3593 3594 return gid; 3595 3596 err_erase_gid: 3597 xa_erase(&gpool->xa, gid); 3598 3599 err_put_group: 3600 group_put(group); 3601 return ret; 3602 } 3603 3604 int panthor_group_destroy(struct panthor_file *pfile, u32 group_handle) 3605 { 3606 struct panthor_group_pool *gpool = pfile->groups; 3607 struct panthor_device *ptdev = pfile->ptdev; 3608 struct panthor_scheduler *sched = ptdev->scheduler; 3609 struct panthor_group *group; 3610 3611 group = xa_erase(&gpool->xa, group_handle); 3612 if (!group) 3613 return -EINVAL; 3614 3615 mutex_lock(&sched->reset.lock); 3616 mutex_lock(&sched->lock); 3617 group->destroyed = true; 3618 if (group->csg_id >= 0) { 3619 sched_queue_delayed_work(sched, tick, 0); 3620 } else if (!atomic_read(&sched->reset.in_progress)) { 3621 /* Remove from the run queues, so the scheduler can't 3622 * pick the group on the next tick. 
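* If a reset is in progress we leave the group where it is: it sits on the
* reset.stopped_groups list, and panthor_group_start() will notice it can't
* run anymore and queue the termination work once the reset is done.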
3623 */ 3624 list_del_init(&group->run_node); 3625 list_del_init(&group->wait_node); 3626 group_queue_work(group, term); 3627 } 3628 mutex_unlock(&sched->lock); 3629 mutex_unlock(&sched->reset.lock); 3630 3631 group_put(group); 3632 return 0; 3633 } 3634 3635 static struct panthor_group *group_from_handle(struct panthor_group_pool *pool, 3636 u32 group_handle) 3637 { 3638 struct panthor_group *group; 3639 3640 xa_lock(&pool->xa); 3641 group = group_get(xa_load(&pool->xa, group_handle)); 3642 xa_unlock(&pool->xa); 3643 3644 return group; 3645 } 3646 3647 int panthor_group_get_state(struct panthor_file *pfile, 3648 struct drm_panthor_group_get_state *get_state) 3649 { 3650 struct panthor_group_pool *gpool = pfile->groups; 3651 struct panthor_device *ptdev = pfile->ptdev; 3652 struct panthor_scheduler *sched = ptdev->scheduler; 3653 struct panthor_group *group; 3654 3655 if (get_state->pad) 3656 return -EINVAL; 3657 3658 group = group_from_handle(gpool, get_state->group_handle); 3659 if (!group) 3660 return -EINVAL; 3661 3662 memset(get_state, 0, sizeof(*get_state)); 3663 3664 mutex_lock(&sched->lock); 3665 if (group->timedout) 3666 get_state->state |= DRM_PANTHOR_GROUP_STATE_TIMEDOUT; 3667 if (group->fatal_queues) { 3668 get_state->state |= DRM_PANTHOR_GROUP_STATE_FATAL_FAULT; 3669 get_state->fatal_queues = group->fatal_queues; 3670 } 3671 if (group->innocent) 3672 get_state->state |= DRM_PANTHOR_GROUP_STATE_INNOCENT; 3673 mutex_unlock(&sched->lock); 3674 3675 group_put(group); 3676 return 0; 3677 } 3678 3679 int panthor_group_pool_create(struct panthor_file *pfile) 3680 { 3681 struct panthor_group_pool *gpool; 3682 3683 gpool = kzalloc(sizeof(*gpool), GFP_KERNEL); 3684 if (!gpool) 3685 return -ENOMEM; 3686 3687 xa_init_flags(&gpool->xa, XA_FLAGS_ALLOC1); 3688 pfile->groups = gpool; 3689 return 0; 3690 } 3691 3692 void panthor_group_pool_destroy(struct panthor_file *pfile) 3693 { 3694 struct panthor_group_pool *gpool = pfile->groups; 3695 struct panthor_group *group; 3696 unsigned long i; 3697 3698 if (IS_ERR_OR_NULL(gpool)) 3699 return; 3700 3701 xa_for_each(&gpool->xa, i, group) 3702 panthor_group_destroy(pfile, i); 3703 3704 xa_destroy(&gpool->xa); 3705 kfree(gpool); 3706 pfile->groups = NULL; 3707 } 3708 3709 /** 3710 * panthor_fdinfo_gather_group_mem_info() - Retrieve aggregate size of all private kernel BO's 3711 * belonging to all the groups owned by an open Panthor file 3712 * @pfile: File. 3713 * @stats: Memory statistics to be updated. 
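* The sizes are accounted as resident; they also count as active when the
* group is currently bound to a CSG slot.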
static void job_release(struct kref *ref)
{
	struct panthor_job *job = container_of(ref, struct panthor_job, refcount);

	drm_WARN_ON(&job->group->ptdev->base, !list_empty(&job->node));

	if (job->base.s_fence)
		drm_sched_job_cleanup(&job->base);

	if (job->done_fence && job->done_fence->ops)
		dma_fence_put(job->done_fence);
	else
		dma_fence_free(job->done_fence);

	group_put(job->group);

	kfree(job);
}

struct drm_sched_job *panthor_job_get(struct drm_sched_job *sched_job)
{
	if (sched_job) {
		struct panthor_job *job = container_of(sched_job, struct panthor_job, base);

		kref_get(&job->refcount);
	}

	return sched_job;
}

void panthor_job_put(struct drm_sched_job *sched_job)
{
	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);

	if (sched_job)
		kref_put(&job->refcount, job_release);
}

struct panthor_vm *panthor_job_vm(struct drm_sched_job *sched_job)
{
	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);

	return job->group->vm;
}

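/**
 * panthor_job_create() - Create a new job from a queue submission
 * @pfile: File the job is submitted on.
 * @group_handle: Handle of the group owning the target queue.
 * @qsubmit: Queue submission arguments passed by userspace.
 * @drm_client_id: ID of the DRM client submitting the job.
 *
 * Validates the submission arguments, allocates the job object and
 * initializes its drm_sched_job.
 *
 * Return: A valid &drm_sched_job pointer on success, an ERR_PTR() otherwise.
 */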
struct drm_sched_job *
panthor_job_create(struct panthor_file *pfile,
		   u16 group_handle,
		   const struct drm_panthor_queue_submit *qsubmit,
		   u64 drm_client_id)
{
	struct panthor_group_pool *gpool = pfile->groups;
	struct panthor_job *job;
	u32 credits;
	int ret;

	if (qsubmit->pad)
		return ERR_PTR(-EINVAL);

	/* If stream_addr is zero, stream_size must be zero too. */
	if ((qsubmit->stream_size == 0) != (qsubmit->stream_addr == 0))
		return ERR_PTR(-EINVAL);

	/* Make sure the address is aligned on 64-byte (cacheline) and the size is
	 * aligned on 8-byte (instruction size).
	 */
	if ((qsubmit->stream_addr & 63) || (qsubmit->stream_size & 7))
		return ERR_PTR(-EINVAL);

	/* bits 24:30 must be zero. */
	if (qsubmit->latest_flush & GENMASK(30, 24))
		return ERR_PTR(-EINVAL);

	job = kzalloc(sizeof(*job), GFP_KERNEL);
	if (!job)
		return ERR_PTR(-ENOMEM);

	kref_init(&job->refcount);
	job->queue_idx = qsubmit->queue_index;
	job->call_info.size = qsubmit->stream_size;
	job->call_info.start = qsubmit->stream_addr;
	job->call_info.latest_flush = qsubmit->latest_flush;
	INIT_LIST_HEAD(&job->node);

	job->group = group_from_handle(gpool, group_handle);
	if (!job->group) {
		ret = -EINVAL;
		goto err_put_job;
	}

	if (!group_can_run(job->group)) {
		ret = -EINVAL;
		goto err_put_job;
	}

	if (job->queue_idx >= job->group->queue_count ||
	    !job->group->queues[job->queue_idx]) {
		ret = -EINVAL;
		goto err_put_job;
	}

	/* Empty command streams don't need a fence, they'll pick the one from
	 * the previously submitted job.
	 */
	if (job->call_info.size) {
		job->done_fence = kzalloc(sizeof(*job->done_fence), GFP_KERNEL);
		if (!job->done_fence) {
			ret = -ENOMEM;
			goto err_put_job;
		}
	}

	job->profiling.mask = pfile->ptdev->profile_mask;
	credits = calc_job_credits(job->profiling.mask);
	if (credits == 0) {
		ret = -EINVAL;
		goto err_put_job;
	}

	ret = drm_sched_job_init(&job->base,
				 &job->group->queues[job->queue_idx]->entity,
				 credits, job->group, drm_client_id);
	if (ret)
		goto err_put_job;

	return &job->base;

err_put_job:
	panthor_job_put(&job->base);
	return ERR_PTR(ret);
}

void panthor_job_update_resvs(struct drm_exec *exec, struct drm_sched_job *sched_job)
{
	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);

	panthor_vm_update_resvs(job->group->vm, exec, &sched_job->s_fence->finished,
				DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
}

void panthor_sched_unplug(struct panthor_device *ptdev)
{
	struct panthor_scheduler *sched = ptdev->scheduler;

	disable_delayed_work_sync(&sched->tick_work);
	disable_work_sync(&sched->fw_events_work);
	disable_work_sync(&sched->sync_upd_work);

	mutex_lock(&sched->lock);
	if (sched->pm.has_ref) {
		pm_runtime_put(ptdev->base.dev);
		sched->pm.has_ref = false;
	}
	mutex_unlock(&sched->lock);
}

static void panthor_sched_fini(struct drm_device *ddev, void *res)
{
	struct panthor_scheduler *sched = res;
	int prio;

	if (!sched || !sched->csg_slot_count)
		return;

	if (sched->wq)
		destroy_workqueue(sched->wq);

	if (sched->heap_alloc_wq)
		destroy_workqueue(sched->heap_alloc_wq);

	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
		drm_WARN_ON(ddev, !list_empty(&sched->groups.runnable[prio]));
		drm_WARN_ON(ddev, !list_empty(&sched->groups.idle[prio]));
	}

	drm_WARN_ON(ddev, !list_empty(&sched->groups.waiting));
}

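/**
 * panthor_sched_init() - Initialize the scheduler
 * @ptdev: Device.
 *
 * Queries the FW interfaces to size the scheduler (group, stream and
 * scoreboard slot counts), initializes the run queues and workqueues, and
 * registers a drmm action to tear everything down on device release.
 *
 * Return: 0 on success, a negative error code otherwise.
 */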
int panthor_sched_init(struct panthor_device *ptdev)
{
	struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
	struct panthor_fw_csg_iface *csg_iface = panthor_fw_get_csg_iface(ptdev, 0);
	struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, 0, 0);
	struct panthor_scheduler *sched;
	u32 gpu_as_count, num_groups;
	int prio, ret;

	sched = drmm_kzalloc(&ptdev->base, sizeof(*sched), GFP_KERNEL);
	if (!sched)
		return -ENOMEM;

	/* The highest bit in JOB_INT_* is reserved for global IRQs. That
	 * leaves 31 bits for CSG IRQs, hence the MAX_CSGS clamp here.
	 */
	num_groups = min_t(u32, MAX_CSGS, glb_iface->control->group_num);

	/* The FW-side scheduler might deadlock if two groups with the same
	 * priority try to access a set of resources that overlaps, with part
	 * of the resources being allocated to one group and the other part to
	 * the other group, both groups waiting for the remaining resources to
	 * be allocated. To avoid that, it is recommended to assign each CSG a
	 * different priority. In theory we could allow several groups to have
	 * the same CSG priority if they don't request the same resources, but
	 * that makes the scheduling logic more complicated, so let's clamp
	 * the number of CSG slots to MAX_CSG_PRIO + 1 for now.
	 */
	num_groups = min_t(u32, MAX_CSG_PRIO + 1, num_groups);

	/* We need at least one AS for the MCU and one for the GPU contexts. */
	gpu_as_count = hweight32(ptdev->gpu_info.as_present & GENMASK(31, 1));
	if (!gpu_as_count) {
		drm_err(&ptdev->base, "Not enough AS (%d, expected at least 2)",
			gpu_as_count + 1);
		return -EINVAL;
	}

	sched->ptdev = ptdev;
	sched->sb_slot_count = CS_FEATURES_SCOREBOARDS(cs_iface->control->features);
	sched->csg_slot_count = num_groups;
	sched->cs_slot_count = csg_iface->control->stream_num;
	sched->as_slot_count = gpu_as_count;
	ptdev->csif_info.csg_slot_count = sched->csg_slot_count;
	ptdev->csif_info.cs_slot_count = sched->cs_slot_count;
	ptdev->csif_info.scoreboard_slot_count = sched->sb_slot_count;

	sched->last_tick = 0;
	sched->resched_target = U64_MAX;
	sched->tick_period = msecs_to_jiffies(10);
	INIT_DELAYED_WORK(&sched->tick_work, tick_work);
	INIT_WORK(&sched->sync_upd_work, sync_upd_work);
	INIT_WORK(&sched->fw_events_work, process_fw_events_work);

	ret = drmm_mutex_init(&ptdev->base, &sched->lock);
	if (ret)
		return ret;

	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
		INIT_LIST_HEAD(&sched->groups.runnable[prio]);
		INIT_LIST_HEAD(&sched->groups.idle[prio]);
	}
	INIT_LIST_HEAD(&sched->groups.waiting);

	ret = drmm_mutex_init(&ptdev->base, &sched->reset.lock);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&sched->reset.stopped_groups);

	/* sched->heap_alloc_wq will be used for heap chunk allocation on
	 * tiler OOM events, which means we can't use the same workqueue for
	 * the scheduler because works queued by the scheduler are in
	 * the dma-signalling path. Allocate a dedicated heap_alloc_wq to
	 * work around this limitation.
	 *
	 * FIXME: Ultimately, what we need is a failable/non-blocking GEM
	 * allocation path that we can call when a heap OOM is reported. The
	 * FW is smart enough to fall back on other methods if the kernel can't
	 * allocate memory, and fail the tiling job if none of these
	 * countermeasures worked.
	 *
	 * Set WQ_MEM_RECLAIM on sched->wq to unblock the situation when the
	 * system is running out of memory.
	 */
	sched->heap_alloc_wq = alloc_workqueue("panthor-heap-alloc", WQ_UNBOUND, 0);
	sched->wq = alloc_workqueue("panthor-csf-sched", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	if (!sched->wq || !sched->heap_alloc_wq) {
		panthor_sched_fini(&ptdev->base, sched);
		drm_err(&ptdev->base, "Failed to allocate the workqueues");
		return -ENOMEM;
	}

	ret = drmm_add_action_or_reset(&ptdev->base, panthor_sched_fini, sched);
	if (ret)
		return ret;

	ptdev->scheduler = sched;
	return 0;
}