1 // SPDX-License-Identifier: GPL-2.0 or MIT 2 /* Copyright 2023 Collabora ltd. */ 3 4 #include <drm/drm_drv.h> 5 #include <drm/drm_exec.h> 6 #include <drm/drm_gem_shmem_helper.h> 7 #include <drm/drm_managed.h> 8 #include <drm/gpu_scheduler.h> 9 #include <drm/panthor_drm.h> 10 11 #include <linux/build_bug.h> 12 #include <linux/clk.h> 13 #include <linux/delay.h> 14 #include <linux/dma-mapping.h> 15 #include <linux/dma-resv.h> 16 #include <linux/firmware.h> 17 #include <linux/interrupt.h> 18 #include <linux/io.h> 19 #include <linux/iopoll.h> 20 #include <linux/iosys-map.h> 21 #include <linux/module.h> 22 #include <linux/platform_device.h> 23 #include <linux/pm_runtime.h> 24 25 #include "panthor_devfreq.h" 26 #include "panthor_device.h" 27 #include "panthor_fw.h" 28 #include "panthor_gem.h" 29 #include "panthor_gpu.h" 30 #include "panthor_heap.h" 31 #include "panthor_mmu.h" 32 #include "panthor_regs.h" 33 #include "panthor_sched.h" 34 35 /** 36 * DOC: Scheduler 37 * 38 * Mali CSF hardware adopts a firmware-assisted scheduling model, where 39 * the firmware takes care of scheduling aspects, to some extent. 40 * 41 * The scheduling happens at the scheduling group level, each group 42 * contains 1 to N queues (N is FW/hardware dependent, and exposed 43 * through the firmware interface). Each queue is assigned a command 44 * stream ring buffer, which serves as a way to get jobs submitted to 45 * the GPU, among other things. 46 * 47 * The firmware can schedule a maximum of M groups (M is FW/hardware 48 * dependent, and exposed through the firmware interface). Passed 49 * this maximum number of groups, the kernel must take care of 50 * rotating the groups passed to the firmware so every group gets 51 * a chance to have his queues scheduled for execution. 52 * 53 * The current implementation only supports with kernel-mode queues. 54 * In other terms, userspace doesn't have access to the ring-buffer. 55 * Instead, userspace passes indirect command stream buffers that are 56 * called from the queue ring-buffer by the kernel using a pre-defined 57 * sequence of command stream instructions to ensure the userspace driver 58 * always gets consistent results (cache maintenance, 59 * synchronization, ...). 60 * 61 * We rely on the drm_gpu_scheduler framework to deal with job 62 * dependencies and submission. As any other driver dealing with a 63 * FW-scheduler, we use the 1:1 entity:scheduler mode, such that each 64 * entity has its own job scheduler. When a job is ready to be executed 65 * (all its dependencies are met), it is pushed to the appropriate 66 * queue ring-buffer, and the group is scheduled for execution if it 67 * wasn't already active. 68 * 69 * Kernel-side group scheduling is timeslice-based. When we have less 70 * groups than there are slots, the periodic tick is disabled and we 71 * just let the FW schedule the active groups. When there are more 72 * groups than slots, we let each group a chance to execute stuff for 73 * a given amount of time, and then re-evaluate and pick new groups 74 * to schedule. The group selection algorithm is based on 75 * priority+round-robin. 76 * 77 * Even though user-mode queues is out of the scope right now, the 78 * current design takes them into account by avoiding any guess on the 79 * group/queue state that would be based on information we wouldn't have 80 * if userspace was in charge of the ring-buffer. That's also one of the 81 * reason we don't do 'cooperative' scheduling (encoding FW group slot 82 * reservation as dma_fence that would be returned from the 83 * drm_gpu_scheduler::prepare_job() hook, and treating group rotation as 84 * a queue of waiters, ordered by job submission order). This approach 85 * would work for kernel-mode queues, but would make user-mode queues a 86 * lot more complicated to retrofit. 87 */ 88 89 #define JOB_TIMEOUT_MS 5000 90 91 #define MIN_CS_PER_CSG 8 92 93 #define MIN_CSGS 3 94 #define MAX_CSG_PRIO 0xf 95 96 struct panthor_group; 97 98 /** 99 * struct panthor_csg_slot - Command stream group slot 100 * 101 * This represents a FW slot for a scheduling group. 102 */ 103 struct panthor_csg_slot { 104 /** @group: Scheduling group bound to this slot. */ 105 struct panthor_group *group; 106 107 /** @priority: Group priority. */ 108 u8 priority; 109 110 /** 111 * @idle: True if the group bound to this slot is idle. 112 * 113 * A group is idle when it has nothing waiting for execution on 114 * all its queues, or when queues are blocked waiting for something 115 * to happen (synchronization object). 116 */ 117 bool idle; 118 }; 119 120 /** 121 * enum panthor_csg_priority - Group priority 122 */ 123 enum panthor_csg_priority { 124 /** @PANTHOR_CSG_PRIORITY_LOW: Low priority group. */ 125 PANTHOR_CSG_PRIORITY_LOW = 0, 126 127 /** @PANTHOR_CSG_PRIORITY_MEDIUM: Medium priority group. */ 128 PANTHOR_CSG_PRIORITY_MEDIUM, 129 130 /** @PANTHOR_CSG_PRIORITY_HIGH: High priority group. */ 131 PANTHOR_CSG_PRIORITY_HIGH, 132 133 /** 134 * @PANTHOR_CSG_PRIORITY_RT: Real-time priority group. 135 * 136 * Real-time priority allows one to preempt scheduling of other 137 * non-real-time groups. When such a group becomes executable, 138 * it will evict the group with the lowest non-rt priority if 139 * there's no free group slot available. 140 * 141 * Currently not exposed to userspace. 142 */ 143 PANTHOR_CSG_PRIORITY_RT, 144 145 /** @PANTHOR_CSG_PRIORITY_COUNT: Number of priority levels. */ 146 PANTHOR_CSG_PRIORITY_COUNT, 147 }; 148 149 /** 150 * struct panthor_scheduler - Object used to manage the scheduler 151 */ 152 struct panthor_scheduler { 153 /** @ptdev: Device. */ 154 struct panthor_device *ptdev; 155 156 /** 157 * @wq: Workqueue used by our internal scheduler logic and 158 * drm_gpu_scheduler. 159 * 160 * Used for the scheduler tick, group update or other kind of FW 161 * event processing that can't be handled in the threaded interrupt 162 * path. Also passed to the drm_gpu_scheduler instances embedded 163 * in panthor_queue. 164 */ 165 struct workqueue_struct *wq; 166 167 /** 168 * @heap_alloc_wq: Workqueue used to schedule tiler_oom works. 169 * 170 * We have a queue dedicated to heap chunk allocation works to avoid 171 * blocking the rest of the scheduler if the allocation tries to 172 * reclaim memory. 173 */ 174 struct workqueue_struct *heap_alloc_wq; 175 176 /** @tick_work: Work executed on a scheduling tick. */ 177 struct delayed_work tick_work; 178 179 /** 180 * @sync_upd_work: Work used to process synchronization object updates. 181 * 182 * We use this work to unblock queues/groups that were waiting on a 183 * synchronization object. 184 */ 185 struct work_struct sync_upd_work; 186 187 /** 188 * @fw_events_work: Work used to process FW events outside the interrupt path. 189 * 190 * Even if the interrupt is threaded, we need any event processing 191 * that require taking the panthor_scheduler::lock to be processed 192 * outside the interrupt path so we don't block the tick logic when 193 * it calls panthor_fw_{csg,wait}_wait_acks(). Since most of the 194 * event processing requires taking this lock, we just delegate all 195 * FW event processing to the scheduler workqueue. 196 */ 197 struct work_struct fw_events_work; 198 199 /** 200 * @fw_events: Bitmask encoding pending FW events. 201 */ 202 atomic_t fw_events; 203 204 /** 205 * @resched_target: When the next tick should occur. 206 * 207 * Expressed in jiffies. 208 */ 209 u64 resched_target; 210 211 /** 212 * @last_tick: When the last tick occurred. 213 * 214 * Expressed in jiffies. 215 */ 216 u64 last_tick; 217 218 /** @tick_period: Tick period in jiffies. */ 219 u64 tick_period; 220 221 /** 222 * @lock: Lock protecting access to all the scheduler fields. 223 * 224 * Should be taken in the tick work, the irq handler, and anywhere the @groups 225 * fields are touched. 226 */ 227 struct mutex lock; 228 229 /** @groups: Various lists used to classify groups. */ 230 struct { 231 /** 232 * @runnable: Runnable group lists. 233 * 234 * When a group has queues that want to execute something, 235 * its panthor_group::run_node should be inserted here. 236 * 237 * One list per-priority. 238 */ 239 struct list_head runnable[PANTHOR_CSG_PRIORITY_COUNT]; 240 241 /** 242 * @idle: Idle group lists. 243 * 244 * When all queues of a group are idle (either because they 245 * have nothing to execute, or because they are blocked), the 246 * panthor_group::run_node field should be inserted here. 247 * 248 * One list per-priority. 249 */ 250 struct list_head idle[PANTHOR_CSG_PRIORITY_COUNT]; 251 252 /** 253 * @waiting: List of groups whose queues are blocked on a 254 * synchronization object. 255 * 256 * Insert panthor_group::wait_node here when a group is waiting 257 * for synchronization objects to be signaled. 258 * 259 * This list is evaluated in the @sync_upd_work work. 260 */ 261 struct list_head waiting; 262 } groups; 263 264 /** 265 * @csg_slots: FW command stream group slots. 266 */ 267 struct panthor_csg_slot csg_slots[MAX_CSGS]; 268 269 /** @csg_slot_count: Number of command stream group slots exposed by the FW. */ 270 u32 csg_slot_count; 271 272 /** @cs_slot_count: Number of command stream slot per group slot exposed by the FW. */ 273 u32 cs_slot_count; 274 275 /** @as_slot_count: Number of address space slots supported by the MMU. */ 276 u32 as_slot_count; 277 278 /** @used_csg_slot_count: Number of command stream group slot currently used. */ 279 u32 used_csg_slot_count; 280 281 /** @sb_slot_count: Number of scoreboard slots. */ 282 u32 sb_slot_count; 283 284 /** 285 * @might_have_idle_groups: True if an active group might have become idle. 286 * 287 * This will force a tick, so other runnable groups can be scheduled if one 288 * or more active groups became idle. 289 */ 290 bool might_have_idle_groups; 291 292 /** @pm: Power management related fields. */ 293 struct { 294 /** @has_ref: True if the scheduler owns a runtime PM reference. */ 295 bool has_ref; 296 } pm; 297 298 /** @reset: Reset related fields. */ 299 struct { 300 /** @lock: Lock protecting the other reset fields. */ 301 struct mutex lock; 302 303 /** 304 * @in_progress: True if a reset is in progress. 305 * 306 * Set to true in panthor_sched_pre_reset() and back to false in 307 * panthor_sched_post_reset(). 308 */ 309 atomic_t in_progress; 310 311 /** 312 * @stopped_groups: List containing all groups that were stopped 313 * before a reset. 314 * 315 * Insert panthor_group::run_node in the pre_reset path. 316 */ 317 struct list_head stopped_groups; 318 } reset; 319 }; 320 321 /** 322 * struct panthor_syncobj_32b - 32-bit FW synchronization object 323 */ 324 struct panthor_syncobj_32b { 325 /** @seqno: Sequence number. */ 326 u32 seqno; 327 328 /** 329 * @status: Status. 330 * 331 * Not zero on failure. 332 */ 333 u32 status; 334 }; 335 336 /** 337 * struct panthor_syncobj_64b - 64-bit FW synchronization object 338 */ 339 struct panthor_syncobj_64b { 340 /** @seqno: Sequence number. */ 341 u64 seqno; 342 343 /** 344 * @status: Status. 345 * 346 * Not zero on failure. 347 */ 348 u32 status; 349 350 /** @pad: MBZ. */ 351 u32 pad; 352 }; 353 354 /** 355 * struct panthor_queue - Execution queue 356 */ 357 struct panthor_queue { 358 /** @scheduler: DRM scheduler used for this queue. */ 359 struct drm_gpu_scheduler scheduler; 360 361 /** @entity: DRM scheduling entity used for this queue. */ 362 struct drm_sched_entity entity; 363 364 /** 365 * @remaining_time: Time remaining before the job timeout expires. 366 * 367 * The job timeout is suspended when the queue is not scheduled by the 368 * FW. Every time we suspend the timer, we need to save the remaining 369 * time so we can restore it later on. 370 */ 371 unsigned long remaining_time; 372 373 /** @timeout_suspended: True if the job timeout was suspended. */ 374 bool timeout_suspended; 375 376 /** 377 * @doorbell_id: Doorbell assigned to this queue. 378 * 379 * Right now, all groups share the same doorbell, and the doorbell ID 380 * is assigned to group_slot + 1 when the group is assigned a slot. But 381 * we might decide to provide fine grained doorbell assignment at some 382 * point, so don't have to wake up all queues in a group every time one 383 * of them is updated. 384 */ 385 u8 doorbell_id; 386 387 /** 388 * @priority: Priority of the queue inside the group. 389 * 390 * Must be less than 16 (Only 4 bits available). 391 */ 392 u8 priority; 393 #define CSF_MAX_QUEUE_PRIO GENMASK(3, 0) 394 395 /** @ringbuf: Command stream ring-buffer. */ 396 struct panthor_kernel_bo *ringbuf; 397 398 /** @iface: Firmware interface. */ 399 struct { 400 /** @mem: FW memory allocated for this interface. */ 401 struct panthor_kernel_bo *mem; 402 403 /** @input: Input interface. */ 404 struct panthor_fw_ringbuf_input_iface *input; 405 406 /** @output: Output interface. */ 407 const struct panthor_fw_ringbuf_output_iface *output; 408 409 /** @input_fw_va: FW virtual address of the input interface buffer. */ 410 u32 input_fw_va; 411 412 /** @output_fw_va: FW virtual address of the output interface buffer. */ 413 u32 output_fw_va; 414 } iface; 415 416 /** 417 * @syncwait: Stores information about the synchronization object this 418 * queue is waiting on. 419 */ 420 struct { 421 /** @gpu_va: GPU address of the synchronization object. */ 422 u64 gpu_va; 423 424 /** @ref: Reference value to compare against. */ 425 u64 ref; 426 427 /** @gt: True if this is a greater-than test. */ 428 bool gt; 429 430 /** @sync64: True if this is a 64-bit sync object. */ 431 bool sync64; 432 433 /** @bo: Buffer object holding the synchronization object. */ 434 struct drm_gem_object *obj; 435 436 /** @offset: Offset of the synchronization object inside @bo. */ 437 u64 offset; 438 439 /** 440 * @kmap: Kernel mapping of the buffer object holding the 441 * synchronization object. 442 */ 443 void *kmap; 444 } syncwait; 445 446 /** @fence_ctx: Fence context fields. */ 447 struct { 448 /** @lock: Used to protect access to all fences allocated by this context. */ 449 spinlock_t lock; 450 451 /** 452 * @id: Fence context ID. 453 * 454 * Allocated with dma_fence_context_alloc(). 455 */ 456 u64 id; 457 458 /** @seqno: Sequence number of the last initialized fence. */ 459 atomic64_t seqno; 460 461 /** 462 * @last_fence: Fence of the last submitted job. 463 * 464 * We return this fence when we get an empty command stream. 465 * This way, we are guaranteed that all earlier jobs have completed 466 * when drm_sched_job::s_fence::finished without having to feed 467 * the CS ring buffer with a dummy job that only signals the fence. 468 */ 469 struct dma_fence *last_fence; 470 471 /** 472 * @in_flight_jobs: List containing all in-flight jobs. 473 * 474 * Used to keep track and signal panthor_job::done_fence when the 475 * synchronization object attached to the queue is signaled. 476 */ 477 struct list_head in_flight_jobs; 478 } fence_ctx; 479 }; 480 481 /** 482 * enum panthor_group_state - Scheduling group state. 483 */ 484 enum panthor_group_state { 485 /** @PANTHOR_CS_GROUP_CREATED: Group was created, but not scheduled yet. */ 486 PANTHOR_CS_GROUP_CREATED, 487 488 /** @PANTHOR_CS_GROUP_ACTIVE: Group is currently scheduled. */ 489 PANTHOR_CS_GROUP_ACTIVE, 490 491 /** 492 * @PANTHOR_CS_GROUP_SUSPENDED: Group was scheduled at least once, but is 493 * inactive/suspended right now. 494 */ 495 PANTHOR_CS_GROUP_SUSPENDED, 496 497 /** 498 * @PANTHOR_CS_GROUP_TERMINATED: Group was terminated. 499 * 500 * Can no longer be scheduled. The only allowed action is a destruction. 501 */ 502 PANTHOR_CS_GROUP_TERMINATED, 503 504 /** 505 * @PANTHOR_CS_GROUP_UNKNOWN_STATE: Group is an unknown state. 506 * 507 * The FW returned an inconsistent state. The group is flagged unusable 508 * and can no longer be scheduled. The only allowed action is a 509 * destruction. 510 * 511 * When that happens, we also schedule a FW reset, to start from a fresh 512 * state. 513 */ 514 PANTHOR_CS_GROUP_UNKNOWN_STATE, 515 }; 516 517 /** 518 * struct panthor_group - Scheduling group object 519 */ 520 struct panthor_group { 521 /** @refcount: Reference count */ 522 struct kref refcount; 523 524 /** @ptdev: Device. */ 525 struct panthor_device *ptdev; 526 527 /** @vm: VM bound to the group. */ 528 struct panthor_vm *vm; 529 530 /** @compute_core_mask: Mask of shader cores that can be used for compute jobs. */ 531 u64 compute_core_mask; 532 533 /** @fragment_core_mask: Mask of shader cores that can be used for fragment jobs. */ 534 u64 fragment_core_mask; 535 536 /** @tiler_core_mask: Mask of tiler cores that can be used for tiler jobs. */ 537 u64 tiler_core_mask; 538 539 /** @max_compute_cores: Maximum number of shader cores used for compute jobs. */ 540 u8 max_compute_cores; 541 542 /** @max_fragment_cores: Maximum number of shader cores used for fragment jobs. */ 543 u8 max_fragment_cores; 544 545 /** @max_tiler_cores: Maximum number of tiler cores used for tiler jobs. */ 546 u8 max_tiler_cores; 547 548 /** @priority: Group priority (check panthor_csg_priority). */ 549 u8 priority; 550 551 /** @blocked_queues: Bitmask reflecting the blocked queues. */ 552 u32 blocked_queues; 553 554 /** @idle_queues: Bitmask reflecting the idle queues. */ 555 u32 idle_queues; 556 557 /** @fatal_lock: Lock used to protect access to fatal fields. */ 558 spinlock_t fatal_lock; 559 560 /** @fatal_queues: Bitmask reflecting the queues that hit a fatal exception. */ 561 u32 fatal_queues; 562 563 /** @tiler_oom: Mask of queues that have a tiler OOM event to process. */ 564 atomic_t tiler_oom; 565 566 /** @queue_count: Number of queues in this group. */ 567 u32 queue_count; 568 569 /** @queues: Queues owned by this group. */ 570 struct panthor_queue *queues[MAX_CS_PER_CSG]; 571 572 /** 573 * @csg_id: ID of the FW group slot. 574 * 575 * -1 when the group is not scheduled/active. 576 */ 577 int csg_id; 578 579 /** 580 * @destroyed: True when the group has been destroyed. 581 * 582 * If a group is destroyed it becomes useless: no further jobs can be submitted 583 * to its queues. We simply wait for all references to be dropped so we can 584 * release the group object. 585 */ 586 bool destroyed; 587 588 /** 589 * @timedout: True when a timeout occurred on any of the queues owned by 590 * this group. 591 * 592 * Timeouts can be reported by drm_sched or by the FW. In any case, any 593 * timeout situation is unrecoverable, and the group becomes useless. 594 * We simply wait for all references to be dropped so we can release the 595 * group object. 596 */ 597 bool timedout; 598 599 /** 600 * @syncobjs: Pool of per-queue synchronization objects. 601 * 602 * One sync object per queue. The position of the sync object is 603 * determined by the queue index. 604 */ 605 struct panthor_kernel_bo *syncobjs; 606 607 /** @state: Group state. */ 608 enum panthor_group_state state; 609 610 /** 611 * @suspend_buf: Suspend buffer. 612 * 613 * Stores the state of the group and its queues when a group is suspended. 614 * Used at resume time to restore the group in its previous state. 615 * 616 * The size of the suspend buffer is exposed through the FW interface. 617 */ 618 struct panthor_kernel_bo *suspend_buf; 619 620 /** 621 * @protm_suspend_buf: Protection mode suspend buffer. 622 * 623 * Stores the state of the group and its queues when a group that's in 624 * protection mode is suspended. 625 * 626 * Used at resume time to restore the group in its previous state. 627 * 628 * The size of the protection mode suspend buffer is exposed through the 629 * FW interface. 630 */ 631 struct panthor_kernel_bo *protm_suspend_buf; 632 633 /** @sync_upd_work: Work used to check/signal job fences. */ 634 struct work_struct sync_upd_work; 635 636 /** @tiler_oom_work: Work used to process tiler OOM events happening on this group. */ 637 struct work_struct tiler_oom_work; 638 639 /** @term_work: Work used to finish the group termination procedure. */ 640 struct work_struct term_work; 641 642 /** 643 * @release_work: Work used to release group resources. 644 * 645 * We need to postpone the group release to avoid a deadlock when 646 * the last ref is released in the tick work. 647 */ 648 struct work_struct release_work; 649 650 /** 651 * @run_node: Node used to insert the group in the 652 * panthor_group::groups::{runnable,idle} and 653 * panthor_group::reset.stopped_groups lists. 654 */ 655 struct list_head run_node; 656 657 /** 658 * @wait_node: Node used to insert the group in the 659 * panthor_group::groups::waiting list. 660 */ 661 struct list_head wait_node; 662 }; 663 664 /** 665 * group_queue_work() - Queue a group work 666 * @group: Group to queue the work for. 667 * @wname: Work name. 668 * 669 * Grabs a ref and queue a work item to the scheduler workqueue. If 670 * the work was already queued, we release the reference we grabbed. 671 * 672 * Work callbacks must release the reference we grabbed here. 673 */ 674 #define group_queue_work(group, wname) \ 675 do { \ 676 group_get(group); \ 677 if (!queue_work((group)->ptdev->scheduler->wq, &(group)->wname ## _work)) \ 678 group_put(group); \ 679 } while (0) 680 681 /** 682 * sched_queue_work() - Queue a scheduler work. 683 * @sched: Scheduler object. 684 * @wname: Work name. 685 * 686 * Conditionally queues a scheduler work if no reset is pending/in-progress. 687 */ 688 #define sched_queue_work(sched, wname) \ 689 do { \ 690 if (!atomic_read(&(sched)->reset.in_progress) && \ 691 !panthor_device_reset_is_pending((sched)->ptdev)) \ 692 queue_work((sched)->wq, &(sched)->wname ## _work); \ 693 } while (0) 694 695 /** 696 * sched_queue_delayed_work() - Queue a scheduler delayed work. 697 * @sched: Scheduler object. 698 * @wname: Work name. 699 * @delay: Work delay in jiffies. 700 * 701 * Conditionally queues a scheduler delayed work if no reset is 702 * pending/in-progress. 703 */ 704 #define sched_queue_delayed_work(sched, wname, delay) \ 705 do { \ 706 if (!atomic_read(&sched->reset.in_progress) && \ 707 !panthor_device_reset_is_pending((sched)->ptdev)) \ 708 mod_delayed_work((sched)->wq, &(sched)->wname ## _work, delay); \ 709 } while (0) 710 711 /* 712 * We currently set the maximum of groups per file to an arbitrary low value. 713 * But this can be updated if we need more. 714 */ 715 #define MAX_GROUPS_PER_POOL 128 716 717 /** 718 * struct panthor_group_pool - Group pool 719 * 720 * Each file get assigned a group pool. 721 */ 722 struct panthor_group_pool { 723 /** @xa: Xarray used to manage group handles. */ 724 struct xarray xa; 725 }; 726 727 /** 728 * struct panthor_job - Used to manage GPU job 729 */ 730 struct panthor_job { 731 /** @base: Inherit from drm_sched_job. */ 732 struct drm_sched_job base; 733 734 /** @refcount: Reference count. */ 735 struct kref refcount; 736 737 /** @group: Group of the queue this job will be pushed to. */ 738 struct panthor_group *group; 739 740 /** @queue_idx: Index of the queue inside @group. */ 741 u32 queue_idx; 742 743 /** @call_info: Information about the userspace command stream call. */ 744 struct { 745 /** @start: GPU address of the userspace command stream. */ 746 u64 start; 747 748 /** @size: Size of the userspace command stream. */ 749 u32 size; 750 751 /** 752 * @latest_flush: Flush ID at the time the userspace command 753 * stream was built. 754 * 755 * Needed for the flush reduction mechanism. 756 */ 757 u32 latest_flush; 758 } call_info; 759 760 /** @ringbuf: Position of this job is in the ring buffer. */ 761 struct { 762 /** @start: Start offset. */ 763 u64 start; 764 765 /** @end: End offset. */ 766 u64 end; 767 } ringbuf; 768 769 /** 770 * @node: Used to insert the job in the panthor_queue::fence_ctx::in_flight_jobs 771 * list. 772 */ 773 struct list_head node; 774 775 /** @done_fence: Fence signaled when the job is finished or cancelled. */ 776 struct dma_fence *done_fence; 777 }; 778 779 static void 780 panthor_queue_put_syncwait_obj(struct panthor_queue *queue) 781 { 782 if (queue->syncwait.kmap) { 783 struct iosys_map map = IOSYS_MAP_INIT_VADDR(queue->syncwait.kmap); 784 785 drm_gem_vunmap_unlocked(queue->syncwait.obj, &map); 786 queue->syncwait.kmap = NULL; 787 } 788 789 drm_gem_object_put(queue->syncwait.obj); 790 queue->syncwait.obj = NULL; 791 } 792 793 static void * 794 panthor_queue_get_syncwait_obj(struct panthor_group *group, struct panthor_queue *queue) 795 { 796 struct panthor_device *ptdev = group->ptdev; 797 struct panthor_gem_object *bo; 798 struct iosys_map map; 799 int ret; 800 801 if (queue->syncwait.kmap) 802 return queue->syncwait.kmap + queue->syncwait.offset; 803 804 bo = panthor_vm_get_bo_for_va(group->vm, 805 queue->syncwait.gpu_va, 806 &queue->syncwait.offset); 807 if (drm_WARN_ON(&ptdev->base, IS_ERR_OR_NULL(bo))) 808 goto err_put_syncwait_obj; 809 810 queue->syncwait.obj = &bo->base.base; 811 ret = drm_gem_vmap_unlocked(queue->syncwait.obj, &map); 812 if (drm_WARN_ON(&ptdev->base, ret)) 813 goto err_put_syncwait_obj; 814 815 queue->syncwait.kmap = map.vaddr; 816 if (drm_WARN_ON(&ptdev->base, !queue->syncwait.kmap)) 817 goto err_put_syncwait_obj; 818 819 return queue->syncwait.kmap + queue->syncwait.offset; 820 821 err_put_syncwait_obj: 822 panthor_queue_put_syncwait_obj(queue); 823 return NULL; 824 } 825 826 static void group_free_queue(struct panthor_group *group, struct panthor_queue *queue) 827 { 828 if (IS_ERR_OR_NULL(queue)) 829 return; 830 831 if (queue->entity.fence_context) 832 drm_sched_entity_destroy(&queue->entity); 833 834 if (queue->scheduler.ops) 835 drm_sched_fini(&queue->scheduler); 836 837 panthor_queue_put_syncwait_obj(queue); 838 839 panthor_kernel_bo_destroy(queue->ringbuf); 840 panthor_kernel_bo_destroy(queue->iface.mem); 841 842 /* Release the last_fence we were holding, if any. */ 843 dma_fence_put(queue->fence_ctx.last_fence); 844 845 kfree(queue); 846 } 847 848 static void group_release_work(struct work_struct *work) 849 { 850 struct panthor_group *group = container_of(work, 851 struct panthor_group, 852 release_work); 853 u32 i; 854 855 for (i = 0; i < group->queue_count; i++) 856 group_free_queue(group, group->queues[i]); 857 858 panthor_kernel_bo_destroy(group->suspend_buf); 859 panthor_kernel_bo_destroy(group->protm_suspend_buf); 860 panthor_kernel_bo_destroy(group->syncobjs); 861 862 panthor_vm_put(group->vm); 863 kfree(group); 864 } 865 866 static void group_release(struct kref *kref) 867 { 868 struct panthor_group *group = container_of(kref, 869 struct panthor_group, 870 refcount); 871 struct panthor_device *ptdev = group->ptdev; 872 873 drm_WARN_ON(&ptdev->base, group->csg_id >= 0); 874 drm_WARN_ON(&ptdev->base, !list_empty(&group->run_node)); 875 drm_WARN_ON(&ptdev->base, !list_empty(&group->wait_node)); 876 877 queue_work(panthor_cleanup_wq, &group->release_work); 878 } 879 880 static void group_put(struct panthor_group *group) 881 { 882 if (group) 883 kref_put(&group->refcount, group_release); 884 } 885 886 static struct panthor_group * 887 group_get(struct panthor_group *group) 888 { 889 if (group) 890 kref_get(&group->refcount); 891 892 return group; 893 } 894 895 /** 896 * group_bind_locked() - Bind a group to a group slot 897 * @group: Group. 898 * @csg_id: Slot. 899 * 900 * Return: 0 on success, a negative error code otherwise. 901 */ 902 static int 903 group_bind_locked(struct panthor_group *group, u32 csg_id) 904 { 905 struct panthor_device *ptdev = group->ptdev; 906 struct panthor_csg_slot *csg_slot; 907 int ret; 908 909 lockdep_assert_held(&ptdev->scheduler->lock); 910 911 if (drm_WARN_ON(&ptdev->base, group->csg_id != -1 || csg_id >= MAX_CSGS || 912 ptdev->scheduler->csg_slots[csg_id].group)) 913 return -EINVAL; 914 915 ret = panthor_vm_active(group->vm); 916 if (ret) 917 return ret; 918 919 csg_slot = &ptdev->scheduler->csg_slots[csg_id]; 920 group_get(group); 921 group->csg_id = csg_id; 922 923 /* Dummy doorbell allocation: doorbell is assigned to the group and 924 * all queues use the same doorbell. 925 * 926 * TODO: Implement LRU-based doorbell assignment, so the most often 927 * updated queues get their own doorbell, thus avoiding useless checks 928 * on queues belonging to the same group that are rarely updated. 929 */ 930 for (u32 i = 0; i < group->queue_count; i++) 931 group->queues[i]->doorbell_id = csg_id + 1; 932 933 csg_slot->group = group; 934 935 return 0; 936 } 937 938 /** 939 * group_unbind_locked() - Unbind a group from a slot. 940 * @group: Group to unbind. 941 * 942 * Return: 0 on success, a negative error code otherwise. 943 */ 944 static int 945 group_unbind_locked(struct panthor_group *group) 946 { 947 struct panthor_device *ptdev = group->ptdev; 948 struct panthor_csg_slot *slot; 949 950 lockdep_assert_held(&ptdev->scheduler->lock); 951 952 if (drm_WARN_ON(&ptdev->base, group->csg_id < 0 || group->csg_id >= MAX_CSGS)) 953 return -EINVAL; 954 955 if (drm_WARN_ON(&ptdev->base, group->state == PANTHOR_CS_GROUP_ACTIVE)) 956 return -EINVAL; 957 958 slot = &ptdev->scheduler->csg_slots[group->csg_id]; 959 panthor_vm_idle(group->vm); 960 group->csg_id = -1; 961 962 /* Tiler OOM events will be re-issued next time the group is scheduled. */ 963 atomic_set(&group->tiler_oom, 0); 964 cancel_work(&group->tiler_oom_work); 965 966 for (u32 i = 0; i < group->queue_count; i++) 967 group->queues[i]->doorbell_id = -1; 968 969 slot->group = NULL; 970 971 group_put(group); 972 return 0; 973 } 974 975 /** 976 * cs_slot_prog_locked() - Program a queue slot 977 * @ptdev: Device. 978 * @csg_id: Group slot ID. 979 * @cs_id: Queue slot ID. 980 * 981 * Program a queue slot with the queue information so things can start being 982 * executed on this queue. 983 * 984 * The group slot must have a group bound to it already (group_bind_locked()). 985 */ 986 static void 987 cs_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id) 988 { 989 struct panthor_queue *queue = ptdev->scheduler->csg_slots[csg_id].group->queues[cs_id]; 990 struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id); 991 992 lockdep_assert_held(&ptdev->scheduler->lock); 993 994 queue->iface.input->extract = queue->iface.output->extract; 995 drm_WARN_ON(&ptdev->base, queue->iface.input->insert < queue->iface.input->extract); 996 997 cs_iface->input->ringbuf_base = panthor_kernel_bo_gpuva(queue->ringbuf); 998 cs_iface->input->ringbuf_size = panthor_kernel_bo_size(queue->ringbuf); 999 cs_iface->input->ringbuf_input = queue->iface.input_fw_va; 1000 cs_iface->input->ringbuf_output = queue->iface.output_fw_va; 1001 cs_iface->input->config = CS_CONFIG_PRIORITY(queue->priority) | 1002 CS_CONFIG_DOORBELL(queue->doorbell_id); 1003 cs_iface->input->ack_irq_mask = ~0; 1004 panthor_fw_update_reqs(cs_iface, req, 1005 CS_IDLE_SYNC_WAIT | 1006 CS_IDLE_EMPTY | 1007 CS_STATE_START | 1008 CS_EXTRACT_EVENT, 1009 CS_IDLE_SYNC_WAIT | 1010 CS_IDLE_EMPTY | 1011 CS_STATE_MASK | 1012 CS_EXTRACT_EVENT); 1013 if (queue->iface.input->insert != queue->iface.input->extract && queue->timeout_suspended) { 1014 drm_sched_resume_timeout(&queue->scheduler, queue->remaining_time); 1015 queue->timeout_suspended = false; 1016 } 1017 } 1018 1019 /** 1020 * cs_slot_reset_locked() - Reset a queue slot 1021 * @ptdev: Device. 1022 * @csg_id: Group slot. 1023 * @cs_id: Queue slot. 1024 * 1025 * Change the queue slot state to STOP and suspend the queue timeout if 1026 * the queue is not blocked. 1027 * 1028 * The group slot must have a group bound to it (group_bind_locked()). 1029 */ 1030 static int 1031 cs_slot_reset_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id) 1032 { 1033 struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id); 1034 struct panthor_group *group = ptdev->scheduler->csg_slots[csg_id].group; 1035 struct panthor_queue *queue = group->queues[cs_id]; 1036 1037 lockdep_assert_held(&ptdev->scheduler->lock); 1038 1039 panthor_fw_update_reqs(cs_iface, req, 1040 CS_STATE_STOP, 1041 CS_STATE_MASK); 1042 1043 /* If the queue is blocked, we want to keep the timeout running, so 1044 * we can detect unbounded waits and kill the group when that happens. 1045 */ 1046 if (!(group->blocked_queues & BIT(cs_id)) && !queue->timeout_suspended) { 1047 queue->remaining_time = drm_sched_suspend_timeout(&queue->scheduler); 1048 queue->timeout_suspended = true; 1049 WARN_ON(queue->remaining_time > msecs_to_jiffies(JOB_TIMEOUT_MS)); 1050 } 1051 1052 return 0; 1053 } 1054 1055 /** 1056 * csg_slot_sync_priority_locked() - Synchronize the group slot priority 1057 * @ptdev: Device. 1058 * @csg_id: Group slot ID. 1059 * 1060 * Group slot priority update happens asynchronously. When we receive a 1061 * %CSG_ENDPOINT_CONFIG, we know the update is effective, and can 1062 * reflect it to our panthor_csg_slot object. 1063 */ 1064 static void 1065 csg_slot_sync_priority_locked(struct panthor_device *ptdev, u32 csg_id) 1066 { 1067 struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id]; 1068 struct panthor_fw_csg_iface *csg_iface; 1069 1070 lockdep_assert_held(&ptdev->scheduler->lock); 1071 1072 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); 1073 csg_slot->priority = (csg_iface->input->endpoint_req & CSG_EP_REQ_PRIORITY_MASK) >> 28; 1074 } 1075 1076 /** 1077 * cs_slot_sync_queue_state_locked() - Synchronize the queue slot priority 1078 * @ptdev: Device. 1079 * @csg_id: Group slot. 1080 * @cs_id: Queue slot. 1081 * 1082 * Queue state is updated on group suspend or STATUS_UPDATE event. 1083 */ 1084 static void 1085 cs_slot_sync_queue_state_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id) 1086 { 1087 struct panthor_group *group = ptdev->scheduler->csg_slots[csg_id].group; 1088 struct panthor_queue *queue = group->queues[cs_id]; 1089 struct panthor_fw_cs_iface *cs_iface = 1090 panthor_fw_get_cs_iface(group->ptdev, csg_id, cs_id); 1091 1092 u32 status_wait_cond; 1093 1094 switch (cs_iface->output->status_blocked_reason) { 1095 case CS_STATUS_BLOCKED_REASON_UNBLOCKED: 1096 if (queue->iface.input->insert == queue->iface.output->extract && 1097 cs_iface->output->status_scoreboards == 0) 1098 group->idle_queues |= BIT(cs_id); 1099 break; 1100 1101 case CS_STATUS_BLOCKED_REASON_SYNC_WAIT: 1102 if (list_empty(&group->wait_node)) { 1103 list_move_tail(&group->wait_node, 1104 &group->ptdev->scheduler->groups.waiting); 1105 } 1106 1107 /* The queue is only blocked if there's no deferred operation 1108 * pending, which can be checked through the scoreboard status. 1109 */ 1110 if (!cs_iface->output->status_scoreboards) 1111 group->blocked_queues |= BIT(cs_id); 1112 1113 queue->syncwait.gpu_va = cs_iface->output->status_wait_sync_ptr; 1114 queue->syncwait.ref = cs_iface->output->status_wait_sync_value; 1115 status_wait_cond = cs_iface->output->status_wait & CS_STATUS_WAIT_SYNC_COND_MASK; 1116 queue->syncwait.gt = status_wait_cond == CS_STATUS_WAIT_SYNC_COND_GT; 1117 if (cs_iface->output->status_wait & CS_STATUS_WAIT_SYNC_64B) { 1118 u64 sync_val_hi = cs_iface->output->status_wait_sync_value_hi; 1119 1120 queue->syncwait.sync64 = true; 1121 queue->syncwait.ref |= sync_val_hi << 32; 1122 } else { 1123 queue->syncwait.sync64 = false; 1124 } 1125 break; 1126 1127 default: 1128 /* Other reasons are not blocking. Consider the queue as runnable 1129 * in those cases. 1130 */ 1131 break; 1132 } 1133 } 1134 1135 static void 1136 csg_slot_sync_queues_state_locked(struct panthor_device *ptdev, u32 csg_id) 1137 { 1138 struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id]; 1139 struct panthor_group *group = csg_slot->group; 1140 u32 i; 1141 1142 lockdep_assert_held(&ptdev->scheduler->lock); 1143 1144 group->idle_queues = 0; 1145 group->blocked_queues = 0; 1146 1147 for (i = 0; i < group->queue_count; i++) { 1148 if (group->queues[i]) 1149 cs_slot_sync_queue_state_locked(ptdev, csg_id, i); 1150 } 1151 } 1152 1153 static void 1154 csg_slot_sync_state_locked(struct panthor_device *ptdev, u32 csg_id) 1155 { 1156 struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id]; 1157 struct panthor_fw_csg_iface *csg_iface; 1158 struct panthor_group *group; 1159 enum panthor_group_state new_state, old_state; 1160 u32 csg_state; 1161 1162 lockdep_assert_held(&ptdev->scheduler->lock); 1163 1164 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); 1165 group = csg_slot->group; 1166 1167 if (!group) 1168 return; 1169 1170 old_state = group->state; 1171 csg_state = csg_iface->output->ack & CSG_STATE_MASK; 1172 switch (csg_state) { 1173 case CSG_STATE_START: 1174 case CSG_STATE_RESUME: 1175 new_state = PANTHOR_CS_GROUP_ACTIVE; 1176 break; 1177 case CSG_STATE_TERMINATE: 1178 new_state = PANTHOR_CS_GROUP_TERMINATED; 1179 break; 1180 case CSG_STATE_SUSPEND: 1181 new_state = PANTHOR_CS_GROUP_SUSPENDED; 1182 break; 1183 default: 1184 /* The unknown state might be caused by a FW state corruption, 1185 * which means the group metadata can't be trusted anymore, and 1186 * the SUSPEND operation might propagate the corruption to the 1187 * suspend buffers. Flag the group state as unknown to make 1188 * sure it's unusable after that point. 1189 */ 1190 drm_err(&ptdev->base, "Invalid state on CSG %d (state=%d)", 1191 csg_id, csg_state); 1192 new_state = PANTHOR_CS_GROUP_UNKNOWN_STATE; 1193 break; 1194 } 1195 1196 if (old_state == new_state) 1197 return; 1198 1199 /* The unknown state might be caused by a FW issue, reset the FW to 1200 * take a fresh start. 1201 */ 1202 if (new_state == PANTHOR_CS_GROUP_UNKNOWN_STATE) 1203 panthor_device_schedule_reset(ptdev); 1204 1205 if (new_state == PANTHOR_CS_GROUP_SUSPENDED) 1206 csg_slot_sync_queues_state_locked(ptdev, csg_id); 1207 1208 if (old_state == PANTHOR_CS_GROUP_ACTIVE) { 1209 u32 i; 1210 1211 /* Reset the queue slots so we start from a clean 1212 * state when starting/resuming a new group on this 1213 * CSG slot. No wait needed here, and no ringbell 1214 * either, since the CS slot will only be re-used 1215 * on the next CSG start operation. 1216 */ 1217 for (i = 0; i < group->queue_count; i++) { 1218 if (group->queues[i]) 1219 cs_slot_reset_locked(ptdev, csg_id, i); 1220 } 1221 } 1222 1223 group->state = new_state; 1224 } 1225 1226 static int 1227 csg_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 priority) 1228 { 1229 struct panthor_fw_csg_iface *csg_iface; 1230 struct panthor_csg_slot *csg_slot; 1231 struct panthor_group *group; 1232 u32 queue_mask = 0, i; 1233 1234 lockdep_assert_held(&ptdev->scheduler->lock); 1235 1236 if (priority > MAX_CSG_PRIO) 1237 return -EINVAL; 1238 1239 if (drm_WARN_ON(&ptdev->base, csg_id >= MAX_CSGS)) 1240 return -EINVAL; 1241 1242 csg_slot = &ptdev->scheduler->csg_slots[csg_id]; 1243 group = csg_slot->group; 1244 if (!group || group->state == PANTHOR_CS_GROUP_ACTIVE) 1245 return 0; 1246 1247 csg_iface = panthor_fw_get_csg_iface(group->ptdev, csg_id); 1248 1249 for (i = 0; i < group->queue_count; i++) { 1250 if (group->queues[i]) { 1251 cs_slot_prog_locked(ptdev, csg_id, i); 1252 queue_mask |= BIT(i); 1253 } 1254 } 1255 1256 csg_iface->input->allow_compute = group->compute_core_mask; 1257 csg_iface->input->allow_fragment = group->fragment_core_mask; 1258 csg_iface->input->allow_other = group->tiler_core_mask; 1259 csg_iface->input->endpoint_req = CSG_EP_REQ_COMPUTE(group->max_compute_cores) | 1260 CSG_EP_REQ_FRAGMENT(group->max_fragment_cores) | 1261 CSG_EP_REQ_TILER(group->max_tiler_cores) | 1262 CSG_EP_REQ_PRIORITY(priority); 1263 csg_iface->input->config = panthor_vm_as(group->vm); 1264 1265 if (group->suspend_buf) 1266 csg_iface->input->suspend_buf = panthor_kernel_bo_gpuva(group->suspend_buf); 1267 else 1268 csg_iface->input->suspend_buf = 0; 1269 1270 if (group->protm_suspend_buf) { 1271 csg_iface->input->protm_suspend_buf = 1272 panthor_kernel_bo_gpuva(group->protm_suspend_buf); 1273 } else { 1274 csg_iface->input->protm_suspend_buf = 0; 1275 } 1276 1277 csg_iface->input->ack_irq_mask = ~0; 1278 panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, queue_mask); 1279 return 0; 1280 } 1281 1282 static void 1283 cs_slot_process_fatal_event_locked(struct panthor_device *ptdev, 1284 u32 csg_id, u32 cs_id) 1285 { 1286 struct panthor_scheduler *sched = ptdev->scheduler; 1287 struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id]; 1288 struct panthor_group *group = csg_slot->group; 1289 struct panthor_fw_cs_iface *cs_iface; 1290 u32 fatal; 1291 u64 info; 1292 1293 lockdep_assert_held(&sched->lock); 1294 1295 cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id); 1296 fatal = cs_iface->output->fatal; 1297 info = cs_iface->output->fatal_info; 1298 1299 if (group) 1300 group->fatal_queues |= BIT(cs_id); 1301 1302 if (CS_EXCEPTION_TYPE(fatal) == DRM_PANTHOR_EXCEPTION_CS_UNRECOVERABLE) { 1303 /* If this exception is unrecoverable, queue a reset, and make 1304 * sure we stop scheduling groups until the reset has happened. 1305 */ 1306 panthor_device_schedule_reset(ptdev); 1307 cancel_delayed_work(&sched->tick_work); 1308 } else { 1309 sched_queue_delayed_work(sched, tick, 0); 1310 } 1311 1312 drm_warn(&ptdev->base, 1313 "CSG slot %d CS slot: %d\n" 1314 "CS_FATAL.EXCEPTION_TYPE: 0x%x (%s)\n" 1315 "CS_FATAL.EXCEPTION_DATA: 0x%x\n" 1316 "CS_FATAL_INFO.EXCEPTION_DATA: 0x%llx\n", 1317 csg_id, cs_id, 1318 (unsigned int)CS_EXCEPTION_TYPE(fatal), 1319 panthor_exception_name(ptdev, CS_EXCEPTION_TYPE(fatal)), 1320 (unsigned int)CS_EXCEPTION_DATA(fatal), 1321 info); 1322 } 1323 1324 static void 1325 cs_slot_process_fault_event_locked(struct panthor_device *ptdev, 1326 u32 csg_id, u32 cs_id) 1327 { 1328 struct panthor_scheduler *sched = ptdev->scheduler; 1329 struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id]; 1330 struct panthor_group *group = csg_slot->group; 1331 struct panthor_queue *queue = group && cs_id < group->queue_count ? 1332 group->queues[cs_id] : NULL; 1333 struct panthor_fw_cs_iface *cs_iface; 1334 u32 fault; 1335 u64 info; 1336 1337 lockdep_assert_held(&sched->lock); 1338 1339 cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id); 1340 fault = cs_iface->output->fault; 1341 info = cs_iface->output->fault_info; 1342 1343 if (queue && CS_EXCEPTION_TYPE(fault) == DRM_PANTHOR_EXCEPTION_CS_INHERIT_FAULT) { 1344 u64 cs_extract = queue->iface.output->extract; 1345 struct panthor_job *job; 1346 1347 spin_lock(&queue->fence_ctx.lock); 1348 list_for_each_entry(job, &queue->fence_ctx.in_flight_jobs, node) { 1349 if (cs_extract >= job->ringbuf.end) 1350 continue; 1351 1352 if (cs_extract < job->ringbuf.start) 1353 break; 1354 1355 dma_fence_set_error(job->done_fence, -EINVAL); 1356 } 1357 spin_unlock(&queue->fence_ctx.lock); 1358 } 1359 1360 drm_warn(&ptdev->base, 1361 "CSG slot %d CS slot: %d\n" 1362 "CS_FAULT.EXCEPTION_TYPE: 0x%x (%s)\n" 1363 "CS_FAULT.EXCEPTION_DATA: 0x%x\n" 1364 "CS_FAULT_INFO.EXCEPTION_DATA: 0x%llx\n", 1365 csg_id, cs_id, 1366 (unsigned int)CS_EXCEPTION_TYPE(fault), 1367 panthor_exception_name(ptdev, CS_EXCEPTION_TYPE(fault)), 1368 (unsigned int)CS_EXCEPTION_DATA(fault), 1369 info); 1370 } 1371 1372 static int group_process_tiler_oom(struct panthor_group *group, u32 cs_id) 1373 { 1374 struct panthor_device *ptdev = group->ptdev; 1375 struct panthor_scheduler *sched = ptdev->scheduler; 1376 u32 renderpasses_in_flight, pending_frag_count; 1377 struct panthor_heap_pool *heaps = NULL; 1378 u64 heap_address, new_chunk_va = 0; 1379 u32 vt_start, vt_end, frag_end; 1380 int ret, csg_id; 1381 1382 mutex_lock(&sched->lock); 1383 csg_id = group->csg_id; 1384 if (csg_id >= 0) { 1385 struct panthor_fw_cs_iface *cs_iface; 1386 1387 cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id); 1388 heaps = panthor_vm_get_heap_pool(group->vm, false); 1389 heap_address = cs_iface->output->heap_address; 1390 vt_start = cs_iface->output->heap_vt_start; 1391 vt_end = cs_iface->output->heap_vt_end; 1392 frag_end = cs_iface->output->heap_frag_end; 1393 renderpasses_in_flight = vt_start - frag_end; 1394 pending_frag_count = vt_end - frag_end; 1395 } 1396 mutex_unlock(&sched->lock); 1397 1398 /* The group got scheduled out, we stop here. We will get a new tiler OOM event 1399 * when it's scheduled again. 1400 */ 1401 if (unlikely(csg_id < 0)) 1402 return 0; 1403 1404 if (IS_ERR(heaps) || frag_end > vt_end || vt_end >= vt_start) { 1405 ret = -EINVAL; 1406 } else { 1407 /* We do the allocation without holding the scheduler lock to avoid 1408 * blocking the scheduling. 1409 */ 1410 ret = panthor_heap_grow(heaps, heap_address, 1411 renderpasses_in_flight, 1412 pending_frag_count, &new_chunk_va); 1413 } 1414 1415 /* If the heap context doesn't have memory for us, we want to let the 1416 * FW try to reclaim memory by waiting for fragment jobs to land or by 1417 * executing the tiler OOM exception handler, which is supposed to 1418 * implement incremental rendering. 1419 */ 1420 if (ret && ret != -ENOMEM) { 1421 drm_warn(&ptdev->base, "Failed to extend the tiler heap\n"); 1422 group->fatal_queues |= BIT(cs_id); 1423 sched_queue_delayed_work(sched, tick, 0); 1424 goto out_put_heap_pool; 1425 } 1426 1427 mutex_lock(&sched->lock); 1428 csg_id = group->csg_id; 1429 if (csg_id >= 0) { 1430 struct panthor_fw_csg_iface *csg_iface; 1431 struct panthor_fw_cs_iface *cs_iface; 1432 1433 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); 1434 cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id); 1435 1436 cs_iface->input->heap_start = new_chunk_va; 1437 cs_iface->input->heap_end = new_chunk_va; 1438 panthor_fw_update_reqs(cs_iface, req, cs_iface->output->ack, CS_TILER_OOM); 1439 panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, BIT(cs_id)); 1440 panthor_fw_ring_csg_doorbells(ptdev, BIT(csg_id)); 1441 } 1442 mutex_unlock(&sched->lock); 1443 1444 /* We allocated a chunck, but couldn't link it to the heap 1445 * context because the group was scheduled out while we were 1446 * allocating memory. We need to return this chunk to the heap. 1447 */ 1448 if (unlikely(csg_id < 0 && new_chunk_va)) 1449 panthor_heap_return_chunk(heaps, heap_address, new_chunk_va); 1450 1451 ret = 0; 1452 1453 out_put_heap_pool: 1454 panthor_heap_pool_put(heaps); 1455 return ret; 1456 } 1457 1458 static void group_tiler_oom_work(struct work_struct *work) 1459 { 1460 struct panthor_group *group = 1461 container_of(work, struct panthor_group, tiler_oom_work); 1462 u32 tiler_oom = atomic_xchg(&group->tiler_oom, 0); 1463 1464 while (tiler_oom) { 1465 u32 cs_id = ffs(tiler_oom) - 1; 1466 1467 group_process_tiler_oom(group, cs_id); 1468 tiler_oom &= ~BIT(cs_id); 1469 } 1470 1471 group_put(group); 1472 } 1473 1474 static void 1475 cs_slot_process_tiler_oom_event_locked(struct panthor_device *ptdev, 1476 u32 csg_id, u32 cs_id) 1477 { 1478 struct panthor_scheduler *sched = ptdev->scheduler; 1479 struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id]; 1480 struct panthor_group *group = csg_slot->group; 1481 1482 lockdep_assert_held(&sched->lock); 1483 1484 if (drm_WARN_ON(&ptdev->base, !group)) 1485 return; 1486 1487 atomic_or(BIT(cs_id), &group->tiler_oom); 1488 1489 /* We don't use group_queue_work() here because we want to queue the 1490 * work item to the heap_alloc_wq. 1491 */ 1492 group_get(group); 1493 if (!queue_work(sched->heap_alloc_wq, &group->tiler_oom_work)) 1494 group_put(group); 1495 } 1496 1497 static bool cs_slot_process_irq_locked(struct panthor_device *ptdev, 1498 u32 csg_id, u32 cs_id) 1499 { 1500 struct panthor_fw_cs_iface *cs_iface; 1501 u32 req, ack, events; 1502 1503 lockdep_assert_held(&ptdev->scheduler->lock); 1504 1505 cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id); 1506 req = cs_iface->input->req; 1507 ack = cs_iface->output->ack; 1508 events = (req ^ ack) & CS_EVT_MASK; 1509 1510 if (events & CS_FATAL) 1511 cs_slot_process_fatal_event_locked(ptdev, csg_id, cs_id); 1512 1513 if (events & CS_FAULT) 1514 cs_slot_process_fault_event_locked(ptdev, csg_id, cs_id); 1515 1516 if (events & CS_TILER_OOM) 1517 cs_slot_process_tiler_oom_event_locked(ptdev, csg_id, cs_id); 1518 1519 /* We don't acknowledge the TILER_OOM event since its handling is 1520 * deferred to a separate work. 1521 */ 1522 panthor_fw_update_reqs(cs_iface, req, ack, CS_FATAL | CS_FAULT); 1523 1524 return (events & (CS_FAULT | CS_TILER_OOM)) != 0; 1525 } 1526 1527 static void csg_slot_sync_idle_state_locked(struct panthor_device *ptdev, u32 csg_id) 1528 { 1529 struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id]; 1530 struct panthor_fw_csg_iface *csg_iface; 1531 1532 lockdep_assert_held(&ptdev->scheduler->lock); 1533 1534 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); 1535 csg_slot->idle = csg_iface->output->status_state & CSG_STATUS_STATE_IS_IDLE; 1536 } 1537 1538 static void csg_slot_process_idle_event_locked(struct panthor_device *ptdev, u32 csg_id) 1539 { 1540 struct panthor_scheduler *sched = ptdev->scheduler; 1541 1542 lockdep_assert_held(&sched->lock); 1543 1544 sched->might_have_idle_groups = true; 1545 1546 /* Schedule a tick so we can evict idle groups and schedule non-idle 1547 * ones. This will also update runtime PM and devfreq busy/idle states, 1548 * so the device can lower its frequency or get suspended. 1549 */ 1550 sched_queue_delayed_work(sched, tick, 0); 1551 } 1552 1553 static void csg_slot_sync_update_locked(struct panthor_device *ptdev, 1554 u32 csg_id) 1555 { 1556 struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id]; 1557 struct panthor_group *group = csg_slot->group; 1558 1559 lockdep_assert_held(&ptdev->scheduler->lock); 1560 1561 if (group) 1562 group_queue_work(group, sync_upd); 1563 1564 sched_queue_work(ptdev->scheduler, sync_upd); 1565 } 1566 1567 static void 1568 csg_slot_process_progress_timer_event_locked(struct panthor_device *ptdev, u32 csg_id) 1569 { 1570 struct panthor_scheduler *sched = ptdev->scheduler; 1571 struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id]; 1572 struct panthor_group *group = csg_slot->group; 1573 1574 lockdep_assert_held(&sched->lock); 1575 1576 drm_warn(&ptdev->base, "CSG slot %d progress timeout\n", csg_id); 1577 1578 group = csg_slot->group; 1579 if (!drm_WARN_ON(&ptdev->base, !group)) 1580 group->timedout = true; 1581 1582 sched_queue_delayed_work(sched, tick, 0); 1583 } 1584 1585 static void sched_process_csg_irq_locked(struct panthor_device *ptdev, u32 csg_id) 1586 { 1587 u32 req, ack, cs_irq_req, cs_irq_ack, cs_irqs, csg_events; 1588 struct panthor_fw_csg_iface *csg_iface; 1589 u32 ring_cs_db_mask = 0; 1590 1591 lockdep_assert_held(&ptdev->scheduler->lock); 1592 1593 if (drm_WARN_ON(&ptdev->base, csg_id >= ptdev->scheduler->csg_slot_count)) 1594 return; 1595 1596 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); 1597 req = READ_ONCE(csg_iface->input->req); 1598 ack = READ_ONCE(csg_iface->output->ack); 1599 cs_irq_req = READ_ONCE(csg_iface->output->cs_irq_req); 1600 cs_irq_ack = READ_ONCE(csg_iface->input->cs_irq_ack); 1601 csg_events = (req ^ ack) & CSG_EVT_MASK; 1602 1603 /* There may not be any pending CSG/CS interrupts to process */ 1604 if (req == ack && cs_irq_req == cs_irq_ack) 1605 return; 1606 1607 /* Immediately set IRQ_ACK bits to be same as the IRQ_REQ bits before 1608 * examining the CS_ACK & CS_REQ bits. This would ensure that Host 1609 * doesn't miss an interrupt for the CS in the race scenario where 1610 * whilst Host is servicing an interrupt for the CS, firmware sends 1611 * another interrupt for that CS. 1612 */ 1613 csg_iface->input->cs_irq_ack = cs_irq_req; 1614 1615 panthor_fw_update_reqs(csg_iface, req, ack, 1616 CSG_SYNC_UPDATE | 1617 CSG_IDLE | 1618 CSG_PROGRESS_TIMER_EVENT); 1619 1620 if (csg_events & CSG_IDLE) 1621 csg_slot_process_idle_event_locked(ptdev, csg_id); 1622 1623 if (csg_events & CSG_PROGRESS_TIMER_EVENT) 1624 csg_slot_process_progress_timer_event_locked(ptdev, csg_id); 1625 1626 cs_irqs = cs_irq_req ^ cs_irq_ack; 1627 while (cs_irqs) { 1628 u32 cs_id = ffs(cs_irqs) - 1; 1629 1630 if (cs_slot_process_irq_locked(ptdev, csg_id, cs_id)) 1631 ring_cs_db_mask |= BIT(cs_id); 1632 1633 cs_irqs &= ~BIT(cs_id); 1634 } 1635 1636 if (csg_events & CSG_SYNC_UPDATE) 1637 csg_slot_sync_update_locked(ptdev, csg_id); 1638 1639 if (ring_cs_db_mask) 1640 panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, ring_cs_db_mask); 1641 1642 panthor_fw_ring_csg_doorbells(ptdev, BIT(csg_id)); 1643 } 1644 1645 static void sched_process_idle_event_locked(struct panthor_device *ptdev) 1646 { 1647 struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev); 1648 1649 lockdep_assert_held(&ptdev->scheduler->lock); 1650 1651 /* Acknowledge the idle event and schedule a tick. */ 1652 panthor_fw_update_reqs(glb_iface, req, glb_iface->output->ack, GLB_IDLE); 1653 sched_queue_delayed_work(ptdev->scheduler, tick, 0); 1654 } 1655 1656 /** 1657 * sched_process_global_irq_locked() - Process the scheduling part of a global IRQ 1658 * @ptdev: Device. 1659 */ 1660 static void sched_process_global_irq_locked(struct panthor_device *ptdev) 1661 { 1662 struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev); 1663 u32 req, ack, evts; 1664 1665 lockdep_assert_held(&ptdev->scheduler->lock); 1666 1667 req = READ_ONCE(glb_iface->input->req); 1668 ack = READ_ONCE(glb_iface->output->ack); 1669 evts = (req ^ ack) & GLB_EVT_MASK; 1670 1671 if (evts & GLB_IDLE) 1672 sched_process_idle_event_locked(ptdev); 1673 } 1674 1675 static void process_fw_events_work(struct work_struct *work) 1676 { 1677 struct panthor_scheduler *sched = container_of(work, struct panthor_scheduler, 1678 fw_events_work); 1679 u32 events = atomic_xchg(&sched->fw_events, 0); 1680 struct panthor_device *ptdev = sched->ptdev; 1681 1682 mutex_lock(&sched->lock); 1683 1684 if (events & JOB_INT_GLOBAL_IF) { 1685 sched_process_global_irq_locked(ptdev); 1686 events &= ~JOB_INT_GLOBAL_IF; 1687 } 1688 1689 while (events) { 1690 u32 csg_id = ffs(events) - 1; 1691 1692 sched_process_csg_irq_locked(ptdev, csg_id); 1693 events &= ~BIT(csg_id); 1694 } 1695 1696 mutex_unlock(&sched->lock); 1697 } 1698 1699 /** 1700 * panthor_sched_report_fw_events() - Report FW events to the scheduler. 1701 */ 1702 void panthor_sched_report_fw_events(struct panthor_device *ptdev, u32 events) 1703 { 1704 if (!ptdev->scheduler) 1705 return; 1706 1707 atomic_or(events, &ptdev->scheduler->fw_events); 1708 sched_queue_work(ptdev->scheduler, fw_events); 1709 } 1710 1711 static const char *fence_get_driver_name(struct dma_fence *fence) 1712 { 1713 return "panthor"; 1714 } 1715 1716 static const char *queue_fence_get_timeline_name(struct dma_fence *fence) 1717 { 1718 return "queue-fence"; 1719 } 1720 1721 static const struct dma_fence_ops panthor_queue_fence_ops = { 1722 .get_driver_name = fence_get_driver_name, 1723 .get_timeline_name = queue_fence_get_timeline_name, 1724 }; 1725 1726 struct panthor_csg_slots_upd_ctx { 1727 u32 update_mask; 1728 u32 timedout_mask; 1729 struct { 1730 u32 value; 1731 u32 mask; 1732 } requests[MAX_CSGS]; 1733 }; 1734 1735 static void csgs_upd_ctx_init(struct panthor_csg_slots_upd_ctx *ctx) 1736 { 1737 memset(ctx, 0, sizeof(*ctx)); 1738 } 1739 1740 static void csgs_upd_ctx_queue_reqs(struct panthor_device *ptdev, 1741 struct panthor_csg_slots_upd_ctx *ctx, 1742 u32 csg_id, u32 value, u32 mask) 1743 { 1744 if (drm_WARN_ON(&ptdev->base, !mask) || 1745 drm_WARN_ON(&ptdev->base, csg_id >= ptdev->scheduler->csg_slot_count)) 1746 return; 1747 1748 ctx->requests[csg_id].value = (ctx->requests[csg_id].value & ~mask) | (value & mask); 1749 ctx->requests[csg_id].mask |= mask; 1750 ctx->update_mask |= BIT(csg_id); 1751 } 1752 1753 static int csgs_upd_ctx_apply_locked(struct panthor_device *ptdev, 1754 struct panthor_csg_slots_upd_ctx *ctx) 1755 { 1756 struct panthor_scheduler *sched = ptdev->scheduler; 1757 u32 update_slots = ctx->update_mask; 1758 1759 lockdep_assert_held(&sched->lock); 1760 1761 if (!ctx->update_mask) 1762 return 0; 1763 1764 while (update_slots) { 1765 struct panthor_fw_csg_iface *csg_iface; 1766 u32 csg_id = ffs(update_slots) - 1; 1767 1768 update_slots &= ~BIT(csg_id); 1769 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); 1770 panthor_fw_update_reqs(csg_iface, req, 1771 ctx->requests[csg_id].value, 1772 ctx->requests[csg_id].mask); 1773 } 1774 1775 panthor_fw_ring_csg_doorbells(ptdev, ctx->update_mask); 1776 1777 update_slots = ctx->update_mask; 1778 while (update_slots) { 1779 struct panthor_fw_csg_iface *csg_iface; 1780 u32 csg_id = ffs(update_slots) - 1; 1781 u32 req_mask = ctx->requests[csg_id].mask, acked; 1782 int ret; 1783 1784 update_slots &= ~BIT(csg_id); 1785 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); 1786 1787 ret = panthor_fw_csg_wait_acks(ptdev, csg_id, req_mask, &acked, 100); 1788 1789 if (acked & CSG_ENDPOINT_CONFIG) 1790 csg_slot_sync_priority_locked(ptdev, csg_id); 1791 1792 if (acked & CSG_STATE_MASK) 1793 csg_slot_sync_state_locked(ptdev, csg_id); 1794 1795 if (acked & CSG_STATUS_UPDATE) { 1796 csg_slot_sync_queues_state_locked(ptdev, csg_id); 1797 csg_slot_sync_idle_state_locked(ptdev, csg_id); 1798 } 1799 1800 if (ret && acked != req_mask && 1801 ((csg_iface->input->req ^ csg_iface->output->ack) & req_mask) != 0) { 1802 drm_err(&ptdev->base, "CSG %d update request timedout", csg_id); 1803 ctx->timedout_mask |= BIT(csg_id); 1804 } 1805 } 1806 1807 if (ctx->timedout_mask) 1808 return -ETIMEDOUT; 1809 1810 return 0; 1811 } 1812 1813 struct panthor_sched_tick_ctx { 1814 struct list_head old_groups[PANTHOR_CSG_PRIORITY_COUNT]; 1815 struct list_head groups[PANTHOR_CSG_PRIORITY_COUNT]; 1816 u32 idle_group_count; 1817 u32 group_count; 1818 enum panthor_csg_priority min_priority; 1819 struct panthor_vm *vms[MAX_CS_PER_CSG]; 1820 u32 as_count; 1821 bool immediate_tick; 1822 u32 csg_upd_failed_mask; 1823 }; 1824 1825 static bool 1826 tick_ctx_is_full(const struct panthor_scheduler *sched, 1827 const struct panthor_sched_tick_ctx *ctx) 1828 { 1829 return ctx->group_count == sched->csg_slot_count; 1830 } 1831 1832 static bool 1833 group_is_idle(struct panthor_group *group) 1834 { 1835 struct panthor_device *ptdev = group->ptdev; 1836 u32 inactive_queues; 1837 1838 if (group->csg_id >= 0) 1839 return ptdev->scheduler->csg_slots[group->csg_id].idle; 1840 1841 inactive_queues = group->idle_queues | group->blocked_queues; 1842 return hweight32(inactive_queues) == group->queue_count; 1843 } 1844 1845 static bool 1846 group_can_run(struct panthor_group *group) 1847 { 1848 return group->state != PANTHOR_CS_GROUP_TERMINATED && 1849 group->state != PANTHOR_CS_GROUP_UNKNOWN_STATE && 1850 !group->destroyed && group->fatal_queues == 0 && 1851 !group->timedout; 1852 } 1853 1854 static void 1855 tick_ctx_pick_groups_from_list(const struct panthor_scheduler *sched, 1856 struct panthor_sched_tick_ctx *ctx, 1857 struct list_head *queue, 1858 bool skip_idle_groups, 1859 bool owned_by_tick_ctx) 1860 { 1861 struct panthor_group *group, *tmp; 1862 1863 if (tick_ctx_is_full(sched, ctx)) 1864 return; 1865 1866 list_for_each_entry_safe(group, tmp, queue, run_node) { 1867 u32 i; 1868 1869 if (!group_can_run(group)) 1870 continue; 1871 1872 if (skip_idle_groups && group_is_idle(group)) 1873 continue; 1874 1875 for (i = 0; i < ctx->as_count; i++) { 1876 if (ctx->vms[i] == group->vm) 1877 break; 1878 } 1879 1880 if (i == ctx->as_count && ctx->as_count == sched->as_slot_count) 1881 continue; 1882 1883 if (!owned_by_tick_ctx) 1884 group_get(group); 1885 1886 list_move_tail(&group->run_node, &ctx->groups[group->priority]); 1887 ctx->group_count++; 1888 if (group_is_idle(group)) 1889 ctx->idle_group_count++; 1890 1891 if (i == ctx->as_count) 1892 ctx->vms[ctx->as_count++] = group->vm; 1893 1894 if (ctx->min_priority > group->priority) 1895 ctx->min_priority = group->priority; 1896 1897 if (tick_ctx_is_full(sched, ctx)) 1898 return; 1899 } 1900 } 1901 1902 static void 1903 tick_ctx_insert_old_group(struct panthor_scheduler *sched, 1904 struct panthor_sched_tick_ctx *ctx, 1905 struct panthor_group *group, 1906 bool full_tick) 1907 { 1908 struct panthor_csg_slot *csg_slot = &sched->csg_slots[group->csg_id]; 1909 struct panthor_group *other_group; 1910 1911 if (!full_tick) { 1912 list_add_tail(&group->run_node, &ctx->old_groups[group->priority]); 1913 return; 1914 } 1915 1916 /* Rotate to make sure groups with lower CSG slot 1917 * priorities have a chance to get a higher CSG slot 1918 * priority next time they get picked. This priority 1919 * has an impact on resource request ordering, so it's 1920 * important to make sure we don't let one group starve 1921 * all other groups with the same group priority. 1922 */ 1923 list_for_each_entry(other_group, 1924 &ctx->old_groups[csg_slot->group->priority], 1925 run_node) { 1926 struct panthor_csg_slot *other_csg_slot = &sched->csg_slots[other_group->csg_id]; 1927 1928 if (other_csg_slot->priority > csg_slot->priority) { 1929 list_add_tail(&csg_slot->group->run_node, &other_group->run_node); 1930 return; 1931 } 1932 } 1933 1934 list_add_tail(&group->run_node, &ctx->old_groups[group->priority]); 1935 } 1936 1937 static void 1938 tick_ctx_init(struct panthor_scheduler *sched, 1939 struct panthor_sched_tick_ctx *ctx, 1940 bool full_tick) 1941 { 1942 struct panthor_device *ptdev = sched->ptdev; 1943 struct panthor_csg_slots_upd_ctx upd_ctx; 1944 int ret; 1945 u32 i; 1946 1947 memset(ctx, 0, sizeof(*ctx)); 1948 csgs_upd_ctx_init(&upd_ctx); 1949 1950 ctx->min_priority = PANTHOR_CSG_PRIORITY_COUNT; 1951 for (i = 0; i < ARRAY_SIZE(ctx->groups); i++) { 1952 INIT_LIST_HEAD(&ctx->groups[i]); 1953 INIT_LIST_HEAD(&ctx->old_groups[i]); 1954 } 1955 1956 for (i = 0; i < sched->csg_slot_count; i++) { 1957 struct panthor_csg_slot *csg_slot = &sched->csg_slots[i]; 1958 struct panthor_group *group = csg_slot->group; 1959 struct panthor_fw_csg_iface *csg_iface; 1960 1961 if (!group) 1962 continue; 1963 1964 csg_iface = panthor_fw_get_csg_iface(ptdev, i); 1965 group_get(group); 1966 1967 /* If there was unhandled faults on the VM, force processing of 1968 * CSG IRQs, so we can flag the faulty queue. 1969 */ 1970 if (panthor_vm_has_unhandled_faults(group->vm)) { 1971 sched_process_csg_irq_locked(ptdev, i); 1972 1973 /* No fatal fault reported, flag all queues as faulty. */ 1974 if (!group->fatal_queues) 1975 group->fatal_queues |= GENMASK(group->queue_count - 1, 0); 1976 } 1977 1978 tick_ctx_insert_old_group(sched, ctx, group, full_tick); 1979 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, i, 1980 csg_iface->output->ack ^ CSG_STATUS_UPDATE, 1981 CSG_STATUS_UPDATE); 1982 } 1983 1984 ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx); 1985 if (ret) { 1986 panthor_device_schedule_reset(ptdev); 1987 ctx->csg_upd_failed_mask |= upd_ctx.timedout_mask; 1988 } 1989 } 1990 1991 #define NUM_INSTRS_PER_SLOT 16 1992 1993 static void 1994 group_term_post_processing(struct panthor_group *group) 1995 { 1996 struct panthor_job *job, *tmp; 1997 LIST_HEAD(faulty_jobs); 1998 bool cookie; 1999 u32 i = 0; 2000 2001 if (drm_WARN_ON(&group->ptdev->base, group_can_run(group))) 2002 return; 2003 2004 cookie = dma_fence_begin_signalling(); 2005 for (i = 0; i < group->queue_count; i++) { 2006 struct panthor_queue *queue = group->queues[i]; 2007 struct panthor_syncobj_64b *syncobj; 2008 int err; 2009 2010 if (group->fatal_queues & BIT(i)) 2011 err = -EINVAL; 2012 else if (group->timedout) 2013 err = -ETIMEDOUT; 2014 else 2015 err = -ECANCELED; 2016 2017 if (!queue) 2018 continue; 2019 2020 spin_lock(&queue->fence_ctx.lock); 2021 list_for_each_entry_safe(job, tmp, &queue->fence_ctx.in_flight_jobs, node) { 2022 list_move_tail(&job->node, &faulty_jobs); 2023 dma_fence_set_error(job->done_fence, err); 2024 dma_fence_signal_locked(job->done_fence); 2025 } 2026 spin_unlock(&queue->fence_ctx.lock); 2027 2028 /* Manually update the syncobj seqno to unblock waiters. */ 2029 syncobj = group->syncobjs->kmap + (i * sizeof(*syncobj)); 2030 syncobj->status = ~0; 2031 syncobj->seqno = atomic64_read(&queue->fence_ctx.seqno); 2032 sched_queue_work(group->ptdev->scheduler, sync_upd); 2033 } 2034 dma_fence_end_signalling(cookie); 2035 2036 list_for_each_entry_safe(job, tmp, &faulty_jobs, node) { 2037 list_del_init(&job->node); 2038 panthor_job_put(&job->base); 2039 } 2040 } 2041 2042 static void group_term_work(struct work_struct *work) 2043 { 2044 struct panthor_group *group = 2045 container_of(work, struct panthor_group, term_work); 2046 2047 group_term_post_processing(group); 2048 group_put(group); 2049 } 2050 2051 static void 2052 tick_ctx_cleanup(struct panthor_scheduler *sched, 2053 struct panthor_sched_tick_ctx *ctx) 2054 { 2055 struct panthor_device *ptdev = sched->ptdev; 2056 struct panthor_group *group, *tmp; 2057 u32 i; 2058 2059 for (i = 0; i < ARRAY_SIZE(ctx->old_groups); i++) { 2060 list_for_each_entry_safe(group, tmp, &ctx->old_groups[i], run_node) { 2061 /* If everything went fine, we should only have groups 2062 * to be terminated in the old_groups lists. 2063 */ 2064 drm_WARN_ON(&ptdev->base, !ctx->csg_upd_failed_mask && 2065 group_can_run(group)); 2066 2067 if (!group_can_run(group)) { 2068 list_del_init(&group->run_node); 2069 list_del_init(&group->wait_node); 2070 group_queue_work(group, term); 2071 } else if (group->csg_id >= 0) { 2072 list_del_init(&group->run_node); 2073 } else { 2074 list_move(&group->run_node, 2075 group_is_idle(group) ? 2076 &sched->groups.idle[group->priority] : 2077 &sched->groups.runnable[group->priority]); 2078 } 2079 group_put(group); 2080 } 2081 } 2082 2083 for (i = 0; i < ARRAY_SIZE(ctx->groups); i++) { 2084 /* If everything went fine, the groups to schedule lists should 2085 * be empty. 2086 */ 2087 drm_WARN_ON(&ptdev->base, 2088 !ctx->csg_upd_failed_mask && !list_empty(&ctx->groups[i])); 2089 2090 list_for_each_entry_safe(group, tmp, &ctx->groups[i], run_node) { 2091 if (group->csg_id >= 0) { 2092 list_del_init(&group->run_node); 2093 } else { 2094 list_move(&group->run_node, 2095 group_is_idle(group) ? 2096 &sched->groups.idle[group->priority] : 2097 &sched->groups.runnable[group->priority]); 2098 } 2099 group_put(group); 2100 } 2101 } 2102 } 2103 2104 static void 2105 tick_ctx_apply(struct panthor_scheduler *sched, struct panthor_sched_tick_ctx *ctx) 2106 { 2107 struct panthor_group *group, *tmp; 2108 struct panthor_device *ptdev = sched->ptdev; 2109 struct panthor_csg_slot *csg_slot; 2110 int prio, new_csg_prio = MAX_CSG_PRIO, i; 2111 u32 free_csg_slots = 0; 2112 struct panthor_csg_slots_upd_ctx upd_ctx; 2113 int ret; 2114 2115 csgs_upd_ctx_init(&upd_ctx); 2116 2117 for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) { 2118 /* Suspend or terminate evicted groups. */ 2119 list_for_each_entry(group, &ctx->old_groups[prio], run_node) { 2120 bool term = !group_can_run(group); 2121 int csg_id = group->csg_id; 2122 2123 if (drm_WARN_ON(&ptdev->base, csg_id < 0)) 2124 continue; 2125 2126 csg_slot = &sched->csg_slots[csg_id]; 2127 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id, 2128 term ? CSG_STATE_TERMINATE : CSG_STATE_SUSPEND, 2129 CSG_STATE_MASK); 2130 } 2131 2132 /* Update priorities on already running groups. */ 2133 list_for_each_entry(group, &ctx->groups[prio], run_node) { 2134 struct panthor_fw_csg_iface *csg_iface; 2135 int csg_id = group->csg_id; 2136 2137 if (csg_id < 0) { 2138 new_csg_prio--; 2139 continue; 2140 } 2141 2142 csg_slot = &sched->csg_slots[csg_id]; 2143 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); 2144 if (csg_slot->priority == new_csg_prio) { 2145 new_csg_prio--; 2146 continue; 2147 } 2148 2149 panthor_fw_update_reqs(csg_iface, endpoint_req, 2150 CSG_EP_REQ_PRIORITY(new_csg_prio), 2151 CSG_EP_REQ_PRIORITY_MASK); 2152 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id, 2153 csg_iface->output->ack ^ CSG_ENDPOINT_CONFIG, 2154 CSG_ENDPOINT_CONFIG); 2155 new_csg_prio--; 2156 } 2157 } 2158 2159 ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx); 2160 if (ret) { 2161 panthor_device_schedule_reset(ptdev); 2162 ctx->csg_upd_failed_mask |= upd_ctx.timedout_mask; 2163 return; 2164 } 2165 2166 /* Unbind evicted groups. */ 2167 for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) { 2168 list_for_each_entry(group, &ctx->old_groups[prio], run_node) { 2169 /* This group is gone. Process interrupts to clear 2170 * any pending interrupts before we start the new 2171 * group. 2172 */ 2173 if (group->csg_id >= 0) 2174 sched_process_csg_irq_locked(ptdev, group->csg_id); 2175 2176 group_unbind_locked(group); 2177 } 2178 } 2179 2180 for (i = 0; i < sched->csg_slot_count; i++) { 2181 if (!sched->csg_slots[i].group) 2182 free_csg_slots |= BIT(i); 2183 } 2184 2185 csgs_upd_ctx_init(&upd_ctx); 2186 new_csg_prio = MAX_CSG_PRIO; 2187 2188 /* Start new groups. */ 2189 for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) { 2190 list_for_each_entry(group, &ctx->groups[prio], run_node) { 2191 int csg_id = group->csg_id; 2192 struct panthor_fw_csg_iface *csg_iface; 2193 2194 if (csg_id >= 0) { 2195 new_csg_prio--; 2196 continue; 2197 } 2198 2199 csg_id = ffs(free_csg_slots) - 1; 2200 if (drm_WARN_ON(&ptdev->base, csg_id < 0)) 2201 break; 2202 2203 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); 2204 csg_slot = &sched->csg_slots[csg_id]; 2205 group_bind_locked(group, csg_id); 2206 csg_slot_prog_locked(ptdev, csg_id, new_csg_prio--); 2207 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id, 2208 group->state == PANTHOR_CS_GROUP_SUSPENDED ? 2209 CSG_STATE_RESUME : CSG_STATE_START, 2210 CSG_STATE_MASK); 2211 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id, 2212 csg_iface->output->ack ^ CSG_ENDPOINT_CONFIG, 2213 CSG_ENDPOINT_CONFIG); 2214 free_csg_slots &= ~BIT(csg_id); 2215 } 2216 } 2217 2218 ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx); 2219 if (ret) { 2220 panthor_device_schedule_reset(ptdev); 2221 ctx->csg_upd_failed_mask |= upd_ctx.timedout_mask; 2222 return; 2223 } 2224 2225 for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) { 2226 list_for_each_entry_safe(group, tmp, &ctx->groups[prio], run_node) { 2227 list_del_init(&group->run_node); 2228 2229 /* If the group has been destroyed while we were 2230 * scheduling, ask for an immediate tick to 2231 * re-evaluate as soon as possible and get rid of 2232 * this dangling group. 2233 */ 2234 if (group->destroyed) 2235 ctx->immediate_tick = true; 2236 group_put(group); 2237 } 2238 2239 /* Return evicted groups to the idle or run queues. Groups 2240 * that can no longer be run (because they've been destroyed 2241 * or experienced an unrecoverable error) will be scheduled 2242 * for destruction in tick_ctx_cleanup(). 2243 */ 2244 list_for_each_entry_safe(group, tmp, &ctx->old_groups[prio], run_node) { 2245 if (!group_can_run(group)) 2246 continue; 2247 2248 if (group_is_idle(group)) 2249 list_move_tail(&group->run_node, &sched->groups.idle[prio]); 2250 else 2251 list_move_tail(&group->run_node, &sched->groups.runnable[prio]); 2252 group_put(group); 2253 } 2254 } 2255 2256 sched->used_csg_slot_count = ctx->group_count; 2257 sched->might_have_idle_groups = ctx->idle_group_count > 0; 2258 } 2259 2260 static u64 2261 tick_ctx_update_resched_target(struct panthor_scheduler *sched, 2262 const struct panthor_sched_tick_ctx *ctx) 2263 { 2264 /* We had space left, no need to reschedule until some external event happens. */ 2265 if (!tick_ctx_is_full(sched, ctx)) 2266 goto no_tick; 2267 2268 /* If idle groups were scheduled, no need to wake up until some external 2269 * event happens (group unblocked, new job submitted, ...). 2270 */ 2271 if (ctx->idle_group_count) 2272 goto no_tick; 2273 2274 if (drm_WARN_ON(&sched->ptdev->base, ctx->min_priority >= PANTHOR_CSG_PRIORITY_COUNT)) 2275 goto no_tick; 2276 2277 /* If there are groups of the same priority waiting, we need to 2278 * keep the scheduler ticking, otherwise, we'll just wait for 2279 * new groups with higher priority to be queued. 2280 */ 2281 if (!list_empty(&sched->groups.runnable[ctx->min_priority])) { 2282 u64 resched_target = sched->last_tick + sched->tick_period; 2283 2284 if (time_before64(sched->resched_target, sched->last_tick) || 2285 time_before64(resched_target, sched->resched_target)) 2286 sched->resched_target = resched_target; 2287 2288 return sched->resched_target - sched->last_tick; 2289 } 2290 2291 no_tick: 2292 sched->resched_target = U64_MAX; 2293 return U64_MAX; 2294 } 2295 2296 static void tick_work(struct work_struct *work) 2297 { 2298 struct panthor_scheduler *sched = container_of(work, struct panthor_scheduler, 2299 tick_work.work); 2300 struct panthor_device *ptdev = sched->ptdev; 2301 struct panthor_sched_tick_ctx ctx; 2302 u64 remaining_jiffies = 0, resched_delay; 2303 u64 now = get_jiffies_64(); 2304 int prio, ret, cookie; 2305 2306 if (!drm_dev_enter(&ptdev->base, &cookie)) 2307 return; 2308 2309 ret = pm_runtime_resume_and_get(ptdev->base.dev); 2310 if (drm_WARN_ON(&ptdev->base, ret)) 2311 goto out_dev_exit; 2312 2313 if (time_before64(now, sched->resched_target)) 2314 remaining_jiffies = sched->resched_target - now; 2315 2316 mutex_lock(&sched->lock); 2317 if (panthor_device_reset_is_pending(sched->ptdev)) 2318 goto out_unlock; 2319 2320 tick_ctx_init(sched, &ctx, remaining_jiffies != 0); 2321 if (ctx.csg_upd_failed_mask) 2322 goto out_cleanup_ctx; 2323 2324 if (remaining_jiffies) { 2325 /* Scheduling forced in the middle of a tick. Only RT groups 2326 * can preempt non-RT ones. Currently running RT groups can't be 2327 * preempted. 2328 */ 2329 for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; 2330 prio >= 0 && !tick_ctx_is_full(sched, &ctx); 2331 prio--) { 2332 tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], 2333 true, true); 2334 if (prio == PANTHOR_CSG_PRIORITY_RT) { 2335 tick_ctx_pick_groups_from_list(sched, &ctx, 2336 &sched->groups.runnable[prio], 2337 true, false); 2338 } 2339 } 2340 } 2341 2342 /* First pick non-idle groups */ 2343 for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; 2344 prio >= 0 && !tick_ctx_is_full(sched, &ctx); 2345 prio--) { 2346 tick_ctx_pick_groups_from_list(sched, &ctx, &sched->groups.runnable[prio], 2347 true, false); 2348 tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], true, true); 2349 } 2350 2351 /* If we have free CSG slots left, pick idle groups */ 2352 for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; 2353 prio >= 0 && !tick_ctx_is_full(sched, &ctx); 2354 prio--) { 2355 /* Check the old_group queue first to avoid reprogramming the slots */ 2356 tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], false, true); 2357 tick_ctx_pick_groups_from_list(sched, &ctx, &sched->groups.idle[prio], 2358 false, false); 2359 } 2360 2361 tick_ctx_apply(sched, &ctx); 2362 if (ctx.csg_upd_failed_mask) 2363 goto out_cleanup_ctx; 2364 2365 if (ctx.idle_group_count == ctx.group_count) { 2366 panthor_devfreq_record_idle(sched->ptdev); 2367 if (sched->pm.has_ref) { 2368 pm_runtime_put_autosuspend(ptdev->base.dev); 2369 sched->pm.has_ref = false; 2370 } 2371 } else { 2372 panthor_devfreq_record_busy(sched->ptdev); 2373 if (!sched->pm.has_ref) { 2374 pm_runtime_get(ptdev->base.dev); 2375 sched->pm.has_ref = true; 2376 } 2377 } 2378 2379 sched->last_tick = now; 2380 resched_delay = tick_ctx_update_resched_target(sched, &ctx); 2381 if (ctx.immediate_tick) 2382 resched_delay = 0; 2383 2384 if (resched_delay != U64_MAX) 2385 sched_queue_delayed_work(sched, tick, resched_delay); 2386 2387 out_cleanup_ctx: 2388 tick_ctx_cleanup(sched, &ctx); 2389 2390 out_unlock: 2391 mutex_unlock(&sched->lock); 2392 pm_runtime_mark_last_busy(ptdev->base.dev); 2393 pm_runtime_put_autosuspend(ptdev->base.dev); 2394 2395 out_dev_exit: 2396 drm_dev_exit(cookie); 2397 } 2398 2399 static int panthor_queue_eval_syncwait(struct panthor_group *group, u8 queue_idx) 2400 { 2401 struct panthor_queue *queue = group->queues[queue_idx]; 2402 union { 2403 struct panthor_syncobj_64b sync64; 2404 struct panthor_syncobj_32b sync32; 2405 } *syncobj; 2406 bool result; 2407 u64 value; 2408 2409 syncobj = panthor_queue_get_syncwait_obj(group, queue); 2410 if (!syncobj) 2411 return -EINVAL; 2412 2413 value = queue->syncwait.sync64 ? 2414 syncobj->sync64.seqno : 2415 syncobj->sync32.seqno; 2416 2417 if (queue->syncwait.gt) 2418 result = value > queue->syncwait.ref; 2419 else 2420 result = value <= queue->syncwait.ref; 2421 2422 if (result) 2423 panthor_queue_put_syncwait_obj(queue); 2424 2425 return result; 2426 } 2427 2428 static void sync_upd_work(struct work_struct *work) 2429 { 2430 struct panthor_scheduler *sched = container_of(work, 2431 struct panthor_scheduler, 2432 sync_upd_work); 2433 struct panthor_group *group, *tmp; 2434 bool immediate_tick = false; 2435 2436 mutex_lock(&sched->lock); 2437 list_for_each_entry_safe(group, tmp, &sched->groups.waiting, wait_node) { 2438 u32 tested_queues = group->blocked_queues; 2439 u32 unblocked_queues = 0; 2440 2441 while (tested_queues) { 2442 u32 cs_id = ffs(tested_queues) - 1; 2443 int ret; 2444 2445 ret = panthor_queue_eval_syncwait(group, cs_id); 2446 drm_WARN_ON(&group->ptdev->base, ret < 0); 2447 if (ret) 2448 unblocked_queues |= BIT(cs_id); 2449 2450 tested_queues &= ~BIT(cs_id); 2451 } 2452 2453 if (unblocked_queues) { 2454 group->blocked_queues &= ~unblocked_queues; 2455 2456 if (group->csg_id < 0) { 2457 list_move(&group->run_node, 2458 &sched->groups.runnable[group->priority]); 2459 if (group->priority == PANTHOR_CSG_PRIORITY_RT) 2460 immediate_tick = true; 2461 } 2462 } 2463 2464 if (!group->blocked_queues) 2465 list_del_init(&group->wait_node); 2466 } 2467 mutex_unlock(&sched->lock); 2468 2469 if (immediate_tick) 2470 sched_queue_delayed_work(sched, tick, 0); 2471 } 2472 2473 static void group_schedule_locked(struct panthor_group *group, u32 queue_mask) 2474 { 2475 struct panthor_device *ptdev = group->ptdev; 2476 struct panthor_scheduler *sched = ptdev->scheduler; 2477 struct list_head *queue = &sched->groups.runnable[group->priority]; 2478 u64 delay_jiffies = 0; 2479 bool was_idle; 2480 u64 now; 2481 2482 if (!group_can_run(group)) 2483 return; 2484 2485 /* All updated queues are blocked, no need to wake up the scheduler. */ 2486 if ((queue_mask & group->blocked_queues) == queue_mask) 2487 return; 2488 2489 was_idle = group_is_idle(group); 2490 group->idle_queues &= ~queue_mask; 2491 2492 /* Don't mess up with the lists if we're in a middle of a reset. */ 2493 if (atomic_read(&sched->reset.in_progress)) 2494 return; 2495 2496 if (was_idle && !group_is_idle(group)) 2497 list_move_tail(&group->run_node, queue); 2498 2499 /* RT groups are preemptive. */ 2500 if (group->priority == PANTHOR_CSG_PRIORITY_RT) { 2501 sched_queue_delayed_work(sched, tick, 0); 2502 return; 2503 } 2504 2505 /* Some groups might be idle, force an immediate tick to 2506 * re-evaluate. 2507 */ 2508 if (sched->might_have_idle_groups) { 2509 sched_queue_delayed_work(sched, tick, 0); 2510 return; 2511 } 2512 2513 /* Scheduler is ticking, nothing to do. */ 2514 if (sched->resched_target != U64_MAX) { 2515 /* If there are free slots, force immediating ticking. */ 2516 if (sched->used_csg_slot_count < sched->csg_slot_count) 2517 sched_queue_delayed_work(sched, tick, 0); 2518 2519 return; 2520 } 2521 2522 /* Scheduler tick was off, recalculate the resched_target based on the 2523 * last tick event, and queue the scheduler work. 2524 */ 2525 now = get_jiffies_64(); 2526 sched->resched_target = sched->last_tick + sched->tick_period; 2527 if (sched->used_csg_slot_count == sched->csg_slot_count && 2528 time_before64(now, sched->resched_target)) 2529 delay_jiffies = min_t(unsigned long, sched->resched_target - now, ULONG_MAX); 2530 2531 sched_queue_delayed_work(sched, tick, delay_jiffies); 2532 } 2533 2534 static void queue_stop(struct panthor_queue *queue, 2535 struct panthor_job *bad_job) 2536 { 2537 drm_sched_stop(&queue->scheduler, bad_job ? &bad_job->base : NULL); 2538 } 2539 2540 static void queue_start(struct panthor_queue *queue) 2541 { 2542 struct panthor_job *job; 2543 2544 /* Re-assign the parent fences. */ 2545 list_for_each_entry(job, &queue->scheduler.pending_list, base.list) 2546 job->base.s_fence->parent = dma_fence_get(job->done_fence); 2547 2548 drm_sched_start(&queue->scheduler); 2549 } 2550 2551 static void panthor_group_stop(struct panthor_group *group) 2552 { 2553 struct panthor_scheduler *sched = group->ptdev->scheduler; 2554 2555 lockdep_assert_held(&sched->reset.lock); 2556 2557 for (u32 i = 0; i < group->queue_count; i++) 2558 queue_stop(group->queues[i], NULL); 2559 2560 group_get(group); 2561 list_move_tail(&group->run_node, &sched->reset.stopped_groups); 2562 } 2563 2564 static void panthor_group_start(struct panthor_group *group) 2565 { 2566 struct panthor_scheduler *sched = group->ptdev->scheduler; 2567 2568 lockdep_assert_held(&group->ptdev->scheduler->reset.lock); 2569 2570 for (u32 i = 0; i < group->queue_count; i++) 2571 queue_start(group->queues[i]); 2572 2573 if (group_can_run(group)) { 2574 list_move_tail(&group->run_node, 2575 group_is_idle(group) ? 2576 &sched->groups.idle[group->priority] : 2577 &sched->groups.runnable[group->priority]); 2578 } else { 2579 list_del_init(&group->run_node); 2580 list_del_init(&group->wait_node); 2581 group_queue_work(group, term); 2582 } 2583 2584 group_put(group); 2585 } 2586 2587 static void panthor_sched_immediate_tick(struct panthor_device *ptdev) 2588 { 2589 struct panthor_scheduler *sched = ptdev->scheduler; 2590 2591 sched_queue_delayed_work(sched, tick, 0); 2592 } 2593 2594 /** 2595 * panthor_sched_report_mmu_fault() - Report MMU faults to the scheduler. 2596 */ 2597 void panthor_sched_report_mmu_fault(struct panthor_device *ptdev) 2598 { 2599 /* Force a tick to immediately kill faulty groups. */ 2600 if (ptdev->scheduler) 2601 panthor_sched_immediate_tick(ptdev); 2602 } 2603 2604 void panthor_sched_resume(struct panthor_device *ptdev) 2605 { 2606 /* Force a tick to re-evaluate after a resume. */ 2607 panthor_sched_immediate_tick(ptdev); 2608 } 2609 2610 void panthor_sched_suspend(struct panthor_device *ptdev) 2611 { 2612 struct panthor_scheduler *sched = ptdev->scheduler; 2613 struct panthor_csg_slots_upd_ctx upd_ctx; 2614 struct panthor_group *group; 2615 u32 suspended_slots; 2616 u32 i; 2617 2618 mutex_lock(&sched->lock); 2619 csgs_upd_ctx_init(&upd_ctx); 2620 for (i = 0; i < sched->csg_slot_count; i++) { 2621 struct panthor_csg_slot *csg_slot = &sched->csg_slots[i]; 2622 2623 if (csg_slot->group) { 2624 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, i, 2625 group_can_run(csg_slot->group) ? 2626 CSG_STATE_SUSPEND : CSG_STATE_TERMINATE, 2627 CSG_STATE_MASK); 2628 } 2629 } 2630 2631 suspended_slots = upd_ctx.update_mask; 2632 2633 csgs_upd_ctx_apply_locked(ptdev, &upd_ctx); 2634 suspended_slots &= ~upd_ctx.timedout_mask; 2635 2636 if (upd_ctx.timedout_mask) { 2637 u32 slot_mask = upd_ctx.timedout_mask; 2638 2639 drm_err(&ptdev->base, "CSG suspend failed, escalating to termination"); 2640 csgs_upd_ctx_init(&upd_ctx); 2641 while (slot_mask) { 2642 u32 csg_id = ffs(slot_mask) - 1; 2643 2644 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id, 2645 CSG_STATE_TERMINATE, 2646 CSG_STATE_MASK); 2647 slot_mask &= ~BIT(csg_id); 2648 } 2649 2650 csgs_upd_ctx_apply_locked(ptdev, &upd_ctx); 2651 2652 slot_mask = upd_ctx.timedout_mask; 2653 while (slot_mask) { 2654 u32 csg_id = ffs(slot_mask) - 1; 2655 struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id]; 2656 2657 /* Terminate command timedout, but the soft-reset will 2658 * automatically terminate all active groups, so let's 2659 * force the state to halted here. 2660 */ 2661 if (csg_slot->group->state != PANTHOR_CS_GROUP_TERMINATED) 2662 csg_slot->group->state = PANTHOR_CS_GROUP_TERMINATED; 2663 slot_mask &= ~BIT(csg_id); 2664 } 2665 } 2666 2667 /* Flush L2 and LSC caches to make sure suspend state is up-to-date. 2668 * If the flush fails, flag all queues for termination. 2669 */ 2670 if (suspended_slots) { 2671 bool flush_caches_failed = false; 2672 u32 slot_mask = suspended_slots; 2673 2674 if (panthor_gpu_flush_caches(ptdev, CACHE_CLEAN, CACHE_CLEAN, 0)) 2675 flush_caches_failed = true; 2676 2677 while (slot_mask) { 2678 u32 csg_id = ffs(slot_mask) - 1; 2679 struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id]; 2680 2681 if (flush_caches_failed) 2682 csg_slot->group->state = PANTHOR_CS_GROUP_TERMINATED; 2683 else 2684 csg_slot_sync_update_locked(ptdev, csg_id); 2685 2686 slot_mask &= ~BIT(csg_id); 2687 } 2688 } 2689 2690 for (i = 0; i < sched->csg_slot_count; i++) { 2691 struct panthor_csg_slot *csg_slot = &sched->csg_slots[i]; 2692 2693 group = csg_slot->group; 2694 if (!group) 2695 continue; 2696 2697 group_get(group); 2698 2699 if (group->csg_id >= 0) 2700 sched_process_csg_irq_locked(ptdev, group->csg_id); 2701 2702 group_unbind_locked(group); 2703 2704 drm_WARN_ON(&group->ptdev->base, !list_empty(&group->run_node)); 2705 2706 if (group_can_run(group)) { 2707 list_add(&group->run_node, 2708 &sched->groups.idle[group->priority]); 2709 } else { 2710 /* We don't bother stopping the scheduler if the group is 2711 * faulty, the group termination work will finish the job. 2712 */ 2713 list_del_init(&group->wait_node); 2714 group_queue_work(group, term); 2715 } 2716 group_put(group); 2717 } 2718 mutex_unlock(&sched->lock); 2719 } 2720 2721 void panthor_sched_pre_reset(struct panthor_device *ptdev) 2722 { 2723 struct panthor_scheduler *sched = ptdev->scheduler; 2724 struct panthor_group *group, *group_tmp; 2725 u32 i; 2726 2727 mutex_lock(&sched->reset.lock); 2728 atomic_set(&sched->reset.in_progress, true); 2729 2730 /* Cancel all scheduler works. Once this is done, these works can't be 2731 * scheduled again until the reset operation is complete. 2732 */ 2733 cancel_work_sync(&sched->sync_upd_work); 2734 cancel_delayed_work_sync(&sched->tick_work); 2735 2736 panthor_sched_suspend(ptdev); 2737 2738 /* Stop all groups that might still accept jobs, so we don't get passed 2739 * new jobs while we're resetting. 2740 */ 2741 for (i = 0; i < ARRAY_SIZE(sched->groups.runnable); i++) { 2742 /* All groups should be in the idle lists. */ 2743 drm_WARN_ON(&ptdev->base, !list_empty(&sched->groups.runnable[i])); 2744 list_for_each_entry_safe(group, group_tmp, &sched->groups.runnable[i], run_node) 2745 panthor_group_stop(group); 2746 } 2747 2748 for (i = 0; i < ARRAY_SIZE(sched->groups.idle); i++) { 2749 list_for_each_entry_safe(group, group_tmp, &sched->groups.idle[i], run_node) 2750 panthor_group_stop(group); 2751 } 2752 2753 mutex_unlock(&sched->reset.lock); 2754 } 2755 2756 void panthor_sched_post_reset(struct panthor_device *ptdev, bool reset_failed) 2757 { 2758 struct panthor_scheduler *sched = ptdev->scheduler; 2759 struct panthor_group *group, *group_tmp; 2760 2761 mutex_lock(&sched->reset.lock); 2762 2763 list_for_each_entry_safe(group, group_tmp, &sched->reset.stopped_groups, run_node) { 2764 /* Consider all previously running group as terminated if the 2765 * reset failed. 2766 */ 2767 if (reset_failed) 2768 group->state = PANTHOR_CS_GROUP_TERMINATED; 2769 2770 panthor_group_start(group); 2771 } 2772 2773 /* We're done resetting the GPU, clear the reset.in_progress bit so we can 2774 * kick the scheduler. 2775 */ 2776 atomic_set(&sched->reset.in_progress, false); 2777 mutex_unlock(&sched->reset.lock); 2778 2779 /* No need to queue a tick and update syncs if the reset failed. */ 2780 if (!reset_failed) { 2781 sched_queue_delayed_work(sched, tick, 0); 2782 sched_queue_work(sched, sync_upd); 2783 } 2784 } 2785 2786 static void group_sync_upd_work(struct work_struct *work) 2787 { 2788 struct panthor_group *group = 2789 container_of(work, struct panthor_group, sync_upd_work); 2790 struct panthor_job *job, *job_tmp; 2791 LIST_HEAD(done_jobs); 2792 u32 queue_idx; 2793 bool cookie; 2794 2795 cookie = dma_fence_begin_signalling(); 2796 for (queue_idx = 0; queue_idx < group->queue_count; queue_idx++) { 2797 struct panthor_queue *queue = group->queues[queue_idx]; 2798 struct panthor_syncobj_64b *syncobj; 2799 2800 if (!queue) 2801 continue; 2802 2803 syncobj = group->syncobjs->kmap + (queue_idx * sizeof(*syncobj)); 2804 2805 spin_lock(&queue->fence_ctx.lock); 2806 list_for_each_entry_safe(job, job_tmp, &queue->fence_ctx.in_flight_jobs, node) { 2807 if (syncobj->seqno < job->done_fence->seqno) 2808 break; 2809 2810 list_move_tail(&job->node, &done_jobs); 2811 dma_fence_signal_locked(job->done_fence); 2812 } 2813 spin_unlock(&queue->fence_ctx.lock); 2814 } 2815 dma_fence_end_signalling(cookie); 2816 2817 list_for_each_entry_safe(job, job_tmp, &done_jobs, node) { 2818 list_del_init(&job->node); 2819 panthor_job_put(&job->base); 2820 } 2821 2822 group_put(group); 2823 } 2824 2825 static struct dma_fence * 2826 queue_run_job(struct drm_sched_job *sched_job) 2827 { 2828 struct panthor_job *job = container_of(sched_job, struct panthor_job, base); 2829 struct panthor_group *group = job->group; 2830 struct panthor_queue *queue = group->queues[job->queue_idx]; 2831 struct panthor_device *ptdev = group->ptdev; 2832 struct panthor_scheduler *sched = ptdev->scheduler; 2833 u32 ringbuf_size = panthor_kernel_bo_size(queue->ringbuf); 2834 u32 ringbuf_insert = queue->iface.input->insert & (ringbuf_size - 1); 2835 u64 addr_reg = ptdev->csif_info.cs_reg_count - 2836 ptdev->csif_info.unpreserved_cs_reg_count; 2837 u64 val_reg = addr_reg + 2; 2838 u64 sync_addr = panthor_kernel_bo_gpuva(group->syncobjs) + 2839 job->queue_idx * sizeof(struct panthor_syncobj_64b); 2840 u32 waitall_mask = GENMASK(sched->sb_slot_count - 1, 0); 2841 struct dma_fence *done_fence; 2842 int ret; 2843 2844 u64 call_instrs[NUM_INSTRS_PER_SLOT] = { 2845 /* MOV32 rX+2, cs.latest_flush */ 2846 (2ull << 56) | (val_reg << 48) | job->call_info.latest_flush, 2847 2848 /* FLUSH_CACHE2.clean_inv_all.no_wait.signal(0) rX+2 */ 2849 (36ull << 56) | (0ull << 48) | (val_reg << 40) | (0 << 16) | 0x233, 2850 2851 /* MOV48 rX:rX+1, cs.start */ 2852 (1ull << 56) | (addr_reg << 48) | job->call_info.start, 2853 2854 /* MOV32 rX+2, cs.size */ 2855 (2ull << 56) | (val_reg << 48) | job->call_info.size, 2856 2857 /* WAIT(0) => waits for FLUSH_CACHE2 instruction */ 2858 (3ull << 56) | (1 << 16), 2859 2860 /* CALL rX:rX+1, rX+2 */ 2861 (32ull << 56) | (addr_reg << 40) | (val_reg << 32), 2862 2863 /* MOV48 rX:rX+1, sync_addr */ 2864 (1ull << 56) | (addr_reg << 48) | sync_addr, 2865 2866 /* MOV48 rX+2, #1 */ 2867 (1ull << 56) | (val_reg << 48) | 1, 2868 2869 /* WAIT(all) */ 2870 (3ull << 56) | (waitall_mask << 16), 2871 2872 /* SYNC_ADD64.system_scope.propage_err.nowait rX:rX+1, rX+2*/ 2873 (51ull << 56) | (0ull << 48) | (addr_reg << 40) | (val_reg << 32) | (0 << 16) | 1, 2874 2875 /* ERROR_BARRIER, so we can recover from faults at job 2876 * boundaries. 2877 */ 2878 (47ull << 56), 2879 }; 2880 2881 /* Need to be cacheline aligned to please the prefetcher. */ 2882 static_assert(sizeof(call_instrs) % 64 == 0, 2883 "call_instrs is not aligned on a cacheline"); 2884 2885 /* Stream size is zero, nothing to do except making sure all previously 2886 * submitted jobs are done before we signal the 2887 * drm_sched_job::s_fence::finished fence. 2888 */ 2889 if (!job->call_info.size) { 2890 job->done_fence = dma_fence_get(queue->fence_ctx.last_fence); 2891 return dma_fence_get(job->done_fence); 2892 } 2893 2894 ret = pm_runtime_resume_and_get(ptdev->base.dev); 2895 if (drm_WARN_ON(&ptdev->base, ret)) 2896 return ERR_PTR(ret); 2897 2898 mutex_lock(&sched->lock); 2899 if (!group_can_run(group)) { 2900 done_fence = ERR_PTR(-ECANCELED); 2901 goto out_unlock; 2902 } 2903 2904 dma_fence_init(job->done_fence, 2905 &panthor_queue_fence_ops, 2906 &queue->fence_ctx.lock, 2907 queue->fence_ctx.id, 2908 atomic64_inc_return(&queue->fence_ctx.seqno)); 2909 2910 memcpy(queue->ringbuf->kmap + ringbuf_insert, 2911 call_instrs, sizeof(call_instrs)); 2912 2913 panthor_job_get(&job->base); 2914 spin_lock(&queue->fence_ctx.lock); 2915 list_add_tail(&job->node, &queue->fence_ctx.in_flight_jobs); 2916 spin_unlock(&queue->fence_ctx.lock); 2917 2918 job->ringbuf.start = queue->iface.input->insert; 2919 job->ringbuf.end = job->ringbuf.start + sizeof(call_instrs); 2920 2921 /* Make sure the ring buffer is updated before the INSERT 2922 * register. 2923 */ 2924 wmb(); 2925 2926 queue->iface.input->extract = queue->iface.output->extract; 2927 queue->iface.input->insert = job->ringbuf.end; 2928 2929 if (group->csg_id < 0) { 2930 /* If the queue is blocked, we want to keep the timeout running, so we 2931 * can detect unbounded waits and kill the group when that happens. 2932 * Otherwise, we suspend the timeout so the time we spend waiting for 2933 * a CSG slot is not counted. 2934 */ 2935 if (!(group->blocked_queues & BIT(job->queue_idx)) && 2936 !queue->timeout_suspended) { 2937 queue->remaining_time = drm_sched_suspend_timeout(&queue->scheduler); 2938 queue->timeout_suspended = true; 2939 } 2940 2941 group_schedule_locked(group, BIT(job->queue_idx)); 2942 } else { 2943 gpu_write(ptdev, CSF_DOORBELL(queue->doorbell_id), 1); 2944 if (!sched->pm.has_ref && 2945 !(group->blocked_queues & BIT(job->queue_idx))) { 2946 pm_runtime_get(ptdev->base.dev); 2947 sched->pm.has_ref = true; 2948 } 2949 panthor_devfreq_record_busy(sched->ptdev); 2950 } 2951 2952 /* Update the last fence. */ 2953 dma_fence_put(queue->fence_ctx.last_fence); 2954 queue->fence_ctx.last_fence = dma_fence_get(job->done_fence); 2955 2956 done_fence = dma_fence_get(job->done_fence); 2957 2958 out_unlock: 2959 mutex_unlock(&sched->lock); 2960 pm_runtime_mark_last_busy(ptdev->base.dev); 2961 pm_runtime_put_autosuspend(ptdev->base.dev); 2962 2963 return done_fence; 2964 } 2965 2966 static enum drm_gpu_sched_stat 2967 queue_timedout_job(struct drm_sched_job *sched_job) 2968 { 2969 struct panthor_job *job = container_of(sched_job, struct panthor_job, base); 2970 struct panthor_group *group = job->group; 2971 struct panthor_device *ptdev = group->ptdev; 2972 struct panthor_scheduler *sched = ptdev->scheduler; 2973 struct panthor_queue *queue = group->queues[job->queue_idx]; 2974 2975 drm_warn(&ptdev->base, "job timeout\n"); 2976 2977 drm_WARN_ON(&ptdev->base, atomic_read(&sched->reset.in_progress)); 2978 2979 queue_stop(queue, job); 2980 2981 mutex_lock(&sched->lock); 2982 group->timedout = true; 2983 if (group->csg_id >= 0) { 2984 sched_queue_delayed_work(ptdev->scheduler, tick, 0); 2985 } else { 2986 /* Remove from the run queues, so the scheduler can't 2987 * pick the group on the next tick. 2988 */ 2989 list_del_init(&group->run_node); 2990 list_del_init(&group->wait_node); 2991 2992 group_queue_work(group, term); 2993 } 2994 mutex_unlock(&sched->lock); 2995 2996 queue_start(queue); 2997 2998 return DRM_GPU_SCHED_STAT_NOMINAL; 2999 } 3000 3001 static void queue_free_job(struct drm_sched_job *sched_job) 3002 { 3003 drm_sched_job_cleanup(sched_job); 3004 panthor_job_put(sched_job); 3005 } 3006 3007 static const struct drm_sched_backend_ops panthor_queue_sched_ops = { 3008 .run_job = queue_run_job, 3009 .timedout_job = queue_timedout_job, 3010 .free_job = queue_free_job, 3011 }; 3012 3013 static struct panthor_queue * 3014 group_create_queue(struct panthor_group *group, 3015 const struct drm_panthor_queue_create *args) 3016 { 3017 struct drm_gpu_scheduler *drm_sched; 3018 struct panthor_queue *queue; 3019 int ret; 3020 3021 if (args->pad[0] || args->pad[1] || args->pad[2]) 3022 return ERR_PTR(-EINVAL); 3023 3024 if (args->ringbuf_size < SZ_4K || args->ringbuf_size > SZ_64K || 3025 !is_power_of_2(args->ringbuf_size)) 3026 return ERR_PTR(-EINVAL); 3027 3028 if (args->priority > CSF_MAX_QUEUE_PRIO) 3029 return ERR_PTR(-EINVAL); 3030 3031 queue = kzalloc(sizeof(*queue), GFP_KERNEL); 3032 if (!queue) 3033 return ERR_PTR(-ENOMEM); 3034 3035 queue->fence_ctx.id = dma_fence_context_alloc(1); 3036 spin_lock_init(&queue->fence_ctx.lock); 3037 INIT_LIST_HEAD(&queue->fence_ctx.in_flight_jobs); 3038 3039 queue->priority = args->priority; 3040 3041 queue->ringbuf = panthor_kernel_bo_create(group->ptdev, group->vm, 3042 args->ringbuf_size, 3043 DRM_PANTHOR_BO_NO_MMAP, 3044 DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC | 3045 DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED, 3046 PANTHOR_VM_KERNEL_AUTO_VA); 3047 if (IS_ERR(queue->ringbuf)) { 3048 ret = PTR_ERR(queue->ringbuf); 3049 goto err_free_queue; 3050 } 3051 3052 ret = panthor_kernel_bo_vmap(queue->ringbuf); 3053 if (ret) 3054 goto err_free_queue; 3055 3056 queue->iface.mem = panthor_fw_alloc_queue_iface_mem(group->ptdev, 3057 &queue->iface.input, 3058 &queue->iface.output, 3059 &queue->iface.input_fw_va, 3060 &queue->iface.output_fw_va); 3061 if (IS_ERR(queue->iface.mem)) { 3062 ret = PTR_ERR(queue->iface.mem); 3063 goto err_free_queue; 3064 } 3065 3066 ret = drm_sched_init(&queue->scheduler, &panthor_queue_sched_ops, 3067 group->ptdev->scheduler->wq, 1, 3068 args->ringbuf_size / (NUM_INSTRS_PER_SLOT * sizeof(u64)), 3069 0, msecs_to_jiffies(JOB_TIMEOUT_MS), 3070 group->ptdev->reset.wq, 3071 NULL, "panthor-queue", group->ptdev->base.dev); 3072 if (ret) 3073 goto err_free_queue; 3074 3075 drm_sched = &queue->scheduler; 3076 ret = drm_sched_entity_init(&queue->entity, 0, &drm_sched, 1, NULL); 3077 3078 return queue; 3079 3080 err_free_queue: 3081 group_free_queue(group, queue); 3082 return ERR_PTR(ret); 3083 } 3084 3085 #define MAX_GROUPS_PER_POOL 128 3086 3087 int panthor_group_create(struct panthor_file *pfile, 3088 const struct drm_panthor_group_create *group_args, 3089 const struct drm_panthor_queue_create *queue_args) 3090 { 3091 struct panthor_device *ptdev = pfile->ptdev; 3092 struct panthor_group_pool *gpool = pfile->groups; 3093 struct panthor_scheduler *sched = ptdev->scheduler; 3094 struct panthor_fw_csg_iface *csg_iface = panthor_fw_get_csg_iface(ptdev, 0); 3095 struct panthor_group *group = NULL; 3096 u32 gid, i, suspend_size; 3097 int ret; 3098 3099 if (group_args->pad) 3100 return -EINVAL; 3101 3102 if (group_args->priority >= PANTHOR_CSG_PRIORITY_COUNT) 3103 return -EINVAL; 3104 3105 if ((group_args->compute_core_mask & ~ptdev->gpu_info.shader_present) || 3106 (group_args->fragment_core_mask & ~ptdev->gpu_info.shader_present) || 3107 (group_args->tiler_core_mask & ~ptdev->gpu_info.tiler_present)) 3108 return -EINVAL; 3109 3110 if (hweight64(group_args->compute_core_mask) < group_args->max_compute_cores || 3111 hweight64(group_args->fragment_core_mask) < group_args->max_fragment_cores || 3112 hweight64(group_args->tiler_core_mask) < group_args->max_tiler_cores) 3113 return -EINVAL; 3114 3115 group = kzalloc(sizeof(*group), GFP_KERNEL); 3116 if (!group) 3117 return -ENOMEM; 3118 3119 spin_lock_init(&group->fatal_lock); 3120 kref_init(&group->refcount); 3121 group->state = PANTHOR_CS_GROUP_CREATED; 3122 group->csg_id = -1; 3123 3124 group->ptdev = ptdev; 3125 group->max_compute_cores = group_args->max_compute_cores; 3126 group->compute_core_mask = group_args->compute_core_mask; 3127 group->max_fragment_cores = group_args->max_fragment_cores; 3128 group->fragment_core_mask = group_args->fragment_core_mask; 3129 group->max_tiler_cores = group_args->max_tiler_cores; 3130 group->tiler_core_mask = group_args->tiler_core_mask; 3131 group->priority = group_args->priority; 3132 3133 INIT_LIST_HEAD(&group->wait_node); 3134 INIT_LIST_HEAD(&group->run_node); 3135 INIT_WORK(&group->term_work, group_term_work); 3136 INIT_WORK(&group->sync_upd_work, group_sync_upd_work); 3137 INIT_WORK(&group->tiler_oom_work, group_tiler_oom_work); 3138 INIT_WORK(&group->release_work, group_release_work); 3139 3140 group->vm = panthor_vm_pool_get_vm(pfile->vms, group_args->vm_id); 3141 if (!group->vm) { 3142 ret = -EINVAL; 3143 goto err_put_group; 3144 } 3145 3146 suspend_size = csg_iface->control->suspend_size; 3147 group->suspend_buf = panthor_fw_alloc_suspend_buf_mem(ptdev, suspend_size); 3148 if (IS_ERR(group->suspend_buf)) { 3149 ret = PTR_ERR(group->suspend_buf); 3150 group->suspend_buf = NULL; 3151 goto err_put_group; 3152 } 3153 3154 suspend_size = csg_iface->control->protm_suspend_size; 3155 group->protm_suspend_buf = panthor_fw_alloc_suspend_buf_mem(ptdev, suspend_size); 3156 if (IS_ERR(group->protm_suspend_buf)) { 3157 ret = PTR_ERR(group->protm_suspend_buf); 3158 group->protm_suspend_buf = NULL; 3159 goto err_put_group; 3160 } 3161 3162 group->syncobjs = panthor_kernel_bo_create(ptdev, group->vm, 3163 group_args->queues.count * 3164 sizeof(struct panthor_syncobj_64b), 3165 DRM_PANTHOR_BO_NO_MMAP, 3166 DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC | 3167 DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED, 3168 PANTHOR_VM_KERNEL_AUTO_VA); 3169 if (IS_ERR(group->syncobjs)) { 3170 ret = PTR_ERR(group->syncobjs); 3171 goto err_put_group; 3172 } 3173 3174 ret = panthor_kernel_bo_vmap(group->syncobjs); 3175 if (ret) 3176 goto err_put_group; 3177 3178 memset(group->syncobjs->kmap, 0, 3179 group_args->queues.count * sizeof(struct panthor_syncobj_64b)); 3180 3181 for (i = 0; i < group_args->queues.count; i++) { 3182 group->queues[i] = group_create_queue(group, &queue_args[i]); 3183 if (IS_ERR(group->queues[i])) { 3184 ret = PTR_ERR(group->queues[i]); 3185 group->queues[i] = NULL; 3186 goto err_put_group; 3187 } 3188 3189 group->queue_count++; 3190 } 3191 3192 group->idle_queues = GENMASK(group->queue_count - 1, 0); 3193 3194 ret = xa_alloc(&gpool->xa, &gid, group, XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL); 3195 if (ret) 3196 goto err_put_group; 3197 3198 mutex_lock(&sched->reset.lock); 3199 if (atomic_read(&sched->reset.in_progress)) { 3200 panthor_group_stop(group); 3201 } else { 3202 mutex_lock(&sched->lock); 3203 list_add_tail(&group->run_node, 3204 &sched->groups.idle[group->priority]); 3205 mutex_unlock(&sched->lock); 3206 } 3207 mutex_unlock(&sched->reset.lock); 3208 3209 return gid; 3210 3211 err_put_group: 3212 group_put(group); 3213 return ret; 3214 } 3215 3216 int panthor_group_destroy(struct panthor_file *pfile, u32 group_handle) 3217 { 3218 struct panthor_group_pool *gpool = pfile->groups; 3219 struct panthor_device *ptdev = pfile->ptdev; 3220 struct panthor_scheduler *sched = ptdev->scheduler; 3221 struct panthor_group *group; 3222 3223 group = xa_erase(&gpool->xa, group_handle); 3224 if (!group) 3225 return -EINVAL; 3226 3227 for (u32 i = 0; i < group->queue_count; i++) { 3228 if (group->queues[i]) 3229 drm_sched_entity_destroy(&group->queues[i]->entity); 3230 } 3231 3232 mutex_lock(&sched->reset.lock); 3233 mutex_lock(&sched->lock); 3234 group->destroyed = true; 3235 if (group->csg_id >= 0) { 3236 sched_queue_delayed_work(sched, tick, 0); 3237 } else if (!atomic_read(&sched->reset.in_progress)) { 3238 /* Remove from the run queues, so the scheduler can't 3239 * pick the group on the next tick. 3240 */ 3241 list_del_init(&group->run_node); 3242 list_del_init(&group->wait_node); 3243 group_queue_work(group, term); 3244 } 3245 mutex_unlock(&sched->lock); 3246 mutex_unlock(&sched->reset.lock); 3247 3248 group_put(group); 3249 return 0; 3250 } 3251 3252 static struct panthor_group *group_from_handle(struct panthor_group_pool *pool, 3253 u32 group_handle) 3254 { 3255 struct panthor_group *group; 3256 3257 xa_lock(&pool->xa); 3258 group = group_get(xa_load(&pool->xa, group_handle)); 3259 xa_unlock(&pool->xa); 3260 3261 return group; 3262 } 3263 3264 int panthor_group_get_state(struct panthor_file *pfile, 3265 struct drm_panthor_group_get_state *get_state) 3266 { 3267 struct panthor_group_pool *gpool = pfile->groups; 3268 struct panthor_device *ptdev = pfile->ptdev; 3269 struct panthor_scheduler *sched = ptdev->scheduler; 3270 struct panthor_group *group; 3271 3272 if (get_state->pad) 3273 return -EINVAL; 3274 3275 group = group_from_handle(gpool, get_state->group_handle); 3276 if (!group) 3277 return -EINVAL; 3278 3279 memset(get_state, 0, sizeof(*get_state)); 3280 3281 mutex_lock(&sched->lock); 3282 if (group->timedout) 3283 get_state->state |= DRM_PANTHOR_GROUP_STATE_TIMEDOUT; 3284 if (group->fatal_queues) { 3285 get_state->state |= DRM_PANTHOR_GROUP_STATE_FATAL_FAULT; 3286 get_state->fatal_queues = group->fatal_queues; 3287 } 3288 mutex_unlock(&sched->lock); 3289 3290 group_put(group); 3291 return 0; 3292 } 3293 3294 int panthor_group_pool_create(struct panthor_file *pfile) 3295 { 3296 struct panthor_group_pool *gpool; 3297 3298 gpool = kzalloc(sizeof(*gpool), GFP_KERNEL); 3299 if (!gpool) 3300 return -ENOMEM; 3301 3302 xa_init_flags(&gpool->xa, XA_FLAGS_ALLOC1); 3303 pfile->groups = gpool; 3304 return 0; 3305 } 3306 3307 void panthor_group_pool_destroy(struct panthor_file *pfile) 3308 { 3309 struct panthor_group_pool *gpool = pfile->groups; 3310 struct panthor_group *group; 3311 unsigned long i; 3312 3313 if (IS_ERR_OR_NULL(gpool)) 3314 return; 3315 3316 xa_for_each(&gpool->xa, i, group) 3317 panthor_group_destroy(pfile, i); 3318 3319 xa_destroy(&gpool->xa); 3320 kfree(gpool); 3321 pfile->groups = NULL; 3322 } 3323 3324 static void job_release(struct kref *ref) 3325 { 3326 struct panthor_job *job = container_of(ref, struct panthor_job, refcount); 3327 3328 drm_WARN_ON(&job->group->ptdev->base, !list_empty(&job->node)); 3329 3330 if (job->base.s_fence) 3331 drm_sched_job_cleanup(&job->base); 3332 3333 if (job->done_fence && job->done_fence->ops) 3334 dma_fence_put(job->done_fence); 3335 else 3336 dma_fence_free(job->done_fence); 3337 3338 group_put(job->group); 3339 3340 kfree(job); 3341 } 3342 3343 struct drm_sched_job *panthor_job_get(struct drm_sched_job *sched_job) 3344 { 3345 if (sched_job) { 3346 struct panthor_job *job = container_of(sched_job, struct panthor_job, base); 3347 3348 kref_get(&job->refcount); 3349 } 3350 3351 return sched_job; 3352 } 3353 3354 void panthor_job_put(struct drm_sched_job *sched_job) 3355 { 3356 struct panthor_job *job = container_of(sched_job, struct panthor_job, base); 3357 3358 if (sched_job) 3359 kref_put(&job->refcount, job_release); 3360 } 3361 3362 struct panthor_vm *panthor_job_vm(struct drm_sched_job *sched_job) 3363 { 3364 struct panthor_job *job = container_of(sched_job, struct panthor_job, base); 3365 3366 return job->group->vm; 3367 } 3368 3369 struct drm_sched_job * 3370 panthor_job_create(struct panthor_file *pfile, 3371 u16 group_handle, 3372 const struct drm_panthor_queue_submit *qsubmit) 3373 { 3374 struct panthor_group_pool *gpool = pfile->groups; 3375 struct panthor_job *job; 3376 int ret; 3377 3378 if (qsubmit->pad) 3379 return ERR_PTR(-EINVAL); 3380 3381 /* If stream_addr is zero, so stream_size should be. */ 3382 if ((qsubmit->stream_size == 0) != (qsubmit->stream_addr == 0)) 3383 return ERR_PTR(-EINVAL); 3384 3385 /* Make sure the address is aligned on 64-byte (cacheline) and the size is 3386 * aligned on 8-byte (instruction size). 3387 */ 3388 if ((qsubmit->stream_addr & 63) || (qsubmit->stream_size & 7)) 3389 return ERR_PTR(-EINVAL); 3390 3391 /* bits 24:30 must be zero. */ 3392 if (qsubmit->latest_flush & GENMASK(30, 24)) 3393 return ERR_PTR(-EINVAL); 3394 3395 job = kzalloc(sizeof(*job), GFP_KERNEL); 3396 if (!job) 3397 return ERR_PTR(-ENOMEM); 3398 3399 kref_init(&job->refcount); 3400 job->queue_idx = qsubmit->queue_index; 3401 job->call_info.size = qsubmit->stream_size; 3402 job->call_info.start = qsubmit->stream_addr; 3403 job->call_info.latest_flush = qsubmit->latest_flush; 3404 INIT_LIST_HEAD(&job->node); 3405 3406 job->group = group_from_handle(gpool, group_handle); 3407 if (!job->group) { 3408 ret = -EINVAL; 3409 goto err_put_job; 3410 } 3411 3412 if (job->queue_idx >= job->group->queue_count || 3413 !job->group->queues[job->queue_idx]) { 3414 ret = -EINVAL; 3415 goto err_put_job; 3416 } 3417 3418 /* Empty command streams don't need a fence, they'll pick the one from 3419 * the previously submitted job. 3420 */ 3421 if (job->call_info.size) { 3422 job->done_fence = kzalloc(sizeof(*job->done_fence), GFP_KERNEL); 3423 if (!job->done_fence) { 3424 ret = -ENOMEM; 3425 goto err_put_job; 3426 } 3427 } 3428 3429 ret = drm_sched_job_init(&job->base, 3430 &job->group->queues[job->queue_idx]->entity, 3431 1, job->group); 3432 if (ret) 3433 goto err_put_job; 3434 3435 return &job->base; 3436 3437 err_put_job: 3438 panthor_job_put(&job->base); 3439 return ERR_PTR(ret); 3440 } 3441 3442 void panthor_job_update_resvs(struct drm_exec *exec, struct drm_sched_job *sched_job) 3443 { 3444 struct panthor_job *job = container_of(sched_job, struct panthor_job, base); 3445 3446 panthor_vm_update_resvs(job->group->vm, exec, &sched_job->s_fence->finished, 3447 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP); 3448 } 3449 3450 void panthor_sched_unplug(struct panthor_device *ptdev) 3451 { 3452 struct panthor_scheduler *sched = ptdev->scheduler; 3453 3454 cancel_delayed_work_sync(&sched->tick_work); 3455 3456 mutex_lock(&sched->lock); 3457 if (sched->pm.has_ref) { 3458 pm_runtime_put(ptdev->base.dev); 3459 sched->pm.has_ref = false; 3460 } 3461 mutex_unlock(&sched->lock); 3462 } 3463 3464 static void panthor_sched_fini(struct drm_device *ddev, void *res) 3465 { 3466 struct panthor_scheduler *sched = res; 3467 int prio; 3468 3469 if (!sched || !sched->csg_slot_count) 3470 return; 3471 3472 cancel_delayed_work_sync(&sched->tick_work); 3473 3474 if (sched->wq) 3475 destroy_workqueue(sched->wq); 3476 3477 if (sched->heap_alloc_wq) 3478 destroy_workqueue(sched->heap_alloc_wq); 3479 3480 for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) { 3481 drm_WARN_ON(ddev, !list_empty(&sched->groups.runnable[prio])); 3482 drm_WARN_ON(ddev, !list_empty(&sched->groups.idle[prio])); 3483 } 3484 3485 drm_WARN_ON(ddev, !list_empty(&sched->groups.waiting)); 3486 } 3487 3488 int panthor_sched_init(struct panthor_device *ptdev) 3489 { 3490 struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev); 3491 struct panthor_fw_csg_iface *csg_iface = panthor_fw_get_csg_iface(ptdev, 0); 3492 struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, 0, 0); 3493 struct panthor_scheduler *sched; 3494 u32 gpu_as_count, num_groups; 3495 int prio, ret; 3496 3497 sched = drmm_kzalloc(&ptdev->base, sizeof(*sched), GFP_KERNEL); 3498 if (!sched) 3499 return -ENOMEM; 3500 3501 /* The highest bit in JOB_INT_* is reserved for globabl IRQs. That 3502 * leaves 31 bits for CSG IRQs, hence the MAX_CSGS clamp here. 3503 */ 3504 num_groups = min_t(u32, MAX_CSGS, glb_iface->control->group_num); 3505 3506 /* The FW-side scheduler might deadlock if two groups with the same 3507 * priority try to access a set of resources that overlaps, with part 3508 * of the resources being allocated to one group and the other part to 3509 * the other group, both groups waiting for the remaining resources to 3510 * be allocated. To avoid that, it is recommended to assign each CSG a 3511 * different priority. In theory we could allow several groups to have 3512 * the same CSG priority if they don't request the same resources, but 3513 * that makes the scheduling logic more complicated, so let's clamp 3514 * the number of CSG slots to MAX_CSG_PRIO + 1 for now. 3515 */ 3516 num_groups = min_t(u32, MAX_CSG_PRIO + 1, num_groups); 3517 3518 /* We need at least one AS for the MCU and one for the GPU contexts. */ 3519 gpu_as_count = hweight32(ptdev->gpu_info.as_present & GENMASK(31, 1)); 3520 if (!gpu_as_count) { 3521 drm_err(&ptdev->base, "Not enough AS (%d, expected at least 2)", 3522 gpu_as_count + 1); 3523 return -EINVAL; 3524 } 3525 3526 sched->ptdev = ptdev; 3527 sched->sb_slot_count = CS_FEATURES_SCOREBOARDS(cs_iface->control->features); 3528 sched->csg_slot_count = num_groups; 3529 sched->cs_slot_count = csg_iface->control->stream_num; 3530 sched->as_slot_count = gpu_as_count; 3531 ptdev->csif_info.csg_slot_count = sched->csg_slot_count; 3532 ptdev->csif_info.cs_slot_count = sched->cs_slot_count; 3533 ptdev->csif_info.scoreboard_slot_count = sched->sb_slot_count; 3534 3535 sched->last_tick = 0; 3536 sched->resched_target = U64_MAX; 3537 sched->tick_period = msecs_to_jiffies(10); 3538 INIT_DELAYED_WORK(&sched->tick_work, tick_work); 3539 INIT_WORK(&sched->sync_upd_work, sync_upd_work); 3540 INIT_WORK(&sched->fw_events_work, process_fw_events_work); 3541 3542 ret = drmm_mutex_init(&ptdev->base, &sched->lock); 3543 if (ret) 3544 return ret; 3545 3546 for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) { 3547 INIT_LIST_HEAD(&sched->groups.runnable[prio]); 3548 INIT_LIST_HEAD(&sched->groups.idle[prio]); 3549 } 3550 INIT_LIST_HEAD(&sched->groups.waiting); 3551 3552 ret = drmm_mutex_init(&ptdev->base, &sched->reset.lock); 3553 if (ret) 3554 return ret; 3555 3556 INIT_LIST_HEAD(&sched->reset.stopped_groups); 3557 3558 /* sched->heap_alloc_wq will be used for heap chunk allocation on 3559 * tiler OOM events, which means we can't use the same workqueue for 3560 * the scheduler because works queued by the scheduler are in 3561 * the dma-signalling path. Allocate a dedicated heap_alloc_wq to 3562 * work around this limitation. 3563 * 3564 * FIXME: Ultimately, what we need is a failable/non-blocking GEM 3565 * allocation path that we can call when a heap OOM is reported. The 3566 * FW is smart enough to fall back on other methods if the kernel can't 3567 * allocate memory, and fail the tiling job if none of these 3568 * countermeasures worked. 3569 * 3570 * Set WQ_MEM_RECLAIM on sched->wq to unblock the situation when the 3571 * system is running out of memory. 3572 */ 3573 sched->heap_alloc_wq = alloc_workqueue("panthor-heap-alloc", WQ_UNBOUND, 0); 3574 sched->wq = alloc_workqueue("panthor-csf-sched", WQ_MEM_RECLAIM | WQ_UNBOUND, 0); 3575 if (!sched->wq || !sched->heap_alloc_wq) { 3576 panthor_sched_fini(&ptdev->base, sched); 3577 drm_err(&ptdev->base, "Failed to allocate the workqueues"); 3578 return -ENOMEM; 3579 } 3580 3581 ret = drmm_add_action_or_reset(&ptdev->base, panthor_sched_fini, sched); 3582 if (ret) 3583 return ret; 3584 3585 ptdev->scheduler = sched; 3586 return 0; 3587 } 3588