xref: /linux/drivers/gpu/drm/panthor/panthor_sched.c (revision b9a14d54ab2bf0c09409f373a2120de65046178a)
1 // SPDX-License-Identifier: GPL-2.0 or MIT
2 /* Copyright 2023 Collabora ltd. */
3 
4 #include <drm/drm_drv.h>
5 #include <drm/drm_exec.h>
6 #include <drm/drm_gem_shmem_helper.h>
7 #include <drm/drm_managed.h>
8 #include <drm/gpu_scheduler.h>
9 #include <drm/panthor_drm.h>
10 
11 #include <linux/build_bug.h>
12 #include <linux/clk.h>
13 #include <linux/delay.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/dma-resv.h>
16 #include <linux/firmware.h>
17 #include <linux/interrupt.h>
18 #include <linux/io.h>
19 #include <linux/iopoll.h>
20 #include <linux/iosys-map.h>
21 #include <linux/module.h>
22 #include <linux/platform_device.h>
23 #include <linux/pm_runtime.h>
24 
25 #include "panthor_devfreq.h"
26 #include "panthor_device.h"
27 #include "panthor_fw.h"
28 #include "panthor_gem.h"
29 #include "panthor_gpu.h"
30 #include "panthor_heap.h"
31 #include "panthor_mmu.h"
32 #include "panthor_regs.h"
33 #include "panthor_sched.h"
34 
35 /**
36  * DOC: Scheduler
37  *
38  * Mali CSF hardware adopts a firmware-assisted scheduling model, where
39  * the firmware takes care of scheduling aspects, to some extent.
40  *
41  * Scheduling happens at the scheduling group level: each group contains
42  * 1 to N queues (N is FW/hardware dependent, and exposed through the
43  * firmware interface). Each queue is assigned a command stream ring
44  * buffer, which serves as a way to get jobs submitted to the GPU, among
45  * other things.
46  *
47  * The firmware can schedule a maximum of M groups (M is FW/hardware
48  * dependent, and exposed through the firmware interface). Past this
49  * maximum number of groups, the kernel must take care of rotating the
50  * groups passed to the firmware so every group gets a chance to have
51  * its queues scheduled for execution.
52  *
53  * The current implementation only supports kernel-mode queues.
54  * In other words, userspace doesn't have access to the ring-buffer.
55  * Instead, userspace passes indirect command stream buffers that are
56  * called from the queue ring-buffer by the kernel using a pre-defined
57  * sequence of command stream instructions to ensure the userspace driver
58  * always gets consistent results (cache maintenance,
59  * synchronization, ...).
60  *
61  * We rely on the drm_gpu_scheduler framework to deal with job
62  * dependencies and submission. Like any other driver dealing with a
63  * FW-scheduler, we use the 1:1 entity:scheduler mode, such that each
64  * entity has its own job scheduler. When a job is ready to be executed
65  * (all its dependencies are met), it is pushed to the appropriate
66  * queue ring-buffer, and the group is scheduled for execution if it
67  * wasn't already active.
68  *
69  * Kernel-side group scheduling is timeslice-based. When there are fewer
70  * groups than slots, the periodic tick is disabled and we just let the
71  * FW schedule the active groups. When there are more groups than slots,
72  * we give each group a chance to execute for a given amount of time,
73  * then re-evaluate and pick new groups to schedule. The group selection
74  * algorithm is based on priority+round-robin (a simplified sketch
75  * follows this comment block).
76  *
77  * Even though user-mode queues are out of scope right now, the
78  * current design takes them into account by avoiding any guess on the
79  * group/queue state that would be based on information we wouldn't have
80  * if userspace was in charge of the ring-buffer. That's also one of the
81  * reasons we don't do 'cooperative' scheduling (encoding FW group slot
82  * reservation as dma_fence that would be returned from the
83  * drm_gpu_scheduler::prepare_job() hook, and treating group rotation as
84  * a queue of waiters, ordered by job submission order). This approach
85  * would work for kernel-mode queues, but would make user-mode queues a
86  * lot more complicated to retrofit.
87  */
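
/*
 * A simplified sketch of the timeslice-based group selection described above.
 * This is illustrative only: the real logic lives in the tick work further
 * down in this file, and the names used here (pick_group(), picked_count,
 * full) are placeholders.
 *
 *	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0 && !full; prio--) {
 *		list_for_each_entry(group, &sched->groups.runnable[prio], run_node) {
 *			if (picked_count == sched->csg_slot_count) {
 *				full = true;
 *				break;
 *			}
 *			pick_group(group);
 *			picked_count++;
 *		}
 *	}
 *
 * Groups that got a chance to run are rotated to the tail of their priority
 * list, which is roughly what provides the round-robin behavior within a
 * priority level when there are more runnable groups than FW slots.
 */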
88 
89 #define JOB_TIMEOUT_MS				5000
90 
91 #define MIN_CS_PER_CSG				8
92 
93 #define MIN_CSGS				3
94 #define MAX_CSG_PRIO				0xf
95 
96 struct panthor_group;
97 
98 /**
99  * struct panthor_csg_slot - Command stream group slot
100  *
101  * This represents a FW slot for a scheduling group.
102  */
103 struct panthor_csg_slot {
104 	/** @group: Scheduling group bound to this slot. */
105 	struct panthor_group *group;
106 
107 	/** @priority: Group priority. */
108 	u8 priority;
109 
110 	/**
111 	 * @idle: True if the group bound to this slot is idle.
112 	 *
113 	 * A group is idle when none of its queues have anything waiting for
114 	 * execution, or when all its queues are blocked waiting for something
115 	 * to happen (e.g. a synchronization object to be signaled).
116 	 */
117 	bool idle;
118 };
119 
120 /**
121  * enum panthor_csg_priority - Group priority
122  */
123 enum panthor_csg_priority {
124 	/** @PANTHOR_CSG_PRIORITY_LOW: Low priority group. */
125 	PANTHOR_CSG_PRIORITY_LOW = 0,
126 
127 	/** @PANTHOR_CSG_PRIORITY_MEDIUM: Medium priority group. */
128 	PANTHOR_CSG_PRIORITY_MEDIUM,
129 
130 	/** @PANTHOR_CSG_PRIORITY_HIGH: High priority group. */
131 	PANTHOR_CSG_PRIORITY_HIGH,
132 
133 	/**
134 	 * @PANTHOR_CSG_PRIORITY_RT: Real-time priority group.
135 	 *
136 	 * Real-time priority allows a group to preempt the scheduling of
137 	 * other non-real-time groups. When such a group becomes executable,
138 	 * it will evict the group with the lowest non-rt priority if
139 	 * there's no free group slot available.
140 	 */
141 	PANTHOR_CSG_PRIORITY_RT,
142 
143 	/** @PANTHOR_CSG_PRIORITY_COUNT: Number of priority levels. */
144 	PANTHOR_CSG_PRIORITY_COUNT,
145 };
146 
147 /**
148  * struct panthor_scheduler - Object used to manage the scheduler
149  */
150 struct panthor_scheduler {
151 	/** @ptdev: Device. */
152 	struct panthor_device *ptdev;
153 
154 	/**
155 	 * @wq: Workqueue used by our internal scheduler logic and
156 	 * drm_gpu_scheduler.
157 	 *
158 	 * Used for the scheduler tick, group updates or other kinds of FW
159 	 * event processing that can't be handled in the threaded interrupt
160 	 * path. Also passed to the drm_gpu_scheduler instances embedded
161 	 * in panthor_queue.
162 	 */
163 	struct workqueue_struct *wq;
164 
165 	/**
166 	 * @heap_alloc_wq: Workqueue used to schedule tiler_oom work items.
167 	 *
168 	 * We have a queue dedicated to heap chunk allocation work to avoid
169 	 * blocking the rest of the scheduler if the allocation tries to
170 	 * reclaim memory.
171 	 */
172 	struct workqueue_struct *heap_alloc_wq;
173 
174 	/** @tick_work: Work executed on a scheduling tick. */
175 	struct delayed_work tick_work;
176 
177 	/**
178 	 * @sync_upd_work: Work used to process synchronization object updates.
179 	 *
180 	 * We use this work to unblock queues/groups that were waiting on a
181 	 * synchronization object.
182 	 */
183 	struct work_struct sync_upd_work;
184 
185 	/**
186 	 * @fw_events_work: Work used to process FW events outside the interrupt path.
187 	 *
188 	 * Even if the interrupt is threaded, we need any event processing
189 	 * that requires taking the panthor_scheduler::lock to be processed
190 	 * outside the interrupt path so we don't block the tick logic when
191 	 * it calls panthor_fw_{csg,wait}_wait_acks(). Since most of the
192 	 * event processing requires taking this lock, we just delegate all
193 	 * FW event processing to the scheduler workqueue.
194 	 */
195 	struct work_struct fw_events_work;
196 
197 	/**
198 	 * @fw_events: Bitmask encoding pending FW events.
199 	 */
200 	atomic_t fw_events;
201 
202 	/**
203 	 * @resched_target: When the next tick should occur.
204 	 *
205 	 * Expressed in jiffies.
206 	 */
207 	u64 resched_target;
208 
209 	/**
210 	 * @last_tick: When the last tick occurred.
211 	 *
212 	 * Expressed in jiffies.
213 	 */
214 	u64 last_tick;
215 
216 	/** @tick_period: Tick period in jiffies. */
217 	u64 tick_period;
218 
219 	/**
220 	 * @lock: Lock protecting access to all the scheduler fields.
221 	 *
222 	 * Should be taken in the tick work, the irq handler, and anywhere the @groups
223 	 * fields are touched.
224 	 */
225 	struct mutex lock;
226 
227 	/** @groups: Various lists used to classify groups. */
228 	struct {
229 		/**
230 		 * @runnable: Runnable group lists.
231 		 *
232 		 * When a group has queues that want to execute something,
233 		 * its panthor_group::run_node should be inserted here.
234 		 *
235 		 * One list per-priority.
236 		 */
237 		struct list_head runnable[PANTHOR_CSG_PRIORITY_COUNT];
238 
239 		/**
240 		 * @idle: Idle group lists.
241 		 *
242 		 * When all queues of a group are idle (either because they
243 		 * have nothing to execute, or because they are blocked), the
244 		 * panthor_group::run_node field should be inserted here.
245 		 *
246 		 * One list per-priority.
247 		 */
248 		struct list_head idle[PANTHOR_CSG_PRIORITY_COUNT];
249 
250 		/**
251 		 * @waiting: List of groups whose queues are blocked on a
252 		 * synchronization object.
253 		 *
254 		 * Insert panthor_group::wait_node here when a group is waiting
255 		 * for synchronization objects to be signaled.
256 		 *
257 		 * This list is evaluated in the @sync_upd_work work.
258 		 */
259 		struct list_head waiting;
260 	} groups;
261 
262 	/**
263 	 * @csg_slots: FW command stream group slots.
264 	 */
265 	struct panthor_csg_slot csg_slots[MAX_CSGS];
266 
267 	/** @csg_slot_count: Number of command stream group slots exposed by the FW. */
268 	u32 csg_slot_count;
269 
270 	/** @cs_slot_count: Number of command stream slots per group slot exposed by the FW. */
271 	u32 cs_slot_count;
272 
273 	/** @as_slot_count: Number of address space slots supported by the MMU. */
274 	u32 as_slot_count;
275 
276 	/** @used_csg_slot_count: Number of command stream group slots currently used. */
277 	u32 used_csg_slot_count;
278 
279 	/** @sb_slot_count: Number of scoreboard slots. */
280 	u32 sb_slot_count;
281 
282 	/**
283 	 * @might_have_idle_groups: True if an active group might have become idle.
284 	 *
285 	 * This will force a tick, so other runnable groups can be scheduled if one
286 	 * or more active groups became idle.
287 	 */
288 	bool might_have_idle_groups;
289 
290 	/** @pm: Power management related fields. */
291 	struct {
292 		/** @has_ref: True if the scheduler owns a runtime PM reference. */
293 		bool has_ref;
294 	} pm;
295 
296 	/** @reset: Reset related fields. */
297 	struct {
298 		/** @lock: Lock protecting the other reset fields. */
299 		struct mutex lock;
300 
301 		/**
302 		 * @in_progress: True if a reset is in progress.
303 		 *
304 		 * Set to true in panthor_sched_pre_reset() and back to false in
305 		 * panthor_sched_post_reset().
306 		 */
307 		atomic_t in_progress;
308 
309 		/**
310 		 * @stopped_groups: List containing all groups that were stopped
311 		 * before a reset.
312 		 *
313 		 * Insert panthor_group::run_node in the pre_reset path.
314 		 */
315 		struct list_head stopped_groups;
316 	} reset;
317 };
318 
319 /**
320  * struct panthor_syncobj_32b - 32-bit FW synchronization object
321  */
322 struct panthor_syncobj_32b {
323 	/** @seqno: Sequence number. */
324 	u32 seqno;
325 
326 	/**
327 	 * @status: Status.
328 	 *
329 	 * Non-zero on failure.
330 	 */
331 	u32 status;
332 };
333 
334 /**
335  * struct panthor_syncobj_64b - 64-bit FW synchronization object
336  */
337 struct panthor_syncobj_64b {
338 	/** @seqno: Sequence number. */
339 	u64 seqno;
340 
341 	/**
342 	 * @status: Status.
343 	 *
344 	 * Non-zero on failure.
345 	 */
346 	u32 status;
347 
348 	/** @pad: MBZ. */
349 	u32 pad;
350 };
351 
352 /**
353  * struct panthor_queue - Execution queue
354  */
355 struct panthor_queue {
356 	/** @scheduler: DRM scheduler used for this queue. */
357 	struct drm_gpu_scheduler scheduler;
358 
359 	/** @entity: DRM scheduling entity used for this queue. */
360 	struct drm_sched_entity entity;
361 
362 	/**
363 	 * @remaining_time: Time remaining before the job timeout expires.
364 	 *
365 	 * The job timeout is suspended when the queue is not scheduled by the
366 	 * FW. Every time we suspend the timer, we need to save the remaining
367 	 * time so we can restore it later on.
368 	 */
369 	unsigned long remaining_time;
370 
371 	/** @timeout_suspended: True if the job timeout was suspended. */
372 	bool timeout_suspended;
373 
374 	/**
375 	 * @doorbell_id: Doorbell assigned to this queue.
376 	 *
377 	 * Right now, all queues of a group share the same doorbell, and the
378 	 * doorbell ID is set to the CSG slot ID + 1 when the group is assigned
379 	 * a slot. But we might decide to provide fine-grained doorbell
380 	 * assignment at some point, so we don't have to wake up all queues in
381 	 * a group every time one of them is updated.
382 	 */
383 	u8 doorbell_id;
384 
385 	/**
386 	 * @priority: Priority of the queue inside the group.
387 	 *
388 	 * Must be less than 16 (Only 4 bits available).
389 	 */
390 	u8 priority;
391 #define CSF_MAX_QUEUE_PRIO	GENMASK(3, 0)
392 
393 	/** @ringbuf: Command stream ring-buffer. */
394 	struct panthor_kernel_bo *ringbuf;
395 
396 	/** @iface: Firmware interface. */
397 	struct {
398 		/** @mem: FW memory allocated for this interface. */
399 		struct panthor_kernel_bo *mem;
400 
401 		/** @input: Input interface. */
402 		struct panthor_fw_ringbuf_input_iface *input;
403 
404 		/** @output: Output interface. */
405 		const struct panthor_fw_ringbuf_output_iface *output;
406 
407 		/** @input_fw_va: FW virtual address of the input interface buffer. */
408 		u32 input_fw_va;
409 
410 		/** @output_fw_va: FW virtual address of the output interface buffer. */
411 		u32 output_fw_va;
412 	} iface;
413 
414 	/**
415 	 * @syncwait: Stores information about the synchronization object this
416 	 * queue is waiting on (see the evaluation sketch after this struct).
417 	 */
418 	struct {
419 		/** @gpu_va: GPU address of the synchronization object. */
420 		u64 gpu_va;
421 
422 		/** @ref: Reference value to compare against. */
423 		u64 ref;
424 
425 		/** @gt: True if this is a greater-than test. */
426 		bool gt;
427 
428 		/** @sync64: True if this is a 64-bit sync object. */
429 		bool sync64;
430 
431 		/** @obj: Buffer object holding the synchronization object. */
432 		struct drm_gem_object *obj;
433 
434 		/** @offset: Offset of the synchronization object inside @obj. */
435 		u64 offset;
436 
437 		/**
438 		 * @kmap: Kernel mapping of the buffer object holding the
439 		 * synchronization object.
440 		 */
441 		void *kmap;
442 	} syncwait;
443 
444 	/** @fence_ctx: Fence context fields. */
445 	struct {
446 		/** @lock: Used to protect access to all fences allocated by this context. */
447 		spinlock_t lock;
448 
449 		/**
450 		 * @id: Fence context ID.
451 		 *
452 		 * Allocated with dma_fence_context_alloc().
453 		 */
454 		u64 id;
455 
456 		/** @seqno: Sequence number of the last initialized fence. */
457 		atomic64_t seqno;
458 
459 		/**
460 		 * @last_fence: Fence of the last submitted job.
461 		 *
462 		 * We return this fence when we get an empty command stream.
463 		 * This way, we are guaranteed that all earlier jobs have completed
464 		 * when drm_sched_job::s_fence::finished is signaled, without having
465 		 * to feed the CS ring buffer with a dummy job that only signals the fence.
466 		 */
467 		struct dma_fence *last_fence;
468 
469 		/**
470 		 * @in_flight_jobs: List containing all in-flight jobs.
471 		 *
472 		 * Used to keep track and signal panthor_job::done_fence when the
473 		 * synchronization object attached to the queue is signaled.
474 		 */
475 		struct list_head in_flight_jobs;
476 	} fence_ctx;
477 };
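
/*
 * Illustrative sketch of how the syncwait condition described in
 * panthor_queue::syncwait can be evaluated against a panthor_syncobj_{32,64}b
 * object. This is not a helper provided by this driver (the in-tree
 * evaluation happens in the sync update path further down in this file), and
 * the non-GT comparison is assumed to be a less-or-equal test:
 *
 *	void *syncobj = queue->syncwait.kmap + queue->syncwait.offset;
 *	u64 val = queue->syncwait.sync64 ?
 *		  ((struct panthor_syncobj_64b *)syncobj)->seqno :
 *		  ((struct panthor_syncobj_32b *)syncobj)->seqno;
 *	bool signaled = queue->syncwait.gt ? val > queue->syncwait.ref :
 *					     val <= queue->syncwait.ref;
 */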
478 
479 /**
480  * enum panthor_group_state - Scheduling group state.
481  */
482 enum panthor_group_state {
483 	/** @PANTHOR_CS_GROUP_CREATED: Group was created, but not scheduled yet. */
484 	PANTHOR_CS_GROUP_CREATED,
485 
486 	/** @PANTHOR_CS_GROUP_ACTIVE: Group is currently scheduled. */
487 	PANTHOR_CS_GROUP_ACTIVE,
488 
489 	/**
490 	 * @PANTHOR_CS_GROUP_SUSPENDED: Group was scheduled at least once, but is
491 	 * inactive/suspended right now.
492 	 */
493 	PANTHOR_CS_GROUP_SUSPENDED,
494 
495 	/**
496 	 * @PANTHOR_CS_GROUP_TERMINATED: Group was terminated.
497 	 *
498 	 * Can no longer be scheduled. The only allowed action is destruction.
499 	 */
500 	PANTHOR_CS_GROUP_TERMINATED,
501 
502 	/**
503 	 * @PANTHOR_CS_GROUP_UNKNOWN_STATE: Group is in an unknown state.
504 	 *
505 	 * The FW returned an inconsistent state. The group is flagged unusable
506 	 * and can no longer be scheduled. The only allowed action is
507 	 * destruction.
508 	 *
509 	 * When that happens, we also schedule a FW reset, to start from a fresh
510 	 * state.
511 	 */
512 	PANTHOR_CS_GROUP_UNKNOWN_STATE,
513 };
514 
515 /**
516  * struct panthor_group - Scheduling group object
517  */
518 struct panthor_group {
519 	/** @refcount: Reference count */
520 	struct kref refcount;
521 
522 	/** @ptdev: Device. */
523 	struct panthor_device *ptdev;
524 
525 	/** @vm: VM bound to the group. */
526 	struct panthor_vm *vm;
527 
528 	/** @compute_core_mask: Mask of shader cores that can be used for compute jobs. */
529 	u64 compute_core_mask;
530 
531 	/** @fragment_core_mask: Mask of shader cores that can be used for fragment jobs. */
532 	u64 fragment_core_mask;
533 
534 	/** @tiler_core_mask: Mask of tiler cores that can be used for tiler jobs. */
535 	u64 tiler_core_mask;
536 
537 	/** @max_compute_cores: Maximum number of shader cores used for compute jobs. */
538 	u8 max_compute_cores;
539 
540 	/** @max_fragment_cores: Maximum number of shader cores used for fragment jobs. */
541 	u8 max_fragment_cores;
542 
543 	/** @max_tiler_cores: Maximum number of tiler cores used for tiler jobs. */
544 	u8 max_tiler_cores;
545 
546 	/** @priority: Group priority (see enum panthor_csg_priority). */
547 	u8 priority;
548 
549 	/** @blocked_queues: Bitmask reflecting the blocked queues. */
550 	u32 blocked_queues;
551 
552 	/** @idle_queues: Bitmask reflecting the idle queues. */
553 	u32 idle_queues;
554 
555 	/** @fatal_lock: Lock used to protect access to fatal fields. */
556 	spinlock_t fatal_lock;
557 
558 	/** @fatal_queues: Bitmask reflecting the queues that hit a fatal exception. */
559 	u32 fatal_queues;
560 
561 	/** @tiler_oom: Mask of queues that have a tiler OOM event to process. */
562 	atomic_t tiler_oom;
563 
564 	/** @queue_count: Number of queues in this group. */
565 	u32 queue_count;
566 
567 	/** @queues: Queues owned by this group. */
568 	struct panthor_queue *queues[MAX_CS_PER_CSG];
569 
570 	/**
571 	 * @csg_id: ID of the FW group slot.
572 	 *
573 	 * -1 when the group is not scheduled/active.
574 	 */
575 	int csg_id;
576 
577 	/**
578 	 * @destroyed: True when the group has been destroyed.
579 	 *
580 	 * If a group is destroyed it becomes useless: no further jobs can be submitted
581 	 * to its queues. We simply wait for all references to be dropped so we can
582 	 * release the group object.
583 	 */
584 	bool destroyed;
585 
586 	/**
587 	 * @timedout: True when a timeout occurred on any of the queues owned by
588 	 * this group.
589 	 *
590 	 * Timeouts can be reported by drm_sched or by the FW. In any case, any
591 	 * timeout situation is unrecoverable, and the group becomes useless.
592 	 * We simply wait for all references to be dropped so we can release the
593 	 * group object.
594 	 */
595 	bool timedout;
596 
597 	/**
598 	 * @syncobjs: Pool of per-queue synchronization objects.
599 	 *
600 	 * One sync object per queue. The position of the sync object is
601 	 * determined by the queue index.
602 	 */
603 	struct panthor_kernel_bo *syncobjs;
604 
605 	/** @state: Group state. */
606 	enum panthor_group_state state;
607 
608 	/**
609 	 * @suspend_buf: Suspend buffer.
610 	 *
611 	 * Stores the state of the group and its queues when a group is suspended.
612 	 * Used at resume time to restore the group in its previous state.
613 	 *
614 	 * The size of the suspend buffer is exposed through the FW interface.
615 	 */
616 	struct panthor_kernel_bo *suspend_buf;
617 
618 	/**
619 	 * @protm_suspend_buf: Protection mode suspend buffer.
620 	 *
621 	 * Stores the state of the group and its queues when a group that's in
622 	 * protection mode is suspended.
623 	 *
624 	 * Used at resume time to restore the group in its previous state.
625 	 *
626 	 * The size of the protection mode suspend buffer is exposed through the
627 	 * FW interface.
628 	 */
629 	struct panthor_kernel_bo *protm_suspend_buf;
630 
631 	/** @sync_upd_work: Work used to check/signal job fences. */
632 	struct work_struct sync_upd_work;
633 
634 	/** @tiler_oom_work: Work used to process tiler OOM events happening on this group. */
635 	struct work_struct tiler_oom_work;
636 
637 	/** @term_work: Work used to finish the group termination procedure. */
638 	struct work_struct term_work;
639 
640 	/**
641 	 * @release_work: Work used to release group resources.
642 	 *
643 	 * We need to postpone the group release to avoid a deadlock when
644 	 * the last ref is released in the tick work.
645 	 */
646 	struct work_struct release_work;
647 
648 	/**
649 	 * @run_node: Node used to insert the group in the
650 	 * panthor_scheduler::groups::{runnable,idle} and
651 	 * panthor_scheduler::reset.stopped_groups lists.
652 	 */
653 	struct list_head run_node;
654 
655 	/**
656 	 * @wait_node: Node used to insert the group in the
657 	 * panthor_scheduler::groups::waiting list.
658 	 */
659 	struct list_head wait_node;
660 };
661 
662 /**
663  * group_queue_work() - Queue a group work
664  * @group: Group to queue the work for.
665  * @wname: Work name.
666  *
667  * Grabs a ref and queues a work item to the scheduler workqueue. If
668  * the work was already queued, we release the reference we grabbed.
669  *
670  * Work callbacks must release the reference we grabbed here (see the usage example below).
671  */
672 #define group_queue_work(group, wname) \
673 	do { \
674 		group_get(group); \
675 		if (!queue_work((group)->ptdev->scheduler->wq, &(group)->wname ## _work)) \
676 			group_put(group); \
677 	} while (0)
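
/*
 * Usage example (illustrative): queuing the group termination work declared
 * in struct panthor_group:
 *
 *	group_queue_work(group, term);
 *
 * expands to a group_get() followed by a queue_work() on &group->term_work,
 * with the reference dropped right away if the work was already queued. The
 * term_work handler is then responsible for calling group_put() once done.
 */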
678 
679 /**
680  * sched_queue_work() - Queue a scheduler work.
681  * @sched: Scheduler object.
682  * @wname: Work name.
683  *
684  * Conditionally queues a scheduler work if no reset is pending/in-progress.
685  */
686 #define sched_queue_work(sched, wname) \
687 	do { \
688 		if (!atomic_read(&(sched)->reset.in_progress) && \
689 		    !panthor_device_reset_is_pending((sched)->ptdev)) \
690 			queue_work((sched)->wq, &(sched)->wname ## _work); \
691 	} while (0)
692 
693 /**
694  * sched_queue_delayed_work() - Queue a scheduler delayed work.
695  * @sched: Scheduler object.
696  * @wname: Work name.
697  * @delay: Work delay in jiffies.
698  *
699  * Conditionally queues a scheduler delayed work if no reset is
700  * pending/in-progress.
701  */
702 #define sched_queue_delayed_work(sched, wname, delay) \
703 	do { \
704 		if (!atomic_read(&sched->reset.in_progress) && \
705 		    !panthor_device_reset_is_pending((sched)->ptdev)) \
706 			mod_delayed_work((sched)->wq, &(sched)->wname ## _work, delay); \
707 	} while (0)
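
/*
 * Usage example (illustrative): the event handlers below kick an immediate
 * scheduler tick with:
 *
 *	sched_queue_delayed_work(sched, tick, 0);
 *
 * while the tick work is expected to re-arm itself using
 * panthor_scheduler::tick_period.
 */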
708 
709 /*
710  * We currently set the maximum number of groups per file to an arbitrarily low value.
711  * But this can be updated if we need more.
712  */
713 #define MAX_GROUPS_PER_POOL 128
714 
715 /**
716  * struct panthor_group_pool - Group pool
717  *
718  * Each file gets assigned a group pool.
719  */
720 struct panthor_group_pool {
721 	/** @xa: Xarray used to manage group handles. */
722 	struct xarray xa;
723 };
724 
725 /**
726  * struct panthor_job - Used to manage a GPU job
727  */
728 struct panthor_job {
729 	/** @base: Inherit from drm_sched_job. */
730 	struct drm_sched_job base;
731 
732 	/** @refcount: Reference count. */
733 	struct kref refcount;
734 
735 	/** @group: Group of the queue this job will be pushed to. */
736 	struct panthor_group *group;
737 
738 	/** @queue_idx: Index of the queue inside @group. */
739 	u32 queue_idx;
740 
741 	/** @call_info: Information about the userspace command stream call. */
742 	struct {
743 		/** @start: GPU address of the userspace command stream. */
744 		u64 start;
745 
746 		/** @size: Size of the userspace command stream. */
747 		u32 size;
748 
749 		/**
750 		 * @latest_flush: Flush ID at the time the userspace command
751 		 * stream was built.
752 		 *
753 		 * Needed for the flush reduction mechanism.
754 		 */
755 		u32 latest_flush;
756 	} call_info;
757 
758 	/** @ringbuf: Position of this job in the ring buffer (see the note after this struct). */
759 	struct {
760 		/** @start: Start offset. */
761 		u64 start;
762 
763 		/** @end: End offset. */
764 		u64 end;
765 	} ringbuf;
766 
767 	/**
768 	 * @node: Used to insert the job in the panthor_queue::fence_ctx::in_flight_jobs
769 	 * list.
770 	 */
771 	struct list_head node;
772 
773 	/** @done_fence: Fence signaled when the job is finished or cancelled. */
774 	struct dma_fence *done_fence;
775 };
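
/*
 * Illustrative note on panthor_job::ringbuf: a job occupies the
 * [ringbuf.start, ringbuf.end) range of its queue ring buffer, so the job
 * currently being processed by the FW is the one whose range contains the CS
 * extract pointer. A sketch of such a lookup over the in-flight job list (the
 * real users live further down in this file):
 *
 *	list_for_each_entry(job, &queue->fence_ctx.in_flight_jobs, node) {
 *		if (cs_extract >= job->ringbuf.start && cs_extract < job->ringbuf.end)
 *			break;	// this job covers the extract pointer
 *	}
 */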
776 
777 static void
778 panthor_queue_put_syncwait_obj(struct panthor_queue *queue)
779 {
780 	if (queue->syncwait.kmap) {
781 		struct iosys_map map = IOSYS_MAP_INIT_VADDR(queue->syncwait.kmap);
782 
783 		drm_gem_vunmap_unlocked(queue->syncwait.obj, &map);
784 		queue->syncwait.kmap = NULL;
785 	}
786 
787 	drm_gem_object_put(queue->syncwait.obj);
788 	queue->syncwait.obj = NULL;
789 }
790 
791 static void *
792 panthor_queue_get_syncwait_obj(struct panthor_group *group, struct panthor_queue *queue)
793 {
794 	struct panthor_device *ptdev = group->ptdev;
795 	struct panthor_gem_object *bo;
796 	struct iosys_map map;
797 	int ret;
798 
799 	if (queue->syncwait.kmap)
800 		return queue->syncwait.kmap + queue->syncwait.offset;
801 
802 	bo = panthor_vm_get_bo_for_va(group->vm,
803 				      queue->syncwait.gpu_va,
804 				      &queue->syncwait.offset);
805 	if (drm_WARN_ON(&ptdev->base, IS_ERR_OR_NULL(bo)))
806 		goto err_put_syncwait_obj;
807 
808 	queue->syncwait.obj = &bo->base.base;
809 	ret = drm_gem_vmap_unlocked(queue->syncwait.obj, &map);
810 	if (drm_WARN_ON(&ptdev->base, ret))
811 		goto err_put_syncwait_obj;
812 
813 	queue->syncwait.kmap = map.vaddr;
814 	if (drm_WARN_ON(&ptdev->base, !queue->syncwait.kmap))
815 		goto err_put_syncwait_obj;
816 
817 	return queue->syncwait.kmap + queue->syncwait.offset;
818 
819 err_put_syncwait_obj:
820 	panthor_queue_put_syncwait_obj(queue);
821 	return NULL;
822 }
823 
824 static void group_free_queue(struct panthor_group *group, struct panthor_queue *queue)
825 {
826 	if (IS_ERR_OR_NULL(queue))
827 		return;
828 
829 	if (queue->entity.fence_context)
830 		drm_sched_entity_destroy(&queue->entity);
831 
832 	if (queue->scheduler.ops)
833 		drm_sched_fini(&queue->scheduler);
834 
835 	panthor_queue_put_syncwait_obj(queue);
836 
837 	panthor_kernel_bo_destroy(queue->ringbuf);
838 	panthor_kernel_bo_destroy(queue->iface.mem);
839 
840 	/* Release the last_fence we were holding, if any. */
841 	dma_fence_put(queue->fence_ctx.last_fence);
842 
843 	kfree(queue);
844 }
845 
846 static void group_release_work(struct work_struct *work)
847 {
848 	struct panthor_group *group = container_of(work,
849 						   struct panthor_group,
850 						   release_work);
851 	u32 i;
852 
853 	for (i = 0; i < group->queue_count; i++)
854 		group_free_queue(group, group->queues[i]);
855 
856 	panthor_kernel_bo_destroy(group->suspend_buf);
857 	panthor_kernel_bo_destroy(group->protm_suspend_buf);
858 	panthor_kernel_bo_destroy(group->syncobjs);
859 
860 	panthor_vm_put(group->vm);
861 	kfree(group);
862 }
863 
864 static void group_release(struct kref *kref)
865 {
866 	struct panthor_group *group = container_of(kref,
867 						   struct panthor_group,
868 						   refcount);
869 	struct panthor_device *ptdev = group->ptdev;
870 
871 	drm_WARN_ON(&ptdev->base, group->csg_id >= 0);
872 	drm_WARN_ON(&ptdev->base, !list_empty(&group->run_node));
873 	drm_WARN_ON(&ptdev->base, !list_empty(&group->wait_node));
874 
875 	queue_work(panthor_cleanup_wq, &group->release_work);
876 }
877 
878 static void group_put(struct panthor_group *group)
879 {
880 	if (group)
881 		kref_put(&group->refcount, group_release);
882 }
883 
884 static struct panthor_group *
885 group_get(struct panthor_group *group)
886 {
887 	if (group)
888 		kref_get(&group->refcount);
889 
890 	return group;
891 }
892 
893 /**
894  * group_bind_locked() - Bind a group to a group slot
895  * @group: Group.
896  * @csg_id: Slot.
897  *
898  * Return: 0 on success, a negative error code otherwise.
899  */
900 static int
901 group_bind_locked(struct panthor_group *group, u32 csg_id)
902 {
903 	struct panthor_device *ptdev = group->ptdev;
904 	struct panthor_csg_slot *csg_slot;
905 	int ret;
906 
907 	lockdep_assert_held(&ptdev->scheduler->lock);
908 
909 	if (drm_WARN_ON(&ptdev->base, group->csg_id != -1 || csg_id >= MAX_CSGS ||
910 			ptdev->scheduler->csg_slots[csg_id].group))
911 		return -EINVAL;
912 
913 	ret = panthor_vm_active(group->vm);
914 	if (ret)
915 		return ret;
916 
917 	csg_slot = &ptdev->scheduler->csg_slots[csg_id];
918 	group_get(group);
919 	group->csg_id = csg_id;
920 
921 	/* Dummy doorbell allocation: doorbell is assigned to the group and
922 	 * all queues use the same doorbell.
923 	 *
924 	 * TODO: Implement LRU-based doorbell assignment, so the most often
925 	 * updated queues get their own doorbell, thus avoiding useless checks
926 	 * on queues belonging to the same group that are rarely updated.
927 	 */
928 	for (u32 i = 0; i < group->queue_count; i++)
929 		group->queues[i]->doorbell_id = csg_id + 1;
930 
931 	csg_slot->group = group;
932 
933 	return 0;
934 }
935 
936 /**
937  * group_unbind_locked() - Unbind a group from a slot.
938  * @group: Group to unbind.
939  *
940  * Return: 0 on success, a negative error code otherwise.
941  */
942 static int
943 group_unbind_locked(struct panthor_group *group)
944 {
945 	struct panthor_device *ptdev = group->ptdev;
946 	struct panthor_csg_slot *slot;
947 
948 	lockdep_assert_held(&ptdev->scheduler->lock);
949 
950 	if (drm_WARN_ON(&ptdev->base, group->csg_id < 0 || group->csg_id >= MAX_CSGS))
951 		return -EINVAL;
952 
953 	if (drm_WARN_ON(&ptdev->base, group->state == PANTHOR_CS_GROUP_ACTIVE))
954 		return -EINVAL;
955 
956 	slot = &ptdev->scheduler->csg_slots[group->csg_id];
957 	panthor_vm_idle(group->vm);
958 	group->csg_id = -1;
959 
960 	/* Tiler OOM events will be re-issued next time the group is scheduled. */
961 	atomic_set(&group->tiler_oom, 0);
962 	cancel_work(&group->tiler_oom_work);
963 
964 	for (u32 i = 0; i < group->queue_count; i++)
965 		group->queues[i]->doorbell_id = -1;
966 
967 	slot->group = NULL;
968 
969 	group_put(group);
970 	return 0;
971 }
972 
973 /**
974  * cs_slot_prog_locked() - Program a queue slot
975  * @ptdev: Device.
976  * @csg_id: Group slot ID.
977  * @cs_id: Queue slot ID.
978  *
979  * Program a queue slot with the queue information so things can start being
980  * executed on this queue.
981  *
982  * The group slot must have a group bound to it already (group_bind_locked()).
983  */
984 static void
985 cs_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
986 {
987 	struct panthor_queue *queue = ptdev->scheduler->csg_slots[csg_id].group->queues[cs_id];
988 	struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
989 
990 	lockdep_assert_held(&ptdev->scheduler->lock);
991 
992 	queue->iface.input->extract = queue->iface.output->extract;
993 	drm_WARN_ON(&ptdev->base, queue->iface.input->insert < queue->iface.input->extract);
994 
995 	cs_iface->input->ringbuf_base = panthor_kernel_bo_gpuva(queue->ringbuf);
996 	cs_iface->input->ringbuf_size = panthor_kernel_bo_size(queue->ringbuf);
997 	cs_iface->input->ringbuf_input = queue->iface.input_fw_va;
998 	cs_iface->input->ringbuf_output = queue->iface.output_fw_va;
999 	cs_iface->input->config = CS_CONFIG_PRIORITY(queue->priority) |
1000 				  CS_CONFIG_DOORBELL(queue->doorbell_id);
1001 	cs_iface->input->ack_irq_mask = ~0;
1002 	panthor_fw_update_reqs(cs_iface, req,
1003 			       CS_IDLE_SYNC_WAIT |
1004 			       CS_IDLE_EMPTY |
1005 			       CS_STATE_START |
1006 			       CS_EXTRACT_EVENT,
1007 			       CS_IDLE_SYNC_WAIT |
1008 			       CS_IDLE_EMPTY |
1009 			       CS_STATE_MASK |
1010 			       CS_EXTRACT_EVENT);
1011 	if (queue->iface.input->insert != queue->iface.input->extract && queue->timeout_suspended) {
1012 		drm_sched_resume_timeout(&queue->scheduler, queue->remaining_time);
1013 		queue->timeout_suspended = false;
1014 	}
1015 }
1016 
1017 /**
1018  * cs_slot_reset_locked() - Reset a queue slot
1019  * @ptdev: Device.
1020  * @csg_id: Group slot.
1021  * @cs_id: Queue slot.
1022  *
1023  * Change the queue slot state to STOP and suspend the queue timeout if
1024  * the queue is not blocked.
1025  *
1026  * The group slot must have a group bound to it (group_bind_locked()).
1027  */
1028 static int
1029 cs_slot_reset_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
1030 {
1031 	struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1032 	struct panthor_group *group = ptdev->scheduler->csg_slots[csg_id].group;
1033 	struct panthor_queue *queue = group->queues[cs_id];
1034 
1035 	lockdep_assert_held(&ptdev->scheduler->lock);
1036 
1037 	panthor_fw_update_reqs(cs_iface, req,
1038 			       CS_STATE_STOP,
1039 			       CS_STATE_MASK);
1040 
1041 	/* If the queue is blocked, we want to keep the timeout running, so
1042 	 * we can detect unbounded waits and kill the group when that happens.
1043 	 */
1044 	if (!(group->blocked_queues & BIT(cs_id)) && !queue->timeout_suspended) {
1045 		queue->remaining_time = drm_sched_suspend_timeout(&queue->scheduler);
1046 		queue->timeout_suspended = true;
1047 		WARN_ON(queue->remaining_time > msecs_to_jiffies(JOB_TIMEOUT_MS));
1048 	}
1049 
1050 	return 0;
1051 }
1052 
1053 /**
1054  * csg_slot_sync_priority_locked() - Synchronize the group slot priority
1055  * @ptdev: Device.
1056  * @csg_id: Group slot ID.
1057  *
1058  * Group slot priority update happens asynchronously. When we receive a
1059  * %CSG_ENDPOINT_CONFIG acknowledgment, we know the update is effective, and can
1060  * reflect it to our panthor_csg_slot object.
1061  */
1062 static void
1063 csg_slot_sync_priority_locked(struct panthor_device *ptdev, u32 csg_id)
1064 {
1065 	struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1066 	struct panthor_fw_csg_iface *csg_iface;
1067 
1068 	lockdep_assert_held(&ptdev->scheduler->lock);
1069 
1070 	csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1071 	csg_slot->priority = (csg_iface->input->endpoint_req & CSG_EP_REQ_PRIORITY_MASK) >> 28;
1072 }
1073 
1074 /**
1075  * cs_slot_sync_queue_state_locked() - Synchronize the queue slot state
1076  * @ptdev: Device.
1077  * @csg_id: Group slot.
1078  * @cs_id: Queue slot.
1079  *
1080  * Queue state is updated on group suspend or STATUS_UPDATE event.
1081  */
1082 static void
1083 cs_slot_sync_queue_state_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
1084 {
1085 	struct panthor_group *group = ptdev->scheduler->csg_slots[csg_id].group;
1086 	struct panthor_queue *queue = group->queues[cs_id];
1087 	struct panthor_fw_cs_iface *cs_iface =
1088 		panthor_fw_get_cs_iface(group->ptdev, csg_id, cs_id);
1089 
1090 	u32 status_wait_cond;
1091 
1092 	switch (cs_iface->output->status_blocked_reason) {
1093 	case CS_STATUS_BLOCKED_REASON_UNBLOCKED:
1094 		if (queue->iface.input->insert == queue->iface.output->extract &&
1095 		    cs_iface->output->status_scoreboards == 0)
1096 			group->idle_queues |= BIT(cs_id);
1097 		break;
1098 
1099 	case CS_STATUS_BLOCKED_REASON_SYNC_WAIT:
1100 		if (list_empty(&group->wait_node)) {
1101 			list_move_tail(&group->wait_node,
1102 				       &group->ptdev->scheduler->groups.waiting);
1103 		}
1104 		group->blocked_queues |= BIT(cs_id);
1105 		queue->syncwait.gpu_va = cs_iface->output->status_wait_sync_ptr;
1106 		queue->syncwait.ref = cs_iface->output->status_wait_sync_value;
1107 		status_wait_cond = cs_iface->output->status_wait & CS_STATUS_WAIT_SYNC_COND_MASK;
1108 		queue->syncwait.gt = status_wait_cond == CS_STATUS_WAIT_SYNC_COND_GT;
1109 		if (cs_iface->output->status_wait & CS_STATUS_WAIT_SYNC_64B) {
1110 			u64 sync_val_hi = cs_iface->output->status_wait_sync_value_hi;
1111 
1112 			queue->syncwait.sync64 = true;
1113 			queue->syncwait.ref |= sync_val_hi << 32;
1114 		} else {
1115 			queue->syncwait.sync64 = false;
1116 		}
1117 		break;
1118 
1119 	default:
1120 		/* Other reasons are not blocking. Consider the queue as runnable
1121 		 * in those cases.
1122 		 */
1123 		break;
1124 	}
1125 }
1126 
1127 static void
1128 csg_slot_sync_queues_state_locked(struct panthor_device *ptdev, u32 csg_id)
1129 {
1130 	struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1131 	struct panthor_group *group = csg_slot->group;
1132 	u32 i;
1133 
1134 	lockdep_assert_held(&ptdev->scheduler->lock);
1135 
1136 	group->idle_queues = 0;
1137 	group->blocked_queues = 0;
1138 
1139 	for (i = 0; i < group->queue_count; i++) {
1140 		if (group->queues[i])
1141 			cs_slot_sync_queue_state_locked(ptdev, csg_id, i);
1142 	}
1143 }
1144 
1145 static void
1146 csg_slot_sync_state_locked(struct panthor_device *ptdev, u32 csg_id)
1147 {
1148 	struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1149 	struct panthor_fw_csg_iface *csg_iface;
1150 	struct panthor_group *group;
1151 	enum panthor_group_state new_state, old_state;
1152 	u32 csg_state;
1153 
1154 	lockdep_assert_held(&ptdev->scheduler->lock);
1155 
1156 	csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1157 	group = csg_slot->group;
1158 
1159 	if (!group)
1160 		return;
1161 
1162 	old_state = group->state;
1163 	csg_state = csg_iface->output->ack & CSG_STATE_MASK;
1164 	switch (csg_state) {
1165 	case CSG_STATE_START:
1166 	case CSG_STATE_RESUME:
1167 		new_state = PANTHOR_CS_GROUP_ACTIVE;
1168 		break;
1169 	case CSG_STATE_TERMINATE:
1170 		new_state = PANTHOR_CS_GROUP_TERMINATED;
1171 		break;
1172 	case CSG_STATE_SUSPEND:
1173 		new_state = PANTHOR_CS_GROUP_SUSPENDED;
1174 		break;
1175 	default:
1176 		/* The unknown state might be caused by a FW state corruption,
1177 		 * which means the group metadata can't be trusted anymore, and
1178 		 * the SUSPEND operation might propagate the corruption to the
1179 		 * suspend buffers. Flag the group state as unknown to make
1180 		 * sure it's unusable after that point.
1181 		 */
1182 		drm_err(&ptdev->base, "Invalid state on CSG %d (state=%d)",
1183 			csg_id, csg_state);
1184 		new_state = PANTHOR_CS_GROUP_UNKNOWN_STATE;
1185 		break;
1186 	}
1187 
1188 	if (old_state == new_state)
1189 		return;
1190 
1191 	/* The unknown state might be caused by a FW issue, reset the FW to
1192 	 * take a fresh start.
1193 	 */
1194 	if (new_state == PANTHOR_CS_GROUP_UNKNOWN_STATE)
1195 		panthor_device_schedule_reset(ptdev);
1196 
1197 	if (new_state == PANTHOR_CS_GROUP_SUSPENDED)
1198 		csg_slot_sync_queues_state_locked(ptdev, csg_id);
1199 
1200 	if (old_state == PANTHOR_CS_GROUP_ACTIVE) {
1201 		u32 i;
1202 
1203 		/* Reset the queue slots so we start from a clean
1204 		 * state when starting/resuming a new group on this
1205 		 * CSG slot. No wait needed here, and no doorbell ring
1206 		 * either, since the CS slot will only be re-used
1207 		 * on the next CSG start operation.
1208 		 */
1209 		for (i = 0; i < group->queue_count; i++) {
1210 			if (group->queues[i])
1211 				cs_slot_reset_locked(ptdev, csg_id, i);
1212 		}
1213 	}
1214 
1215 	group->state = new_state;
1216 }
1217 
1218 static int
1219 csg_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 priority)
1220 {
1221 	struct panthor_fw_csg_iface *csg_iface;
1222 	struct panthor_csg_slot *csg_slot;
1223 	struct panthor_group *group;
1224 	u32 queue_mask = 0, i;
1225 
1226 	lockdep_assert_held(&ptdev->scheduler->lock);
1227 
1228 	if (priority > MAX_CSG_PRIO)
1229 		return -EINVAL;
1230 
1231 	if (drm_WARN_ON(&ptdev->base, csg_id >= MAX_CSGS))
1232 		return -EINVAL;
1233 
1234 	csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1235 	group = csg_slot->group;
1236 	if (!group || group->state == PANTHOR_CS_GROUP_ACTIVE)
1237 		return 0;
1238 
1239 	csg_iface = panthor_fw_get_csg_iface(group->ptdev, csg_id);
1240 
1241 	for (i = 0; i < group->queue_count; i++) {
1242 		if (group->queues[i]) {
1243 			cs_slot_prog_locked(ptdev, csg_id, i);
1244 			queue_mask |= BIT(i);
1245 		}
1246 	}
1247 
1248 	csg_iface->input->allow_compute = group->compute_core_mask;
1249 	csg_iface->input->allow_fragment = group->fragment_core_mask;
1250 	csg_iface->input->allow_other = group->tiler_core_mask;
1251 	csg_iface->input->endpoint_req = CSG_EP_REQ_COMPUTE(group->max_compute_cores) |
1252 					 CSG_EP_REQ_FRAGMENT(group->max_fragment_cores) |
1253 					 CSG_EP_REQ_TILER(group->max_tiler_cores) |
1254 					 CSG_EP_REQ_PRIORITY(priority);
1255 	csg_iface->input->config = panthor_vm_as(group->vm);
1256 
1257 	if (group->suspend_buf)
1258 		csg_iface->input->suspend_buf = panthor_kernel_bo_gpuva(group->suspend_buf);
1259 	else
1260 		csg_iface->input->suspend_buf = 0;
1261 
1262 	if (group->protm_suspend_buf) {
1263 		csg_iface->input->protm_suspend_buf =
1264 			panthor_kernel_bo_gpuva(group->protm_suspend_buf);
1265 	} else {
1266 		csg_iface->input->protm_suspend_buf = 0;
1267 	}
1268 
1269 	csg_iface->input->ack_irq_mask = ~0;
1270 	panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, queue_mask);
1271 	return 0;
1272 }
1273 
1274 static void
1275 cs_slot_process_fatal_event_locked(struct panthor_device *ptdev,
1276 				   u32 csg_id, u32 cs_id)
1277 {
1278 	struct panthor_scheduler *sched = ptdev->scheduler;
1279 	struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
1280 	struct panthor_group *group = csg_slot->group;
1281 	struct panthor_fw_cs_iface *cs_iface;
1282 	u32 fatal;
1283 	u64 info;
1284 
1285 	lockdep_assert_held(&sched->lock);
1286 
1287 	cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1288 	fatal = cs_iface->output->fatal;
1289 	info = cs_iface->output->fatal_info;
1290 
1291 	if (group)
1292 		group->fatal_queues |= BIT(cs_id);
1293 
1294 	if (CS_EXCEPTION_TYPE(fatal) == DRM_PANTHOR_EXCEPTION_CS_UNRECOVERABLE) {
1295 		/* If this exception is unrecoverable, queue a reset, and make
1296 		 * sure we stop scheduling groups until the reset has happened.
1297 		 */
1298 		panthor_device_schedule_reset(ptdev);
1299 		cancel_delayed_work(&sched->tick_work);
1300 	} else {
1301 		sched_queue_delayed_work(sched, tick, 0);
1302 	}
1303 
1304 	drm_warn(&ptdev->base,
1305 		 "CSG slot %d CS slot: %d\n"
1306 		 "CS_FATAL.EXCEPTION_TYPE: 0x%x (%s)\n"
1307 		 "CS_FATAL.EXCEPTION_DATA: 0x%x\n"
1308 		 "CS_FATAL_INFO.EXCEPTION_DATA: 0x%llx\n",
1309 		 csg_id, cs_id,
1310 		 (unsigned int)CS_EXCEPTION_TYPE(fatal),
1311 		 panthor_exception_name(ptdev, CS_EXCEPTION_TYPE(fatal)),
1312 		 (unsigned int)CS_EXCEPTION_DATA(fatal),
1313 		 info);
1314 }
1315 
1316 static void
1317 cs_slot_process_fault_event_locked(struct panthor_device *ptdev,
1318 				   u32 csg_id, u32 cs_id)
1319 {
1320 	struct panthor_scheduler *sched = ptdev->scheduler;
1321 	struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
1322 	struct panthor_group *group = csg_slot->group;
1323 	struct panthor_queue *queue = group && cs_id < group->queue_count ?
1324 				      group->queues[cs_id] : NULL;
1325 	struct panthor_fw_cs_iface *cs_iface;
1326 	u32 fault;
1327 	u64 info;
1328 
1329 	lockdep_assert_held(&sched->lock);
1330 
1331 	cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1332 	fault = cs_iface->output->fault;
1333 	info = cs_iface->output->fault_info;
1334 
1335 	if (queue && CS_EXCEPTION_TYPE(fault) == DRM_PANTHOR_EXCEPTION_CS_INHERIT_FAULT) {
1336 		u64 cs_extract = queue->iface.output->extract;
1337 		struct panthor_job *job;
1338 
1339 		spin_lock(&queue->fence_ctx.lock);
1340 		list_for_each_entry(job, &queue->fence_ctx.in_flight_jobs, node) {
1341 			if (cs_extract >= job->ringbuf.end)
1342 				continue;
1343 
1344 			if (cs_extract < job->ringbuf.start)
1345 				break;
1346 
1347 			dma_fence_set_error(job->done_fence, -EINVAL);
1348 		}
1349 		spin_unlock(&queue->fence_ctx.lock);
1350 	}
1351 
1352 	drm_warn(&ptdev->base,
1353 		 "CSG slot %d CS slot: %d\n"
1354 		 "CS_FAULT.EXCEPTION_TYPE: 0x%x (%s)\n"
1355 		 "CS_FAULT.EXCEPTION_DATA: 0x%x\n"
1356 		 "CS_FAULT_INFO.EXCEPTION_DATA: 0x%llx\n",
1357 		 csg_id, cs_id,
1358 		 (unsigned int)CS_EXCEPTION_TYPE(fault),
1359 		 panthor_exception_name(ptdev, CS_EXCEPTION_TYPE(fault)),
1360 		 (unsigned int)CS_EXCEPTION_DATA(fault),
1361 		 info);
1362 }
1363 
1364 static int group_process_tiler_oom(struct panthor_group *group, u32 cs_id)
1365 {
1366 	struct panthor_device *ptdev = group->ptdev;
1367 	struct panthor_scheduler *sched = ptdev->scheduler;
1368 	u32 renderpasses_in_flight, pending_frag_count;
1369 	struct panthor_heap_pool *heaps = NULL;
1370 	u64 heap_address, new_chunk_va = 0;
1371 	u32 vt_start, vt_end, frag_end;
1372 	int ret, csg_id;
1373 
1374 	mutex_lock(&sched->lock);
1375 	csg_id = group->csg_id;
1376 	if (csg_id >= 0) {
1377 		struct panthor_fw_cs_iface *cs_iface;
1378 
1379 		cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1380 		heaps = panthor_vm_get_heap_pool(group->vm, false);
1381 		heap_address = cs_iface->output->heap_address;
1382 		vt_start = cs_iface->output->heap_vt_start;
1383 		vt_end = cs_iface->output->heap_vt_end;
1384 		frag_end = cs_iface->output->heap_frag_end;
1385 		renderpasses_in_flight = vt_start - frag_end;
1386 		pending_frag_count = vt_end - frag_end;
1387 	}
1388 	mutex_unlock(&sched->lock);
1389 
1390 	/* The group got scheduled out, so we stop here. We will get a new tiler OOM event
1391 	 * when it's scheduled again.
1392 	 */
1393 	if (unlikely(csg_id < 0))
1394 		return 0;
1395 
1396 	if (IS_ERR(heaps) || frag_end > vt_end || vt_end >= vt_start) {
1397 		ret = -EINVAL;
1398 	} else {
1399 		/* We do the allocation without holding the scheduler lock to avoid
1400 		 * blocking the scheduling.
1401 		 */
1402 		ret = panthor_heap_grow(heaps, heap_address,
1403 					renderpasses_in_flight,
1404 					pending_frag_count, &new_chunk_va);
1405 	}
1406 
1407 	/* If the heap context doesn't have memory for us, we want to let the
1408 	 * FW try to reclaim memory by waiting for fragment jobs to land or by
1409 	 * executing the tiler OOM exception handler, which is supposed to
1410 	 * implement incremental rendering.
1411 	 */
1412 	if (ret && ret != -ENOMEM) {
1413 		drm_warn(&ptdev->base, "Failed to extend the tiler heap\n");
1414 		group->fatal_queues |= BIT(cs_id);
1415 		sched_queue_delayed_work(sched, tick, 0);
1416 		goto out_put_heap_pool;
1417 	}
1418 
1419 	mutex_lock(&sched->lock);
1420 	csg_id = group->csg_id;
1421 	if (csg_id >= 0) {
1422 		struct panthor_fw_csg_iface *csg_iface;
1423 		struct panthor_fw_cs_iface *cs_iface;
1424 
1425 		csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1426 		cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1427 
1428 		cs_iface->input->heap_start = new_chunk_va;
1429 		cs_iface->input->heap_end = new_chunk_va;
1430 		panthor_fw_update_reqs(cs_iface, req, cs_iface->output->ack, CS_TILER_OOM);
1431 		panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, BIT(cs_id));
1432 		panthor_fw_ring_csg_doorbells(ptdev, BIT(csg_id));
1433 	}
1434 	mutex_unlock(&sched->lock);
1435 
1436 	/* We allocated a chunk, but couldn't link it to the heap
1437 	 * context because the group was scheduled out while we were
1438 	 * allocating memory. We need to return this chunk to the heap.
1439 	 */
1440 	if (unlikely(csg_id < 0 && new_chunk_va))
1441 		panthor_heap_return_chunk(heaps, heap_address, new_chunk_va);
1442 
1443 	ret = 0;
1444 
1445 out_put_heap_pool:
1446 	panthor_heap_pool_put(heaps);
1447 	return ret;
1448 }
1449 
1450 static void group_tiler_oom_work(struct work_struct *work)
1451 {
1452 	struct panthor_group *group =
1453 		container_of(work, struct panthor_group, tiler_oom_work);
1454 	u32 tiler_oom = atomic_xchg(&group->tiler_oom, 0);
1455 
1456 	while (tiler_oom) {
1457 		u32 cs_id = ffs(tiler_oom) - 1;
1458 
1459 		group_process_tiler_oom(group, cs_id);
1460 		tiler_oom &= ~BIT(cs_id);
1461 	}
1462 
1463 	group_put(group);
1464 }
1465 
1466 static void
1467 cs_slot_process_tiler_oom_event_locked(struct panthor_device *ptdev,
1468 				       u32 csg_id, u32 cs_id)
1469 {
1470 	struct panthor_scheduler *sched = ptdev->scheduler;
1471 	struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
1472 	struct panthor_group *group = csg_slot->group;
1473 
1474 	lockdep_assert_held(&sched->lock);
1475 
1476 	if (drm_WARN_ON(&ptdev->base, !group))
1477 		return;
1478 
1479 	atomic_or(BIT(cs_id), &group->tiler_oom);
1480 
1481 	/* We don't use group_queue_work() here because we want to queue the
1482 	 * work item to the heap_alloc_wq.
1483 	 */
1484 	group_get(group);
1485 	if (!queue_work(sched->heap_alloc_wq, &group->tiler_oom_work))
1486 		group_put(group);
1487 }
1488 
1489 static bool cs_slot_process_irq_locked(struct panthor_device *ptdev,
1490 				       u32 csg_id, u32 cs_id)
1491 {
1492 	struct panthor_fw_cs_iface *cs_iface;
1493 	u32 req, ack, events;
1494 
1495 	lockdep_assert_held(&ptdev->scheduler->lock);
1496 
1497 	cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1498 	req = cs_iface->input->req;
1499 	ack = cs_iface->output->ack;
1500 	events = (req ^ ack) & CS_EVT_MASK;
1501 
1502 	if (events & CS_FATAL)
1503 		cs_slot_process_fatal_event_locked(ptdev, csg_id, cs_id);
1504 
1505 	if (events & CS_FAULT)
1506 		cs_slot_process_fault_event_locked(ptdev, csg_id, cs_id);
1507 
1508 	if (events & CS_TILER_OOM)
1509 		cs_slot_process_tiler_oom_event_locked(ptdev, csg_id, cs_id);
1510 
1511 	/* We don't acknowledge the TILER_OOM event since its handling is
1512 	 * deferred to a separate work.
1513 	 */
1514 	panthor_fw_update_reqs(cs_iface, req, ack, CS_FATAL | CS_FAULT);
1515 
1516 	return (events & (CS_FAULT | CS_TILER_OOM)) != 0;
1517 }
1518 
1519 static void csg_slot_sync_idle_state_locked(struct panthor_device *ptdev, u32 csg_id)
1520 {
1521 	struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1522 	struct panthor_fw_csg_iface *csg_iface;
1523 
1524 	lockdep_assert_held(&ptdev->scheduler->lock);
1525 
1526 	csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1527 	csg_slot->idle = csg_iface->output->status_state & CSG_STATUS_STATE_IS_IDLE;
1528 }
1529 
1530 static void csg_slot_process_idle_event_locked(struct panthor_device *ptdev, u32 csg_id)
1531 {
1532 	struct panthor_scheduler *sched = ptdev->scheduler;
1533 
1534 	lockdep_assert_held(&sched->lock);
1535 
1536 	sched->might_have_idle_groups = true;
1537 
1538 	/* Schedule a tick so we can evict idle groups and schedule non-idle
1539 	 * ones. This will also update runtime PM and devfreq busy/idle states,
1540 	 * so the device can lower its frequency or get suspended.
1541 	 */
1542 	sched_queue_delayed_work(sched, tick, 0);
1543 }
1544 
1545 static void csg_slot_sync_update_locked(struct panthor_device *ptdev,
1546 					u32 csg_id)
1547 {
1548 	struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1549 	struct panthor_group *group = csg_slot->group;
1550 
1551 	lockdep_assert_held(&ptdev->scheduler->lock);
1552 
1553 	if (group)
1554 		group_queue_work(group, sync_upd);
1555 
1556 	sched_queue_work(ptdev->scheduler, sync_upd);
1557 }
1558 
1559 static void
1560 csg_slot_process_progress_timer_event_locked(struct panthor_device *ptdev, u32 csg_id)
1561 {
1562 	struct panthor_scheduler *sched = ptdev->scheduler;
1563 	struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
1564 	struct panthor_group *group = csg_slot->group;
1565 
1566 	lockdep_assert_held(&sched->lock);
1567 
1568 	drm_warn(&ptdev->base, "CSG slot %d progress timeout\n", csg_id);
1569 
1571 	if (!drm_WARN_ON(&ptdev->base, !group))
1572 		group->timedout = true;
1573 
1574 	sched_queue_delayed_work(sched, tick, 0);
1575 }
1576 
1577 static void sched_process_csg_irq_locked(struct panthor_device *ptdev, u32 csg_id)
1578 {
1579 	u32 req, ack, cs_irq_req, cs_irq_ack, cs_irqs, csg_events;
1580 	struct panthor_fw_csg_iface *csg_iface;
1581 	u32 ring_cs_db_mask = 0;
1582 
1583 	lockdep_assert_held(&ptdev->scheduler->lock);
1584 
1585 	if (drm_WARN_ON(&ptdev->base, csg_id >= ptdev->scheduler->csg_slot_count))
1586 		return;
1587 
1588 	csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1589 	req = READ_ONCE(csg_iface->input->req);
1590 	ack = READ_ONCE(csg_iface->output->ack);
1591 	cs_irq_req = READ_ONCE(csg_iface->output->cs_irq_req);
1592 	cs_irq_ack = READ_ONCE(csg_iface->input->cs_irq_ack);
1593 	csg_events = (req ^ ack) & CSG_EVT_MASK;
1594 
1595 	/* There may not be any pending CSG/CS interrupts to process */
1596 	if (req == ack && cs_irq_req == cs_irq_ack)
1597 		return;
1598 
1599 	/* Immediately set the IRQ_ACK bits to match the IRQ_REQ bits before
1600 	 * examining the CS_ACK & CS_REQ bits. This ensures the Host doesn't
1601 	 * miss an interrupt for a CS in the race scenario where the firmware
1602 	 * sends another interrupt for that CS while the Host is still
1603 	 * servicing the previous one.
1604 	 */
1605 	csg_iface->input->cs_irq_ack = cs_irq_req;
1606 
1607 	panthor_fw_update_reqs(csg_iface, req, ack,
1608 			       CSG_SYNC_UPDATE |
1609 			       CSG_IDLE |
1610 			       CSG_PROGRESS_TIMER_EVENT);
1611 
1612 	if (csg_events & CSG_IDLE)
1613 		csg_slot_process_idle_event_locked(ptdev, csg_id);
1614 
1615 	if (csg_events & CSG_PROGRESS_TIMER_EVENT)
1616 		csg_slot_process_progress_timer_event_locked(ptdev, csg_id);
1617 
1618 	cs_irqs = cs_irq_req ^ cs_irq_ack;
1619 	while (cs_irqs) {
1620 		u32 cs_id = ffs(cs_irqs) - 1;
1621 
1622 		if (cs_slot_process_irq_locked(ptdev, csg_id, cs_id))
1623 			ring_cs_db_mask |= BIT(cs_id);
1624 
1625 		cs_irqs &= ~BIT(cs_id);
1626 	}
1627 
1628 	if (csg_events & CSG_SYNC_UPDATE)
1629 		csg_slot_sync_update_locked(ptdev, csg_id);
1630 
1631 	if (ring_cs_db_mask)
1632 		panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, ring_cs_db_mask);
1633 
1634 	panthor_fw_ring_csg_doorbells(ptdev, BIT(csg_id));
1635 }
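
/*
 * Note on the req/ack scheme used above (simplified): FW-initiated events are
 * reported by making an output ack bit differ from the corresponding input
 * req bit, so
 *
 *	events = (req ^ ack) & EVT_MASK;
 *
 * yields the set of pending events, and copying the ack bits back into req,
 * e.g. with panthor_fw_update_reqs(iface, req, ack, handled_events) (iface
 * and handled_events being placeholders here), marks those events as
 * processed.
 */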
1636 
1637 static void sched_process_idle_event_locked(struct panthor_device *ptdev)
1638 {
1639 	struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
1640 
1641 	lockdep_assert_held(&ptdev->scheduler->lock);
1642 
1643 	/* Acknowledge the idle event and schedule a tick. */
1644 	panthor_fw_update_reqs(glb_iface, req, glb_iface->output->ack, GLB_IDLE);
1645 	sched_queue_delayed_work(ptdev->scheduler, tick, 0);
1646 }
1647 
1648 /**
1649  * sched_process_global_irq_locked() - Process the scheduling part of a global IRQ
1650  * @ptdev: Device.
1651  */
1652 static void sched_process_global_irq_locked(struct panthor_device *ptdev)
1653 {
1654 	struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
1655 	u32 req, ack, evts;
1656 
1657 	lockdep_assert_held(&ptdev->scheduler->lock);
1658 
1659 	req = READ_ONCE(glb_iface->input->req);
1660 	ack = READ_ONCE(glb_iface->output->ack);
1661 	evts = (req ^ ack) & GLB_EVT_MASK;
1662 
1663 	if (evts & GLB_IDLE)
1664 		sched_process_idle_event_locked(ptdev);
1665 }
1666 
1667 static void process_fw_events_work(struct work_struct *work)
1668 {
1669 	struct panthor_scheduler *sched = container_of(work, struct panthor_scheduler,
1670 						      fw_events_work);
1671 	u32 events = atomic_xchg(&sched->fw_events, 0);
1672 	struct panthor_device *ptdev = sched->ptdev;
1673 
1674 	mutex_lock(&sched->lock);
1675 
1676 	if (events & JOB_INT_GLOBAL_IF) {
1677 		sched_process_global_irq_locked(ptdev);
1678 		events &= ~JOB_INT_GLOBAL_IF;
1679 	}
1680 
1681 	while (events) {
1682 		u32 csg_id = ffs(events) - 1;
1683 
1684 		sched_process_csg_irq_locked(ptdev, csg_id);
1685 		events &= ~BIT(csg_id);
1686 	}
1687 
1688 	mutex_unlock(&sched->lock);
1689 }
1690 
1691 /**
1692  * panthor_sched_report_fw_events() - Report FW events to the scheduler.
 * @ptdev: Device.
 * @events: Bitmask of FW events to report.
1693  */
1694 void panthor_sched_report_fw_events(struct panthor_device *ptdev, u32 events)
1695 {
1696 	if (!ptdev->scheduler)
1697 		return;
1698 
1699 	atomic_or(events, &ptdev->scheduler->fw_events);
1700 	sched_queue_work(ptdev->scheduler, fw_events);
1701 }
1702 
1703 static const char *fence_get_driver_name(struct dma_fence *fence)
1704 {
1705 	return "panthor";
1706 }
1707 
1708 static const char *queue_fence_get_timeline_name(struct dma_fence *fence)
1709 {
1710 	return "queue-fence";
1711 }
1712 
1713 static const struct dma_fence_ops panthor_queue_fence_ops = {
1714 	.get_driver_name = fence_get_driver_name,
1715 	.get_timeline_name = queue_fence_get_timeline_name,
1716 };
1717 
1718 struct panthor_csg_slots_upd_ctx {
1719 	u32 update_mask;
1720 	u32 timedout_mask;
1721 	struct {
1722 		u32 value;
1723 		u32 mask;
1724 	} requests[MAX_CSGS];
1725 };
1726 
1727 static void csgs_upd_ctx_init(struct panthor_csg_slots_upd_ctx *ctx)
1728 {
1729 	memset(ctx, 0, sizeof(*ctx));
1730 }
1731 
1732 static void csgs_upd_ctx_queue_reqs(struct panthor_device *ptdev,
1733 				    struct panthor_csg_slots_upd_ctx *ctx,
1734 				    u32 csg_id, u32 value, u32 mask)
1735 {
1736 	if (drm_WARN_ON(&ptdev->base, !mask) ||
1737 	    drm_WARN_ON(&ptdev->base, csg_id >= ptdev->scheduler->csg_slot_count))
1738 		return;
1739 
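	/* Merge the new request bits into any update already queued for this slot. */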
1740 	ctx->requests[csg_id].value = (ctx->requests[csg_id].value & ~mask) | (value & mask);
1741 	ctx->requests[csg_id].mask |= mask;
1742 	ctx->update_mask |= BIT(csg_id);
1743 }
1744 
1745 static int csgs_upd_ctx_apply_locked(struct panthor_device *ptdev,
1746 				     struct panthor_csg_slots_upd_ctx *ctx)
1747 {
1748 	struct panthor_scheduler *sched = ptdev->scheduler;
1749 	u32 update_slots = ctx->update_mask;
1750 
1751 	lockdep_assert_held(&sched->lock);
1752 
1753 	if (!ctx->update_mask)
1754 		return 0;
1755 
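	/* First pass: push all queued request updates to the CSG interfaces. */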
1756 	while (update_slots) {
1757 		struct panthor_fw_csg_iface *csg_iface;
1758 		u32 csg_id = ffs(update_slots) - 1;
1759 
1760 		update_slots &= ~BIT(csg_id);
1761 		csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1762 		panthor_fw_update_reqs(csg_iface, req,
1763 				       ctx->requests[csg_id].value,
1764 				       ctx->requests[csg_id].mask);
1765 	}
1766 
1767 	panthor_fw_ring_csg_doorbells(ptdev, ctx->update_mask);
1768 
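	/* Second pass: wait for the FW to acknowledge each request and sync
	 * the kernel-side state of the updated slots.
	 */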
1769 	update_slots = ctx->update_mask;
1770 	while (update_slots) {
1771 		struct panthor_fw_csg_iface *csg_iface;
1772 		u32 csg_id = ffs(update_slots) - 1;
1773 		u32 req_mask = ctx->requests[csg_id].mask, acked;
1774 		int ret;
1775 
1776 		update_slots &= ~BIT(csg_id);
1777 		csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1778 
1779 		ret = panthor_fw_csg_wait_acks(ptdev, csg_id, req_mask, &acked, 100);
1780 
1781 		if (acked & CSG_ENDPOINT_CONFIG)
1782 			csg_slot_sync_priority_locked(ptdev, csg_id);
1783 
1784 		if (acked & CSG_STATE_MASK)
1785 			csg_slot_sync_state_locked(ptdev, csg_id);
1786 
1787 		if (acked & CSG_STATUS_UPDATE) {
1788 			csg_slot_sync_queues_state_locked(ptdev, csg_id);
1789 			csg_slot_sync_idle_state_locked(ptdev, csg_id);
1790 		}
1791 
1792 		if (ret && acked != req_mask &&
1793 		    ((csg_iface->input->req ^ csg_iface->output->ack) & req_mask) != 0) {
1794 			drm_err(&ptdev->base, "CSG %d update request timed out", csg_id);
1795 			ctx->timedout_mask |= BIT(csg_id);
1796 		}
1797 	}
1798 
1799 	if (ctx->timedout_mask)
1800 		return -ETIMEDOUT;
1801 
1802 	return 0;
1803 }
1804 
1805 struct panthor_sched_tick_ctx {
1806 	struct list_head old_groups[PANTHOR_CSG_PRIORITY_COUNT];
1807 	struct list_head groups[PANTHOR_CSG_PRIORITY_COUNT];
1808 	u32 idle_group_count;
1809 	u32 group_count;
1810 	enum panthor_csg_priority min_priority;
1811 	struct panthor_vm *vms[MAX_CS_PER_CSG];
1812 	u32 as_count;
1813 	bool immediate_tick;
1814 	u32 csg_upd_failed_mask;
1815 };
1816 
1817 static bool
1818 tick_ctx_is_full(const struct panthor_scheduler *sched,
1819 		 const struct panthor_sched_tick_ctx *ctx)
1820 {
1821 	return ctx->group_count == sched->csg_slot_count;
1822 }
1823 
1824 static bool
1825 group_is_idle(struct panthor_group *group)
1826 {
1827 	struct panthor_device *ptdev = group->ptdev;
1828 	u32 inactive_queues;
1829 
1830 	if (group->csg_id >= 0)
1831 		return ptdev->scheduler->csg_slots[group->csg_id].idle;
1832 
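	/* Not bound to a CSG slot: the group is idle if all its queues are
	 * either idle or blocked.
	 */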
1833 	inactive_queues = group->idle_queues | group->blocked_queues;
1834 	return hweight32(inactive_queues) == group->queue_count;
1835 }
1836 
1837 static bool
1838 group_can_run(struct panthor_group *group)
1839 {
1840 	return group->state != PANTHOR_CS_GROUP_TERMINATED &&
1841 	       group->state != PANTHOR_CS_GROUP_UNKNOWN_STATE &&
1842 	       !group->destroyed && group->fatal_queues == 0 &&
1843 	       !group->timedout;
1844 }
1845 
1846 static void
1847 tick_ctx_pick_groups_from_list(const struct panthor_scheduler *sched,
1848 			       struct panthor_sched_tick_ctx *ctx,
1849 			       struct list_head *queue,
1850 			       bool skip_idle_groups,
1851 			       bool owned_by_tick_ctx)
1852 {
1853 	struct panthor_group *group, *tmp;
1854 
1855 	if (tick_ctx_is_full(sched, ctx))
1856 		return;
1857 
1858 	list_for_each_entry_safe(group, tmp, queue, run_node) {
1859 		u32 i;
1860 
1861 		if (!group_can_run(group))
1862 			continue;
1863 
1864 		if (skip_idle_groups && group_is_idle(group))
1865 			continue;
1866 
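		/* Each group brings its VM's address space. Skip the group if
		 * its VM isn't already part of the tick context and all AS
		 * slots are already claimed.
		 */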
1867 		for (i = 0; i < ctx->as_count; i++) {
1868 			if (ctx->vms[i] == group->vm)
1869 				break;
1870 		}
1871 
1872 		if (i == ctx->as_count && ctx->as_count == sched->as_slot_count)
1873 			continue;
1874 
1875 		if (!owned_by_tick_ctx)
1876 			group_get(group);
1877 
1878 		list_move_tail(&group->run_node, &ctx->groups[group->priority]);
1879 		ctx->group_count++;
1880 		if (group_is_idle(group))
1881 			ctx->idle_group_count++;
1882 
1883 		if (i == ctx->as_count)
1884 			ctx->vms[ctx->as_count++] = group->vm;
1885 
1886 		if (ctx->min_priority > group->priority)
1887 			ctx->min_priority = group->priority;
1888 
1889 		if (tick_ctx_is_full(sched, ctx))
1890 			return;
1891 	}
1892 }
1893 
1894 static void
1895 tick_ctx_insert_old_group(struct panthor_scheduler *sched,
1896 			  struct panthor_sched_tick_ctx *ctx,
1897 			  struct panthor_group *group,
1898 			  bool full_tick)
1899 {
1900 	struct panthor_csg_slot *csg_slot = &sched->csg_slots[group->csg_id];
1901 	struct panthor_group *other_group;
1902 
1903 	if (!full_tick) {
1904 		list_add_tail(&group->run_node, &ctx->old_groups[group->priority]);
1905 		return;
1906 	}
1907 
1908 	/* Rotate to make sure groups with lower CSG slot
1909 	 * priorities have a chance to get a higher CSG slot
1910 	 * priority next time they get picked. This priority
1911 	 * has an impact on resource request ordering, so it's
1912 	 * important to make sure we don't let one group starve
1913 	 * all other groups with the same group priority.
1914 	 */
1915 	list_for_each_entry(other_group,
1916 			    &ctx->old_groups[csg_slot->group->priority],
1917 			    run_node) {
1918 		struct panthor_csg_slot *other_csg_slot = &sched->csg_slots[other_group->csg_id];
1919 
1920 		if (other_csg_slot->priority > csg_slot->priority) {
1921 			list_add_tail(&csg_slot->group->run_node, &other_group->run_node);
1922 			return;
1923 		}
1924 	}
1925 
1926 	list_add_tail(&group->run_node, &ctx->old_groups[group->priority]);
1927 }
1928 
1929 static void
1930 tick_ctx_init(struct panthor_scheduler *sched,
1931 	      struct panthor_sched_tick_ctx *ctx,
1932 	      bool full_tick)
1933 {
1934 	struct panthor_device *ptdev = sched->ptdev;
1935 	struct panthor_csg_slots_upd_ctx upd_ctx;
1936 	int ret;
1937 	u32 i;
1938 
1939 	memset(ctx, 0, sizeof(*ctx));
1940 	csgs_upd_ctx_init(&upd_ctx);
1941 
1942 	ctx->min_priority = PANTHOR_CSG_PRIORITY_COUNT;
1943 	for (i = 0; i < ARRAY_SIZE(ctx->groups); i++) {
1944 		INIT_LIST_HEAD(&ctx->groups[i]);
1945 		INIT_LIST_HEAD(&ctx->old_groups[i]);
1946 	}
1947 
1948 	for (i = 0; i < sched->csg_slot_count; i++) {
1949 		struct panthor_csg_slot *csg_slot = &sched->csg_slots[i];
1950 		struct panthor_group *group = csg_slot->group;
1951 		struct panthor_fw_csg_iface *csg_iface;
1952 
1953 		if (!group)
1954 			continue;
1955 
1956 		csg_iface = panthor_fw_get_csg_iface(ptdev, i);
1957 		group_get(group);
1958 
1959 		/* If there were unhandled faults on the VM, force processing of
1960 		 * CSG IRQs so we can flag the faulty queue.
1961 		 */
1962 		if (panthor_vm_has_unhandled_faults(group->vm)) {
1963 			sched_process_csg_irq_locked(ptdev, i);
1964 
1965 			/* No fatal fault reported, flag all queues as faulty. */
1966 			if (!group->fatal_queues)
1967 				group->fatal_queues |= GENMASK(group->queue_count - 1, 0);
1968 		}
1969 
1970 		tick_ctx_insert_old_group(sched, ctx, group, full_tick);
1971 		csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, i,
1972 					csg_iface->output->ack ^ CSG_STATUS_UPDATE,
1973 					CSG_STATUS_UPDATE);
1974 	}
1975 
1976 	ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
1977 	if (ret) {
1978 		panthor_device_schedule_reset(ptdev);
1979 		ctx->csg_upd_failed_mask |= upd_ctx.timedout_mask;
1980 	}
1981 }
1982 
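/* Each job occupies a fixed-size slot of NUM_INSTRS_PER_SLOT 64-bit
 * instructions in the queue ring buffer (see the call_instrs sequence
 * built in queue_run_job()).
 */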
1983 #define NUM_INSTRS_PER_SLOT		16
1984 
1985 static void
1986 group_term_post_processing(struct panthor_group *group)
1987 {
1988 	struct panthor_job *job, *tmp;
1989 	LIST_HEAD(faulty_jobs);
1990 	bool cookie;
1991 	u32 i = 0;
1992 
1993 	if (drm_WARN_ON(&group->ptdev->base, group_can_run(group)))
1994 		return;
1995 
1996 	cookie = dma_fence_begin_signalling();
1997 	for (i = 0; i < group->queue_count; i++) {
1998 		struct panthor_queue *queue = group->queues[i];
1999 		struct panthor_syncobj_64b *syncobj;
2000 		int err;
2001 
2002 		if (group->fatal_queues & BIT(i))
2003 			err = -EINVAL;
2004 		else if (group->timedout)
2005 			err = -ETIMEDOUT;
2006 		else
2007 			err = -ECANCELED;
2008 
2009 		if (!queue)
2010 			continue;
2011 
2012 		spin_lock(&queue->fence_ctx.lock);
2013 		list_for_each_entry_safe(job, tmp, &queue->fence_ctx.in_flight_jobs, node) {
2014 			list_move_tail(&job->node, &faulty_jobs);
2015 			dma_fence_set_error(job->done_fence, err);
2016 			dma_fence_signal_locked(job->done_fence);
2017 		}
2018 		spin_unlock(&queue->fence_ctx.lock);
2019 
2020 		/* Manually update the syncobj seqno to unblock waiters. */
2021 		syncobj = group->syncobjs->kmap + (i * sizeof(*syncobj));
2022 		syncobj->status = ~0;
2023 		syncobj->seqno = atomic64_read(&queue->fence_ctx.seqno);
2024 		sched_queue_work(group->ptdev->scheduler, sync_upd);
2025 	}
2026 	dma_fence_end_signalling(cookie);
2027 
2028 	list_for_each_entry_safe(job, tmp, &faulty_jobs, node) {
2029 		list_del_init(&job->node);
2030 		panthor_job_put(&job->base);
2031 	}
2032 }
2033 
2034 static void group_term_work(struct work_struct *work)
2035 {
2036 	struct panthor_group *group =
2037 		container_of(work, struct panthor_group, term_work);
2038 
2039 	group_term_post_processing(group);
2040 	group_put(group);
2041 }
2042 
2043 static void
2044 tick_ctx_cleanup(struct panthor_scheduler *sched,
2045 		 struct panthor_sched_tick_ctx *ctx)
2046 {
2047 	struct panthor_group *group, *tmp;
2048 	u32 i;
2049 
2050 	for (i = 0; i < ARRAY_SIZE(ctx->old_groups); i++) {
2051 		list_for_each_entry_safe(group, tmp, &ctx->old_groups[i], run_node) {
2052 			/* If everything went fine, we should only have groups
2053 			 * to be terminated in the old_groups lists.
2054 			 */
2055 			drm_WARN_ON(&group->ptdev->base, !ctx->csg_upd_failed_mask &&
2056 				    group_can_run(group));
2057 
2058 			if (!group_can_run(group)) {
2059 				list_del_init(&group->run_node);
2060 				list_del_init(&group->wait_node);
2061 				group_queue_work(group, term);
2062 			} else if (group->csg_id >= 0) {
2063 				list_del_init(&group->run_node);
2064 			} else {
2065 				list_move(&group->run_node,
2066 					  group_is_idle(group) ?
2067 					  &sched->groups.idle[group->priority] :
2068 					  &sched->groups.runnable[group->priority]);
2069 			}
2070 			group_put(group);
2071 		}
2072 	}
2073 
2074 	for (i = 0; i < ARRAY_SIZE(ctx->groups); i++) {
2075 		/* If everything went fine, the groups to schedule lists should
2076 		 * be empty.
2077 		 */
2078 		drm_WARN_ON(&sched->ptdev->base,
2079 			    !ctx->csg_upd_failed_mask && !list_empty(&ctx->groups[i]));
2080 
2081 		list_for_each_entry_safe(group, tmp, &ctx->groups[i], run_node) {
2082 			if (group->csg_id >= 0) {
2083 				list_del_init(&group->run_node);
2084 			} else {
2085 				list_move(&group->run_node,
2086 					  group_is_idle(group) ?
2087 					  &sched->groups.idle[group->priority] :
2088 					  &sched->groups.runnable[group->priority]);
2089 			}
2090 			group_put(group);
2091 		}
2092 	}
2093 }
2094 
2095 static void
2096 tick_ctx_apply(struct panthor_scheduler *sched, struct panthor_sched_tick_ctx *ctx)
2097 {
2098 	struct panthor_group *group, *tmp;
2099 	struct panthor_device *ptdev = sched->ptdev;
2100 	struct panthor_csg_slot *csg_slot;
2101 	int prio, new_csg_prio = MAX_CSG_PRIO, i;
2102 	u32 free_csg_slots = 0;
2103 	struct panthor_csg_slots_upd_ctx upd_ctx;
2104 	int ret;
2105 
2106 	csgs_upd_ctx_init(&upd_ctx);
2107 
2108 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
2109 		/* Suspend or terminate evicted groups. */
2110 		list_for_each_entry(group, &ctx->old_groups[prio], run_node) {
2111 			bool term = !group_can_run(group);
2112 			int csg_id = group->csg_id;
2113 
2114 			if (drm_WARN_ON(&ptdev->base, csg_id < 0))
2115 				continue;
2116 
2117 			csg_slot = &sched->csg_slots[csg_id];
2118 			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
2119 						term ? CSG_STATE_TERMINATE : CSG_STATE_SUSPEND,
2120 						CSG_STATE_MASK);
2121 		}
2122 
2123 		/* Update priorities on already running groups. */
2124 		list_for_each_entry(group, &ctx->groups[prio], run_node) {
2125 			struct panthor_fw_csg_iface *csg_iface;
2126 			int csg_id = group->csg_id;
2127 
2128 			if (csg_id < 0) {
2129 				new_csg_prio--;
2130 				continue;
2131 			}
2132 
2133 			csg_slot = &sched->csg_slots[csg_id];
2134 			csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
2135 			if (csg_slot->priority == new_csg_prio) {
2136 				new_csg_prio--;
2137 				continue;
2138 			}
2139 
2140 			panthor_fw_update_reqs(csg_iface, endpoint_req,
2141 					       CSG_EP_REQ_PRIORITY(new_csg_prio),
2142 					       CSG_EP_REQ_PRIORITY_MASK);
2143 			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
2144 						csg_iface->output->ack ^ CSG_ENDPOINT_CONFIG,
2145 						CSG_ENDPOINT_CONFIG);
2146 			new_csg_prio--;
2147 		}
2148 	}
2149 
2150 	ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
2151 	if (ret) {
2152 		panthor_device_schedule_reset(ptdev);
2153 		ctx->csg_upd_failed_mask |= upd_ctx.timedout_mask;
2154 		return;
2155 	}
2156 
2157 	/* Unbind evicted groups. */
2158 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
2159 		list_for_each_entry(group, &ctx->old_groups[prio], run_node) {
2160 			/* This group is gone. Process any interrupts still
2161 			 * pending on its CSG slot before the slot is reused
2162 			 * by a new group.
2163 			 */
2164 			if (group->csg_id >= 0)
2165 				sched_process_csg_irq_locked(ptdev, group->csg_id);
2166 
2167 			group_unbind_locked(group);
2168 		}
2169 	}
2170 
2171 	for (i = 0; i < sched->csg_slot_count; i++) {
2172 		if (!sched->csg_slots[i].group)
2173 			free_csg_slots |= BIT(i);
2174 	}
2175 
2176 	csgs_upd_ctx_init(&upd_ctx);
2177 	new_csg_prio = MAX_CSG_PRIO;
2178 
2179 	/* Start new groups. */
2180 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
2181 		list_for_each_entry(group, &ctx->groups[prio], run_node) {
2182 			int csg_id = group->csg_id;
2183 			struct panthor_fw_csg_iface *csg_iface;
2184 
2185 			if (csg_id >= 0) {
2186 				new_csg_prio--;
2187 				continue;
2188 			}
2189 
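			/* Pick the lowest-numbered free CSG slot for this group. */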
2190 			csg_id = ffs(free_csg_slots) - 1;
2191 			if (drm_WARN_ON(&ptdev->base, csg_id < 0))
2192 				break;
2193 
2194 			csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
2195 			csg_slot = &sched->csg_slots[csg_id];
2196 			group_bind_locked(group, csg_id);
2197 			csg_slot_prog_locked(ptdev, csg_id, new_csg_prio--);
2198 			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
2199 						group->state == PANTHOR_CS_GROUP_SUSPENDED ?
2200 						CSG_STATE_RESUME : CSG_STATE_START,
2201 						CSG_STATE_MASK);
2202 			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
2203 						csg_iface->output->ack ^ CSG_ENDPOINT_CONFIG,
2204 						CSG_ENDPOINT_CONFIG);
2205 			free_csg_slots &= ~BIT(csg_id);
2206 		}
2207 	}
2208 
2209 	ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
2210 	if (ret) {
2211 		panthor_device_schedule_reset(ptdev);
2212 		ctx->csg_upd_failed_mask |= upd_ctx.timedout_mask;
2213 		return;
2214 	}
2215 
2216 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
2217 		list_for_each_entry_safe(group, tmp, &ctx->groups[prio], run_node) {
2218 			list_del_init(&group->run_node);
2219 
2220 			/* If the group has been destroyed while we were
2221 			 * scheduling, ask for an immediate tick to
2222 			 * re-evaluate as soon as possible and get rid of
2223 			 * this dangling group.
2224 			 */
2225 			if (group->destroyed)
2226 				ctx->immediate_tick = true;
2227 			group_put(group);
2228 		}
2229 
2230 		/* Return evicted groups to the idle or run queues. Groups
2231 		 * that can no longer be run (because they've been destroyed
2232 		 * or experienced an unrecoverable error) will be scheduled
2233 		 * for destruction in tick_ctx_cleanup().
2234 		 */
2235 		list_for_each_entry_safe(group, tmp, &ctx->old_groups[prio], run_node) {
2236 			if (!group_can_run(group))
2237 				continue;
2238 
2239 			if (group_is_idle(group))
2240 				list_move_tail(&group->run_node, &sched->groups.idle[prio]);
2241 			else
2242 				list_move_tail(&group->run_node, &sched->groups.runnable[prio]);
2243 			group_put(group);
2244 		}
2245 	}
2246 
2247 	sched->used_csg_slot_count = ctx->group_count;
2248 	sched->might_have_idle_groups = ctx->idle_group_count > 0;
2249 }
2250 
2251 static u64
2252 tick_ctx_update_resched_target(struct panthor_scheduler *sched,
2253 			       const struct panthor_sched_tick_ctx *ctx)
2254 {
2255 	/* We had space left, no need to reschedule until some external event happens. */
2256 	if (!tick_ctx_is_full(sched, ctx))
2257 		goto no_tick;
2258 
2259 	/* If idle groups were scheduled, no need to wake up until some external
2260 	 * event happens (group unblocked, new job submitted, ...).
2261 	 */
2262 	if (ctx->idle_group_count)
2263 		goto no_tick;
2264 
2265 	if (drm_WARN_ON(&sched->ptdev->base, ctx->min_priority >= PANTHOR_CSG_PRIORITY_COUNT))
2266 		goto no_tick;
2267 
2268 	/* If there are groups of the same priority waiting, we need to
2269 	 * keep the scheduler ticking. Otherwise, we'll just wait for
2270 	 * new groups with higher priority to be queued.
2271 	 */
2272 	if (!list_empty(&sched->groups.runnable[ctx->min_priority])) {
2273 		u64 resched_target = sched->last_tick + sched->tick_period;
2274 
2275 		if (time_before64(sched->resched_target, sched->last_tick) ||
2276 		    time_before64(resched_target, sched->resched_target))
2277 			sched->resched_target = resched_target;
2278 
2279 		return sched->resched_target - sched->last_tick;
2280 	}
2281 
2282 no_tick:
2283 	sched->resched_target = U64_MAX;
2284 	return U64_MAX;
2285 }
2286 
2287 static void tick_work(struct work_struct *work)
2288 {
2289 	struct panthor_scheduler *sched = container_of(work, struct panthor_scheduler,
2290 						      tick_work.work);
2291 	struct panthor_device *ptdev = sched->ptdev;
2292 	struct panthor_sched_tick_ctx ctx;
2293 	u64 remaining_jiffies = 0, resched_delay;
2294 	u64 now = get_jiffies_64();
2295 	int prio, ret, cookie;
2296 
2297 	if (!drm_dev_enter(&ptdev->base, &cookie))
2298 		return;
2299 
2300 	ret = pm_runtime_resume_and_get(ptdev->base.dev);
2301 	if (drm_WARN_ON(&ptdev->base, ret))
2302 		goto out_dev_exit;
2303 
2304 	if (time_before64(now, sched->resched_target))
2305 		remaining_jiffies = sched->resched_target - now;
2306 
2307 	mutex_lock(&sched->lock);
2308 	if (panthor_device_reset_is_pending(sched->ptdev))
2309 		goto out_unlock;
2310 
2311 	tick_ctx_init(sched, &ctx, remaining_jiffies != 0);
2312 	if (ctx.csg_upd_failed_mask)
2313 		goto out_cleanup_ctx;
2314 
2315 	if (remaining_jiffies) {
2316 		/* Scheduling forced in the middle of a tick. Only RT groups
2317 		 * can preempt non-RT ones. Currently running RT groups can't be
2318 		 * preempted.
2319 		 */
2320 		for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1;
2321 		     prio >= 0 && !tick_ctx_is_full(sched, &ctx);
2322 		     prio--) {
2323 			tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio],
2324 						       true, true);
2325 			if (prio == PANTHOR_CSG_PRIORITY_RT) {
2326 				tick_ctx_pick_groups_from_list(sched, &ctx,
2327 							       &sched->groups.runnable[prio],
2328 							       true, false);
2329 			}
2330 		}
2331 	}
2332 
2333 	/* First pick non-idle groups */
2334 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1;
2335 	     prio >= 0 && !tick_ctx_is_full(sched, &ctx);
2336 	     prio--) {
2337 		tick_ctx_pick_groups_from_list(sched, &ctx, &sched->groups.runnable[prio],
2338 					       true, false);
2339 		tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], true, true);
2340 	}
2341 
2342 	/* If we have free CSG slots left, pick idle groups */
2343 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1;
2344 	     prio >= 0 && !tick_ctx_is_full(sched, &ctx);
2345 	     prio--) {
2346 		/* Check the old_groups list first to avoid reprogramming the slots. */
2347 		tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], false, true);
2348 		tick_ctx_pick_groups_from_list(sched, &ctx, &sched->groups.idle[prio],
2349 					       false, false);
2350 	}
2351 
2352 	tick_ctx_apply(sched, &ctx);
2353 	if (ctx.csg_upd_failed_mask)
2354 		goto out_cleanup_ctx;
2355 
2356 	if (ctx.idle_group_count == ctx.group_count) {
2357 		panthor_devfreq_record_idle(sched->ptdev);
2358 		if (sched->pm.has_ref) {
2359 			pm_runtime_put_autosuspend(ptdev->base.dev);
2360 			sched->pm.has_ref = false;
2361 		}
2362 	} else {
2363 		panthor_devfreq_record_busy(sched->ptdev);
2364 		if (!sched->pm.has_ref) {
2365 			pm_runtime_get(ptdev->base.dev);
2366 			sched->pm.has_ref = true;
2367 		}
2368 	}
2369 
2370 	sched->last_tick = now;
2371 	resched_delay = tick_ctx_update_resched_target(sched, &ctx);
2372 	if (ctx.immediate_tick)
2373 		resched_delay = 0;
2374 
2375 	if (resched_delay != U64_MAX)
2376 		sched_queue_delayed_work(sched, tick, resched_delay);
2377 
2378 out_cleanup_ctx:
2379 	tick_ctx_cleanup(sched, &ctx);
2380 
2381 out_unlock:
2382 	mutex_unlock(&sched->lock);
2383 	pm_runtime_mark_last_busy(ptdev->base.dev);
2384 	pm_runtime_put_autosuspend(ptdev->base.dev);
2385 
2386 out_dev_exit:
2387 	drm_dev_exit(cookie);
2388 }
2389 
2390 static int panthor_queue_eval_syncwait(struct panthor_group *group, u8 queue_idx)
2391 {
2392 	struct panthor_queue *queue = group->queues[queue_idx];
2393 	union {
2394 		struct panthor_syncobj_64b sync64;
2395 		struct panthor_syncobj_32b sync32;
2396 	} *syncobj;
2397 	bool result;
2398 	u64 value;
2399 
2400 	syncobj = panthor_queue_get_syncwait_obj(group, queue);
2401 	if (!syncobj)
2402 		return -EINVAL;
2403 
2404 	value = queue->syncwait.sync64 ?
2405 		syncobj->sync64.seqno :
2406 		syncobj->sync32.seqno;
2407 
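	/* The wait condition is either 'greater than' or 'less than or equal'. */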
2408 	if (queue->syncwait.gt)
2409 		result = value > queue->syncwait.ref;
2410 	else
2411 		result = value <= queue->syncwait.ref;
2412 
2413 	if (result)
2414 		panthor_queue_put_syncwait_obj(queue);
2415 
2416 	return result;
2417 }
2418 
2419 static void sync_upd_work(struct work_struct *work)
2420 {
2421 	struct panthor_scheduler *sched = container_of(work,
2422 						      struct panthor_scheduler,
2423 						      sync_upd_work);
2424 	struct panthor_group *group, *tmp;
2425 	bool immediate_tick = false;
2426 
2427 	mutex_lock(&sched->lock);
2428 	list_for_each_entry_safe(group, tmp, &sched->groups.waiting, wait_node) {
2429 		u32 tested_queues = group->blocked_queues;
2430 		u32 unblocked_queues = 0;
2431 
2432 		while (tested_queues) {
2433 			u32 cs_id = ffs(tested_queues) - 1;
2434 			int ret;
2435 
2436 			ret = panthor_queue_eval_syncwait(group, cs_id);
2437 			drm_WARN_ON(&group->ptdev->base, ret < 0);
2438 			if (ret)
2439 				unblocked_queues |= BIT(cs_id);
2440 
2441 			tested_queues &= ~BIT(cs_id);
2442 		}
2443 
2444 		if (unblocked_queues) {
2445 			group->blocked_queues &= ~unblocked_queues;
2446 
2447 			if (group->csg_id < 0) {
2448 				list_move(&group->run_node,
2449 					  &sched->groups.runnable[group->priority]);
2450 				if (group->priority == PANTHOR_CSG_PRIORITY_RT)
2451 					immediate_tick = true;
2452 			}
2453 		}
2454 
2455 		if (!group->blocked_queues)
2456 			list_del_init(&group->wait_node);
2457 	}
2458 	mutex_unlock(&sched->lock);
2459 
2460 	if (immediate_tick)
2461 		sched_queue_delayed_work(sched, tick, 0);
2462 }
2463 
2464 static void group_schedule_locked(struct panthor_group *group, u32 queue_mask)
2465 {
2466 	struct panthor_device *ptdev = group->ptdev;
2467 	struct panthor_scheduler *sched = ptdev->scheduler;
2468 	struct list_head *queue = &sched->groups.runnable[group->priority];
2469 	u64 delay_jiffies = 0;
2470 	bool was_idle;
2471 	u64 now;
2472 
2473 	if (!group_can_run(group))
2474 		return;
2475 
2476 	/* All updated queues are blocked, no need to wake up the scheduler. */
2477 	if ((queue_mask & group->blocked_queues) == queue_mask)
2478 		return;
2479 
2480 	was_idle = group_is_idle(group);
2481 	group->idle_queues &= ~queue_mask;
2482 
2483 	/* Don't mess with the lists if we're in the middle of a reset. */
2484 	if (atomic_read(&sched->reset.in_progress))
2485 		return;
2486 
2487 	if (was_idle && !group_is_idle(group))
2488 		list_move_tail(&group->run_node, queue);
2489 
2490 	/* RT groups are preemptive. */
2491 	if (group->priority == PANTHOR_CSG_PRIORITY_RT) {
2492 		sched_queue_delayed_work(sched, tick, 0);
2493 		return;
2494 	}
2495 
2496 	/* Some groups might be idle, force an immediate tick to
2497 	 * re-evaluate.
2498 	 */
2499 	if (sched->might_have_idle_groups) {
2500 		sched_queue_delayed_work(sched, tick, 0);
2501 		return;
2502 	}
2503 
2504 	/* Scheduler is ticking, nothing to do. */
2505 	if (sched->resched_target != U64_MAX) {
2506 		/* If there are free slots, force an immediate tick. */
2507 		if (sched->used_csg_slot_count < sched->csg_slot_count)
2508 			sched_queue_delayed_work(sched, tick, 0);
2509 
2510 		return;
2511 	}
2512 
2513 	/* Scheduler tick was off, recalculate the resched_target based on the
2514 	 * last tick event, and queue the scheduler work.
2515 	 */
2516 	now = get_jiffies_64();
2517 	sched->resched_target = sched->last_tick + sched->tick_period;
2518 	if (sched->used_csg_slot_count == sched->csg_slot_count &&
2519 	    time_before64(now, sched->resched_target))
2520 		delay_jiffies = min_t(unsigned long, sched->resched_target - now, ULONG_MAX);
2521 
2522 	sched_queue_delayed_work(sched, tick, delay_jiffies);
2523 }
2524 
2525 static void queue_stop(struct panthor_queue *queue,
2526 		       struct panthor_job *bad_job)
2527 {
2528 	drm_sched_stop(&queue->scheduler, bad_job ? &bad_job->base : NULL);
2529 }
2530 
2531 static void queue_start(struct panthor_queue *queue)
2532 {
2533 	struct panthor_job *job;
2534 
2535 	/* Re-assign the parent fences. */
2536 	list_for_each_entry(job, &queue->scheduler.pending_list, base.list)
2537 		job->base.s_fence->parent = dma_fence_get(job->done_fence);
2538 
2539 	drm_sched_start(&queue->scheduler, 0);
2540 }
2541 
2542 static void panthor_group_stop(struct panthor_group *group)
2543 {
2544 	struct panthor_scheduler *sched = group->ptdev->scheduler;
2545 
2546 	lockdep_assert_held(&sched->reset.lock);
2547 
2548 	for (u32 i = 0; i < group->queue_count; i++)
2549 		queue_stop(group->queues[i], NULL);
2550 
2551 	group_get(group);
2552 	list_move_tail(&group->run_node, &sched->reset.stopped_groups);
2553 }
2554 
2555 static void panthor_group_start(struct panthor_group *group)
2556 {
2557 	struct panthor_scheduler *sched = group->ptdev->scheduler;
2558 
2559 	lockdep_assert_held(&group->ptdev->scheduler->reset.lock);
2560 
2561 	for (u32 i = 0; i < group->queue_count; i++)
2562 		queue_start(group->queues[i]);
2563 
2564 	if (group_can_run(group)) {
2565 		list_move_tail(&group->run_node,
2566 			       group_is_idle(group) ?
2567 			       &sched->groups.idle[group->priority] :
2568 			       &sched->groups.runnable[group->priority]);
2569 	} else {
2570 		list_del_init(&group->run_node);
2571 		list_del_init(&group->wait_node);
2572 		group_queue_work(group, term);
2573 	}
2574 
2575 	group_put(group);
2576 }
2577 
2578 static void panthor_sched_immediate_tick(struct panthor_device *ptdev)
2579 {
2580 	struct panthor_scheduler *sched = ptdev->scheduler;
2581 
2582 	sched_queue_delayed_work(sched, tick, 0);
2583 }
2584 
2585 /**
2586  * panthor_sched_report_mmu_fault() - Report MMU faults to the scheduler.
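 * @ptdev: Device.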
2587  */
2588 void panthor_sched_report_mmu_fault(struct panthor_device *ptdev)
2589 {
2590 	/* Force a tick to immediately kill faulty groups. */
2591 	if (ptdev->scheduler)
2592 		panthor_sched_immediate_tick(ptdev);
2593 }
2594 
2595 void panthor_sched_resume(struct panthor_device *ptdev)
2596 {
2597 	/* Force a tick to re-evaluate after a resume. */
2598 	panthor_sched_immediate_tick(ptdev);
2599 }
2600 
2601 void panthor_sched_suspend(struct panthor_device *ptdev)
2602 {
2603 	struct panthor_scheduler *sched = ptdev->scheduler;
2604 	struct panthor_csg_slots_upd_ctx upd_ctx;
2605 	struct panthor_group *group;
2606 	u32 suspended_slots;
2607 	u32 i;
2608 
2609 	mutex_lock(&sched->lock);
2610 	csgs_upd_ctx_init(&upd_ctx);
2611 	for (i = 0; i < sched->csg_slot_count; i++) {
2612 		struct panthor_csg_slot *csg_slot = &sched->csg_slots[i];
2613 
2614 		if (csg_slot->group) {
2615 			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, i,
2616 						group_can_run(csg_slot->group) ?
2617 						CSG_STATE_SUSPEND : CSG_STATE_TERMINATE,
2618 						CSG_STATE_MASK);
2619 		}
2620 	}
2621 
2622 	suspended_slots = upd_ctx.update_mask;
2623 
2624 	csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
2625 	suspended_slots &= ~upd_ctx.timedout_mask;
2626 
2627 	if (upd_ctx.timedout_mask) {
2628 		u32 slot_mask = upd_ctx.timedout_mask;
2629 
2630 		drm_err(&ptdev->base, "CSG suspend failed, escalating to termination");
2631 		csgs_upd_ctx_init(&upd_ctx);
2632 		while (slot_mask) {
2633 			u32 csg_id = ffs(slot_mask) - 1;
2634 
2635 			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
2636 						CSG_STATE_TERMINATE,
2637 						CSG_STATE_MASK);
2638 			slot_mask &= ~BIT(csg_id);
2639 		}
2640 
2641 		csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
2642 
2643 		slot_mask = upd_ctx.timedout_mask;
2644 		while (slot_mask) {
2645 			u32 csg_id = ffs(slot_mask) - 1;
2646 			struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
2647 
2648 			/* The terminate command timed out, but the soft-reset will
2649 			 * automatically terminate all active groups, so let's
2650 			 * force the state to terminated here.
2651 			 */
2652 			if (csg_slot->group->state != PANTHOR_CS_GROUP_TERMINATED)
2653 				csg_slot->group->state = PANTHOR_CS_GROUP_TERMINATED;
2654 			slot_mask &= ~BIT(csg_id);
2655 		}
2656 	}
2657 
2658 	/* Flush L2 and LSC caches to make sure suspend state is up-to-date.
2659 	 * If the flush fails, flag all queues for termination.
2660 	 */
2661 	if (suspended_slots) {
2662 		bool flush_caches_failed = false;
2663 		u32 slot_mask = suspended_slots;
2664 
2665 		if (panthor_gpu_flush_caches(ptdev, CACHE_CLEAN, CACHE_CLEAN, 0))
2666 			flush_caches_failed = true;
2667 
2668 		while (slot_mask) {
2669 			u32 csg_id = ffs(slot_mask) - 1;
2670 			struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
2671 
2672 			if (flush_caches_failed)
2673 				csg_slot->group->state = PANTHOR_CS_GROUP_TERMINATED;
2674 			else
2675 				csg_slot_sync_update_locked(ptdev, csg_id);
2676 
2677 			slot_mask &= ~BIT(csg_id);
2678 		}
2679 	}
2680 
2681 	for (i = 0; i < sched->csg_slot_count; i++) {
2682 		struct panthor_csg_slot *csg_slot = &sched->csg_slots[i];
2683 
2684 		group = csg_slot->group;
2685 		if (!group)
2686 			continue;
2687 
2688 		group_get(group);
2689 
2690 		if (group->csg_id >= 0)
2691 			sched_process_csg_irq_locked(ptdev, group->csg_id);
2692 
2693 		group_unbind_locked(group);
2694 
2695 		drm_WARN_ON(&group->ptdev->base, !list_empty(&group->run_node));
2696 
2697 		if (group_can_run(group)) {
2698 			list_add(&group->run_node,
2699 				 &sched->groups.idle[group->priority]);
2700 		} else {
2701 			/* We don't bother stopping the scheduler if the group is
2702 			 * faulty, the group termination work will finish the job.
2703 			 */
2704 			list_del_init(&group->wait_node);
2705 			group_queue_work(group, term);
2706 		}
2707 		group_put(group);
2708 	}
2709 	mutex_unlock(&sched->lock);
2710 }
2711 
2712 void panthor_sched_pre_reset(struct panthor_device *ptdev)
2713 {
2714 	struct panthor_scheduler *sched = ptdev->scheduler;
2715 	struct panthor_group *group, *group_tmp;
2716 	u32 i;
2717 
2718 	mutex_lock(&sched->reset.lock);
2719 	atomic_set(&sched->reset.in_progress, true);
2720 
2721 	/* Cancel all scheduler works. Once this is done, these works can't be
2722 	 * scheduled again until the reset operation is complete.
2723 	 */
2724 	cancel_work_sync(&sched->sync_upd_work);
2725 	cancel_delayed_work_sync(&sched->tick_work);
2726 
2727 	panthor_sched_suspend(ptdev);
2728 
2729 	/* Stop all groups that might still accept jobs, so we don't get passed
2730 	 * new jobs while we're resetting.
2731 	 */
2732 	for (i = 0; i < ARRAY_SIZE(sched->groups.runnable); i++) {
2733 		/* All groups should be in the idle lists. */
2734 		drm_WARN_ON(&ptdev->base, !list_empty(&sched->groups.runnable[i]));
2735 		list_for_each_entry_safe(group, group_tmp, &sched->groups.runnable[i], run_node)
2736 			panthor_group_stop(group);
2737 	}
2738 
2739 	for (i = 0; i < ARRAY_SIZE(sched->groups.idle); i++) {
2740 		list_for_each_entry_safe(group, group_tmp, &sched->groups.idle[i], run_node)
2741 			panthor_group_stop(group);
2742 	}
2743 
2744 	mutex_unlock(&sched->reset.lock);
2745 }
2746 
2747 void panthor_sched_post_reset(struct panthor_device *ptdev, bool reset_failed)
2748 {
2749 	struct panthor_scheduler *sched = ptdev->scheduler;
2750 	struct panthor_group *group, *group_tmp;
2751 
2752 	mutex_lock(&sched->reset.lock);
2753 
2754 	list_for_each_entry_safe(group, group_tmp, &sched->reset.stopped_groups, run_node) {
2755 		/* Consider all previously running groups as terminated if the
2756 		 * reset failed.
2757 		 */
2758 		if (reset_failed)
2759 			group->state = PANTHOR_CS_GROUP_TERMINATED;
2760 
2761 		panthor_group_start(group);
2762 	}
2763 
2764 	/* We're done resetting the GPU, clear the reset.in_progress bit so we can
2765 	 * kick the scheduler.
2766 	 */
2767 	atomic_set(&sched->reset.in_progress, false);
2768 	mutex_unlock(&sched->reset.lock);
2769 
2770 	/* No need to queue a tick and update syncs if the reset failed. */
2771 	if (!reset_failed) {
2772 		sched_queue_delayed_work(sched, tick, 0);
2773 		sched_queue_work(sched, sync_upd);
2774 	}
2775 }
2776 
2777 static void group_sync_upd_work(struct work_struct *work)
2778 {
2779 	struct panthor_group *group =
2780 		container_of(work, struct panthor_group, sync_upd_work);
2781 	struct panthor_job *job, *job_tmp;
2782 	LIST_HEAD(done_jobs);
2783 	u32 queue_idx;
2784 	bool cookie;
2785 
2786 	cookie = dma_fence_begin_signalling();
2787 	for (queue_idx = 0; queue_idx < group->queue_count; queue_idx++) {
2788 		struct panthor_queue *queue = group->queues[queue_idx];
2789 		struct panthor_syncobj_64b *syncobj;
2790 
2791 		if (!queue)
2792 			continue;
2793 
2794 		syncobj = group->syncobjs->kmap + (queue_idx * sizeof(*syncobj));
2795 
2796 		spin_lock(&queue->fence_ctx.lock);
2797 		list_for_each_entry_safe(job, job_tmp, &queue->fence_ctx.in_flight_jobs, node) {
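			/* In-flight jobs are seqno-ordered: stop at the first job
			 * the syncobj seqno hasn't reached yet.
			 */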
2798 			if (syncobj->seqno < job->done_fence->seqno)
2799 				break;
2800 
2801 			list_move_tail(&job->node, &done_jobs);
2802 			dma_fence_signal_locked(job->done_fence);
2803 		}
2804 		spin_unlock(&queue->fence_ctx.lock);
2805 	}
2806 	dma_fence_end_signalling(cookie);
2807 
2808 	list_for_each_entry_safe(job, job_tmp, &done_jobs, node) {
2809 		list_del_init(&job->node);
2810 		panthor_job_put(&job->base);
2811 	}
2812 
2813 	group_put(group);
2814 }
2815 
2816 static struct dma_fence *
2817 queue_run_job(struct drm_sched_job *sched_job)
2818 {
2819 	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
2820 	struct panthor_group *group = job->group;
2821 	struct panthor_queue *queue = group->queues[job->queue_idx];
2822 	struct panthor_device *ptdev = group->ptdev;
2823 	struct panthor_scheduler *sched = ptdev->scheduler;
2824 	u32 ringbuf_size = panthor_kernel_bo_size(queue->ringbuf);
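	/* The ring-buffer size is a power of two, so masking wraps the insert offset. */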
2825 	u32 ringbuf_insert = queue->iface.input->insert & (ringbuf_size - 1);
2826 	u64 addr_reg = ptdev->csif_info.cs_reg_count -
2827 		       ptdev->csif_info.unpreserved_cs_reg_count;
2828 	u64 val_reg = addr_reg + 2;
2829 	u64 sync_addr = panthor_kernel_bo_gpuva(group->syncobjs) +
2830 			job->queue_idx * sizeof(struct panthor_syncobj_64b);
2831 	u32 waitall_mask = GENMASK(sched->sb_slot_count - 1, 0);
2832 	struct dma_fence *done_fence;
2833 	int ret;
2834 
2835 	u64 call_instrs[NUM_INSTRS_PER_SLOT] = {
2836 		/* MOV32 rX+2, cs.latest_flush */
2837 		(2ull << 56) | (val_reg << 48) | job->call_info.latest_flush,
2838 
2839 		/* FLUSH_CACHE2.clean_inv_all.no_wait.signal(0) rX+2 */
2840 		(36ull << 56) | (0ull << 48) | (val_reg << 40) | (0 << 16) | 0x233,
2841 
2842 		/* MOV48 rX:rX+1, cs.start */
2843 		(1ull << 56) | (addr_reg << 48) | job->call_info.start,
2844 
2845 		/* MOV32 rX+2, cs.size */
2846 		(2ull << 56) | (val_reg << 48) | job->call_info.size,
2847 
2848 		/* WAIT(0) => waits for FLUSH_CACHE2 instruction */
2849 		(3ull << 56) | (1 << 16),
2850 
2851 		/* CALL rX:rX+1, rX+2 */
2852 		(32ull << 56) | (addr_reg << 40) | (val_reg << 32),
2853 
2854 		/* MOV48 rX:rX+1, sync_addr */
2855 		(1ull << 56) | (addr_reg << 48) | sync_addr,
2856 
2857 		/* MOV48 rX+2, #1 */
2858 		(1ull << 56) | (val_reg << 48) | 1,
2859 
2860 		/* WAIT(all) */
2861 		(3ull << 56) | (waitall_mask << 16),
2862 
2863 		/* SYNC_ADD64.system_scope.propagate_err.nowait rX:rX+1, rX+2 */
2864 		(51ull << 56) | (0ull << 48) | (addr_reg << 40) | (val_reg << 32) | (0 << 16) | 1,
2865 
2866 		/* ERROR_BARRIER, so we can recover from faults at job
2867 		 * boundaries.
2868 		 */
2869 		(47ull << 56),
2870 	};
2871 
2872 	/* Need to be cacheline aligned to please the prefetcher. */
2873 	static_assert(sizeof(call_instrs) % 64 == 0,
2874 		      "call_instrs is not aligned on a cacheline");
2875 
2876 	/* Stream size is zero, nothing to do except making sure all previously
2877 	 * submitted jobs are done before we signal the
2878 	 * drm_sched_job::s_fence::finished fence.
2879 	 */
2880 	if (!job->call_info.size) {
2881 		job->done_fence = dma_fence_get(queue->fence_ctx.last_fence);
2882 		return dma_fence_get(job->done_fence);
2883 	}
2884 
2885 	ret = pm_runtime_resume_and_get(ptdev->base.dev);
2886 	if (drm_WARN_ON(&ptdev->base, ret))
2887 		return ERR_PTR(ret);
2888 
2889 	mutex_lock(&sched->lock);
2890 	if (!group_can_run(group)) {
2891 		done_fence = ERR_PTR(-ECANCELED);
2892 		goto out_unlock;
2893 	}
2894 
2895 	dma_fence_init(job->done_fence,
2896 		       &panthor_queue_fence_ops,
2897 		       &queue->fence_ctx.lock,
2898 		       queue->fence_ctx.id,
2899 		       atomic64_inc_return(&queue->fence_ctx.seqno));
2900 
2901 	memcpy(queue->ringbuf->kmap + ringbuf_insert,
2902 	       call_instrs, sizeof(call_instrs));
2903 
2904 	panthor_job_get(&job->base);
2905 	spin_lock(&queue->fence_ctx.lock);
2906 	list_add_tail(&job->node, &queue->fence_ctx.in_flight_jobs);
2907 	spin_unlock(&queue->fence_ctx.lock);
2908 
2909 	job->ringbuf.start = queue->iface.input->insert;
2910 	job->ringbuf.end = job->ringbuf.start + sizeof(call_instrs);
2911 
2912 	/* Make sure the ring buffer is updated before the INSERT
2913 	 * register.
2914 	 */
2915 	wmb();
2916 
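	/* Mirror the FW-reported extract pointer, then publish the new insert pointer. */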
2917 	queue->iface.input->extract = queue->iface.output->extract;
2918 	queue->iface.input->insert = job->ringbuf.end;
2919 
2920 	if (group->csg_id < 0) {
2921 		/* If the queue is blocked, we want to keep the timeout running, so we
2922 		 * can detect unbounded waits and kill the group when that happens.
2923 		 * Otherwise, we suspend the timeout so the time we spend waiting for
2924 		 * a CSG slot is not counted.
2925 		 */
2926 		if (!(group->blocked_queues & BIT(job->queue_idx)) &&
2927 		    !queue->timeout_suspended) {
2928 			queue->remaining_time = drm_sched_suspend_timeout(&queue->scheduler);
2929 			queue->timeout_suspended = true;
2930 		}
2931 
2932 		group_schedule_locked(group, BIT(job->queue_idx));
2933 	} else {
2934 		gpu_write(ptdev, CSF_DOORBELL(queue->doorbell_id), 1);
2935 		if (!sched->pm.has_ref &&
2936 		    !(group->blocked_queues & BIT(job->queue_idx))) {
2937 			pm_runtime_get(ptdev->base.dev);
2938 			sched->pm.has_ref = true;
2939 		}
2940 		panthor_devfreq_record_busy(sched->ptdev);
2941 	}
2942 
2943 	/* Update the last fence. */
2944 	dma_fence_put(queue->fence_ctx.last_fence);
2945 	queue->fence_ctx.last_fence = dma_fence_get(job->done_fence);
2946 
2947 	done_fence = dma_fence_get(job->done_fence);
2948 
2949 out_unlock:
2950 	mutex_unlock(&sched->lock);
2951 	pm_runtime_mark_last_busy(ptdev->base.dev);
2952 	pm_runtime_put_autosuspend(ptdev->base.dev);
2953 
2954 	return done_fence;
2955 }
2956 
2957 static enum drm_gpu_sched_stat
2958 queue_timedout_job(struct drm_sched_job *sched_job)
2959 {
2960 	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
2961 	struct panthor_group *group = job->group;
2962 	struct panthor_device *ptdev = group->ptdev;
2963 	struct panthor_scheduler *sched = ptdev->scheduler;
2964 	struct panthor_queue *queue = group->queues[job->queue_idx];
2965 
2966 	drm_warn(&ptdev->base, "job timeout\n");
2967 
2968 	drm_WARN_ON(&ptdev->base, atomic_read(&sched->reset.in_progress));
2969 
2970 	queue_stop(queue, job);
2971 
2972 	mutex_lock(&sched->lock);
2973 	group->timedout = true;
2974 	if (group->csg_id >= 0) {
2975 		sched_queue_delayed_work(ptdev->scheduler, tick, 0);
2976 	} else {
2977 		/* Remove from the run queues, so the scheduler can't
2978 		 * pick the group on the next tick.
2979 		 */
2980 		list_del_init(&group->run_node);
2981 		list_del_init(&group->wait_node);
2982 
2983 		group_queue_work(group, term);
2984 	}
2985 	mutex_unlock(&sched->lock);
2986 
2987 	queue_start(queue);
2988 
2989 	return DRM_GPU_SCHED_STAT_NOMINAL;
2990 }
2991 
2992 static void queue_free_job(struct drm_sched_job *sched_job)
2993 {
2994 	drm_sched_job_cleanup(sched_job);
2995 	panthor_job_put(sched_job);
2996 }
2997 
2998 static const struct drm_sched_backend_ops panthor_queue_sched_ops = {
2999 	.run_job = queue_run_job,
3000 	.timedout_job = queue_timedout_job,
3001 	.free_job = queue_free_job,
3002 };
3003 
3004 static struct panthor_queue *
3005 group_create_queue(struct panthor_group *group,
3006 		   const struct drm_panthor_queue_create *args)
3007 {
3008 	struct drm_gpu_scheduler *drm_sched;
3009 	struct panthor_queue *queue;
3010 	int ret;
3011 
3012 	if (args->pad[0] || args->pad[1] || args->pad[2])
3013 		return ERR_PTR(-EINVAL);
3014 
3015 	if (args->ringbuf_size < SZ_4K || args->ringbuf_size > SZ_64K ||
3016 	    !is_power_of_2(args->ringbuf_size))
3017 		return ERR_PTR(-EINVAL);
3018 
3019 	if (args->priority > CSF_MAX_QUEUE_PRIO)
3020 		return ERR_PTR(-EINVAL);
3021 
3022 	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
3023 	if (!queue)
3024 		return ERR_PTR(-ENOMEM);
3025 
3026 	queue->fence_ctx.id = dma_fence_context_alloc(1);
3027 	spin_lock_init(&queue->fence_ctx.lock);
3028 	INIT_LIST_HEAD(&queue->fence_ctx.in_flight_jobs);
3029 
3030 	queue->priority = args->priority;
3031 
3032 	queue->ringbuf = panthor_kernel_bo_create(group->ptdev, group->vm,
3033 						  args->ringbuf_size,
3034 						  DRM_PANTHOR_BO_NO_MMAP,
3035 						  DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
3036 						  DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
3037 						  PANTHOR_VM_KERNEL_AUTO_VA);
3038 	if (IS_ERR(queue->ringbuf)) {
3039 		ret = PTR_ERR(queue->ringbuf);
3040 		goto err_free_queue;
3041 	}
3042 
3043 	ret = panthor_kernel_bo_vmap(queue->ringbuf);
3044 	if (ret)
3045 		goto err_free_queue;
3046 
3047 	queue->iface.mem = panthor_fw_alloc_queue_iface_mem(group->ptdev,
3048 							    &queue->iface.input,
3049 							    &queue->iface.output,
3050 							    &queue->iface.input_fw_va,
3051 							    &queue->iface.output_fw_va);
3052 	if (IS_ERR(queue->iface.mem)) {
3053 		ret = PTR_ERR(queue->iface.mem);
3054 		goto err_free_queue;
3055 	}
3056 
3057 	ret = drm_sched_init(&queue->scheduler, &panthor_queue_sched_ops,
3058 			     group->ptdev->scheduler->wq, 1,
3059 			     args->ringbuf_size / (NUM_INSTRS_PER_SLOT * sizeof(u64)),
3060 			     0, msecs_to_jiffies(JOB_TIMEOUT_MS),
3061 			     group->ptdev->reset.wq,
3062 			     NULL, "panthor-queue", group->ptdev->base.dev);
3063 	if (ret)
3064 		goto err_free_queue;
3065 
3066 	drm_sched = &queue->scheduler;
3067 	ret = drm_sched_entity_init(&queue->entity, 0, &drm_sched, 1, NULL);
	if (ret)
		goto err_free_queue;

3069 	return queue;
3070 
3071 err_free_queue:
3072 	group_free_queue(group, queue);
3073 	return ERR_PTR(ret);
3074 }
3075 
3076 #define MAX_GROUPS_PER_POOL		128
3077 
3078 int panthor_group_create(struct panthor_file *pfile,
3079 			 const struct drm_panthor_group_create *group_args,
3080 			 const struct drm_panthor_queue_create *queue_args)
3081 {
3082 	struct panthor_device *ptdev = pfile->ptdev;
3083 	struct panthor_group_pool *gpool = pfile->groups;
3084 	struct panthor_scheduler *sched = ptdev->scheduler;
3085 	struct panthor_fw_csg_iface *csg_iface = panthor_fw_get_csg_iface(ptdev, 0);
3086 	struct panthor_group *group = NULL;
3087 	u32 gid, i, suspend_size;
3088 	int ret;
3089 
3090 	if (group_args->pad)
3091 		return -EINVAL;
3092 
3093 	if (group_args->priority >= PANTHOR_CSG_PRIORITY_COUNT)
3094 		return -EINVAL;
3095 
3096 	if ((group_args->compute_core_mask & ~ptdev->gpu_info.shader_present) ||
3097 	    (group_args->fragment_core_mask & ~ptdev->gpu_info.shader_present) ||
3098 	    (group_args->tiler_core_mask & ~ptdev->gpu_info.tiler_present))
3099 		return -EINVAL;
3100 
3101 	if (hweight64(group_args->compute_core_mask) < group_args->max_compute_cores ||
3102 	    hweight64(group_args->fragment_core_mask) < group_args->max_fragment_cores ||
3103 	    hweight64(group_args->tiler_core_mask) < group_args->max_tiler_cores)
3104 		return -EINVAL;
3105 
3106 	group = kzalloc(sizeof(*group), GFP_KERNEL);
3107 	if (!group)
3108 		return -ENOMEM;
3109 
3110 	spin_lock_init(&group->fatal_lock);
3111 	kref_init(&group->refcount);
3112 	group->state = PANTHOR_CS_GROUP_CREATED;
3113 	group->csg_id = -1;
3114 
3115 	group->ptdev = ptdev;
3116 	group->max_compute_cores = group_args->max_compute_cores;
3117 	group->compute_core_mask = group_args->compute_core_mask;
3118 	group->max_fragment_cores = group_args->max_fragment_cores;
3119 	group->fragment_core_mask = group_args->fragment_core_mask;
3120 	group->max_tiler_cores = group_args->max_tiler_cores;
3121 	group->tiler_core_mask = group_args->tiler_core_mask;
3122 	group->priority = group_args->priority;
3123 
3124 	INIT_LIST_HEAD(&group->wait_node);
3125 	INIT_LIST_HEAD(&group->run_node);
3126 	INIT_WORK(&group->term_work, group_term_work);
3127 	INIT_WORK(&group->sync_upd_work, group_sync_upd_work);
3128 	INIT_WORK(&group->tiler_oom_work, group_tiler_oom_work);
3129 	INIT_WORK(&group->release_work, group_release_work);
3130 
3131 	group->vm = panthor_vm_pool_get_vm(pfile->vms, group_args->vm_id);
3132 	if (!group->vm) {
3133 		ret = -EINVAL;
3134 		goto err_put_group;
3135 	}
3136 
3137 	suspend_size = csg_iface->control->suspend_size;
3138 	group->suspend_buf = panthor_fw_alloc_suspend_buf_mem(ptdev, suspend_size);
3139 	if (IS_ERR(group->suspend_buf)) {
3140 		ret = PTR_ERR(group->suspend_buf);
3141 		group->suspend_buf = NULL;
3142 		goto err_put_group;
3143 	}
3144 
3145 	suspend_size = csg_iface->control->protm_suspend_size;
3146 	group->protm_suspend_buf = panthor_fw_alloc_suspend_buf_mem(ptdev, suspend_size);
3147 	if (IS_ERR(group->protm_suspend_buf)) {
3148 		ret = PTR_ERR(group->protm_suspend_buf);
3149 		group->protm_suspend_buf = NULL;
3150 		goto err_put_group;
3151 	}
3152 
3153 	group->syncobjs = panthor_kernel_bo_create(ptdev, group->vm,
3154 						   group_args->queues.count *
3155 						   sizeof(struct panthor_syncobj_64b),
3156 						   DRM_PANTHOR_BO_NO_MMAP,
3157 						   DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
3158 						   DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
3159 						   PANTHOR_VM_KERNEL_AUTO_VA);
3160 	if (IS_ERR(group->syncobjs)) {
3161 		ret = PTR_ERR(group->syncobjs);
3162 		goto err_put_group;
3163 	}
3164 
3165 	ret = panthor_kernel_bo_vmap(group->syncobjs);
3166 	if (ret)
3167 		goto err_put_group;
3168 
3169 	memset(group->syncobjs->kmap, 0,
3170 	       group_args->queues.count * sizeof(struct panthor_syncobj_64b));
3171 
3172 	for (i = 0; i < group_args->queues.count; i++) {
3173 		group->queues[i] = group_create_queue(group, &queue_args[i]);
3174 		if (IS_ERR(group->queues[i])) {
3175 			ret = PTR_ERR(group->queues[i]);
3176 			group->queues[i] = NULL;
3177 			goto err_put_group;
3178 		}
3179 
3180 		group->queue_count++;
3181 	}
3182 
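	/* No jobs have been submitted yet, so flag all queues as idle. */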
3183 	group->idle_queues = GENMASK(group->queue_count - 1, 0);
3184 
3185 	ret = xa_alloc(&gpool->xa, &gid, group, XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL);
3186 	if (ret)
3187 		goto err_put_group;
3188 
3189 	mutex_lock(&sched->reset.lock);
3190 	if (atomic_read(&sched->reset.in_progress)) {
3191 		panthor_group_stop(group);
3192 	} else {
3193 		mutex_lock(&sched->lock);
3194 		list_add_tail(&group->run_node,
3195 			      &sched->groups.idle[group->priority]);
3196 		mutex_unlock(&sched->lock);
3197 	}
3198 	mutex_unlock(&sched->reset.lock);
3199 
3200 	return gid;
3201 
3202 err_put_group:
3203 	group_put(group);
3204 	return ret;
3205 }
3206 
3207 int panthor_group_destroy(struct panthor_file *pfile, u32 group_handle)
3208 {
3209 	struct panthor_group_pool *gpool = pfile->groups;
3210 	struct panthor_device *ptdev = pfile->ptdev;
3211 	struct panthor_scheduler *sched = ptdev->scheduler;
3212 	struct panthor_group *group;
3213 
3214 	group = xa_erase(&gpool->xa, group_handle);
3215 	if (!group)
3216 		return -EINVAL;
3217 
3218 	for (u32 i = 0; i < group->queue_count; i++) {
3219 		if (group->queues[i])
3220 			drm_sched_entity_destroy(&group->queues[i]->entity);
3221 	}
3222 
3223 	mutex_lock(&sched->reset.lock);
3224 	mutex_lock(&sched->lock);
3225 	group->destroyed = true;
3226 	if (group->csg_id >= 0) {
3227 		sched_queue_delayed_work(sched, tick, 0);
3228 	} else if (!atomic_read(&sched->reset.in_progress)) {
3229 		/* Remove from the run queues, so the scheduler can't
3230 		 * pick the group on the next tick.
3231 		 */
3232 		list_del_init(&group->run_node);
3233 		list_del_init(&group->wait_node);
3234 		group_queue_work(group, term);
3235 	}
3236 	mutex_unlock(&sched->lock);
3237 	mutex_unlock(&sched->reset.lock);
3238 
3239 	group_put(group);
3240 	return 0;
3241 }
3242 
3243 int panthor_group_get_state(struct panthor_file *pfile,
3244 			    struct drm_panthor_group_get_state *get_state)
3245 {
3246 	struct panthor_group_pool *gpool = pfile->groups;
3247 	struct panthor_device *ptdev = pfile->ptdev;
3248 	struct panthor_scheduler *sched = ptdev->scheduler;
3249 	struct panthor_group *group;
3250 
3251 	if (get_state->pad)
3252 		return -EINVAL;
3253 
3254 	group = group_get(xa_load(&gpool->xa, get_state->group_handle));
3255 	if (!group)
3256 		return -EINVAL;
3257 
3258 	memset(get_state, 0, sizeof(*get_state));
3259 
3260 	mutex_lock(&sched->lock);
3261 	if (group->timedout)
3262 		get_state->state |= DRM_PANTHOR_GROUP_STATE_TIMEDOUT;
3263 	if (group->fatal_queues) {
3264 		get_state->state |= DRM_PANTHOR_GROUP_STATE_FATAL_FAULT;
3265 		get_state->fatal_queues = group->fatal_queues;
3266 	}
3267 	mutex_unlock(&sched->lock);
3268 
3269 	group_put(group);
3270 	return 0;
3271 }
3272 
3273 int panthor_group_pool_create(struct panthor_file *pfile)
3274 {
3275 	struct panthor_group_pool *gpool;
3276 
3277 	gpool = kzalloc(sizeof(*gpool), GFP_KERNEL);
3278 	if (!gpool)
3279 		return -ENOMEM;
3280 
3281 	xa_init_flags(&gpool->xa, XA_FLAGS_ALLOC1);
3282 	pfile->groups = gpool;
3283 	return 0;
3284 }
3285 
3286 void panthor_group_pool_destroy(struct panthor_file *pfile)
3287 {
3288 	struct panthor_group_pool *gpool = pfile->groups;
3289 	struct panthor_group *group;
3290 	unsigned long i;
3291 
3292 	if (IS_ERR_OR_NULL(gpool))
3293 		return;
3294 
3295 	xa_for_each(&gpool->xa, i, group)
3296 		panthor_group_destroy(pfile, i);
3297 
3298 	xa_destroy(&gpool->xa);
3299 	kfree(gpool);
3300 	pfile->groups = NULL;
3301 }
3302 
3303 static void job_release(struct kref *ref)
3304 {
3305 	struct panthor_job *job = container_of(ref, struct panthor_job, refcount);
3306 
3307 	drm_WARN_ON(&job->group->ptdev->base, !list_empty(&job->node));
3308 
3309 	if (job->base.s_fence)
3310 		drm_sched_job_cleanup(&job->base);
3311 
3312 	if (job->done_fence && job->done_fence->ops)
3313 		dma_fence_put(job->done_fence);
3314 	else
3315 		dma_fence_free(job->done_fence);
3316 
3317 	group_put(job->group);
3318 
3319 	kfree(job);
3320 }
3321 
3322 struct drm_sched_job *panthor_job_get(struct drm_sched_job *sched_job)
3323 {
3324 	if (sched_job) {
3325 		struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
3326 
3327 		kref_get(&job->refcount);
3328 	}
3329 
3330 	return sched_job;
3331 }
3332 
3333 void panthor_job_put(struct drm_sched_job *sched_job)
3334 {
3335 	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
3336 
3337 	if (sched_job)
3338 		kref_put(&job->refcount, job_release);
3339 }
3340 
3341 struct panthor_vm *panthor_job_vm(struct drm_sched_job *sched_job)
3342 {
3343 	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
3344 
3345 	return job->group->vm;
3346 }
3347 
3348 struct drm_sched_job *
3349 panthor_job_create(struct panthor_file *pfile,
3350 		   u16 group_handle,
3351 		   const struct drm_panthor_queue_submit *qsubmit)
3352 {
3353 	struct panthor_group_pool *gpool = pfile->groups;
3354 	struct panthor_job *job;
3355 	int ret;
3356 
3357 	if (qsubmit->pad)
3358 		return ERR_PTR(-EINVAL);
3359 
3360 	/* If stream_addr is zero, stream_size should be zero too. */
3361 	if ((qsubmit->stream_size == 0) != (qsubmit->stream_addr == 0))
3362 		return ERR_PTR(-EINVAL);
3363 
3364 	/* Make sure the address is aligned on 64-byte (cacheline) and the size is
3365 	 * aligned on 8-byte (instruction size).
3366 	 */
3367 	if ((qsubmit->stream_addr & 63) || (qsubmit->stream_size & 7))
3368 		return ERR_PTR(-EINVAL);
3369 
3370 	/* bits 24:30 must be zero. */
3371 	if (qsubmit->latest_flush & GENMASK(30, 24))
3372 		return ERR_PTR(-EINVAL);
3373 
3374 	job = kzalloc(sizeof(*job), GFP_KERNEL);
3375 	if (!job)
3376 		return ERR_PTR(-ENOMEM);
3377 
3378 	kref_init(&job->refcount);
3379 	job->queue_idx = qsubmit->queue_index;
3380 	job->call_info.size = qsubmit->stream_size;
3381 	job->call_info.start = qsubmit->stream_addr;
3382 	job->call_info.latest_flush = qsubmit->latest_flush;
3383 	INIT_LIST_HEAD(&job->node);
3384 
3385 	job->group = group_get(xa_load(&gpool->xa, group_handle));
3386 	if (!job->group) {
3387 		ret = -EINVAL;
3388 		goto err_put_job;
3389 	}
3390 
3391 	if (job->queue_idx >= job->group->queue_count ||
3392 	    !job->group->queues[job->queue_idx]) {
3393 		ret = -EINVAL;
3394 		goto err_put_job;
3395 	}
3396 
3397 	/* Empty command streams don't need a fence, they'll pick the one from
3398 	 * the previously submitted job.
3399 	 */
3400 	if (job->call_info.size) {
3401 		job->done_fence = kzalloc(sizeof(*job->done_fence), GFP_KERNEL);
3402 		if (!job->done_fence) {
3403 			ret = -ENOMEM;
3404 			goto err_put_job;
3405 		}
3406 	}
3407 
3408 	ret = drm_sched_job_init(&job->base,
3409 				 &job->group->queues[job->queue_idx]->entity,
3410 				 1, job->group);
3411 	if (ret)
3412 		goto err_put_job;
3413 
3414 	return &job->base;
3415 
3416 err_put_job:
3417 	panthor_job_put(&job->base);
3418 	return ERR_PTR(ret);
3419 }
3420 
3421 void panthor_job_update_resvs(struct drm_exec *exec, struct drm_sched_job *sched_job)
3422 {
3423 	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
3424 
3425 	/* Still not sure why we want USAGE_WRITE for external objects, since I
3426 	 * was assuming this would be handled through explicit syncs being imported
3427 	 * to external BOs with DMA_BUF_IOCTL_IMPORT_SYNC_FILE, but other drivers
3428 	 * seem to pass DMA_RESV_USAGE_WRITE, so there must be a good reason.
3429 	 */
3430 	panthor_vm_update_resvs(job->group->vm, exec, &sched_job->s_fence->finished,
3431 				DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_WRITE);
3432 }
3433 
3434 void panthor_sched_unplug(struct panthor_device *ptdev)
3435 {
3436 	struct panthor_scheduler *sched = ptdev->scheduler;
3437 
3438 	cancel_delayed_work_sync(&sched->tick_work);
3439 
3440 	mutex_lock(&sched->lock);
3441 	if (sched->pm.has_ref) {
3442 		pm_runtime_put(ptdev->base.dev);
3443 		sched->pm.has_ref = false;
3444 	}
3445 	mutex_unlock(&sched->lock);
3446 }
3447 
3448 static void panthor_sched_fini(struct drm_device *ddev, void *res)
3449 {
3450 	struct panthor_scheduler *sched = res;
3451 	int prio;
3452 
3453 	if (!sched || !sched->csg_slot_count)
3454 		return;
3455 
3456 	cancel_delayed_work_sync(&sched->tick_work);
3457 
3458 	if (sched->wq)
3459 		destroy_workqueue(sched->wq);
3460 
3461 	if (sched->heap_alloc_wq)
3462 		destroy_workqueue(sched->heap_alloc_wq);
3463 
3464 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
3465 		drm_WARN_ON(ddev, !list_empty(&sched->groups.runnable[prio]));
3466 		drm_WARN_ON(ddev, !list_empty(&sched->groups.idle[prio]));
3467 	}
3468 
3469 	drm_WARN_ON(ddev, !list_empty(&sched->groups.waiting));
3470 }
3471 
3472 int panthor_sched_init(struct panthor_device *ptdev)
3473 {
3474 	struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
3475 	struct panthor_fw_csg_iface *csg_iface = panthor_fw_get_csg_iface(ptdev, 0);
3476 	struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, 0, 0);
3477 	struct panthor_scheduler *sched;
3478 	u32 gpu_as_count, num_groups;
3479 	int prio, ret;
3480 
3481 	sched = drmm_kzalloc(&ptdev->base, sizeof(*sched), GFP_KERNEL);
3482 	if (!sched)
3483 		return -ENOMEM;
3484 
3485 	/* The highest bit in JOB_INT_* is reserved for global IRQs. That
3486 	 * leaves 31 bits for CSG IRQs, hence the MAX_CSGS clamp here.
3487 	 */
3488 	num_groups = min_t(u32, MAX_CSGS, glb_iface->control->group_num);
3489 
3490 	/* The FW-side scheduler might deadlock if two groups with the same
3491 	 * priority try to access a set of resources that overlaps, with part
3492 	 * of the resources being allocated to one group and the other part to
3493 	 * the other group, both groups waiting for the remaining resources to
3494 	 * be allocated. To avoid that, it is recommended to assign each CSG a
3495 	 * different priority. In theory we could allow several groups to have
3496 	 * the same CSG priority if they don't request the same resources, but
3497 	 * that makes the scheduling logic more complicated, so let's clamp
3498 	 * the number of CSG slots to MAX_CSG_PRIO + 1 for now.
3499 	 */
3500 	num_groups = min_t(u32, MAX_CSG_PRIO + 1, num_groups);
3501 
3502 	/* We need at least one AS for the MCU and one for the GPU contexts. */
3503 	gpu_as_count = hweight32(ptdev->gpu_info.as_present & GENMASK(31, 1));
3504 	if (!gpu_as_count) {
3505 		drm_err(&ptdev->base, "Not enough AS (%d, expected at least 2)",
3506 			gpu_as_count + 1);
3507 		return -EINVAL;
3508 	}
3509 
3510 	sched->ptdev = ptdev;
3511 	sched->sb_slot_count = CS_FEATURES_SCOREBOARDS(cs_iface->control->features);
3512 	sched->csg_slot_count = num_groups;
3513 	sched->cs_slot_count = csg_iface->control->stream_num;
3514 	sched->as_slot_count = gpu_as_count;
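	/* Mirror the slot counts in the CSIF info exposed to userspace. */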
3515 	ptdev->csif_info.csg_slot_count = sched->csg_slot_count;
3516 	ptdev->csif_info.cs_slot_count = sched->cs_slot_count;
3517 	ptdev->csif_info.scoreboard_slot_count = sched->sb_slot_count;
3518 
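	/* Tick state: nothing scheduled yet, with a default tick period of 10ms. */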
3519 	sched->last_tick = 0;
3520 	sched->resched_target = U64_MAX;
3521 	sched->tick_period = msecs_to_jiffies(10);
3522 	INIT_DELAYED_WORK(&sched->tick_work, tick_work);
3523 	INIT_WORK(&sched->sync_upd_work, sync_upd_work);
3524 	INIT_WORK(&sched->fw_events_work, process_fw_events_work);
3525 
3526 	ret = drmm_mutex_init(&ptdev->base, &sched->lock);
3527 	if (ret)
3528 		return ret;
3529 
3530 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
3531 		INIT_LIST_HEAD(&sched->groups.runnable[prio]);
3532 		INIT_LIST_HEAD(&sched->groups.idle[prio]);
3533 	}
3534 	INIT_LIST_HEAD(&sched->groups.waiting);
3535 
3536 	ret = drmm_mutex_init(&ptdev->base, &sched->reset.lock);
3537 	if (ret)
3538 		return ret;
3539 
3540 	INIT_LIST_HEAD(&sched->reset.stopped_groups);
3541 
3542 	/* sched->heap_alloc_wq will be used for heap chunk allocation on
3543 	 * tiler OOM events, which means we can't use the same workqueue for
3544 	 * the scheduler because work items queued by the scheduler are in
3545 	 * the dma-signalling path. Allocate a dedicated heap_alloc_wq to
3546 	 * work around this limitation.
3547 	 *
3548 	 * FIXME: Ultimately, what we need is a failable/non-blocking GEM
3549 	 * allocation path that we can call when a heap OOM is reported. The
3550 	 * FW is smart enough to fall back on other methods if the kernel can't
3551 	 * allocate memory, and fail the tiling job if none of these
3552 	 * countermeasures worked.
3553 	 *
3554 	 * Set WQ_MEM_RECLAIM on sched->wq to unblock the situation when the
3555 	 * system is running out of memory.
3556 	 */
3557 	sched->heap_alloc_wq = alloc_workqueue("panthor-heap-alloc", WQ_UNBOUND, 0);
3558 	sched->wq = alloc_workqueue("panthor-csf-sched", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
3559 	if (!sched->wq || !sched->heap_alloc_wq) {
3560 		panthor_sched_fini(&ptdev->base, sched);
3561 		drm_err(&ptdev->base, "Failed to allocate the workqueues");
3562 		return -ENOMEM;
3563 	}
3564 
3565 	ret = drmm_add_action_or_reset(&ptdev->base, panthor_sched_fini, sched);
3566 	if (ret)
3567 		return ret;
3568 
3569 	ptdev->scheduler = sched;
3570 	return 0;
3571 }
3572