xref: /linux/drivers/gpu/drm/panthor/panthor_sched.c (revision 52e6b198833411564e0b9ce6e96bbd3d72f961e7)
1 // SPDX-License-Identifier: GPL-2.0 or MIT
2 /* Copyright 2023 Collabora ltd. */
3 
4 #include <drm/drm_drv.h>
5 #include <drm/drm_exec.h>
6 #include <drm/drm_gem_shmem_helper.h>
7 #include <drm/drm_managed.h>
8 #include <drm/gpu_scheduler.h>
9 #include <drm/panthor_drm.h>
10 
11 #include <linux/build_bug.h>
12 #include <linux/cleanup.h>
13 #include <linux/clk.h>
14 #include <linux/delay.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/dma-resv.h>
17 #include <linux/firmware.h>
18 #include <linux/interrupt.h>
19 #include <linux/io.h>
20 #include <linux/iopoll.h>
21 #include <linux/iosys-map.h>
22 #include <linux/module.h>
23 #include <linux/platform_device.h>
24 #include <linux/pm_runtime.h>
25 
26 #include "panthor_devfreq.h"
27 #include "panthor_device.h"
28 #include "panthor_fw.h"
29 #include "panthor_gem.h"
30 #include "panthor_gpu.h"
31 #include "panthor_heap.h"
32 #include "panthor_mmu.h"
33 #include "panthor_regs.h"
34 #include "panthor_sched.h"
35 
36 /**
37  * DOC: Scheduler
38  *
39  * Mali CSF hardware adopts a firmware-assisted scheduling model, where
40  * the firmware takes care of scheduling aspects, to some extent.
41  *
42  * The scheduling happens at the scheduling group level, each group
43  * contains 1 to N queues (N is FW/hardware dependent, and exposed
44  * through the firmware interface). Each queue is assigned a command
45  * stream ring buffer, which serves as a way to get jobs submitted to
46  * the GPU, among other things.
47  *
48  * The firmware can schedule a maximum of M groups (M is FW/hardware
49  * dependent, and exposed through the firmware interface). Past
50  * this maximum number of groups, the kernel must take care of
51  * rotating the groups passed to the firmware so every group gets
52  * a chance to have its queues scheduled for execution.
53  *
54  * The current implementation only supports kernel-mode queues.
55  * In other words, userspace doesn't have access to the ring-buffer.
56  * Instead, userspace passes indirect command stream buffers that are
57  * called from the queue ring-buffer by the kernel using a pre-defined
58  * sequence of command stream instructions to ensure the userspace driver
59  * always gets consistent results (cache maintenance,
60  * synchronization, ...).
61  *
62  * We rely on the drm_gpu_scheduler framework to deal with job
63  * dependencies and submission. As any other driver dealing with a
64  * FW-scheduler, we use the 1:1 entity:scheduler mode, such that each
65  * entity has its own job scheduler. When a job is ready to be executed
66  * (all its dependencies are met), it is pushed to the appropriate
67  * queue ring-buffer, and the group is scheduled for execution if it
68  * wasn't already active.
69  *
70  * Kernel-side group scheduling is timeslice-based. When we have fewer
71  * groups than there are slots, the periodic tick is disabled and we
72  * just let the FW schedule the active groups. When there are more
73  * groups than slots, we give each group a chance to execute work for
74  * a given amount of time, and then re-evaluate and pick new groups
75  * to schedule. The group selection algorithm is based on
76  * priority+round-robin.
77  *
78  * Even though user-mode queues are out of scope right now, the
79  * current design takes them into account by avoiding any guess on the
80  * group/queue state that would be based on information we wouldn't have
81  * if userspace was in charge of the ring-buffer. That's also one of the
82  * reasons we don't do 'cooperative' scheduling (encoding FW group slot
83  * reservation as dma_fence that would be returned from the
84  * drm_gpu_scheduler::prepare_job() hook, and treating group rotation as
85  * a queue of waiters, ordered by job submission order). This approach
86  * would work for kernel-mode queues, but would make user-mode queues a
87  * lot more complicated to retrofit.
88  */
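/*
 * To make the timeslice rotation described above more concrete, here is a
 * deliberately simplified sketch of what a scheduling tick conceptually does
 * (illustrative only, this is not the actual tick logic): walk the runnable
 * lists from highest to lowest priority and bind groups to free CSG slots.
 * Groups that just had their timeslice are then moved to the tail of their
 * runnable list, which is what provides the round-robin behavior within a
 * priority level.
 *
 *	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1;
 *	     prio >= 0 && csg_id < sched->csg_slot_count; prio--) {
 *		struct panthor_group *group;
 *
 *		list_for_each_entry(group, &sched->groups.runnable[prio],
 *				    run_node) {
 *			if (csg_id == sched->csg_slot_count)
 *				break;
 *
 *			group_bind_locked(group, csg_id++);
 *		}
 *	}
 */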
89 
90 #define JOB_TIMEOUT_MS				5000
91 
92 #define MAX_CSG_PRIO				0xf
93 
94 #define NUM_INSTRS_PER_CACHE_LINE		(64 / sizeof(u64))
95 #define MAX_INSTRS_PER_JOB			24
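/*
 * For reference, derived from the definitions above: CS instructions are
 * 64 bits wide, so NUM_INSTRS_PER_CACHE_LINE evaluates to 64 / sizeof(u64) = 8,
 * and a maximal kernel-generated job of MAX_INSTRS_PER_JOB = 24 instructions
 * spans exactly three cache lines (24 / 8) in the ring buffer.
 */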
96 
97 struct panthor_group;
98 
99 /**
100  * struct panthor_csg_slot - Command stream group slot
101  *
102  * This represents a FW slot for a scheduling group.
103  */
104 struct panthor_csg_slot {
105 	/** @group: Scheduling group bound to this slot. */
106 	struct panthor_group *group;
107 
108 	/** @priority: Group priority. */
109 	u8 priority;
110 
111 	/**
112 	 * @idle: True if the group bound to this slot is idle.
113 	 *
114 	 * A group is idle when it has nothing waiting for execution on
115 	 * all its queues, or when queues are blocked waiting for something
116 	 * to happen (synchronization object).
117 	 */
118 	bool idle;
119 };
120 
121 /**
122  * enum panthor_csg_priority - Group priority
123  */
124 enum panthor_csg_priority {
125 	/** @PANTHOR_CSG_PRIORITY_LOW: Low priority group. */
126 	PANTHOR_CSG_PRIORITY_LOW = 0,
127 
128 	/** @PANTHOR_CSG_PRIORITY_MEDIUM: Medium priority group. */
129 	PANTHOR_CSG_PRIORITY_MEDIUM,
130 
131 	/** @PANTHOR_CSG_PRIORITY_HIGH: High priority group. */
132 	PANTHOR_CSG_PRIORITY_HIGH,
133 
134 	/**
135 	 * @PANTHOR_CSG_PRIORITY_RT: Real-time priority group.
136 	 *
137 	 * Real-time priority allows one to preempt scheduling of other
138 	 * non-real-time groups. When such a group becomes executable,
139 	 * it will evict the group with the lowest non-rt priority if
140 	 * there's no free group slot available.
141 	 */
142 	PANTHOR_CSG_PRIORITY_RT,
143 
144 	/** @PANTHOR_CSG_PRIORITY_COUNT: Number of priority levels. */
145 	PANTHOR_CSG_PRIORITY_COUNT,
146 };
147 
148 /**
149  * struct panthor_scheduler - Object used to manage the scheduler
150  */
151 struct panthor_scheduler {
152 	/** @ptdev: Device. */
153 	struct panthor_device *ptdev;
154 
155 	/**
156 	 * @wq: Workqueue used by our internal scheduler logic and
157 	 * drm_gpu_scheduler.
158 	 *
159 	 * Used for the scheduler tick, group updates, or other kinds of FW
160 	 * event processing that can't be handled in the threaded interrupt
161 	 * path. Also passed to the drm_gpu_scheduler instances embedded
162 	 * in panthor_queue.
163 	 */
164 	struct workqueue_struct *wq;
165 
166 	/**
167 	 * @heap_alloc_wq: Workqueue used to schedule tiler_oom works.
168 	 *
169 	 * We have a queue dedicated to heap chunk allocation works to avoid
170 	 * blocking the rest of the scheduler if the allocation tries to
171 	 * reclaim memory.
172 	 */
173 	struct workqueue_struct *heap_alloc_wq;
174 
175 	/** @tick_work: Work executed on a scheduling tick. */
176 	struct delayed_work tick_work;
177 
178 	/**
179 	 * @sync_upd_work: Work used to process synchronization object updates.
180 	 *
181 	 * We use this work to unblock queues/groups that were waiting on a
182 	 * synchronization object.
183 	 */
184 	struct work_struct sync_upd_work;
185 
186 	/**
187 	 * @fw_events_work: Work used to process FW events outside the interrupt path.
188 	 *
189 	 * Even if the interrupt is threaded, we need any event processing
190 	 * that requires taking the panthor_scheduler::lock to be processed
191 	 * outside the interrupt path so we don't block the tick logic when
192 	 * it calls panthor_fw_{csg,wait}_wait_acks(). Since most of the
193 	 * event processing requires taking this lock, we just delegate all
194 	 * FW event processing to the scheduler workqueue.
195 	 */
196 	struct work_struct fw_events_work;
197 
198 	/**
199 	 * @fw_events: Bitmask encoding pending FW events.
200 	 */
201 	atomic_t fw_events;
202 
203 	/**
204 	 * @resched_target: When the next tick should occur.
205 	 *
206 	 * Expressed in jiffies.
207 	 */
208 	u64 resched_target;
209 
210 	/**
211 	 * @last_tick: When the last tick occurred.
212 	 *
213 	 * Expressed in jiffies.
214 	 */
215 	u64 last_tick;
216 
217 	/** @tick_period: Tick period in jiffies. */
218 	u64 tick_period;
219 
220 	/**
221 	 * @lock: Lock protecting access to all the scheduler fields.
222 	 *
223 	 * Should be taken in the tick work, the irq handler, and anywhere the @groups
224 	 * fields are touched.
225 	 */
226 	struct mutex lock;
227 
228 	/** @groups: Various lists used to classify groups. */
229 	struct {
230 		/**
231 		 * @runnable: Runnable group lists.
232 		 *
233 		 * When a group has queues that want to execute something,
234 		 * its panthor_group::run_node should be inserted here.
235 		 *
236 		 * One list per-priority.
237 		 */
238 		struct list_head runnable[PANTHOR_CSG_PRIORITY_COUNT];
239 
240 		/**
241 		 * @idle: Idle group lists.
242 		 *
243 		 * When all queues of a group are idle (either because they
244 		 * have nothing to execute, or because they are blocked), the
245 		 * panthor_group::run_node field should be inserted here.
246 		 *
247 		 * One list per-priority.
248 		 */
249 		struct list_head idle[PANTHOR_CSG_PRIORITY_COUNT];
250 
251 		/**
252 		 * @waiting: List of groups whose queues are blocked on a
253 		 * synchronization object.
254 		 *
255 		 * Insert panthor_group::wait_node here when a group is waiting
256 		 * for synchronization objects to be signaled.
257 		 *
258 		 * This list is evaluated in the @sync_upd_work work.
259 		 */
260 		struct list_head waiting;
261 	} groups;
262 
263 	/**
264 	 * @csg_slots: FW command stream group slots.
265 	 */
266 	struct panthor_csg_slot csg_slots[MAX_CSGS];
267 
268 	/** @csg_slot_count: Number of command stream group slots exposed by the FW. */
269 	u32 csg_slot_count;
270 
271 	/** @cs_slot_count: Number of command stream slots per group slot exposed by the FW. */
272 	u32 cs_slot_count;
273 
274 	/** @as_slot_count: Number of address space slots supported by the MMU. */
275 	u32 as_slot_count;
276 
277 	/** @used_csg_slot_count: Number of command stream group slots currently used. */
278 	u32 used_csg_slot_count;
279 
280 	/** @sb_slot_count: Number of scoreboard slots. */
281 	u32 sb_slot_count;
282 
283 	/**
284 	 * @might_have_idle_groups: True if an active group might have become idle.
285 	 *
286 	 * This will force a tick, so other runnable groups can be scheduled if one
287 	 * or more active groups became idle.
288 	 */
289 	bool might_have_idle_groups;
290 
291 	/** @pm: Power management related fields. */
292 	struct {
293 		/** @has_ref: True if the scheduler owns a runtime PM reference. */
294 		bool has_ref;
295 	} pm;
296 
297 	/** @reset: Reset related fields. */
298 	struct {
299 		/** @lock: Lock protecting the other reset fields. */
300 		struct mutex lock;
301 
302 		/**
303 		 * @in_progress: True if a reset is in progress.
304 		 *
305 		 * Set to true in panthor_sched_pre_reset() and back to false in
306 		 * panthor_sched_post_reset().
307 		 */
308 		atomic_t in_progress;
309 
310 		/**
311 		 * @stopped_groups: List containing all groups that were stopped
312 		 * before a reset.
313 		 *
314 		 * Insert panthor_group::run_node in the pre_reset path.
315 		 */
316 		struct list_head stopped_groups;
317 	} reset;
318 };
319 
320 /**
321  * struct panthor_syncobj_32b - 32-bit FW synchronization object
322  */
323 struct panthor_syncobj_32b {
324 	/** @seqno: Sequence number. */
325 	u32 seqno;
326 
327 	/**
328 	 * @status: Status.
329 	 *
330 	 * Not zero on failure.
331 	 */
332 	u32 status;
333 };
334 
335 /**
336  * struct panthor_syncobj_64b - 64-bit FW synchronization object
337  */
338 struct panthor_syncobj_64b {
339 	/** @seqno: Sequence number. */
340 	u64 seqno;
341 
342 	/**
343 	 * @status: Status.
344 	 *
345 	 * Not zero on failure.
346 	 */
347 	u32 status;
348 
349 	/** @pad: MBZ. */
350 	u32 pad;
351 };
352 
353 /**
354  * struct panthor_queue - Execution queue
355  */
356 struct panthor_queue {
357 	/** @scheduler: DRM scheduler used for this queue. */
358 	struct drm_gpu_scheduler scheduler;
359 
360 	/** @entity: DRM scheduling entity used for this queue. */
361 	struct drm_sched_entity entity;
362 
363 	/** @name: DRM scheduler name for this queue. */
364 	char *name;
365 
366 	/**
367 	 * @remaining_time: Time remaining before the job timeout expires.
368 	 *
369 	 * The job timeout is suspended when the queue is not scheduled by the
370 	 * FW. Every time we suspend the timer, we need to save the remaining
371 	 * time so we can restore it later on.
372 	 */
373 	unsigned long remaining_time;
374 
375 	/** @timeout_suspended: True if the job timeout was suspended. */
376 	bool timeout_suspended;
377 
378 	/**
379 	 * @doorbell_id: Doorbell assigned to this queue.
380 	 *
381 	 * Right now, all queues of a group share the same doorbell, and the
382 	 * doorbell ID is set to the group slot ID + 1 when the group is
383 	 * assigned a slot. But we might decide to provide fine-grained doorbell
384 	 * assignment at some point, so we don't have to wake up all queues in a
385 	 * group every time one of them is updated.
386 	 */
387 	u8 doorbell_id;
388 
389 	/**
390 	 * @priority: Priority of the queue inside the group.
391 	 *
392 	 * Must be less than 16 (Only 4 bits available).
393 	 */
394 	u8 priority;
395 #define CSF_MAX_QUEUE_PRIO	GENMASK(3, 0)
396 
397 	/** @ringbuf: Command stream ring-buffer. */
398 	struct panthor_kernel_bo *ringbuf;
399 
400 	/** @iface: Firmware interface. */
401 	struct {
402 		/** @mem: FW memory allocated for this interface. */
403 		struct panthor_kernel_bo *mem;
404 
405 		/** @input: Input interface. */
406 		struct panthor_fw_ringbuf_input_iface *input;
407 
408 		/** @output: Output interface. */
409 		const struct panthor_fw_ringbuf_output_iface *output;
410 
411 		/** @input_fw_va: FW virtual address of the input interface buffer. */
412 		u32 input_fw_va;
413 
414 		/** @output_fw_va: FW virtual address of the output interface buffer. */
415 		u32 output_fw_va;
416 	} iface;
417 
418 	/**
419 	 * @syncwait: Stores information about the synchronization object this
420 	 * queue is waiting on.
421 	 */
422 	struct {
423 		/** @gpu_va: GPU address of the synchronization object. */
424 		u64 gpu_va;
425 
426 		/** @ref: Reference value to compare against. */
427 		u64 ref;
428 
429 		/** @gt: True if this is a greater-than test. */
430 		bool gt;
431 
432 		/** @sync64: True if this is a 64-bit sync object. */
433 		bool sync64;
434 
435 		/** @obj: Buffer object holding the synchronization object. */
436 		struct drm_gem_object *obj;
437 
438 		/** @offset: Offset of the synchronization object inside @obj. */
439 		u64 offset;
440 
441 		/**
442 		 * @kmap: Kernel mapping of the buffer object holding the
443 		 * synchronization object.
444 		 */
445 		void *kmap;
446 	} syncwait;
447 
448 	/** @fence_ctx: Fence context fields. */
449 	struct {
450 		/** @lock: Used to protect access to all fences allocated by this context. */
451 		spinlock_t lock;
452 
453 		/**
454 		 * @id: Fence context ID.
455 		 *
456 		 * Allocated with dma_fence_context_alloc().
457 		 */
458 		u64 id;
459 
460 		/** @seqno: Sequence number of the last initialized fence. */
461 		atomic64_t seqno;
462 
463 		/**
464 		 * @last_fence: Fence of the last submitted job.
465 		 *
466 		 * We return this fence when we get an empty command stream.
467 		 * This way, we are guaranteed that all earlier jobs have completed
468 		 * when drm_sched_job::s_fence::finished is signaled, without having to feed
469 		 * the CS ring buffer with a dummy job that only signals the fence.
470 		 */
471 		struct dma_fence *last_fence;
472 
473 		/**
474 		 * @in_flight_jobs: List containing all in-flight jobs.
475 		 *
476 		 * Used to keep track and signal panthor_job::done_fence when the
477 		 * synchronization object attached to the queue is signaled.
478 		 */
479 		struct list_head in_flight_jobs;
480 	} fence_ctx;
481 
482 	/** @profiling: Job profiling data slots and access information. */
483 	struct {
484 		/** @slots: Kernel BO holding the slots. */
485 		struct panthor_kernel_bo *slots;
486 
487 		/** @slot_count: Number of jobs the ring buffer can hold at once. */
488 		u32 slot_count;
489 
490 		/** @seqno: Index of the next available profiling information slot. */
491 		u32 seqno;
492 	} profiling;
493 };
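/*
 * Illustrative sketch only (not the actual implementation): once
 * panthor_queue_get_syncwait_obj() has provided a CPU mapping of the
 * synchronization object a queue is blocked on, deciding whether the queue
 * can be unblocked boils down to comparing the object's seqno with
 * panthor_queue::syncwait::ref. The example_* helper below is hypothetical,
 * and only the greater-than condition is shown; other FW wait conditions
 * are omitted.
 *
 *	static bool example_syncwait_signaled(struct panthor_group *group,
 *					      struct panthor_queue *queue)
 *	{
 *		void *sync_ptr = panthor_queue_get_syncwait_obj(group, queue);
 *		u64 val;
 *
 *		if (!sync_ptr)
 *			return false;
 *
 *		if (queue->syncwait.sync64)
 *			val = ((struct panthor_syncobj_64b *)sync_ptr)->seqno;
 *		else
 *			val = ((struct panthor_syncobj_32b *)sync_ptr)->seqno;
 *
 *		if (queue->syncwait.gt)
 *			return val > queue->syncwait.ref;
 *
 *		return false;
 *	}
 */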
494 
495 /**
496  * enum panthor_group_state - Scheduling group state.
497  */
498 enum panthor_group_state {
499 	/** @PANTHOR_CS_GROUP_CREATED: Group was created, but not scheduled yet. */
500 	PANTHOR_CS_GROUP_CREATED,
501 
502 	/** @PANTHOR_CS_GROUP_ACTIVE: Group is currently scheduled. */
503 	PANTHOR_CS_GROUP_ACTIVE,
504 
505 	/**
506 	 * @PANTHOR_CS_GROUP_SUSPENDED: Group was scheduled at least once, but is
507 	 * inactive/suspended right now.
508 	 */
509 	PANTHOR_CS_GROUP_SUSPENDED,
510 
511 	/**
512 	 * @PANTHOR_CS_GROUP_TERMINATED: Group was terminated.
513 	 *
514 	 * Can no longer be scheduled. The only allowed action is a destruction.
515 	 */
516 	PANTHOR_CS_GROUP_TERMINATED,
517 
518 	/**
519 	 * @PANTHOR_CS_GROUP_UNKNOWN_STATE: Group is in an unknown state.
520 	 *
521 	 * The FW returned an inconsistent state. The group is flagged unusable
522 	 * and can no longer be scheduled. The only allowed action is a
523 	 * destruction.
524 	 *
525 	 * When that happens, we also schedule a FW reset, to start from a fresh
526 	 * state.
527 	 */
528 	PANTHOR_CS_GROUP_UNKNOWN_STATE,
529 };
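/*
 * The FW-reported CSG slot state is mapped back to these values in
 * csg_slot_sync_state_locked() below; as a rough summary:
 *
 *	CSG_STATE_START / CSG_STATE_RESUME -> PANTHOR_CS_GROUP_ACTIVE
 *	CSG_STATE_SUSPEND                  -> PANTHOR_CS_GROUP_SUSPENDED
 *	CSG_STATE_TERMINATE                -> PANTHOR_CS_GROUP_TERMINATED
 *	anything else                      -> PANTHOR_CS_GROUP_UNKNOWN_STATE
 *	                                      (a device reset is also scheduled)
 */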
530 
531 /**
532  * struct panthor_group - Scheduling group object
533  */
534 struct panthor_group {
535 	/** @refcount: Reference count */
536 	struct kref refcount;
537 
538 	/** @ptdev: Device. */
539 	struct panthor_device *ptdev;
540 
541 	/** @vm: VM bound to the group. */
542 	struct panthor_vm *vm;
543 
544 	/** @compute_core_mask: Mask of shader cores that can be used for compute jobs. */
545 	u64 compute_core_mask;
546 
547 	/** @fragment_core_mask: Mask of shader cores that can be used for fragment jobs. */
548 	u64 fragment_core_mask;
549 
550 	/** @tiler_core_mask: Mask of tiler cores that can be used for tiler jobs. */
551 	u64 tiler_core_mask;
552 
553 	/** @max_compute_cores: Maximum number of shader cores used for compute jobs. */
554 	u8 max_compute_cores;
555 
556 	/** @max_fragment_cores: Maximum number of shader cores used for fragment jobs. */
557 	u8 max_fragment_cores;
558 
559 	/** @max_tiler_cores: Maximum number of tiler cores used for tiler jobs. */
560 	u8 max_tiler_cores;
561 
562 	/** @priority: Group priority (check panthor_csg_priority). */
563 	u8 priority;
564 
565 	/** @blocked_queues: Bitmask reflecting the blocked queues. */
566 	u32 blocked_queues;
567 
568 	/** @idle_queues: Bitmask reflecting the idle queues. */
569 	u32 idle_queues;
570 
571 	/** @fatal_lock: Lock used to protect access to fatal fields. */
572 	spinlock_t fatal_lock;
573 
574 	/** @fatal_queues: Bitmask reflecting the queues that hit a fatal exception. */
575 	u32 fatal_queues;
576 
577 	/** @tiler_oom: Mask of queues that have a tiler OOM event to process. */
578 	atomic_t tiler_oom;
579 
580 	/** @queue_count: Number of queues in this group. */
581 	u32 queue_count;
582 
583 	/** @queues: Queues owned by this group. */
584 	struct panthor_queue *queues[MAX_CS_PER_CSG];
585 
586 	/**
587 	 * @csg_id: ID of the FW group slot.
588 	 *
589 	 * -1 when the group is not scheduled/active.
590 	 */
591 	int csg_id;
592 
593 	/**
594 	 * @destroyed: True when the group has been destroyed.
595 	 *
596 	 * If a group is destroyed it becomes useless: no further jobs can be submitted
597 	 * to its queues. We simply wait for all references to be dropped so we can
598 	 * release the group object.
599 	 */
600 	bool destroyed;
601 
602 	/**
603 	 * @timedout: True when a timeout occurred on any of the queues owned by
604 	 * this group.
605 	 *
606 	 * Timeouts can be reported by drm_sched or by the FW. If a reset is required,
607 	 * and the group can't be suspended, this also leads to a timeout. In any case,
608 	 * any timeout situation is unrecoverable, and the group becomes useless. We
609 	 * simply wait for all references to be dropped so we can release the group
610 	 * object.
611 	 */
612 	bool timedout;
613 
614 	/**
615 	 * @innocent: True when the group becomes unusable because the group suspension
616 	 * failed during a reset.
617 	 *
618 	 * Sometimes the FW is put in a bad state by other groups, causing the group
619 	 * suspension done in the reset path to fail. In that case, we consider the
620 	 * group innocent.
621 	 */
622 	bool innocent;
623 
624 	/**
625 	 * @syncobjs: Pool of per-queue synchronization objects.
626 	 *
627 	 * One sync object per queue. The position of the sync object is
628 	 * determined by the queue index.
629 	 */
630 	struct panthor_kernel_bo *syncobjs;
631 
632 	/** @fdinfo: Per-file info exposed through /proc/<process>/fdinfo */
633 	struct {
634 		/** @data: Total sampled values for jobs in queues from this group. */
635 		struct panthor_gpu_usage data;
636 
637 		/**
638 		 * @fdinfo.lock: Spinlock to govern concurrent access from drm file's fdinfo
639 		 * callback and job post-completion processing function
640 		 */
641 		spinlock_t lock;
642 
643 		/** @fdinfo.kbo_sizes: Aggregate size of private kernel BOs held by the group. */
644 		size_t kbo_sizes;
645 	} fdinfo;
646 
647 	/** @task_info: Info of current->group_leader that created the group. */
648 	struct {
649 		/** @task_info.pid: pid of current->group_leader */
650 		pid_t pid;
651 
652 		/** @task_info.comm: comm of current->group_leader */
653 		char comm[TASK_COMM_LEN];
654 	} task_info;
655 
656 	/** @state: Group state. */
657 	enum panthor_group_state state;
658 
659 	/**
660 	 * @suspend_buf: Suspend buffer.
661 	 *
662 	 * Stores the state of the group and its queues when a group is suspended.
663 	 * Used at resume time to restore the group in its previous state.
664 	 *
665 	 * The size of the suspend buffer is exposed through the FW interface.
666 	 */
667 	struct panthor_kernel_bo *suspend_buf;
668 
669 	/**
670 	 * @protm_suspend_buf: Protection mode suspend buffer.
671 	 *
672 	 * Stores the state of the group and its queues when a group that's in
673 	 * protection mode is suspended.
674 	 *
675 	 * Used at resume time to restore the group in its previous state.
676 	 *
677 	 * The size of the protection mode suspend buffer is exposed through the
678 	 * FW interface.
679 	 */
680 	struct panthor_kernel_bo *protm_suspend_buf;
681 
682 	/** @sync_upd_work: Work used to check/signal job fences. */
683 	struct work_struct sync_upd_work;
684 
685 	/** @tiler_oom_work: Work used to process tiler OOM events happening on this group. */
686 	struct work_struct tiler_oom_work;
687 
688 	/** @term_work: Work used to finish the group termination procedure. */
689 	struct work_struct term_work;
690 
691 	/**
692 	 * @release_work: Work used to release group resources.
693 	 *
694 	 * We need to postpone the group release to avoid a deadlock when
695 	 * the last ref is released in the tick work.
696 	 */
697 	struct work_struct release_work;
698 
699 	/**
700 	 * @run_node: Node used to insert the group in the
701 	 * panthor_scheduler::groups::{runnable,idle} and
702 	 * panthor_scheduler::reset.stopped_groups lists.
703 	 */
704 	struct list_head run_node;
705 
706 	/**
707 	 * @wait_node: Node used to insert the group in the
708 	 * panthor_scheduler::groups::waiting list.
709 	 */
710 	struct list_head wait_node;
711 };
712 
713 struct panthor_job_profiling_data {
714 	struct {
715 		u64 before;
716 		u64 after;
717 	} cycles;
718 
719 	struct {
720 		u64 before;
721 		u64 after;
722 	} time;
723 };
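/*
 * Illustrative only: a profiled job contributes the delta between its two
 * samples to the per-group fdinfo counters, roughly as below, assuming
 * panthor_gpu_usage exposes matching time/cycles counters:
 *
 *	group->fdinfo.data.cycles += data->cycles.after - data->cycles.before;
 *	group->fdinfo.data.time += data->time.after - data->time.before;
 */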
724 
725 /**
726  * group_queue_work() - Queue a group work
727  * @group: Group to queue the work for.
728  * @wname: Work name.
729  *
730  * Grabs a ref and queues a work item to the scheduler workqueue. If
731  * the work was already queued, we release the reference we grabbed.
732  *
733  * Work callbacks must release the reference we grabbed here.
734  */
735 #define group_queue_work(group, wname) \
736 	do { \
737 		group_get(group); \
738 		if (!queue_work((group)->ptdev->scheduler->wq, &(group)->wname ## _work)) \
739 			group_put(group); \
740 	} while (0)
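/*
 * Example usage: group_queue_work(group, term) expands (roughly) to:
 *
 *	group_get(group);
 *	if (!queue_work(group->ptdev->scheduler->wq, &group->term_work))
 *		group_put(group);
 */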
741 
742 /**
743  * sched_queue_work() - Queue a scheduler work.
744  * @sched: Scheduler object.
745  * @wname: Work name.
746  *
747  * Conditionally queues a scheduler work if no reset is pending/in-progress.
748  */
749 #define sched_queue_work(sched, wname) \
750 	do { \
751 		if (!atomic_read(&(sched)->reset.in_progress) && \
752 		    !panthor_device_reset_is_pending((sched)->ptdev)) \
753 			queue_work((sched)->wq, &(sched)->wname ## _work); \
754 	} while (0)
755 
756 /**
757  * sched_queue_delayed_work() - Queue a scheduler delayed work.
758  * @sched: Scheduler object.
759  * @wname: Work name.
760  * @delay: Work delay in jiffies.
761  *
762  * Conditionally queues a scheduler delayed work if no reset is
763  * pending/in-progress.
764  */
765 #define sched_queue_delayed_work(sched, wname, delay) \
766 	do { \
767 		if (!atomic_read(&sched->reset.in_progress) && \
768 		    !panthor_device_reset_is_pending((sched)->ptdev)) \
769 			mod_delayed_work((sched)->wq, &(sched)->wname ## _work, delay); \
770 	} while (0)
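/*
 * Example usage, as done by the event handlers below to force an immediate
 * re-evaluation of the group slots or a synchronization object scan:
 *
 *	sched_queue_delayed_work(sched, tick, 0);
 *	sched_queue_work(ptdev->scheduler, sync_upd);
 */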
771 
772 /*
773  * We currently set the maximum number of groups per file to an arbitrarily low value.
774  * But this can be updated if we need more.
775  */
776 #define MAX_GROUPS_PER_POOL 128
777 
778 /**
779  * struct panthor_group_pool - Group pool
780  *
781  * Each file gets assigned a group pool.
782  */
783 struct panthor_group_pool {
784 	/** @xa: Xarray used to manage group handles. */
785 	struct xarray xa;
786 };
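/*
 * Group handles handed back to userspace are plain xarray indices. A minimal
 * sketch of handle allocation at group creation time (illustrative only,
 * error handling omitted, and the reservation of handle 0 as invalid is an
 * assumption):
 *
 *	u32 gid;
 *	int ret;
 *
 *	ret = xa_alloc(&pool->xa, &gid, group,
 *		       XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL);
 *	if (!ret)
 *		*group_handle = gid;
 */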
787 
788 /**
789  * struct panthor_job - Used to manage a GPU job
790  */
791 struct panthor_job {
792 	/** @base: Inherit from drm_sched_job. */
793 	struct drm_sched_job base;
794 
795 	/** @refcount: Reference count. */
796 	struct kref refcount;
797 
798 	/** @group: Group of the queue this job will be pushed to. */
799 	struct panthor_group *group;
800 
801 	/** @queue_idx: Index of the queue inside @group. */
802 	u32 queue_idx;
803 
804 	/** @call_info: Information about the userspace command stream call. */
805 	struct {
806 		/** @start: GPU address of the userspace command stream. */
807 		u64 start;
808 
809 		/** @size: Size of the userspace command stream. */
810 		u32 size;
811 
812 		/**
813 		 * @latest_flush: Flush ID at the time the userspace command
814 		 * stream was built.
815 		 *
816 		 * Needed for the flush reduction mechanism.
817 		 */
818 		u32 latest_flush;
819 	} call_info;
820 
821 	/** @ringbuf: Position of this job in the ring buffer. */
822 	struct {
823 		/** @start: Start offset. */
824 		u64 start;
825 
826 		/** @end: End offset. */
827 		u64 end;
828 	} ringbuf;
829 
830 	/**
831 	 * @node: Used to insert the job in the panthor_queue::fence_ctx::in_flight_jobs
832 	 * list.
833 	 */
834 	struct list_head node;
835 
836 	/** @done_fence: Fence signaled when the job is finished or cancelled. */
837 	struct dma_fence *done_fence;
838 
839 	/** @profiling: Job profiling information. */
840 	struct {
841 		/** @mask: Current device job profiling enablement bitmask. */
842 		u32 mask;
843 
844 		/** @slot: Job index in the profiling slots BO. */
845 		u32 slot;
846 	} profiling;
847 };
848 
849 static void
850 panthor_queue_put_syncwait_obj(struct panthor_queue *queue)
851 {
852 	if (queue->syncwait.kmap) {
853 		struct iosys_map map = IOSYS_MAP_INIT_VADDR(queue->syncwait.kmap);
854 
855 		drm_gem_vunmap(queue->syncwait.obj, &map);
856 		queue->syncwait.kmap = NULL;
857 	}
858 
859 	drm_gem_object_put(queue->syncwait.obj);
860 	queue->syncwait.obj = NULL;
861 }
862 
863 static void *
864 panthor_queue_get_syncwait_obj(struct panthor_group *group, struct panthor_queue *queue)
865 {
866 	struct panthor_device *ptdev = group->ptdev;
867 	struct panthor_gem_object *bo;
868 	struct iosys_map map;
869 	int ret;
870 
871 	if (queue->syncwait.kmap)
872 		return queue->syncwait.kmap + queue->syncwait.offset;
873 
874 	bo = panthor_vm_get_bo_for_va(group->vm,
875 				      queue->syncwait.gpu_va,
876 				      &queue->syncwait.offset);
877 	if (drm_WARN_ON(&ptdev->base, IS_ERR_OR_NULL(bo)))
878 		goto err_put_syncwait_obj;
879 
880 	queue->syncwait.obj = &bo->base.base;
881 	ret = drm_gem_vmap(queue->syncwait.obj, &map);
882 	if (drm_WARN_ON(&ptdev->base, ret))
883 		goto err_put_syncwait_obj;
884 
885 	queue->syncwait.kmap = map.vaddr;
886 	if (drm_WARN_ON(&ptdev->base, !queue->syncwait.kmap))
887 		goto err_put_syncwait_obj;
888 
889 	return queue->syncwait.kmap + queue->syncwait.offset;
890 
891 err_put_syncwait_obj:
892 	panthor_queue_put_syncwait_obj(queue);
893 	return NULL;
894 }
895 
896 static void group_free_queue(struct panthor_group *group, struct panthor_queue *queue)
897 {
898 	if (IS_ERR_OR_NULL(queue))
899 		return;
900 
901 	if (queue->entity.fence_context)
902 		drm_sched_entity_destroy(&queue->entity);
903 
904 	if (queue->scheduler.ops)
905 		drm_sched_fini(&queue->scheduler);
906 
907 	kfree(queue->name);
908 
909 	panthor_queue_put_syncwait_obj(queue);
910 
911 	panthor_kernel_bo_destroy(queue->ringbuf);
912 	panthor_kernel_bo_destroy(queue->iface.mem);
913 	panthor_kernel_bo_destroy(queue->profiling.slots);
914 
915 	/* Release the last_fence we were holding, if any. */
916 	dma_fence_put(queue->fence_ctx.last_fence);
917 
918 	kfree(queue);
919 }
920 
921 static void group_release_work(struct work_struct *work)
922 {
923 	struct panthor_group *group = container_of(work,
924 						   struct panthor_group,
925 						   release_work);
926 	u32 i;
927 
928 	for (i = 0; i < group->queue_count; i++)
929 		group_free_queue(group, group->queues[i]);
930 
931 	panthor_kernel_bo_destroy(group->suspend_buf);
932 	panthor_kernel_bo_destroy(group->protm_suspend_buf);
933 	panthor_kernel_bo_destroy(group->syncobjs);
934 
935 	panthor_vm_put(group->vm);
936 	kfree(group);
937 }
938 
939 static void group_release(struct kref *kref)
940 {
941 	struct panthor_group *group = container_of(kref,
942 						   struct panthor_group,
943 						   refcount);
944 	struct panthor_device *ptdev = group->ptdev;
945 
946 	drm_WARN_ON(&ptdev->base, group->csg_id >= 0);
947 	drm_WARN_ON(&ptdev->base, !list_empty(&group->run_node));
948 	drm_WARN_ON(&ptdev->base, !list_empty(&group->wait_node));
949 
950 	queue_work(panthor_cleanup_wq, &group->release_work);
951 }
952 
953 static void group_put(struct panthor_group *group)
954 {
955 	if (group)
956 		kref_put(&group->refcount, group_release);
957 }
958 
959 static struct panthor_group *
960 group_get(struct panthor_group *group)
961 {
962 	if (group)
963 		kref_get(&group->refcount);
964 
965 	return group;
966 }
967 
968 /**
969  * group_bind_locked() - Bind a group to a group slot
970  * @group: Group.
971  * @csg_id: Slot.
972  *
973  * Return: 0 on success, a negative error code otherwise.
974  */
975 static int
976 group_bind_locked(struct panthor_group *group, u32 csg_id)
977 {
978 	struct panthor_device *ptdev = group->ptdev;
979 	struct panthor_csg_slot *csg_slot;
980 	int ret;
981 
982 	lockdep_assert_held(&ptdev->scheduler->lock);
983 
984 	if (drm_WARN_ON(&ptdev->base, group->csg_id != -1 || csg_id >= MAX_CSGS ||
985 			ptdev->scheduler->csg_slots[csg_id].group))
986 		return -EINVAL;
987 
988 	ret = panthor_vm_active(group->vm);
989 	if (ret)
990 		return ret;
991 
992 	csg_slot = &ptdev->scheduler->csg_slots[csg_id];
993 	group_get(group);
994 	group->csg_id = csg_id;
995 
996 	/* Dummy doorbell allocation: doorbell is assigned to the group and
997 	 * all queues use the same doorbell.
998 	 *
999 	 * TODO: Implement LRU-based doorbell assignment, so the most often
1000 	 * updated queues get their own doorbell, thus avoiding useless checks
1001 	 * on queues belonging to the same group that are rarely updated.
1002 	 */
1003 	for (u32 i = 0; i < group->queue_count; i++)
1004 		group->queues[i]->doorbell_id = csg_id + 1;
1005 
1006 	csg_slot->group = group;
1007 
1008 	return 0;
1009 }
1010 
1011 /**
1012  * group_unbind_locked() - Unbind a group from a slot.
1013  * @group: Group to unbind.
1014  *
1015  * Return: 0 on success, a negative error code otherwise.
1016  */
1017 static int
1018 group_unbind_locked(struct panthor_group *group)
1019 {
1020 	struct panthor_device *ptdev = group->ptdev;
1021 	struct panthor_csg_slot *slot;
1022 
1023 	lockdep_assert_held(&ptdev->scheduler->lock);
1024 
1025 	if (drm_WARN_ON(&ptdev->base, group->csg_id < 0 || group->csg_id >= MAX_CSGS))
1026 		return -EINVAL;
1027 
1028 	if (drm_WARN_ON(&ptdev->base, group->state == PANTHOR_CS_GROUP_ACTIVE))
1029 		return -EINVAL;
1030 
1031 	slot = &ptdev->scheduler->csg_slots[group->csg_id];
1032 	panthor_vm_idle(group->vm);
1033 	group->csg_id = -1;
1034 
1035 	/* Tiler OOM events will be re-issued next time the group is scheduled. */
1036 	atomic_set(&group->tiler_oom, 0);
1037 	cancel_work(&group->tiler_oom_work);
1038 
1039 	for (u32 i = 0; i < group->queue_count; i++)
1040 		group->queues[i]->doorbell_id = -1;
1041 
1042 	slot->group = NULL;
1043 
1044 	group_put(group);
1045 	return 0;
1046 }
1047 
1048 /**
1049  * cs_slot_prog_locked() - Program a queue slot
1050  * @ptdev: Device.
1051  * @csg_id: Group slot ID.
1052  * @cs_id: Queue slot ID.
1053  *
1054  * Program a queue slot with the queue information so things can start being
1055  * executed on this queue.
1056  *
1057  * The group slot must have a group bound to it already (group_bind_locked()).
1058  */
1059 static void
1060 cs_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
1061 {
1062 	struct panthor_queue *queue = ptdev->scheduler->csg_slots[csg_id].group->queues[cs_id];
1063 	struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1064 
1065 	lockdep_assert_held(&ptdev->scheduler->lock);
1066 
1067 	queue->iface.input->extract = queue->iface.output->extract;
1068 	drm_WARN_ON(&ptdev->base, queue->iface.input->insert < queue->iface.input->extract);
1069 
1070 	cs_iface->input->ringbuf_base = panthor_kernel_bo_gpuva(queue->ringbuf);
1071 	cs_iface->input->ringbuf_size = panthor_kernel_bo_size(queue->ringbuf);
1072 	cs_iface->input->ringbuf_input = queue->iface.input_fw_va;
1073 	cs_iface->input->ringbuf_output = queue->iface.output_fw_va;
1074 	cs_iface->input->config = CS_CONFIG_PRIORITY(queue->priority) |
1075 				  CS_CONFIG_DOORBELL(queue->doorbell_id);
1076 	cs_iface->input->ack_irq_mask = ~0;
1077 	panthor_fw_update_reqs(cs_iface, req,
1078 			       CS_IDLE_SYNC_WAIT |
1079 			       CS_IDLE_EMPTY |
1080 			       CS_STATE_START |
1081 			       CS_EXTRACT_EVENT,
1082 			       CS_IDLE_SYNC_WAIT |
1083 			       CS_IDLE_EMPTY |
1084 			       CS_STATE_MASK |
1085 			       CS_EXTRACT_EVENT);
1086 	if (queue->iface.input->insert != queue->iface.input->extract && queue->timeout_suspended) {
1087 		drm_sched_resume_timeout(&queue->scheduler, queue->remaining_time);
1088 		queue->timeout_suspended = false;
1089 	}
1090 }
1091 
1092 /**
1093  * cs_slot_reset_locked() - Reset a queue slot
1094  * @ptdev: Device.
1095  * @csg_id: Group slot.
1096  * @cs_id: Queue slot.
1097  *
1098  * Change the queue slot state to STOP and suspend the queue timeout if
1099  * the queue is not blocked.
1100  *
1101  * The group slot must have a group bound to it (group_bind_locked()).
1102  */
1103 static int
1104 cs_slot_reset_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
1105 {
1106 	struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1107 	struct panthor_group *group = ptdev->scheduler->csg_slots[csg_id].group;
1108 	struct panthor_queue *queue = group->queues[cs_id];
1109 
1110 	lockdep_assert_held(&ptdev->scheduler->lock);
1111 
1112 	panthor_fw_update_reqs(cs_iface, req,
1113 			       CS_STATE_STOP,
1114 			       CS_STATE_MASK);
1115 
1116 	/* If the queue is blocked, we want to keep the timeout running, so
1117 	 * we can detect unbounded waits and kill the group when that happens.
1118 	 */
1119 	if (!(group->blocked_queues & BIT(cs_id)) && !queue->timeout_suspended) {
1120 		queue->remaining_time = drm_sched_suspend_timeout(&queue->scheduler);
1121 		queue->timeout_suspended = true;
1122 		WARN_ON(queue->remaining_time > msecs_to_jiffies(JOB_TIMEOUT_MS));
1123 	}
1124 
1125 	return 0;
1126 }
1127 
1128 /**
1129  * csg_slot_sync_priority_locked() - Synchronize the group slot priority
1130  * @ptdev: Device.
1131  * @csg_id: Group slot ID.
1132  *
1133  * Group slot priority update happens asynchronously. When we receive a
1134  * %CSG_ENDPOINT_CONFIG, we know the update is effective, and can
1135  * reflect it to our panthor_csg_slot object.
1136  */
1137 static void
1138 csg_slot_sync_priority_locked(struct panthor_device *ptdev, u32 csg_id)
1139 {
1140 	struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1141 	struct panthor_fw_csg_iface *csg_iface;
1142 
1143 	lockdep_assert_held(&ptdev->scheduler->lock);
1144 
1145 	csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
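	/* CSG_EP_REQ_PRIORITY occupies the top nibble (bits 31:28) of
	 * endpoint_req, hence the mask and shift by 28 below (priorities fit
	 * in 4 bits, see MAX_CSG_PRIO).
	 */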
1146 	csg_slot->priority = (csg_iface->input->endpoint_req & CSG_EP_REQ_PRIORITY_MASK) >> 28;
1147 }
1148 
1149 /**
1150  * cs_slot_sync_queue_state_locked() - Synchronize the queue slot state
1151  * @ptdev: Device.
1152  * @csg_id: Group slot.
1153  * @cs_id: Queue slot.
1154  *
1155  * Queue state is updated on group suspend or STATUS_UPDATE event.
1156  */
1157 static void
1158 cs_slot_sync_queue_state_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
1159 {
1160 	struct panthor_group *group = ptdev->scheduler->csg_slots[csg_id].group;
1161 	struct panthor_queue *queue = group->queues[cs_id];
1162 	struct panthor_fw_cs_iface *cs_iface =
1163 		panthor_fw_get_cs_iface(group->ptdev, csg_id, cs_id);
1164 
1165 	u32 status_wait_cond;
1166 
1167 	switch (cs_iface->output->status_blocked_reason) {
1168 	case CS_STATUS_BLOCKED_REASON_UNBLOCKED:
1169 		if (queue->iface.input->insert == queue->iface.output->extract &&
1170 		    cs_iface->output->status_scoreboards == 0)
1171 			group->idle_queues |= BIT(cs_id);
1172 		break;
1173 
1174 	case CS_STATUS_BLOCKED_REASON_SYNC_WAIT:
1175 		if (list_empty(&group->wait_node)) {
1176 			list_move_tail(&group->wait_node,
1177 				       &group->ptdev->scheduler->groups.waiting);
1178 		}
1179 
1180 		/* The queue is only blocked if there's no deferred operation
1181 		 * pending, which can be checked through the scoreboard status.
1182 		 */
1183 		if (!cs_iface->output->status_scoreboards)
1184 			group->blocked_queues |= BIT(cs_id);
1185 
1186 		queue->syncwait.gpu_va = cs_iface->output->status_wait_sync_ptr;
1187 		queue->syncwait.ref = cs_iface->output->status_wait_sync_value;
1188 		status_wait_cond = cs_iface->output->status_wait & CS_STATUS_WAIT_SYNC_COND_MASK;
1189 		queue->syncwait.gt = status_wait_cond == CS_STATUS_WAIT_SYNC_COND_GT;
1190 		if (cs_iface->output->status_wait & CS_STATUS_WAIT_SYNC_64B) {
1191 			u64 sync_val_hi = cs_iface->output->status_wait_sync_value_hi;
1192 
1193 			queue->syncwait.sync64 = true;
1194 			queue->syncwait.ref |= sync_val_hi << 32;
1195 		} else {
1196 			queue->syncwait.sync64 = false;
1197 		}
1198 		break;
1199 
1200 	default:
1201 		/* Other reasons are not blocking. Consider the queue as runnable
1202 		 * in those cases.
1203 		 */
1204 		break;
1205 	}
1206 }
1207 
1208 static void
1209 csg_slot_sync_queues_state_locked(struct panthor_device *ptdev, u32 csg_id)
1210 {
1211 	struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1212 	struct panthor_group *group = csg_slot->group;
1213 	u32 i;
1214 
1215 	lockdep_assert_held(&ptdev->scheduler->lock);
1216 
1217 	group->idle_queues = 0;
1218 	group->blocked_queues = 0;
1219 
1220 	for (i = 0; i < group->queue_count; i++) {
1221 		if (group->queues[i])
1222 			cs_slot_sync_queue_state_locked(ptdev, csg_id, i);
1223 	}
1224 }
1225 
1226 static void
1227 csg_slot_sync_state_locked(struct panthor_device *ptdev, u32 csg_id)
1228 {
1229 	struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1230 	struct panthor_fw_csg_iface *csg_iface;
1231 	struct panthor_group *group;
1232 	enum panthor_group_state new_state, old_state;
1233 	u32 csg_state;
1234 
1235 	lockdep_assert_held(&ptdev->scheduler->lock);
1236 
1237 	csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1238 	group = csg_slot->group;
1239 
1240 	if (!group)
1241 		return;
1242 
1243 	old_state = group->state;
1244 	csg_state = csg_iface->output->ack & CSG_STATE_MASK;
1245 	switch (csg_state) {
1246 	case CSG_STATE_START:
1247 	case CSG_STATE_RESUME:
1248 		new_state = PANTHOR_CS_GROUP_ACTIVE;
1249 		break;
1250 	case CSG_STATE_TERMINATE:
1251 		new_state = PANTHOR_CS_GROUP_TERMINATED;
1252 		break;
1253 	case CSG_STATE_SUSPEND:
1254 		new_state = PANTHOR_CS_GROUP_SUSPENDED;
1255 		break;
1256 	default:
1257 		/* The unknown state might be caused by a FW state corruption,
1258 		 * which means the group metadata can't be trusted anymore, and
1259 		 * the SUSPEND operation might propagate the corruption to the
1260 		 * suspend buffers. Flag the group state as unknown to make
1261 		 * sure it's unusable after that point.
1262 		 */
1263 		drm_err(&ptdev->base, "Invalid state on CSG %d (state=%d)",
1264 			csg_id, csg_state);
1265 		new_state = PANTHOR_CS_GROUP_UNKNOWN_STATE;
1266 		break;
1267 	}
1268 
1269 	if (old_state == new_state)
1270 		return;
1271 
1272 	/* The unknown state might be caused by a FW issue, reset the FW to
1273 	 * take a fresh start.
1274 	 */
1275 	if (new_state == PANTHOR_CS_GROUP_UNKNOWN_STATE)
1276 		panthor_device_schedule_reset(ptdev);
1277 
1278 	if (new_state == PANTHOR_CS_GROUP_SUSPENDED)
1279 		csg_slot_sync_queues_state_locked(ptdev, csg_id);
1280 
1281 	if (old_state == PANTHOR_CS_GROUP_ACTIVE) {
1282 		u32 i;
1283 
1284 		/* Reset the queue slots so we start from a clean
1285 		 * state when starting/resuming a new group on this
1286 		 * CSG slot. No wait needed here, and no ringbell
1287 		 * either, since the CS slot will only be re-used
1288 		 * on the next CSG start operation.
1289 		 */
1290 		for (i = 0; i < group->queue_count; i++) {
1291 			if (group->queues[i])
1292 				cs_slot_reset_locked(ptdev, csg_id, i);
1293 		}
1294 	}
1295 
1296 	group->state = new_state;
1297 }
1298 
1299 static int
1300 csg_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 priority)
1301 {
1302 	struct panthor_fw_csg_iface *csg_iface;
1303 	struct panthor_csg_slot *csg_slot;
1304 	struct panthor_group *group;
1305 	u32 queue_mask = 0, i;
1306 
1307 	lockdep_assert_held(&ptdev->scheduler->lock);
1308 
1309 	if (priority > MAX_CSG_PRIO)
1310 		return -EINVAL;
1311 
1312 	if (drm_WARN_ON(&ptdev->base, csg_id >= MAX_CSGS))
1313 		return -EINVAL;
1314 
1315 	csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1316 	group = csg_slot->group;
1317 	if (!group || group->state == PANTHOR_CS_GROUP_ACTIVE)
1318 		return 0;
1319 
1320 	csg_iface = panthor_fw_get_csg_iface(group->ptdev, csg_id);
1321 
1322 	for (i = 0; i < group->queue_count; i++) {
1323 		if (group->queues[i]) {
1324 			cs_slot_prog_locked(ptdev, csg_id, i);
1325 			queue_mask |= BIT(i);
1326 		}
1327 	}
1328 
1329 	csg_iface->input->allow_compute = group->compute_core_mask;
1330 	csg_iface->input->allow_fragment = group->fragment_core_mask;
1331 	csg_iface->input->allow_other = group->tiler_core_mask;
1332 	csg_iface->input->endpoint_req = CSG_EP_REQ_COMPUTE(group->max_compute_cores) |
1333 					 CSG_EP_REQ_FRAGMENT(group->max_fragment_cores) |
1334 					 CSG_EP_REQ_TILER(group->max_tiler_cores) |
1335 					 CSG_EP_REQ_PRIORITY(priority);
1336 	csg_iface->input->config = panthor_vm_as(group->vm);
1337 
1338 	if (group->suspend_buf)
1339 		csg_iface->input->suspend_buf = panthor_kernel_bo_gpuva(group->suspend_buf);
1340 	else
1341 		csg_iface->input->suspend_buf = 0;
1342 
1343 	if (group->protm_suspend_buf) {
1344 		csg_iface->input->protm_suspend_buf =
1345 			panthor_kernel_bo_gpuva(group->protm_suspend_buf);
1346 	} else {
1347 		csg_iface->input->protm_suspend_buf = 0;
1348 	}
1349 
1350 	csg_iface->input->ack_irq_mask = ~0;
1351 	panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, queue_mask);
1352 	return 0;
1353 }
1354 
1355 static void
1356 cs_slot_process_fatal_event_locked(struct panthor_device *ptdev,
1357 				   u32 csg_id, u32 cs_id)
1358 {
1359 	struct panthor_scheduler *sched = ptdev->scheduler;
1360 	struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
1361 	struct panthor_group *group = csg_slot->group;
1362 	struct panthor_fw_cs_iface *cs_iface;
1363 	u32 fatal;
1364 	u64 info;
1365 
1366 	lockdep_assert_held(&sched->lock);
1367 
1368 	cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1369 	fatal = cs_iface->output->fatal;
1370 	info = cs_iface->output->fatal_info;
1371 
1372 	if (group) {
1373 		drm_warn(&ptdev->base, "CS_FATAL: pid=%d, comm=%s\n",
1374 			 group->task_info.pid, group->task_info.comm);
1375 
1376 		group->fatal_queues |= BIT(cs_id);
1377 	}
1378 
1379 	if (CS_EXCEPTION_TYPE(fatal) == DRM_PANTHOR_EXCEPTION_CS_UNRECOVERABLE) {
1380 		/* If this exception is unrecoverable, queue a reset, and make
1381 		 * sure we stop scheduling groups until the reset has happened.
1382 		 */
1383 		panthor_device_schedule_reset(ptdev);
1384 		cancel_delayed_work(&sched->tick_work);
1385 	} else {
1386 		sched_queue_delayed_work(sched, tick, 0);
1387 	}
1388 
1389 	drm_warn(&ptdev->base,
1390 		 "CSG slot %d CS slot: %d\n"
1391 		 "CS_FATAL.EXCEPTION_TYPE: 0x%x (%s)\n"
1392 		 "CS_FATAL.EXCEPTION_DATA: 0x%x\n"
1393 		 "CS_FATAL_INFO.EXCEPTION_DATA: 0x%llx\n",
1394 		 csg_id, cs_id,
1395 		 (unsigned int)CS_EXCEPTION_TYPE(fatal),
1396 		 panthor_exception_name(ptdev, CS_EXCEPTION_TYPE(fatal)),
1397 		 (unsigned int)CS_EXCEPTION_DATA(fatal),
1398 		 info);
1399 }
1400 
1401 static void
1402 cs_slot_process_fault_event_locked(struct panthor_device *ptdev,
1403 				   u32 csg_id, u32 cs_id)
1404 {
1405 	struct panthor_scheduler *sched = ptdev->scheduler;
1406 	struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
1407 	struct panthor_group *group = csg_slot->group;
1408 	struct panthor_queue *queue = group && cs_id < group->queue_count ?
1409 				      group->queues[cs_id] : NULL;
1410 	struct panthor_fw_cs_iface *cs_iface;
1411 	u32 fault;
1412 	u64 info;
1413 
1414 	lockdep_assert_held(&sched->lock);
1415 
1416 	cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1417 	fault = cs_iface->output->fault;
1418 	info = cs_iface->output->fault_info;
1419 
1420 	if (queue) {
1421 		u64 cs_extract = queue->iface.output->extract;
1422 		struct panthor_job *job;
1423 
1424 		spin_lock(&queue->fence_ctx.lock);
1425 		list_for_each_entry(job, &queue->fence_ctx.in_flight_jobs, node) {
1426 			if (cs_extract >= job->ringbuf.end)
1427 				continue;
1428 
1429 			if (cs_extract < job->ringbuf.start)
1430 				break;
1431 
1432 			dma_fence_set_error(job->done_fence, -EINVAL);
1433 		}
1434 		spin_unlock(&queue->fence_ctx.lock);
1435 	}
1436 
1437 	if (group) {
1438 		drm_warn(&ptdev->base, "CS_FAULT: pid=%d, comm=%s\n",
1439 			 group->task_info.pid, group->task_info.comm);
1440 	}
1441 
1442 	drm_warn(&ptdev->base,
1443 		 "CSG slot %d CS slot: %d\n"
1444 		 "CS_FAULT.EXCEPTION_TYPE: 0x%x (%s)\n"
1445 		 "CS_FAULT.EXCEPTION_DATA: 0x%x\n"
1446 		 "CS_FAULT_INFO.EXCEPTION_DATA: 0x%llx\n",
1447 		 csg_id, cs_id,
1448 		 (unsigned int)CS_EXCEPTION_TYPE(fault),
1449 		 panthor_exception_name(ptdev, CS_EXCEPTION_TYPE(fault)),
1450 		 (unsigned int)CS_EXCEPTION_DATA(fault),
1451 		 info);
1452 }
1453 
1454 static int group_process_tiler_oom(struct panthor_group *group, u32 cs_id)
1455 {
1456 	struct panthor_device *ptdev = group->ptdev;
1457 	struct panthor_scheduler *sched = ptdev->scheduler;
1458 	u32 renderpasses_in_flight, pending_frag_count;
1459 	struct panthor_heap_pool *heaps = NULL;
1460 	u64 heap_address, new_chunk_va = 0;
1461 	u32 vt_start, vt_end, frag_end;
1462 	int ret, csg_id;
1463 
1464 	mutex_lock(&sched->lock);
1465 	csg_id = group->csg_id;
1466 	if (csg_id >= 0) {
1467 		struct panthor_fw_cs_iface *cs_iface;
1468 
1469 		cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1470 		heaps = panthor_vm_get_heap_pool(group->vm, false);
1471 		heap_address = cs_iface->output->heap_address;
1472 		vt_start = cs_iface->output->heap_vt_start;
1473 		vt_end = cs_iface->output->heap_vt_end;
1474 		frag_end = cs_iface->output->heap_frag_end;
1475 		renderpasses_in_flight = vt_start - frag_end;
1476 		pending_frag_count = vt_end - frag_end;
1477 	}
1478 	mutex_unlock(&sched->lock);
1479 
1480 	/* The group got scheduled out, we stop here. We will get a new tiler OOM event
1481 	 * when it's scheduled again.
1482 	 */
1483 	if (unlikely(csg_id < 0))
1484 		return 0;
1485 
1486 	if (IS_ERR(heaps) || frag_end > vt_end || vt_end >= vt_start) {
1487 		ret = -EINVAL;
1488 	} else {
1489 		/* We do the allocation without holding the scheduler lock to avoid
1490 		 * blocking the scheduling.
1491 		 */
1492 		ret = panthor_heap_grow(heaps, heap_address,
1493 					renderpasses_in_flight,
1494 					pending_frag_count, &new_chunk_va);
1495 	}
1496 
1497 	/* If the heap context doesn't have memory for us, we want to let the
1498 	 * FW try to reclaim memory by waiting for fragment jobs to land or by
1499 	 * executing the tiler OOM exception handler, which is supposed to
1500 	 * implement incremental rendering.
1501 	 */
1502 	if (ret && ret != -ENOMEM) {
1503 		drm_warn(&ptdev->base, "Failed to extend the tiler heap\n");
1504 		group->fatal_queues |= BIT(cs_id);
1505 		sched_queue_delayed_work(sched, tick, 0);
1506 		goto out_put_heap_pool;
1507 	}
1508 
1509 	mutex_lock(&sched->lock);
1510 	csg_id = group->csg_id;
1511 	if (csg_id >= 0) {
1512 		struct panthor_fw_csg_iface *csg_iface;
1513 		struct panthor_fw_cs_iface *cs_iface;
1514 
1515 		csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1516 		cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1517 
1518 		cs_iface->input->heap_start = new_chunk_va;
1519 		cs_iface->input->heap_end = new_chunk_va;
1520 		panthor_fw_update_reqs(cs_iface, req, cs_iface->output->ack, CS_TILER_OOM);
1521 		panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, BIT(cs_id));
1522 		panthor_fw_ring_csg_doorbells(ptdev, BIT(csg_id));
1523 	}
1524 	mutex_unlock(&sched->lock);
1525 
1526 	/* We allocated a chunk, but couldn't link it to the heap
1527 	 * context because the group was scheduled out while we were
1528 	 * allocating memory. We need to return this chunk to the heap.
1529 	 */
1530 	if (unlikely(csg_id < 0 && new_chunk_va))
1531 		panthor_heap_return_chunk(heaps, heap_address, new_chunk_va);
1532 
1533 	ret = 0;
1534 
1535 out_put_heap_pool:
1536 	panthor_heap_pool_put(heaps);
1537 	return ret;
1538 }
1539 
1540 static void group_tiler_oom_work(struct work_struct *work)
1541 {
1542 	struct panthor_group *group =
1543 		container_of(work, struct panthor_group, tiler_oom_work);
1544 	u32 tiler_oom = atomic_xchg(&group->tiler_oom, 0);
1545 
1546 	while (tiler_oom) {
1547 		u32 cs_id = ffs(tiler_oom) - 1;
1548 
1549 		group_process_tiler_oom(group, cs_id);
1550 		tiler_oom &= ~BIT(cs_id);
1551 	}
1552 
1553 	group_put(group);
1554 }
1555 
1556 static void
1557 cs_slot_process_tiler_oom_event_locked(struct panthor_device *ptdev,
1558 				       u32 csg_id, u32 cs_id)
1559 {
1560 	struct panthor_scheduler *sched = ptdev->scheduler;
1561 	struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
1562 	struct panthor_group *group = csg_slot->group;
1563 
1564 	lockdep_assert_held(&sched->lock);
1565 
1566 	if (drm_WARN_ON(&ptdev->base, !group))
1567 		return;
1568 
1569 	atomic_or(BIT(cs_id), &group->tiler_oom);
1570 
1571 	/* We don't use group_queue_work() here because we want to queue the
1572 	 * work item to the heap_alloc_wq.
1573 	 */
1574 	group_get(group);
1575 	if (!queue_work(sched->heap_alloc_wq, &group->tiler_oom_work))
1576 		group_put(group);
1577 }
1578 
1579 static bool cs_slot_process_irq_locked(struct panthor_device *ptdev,
1580 				       u32 csg_id, u32 cs_id)
1581 {
1582 	struct panthor_fw_cs_iface *cs_iface;
1583 	u32 req, ack, events;
1584 
1585 	lockdep_assert_held(&ptdev->scheduler->lock);
1586 
1587 	cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1588 	req = cs_iface->input->req;
1589 	ack = cs_iface->output->ack;
1590 	events = (req ^ ack) & CS_EVT_MASK;
1591 
1592 	if (events & CS_FATAL)
1593 		cs_slot_process_fatal_event_locked(ptdev, csg_id, cs_id);
1594 
1595 	if (events & CS_FAULT)
1596 		cs_slot_process_fault_event_locked(ptdev, csg_id, cs_id);
1597 
1598 	if (events & CS_TILER_OOM)
1599 		cs_slot_process_tiler_oom_event_locked(ptdev, csg_id, cs_id);
1600 
1601 	/* We don't acknowledge the TILER_OOM event since its handling is
1602 	 * deferred to a separate work.
1603 	 */
1604 	panthor_fw_update_reqs(cs_iface, req, ack, CS_FATAL | CS_FAULT);
1605 
1606 	return (events & (CS_FAULT | CS_TILER_OOM)) != 0;
1607 }
1608 
1609 static void csg_slot_sync_idle_state_locked(struct panthor_device *ptdev, u32 csg_id)
1610 {
1611 	struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1612 	struct panthor_fw_csg_iface *csg_iface;
1613 
1614 	lockdep_assert_held(&ptdev->scheduler->lock);
1615 
1616 	csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1617 	csg_slot->idle = csg_iface->output->status_state & CSG_STATUS_STATE_IS_IDLE;
1618 }
1619 
1620 static void csg_slot_process_idle_event_locked(struct panthor_device *ptdev, u32 csg_id)
1621 {
1622 	struct panthor_scheduler *sched = ptdev->scheduler;
1623 
1624 	lockdep_assert_held(&sched->lock);
1625 
1626 	sched->might_have_idle_groups = true;
1627 
1628 	/* Schedule a tick so we can evict idle groups and schedule non-idle
1629 	 * ones. This will also update runtime PM and devfreq busy/idle states,
1630 	 * so the device can lower its frequency or get suspended.
1631 	 */
1632 	sched_queue_delayed_work(sched, tick, 0);
1633 }
1634 
1635 static void csg_slot_sync_update_locked(struct panthor_device *ptdev,
1636 					u32 csg_id)
1637 {
1638 	struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1639 	struct panthor_group *group = csg_slot->group;
1640 
1641 	lockdep_assert_held(&ptdev->scheduler->lock);
1642 
1643 	if (group)
1644 		group_queue_work(group, sync_upd);
1645 
1646 	sched_queue_work(ptdev->scheduler, sync_upd);
1647 }
1648 
1649 static void
1650 csg_slot_process_progress_timer_event_locked(struct panthor_device *ptdev, u32 csg_id)
1651 {
1652 	struct panthor_scheduler *sched = ptdev->scheduler;
1653 	struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
1654 	struct panthor_group *group = csg_slot->group;
1655 
1656 	lockdep_assert_held(&sched->lock);
1657 
1658 	group = csg_slot->group;
1659 	if (!drm_WARN_ON(&ptdev->base, !group)) {
1660 		drm_warn(&ptdev->base, "CSG_PROGRESS_TIMER_EVENT: pid=%d, comm=%s\n",
1661 			 group->task_info.pid, group->task_info.comm);
1662 
1663 		group->timedout = true;
1664 	}
1665 
1666 	drm_warn(&ptdev->base, "CSG slot %d progress timeout\n", csg_id);
1667 
1668 	sched_queue_delayed_work(sched, tick, 0);
1669 }
1670 
1671 static void sched_process_csg_irq_locked(struct panthor_device *ptdev, u32 csg_id)
1672 {
1673 	u32 req, ack, cs_irq_req, cs_irq_ack, cs_irqs, csg_events;
1674 	struct panthor_fw_csg_iface *csg_iface;
1675 	u32 ring_cs_db_mask = 0;
1676 
1677 	lockdep_assert_held(&ptdev->scheduler->lock);
1678 
1679 	if (drm_WARN_ON(&ptdev->base, csg_id >= ptdev->scheduler->csg_slot_count))
1680 		return;
1681 
1682 	csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1683 	req = READ_ONCE(csg_iface->input->req);
1684 	ack = READ_ONCE(csg_iface->output->ack);
1685 	cs_irq_req = READ_ONCE(csg_iface->output->cs_irq_req);
1686 	cs_irq_ack = READ_ONCE(csg_iface->input->cs_irq_ack);
1687 	csg_events = (req ^ ack) & CSG_EVT_MASK;
1688 
1689 	/* There may not be any pending CSG/CS interrupts to process */
1690 	if (req == ack && cs_irq_req == cs_irq_ack)
1691 		return;
1692 
1693 	/* Immediately set the IRQ_ACK bits to be the same as the IRQ_REQ bits
1694 	 * before examining the CS_ACK & CS_REQ bits. This ensures the Host
1695 	 * doesn't miss an interrupt for the CS in the race scenario where,
1696 	 * while the Host is servicing an interrupt for the CS, the firmware
1697 	 * sends another interrupt for that CS.
1698 	 */
1699 	csg_iface->input->cs_irq_ack = cs_irq_req;
1700 
1701 	panthor_fw_update_reqs(csg_iface, req, ack,
1702 			       CSG_SYNC_UPDATE |
1703 			       CSG_IDLE |
1704 			       CSG_PROGRESS_TIMER_EVENT);
1705 
1706 	if (csg_events & CSG_IDLE)
1707 		csg_slot_process_idle_event_locked(ptdev, csg_id);
1708 
1709 	if (csg_events & CSG_PROGRESS_TIMER_EVENT)
1710 		csg_slot_process_progress_timer_event_locked(ptdev, csg_id);
1711 
1712 	cs_irqs = cs_irq_req ^ cs_irq_ack;
1713 	while (cs_irqs) {
1714 		u32 cs_id = ffs(cs_irqs) - 1;
1715 
1716 		if (cs_slot_process_irq_locked(ptdev, csg_id, cs_id))
1717 			ring_cs_db_mask |= BIT(cs_id);
1718 
1719 		cs_irqs &= ~BIT(cs_id);
1720 	}
1721 
1722 	if (csg_events & CSG_SYNC_UPDATE)
1723 		csg_slot_sync_update_locked(ptdev, csg_id);
1724 
1725 	if (ring_cs_db_mask)
1726 		panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, ring_cs_db_mask);
1727 
1728 	panthor_fw_ring_csg_doorbells(ptdev, BIT(csg_id));
1729 }
1730 
1731 static void sched_process_idle_event_locked(struct panthor_device *ptdev)
1732 {
1733 	struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
1734 
1735 	lockdep_assert_held(&ptdev->scheduler->lock);
1736 
1737 	/* Acknowledge the idle event and schedule a tick. */
1738 	panthor_fw_update_reqs(glb_iface, req, glb_iface->output->ack, GLB_IDLE);
1739 	sched_queue_delayed_work(ptdev->scheduler, tick, 0);
1740 }
1741 
1742 /**
1743  * sched_process_global_irq_locked() - Process the scheduling part of a global IRQ
1744  * @ptdev: Device.
1745  */
1746 static void sched_process_global_irq_locked(struct panthor_device *ptdev)
1747 {
1748 	struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
1749 	u32 req, ack, evts;
1750 
1751 	lockdep_assert_held(&ptdev->scheduler->lock);
1752 
1753 	req = READ_ONCE(glb_iface->input->req);
1754 	ack = READ_ONCE(glb_iface->output->ack);
1755 	evts = (req ^ ack) & GLB_EVT_MASK;
1756 
1757 	if (evts & GLB_IDLE)
1758 		sched_process_idle_event_locked(ptdev);
1759 }
1760 
1761 static void process_fw_events_work(struct work_struct *work)
1762 {
1763 	struct panthor_scheduler *sched = container_of(work, struct panthor_scheduler,
1764 						      fw_events_work);
1765 	u32 events = atomic_xchg(&sched->fw_events, 0);
1766 	struct panthor_device *ptdev = sched->ptdev;
1767 
1768 	mutex_lock(&sched->lock);
1769 
1770 	if (events & JOB_INT_GLOBAL_IF) {
1771 		sched_process_global_irq_locked(ptdev);
1772 		events &= ~JOB_INT_GLOBAL_IF;
1773 	}
1774 
1775 	while (events) {
1776 		u32 csg_id = ffs(events) - 1;
1777 
1778 		sched_process_csg_irq_locked(ptdev, csg_id);
1779 		events &= ~BIT(csg_id);
1780 	}
1781 
1782 	mutex_unlock(&sched->lock);
1783 }
1784 
1785 /**
1786  * panthor_sched_report_fw_events() - Report FW events to the scheduler.
 * @ptdev: Device.
 * @events: Events to report.
1787  */
1788 void panthor_sched_report_fw_events(struct panthor_device *ptdev, u32 events)
1789 {
1790 	if (!ptdev->scheduler)
1791 		return;
1792 
1793 	atomic_or(events, &ptdev->scheduler->fw_events);
1794 	sched_queue_work(ptdev->scheduler, fw_events);
1795 }
1796 
1797 static const char *fence_get_driver_name(struct dma_fence *fence)
1798 {
1799 	return "panthor";
1800 }
1801 
1802 static const char *queue_fence_get_timeline_name(struct dma_fence *fence)
1803 {
1804 	return "queue-fence";
1805 }
1806 
1807 static const struct dma_fence_ops panthor_queue_fence_ops = {
1808 	.get_driver_name = fence_get_driver_name,
1809 	.get_timeline_name = queue_fence_get_timeline_name,
1810 };
1811 
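/* Context used to batch CSG slot request updates and track which slots timed
 * out waiting for a FW acknowledgment.
 */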
1812 struct panthor_csg_slots_upd_ctx {
1813 	u32 update_mask;
1814 	u32 timedout_mask;
1815 	struct {
1816 		u32 value;
1817 		u32 mask;
1818 	} requests[MAX_CSGS];
1819 };
1820 
1821 static void csgs_upd_ctx_init(struct panthor_csg_slots_upd_ctx *ctx)
1822 {
1823 	memset(ctx, 0, sizeof(*ctx));
1824 }
1825 
1826 static void csgs_upd_ctx_queue_reqs(struct panthor_device *ptdev,
1827 				    struct panthor_csg_slots_upd_ctx *ctx,
1828 				    u32 csg_id, u32 value, u32 mask)
1829 {
1830 	if (drm_WARN_ON(&ptdev->base, !mask) ||
1831 	    drm_WARN_ON(&ptdev->base, csg_id >= ptdev->scheduler->csg_slot_count))
1832 		return;
1833 
1834 	ctx->requests[csg_id].value = (ctx->requests[csg_id].value & ~mask) | (value & mask);
1835 	ctx->requests[csg_id].mask |= mask;
1836 	ctx->update_mask |= BIT(csg_id);
1837 }
1838 
1839 static int csgs_upd_ctx_apply_locked(struct panthor_device *ptdev,
1840 				     struct panthor_csg_slots_upd_ctx *ctx)
1841 {
1842 	struct panthor_scheduler *sched = ptdev->scheduler;
1843 	u32 update_slots = ctx->update_mask;
1844 
1845 	lockdep_assert_held(&sched->lock);
1846 
1847 	if (!ctx->update_mask)
1848 		return 0;
1849 
1850 	while (update_slots) {
1851 		struct panthor_fw_csg_iface *csg_iface;
1852 		u32 csg_id = ffs(update_slots) - 1;
1853 
1854 		update_slots &= ~BIT(csg_id);
1855 		csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1856 		panthor_fw_update_reqs(csg_iface, req,
1857 				       ctx->requests[csg_id].value,
1858 				       ctx->requests[csg_id].mask);
1859 	}
1860 
1861 	panthor_fw_ring_csg_doorbells(ptdev, ctx->update_mask);
1862 
1863 	update_slots = ctx->update_mask;
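	/* Wait for the FW to acknowledge each request and sync the updated
	 * slot state back into our bookkeeping.
	 */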
1864 	while (update_slots) {
1865 		struct panthor_fw_csg_iface *csg_iface;
1866 		u32 csg_id = ffs(update_slots) - 1;
1867 		u32 req_mask = ctx->requests[csg_id].mask, acked;
1868 		int ret;
1869 
1870 		update_slots &= ~BIT(csg_id);
1871 		csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1872 
1873 		ret = panthor_fw_csg_wait_acks(ptdev, csg_id, req_mask, &acked, 100);
1874 
1875 		if (acked & CSG_ENDPOINT_CONFIG)
1876 			csg_slot_sync_priority_locked(ptdev, csg_id);
1877 
1878 		if (acked & CSG_STATE_MASK)
1879 			csg_slot_sync_state_locked(ptdev, csg_id);
1880 
1881 		if (acked & CSG_STATUS_UPDATE) {
1882 			csg_slot_sync_queues_state_locked(ptdev, csg_id);
1883 			csg_slot_sync_idle_state_locked(ptdev, csg_id);
1884 		}
1885 
1886 		if (ret && acked != req_mask &&
1887 		    ((csg_iface->input->req ^ csg_iface->output->ack) & req_mask) != 0) {
1888 			drm_err(&ptdev->base, "CSG %d update request timed out", csg_id);
1889 			ctx->timedout_mask |= BIT(csg_id);
1890 		}
1891 	}
1892 
1893 	if (ctx->timedout_mask)
1894 		return -ETIMEDOUT;
1895 
1896 	return 0;
1897 }
1898 
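/* Context used by a scheduler tick: groups currently occupying a CSG slot are
 * collected in old_groups, the groups picked for the next timeslice in groups,
 * along with the VMs they need address space slots for.
 */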
1899 struct panthor_sched_tick_ctx {
1900 	struct list_head old_groups[PANTHOR_CSG_PRIORITY_COUNT];
1901 	struct list_head groups[PANTHOR_CSG_PRIORITY_COUNT];
1902 	u32 idle_group_count;
1903 	u32 group_count;
1904 	enum panthor_csg_priority min_priority;
1905 	struct panthor_vm *vms[MAX_CS_PER_CSG];
1906 	u32 as_count;
1907 	bool immediate_tick;
1908 	u32 csg_upd_failed_mask;
1909 };
1910 
1911 static bool
1912 tick_ctx_is_full(const struct panthor_scheduler *sched,
1913 		 const struct panthor_sched_tick_ctx *ctx)
1914 {
1915 	return ctx->group_count == sched->csg_slot_count;
1916 }
1917 
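/* A group bound to a CSG slot is idle if the FW flagged the slot idle. An
 * unbound group is idle when all its queues are either idle or blocked.
 */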
1918 static bool
1919 group_is_idle(struct panthor_group *group)
1920 {
1921 	struct panthor_device *ptdev = group->ptdev;
1922 	u32 inactive_queues;
1923 
1924 	if (group->csg_id >= 0)
1925 		return ptdev->scheduler->csg_slots[group->csg_id].idle;
1926 
1927 	inactive_queues = group->idle_queues | group->blocked_queues;
1928 	return hweight32(inactive_queues) == group->queue_count;
1929 }
1930 
1931 static bool
1932 group_can_run(struct panthor_group *group)
1933 {
1934 	return group->state != PANTHOR_CS_GROUP_TERMINATED &&
1935 	       group->state != PANTHOR_CS_GROUP_UNKNOWN_STATE &&
1936 	       !group->destroyed && group->fatal_queues == 0 &&
1937 	       !group->timedout;
1938 }
1939 
1940 static void
1941 tick_ctx_pick_groups_from_list(const struct panthor_scheduler *sched,
1942 			       struct panthor_sched_tick_ctx *ctx,
1943 			       struct list_head *queue,
1944 			       bool skip_idle_groups,
1945 			       bool owned_by_tick_ctx)
1946 {
1947 	struct panthor_group *group, *tmp;
1948 
1949 	if (tick_ctx_is_full(sched, ctx))
1950 		return;
1951 
1952 	list_for_each_entry_safe(group, tmp, queue, run_node) {
1953 		u32 i;
1954 
1955 		if (!group_can_run(group))
1956 			continue;
1957 
1958 		if (skip_idle_groups && group_is_idle(group))
1959 			continue;
1960 
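		/* Skip the group if it would need a new AS slot and all AS
		 * slots are already claimed by the groups picked so far.
		 */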
1961 		for (i = 0; i < ctx->as_count; i++) {
1962 			if (ctx->vms[i] == group->vm)
1963 				break;
1964 		}
1965 
1966 		if (i == ctx->as_count && ctx->as_count == sched->as_slot_count)
1967 			continue;
1968 
1969 		if (!owned_by_tick_ctx)
1970 			group_get(group);
1971 
1972 		list_move_tail(&group->run_node, &ctx->groups[group->priority]);
1973 		ctx->group_count++;
1974 		if (group_is_idle(group))
1975 			ctx->idle_group_count++;
1976 
1977 		if (i == ctx->as_count)
1978 			ctx->vms[ctx->as_count++] = group->vm;
1979 
1980 		if (ctx->min_priority > group->priority)
1981 			ctx->min_priority = group->priority;
1982 
1983 		if (tick_ctx_is_full(sched, ctx))
1984 			return;
1985 	}
1986 }
1987 
1988 static void
1989 tick_ctx_insert_old_group(struct panthor_scheduler *sched,
1990 			  struct panthor_sched_tick_ctx *ctx,
1991 			  struct panthor_group *group,
1992 			  bool full_tick)
1993 {
1994 	struct panthor_csg_slot *csg_slot = &sched->csg_slots[group->csg_id];
1995 	struct panthor_group *other_group;
1996 
1997 	if (!full_tick) {
1998 		list_add_tail(&group->run_node, &ctx->old_groups[group->priority]);
1999 		return;
2000 	}
2001 
2002 	/* Rotate to make sure groups with lower CSG slot
2003 	 * priorities have a chance to get a higher CSG slot
2004 	 * priority next time they get picked. This priority
2005 	 * has an impact on resource request ordering, so it's
2006 	 * important to make sure we don't let one group starve
2007 	 * all other groups with the same group priority.
2008 	 */
2009 	list_for_each_entry(other_group,
2010 			    &ctx->old_groups[csg_slot->group->priority],
2011 			    run_node) {
2012 		struct panthor_csg_slot *other_csg_slot = &sched->csg_slots[other_group->csg_id];
2013 
2014 		if (other_csg_slot->priority > csg_slot->priority) {
2015 			list_add_tail(&csg_slot->group->run_node, &other_group->run_node);
2016 			return;
2017 		}
2018 	}
2019 
2020 	list_add_tail(&group->run_node, &ctx->old_groups[group->priority]);
2021 }
2022 
2023 static void
2024 tick_ctx_init(struct panthor_scheduler *sched,
2025 	      struct panthor_sched_tick_ctx *ctx,
2026 	      bool full_tick)
2027 {
2028 	struct panthor_device *ptdev = sched->ptdev;
2029 	struct panthor_csg_slots_upd_ctx upd_ctx;
2030 	int ret;
2031 	u32 i;
2032 
2033 	memset(ctx, 0, sizeof(*ctx));
2034 	csgs_upd_ctx_init(&upd_ctx);
2035 
2036 	ctx->min_priority = PANTHOR_CSG_PRIORITY_COUNT;
2037 	for (i = 0; i < ARRAY_SIZE(ctx->groups); i++) {
2038 		INIT_LIST_HEAD(&ctx->groups[i]);
2039 		INIT_LIST_HEAD(&ctx->old_groups[i]);
2040 	}
2041 
2042 	for (i = 0; i < sched->csg_slot_count; i++) {
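	/* Collect the groups currently bound to a CSG slot and ask the FW for
	 * a status update on each of them, so we work with fresh idle/queue
	 * state.
	 */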
2043 		struct panthor_csg_slot *csg_slot = &sched->csg_slots[i];
2044 		struct panthor_group *group = csg_slot->group;
2045 		struct panthor_fw_csg_iface *csg_iface;
2046 
2047 		if (!group)
2048 			continue;
2049 
2050 		csg_iface = panthor_fw_get_csg_iface(ptdev, i);
2051 		group_get(group);
2052 
2053 		/* If there were unhandled faults on the VM, force processing of
2054 		 * CSG IRQs, so we can flag the faulty queue.
2055 		 */
2056 		if (panthor_vm_has_unhandled_faults(group->vm)) {
2057 			sched_process_csg_irq_locked(ptdev, i);
2058 
2059 			/* No fatal fault reported, flag all queues as faulty. */
2060 			if (!group->fatal_queues)
2061 				group->fatal_queues |= GENMASK(group->queue_count - 1, 0);
2062 		}
2063 
2064 		tick_ctx_insert_old_group(sched, ctx, group, full_tick);
2065 		csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, i,
2066 					csg_iface->output->ack ^ CSG_STATUS_UPDATE,
2067 					CSG_STATUS_UPDATE);
2068 	}
2069 
2070 	ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
2071 	if (ret) {
2072 		panthor_device_schedule_reset(ptdev);
2073 		ctx->csg_upd_failed_mask |= upd_ctx.timedout_mask;
2074 	}
2075 }
2076 
2077 static void
2078 group_term_post_processing(struct panthor_group *group)
2079 {
2080 	struct panthor_job *job, *tmp;
2081 	LIST_HEAD(faulty_jobs);
2082 	bool cookie;
2083 	u32 i = 0;
2084 
2085 	if (drm_WARN_ON(&group->ptdev->base, group_can_run(group)))
2086 		return;
2087 
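	/* Signal all in-flight job fences with an error matching the cause of
	 * the termination, and bump the syncobjs so userspace waiters get
	 * unblocked.
	 */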
2088 	cookie = dma_fence_begin_signalling();
2089 	for (i = 0; i < group->queue_count; i++) {
2090 		struct panthor_queue *queue = group->queues[i];
2091 		struct panthor_syncobj_64b *syncobj;
2092 		int err;
2093 
2094 		if (group->fatal_queues & BIT(i))
2095 			err = -EINVAL;
2096 		else if (group->timedout)
2097 			err = -ETIMEDOUT;
2098 		else
2099 			err = -ECANCELED;
2100 
2101 		if (!queue)
2102 			continue;
2103 
2104 		spin_lock(&queue->fence_ctx.lock);
2105 		list_for_each_entry_safe(job, tmp, &queue->fence_ctx.in_flight_jobs, node) {
2106 			list_move_tail(&job->node, &faulty_jobs);
2107 			dma_fence_set_error(job->done_fence, err);
2108 			dma_fence_signal_locked(job->done_fence);
2109 		}
2110 		spin_unlock(&queue->fence_ctx.lock);
2111 
2112 		/* Manually update the syncobj seqno to unblock waiters. */
2113 		syncobj = group->syncobjs->kmap + (i * sizeof(*syncobj));
2114 		syncobj->status = ~0;
2115 		syncobj->seqno = atomic64_read(&queue->fence_ctx.seqno);
2116 		sched_queue_work(group->ptdev->scheduler, sync_upd);
2117 	}
2118 	dma_fence_end_signalling(cookie);
2119 
2120 	list_for_each_entry_safe(job, tmp, &faulty_jobs, node) {
2121 		list_del_init(&job->node);
2122 		panthor_job_put(&job->base);
2123 	}
2124 }
2125 
2126 static void group_term_work(struct work_struct *work)
2127 {
2128 	struct panthor_group *group =
2129 		container_of(work, struct panthor_group, term_work);
2130 
2131 	group_term_post_processing(group);
2132 	group_put(group);
2133 }
2134 
2135 static void
2136 tick_ctx_cleanup(struct panthor_scheduler *sched,
2137 		 struct panthor_sched_tick_ctx *ctx)
2138 {
2139 	struct panthor_device *ptdev = sched->ptdev;
2140 	struct panthor_group *group, *tmp;
2141 	u32 i;
2142 
2143 	for (i = 0; i < ARRAY_SIZE(ctx->old_groups); i++) {
2144 		list_for_each_entry_safe(group, tmp, &ctx->old_groups[i], run_node) {
2145 			/* If everything went fine, we should only have groups
2146 			 * to be terminated in the old_groups lists.
2147 			 */
2148 			drm_WARN_ON(&ptdev->base, !ctx->csg_upd_failed_mask &&
2149 				    group_can_run(group));
2150 
2151 			if (!group_can_run(group)) {
2152 				list_del_init(&group->run_node);
2153 				list_del_init(&group->wait_node);
2154 				group_queue_work(group, term);
2155 			} else if (group->csg_id >= 0) {
2156 				list_del_init(&group->run_node);
2157 			} else {
2158 				list_move(&group->run_node,
2159 					  group_is_idle(group) ?
2160 					  &sched->groups.idle[group->priority] :
2161 					  &sched->groups.runnable[group->priority]);
2162 			}
2163 			group_put(group);
2164 		}
2165 	}
2166 
2167 	for (i = 0; i < ARRAY_SIZE(ctx->groups); i++) {
2168 		/* If everything went fine, the lists of groups to schedule
2169 		 * should be empty.
2170 		 */
2171 		drm_WARN_ON(&ptdev->base,
2172 			    !ctx->csg_upd_failed_mask && !list_empty(&ctx->groups[i]));
2173 
2174 		list_for_each_entry_safe(group, tmp, &ctx->groups[i], run_node) {
2175 			if (group->csg_id >= 0) {
2176 				list_del_init(&group->run_node);
2177 			} else {
2178 				list_move(&group->run_node,
2179 					  group_is_idle(group) ?
2180 					  &sched->groups.idle[group->priority] :
2181 					  &sched->groups.runnable[group->priority]);
2182 			}
2183 			group_put(group);
2184 		}
2185 	}
2186 }
2187 
2188 static void
2189 tick_ctx_apply(struct panthor_scheduler *sched, struct panthor_sched_tick_ctx *ctx)
2190 {
2191 	struct panthor_group *group, *tmp;
2192 	struct panthor_device *ptdev = sched->ptdev;
2193 	struct panthor_csg_slot *csg_slot;
2194 	int prio, new_csg_prio = MAX_CSG_PRIO, i;
2195 	u32 free_csg_slots = 0;
2196 	struct panthor_csg_slots_upd_ctx upd_ctx;
2197 	int ret;
2198 
2199 	csgs_upd_ctx_init(&upd_ctx);
2200 
2201 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
2202 		/* Suspend or terminate evicted groups. */
2203 		list_for_each_entry(group, &ctx->old_groups[prio], run_node) {
2204 			bool term = !group_can_run(group);
2205 			int csg_id = group->csg_id;
2206 
2207 			if (drm_WARN_ON(&ptdev->base, csg_id < 0))
2208 				continue;
2209 
2210 			csg_slot = &sched->csg_slots[csg_id];
2211 			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
2212 						term ? CSG_STATE_TERMINATE : CSG_STATE_SUSPEND,
2213 						CSG_STATE_MASK);
2214 		}
2215 
2216 		/* Update priorities on already running groups. */
2217 		list_for_each_entry(group, &ctx->groups[prio], run_node) {
2218 			struct panthor_fw_csg_iface *csg_iface;
2219 			int csg_id = group->csg_id;
2220 
2221 			if (csg_id < 0) {
2222 				new_csg_prio--;
2223 				continue;
2224 			}
2225 
2226 			csg_slot = &sched->csg_slots[csg_id];
2227 			csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
2228 			if (csg_slot->priority == new_csg_prio) {
2229 				new_csg_prio--;
2230 				continue;
2231 			}
2232 
2233 			panthor_fw_update_reqs(csg_iface, endpoint_req,
2234 					       CSG_EP_REQ_PRIORITY(new_csg_prio),
2235 					       CSG_EP_REQ_PRIORITY_MASK);
2236 			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
2237 						csg_iface->output->ack ^ CSG_ENDPOINT_CONFIG,
2238 						CSG_ENDPOINT_CONFIG);
2239 			new_csg_prio--;
2240 		}
2241 	}
2242 
2243 	ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
2244 	if (ret) {
2245 		panthor_device_schedule_reset(ptdev);
2246 		ctx->csg_upd_failed_mask |= upd_ctx.timedout_mask;
2247 		return;
2248 	}
2249 
2250 	/* Unbind evicted groups. */
2251 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
2252 		list_for_each_entry(group, &ctx->old_groups[prio], run_node) {
2253 			/* This group is leaving its slot. Process any pending
2254 			 * interrupts it might have left behind before a new
2255 			 * group takes over.
2256 			 */
2257 			if (group->csg_id >= 0)
2258 				sched_process_csg_irq_locked(ptdev, group->csg_id);
2259 
2260 			group_unbind_locked(group);
2261 		}
2262 	}
2263 
2264 	for (i = 0; i < sched->csg_slot_count; i++) {
2265 		if (!sched->csg_slots[i].group)
2266 			free_csg_slots |= BIT(i);
2267 	}
2268 
2269 	csgs_upd_ctx_init(&upd_ctx);
2270 	new_csg_prio = MAX_CSG_PRIO;
2271 
2272 	/* Start new groups. */
2273 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
2274 		list_for_each_entry(group, &ctx->groups[prio], run_node) {
2275 			int csg_id = group->csg_id;
2276 			struct panthor_fw_csg_iface *csg_iface;
2277 
2278 			if (csg_id >= 0) {
2279 				new_csg_prio--;
2280 				continue;
2281 			}
2282 
2283 			csg_id = ffs(free_csg_slots) - 1;
2284 			if (drm_WARN_ON(&ptdev->base, csg_id < 0))
2285 				break;
2286 
2287 			csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
2288 			csg_slot = &sched->csg_slots[csg_id];
2289 			group_bind_locked(group, csg_id);
2290 			csg_slot_prog_locked(ptdev, csg_id, new_csg_prio--);
2291 			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
2292 						group->state == PANTHOR_CS_GROUP_SUSPENDED ?
2293 						CSG_STATE_RESUME : CSG_STATE_START,
2294 						CSG_STATE_MASK);
2295 			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
2296 						csg_iface->output->ack ^ CSG_ENDPOINT_CONFIG,
2297 						CSG_ENDPOINT_CONFIG);
2298 			free_csg_slots &= ~BIT(csg_id);
2299 		}
2300 	}
2301 
2302 	ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
2303 	if (ret) {
2304 		panthor_device_schedule_reset(ptdev);
2305 		ctx->csg_upd_failed_mask |= upd_ctx.timedout_mask;
2306 		return;
2307 	}
2308 
2309 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
2310 		list_for_each_entry_safe(group, tmp, &ctx->groups[prio], run_node) {
2311 			list_del_init(&group->run_node);
2312 
2313 			/* If the group has been destroyed while we were
2314 			 * scheduling, ask for an immediate tick to
2315 			 * re-evaluate as soon as possible and get rid of
2316 			 * this dangling group.
2317 			 */
2318 			if (group->destroyed)
2319 				ctx->immediate_tick = true;
2320 			group_put(group);
2321 		}
2322 
2323 		/* Return evicted groups to the idle or run queues. Groups
2324 		 * that can no longer be run (because they've been destroyed
2325 		 * or experienced an unrecoverable error) will be scheduled
2326 		 * for destruction in tick_ctx_cleanup().
2327 		 */
2328 		list_for_each_entry_safe(group, tmp, &ctx->old_groups[prio], run_node) {
2329 			if (!group_can_run(group))
2330 				continue;
2331 
2332 			if (group_is_idle(group))
2333 				list_move_tail(&group->run_node, &sched->groups.idle[prio]);
2334 			else
2335 				list_move_tail(&group->run_node, &sched->groups.runnable[prio]);
2336 			group_put(group);
2337 		}
2338 	}
2339 
2340 	sched->used_csg_slot_count = ctx->group_count;
2341 	sched->might_have_idle_groups = ctx->idle_group_count > 0;
2342 }
2343 
2344 static u64
2345 tick_ctx_update_resched_target(struct panthor_scheduler *sched,
2346 			       const struct panthor_sched_tick_ctx *ctx)
2347 {
2348 	/* We had space left, no need to reschedule until some external event happens. */
2349 	if (!tick_ctx_is_full(sched, ctx))
2350 		goto no_tick;
2351 
2352 	/* If idle groups were scheduled, no need to wake up until some external
2353 	 * event happens (group unblocked, new job submitted, ...).
2354 	 */
2355 	if (ctx->idle_group_count)
2356 		goto no_tick;
2357 
2358 	if (drm_WARN_ON(&sched->ptdev->base, ctx->min_priority >= PANTHOR_CSG_PRIORITY_COUNT))
2359 		goto no_tick;
2360 
2361 	/* If there are groups of the same priority waiting, we need to
2362 	 * keep the scheduler ticking, otherwise, we'll just wait for
2363 	 * new groups with higher priority to be queued.
2364 	 */
2365 	if (!list_empty(&sched->groups.runnable[ctx->min_priority])) {
2366 		u64 resched_target = sched->last_tick + sched->tick_period;
2367 
2368 		if (time_before64(sched->resched_target, sched->last_tick) ||
2369 		    time_before64(resched_target, sched->resched_target))
2370 			sched->resched_target = resched_target;
2371 
2372 		return sched->resched_target - sched->last_tick;
2373 	}
2374 
2375 no_tick:
2376 	sched->resched_target = U64_MAX;
2377 	return U64_MAX;
2378 }
2379 
2380 static void tick_work(struct work_struct *work)
2381 {
2382 	struct panthor_scheduler *sched = container_of(work, struct panthor_scheduler,
2383 						      tick_work.work);
2384 	struct panthor_device *ptdev = sched->ptdev;
2385 	struct panthor_sched_tick_ctx ctx;
2386 	u64 remaining_jiffies = 0, resched_delay;
2387 	u64 now = get_jiffies_64();
2388 	int prio, ret, cookie;
2389 
2390 	if (!drm_dev_enter(&ptdev->base, &cookie))
2391 		return;
2392 
2393 	ret = panthor_device_resume_and_get(ptdev);
2394 	if (drm_WARN_ON(&ptdev->base, ret))
2395 		goto out_dev_exit;
2396 
2397 	if (time_before64(now, sched->resched_target))
2398 		remaining_jiffies = sched->resched_target - now;
2399 
2400 	mutex_lock(&sched->lock);
2401 	if (panthor_device_reset_is_pending(sched->ptdev))
2402 		goto out_unlock;
2403 
2404 	tick_ctx_init(sched, &ctx, remaining_jiffies != 0);
2405 	if (ctx.csg_upd_failed_mask)
2406 		goto out_cleanup_ctx;
2407 
2408 	if (remaining_jiffies) {
2409 		/* Scheduling forced in the middle of a tick. Only RT groups
2410 		 * can preempt non-RT ones. Currently running RT groups can't be
2411 		 * preempted.
2412 		 */
2413 		for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1;
2414 		     prio >= 0 && !tick_ctx_is_full(sched, &ctx);
2415 		     prio--) {
2416 			tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio],
2417 						       true, true);
2418 			if (prio == PANTHOR_CSG_PRIORITY_RT) {
2419 				tick_ctx_pick_groups_from_list(sched, &ctx,
2420 							       &sched->groups.runnable[prio],
2421 							       true, false);
2422 			}
2423 		}
2424 	}
2425 
2426 	/* First pick non-idle groups */
2427 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1;
2428 	     prio >= 0 && !tick_ctx_is_full(sched, &ctx);
2429 	     prio--) {
2430 		tick_ctx_pick_groups_from_list(sched, &ctx, &sched->groups.runnable[prio],
2431 					       true, false);
2432 		tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], true, true);
2433 	}
2434 
2435 	/* If we have free CSG slots left, pick idle groups */
2436 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1;
2437 	     prio >= 0 && !tick_ctx_is_full(sched, &ctx);
2438 	     prio--) {
2439 		/* Check the old_group queue first to avoid reprogramming the slots */
2440 		tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], false, true);
2441 		tick_ctx_pick_groups_from_list(sched, &ctx, &sched->groups.idle[prio],
2442 					       false, false);
2443 	}
2444 
2445 	tick_ctx_apply(sched, &ctx);
2446 	if (ctx.csg_upd_failed_mask)
2447 		goto out_cleanup_ctx;
2448 
2449 	if (ctx.idle_group_count == ctx.group_count) {
2450 		panthor_devfreq_record_idle(sched->ptdev);
2451 		if (sched->pm.has_ref) {
2452 			pm_runtime_put_autosuspend(ptdev->base.dev);
2453 			sched->pm.has_ref = false;
2454 		}
2455 	} else {
2456 		panthor_devfreq_record_busy(sched->ptdev);
2457 		if (!sched->pm.has_ref) {
2458 			pm_runtime_get(ptdev->base.dev);
2459 			sched->pm.has_ref = true;
2460 		}
2461 	}
2462 
2463 	sched->last_tick = now;
2464 	resched_delay = tick_ctx_update_resched_target(sched, &ctx);
2465 	if (ctx.immediate_tick)
2466 		resched_delay = 0;
2467 
2468 	if (resched_delay != U64_MAX)
2469 		sched_queue_delayed_work(sched, tick, resched_delay);
2470 
2471 out_cleanup_ctx:
2472 	tick_ctx_cleanup(sched, &ctx);
2473 
2474 out_unlock:
2475 	mutex_unlock(&sched->lock);
2476 	pm_runtime_mark_last_busy(ptdev->base.dev);
2477 	pm_runtime_put_autosuspend(ptdev->base.dev);
2478 
2479 out_dev_exit:
2480 	drm_dev_exit(cookie);
2481 }
2482 
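/* Evaluate the sync wait condition blocking a queue. Returns a positive value
 * and releases the syncwait object when the condition is met, zero if the
 * queue is still blocked, or a negative error code.
 */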
2483 static int panthor_queue_eval_syncwait(struct panthor_group *group, u8 queue_idx)
2484 {
2485 	struct panthor_queue *queue = group->queues[queue_idx];
2486 	union {
2487 		struct panthor_syncobj_64b sync64;
2488 		struct panthor_syncobj_32b sync32;
2489 	} *syncobj;
2490 	bool result;
2491 	u64 value;
2492 
2493 	syncobj = panthor_queue_get_syncwait_obj(group, queue);
2494 	if (!syncobj)
2495 		return -EINVAL;
2496 
2497 	value = queue->syncwait.sync64 ?
2498 		syncobj->sync64.seqno :
2499 		syncobj->sync32.seqno;
2500 
2501 	if (queue->syncwait.gt)
2502 		result = value > queue->syncwait.ref;
2503 	else
2504 		result = value <= queue->syncwait.ref;
2505 
2506 	if (result)
2507 		panthor_queue_put_syncwait_obj(queue);
2508 
2509 	return result;
2510 }
2511 
2512 static void sync_upd_work(struct work_struct *work)
2513 {
2514 	struct panthor_scheduler *sched = container_of(work,
2515 						      struct panthor_scheduler,
2516 						      sync_upd_work);
2517 	struct panthor_group *group, *tmp;
2518 	bool immediate_tick = false;
2519 
2520 	mutex_lock(&sched->lock);
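	/* Re-evaluate the sync wait conditions of all blocked queues, and put
	 * unbound groups that got a queue unblocked back on the runnable
	 * lists.
	 */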
2521 	list_for_each_entry_safe(group, tmp, &sched->groups.waiting, wait_node) {
2522 		u32 tested_queues = group->blocked_queues;
2523 		u32 unblocked_queues = 0;
2524 
2525 		while (tested_queues) {
2526 			u32 cs_id = ffs(tested_queues) - 1;
2527 			int ret;
2528 
2529 			ret = panthor_queue_eval_syncwait(group, cs_id);
2530 			drm_WARN_ON(&group->ptdev->base, ret < 0);
2531 			if (ret)
2532 				unblocked_queues |= BIT(cs_id);
2533 
2534 			tested_queues &= ~BIT(cs_id);
2535 		}
2536 
2537 		if (unblocked_queues) {
2538 			group->blocked_queues &= ~unblocked_queues;
2539 
2540 			if (group->csg_id < 0) {
2541 				list_move(&group->run_node,
2542 					  &sched->groups.runnable[group->priority]);
2543 				if (group->priority == PANTHOR_CSG_PRIORITY_RT)
2544 					immediate_tick = true;
2545 			}
2546 		}
2547 
2548 		if (!group->blocked_queues)
2549 			list_del_init(&group->wait_node);
2550 	}
2551 	mutex_unlock(&sched->lock);
2552 
2553 	if (immediate_tick)
2554 		sched_queue_delayed_work(sched, tick, 0);
2555 }
2556 
2557 static void group_schedule_locked(struct panthor_group *group, u32 queue_mask)
2558 {
2559 	struct panthor_device *ptdev = group->ptdev;
2560 	struct panthor_scheduler *sched = ptdev->scheduler;
2561 	struct list_head *queue = &sched->groups.runnable[group->priority];
2562 	u64 delay_jiffies = 0;
2563 	bool was_idle;
2564 	u64 now;
2565 
2566 	if (!group_can_run(group))
2567 		return;
2568 
2569 	/* All updated queues are blocked, no need to wake up the scheduler. */
2570 	if ((queue_mask & group->blocked_queues) == queue_mask)
2571 		return;
2572 
2573 	was_idle = group_is_idle(group);
2574 	group->idle_queues &= ~queue_mask;
2575 
2576 	/* Don't mess with the lists if we're in the middle of a reset. */
2577 	if (atomic_read(&sched->reset.in_progress))
2578 		return;
2579 
2580 	if (was_idle && !group_is_idle(group))
2581 		list_move_tail(&group->run_node, queue);
2582 
2583 	/* RT groups are preemptive. */
2584 	if (group->priority == PANTHOR_CSG_PRIORITY_RT) {
2585 		sched_queue_delayed_work(sched, tick, 0);
2586 		return;
2587 	}
2588 
2589 	/* Some groups might be idle, force an immediate tick to
2590 	 * re-evaluate.
2591 	 */
2592 	if (sched->might_have_idle_groups) {
2593 		sched_queue_delayed_work(sched, tick, 0);
2594 		return;
2595 	}
2596 
2597 	/* Scheduler is ticking, nothing to do. */
2598 	if (sched->resched_target != U64_MAX) {
2599 		/* If there are free slots, force an immediate tick. */
2600 		if (sched->used_csg_slot_count < sched->csg_slot_count)
2601 			sched_queue_delayed_work(sched, tick, 0);
2602 
2603 		return;
2604 	}
2605 
2606 	/* Scheduler tick was off, recalculate the resched_target based on the
2607 	 * last tick event, and queue the scheduler work.
2608 	 */
2609 	now = get_jiffies_64();
2610 	sched->resched_target = sched->last_tick + sched->tick_period;
2611 	if (sched->used_csg_slot_count == sched->csg_slot_count &&
2612 	    time_before64(now, sched->resched_target))
2613 		delay_jiffies = min_t(unsigned long, sched->resched_target - now, ULONG_MAX);
2614 
2615 	sched_queue_delayed_work(sched, tick, delay_jiffies);
2616 }
2617 
2618 static void queue_stop(struct panthor_queue *queue,
2619 		       struct panthor_job *bad_job)
2620 {
2621 	drm_sched_stop(&queue->scheduler, bad_job ? &bad_job->base : NULL);
2622 }
2623 
2624 static void queue_start(struct panthor_queue *queue)
2625 {
2626 	struct panthor_job *job;
2627 
2628 	/* Re-assign the parent fences. */
2629 	list_for_each_entry(job, &queue->scheduler.pending_list, base.list)
2630 		job->base.s_fence->parent = dma_fence_get(job->done_fence);
2631 
2632 	drm_sched_start(&queue->scheduler, 0);
2633 }
2634 
2635 static void panthor_group_stop(struct panthor_group *group)
2636 {
2637 	struct panthor_scheduler *sched = group->ptdev->scheduler;
2638 
2639 	lockdep_assert_held(&sched->reset.lock);
2640 
2641 	for (u32 i = 0; i < group->queue_count; i++)
2642 		queue_stop(group->queues[i], NULL);
2643 
2644 	group_get(group);
2645 	list_move_tail(&group->run_node, &sched->reset.stopped_groups);
2646 }
2647 
2648 static void panthor_group_start(struct panthor_group *group)
2649 {
2650 	struct panthor_scheduler *sched = group->ptdev->scheduler;
2651 
2652 	lockdep_assert_held(&group->ptdev->scheduler->reset.lock);
2653 
2654 	for (u32 i = 0; i < group->queue_count; i++)
2655 		queue_start(group->queues[i]);
2656 
2657 	if (group_can_run(group)) {
2658 		list_move_tail(&group->run_node,
2659 			       group_is_idle(group) ?
2660 			       &sched->groups.idle[group->priority] :
2661 			       &sched->groups.runnable[group->priority]);
2662 	} else {
2663 		list_del_init(&group->run_node);
2664 		list_del_init(&group->wait_node);
2665 		group_queue_work(group, term);
2666 	}
2667 
2668 	group_put(group);
2669 }
2670 
2671 static void panthor_sched_immediate_tick(struct panthor_device *ptdev)
2672 {
2673 	struct panthor_scheduler *sched = ptdev->scheduler;
2674 
2675 	sched_queue_delayed_work(sched, tick, 0);
2676 }
2677 
2678 /**
2679  * panthor_sched_report_mmu_fault() - Report MMU faults to the scheduler.
 * @ptdev: Device.
2680  */
2681 void panthor_sched_report_mmu_fault(struct panthor_device *ptdev)
2682 {
2683 	/* Force a tick to immediately kill faulty groups. */
2684 	if (ptdev->scheduler)
2685 		panthor_sched_immediate_tick(ptdev);
2686 }
2687 
2688 void panthor_sched_resume(struct panthor_device *ptdev)
2689 {
2690 	/* Force a tick to re-evaluate after a resume. */
2691 	panthor_sched_immediate_tick(ptdev);
2692 }
2693 
2694 void panthor_sched_suspend(struct panthor_device *ptdev)
2695 {
2696 	struct panthor_scheduler *sched = ptdev->scheduler;
2697 	struct panthor_csg_slots_upd_ctx upd_ctx;
2698 	struct panthor_group *group;
2699 	u32 suspended_slots;
2700 	u32 i;
2701 
2702 	mutex_lock(&sched->lock);
2703 	csgs_upd_ctx_init(&upd_ctx);
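	/* Ask the FW to suspend all groups that can still run, and terminate
	 * the ones that can't.
	 */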
2704 	for (i = 0; i < sched->csg_slot_count; i++) {
2705 		struct panthor_csg_slot *csg_slot = &sched->csg_slots[i];
2706 
2707 		if (csg_slot->group) {
2708 			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, i,
2709 						group_can_run(csg_slot->group) ?
2710 						CSG_STATE_SUSPEND : CSG_STATE_TERMINATE,
2711 						CSG_STATE_MASK);
2712 		}
2713 	}
2714 
2715 	suspended_slots = upd_ctx.update_mask;
2716 
2717 	csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
2718 	suspended_slots &= ~upd_ctx.timedout_mask;
2719 
2720 	if (upd_ctx.timedout_mask) {
2721 		u32 slot_mask = upd_ctx.timedout_mask;
2722 
2723 		drm_err(&ptdev->base, "CSG suspend failed, escalating to termination");
2724 		csgs_upd_ctx_init(&upd_ctx);
2725 		while (slot_mask) {
2726 			u32 csg_id = ffs(slot_mask) - 1;
2727 			struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
2728 
2729 			/* If the group was still usable before that point, we consider
2730 			 * it innocent.
2731 			 */
2732 			if (group_can_run(csg_slot->group))
2733 				csg_slot->group->innocent = true;
2734 
2735 			/* We consider group suspension failures as fatal and flag the
2736 			 * group as unusable by setting timedout=true.
2737 			 */
2738 			csg_slot->group->timedout = true;
2739 
2740 			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
2741 						CSG_STATE_TERMINATE,
2742 						CSG_STATE_MASK);
2743 			slot_mask &= ~BIT(csg_id);
2744 		}
2745 
2746 		csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
2747 
2748 		slot_mask = upd_ctx.timedout_mask;
2749 		while (slot_mask) {
2750 			u32 csg_id = ffs(slot_mask) - 1;
2751 			struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
2752 
2753 			/* The terminate command timed out, but the soft-reset
2754 			 * will automatically terminate all active groups, so
2755 			 * let's force the state to terminated here.
2756 			 */
2757 			if (csg_slot->group->state != PANTHOR_CS_GROUP_TERMINATED)
2758 				csg_slot->group->state = PANTHOR_CS_GROUP_TERMINATED;
2759 			slot_mask &= ~BIT(csg_id);
2760 		}
2761 	}
2762 
2763 	/* Flush L2 and LSC caches to make sure suspend state is up-to-date.
2764 	 * If the flush fails, flag the suspended groups as terminated.
2765 	 */
2766 	if (suspended_slots) {
2767 		bool flush_caches_failed = false;
2768 		u32 slot_mask = suspended_slots;
2769 
2770 		if (panthor_gpu_flush_caches(ptdev, CACHE_CLEAN, CACHE_CLEAN, 0))
2771 			flush_caches_failed = true;
2772 
2773 		while (slot_mask) {
2774 			u32 csg_id = ffs(slot_mask) - 1;
2775 			struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
2776 
2777 			if (flush_caches_failed)
2778 				csg_slot->group->state = PANTHOR_CS_GROUP_TERMINATED;
2779 			else
2780 				csg_slot_sync_update_locked(ptdev, csg_id);
2781 
2782 			slot_mask &= ~BIT(csg_id);
2783 		}
2784 	}
2785 
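	/* Unbind all groups from their CSG slots and move them back to the
	 * idle lists, or queue their termination if they can no longer run.
	 */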
2786 	for (i = 0; i < sched->csg_slot_count; i++) {
2787 		struct panthor_csg_slot *csg_slot = &sched->csg_slots[i];
2788 
2789 		group = csg_slot->group;
2790 		if (!group)
2791 			continue;
2792 
2793 		group_get(group);
2794 
2795 		if (group->csg_id >= 0)
2796 			sched_process_csg_irq_locked(ptdev, group->csg_id);
2797 
2798 		group_unbind_locked(group);
2799 
2800 		drm_WARN_ON(&group->ptdev->base, !list_empty(&group->run_node));
2801 
2802 		if (group_can_run(group)) {
2803 			list_add(&group->run_node,
2804 				 &sched->groups.idle[group->priority]);
2805 		} else {
2806 			/* We don't bother stopping the scheduler if the group is
2807 			 * faulty, the group termination work will finish the job.
2808 			 */
2809 			list_del_init(&group->wait_node);
2810 			group_queue_work(group, term);
2811 		}
2812 		group_put(group);
2813 	}
2814 	mutex_unlock(&sched->lock);
2815 }
2816 
2817 void panthor_sched_pre_reset(struct panthor_device *ptdev)
2818 {
2819 	struct panthor_scheduler *sched = ptdev->scheduler;
2820 	struct panthor_group *group, *group_tmp;
2821 	u32 i;
2822 
2823 	mutex_lock(&sched->reset.lock);
2824 	atomic_set(&sched->reset.in_progress, true);
2825 
2826 	/* Cancel all scheduler works. Once this is done, these works can't be
2827 	 * scheduled again until the reset operation is complete.
2828 	 */
2829 	cancel_work_sync(&sched->sync_upd_work);
2830 	cancel_delayed_work_sync(&sched->tick_work);
2831 
2832 	panthor_sched_suspend(ptdev);
2833 
2834 	/* Stop all groups that might still accept jobs, so we don't get passed
2835 	 * new jobs while we're resetting.
2836 	 */
2837 	for (i = 0; i < ARRAY_SIZE(sched->groups.runnable); i++) {
2838 		/* All groups should be in the idle lists. */
2839 		drm_WARN_ON(&ptdev->base, !list_empty(&sched->groups.runnable[i]));
2840 		list_for_each_entry_safe(group, group_tmp, &sched->groups.runnable[i], run_node)
2841 			panthor_group_stop(group);
2842 	}
2843 
2844 	for (i = 0; i < ARRAY_SIZE(sched->groups.idle); i++) {
2845 		list_for_each_entry_safe(group, group_tmp, &sched->groups.idle[i], run_node)
2846 			panthor_group_stop(group);
2847 	}
2848 
2849 	mutex_unlock(&sched->reset.lock);
2850 }
2851 
2852 void panthor_sched_post_reset(struct panthor_device *ptdev, bool reset_failed)
2853 {
2854 	struct panthor_scheduler *sched = ptdev->scheduler;
2855 	struct panthor_group *group, *group_tmp;
2856 
2857 	mutex_lock(&sched->reset.lock);
2858 
2859 	list_for_each_entry_safe(group, group_tmp, &sched->reset.stopped_groups, run_node) {
2860 		/* Consider all previously running groups as terminated if the
2861 		 * reset failed.
2862 		 */
2863 		if (reset_failed)
2864 			group->state = PANTHOR_CS_GROUP_TERMINATED;
2865 
2866 		panthor_group_start(group);
2867 	}
2868 
2869 	/* We're done resetting the GPU, clear the reset.in_progress bit so we can
2870 	 * kick the scheduler.
2871 	 */
2872 	atomic_set(&sched->reset.in_progress, false);
2873 	mutex_unlock(&sched->reset.lock);
2874 
2875 	/* No need to queue a tick and update syncs if the reset failed. */
2876 	if (!reset_failed) {
2877 		sched_queue_delayed_work(sched, tick, 0);
2878 		sched_queue_work(sched, sync_upd);
2879 	}
2880 }
2881 
2882 static void update_fdinfo_stats(struct panthor_job *job)
2883 {
2884 	struct panthor_group *group = job->group;
2885 	struct panthor_queue *queue = group->queues[job->queue_idx];
2886 	struct panthor_gpu_usage *fdinfo = &group->fdinfo.data;
2887 	struct panthor_job_profiling_data *slots = queue->profiling.slots->kmap;
2888 	struct panthor_job_profiling_data *data = &slots[job->profiling.slot];
2889 
2890 	scoped_guard(spinlock, &group->fdinfo.lock) {
2891 		if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_CYCLES)
2892 			fdinfo->cycles += data->cycles.after - data->cycles.before;
2893 		if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_TIMESTAMP)
2894 			fdinfo->time += data->time.after - data->time.before;
2895 	}
2896 }
2897 
2898 void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile)
2899 {
2900 	struct panthor_group_pool *gpool = pfile->groups;
2901 	struct panthor_group *group;
2902 	unsigned long i;
2903 
2904 	if (IS_ERR_OR_NULL(gpool))
2905 		return;
2906 
2907 	xa_lock(&gpool->xa);
2908 	xa_for_each(&gpool->xa, i, group) {
2909 		guard(spinlock)(&group->fdinfo.lock);
2910 		pfile->stats.cycles += group->fdinfo.data.cycles;
2911 		pfile->stats.time += group->fdinfo.data.time;
2912 		group->fdinfo.data.cycles = 0;
2913 		group->fdinfo.data.time = 0;
2914 	}
2915 	xa_unlock(&gpool->xa);
2916 }
2917 
2918 static void group_sync_upd_work(struct work_struct *work)
2919 {
2920 	struct panthor_group *group =
2921 		container_of(work, struct panthor_group, sync_upd_work);
2922 	struct panthor_job *job, *job_tmp;
2923 	LIST_HEAD(done_jobs);
2924 	u32 queue_idx;
2925 	bool cookie;
2926 
2927 	cookie = dma_fence_begin_signalling();
2928 	for (queue_idx = 0; queue_idx < group->queue_count; queue_idx++) {
2929 		struct panthor_queue *queue = group->queues[queue_idx];
2930 		struct panthor_syncobj_64b *syncobj;
2931 
2932 		if (!queue)
2933 			continue;
2934 
2935 		syncobj = group->syncobjs->kmap + (queue_idx * sizeof(*syncobj));
2936 
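		/* Signal the done fence of every in-flight job whose seqno has
		 * been reached by the queue syncobj.
		 */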
2937 		spin_lock(&queue->fence_ctx.lock);
2938 		list_for_each_entry_safe(job, job_tmp, &queue->fence_ctx.in_flight_jobs, node) {
2939 			if (syncobj->seqno < job->done_fence->seqno)
2940 				break;
2941 
2942 			list_move_tail(&job->node, &done_jobs);
2943 			dma_fence_signal_locked(job->done_fence);
2944 		}
2945 		spin_unlock(&queue->fence_ctx.lock);
2946 	}
2947 	dma_fence_end_signalling(cookie);
2948 
2949 	list_for_each_entry_safe(job, job_tmp, &done_jobs, node) {
2950 		if (job->profiling.mask)
2951 			update_fdinfo_stats(job);
2952 		list_del_init(&job->node);
2953 		panthor_job_put(&job->base);
2954 	}
2955 
2956 	group_put(group);
2957 }
2958 
2959 struct panthor_job_ringbuf_instrs {
2960 	u64 buffer[MAX_INSTRS_PER_JOB];
2961 	u32 count;
2962 };
2963 
2964 struct panthor_job_instr {
2965 	u32 profile_mask;
2966 	u64 instr;
2967 };
2968 
2969 #define JOB_INSTR(__prof, __instr) \
2970 	{ \
2971 		.profile_mask = __prof, \
2972 		.instr = __instr, \
2973 	}
2974 
2975 static void
2976 copy_instrs_to_ringbuf(struct panthor_queue *queue,
2977 		       struct panthor_job *job,
2978 		       struct panthor_job_ringbuf_instrs *instrs)
2979 {
2980 	u64 ringbuf_size = panthor_kernel_bo_size(queue->ringbuf);
2981 	u64 start = job->ringbuf.start & (ringbuf_size - 1);
2982 	u64 size, written;
2983 
2984 	/*
2985 	 * We need to write a whole slot, including any trailing zeroes
2986 	 * that may come at the end of it. Also, because instrs.buffer has
2987 	 * been zero-initialised, there's no need to pad it with 0's.
2988 	 */
2989 	instrs->count = ALIGN(instrs->count, NUM_INSTRS_PER_CACHE_LINE);
2990 	size = instrs->count * sizeof(u64);
2991 	WARN_ON(size > ringbuf_size);
2992 	written = min(ringbuf_size - start, size);
2993 
2994 	memcpy(queue->ringbuf->kmap + start, instrs->buffer, written);
2995 
2996 	if (written < size)
2997 		memcpy(queue->ringbuf->kmap,
2998 		       &instrs->buffer[written / sizeof(u64)],
2999 		       size - written);
3000 }
3001 
3002 struct panthor_job_cs_params {
3003 	u32 profile_mask;
3004 	u64 addr_reg; u64 val_reg;
3005 	u64 cycle_reg; u64 time_reg;
3006 	u64 sync_addr; u64 times_addr;
3007 	u64 cs_start; u64 cs_size;
3008 	u32 last_flush; u32 waitall_mask;
3009 };
3010 
3011 static void
3012 get_job_cs_params(struct panthor_job *job, struct panthor_job_cs_params *params)
3013 {
3014 	struct panthor_group *group = job->group;
3015 	struct panthor_queue *queue = group->queues[job->queue_idx];
3016 	struct panthor_device *ptdev = group->ptdev;
3017 	struct panthor_scheduler *sched = ptdev->scheduler;
3018 
3019 	params->addr_reg = ptdev->csif_info.cs_reg_count -
3020 			   ptdev->csif_info.unpreserved_cs_reg_count;
3021 	params->val_reg = params->addr_reg + 2;
3022 	params->cycle_reg = params->addr_reg;
3023 	params->time_reg = params->val_reg;
3024 
3025 	params->sync_addr = panthor_kernel_bo_gpuva(group->syncobjs) +
3026 			    job->queue_idx * sizeof(struct panthor_syncobj_64b);
3027 	params->times_addr = panthor_kernel_bo_gpuva(queue->profiling.slots) +
3028 			     (job->profiling.slot * sizeof(struct panthor_job_profiling_data));
3029 	params->waitall_mask = GENMASK(sched->sb_slot_count - 1, 0);
3030 
3031 	params->cs_start = job->call_info.start;
3032 	params->cs_size = job->call_info.size;
3033 	params->last_flush = job->call_info.latest_flush;
3034 
3035 	params->profile_mask = job->profiling.mask;
3036 }
3037 
3038 #define JOB_INSTR_ALWAYS(instr) \
3039 	JOB_INSTR(PANTHOR_DEVICE_PROFILING_DISABLED, (instr))
3040 #define JOB_INSTR_TIMESTAMP(instr) \
3041 	JOB_INSTR(PANTHOR_DEVICE_PROFILING_TIMESTAMP, (instr))
3042 #define JOB_INSTR_CYCLES(instr) \
3043 	JOB_INSTR(PANTHOR_DEVICE_PROFILING_CYCLES, (instr))
3044 
3045 static void
3046 prepare_job_instrs(const struct panthor_job_cs_params *params,
3047 		   struct panthor_job_ringbuf_instrs *instrs)
3048 {
3049 	const struct panthor_job_instr instr_seq[] = {
3050 		/* MOV32 rX+2, cs.latest_flush */
3051 		JOB_INSTR_ALWAYS((2ull << 56) | (params->val_reg << 48) | params->last_flush),
3052 		/* FLUSH_CACHE2.clean_inv_all.no_wait.signal(0) rX+2 */
3053 		JOB_INSTR_ALWAYS((36ull << 56) | (0ull << 48) | (params->val_reg << 40) |
3054 				 (0 << 16) | 0x233),
3055 		/* MOV48 rX:rX+1, cycles_offset */
3056 		JOB_INSTR_CYCLES((1ull << 56) | (params->cycle_reg << 48) |
3057 				 (params->times_addr +
3058 				  offsetof(struct panthor_job_profiling_data, cycles.before))),
3059 		/* STORE_STATE cycles */
3060 		JOB_INSTR_CYCLES((40ull << 56) | (params->cycle_reg << 40) | (1ll << 32)),
3061 		/* MOV48 rX:rX+1, time_offset */
3062 		JOB_INSTR_TIMESTAMP((1ull << 56) | (params->time_reg << 48) |
3063 				    (params->times_addr +
3064 				     offsetof(struct panthor_job_profiling_data, time.before))),
3065 		/* STORE_STATE timer */
3066 		JOB_INSTR_TIMESTAMP((40ull << 56) | (params->time_reg << 40) | (0ll << 32)),
3067 		/* MOV48 rX:rX+1, cs.start */
3068 		JOB_INSTR_ALWAYS((1ull << 56) | (params->addr_reg << 48) | params->cs_start),
3069 		/* MOV32 rX+2, cs.size */
3070 		JOB_INSTR_ALWAYS((2ull << 56) | (params->val_reg << 48) | params->cs_size),
3071 		/* WAIT(0) => waits for FLUSH_CACHE2 instruction */
3072 		JOB_INSTR_ALWAYS((3ull << 56) | (1 << 16)),
3073 		/* CALL rX:rX+1, rX+2 */
3074 		JOB_INSTR_ALWAYS((32ull << 56) | (params->addr_reg << 40) |
3075 				 (params->val_reg << 32)),
3076 		/* MOV48 rX:rX+1, cycles_offset */
3077 		JOB_INSTR_CYCLES((1ull << 56) | (params->cycle_reg << 48) |
3078 				 (params->times_addr +
3079 				  offsetof(struct panthor_job_profiling_data, cycles.after))),
3080 		/* STORE_STATE cycles */
3081 		JOB_INSTR_CYCLES((40ull << 56) | (params->cycle_reg << 40) | (1ll << 32)),
3082 		/* MOV48 rX:rX+1, time_offset */
3083 		JOB_INSTR_TIMESTAMP((1ull << 56) | (params->time_reg << 48) |
3084 			  (params->times_addr +
3085 			   offsetof(struct panthor_job_profiling_data, time.after))),
3086 		/* STORE_STATE timer */
3087 		JOB_INSTR_TIMESTAMP((40ull << 56) | (params->time_reg << 40) | (0ll << 32)),
3088 		/* MOV48 rX:rX+1, sync_addr */
3089 		JOB_INSTR_ALWAYS((1ull << 56) | (params->addr_reg << 48) | params->sync_addr),
3090 		/* MOV48 rX+2, #1 */
3091 		JOB_INSTR_ALWAYS((1ull << 56) | (params->val_reg << 48) | 1),
3092 		/* WAIT(all) */
3093 		JOB_INSTR_ALWAYS((3ull << 56) | (params->waitall_mask << 16)),
3094 		/* SYNC_ADD64.system_scope.propagate_err.nowait rX:rX+1, rX+2 */
3095 		JOB_INSTR_ALWAYS((51ull << 56) | (0ull << 48) | (params->addr_reg << 40) |
3096 				 (params->val_reg << 32) | (0 << 16) | 1),
3097 		/* ERROR_BARRIER, so we can recover from faults at job boundaries. */
3098 		JOB_INSTR_ALWAYS((47ull << 56)),
3099 	};
3100 	u32 pad;
3101 
3102 	instrs->count = 0;
3103 
3104 	/* NEEDS to be cacheline-aligned to please the prefetcher. */
3105 	static_assert(sizeof(instrs->buffer) % 64 == 0,
3106 		      "panthor_job_ringbuf_instrs::buffer is not aligned on a cacheline");
3107 
3108 	/* Make sure we have enough storage to store the whole sequence. */
3109 	static_assert(ALIGN(ARRAY_SIZE(instr_seq), NUM_INSTRS_PER_CACHE_LINE) ==
3110 		      ARRAY_SIZE(instrs->buffer),
3111 		      "instr_seq vs panthor_job_ringbuf_instrs::buffer size mismatch");
3112 
3113 	for (u32 i = 0; i < ARRAY_SIZE(instr_seq); i++) {
3114 		/* If the profile mask of this instruction is not enabled, skip it. */
3115 		if (instr_seq[i].profile_mask &&
3116 		    !(instr_seq[i].profile_mask & params->profile_mask))
3117 			continue;
3118 
3119 		instrs->buffer[instrs->count++] = instr_seq[i].instr;
3120 	}
3121 
3122 	pad = ALIGN(instrs->count, NUM_INSTRS_PER_CACHE_LINE);
3123 	memset(&instrs->buffer[instrs->count], 0,
3124 	       (pad - instrs->count) * sizeof(instrs->buffer[0]));
3125 	instrs->count = pad;
3126 }
3127 
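/* Number of 64-bit instructions a job with the given profiling mask occupies
 * in the ring buffer.
 */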
3128 static u32 calc_job_credits(u32 profile_mask)
3129 {
3130 	struct panthor_job_ringbuf_instrs instrs;
3131 	struct panthor_job_cs_params params = {
3132 		.profile_mask = profile_mask,
3133 	};
3134 
3135 	prepare_job_instrs(&params, &instrs);
3136 	return instrs.count;
3137 }
3138 
3139 static struct dma_fence *
3140 queue_run_job(struct drm_sched_job *sched_job)
3141 {
3142 	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
3143 	struct panthor_group *group = job->group;
3144 	struct panthor_queue *queue = group->queues[job->queue_idx];
3145 	struct panthor_device *ptdev = group->ptdev;
3146 	struct panthor_scheduler *sched = ptdev->scheduler;
3147 	struct panthor_job_ringbuf_instrs instrs;
3148 	struct panthor_job_cs_params cs_params;
3149 	struct dma_fence *done_fence;
3150 	int ret;
3151 
3152 	/* Stream size is zero: nothing to do except make sure all previously
3153 	 * submitted jobs are done before we signal the
3154 	 * drm_sched_job::s_fence::finished fence.
3155 	 */
3156 	if (!job->call_info.size) {
3157 		job->done_fence = dma_fence_get(queue->fence_ctx.last_fence);
3158 		return dma_fence_get(job->done_fence);
3159 	}
3160 
3161 	ret = panthor_device_resume_and_get(ptdev);
3162 	if (drm_WARN_ON(&ptdev->base, ret))
3163 		return ERR_PTR(ret);
3164 
3165 	mutex_lock(&sched->lock);
3166 	if (!group_can_run(group)) {
3167 		done_fence = ERR_PTR(-ECANCELED);
3168 		goto out_unlock;
3169 	}
3170 
3171 	dma_fence_init(job->done_fence,
3172 		       &panthor_queue_fence_ops,
3173 		       &queue->fence_ctx.lock,
3174 		       queue->fence_ctx.id,
3175 		       atomic64_inc_return(&queue->fence_ctx.seqno));
3176 
3177 	job->profiling.slot = queue->profiling.seqno++;
3178 	if (queue->profiling.seqno == queue->profiling.slot_count)
3179 		queue->profiling.seqno = 0;
3180 
3181 	job->ringbuf.start = queue->iface.input->insert;
3182 
3183 	get_job_cs_params(job, &cs_params);
3184 	prepare_job_instrs(&cs_params, &instrs);
3185 	copy_instrs_to_ringbuf(queue, job, &instrs);
3186 
3187 	job->ringbuf.end = job->ringbuf.start + (instrs.count * sizeof(u64));
3188 
3189 	panthor_job_get(&job->base);
3190 	spin_lock(&queue->fence_ctx.lock);
3191 	list_add_tail(&job->node, &queue->fence_ctx.in_flight_jobs);
3192 	spin_unlock(&queue->fence_ctx.lock);
3193 
3194 	/* Make sure the ring buffer is updated before the INSERT
3195 	 * register.
3196 	 */
3197 	wmb();
3198 
3199 	queue->iface.input->extract = queue->iface.output->extract;
3200 	queue->iface.input->insert = job->ringbuf.end;
3201 
3202 	if (group->csg_id < 0) {
3203 		/* If the queue is blocked, we want to keep the timeout running, so we
3204 		 * can detect unbounded waits and kill the group when that happens.
3205 		 * Otherwise, we suspend the timeout so the time we spend waiting for
3206 		 * a CSG slot is not counted.
3207 		 */
3208 		if (!(group->blocked_queues & BIT(job->queue_idx)) &&
3209 		    !queue->timeout_suspended) {
3210 			queue->remaining_time = drm_sched_suspend_timeout(&queue->scheduler);
3211 			queue->timeout_suspended = true;
3212 		}
3213 
3214 		group_schedule_locked(group, BIT(job->queue_idx));
3215 	} else {
3216 		gpu_write(ptdev, CSF_DOORBELL(queue->doorbell_id), 1);
3217 		if (!sched->pm.has_ref &&
3218 		    !(group->blocked_queues & BIT(job->queue_idx))) {
3219 			pm_runtime_get(ptdev->base.dev);
3220 			sched->pm.has_ref = true;
3221 		}
3222 		panthor_devfreq_record_busy(sched->ptdev);
3223 	}
3224 
3225 	/* Update the last fence. */
3226 	dma_fence_put(queue->fence_ctx.last_fence);
3227 	queue->fence_ctx.last_fence = dma_fence_get(job->done_fence);
3228 
3229 	done_fence = dma_fence_get(job->done_fence);
3230 
3231 out_unlock:
3232 	mutex_unlock(&sched->lock);
3233 	pm_runtime_mark_last_busy(ptdev->base.dev);
3234 	pm_runtime_put_autosuspend(ptdev->base.dev);
3235 
3236 	return done_fence;
3237 }
3238 
3239 static enum drm_gpu_sched_stat
3240 queue_timedout_job(struct drm_sched_job *sched_job)
3241 {
3242 	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
3243 	struct panthor_group *group = job->group;
3244 	struct panthor_device *ptdev = group->ptdev;
3245 	struct panthor_scheduler *sched = ptdev->scheduler;
3246 	struct panthor_queue *queue = group->queues[job->queue_idx];
3247 
3248 	drm_warn(&ptdev->base, "job timeout: pid=%d, comm=%s, seqno=%llu\n",
3249 		 group->task_info.pid, group->task_info.comm, job->done_fence->seqno);
3250 
3251 	drm_WARN_ON(&ptdev->base, atomic_read(&sched->reset.in_progress));
3252 
3253 	queue_stop(queue, job);
3254 
3255 	mutex_lock(&sched->lock);
3256 	group->timedout = true;
3257 	if (group->csg_id >= 0) {
3258 		sched_queue_delayed_work(ptdev->scheduler, tick, 0);
3259 	} else {
3260 		/* Remove from the run queues, so the scheduler can't
3261 		 * pick the group on the next tick.
3262 		 */
3263 		list_del_init(&group->run_node);
3264 		list_del_init(&group->wait_node);
3265 
3266 		group_queue_work(group, term);
3267 	}
3268 	mutex_unlock(&sched->lock);
3269 
3270 	queue_start(queue);
3271 
3272 	return DRM_GPU_SCHED_STAT_RESET;
3273 }
3274 
3275 static void queue_free_job(struct drm_sched_job *sched_job)
3276 {
3277 	drm_sched_job_cleanup(sched_job);
3278 	panthor_job_put(sched_job);
3279 }
3280 
3281 static const struct drm_sched_backend_ops panthor_queue_sched_ops = {
3282 	.run_job = queue_run_job,
3283 	.timedout_job = queue_timedout_job,
3284 	.free_job = queue_free_job,
3285 };
3286 
3287 static u32 calc_profiling_ringbuf_num_slots(struct panthor_device *ptdev,
3288 					    u32 cs_ringbuf_size)
3289 {
3290 	u32 min_profiled_job_instrs = U32_MAX;
3291 	u32 last_flag = fls(PANTHOR_DEVICE_PROFILING_ALL);
3292 
3293 	/*
3294 	 * We want to calculate the minimum size of a profiled job's CS,
3295 	 * because profiled jobs need additional instructions for the sampling
3296 	 * of performance metrics and thus take up more space in the queue's
3297 	 * ring buffer. This means we might not need as many slots for keeping
3298 	 * track of their profiling information. What we need is the maximum
3299 	 * number of slots we should allocate to this end, which matches the
3300 	 * maximum number of profiled jobs we can place simultaneously in the
3301 	 * queue's ring buffer.
3302 	 * That has to be calculated separately for every single job profiling
3303 	 * flag, but not in the case where job profiling is disabled, since
3304 	 * unprofiled jobs don't need to keep track of this at all.
3305 	 */
3306 	for (u32 i = 0; i < last_flag; i++) {
3307 		min_profiled_job_instrs =
3308 			min(min_profiled_job_instrs, calc_job_credits(BIT(i)));
3309 	}
3310 
3311 	return DIV_ROUND_UP(cs_ringbuf_size, min_profiled_job_instrs * sizeof(u64));
3312 }
3313 
3314 static struct panthor_queue *
3315 group_create_queue(struct panthor_group *group,
3316 		   const struct drm_panthor_queue_create *args,
3317 		   u64 drm_client_id, u32 gid, u32 qid)
3318 {
3319 	struct drm_sched_init_args sched_args = {
3320 		.ops = &panthor_queue_sched_ops,
3321 		.submit_wq = group->ptdev->scheduler->wq,
3322 		.num_rqs = 1,
3323 		/*
3324 		 * The credit limit argument tells us the total number of
3325 		 * instructions across all CS slots in the ringbuffer, with
3326 		 * some jobs requiring twice as many as others, depending on
3327 		 * their profiling status.
3328 		 */
3329 		.credit_limit = args->ringbuf_size / sizeof(u64),
3330 		.timeout = msecs_to_jiffies(JOB_TIMEOUT_MS),
3331 		.timeout_wq = group->ptdev->reset.wq,
3332 		.dev = group->ptdev->base.dev,
3333 	};
3334 	struct drm_gpu_scheduler *drm_sched;
3335 	struct panthor_queue *queue;
3336 	int ret;
3337 
3338 	if (args->pad[0] || args->pad[1] || args->pad[2])
3339 		return ERR_PTR(-EINVAL);
3340 
3341 	if (args->ringbuf_size < SZ_4K || args->ringbuf_size > SZ_64K ||
3342 	    !is_power_of_2(args->ringbuf_size))
3343 		return ERR_PTR(-EINVAL);
3344 
3345 	if (args->priority > CSF_MAX_QUEUE_PRIO)
3346 		return ERR_PTR(-EINVAL);
3347 
3348 	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
3349 	if (!queue)
3350 		return ERR_PTR(-ENOMEM);
3351 
3352 	queue->fence_ctx.id = dma_fence_context_alloc(1);
3353 	spin_lock_init(&queue->fence_ctx.lock);
3354 	INIT_LIST_HEAD(&queue->fence_ctx.in_flight_jobs);
3355 
3356 	queue->priority = args->priority;
3357 
3358 	queue->ringbuf = panthor_kernel_bo_create(group->ptdev, group->vm,
3359 						  args->ringbuf_size,
3360 						  DRM_PANTHOR_BO_NO_MMAP,
3361 						  DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
3362 						  DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
3363 						  PANTHOR_VM_KERNEL_AUTO_VA,
3364 						  "CS ring buffer");
3365 	if (IS_ERR(queue->ringbuf)) {
3366 		ret = PTR_ERR(queue->ringbuf);
3367 		goto err_free_queue;
3368 	}
3369 
3370 	ret = panthor_kernel_bo_vmap(queue->ringbuf);
3371 	if (ret)
3372 		goto err_free_queue;
3373 
3374 	queue->iface.mem = panthor_fw_alloc_queue_iface_mem(group->ptdev,
3375 							    &queue->iface.input,
3376 							    &queue->iface.output,
3377 							    &queue->iface.input_fw_va,
3378 							    &queue->iface.output_fw_va);
3379 	if (IS_ERR(queue->iface.mem)) {
3380 		ret = PTR_ERR(queue->iface.mem);
3381 		goto err_free_queue;
3382 	}
3383 
3384 	queue->profiling.slot_count =
3385 		calc_profiling_ringbuf_num_slots(group->ptdev, args->ringbuf_size);
3386 
3387 	queue->profiling.slots =
3388 		panthor_kernel_bo_create(group->ptdev, group->vm,
3389 					 queue->profiling.slot_count *
3390 					 sizeof(struct panthor_job_profiling_data),
3391 					 DRM_PANTHOR_BO_NO_MMAP,
3392 					 DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
3393 					 DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
3394 					 PANTHOR_VM_KERNEL_AUTO_VA,
3395 					 "Group job stats");
3396 
3397 	if (IS_ERR(queue->profiling.slots)) {
3398 		ret = PTR_ERR(queue->profiling.slots);
3399 		goto err_free_queue;
3400 	}
3401 
3402 	ret = panthor_kernel_bo_vmap(queue->profiling.slots);
3403 	if (ret)
3404 		goto err_free_queue;
3405 
3406 	/* assign a unique name */
3407 	queue->name = kasprintf(GFP_KERNEL, "panthor-queue-%llu-%u-%u", drm_client_id, gid, qid);
3408 	if (!queue->name) {
3409 		ret = -ENOMEM;
3410 		goto err_free_queue;
3411 	}
3412 
3413 	sched_args.name = queue->name;
3414 
3415 	ret = drm_sched_init(&queue->scheduler, &sched_args);
3416 	if (ret)
3417 		goto err_free_queue;
3418 
3419 	drm_sched = &queue->scheduler;
3420 	ret = drm_sched_entity_init(&queue->entity, 0, &drm_sched, 1, NULL);
	if (ret)
		goto err_free_queue;
3421 
3422 	return queue;
3423 
3424 err_free_queue:
3425 	group_free_queue(group, queue);
3426 	return ERR_PTR(ret);
3427 }
3428 
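/* Record the PID and comm of the process (thread group leader) that created
 * the group.
 */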
3429 static void group_init_task_info(struct panthor_group *group)
3430 {
3431 	struct task_struct *task = current->group_leader;
3432 
3433 	group->task_info.pid = task->pid;
3434 	get_task_comm(group->task_info.comm, task);
3435 }
3436 
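/* Account the size of every kernel BO backing the group (suspend buffers,
 * sync objects, and the per-queue ring buffers, FW interface memory and
 * profiling slots) so it can be reported through the fdinfo memory stats.
 */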
3437 static void add_group_kbo_sizes(struct panthor_device *ptdev,
3438 				struct panthor_group *group)
3439 {
3440 	struct panthor_queue *queue;
3441 	int i;
3442 
3443 	if (drm_WARN_ON(&ptdev->base, IS_ERR_OR_NULL(group)))
3444 		return;
3445 	if (drm_WARN_ON(&ptdev->base, ptdev != group->ptdev))
3446 		return;
3447 
3448 	group->fdinfo.kbo_sizes += group->suspend_buf->obj->size;
3449 	group->fdinfo.kbo_sizes += group->protm_suspend_buf->obj->size;
3450 	group->fdinfo.kbo_sizes += group->syncobjs->obj->size;
3451 
3452 	for (i = 0; i < group->queue_count; i++) {
3453 		queue = group->queues[i];
3454 		group->fdinfo.kbo_sizes += queue->ringbuf->obj->size;
3455 		group->fdinfo.kbo_sizes += queue->iface.mem->obj->size;
3456 		group->fdinfo.kbo_sizes += queue->profiling.slots->obj->size;
3457 	}
3458 }
3459 
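/* Maximum number of groups that can exist simultaneously in a file's group pool. */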
3460 #define MAX_GROUPS_PER_POOL		128
3461 
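/**
 * panthor_group_create() - Create a scheduling group
 * @pfile: File the group is created on.
 * @group_args: Group creation arguments coming from userspace.
 * @queue_args: Array of queue creation arguments, one entry per queue.
 * @drm_client_id: ID of the DRM client creating the group.
 *
 * Return: A positive group handle on success, a negative error code otherwise.
 */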
3462 int panthor_group_create(struct panthor_file *pfile,
3463 			 const struct drm_panthor_group_create *group_args,
3464 			 const struct drm_panthor_queue_create *queue_args,
3465 			 u64 drm_client_id)
3466 {
3467 	struct panthor_device *ptdev = pfile->ptdev;
3468 	struct panthor_group_pool *gpool = pfile->groups;
3469 	struct panthor_scheduler *sched = ptdev->scheduler;
3470 	struct panthor_fw_csg_iface *csg_iface = panthor_fw_get_csg_iface(ptdev, 0);
3471 	struct panthor_group *group = NULL;
3472 	u32 gid, i, suspend_size;
3473 	int ret;
3474 
3475 	if (group_args->pad)
3476 		return -EINVAL;
3477 
3478 	if (group_args->priority >= PANTHOR_CSG_PRIORITY_COUNT)
3479 		return -EINVAL;
3480 
3481 	if ((group_args->compute_core_mask & ~ptdev->gpu_info.shader_present) ||
3482 	    (group_args->fragment_core_mask & ~ptdev->gpu_info.shader_present) ||
3483 	    (group_args->tiler_core_mask & ~ptdev->gpu_info.tiler_present))
3484 		return -EINVAL;
3485 
3486 	if (hweight64(group_args->compute_core_mask) < group_args->max_compute_cores ||
3487 	    hweight64(group_args->fragment_core_mask) < group_args->max_fragment_cores ||
3488 	    hweight64(group_args->tiler_core_mask) < group_args->max_tiler_cores)
3489 		return -EINVAL;
3490 
3491 	group = kzalloc(sizeof(*group), GFP_KERNEL);
3492 	if (!group)
3493 		return -ENOMEM;
3494 
3495 	spin_lock_init(&group->fatal_lock);
3496 	kref_init(&group->refcount);
3497 	group->state = PANTHOR_CS_GROUP_CREATED;
3498 	group->csg_id = -1;
3499 
3500 	group->ptdev = ptdev;
3501 	group->max_compute_cores = group_args->max_compute_cores;
3502 	group->compute_core_mask = group_args->compute_core_mask;
3503 	group->max_fragment_cores = group_args->max_fragment_cores;
3504 	group->fragment_core_mask = group_args->fragment_core_mask;
3505 	group->max_tiler_cores = group_args->max_tiler_cores;
3506 	group->tiler_core_mask = group_args->tiler_core_mask;
3507 	group->priority = group_args->priority;
3508 
3509 	INIT_LIST_HEAD(&group->wait_node);
3510 	INIT_LIST_HEAD(&group->run_node);
3511 	INIT_WORK(&group->term_work, group_term_work);
3512 	INIT_WORK(&group->sync_upd_work, group_sync_upd_work);
3513 	INIT_WORK(&group->tiler_oom_work, group_tiler_oom_work);
3514 	INIT_WORK(&group->release_work, group_release_work);
3515 
3516 	group->vm = panthor_vm_pool_get_vm(pfile->vms, group_args->vm_id);
3517 	if (!group->vm) {
3518 		ret = -EINVAL;
3519 		goto err_put_group;
3520 	}
3521 
3522 	suspend_size = csg_iface->control->suspend_size;
3523 	group->suspend_buf = panthor_fw_alloc_suspend_buf_mem(ptdev, suspend_size);
3524 	if (IS_ERR(group->suspend_buf)) {
3525 		ret = PTR_ERR(group->suspend_buf);
3526 		group->suspend_buf = NULL;
3527 		goto err_put_group;
3528 	}
3529 
3530 	suspend_size = csg_iface->control->protm_suspend_size;
3531 	group->protm_suspend_buf = panthor_fw_alloc_suspend_buf_mem(ptdev, suspend_size);
3532 	if (IS_ERR(group->protm_suspend_buf)) {
3533 		ret = PTR_ERR(group->protm_suspend_buf);
3534 		group->protm_suspend_buf = NULL;
3535 		goto err_put_group;
3536 	}
3537 
3538 	group->syncobjs = panthor_kernel_bo_create(ptdev, group->vm,
3539 						   group_args->queues.count *
3540 						   sizeof(struct panthor_syncobj_64b),
3541 						   DRM_PANTHOR_BO_NO_MMAP,
3542 						   DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
3543 						   DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
3544 						   PANTHOR_VM_KERNEL_AUTO_VA,
3545 						   "Group sync objects");
3546 	if (IS_ERR(group->syncobjs)) {
3547 		ret = PTR_ERR(group->syncobjs);
3548 		goto err_put_group;
3549 	}
3550 
3551 	ret = panthor_kernel_bo_vmap(group->syncobjs);
3552 	if (ret)
3553 		goto err_put_group;
3554 
3555 	memset(group->syncobjs->kmap, 0,
3556 	       group_args->queues.count * sizeof(struct panthor_syncobj_64b));
3557 
3558 	ret = xa_alloc(&gpool->xa, &gid, group, XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL);
3559 	if (ret)
3560 		goto err_put_group;
3561 
3562 	for (i = 0; i < group_args->queues.count; i++) {
3563 		group->queues[i] = group_create_queue(group, &queue_args[i], drm_client_id, gid, i);
3564 		if (IS_ERR(group->queues[i])) {
3565 			ret = PTR_ERR(group->queues[i]);
3566 			group->queues[i] = NULL;
3567 			goto err_erase_gid;
3568 		}
3569 
3570 		group->queue_count++;
3571 	}
3572 
3573 	group->idle_queues = GENMASK(group->queue_count - 1, 0);
3574 
3575 	mutex_lock(&sched->reset.lock);
3576 	if (atomic_read(&sched->reset.in_progress)) {
3577 		panthor_group_stop(group);
3578 	} else {
3579 		mutex_lock(&sched->lock);
3580 		list_add_tail(&group->run_node,
3581 			      &sched->groups.idle[group->priority]);
3582 		mutex_unlock(&sched->lock);
3583 	}
3584 	mutex_unlock(&sched->reset.lock);
3585 
3586 	add_group_kbo_sizes(group->ptdev, group);
3587 	spin_lock_init(&group->fdinfo.lock);
3588 
3589 	group_init_task_info(group);
3590 
3591 	return gid;
3592 
3593 err_erase_gid:
3594 	xa_erase(&gpool->xa, gid);
3595 
3596 err_put_group:
3597 	group_put(group);
3598 	return ret;
3599 }
3600 
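/**
 * panthor_group_destroy() - Destroy a scheduling group
 * @pfile: File the group belongs to.
 * @group_handle: Handle of the group to destroy.
 *
 * Removes the group from its pool, destroys its queue entities and schedules
 * its termination. The group object itself is freed once the last reference
 * is dropped.
 *
 * Return: 0 on success, a negative error code otherwise.
 */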
3601 int panthor_group_destroy(struct panthor_file *pfile, u32 group_handle)
3602 {
3603 	struct panthor_group_pool *gpool = pfile->groups;
3604 	struct panthor_device *ptdev = pfile->ptdev;
3605 	struct panthor_scheduler *sched = ptdev->scheduler;
3606 	struct panthor_group *group;
3607 
3608 	group = xa_erase(&gpool->xa, group_handle);
3609 	if (!group)
3610 		return -EINVAL;
3611 
3612 	for (u32 i = 0; i < group->queue_count; i++) {
3613 		if (group->queues[i])
3614 			drm_sched_entity_destroy(&group->queues[i]->entity);
3615 	}
3616 
3617 	mutex_lock(&sched->reset.lock);
3618 	mutex_lock(&sched->lock);
3619 	group->destroyed = true;
3620 	if (group->csg_id >= 0) {
3621 		sched_queue_delayed_work(sched, tick, 0);
3622 	} else if (!atomic_read(&sched->reset.in_progress)) {
3623 		/* Remove from the run queues, so the scheduler can't
3624 		 * pick the group on the next tick.
3625 		 */
3626 		list_del_init(&group->run_node);
3627 		list_del_init(&group->wait_node);
3628 		group_queue_work(group, term);
3629 	}
3630 	mutex_unlock(&sched->lock);
3631 	mutex_unlock(&sched->reset.lock);
3632 
3633 	group_put(group);
3634 	return 0;
3635 }
3636 
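/* Look up @group_handle in the pool and return the matching group with a
 * reference held, or NULL if there's no such group. The caller must release
 * the reference with group_put().
 */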
3637 static struct panthor_group *group_from_handle(struct panthor_group_pool *pool,
3638 					       u32 group_handle)
3639 {
3640 	struct panthor_group *group;
3641 
3642 	xa_lock(&pool->xa);
3643 	group = group_get(xa_load(&pool->xa, group_handle));
3644 	xa_unlock(&pool->xa);
3645 
3646 	return group;
3647 }
3648 
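/**
 * panthor_group_get_state() - Query the current state of a scheduling group
 * @pfile: File the group belongs to.
 * @get_state: Ioctl arguments, also used to return the state flags.
 *
 * Reports whether the group timed out, took a fatal fault (and on which
 * queues), or was marked innocent.
 *
 * Return: 0 on success, a negative error code otherwise.
 */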
3649 int panthor_group_get_state(struct panthor_file *pfile,
3650 			    struct drm_panthor_group_get_state *get_state)
3651 {
3652 	struct panthor_group_pool *gpool = pfile->groups;
3653 	struct panthor_device *ptdev = pfile->ptdev;
3654 	struct panthor_scheduler *sched = ptdev->scheduler;
3655 	struct panthor_group *group;
3656 
3657 	if (get_state->pad)
3658 		return -EINVAL;
3659 
3660 	group = group_from_handle(gpool, get_state->group_handle);
3661 	if (!group)
3662 		return -EINVAL;
3663 
3664 	memset(get_state, 0, sizeof(*get_state));
3665 
3666 	mutex_lock(&sched->lock);
3667 	if (group->timedout)
3668 		get_state->state |= DRM_PANTHOR_GROUP_STATE_TIMEDOUT;
3669 	if (group->fatal_queues) {
3670 		get_state->state |= DRM_PANTHOR_GROUP_STATE_FATAL_FAULT;
3671 		get_state->fatal_queues = group->fatal_queues;
3672 	}
3673 	if (group->innocent)
3674 		get_state->state |= DRM_PANTHOR_GROUP_STATE_INNOCENT;
3675 	mutex_unlock(&sched->lock);
3676 
3677 	group_put(group);
3678 	return 0;
3679 }
3680 
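/**
 * panthor_group_pool_create() - Create a group pool for an open Panthor file
 * @pfile: File the pool is attached to.
 *
 * Return: 0 on success, a negative error code otherwise.
 */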
3681 int panthor_group_pool_create(struct panthor_file *pfile)
3682 {
3683 	struct panthor_group_pool *gpool;
3684 
3685 	gpool = kzalloc(sizeof(*gpool), GFP_KERNEL);
3686 	if (!gpool)
3687 		return -ENOMEM;
3688 
3689 	xa_init_flags(&gpool->xa, XA_FLAGS_ALLOC1);
3690 	pfile->groups = gpool;
3691 	return 0;
3692 }
3693 
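/**
 * panthor_group_pool_destroy() - Destroy the group pool of an open Panthor file
 * @pfile: File the pool is attached to.
 *
 * Destroys all the groups still present in the pool before releasing it.
 */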
3694 void panthor_group_pool_destroy(struct panthor_file *pfile)
3695 {
3696 	struct panthor_group_pool *gpool = pfile->groups;
3697 	struct panthor_group *group;
3698 	unsigned long i;
3699 
3700 	if (IS_ERR_OR_NULL(gpool))
3701 		return;
3702 
3703 	xa_for_each(&gpool->xa, i, group)
3704 		panthor_group_destroy(pfile, i);
3705 
3706 	xa_destroy(&gpool->xa);
3707 	kfree(gpool);
3708 	pfile->groups = NULL;
3709 }
3710 
3711 /**
3712  * panthor_fdinfo_gather_group_mem_info() - Retrieve the aggregate size of all private kernel BOs
3713  * belonging to all the groups owned by an open Panthor file
3714  * @pfile: File.
3715  * @stats: Memory statistics to be updated.
3716  *
3717  */
3718 void
3719 panthor_fdinfo_gather_group_mem_info(struct panthor_file *pfile,
3720 				     struct drm_memory_stats *stats)
3721 {
3722 	struct panthor_group_pool *gpool = pfile->groups;
3723 	struct panthor_group *group;
3724 	unsigned long i;
3725 
3726 	if (IS_ERR_OR_NULL(gpool))
3727 		return;
3728 
3729 	xa_lock(&gpool->xa);
3730 	xa_for_each(&gpool->xa, i, group) {
3731 		stats->resident += group->fdinfo.kbo_sizes;
3732 		if (group->csg_id >= 0)
3733 			stats->active += group->fdinfo.kbo_sizes;
3734 	}
3735 	xa_unlock(&gpool->xa);
3736 }
3737 
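/* Called when the last reference on a job is dropped: releases the scheduler
 * job, the done fence and the reference held on the group, then frees the job.
 */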
3738 static void job_release(struct kref *ref)
3739 {
3740 	struct panthor_job *job = container_of(ref, struct panthor_job, refcount);
3741 
3742 	drm_WARN_ON(&job->group->ptdev->base, !list_empty(&job->node));
3743 
3744 	if (job->base.s_fence)
3745 		drm_sched_job_cleanup(&job->base);
3746 
3747 	if (job->done_fence && job->done_fence->ops)
3748 		dma_fence_put(job->done_fence);
3749 	else
3750 		dma_fence_free(job->done_fence);
3751 
3752 	group_put(job->group);
3753 
3754 	kfree(job);
3755 }
3756 
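/**
 * panthor_job_get() - Get a reference on a job
 * @sched_job: Job to get a reference on. Can be NULL.
 *
 * Return: @sched_job, for call chaining.
 */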
3757 struct drm_sched_job *panthor_job_get(struct drm_sched_job *sched_job)
3758 {
3759 	if (sched_job) {
3760 		struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
3761 
3762 		kref_get(&job->refcount);
3763 	}
3764 
3765 	return sched_job;
3766 }
3767 
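/**
 * panthor_job_put() - Release a reference on a job
 * @sched_job: Job to release the reference on. Can be NULL.
 */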
3768 void panthor_job_put(struct drm_sched_job *sched_job)
3769 {
3770 	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
3771 
3772 	if (sched_job)
3773 		kref_put(&job->refcount, job_release);
3774 }
3775 
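/**
 * panthor_job_vm() - Return the VM targeted by a job
 * @sched_job: Job to query.
 *
 * Return: The VM of the group the job belongs to.
 */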
3776 struct panthor_vm *panthor_job_vm(struct drm_sched_job *sched_job)
3777 {
3778 	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
3779 
3780 	return job->group->vm;
3781 }
3782 
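/**
 * panthor_job_create() - Create a job out of a userspace queue submission
 * @pfile: File the job is submitted on.
 * @group_handle: Handle of the group owning the queue targeted by the job.
 * @qsubmit: Submission arguments coming from userspace.
 * @drm_client_id: ID of the DRM client submitting the job.
 *
 * Validates the submission, grabs a reference on the target group, allocates
 * the done fence when the command stream is not empty, and initializes the
 * drm_sched job with the number of credits it will consume.
 *
 * Return: A valid drm_sched_job pointer on success, an ERR_PTR() otherwise.
 */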
3783 struct drm_sched_job *
3784 panthor_job_create(struct panthor_file *pfile,
3785 		   u16 group_handle,
3786 		   const struct drm_panthor_queue_submit *qsubmit,
3787 		   u64 drm_client_id)
3788 {
3789 	struct panthor_group_pool *gpool = pfile->groups;
3790 	struct panthor_job *job;
3791 	u32 credits;
3792 	int ret;
3793 
3794 	if (qsubmit->pad)
3795 		return ERR_PTR(-EINVAL);
3796 
3797 	/* If stream_addr is zero, stream_size must be zero as well. */
3798 	if ((qsubmit->stream_size == 0) != (qsubmit->stream_addr == 0))
3799 		return ERR_PTR(-EINVAL);
3800 
3801 	/* Make sure the address is aligned on 64-byte (cacheline) and the size is
3802 	 * aligned on 8-byte (instruction size).
3803 	 */
3804 	if ((qsubmit->stream_addr & 63) || (qsubmit->stream_size & 7))
3805 		return ERR_PTR(-EINVAL);
3806 
3807 	/* bits 24:30 must be zero. */
3808 	if (qsubmit->latest_flush & GENMASK(30, 24))
3809 		return ERR_PTR(-EINVAL);
3810 
3811 	job = kzalloc(sizeof(*job), GFP_KERNEL);
3812 	if (!job)
3813 		return ERR_PTR(-ENOMEM);
3814 
3815 	kref_init(&job->refcount);
3816 	job->queue_idx = qsubmit->queue_index;
3817 	job->call_info.size = qsubmit->stream_size;
3818 	job->call_info.start = qsubmit->stream_addr;
3819 	job->call_info.latest_flush = qsubmit->latest_flush;
3820 	INIT_LIST_HEAD(&job->node);
3821 
3822 	job->group = group_from_handle(gpool, group_handle);
3823 	if (!job->group) {
3824 		ret = -EINVAL;
3825 		goto err_put_job;
3826 	}
3827 
3828 	if (!group_can_run(job->group)) {
3829 		ret = -EINVAL;
3830 		goto err_put_job;
3831 	}
3832 
3833 	if (job->queue_idx >= job->group->queue_count ||
3834 	    !job->group->queues[job->queue_idx]) {
3835 		ret = -EINVAL;
3836 		goto err_put_job;
3837 	}
3838 
3839 	/* Empty command streams don't need a fence, they'll pick the one from
3840 	 * the previously submitted job.
3841 	 */
3842 	if (job->call_info.size) {
3843 		job->done_fence = kzalloc(sizeof(*job->done_fence), GFP_KERNEL);
3844 		if (!job->done_fence) {
3845 			ret = -ENOMEM;
3846 			goto err_put_job;
3847 		}
3848 	}
3849 
3850 	job->profiling.mask = pfile->ptdev->profile_mask;
3851 	credits = calc_job_credits(job->profiling.mask);
3852 	if (credits == 0) {
3853 		ret = -EINVAL;
3854 		goto err_put_job;
3855 	}
3856 
3857 	ret = drm_sched_job_init(&job->base,
3858 				 &job->group->queues[job->queue_idx]->entity,
3859 				 credits, job->group, drm_client_id);
3860 	if (ret)
3861 		goto err_put_job;
3862 
3863 	return &job->base;
3864 
3865 err_put_job:
3866 	panthor_job_put(&job->base);
3867 	return ERR_PTR(ret);
3868 }
3869 
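/**
 * panthor_job_update_resvs() - Add the job's finished fence to the VM reservations
 * @exec: drm_exec context.
 * @sched_job: Job whose finished fence should be registered.
 *
 * The fence is added with DMA_RESV_USAGE_BOOKKEEP usage to the reservation
 * objects of the VM the job's group is bound to.
 */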
3870 void panthor_job_update_resvs(struct drm_exec *exec, struct drm_sched_job *sched_job)
3871 {
3872 	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
3873 
3874 	panthor_vm_update_resvs(job->group->vm, exec, &sched_job->s_fence->finished,
3875 				DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
3876 }
3877 
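/**
 * panthor_sched_unplug() - Stop the scheduler on device unplug
 * @ptdev: Device.
 *
 * Cancels the scheduler tick and drops the runtime PM reference the scheduler
 * might still hold.
 */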
3878 void panthor_sched_unplug(struct panthor_device *ptdev)
3879 {
3880 	struct panthor_scheduler *sched = ptdev->scheduler;
3881 
3882 	cancel_delayed_work_sync(&sched->tick_work);
3883 
3884 	mutex_lock(&sched->lock);
3885 	if (sched->pm.has_ref) {
3886 		pm_runtime_put(ptdev->base.dev);
3887 		sched->pm.has_ref = false;
3888 	}
3889 	mutex_unlock(&sched->lock);
3890 }
3891 
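/* Cleanup callback releasing the scheduler workqueues and checking that no
 * group was left behind. Registered as a drmm action, and also called
 * directly if the workqueue allocation fails at init time.
 */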
3892 static void panthor_sched_fini(struct drm_device *ddev, void *res)
3893 {
3894 	struct panthor_scheduler *sched = res;
3895 	int prio;
3896 
3897 	if (!sched || !sched->csg_slot_count)
3898 		return;
3899 
3900 	cancel_delayed_work_sync(&sched->tick_work);
3901 
3902 	if (sched->wq)
3903 		destroy_workqueue(sched->wq);
3904 
3905 	if (sched->heap_alloc_wq)
3906 		destroy_workqueue(sched->heap_alloc_wq);
3907 
3908 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
3909 		drm_WARN_ON(ddev, !list_empty(&sched->groups.runnable[prio]));
3910 		drm_WARN_ON(ddev, !list_empty(&sched->groups.idle[prio]));
3911 	}
3912 
3913 	drm_WARN_ON(ddev, !list_empty(&sched->groups.waiting));
3914 }
3915 
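/**
 * panthor_sched_init() - Initialize the scheduler
 * @ptdev: Device.
 *
 * Return: 0 on success, a negative error code otherwise.
 */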
3916 int panthor_sched_init(struct panthor_device *ptdev)
3917 {
3918 	struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
3919 	struct panthor_fw_csg_iface *csg_iface = panthor_fw_get_csg_iface(ptdev, 0);
3920 	struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, 0, 0);
3921 	struct panthor_scheduler *sched;
3922 	u32 gpu_as_count, num_groups;
3923 	int prio, ret;
3924 
3925 	sched = drmm_kzalloc(&ptdev->base, sizeof(*sched), GFP_KERNEL);
3926 	if (!sched)
3927 		return -ENOMEM;
3928 
3929 	/* The highest bit in JOB_INT_* is reserved for global IRQs. That
3930 	 * leaves 31 bits for CSG IRQs, hence the MAX_CSGS clamp here.
3931 	 */
3932 	num_groups = min_t(u32, MAX_CSGS, glb_iface->control->group_num);
3933 
3934 	/* The FW-side scheduler might deadlock if two groups with the same
3935 	 * priority try to access a set of resources that overlaps, with part
3936 	 * of the resources being allocated to one group and the other part to
3937 	 * the other group, both groups waiting for the remaining resources to
3938 	 * be allocated. To avoid that, it is recommended to assign each CSG a
3939 	 * different priority. In theory we could allow several groups to have
3940 	 * the same CSG priority if they don't request the same resources, but
3941 	 * that makes the scheduling logic more complicated, so let's clamp
3942 	 * the number of CSG slots to MAX_CSG_PRIO + 1 for now.
3943 	 */
3944 	num_groups = min_t(u32, MAX_CSG_PRIO + 1, num_groups);
3945 
3946 	/* We need at least one AS for the MCU and one for the GPU contexts. */
3947 	gpu_as_count = hweight32(ptdev->gpu_info.as_present & GENMASK(31, 1));
3948 	if (!gpu_as_count) {
3949 		drm_err(&ptdev->base, "Not enough AS (%d, expected at least 2)",
3950 			gpu_as_count + 1);
3951 		return -EINVAL;
3952 	}
3953 
3954 	sched->ptdev = ptdev;
3955 	sched->sb_slot_count = CS_FEATURES_SCOREBOARDS(cs_iface->control->features);
3956 	sched->csg_slot_count = num_groups;
3957 	sched->cs_slot_count = csg_iface->control->stream_num;
3958 	sched->as_slot_count = gpu_as_count;
3959 	ptdev->csif_info.csg_slot_count = sched->csg_slot_count;
3960 	ptdev->csif_info.cs_slot_count = sched->cs_slot_count;
3961 	ptdev->csif_info.scoreboard_slot_count = sched->sb_slot_count;
3962 
3963 	sched->last_tick = 0;
3964 	sched->resched_target = U64_MAX;
3965 	sched->tick_period = msecs_to_jiffies(10);
3966 	INIT_DELAYED_WORK(&sched->tick_work, tick_work);
3967 	INIT_WORK(&sched->sync_upd_work, sync_upd_work);
3968 	INIT_WORK(&sched->fw_events_work, process_fw_events_work);
3969 
3970 	ret = drmm_mutex_init(&ptdev->base, &sched->lock);
3971 	if (ret)
3972 		return ret;
3973 
3974 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
3975 		INIT_LIST_HEAD(&sched->groups.runnable[prio]);
3976 		INIT_LIST_HEAD(&sched->groups.idle[prio]);
3977 	}
3978 	INIT_LIST_HEAD(&sched->groups.waiting);
3979 
3980 	ret = drmm_mutex_init(&ptdev->base, &sched->reset.lock);
3981 	if (ret)
3982 		return ret;
3983 
3984 	INIT_LIST_HEAD(&sched->reset.stopped_groups);
3985 
3986 	/* sched->heap_alloc_wq will be used for heap chunk allocation on
3987 	 * tiler OOM events, which means we can't use the same workqueue for
3988 	 * the scheduler because work items queued by the scheduler are in
3989 	 * the dma-signalling path. Allocate a dedicated heap_alloc_wq to
3990 	 * work around this limitation.
3991 	 *
3992 	 * FIXME: Ultimately, what we need is a failable/non-blocking GEM
3993 	 * allocation path that we can call when a heap OOM is reported. The
3994 	 * FW is smart enough to fall back on other methods if the kernel can't
3995 	 * allocate memory, and fail the tiling job if none of these
3996 	 * countermeasures worked.
3997 	 *
3998 	 * Set WQ_MEM_RECLAIM on sched->wq to unblock the situation when the
3999 	 * system is running out of memory.
4000 	 */
4001 	sched->heap_alloc_wq = alloc_workqueue("panthor-heap-alloc", WQ_UNBOUND, 0);
4002 	sched->wq = alloc_workqueue("panthor-csf-sched", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
4003 	if (!sched->wq || !sched->heap_alloc_wq) {
4004 		panthor_sched_fini(&ptdev->base, sched);
4005 		drm_err(&ptdev->base, "Failed to allocate the workqueues");
4006 		return -ENOMEM;
4007 	}
4008 
4009 	ret = drmm_add_action_or_reset(&ptdev->base, panthor_sched_fini, sched);
4010 	if (ret)
4011 		return ret;
4012 
4013 	ptdev->scheduler = sched;
4014 	return 0;
4015 }
4016