xref: /linux/drivers/gpu/drm/panthor/panthor_sched.c (revision 44343e8b250abb2f6bfd615493ca07a7f11f3cc2)
1 // SPDX-License-Identifier: GPL-2.0 or MIT
2 /* Copyright 2023 Collabora ltd. */
3 
4 #include <drm/drm_drv.h>
5 #include <drm/drm_exec.h>
6 #include <drm/drm_gem_shmem_helper.h>
7 #include <drm/drm_managed.h>
8 #include <drm/gpu_scheduler.h>
9 #include <drm/panthor_drm.h>
10 
11 #include <linux/build_bug.h>
12 #include <linux/cleanup.h>
13 #include <linux/clk.h>
14 #include <linux/delay.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/dma-resv.h>
17 #include <linux/firmware.h>
18 #include <linux/interrupt.h>
19 #include <linux/io.h>
20 #include <linux/iopoll.h>
21 #include <linux/iosys-map.h>
22 #include <linux/module.h>
23 #include <linux/platform_device.h>
24 #include <linux/pm_runtime.h>
25 
26 #include "panthor_devfreq.h"
27 #include "panthor_device.h"
28 #include "panthor_fw.h"
29 #include "panthor_gem.h"
30 #include "panthor_gpu.h"
31 #include "panthor_heap.h"
32 #include "panthor_mmu.h"
33 #include "panthor_regs.h"
34 #include "panthor_sched.h"
35 
36 /**
37  * DOC: Scheduler
38  *
39  * Mali CSF hardware adopts a firmware-assisted scheduling model, where
40  * the firmware takes care of scheduling aspects, to some extent.
41  *
42  * Scheduling happens at the group level: each group contains 1 to N
43  * queues (N is FW/hardware dependent, and exposed through the firmware
44  * interface). Each queue is assigned a command stream ring buffer,
45  * which serves as a way to get jobs submitted to the GPU, among other
46  * things.
47  *
48  * The firmware can schedule a maximum of M groups (M is FW/hardware
49  * dependent, and exposed through the firmware interface). Past
50  * this maximum number of groups, the kernel must take care of
51  * rotating the groups passed to the firmware so every group gets
52  * a chance to have its queues scheduled for execution.
53  *
54  * The current implementation only supports kernel-mode queues.
55  * In other words, userspace doesn't have access to the ring buffer.
56  * Instead, userspace passes indirect command stream buffers that are
57  * called from the queue ring buffer by the kernel, using a pre-defined
58  * sequence of command stream instructions, to ensure the userspace driver
59  * always gets consistent results (cache maintenance,
60  * synchronization, ...).
61  *
62  * We rely on the drm_gpu_scheduler framework to deal with job
63  * dependencies and submission. As any other driver dealing with a
64  * FW-scheduler, we use the 1:1 entity:scheduler mode, such that each
65  * entity has its own job scheduler. When a job is ready to be executed
66  * (all its dependencies are met), it is pushed to the appropriate
67  * queue ring-buffer, and the group is scheduled for execution if it
68  * wasn't already active.
69  *
70  * Kernel-side group scheduling is timeslice-based. When there are
71  * fewer groups than slots, the periodic tick is disabled and we
72  * just let the FW schedule the active groups. When there are more
73  * groups than slots, we give each group a chance to execute for
74  * a given amount of time, and then re-evaluate and pick new groups
75  * to schedule. The group selection algorithm is based on
76  * priority+round-robin.
77  *
78  * Even though user-mode queues are out of scope right now, the
79  * current design takes them into account by avoiding any guess on the
80  * group/queue state that would be based on information we wouldn't have
81  * if userspace was in charge of the ring buffer. That's also one of the
82  * reasons we don't do 'cooperative' scheduling (encoding FW group slot
83  * reservation as a dma_fence that would be returned from the
84  * drm_gpu_scheduler::prepare_job() hook, and treating group rotation as
85  * a queue of waiters, ordered by job submission order). This approach
86  * would work for kernel-mode queues, but would make user-mode queues a
87  * lot more complicated to retrofit.
88  */
89 
90 #define JOB_TIMEOUT_MS				5000
91 
92 #define MAX_CSG_PRIO				0xf
93 
94 #define NUM_INSTRS_PER_CACHE_LINE		(64 / sizeof(u64))
95 #define MAX_INSTRS_PER_JOB			24
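
/*
 * Each CS instruction is a u64 (8 bytes), so a 64-byte cache line holds
 * 64 / 8 = 8 instructions, and MAX_INSTRS_PER_JOB (24) corresponds to three
 * cache lines worth of instructions emitted for a single job.
 */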
96 
97 struct panthor_group;
98 
99 /**
100  * struct panthor_csg_slot - Command stream group slot
101  *
102  * This represents a FW slot for a scheduling group.
103  */
104 struct panthor_csg_slot {
105 	/** @group: Scheduling group bound to this slot. */
106 	struct panthor_group *group;
107 
108 	/** @priority: Group priority. */
109 	u8 priority;
110 
111 	/**
112 	 * @idle: True if the group bound to this slot is idle.
113 	 *
114 	 * A group is idle when all its queues have nothing waiting for
115 	 * execution, or when they are blocked waiting for something
116 	 * to happen (synchronization object).
117 	 */
118 	bool idle;
119 };
120 
121 /**
122  * enum panthor_csg_priority - Group priority
123  */
124 enum panthor_csg_priority {
125 	/** @PANTHOR_CSG_PRIORITY_LOW: Low priority group. */
126 	PANTHOR_CSG_PRIORITY_LOW = 0,
127 
128 	/** @PANTHOR_CSG_PRIORITY_MEDIUM: Medium priority group. */
129 	PANTHOR_CSG_PRIORITY_MEDIUM,
130 
131 	/** @PANTHOR_CSG_PRIORITY_HIGH: High priority group. */
132 	PANTHOR_CSG_PRIORITY_HIGH,
133 
134 	/**
135 	 * @PANTHOR_CSG_PRIORITY_RT: Real-time priority group.
136 	 *
137 	 * Real-time priority allows one to preempt scheduling of other
138 	 * non-real-time groups. When such a group becomes executable,
139 	 * it will evict the group with the lowest non-rt priority if
140 	 * there's no free group slot available.
141 	 */
142 	PANTHOR_CSG_PRIORITY_RT,
143 
144 	/** @PANTHOR_CSG_PRIORITY_COUNT: Number of priority levels. */
145 	PANTHOR_CSG_PRIORITY_COUNT,
146 };
147 
148 /**
149  * struct panthor_scheduler - Object used to manage the scheduler
150  */
151 struct panthor_scheduler {
152 	/** @ptdev: Device. */
153 	struct panthor_device *ptdev;
154 
155 	/**
156 	 * @wq: Workqueue used by our internal scheduler logic and
157 	 * drm_gpu_scheduler.
158 	 *
159 	 * Used for the scheduler tick, group updates or other kinds of FW
160 	 * event processing that can't be handled in the threaded interrupt
161 	 * path. Also passed to the drm_gpu_scheduler instances embedded
162 	 * in panthor_queue.
163 	 */
164 	struct workqueue_struct *wq;
165 
166 	/**
167 	 * @heap_alloc_wq: Workqueue used to schedule tiler_oom works.
168 	 *
169 	 * We have a workqueue dedicated to heap chunk allocation work to avoid
170 	 * blocking the rest of the scheduler if the allocation tries to
171 	 * reclaim memory.
172 	 */
173 	struct workqueue_struct *heap_alloc_wq;
174 
175 	/** @tick_work: Work executed on a scheduling tick. */
176 	struct delayed_work tick_work;
177 
178 	/**
179 	 * @sync_upd_work: Work used to process synchronization object updates.
180 	 *
181 	 * We use this work to unblock queues/groups that were waiting on a
182 	 * synchronization object.
183 	 */
184 	struct work_struct sync_upd_work;
185 
186 	/**
187 	 * @fw_events_work: Work used to process FW events outside the interrupt path.
188 	 *
189 	 * Even if the interrupt is threaded, we need any event processing
190 	 * that requires taking the panthor_scheduler::lock to be processed
191 	 * outside the interrupt path so we don't block the tick logic when
192 	 * it calls panthor_fw_{csg,glb}_wait_acks(). Since most of the
193 	 * event processing requires taking this lock, we just delegate all
194 	 * FW event processing to the scheduler workqueue.
195 	 */
196 	struct work_struct fw_events_work;
197 
198 	/**
199 	 * @fw_events: Bitmask encoding pending FW events.
200 	 */
201 	atomic_t fw_events;
202 
203 	/**
204 	 * @resched_target: When the next tick should occur.
205 	 *
206 	 * Expressed in jiffies.
207 	 */
208 	u64 resched_target;
209 
210 	/**
211 	 * @last_tick: When the last tick occurred.
212 	 *
213 	 * Expressed in jiffies.
214 	 */
215 	u64 last_tick;
216 
217 	/** @tick_period: Tick period in jiffies. */
218 	u64 tick_period;
219 
220 	/**
221 	 * @lock: Lock protecting access to all the scheduler fields.
222 	 *
223 	 * Should be taken in the tick work, the irq handler, and anywhere the @groups
224 	 * fields are touched.
225 	 */
226 	struct mutex lock;
227 
228 	/** @groups: Various lists used to classify groups. */
229 	struct {
230 		/**
231 		 * @runnable: Runnable group lists.
232 		 *
233 		 * When a group has queues that want to execute something,
234 		 * its panthor_group::run_node should be inserted here.
235 		 *
236 		 * One list per-priority.
237 		 */
238 		struct list_head runnable[PANTHOR_CSG_PRIORITY_COUNT];
239 
240 		/**
241 		 * @idle: Idle group lists.
242 		 *
243 		 * When all queues of a group are idle (either because they
244 		 * have nothing to execute, or because they are blocked), the
245 		 * panthor_group::run_node field should be inserted here.
246 		 *
247 		 * One list per-priority.
248 		 */
249 		struct list_head idle[PANTHOR_CSG_PRIORITY_COUNT];
250 
251 		/**
252 		 * @waiting: List of groups whose queues are blocked on a
253 		 * synchronization object.
254 		 *
255 		 * Insert panthor_group::wait_node here when a group is waiting
256 		 * for synchronization objects to be signaled.
257 		 *
258 		 * This list is evaluated in the @sync_upd_work work.
259 		 */
260 		struct list_head waiting;
261 	} groups;
262 
263 	/**
264 	 * @csg_slots: FW command stream group slots.
265 	 */
266 	struct panthor_csg_slot csg_slots[MAX_CSGS];
267 
268 	/** @csg_slot_count: Number of command stream group slots exposed by the FW. */
269 	u32 csg_slot_count;
270 
271 	/** @cs_slot_count: Number of command stream slots per group slot exposed by the FW. */
272 	u32 cs_slot_count;
273 
274 	/** @as_slot_count: Number of address space slots supported by the MMU. */
275 	u32 as_slot_count;
276 
277 	/** @used_csg_slot_count: Number of command stream group slots currently used. */
278 	u32 used_csg_slot_count;
279 
280 	/** @sb_slot_count: Number of scoreboard slots. */
281 	u32 sb_slot_count;
282 
283 	/**
284 	 * @might_have_idle_groups: True if an active group might have become idle.
285 	 *
286 	 * This will force a tick, so other runnable groups can be scheduled if one
287 	 * or more active groups became idle.
288 	 */
289 	bool might_have_idle_groups;
290 
291 	/** @pm: Power management related fields. */
292 	struct {
293 		/** @has_ref: True if the scheduler owns a runtime PM reference. */
294 		bool has_ref;
295 	} pm;
296 
297 	/** @reset: Reset related fields. */
298 	struct {
299 		/** @lock: Lock protecting the other reset fields. */
300 		struct mutex lock;
301 
302 		/**
303 		 * @in_progress: True if a reset is in progress.
304 		 *
305 		 * Set to true in panthor_sched_pre_reset() and back to false in
306 		 * panthor_sched_post_reset().
307 		 */
308 		atomic_t in_progress;
309 
310 		/**
311 		 * @stopped_groups: List containing all groups that were stopped
312 		 * before a reset.
313 		 *
314 		 * Insert panthor_group::run_node in the pre_reset path.
315 		 */
316 		struct list_head stopped_groups;
317 	} reset;
318 };
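
/*
 * A minimal, illustrative sketch of the priority+round-robin group selection
 * described in the Scheduler DOC above. The real logic lives in the tick work
 * further down in this file and also deals with idle groups, RT priority and
 * slot eviction; the helper below is hypothetical and only shows the
 * selection order.
 */
#if 0	/* Illustrative sketch, not built. */
static void pick_runnable_groups_sketch(struct panthor_scheduler *sched,
					struct list_head *picked)
{
	u32 free_slots = sched->csg_slot_count;
	int prio;

	lockdep_assert_held(&sched->lock);

	/* Walk priorities from highest to lowest, picking runnable groups in
	 * list order, which gives round-robin behavior within a priority
	 * level once groups get rotated at every tick.
	 */
	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0 && free_slots; prio--) {
		struct panthor_group *group, *tmp;

		list_for_each_entry_safe(group, tmp, &sched->groups.runnable[prio], run_node) {
			if (!free_slots)
				break;

			list_move_tail(&group->run_node, picked);
			free_slots--;
		}
	}
}
#endif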
319 
320 /**
321  * struct panthor_syncobj_32b - 32-bit FW synchronization object
322  */
323 struct panthor_syncobj_32b {
324 	/** @seqno: Sequence number. */
325 	u32 seqno;
326 
327 	/**
328 	 * @status: Status.
329 	 *
330 	 * Not zero on failure.
331 	 */
332 	u32 status;
333 };
334 
335 /**
336  * struct panthor_syncobj_64b - 64-bit FW synchronization object
337  */
338 struct panthor_syncobj_64b {
339 	/** @seqno: Sequence number. */
340 	u64 seqno;
341 
342 	/**
343 	 * @status: Status.
344 	 *
345 	 * Not zero on failure.
346 	 */
347 	u32 status;
348 
349 	/** @pad: MBZ. */
350 	u32 pad;
351 };
352 
353 /**
354  * struct panthor_queue - Execution queue
355  */
356 struct panthor_queue {
357 	/** @scheduler: DRM scheduler used for this queue. */
358 	struct drm_gpu_scheduler scheduler;
359 
360 	/** @entity: DRM scheduling entity used for this queue. */
361 	struct drm_sched_entity entity;
362 
363 	/**
364 	 * @remaining_time: Time remaining before the job timeout expires.
365 	 *
366 	 * The job timeout is suspended when the queue is not scheduled by the
367 	 * FW. Every time we suspend the timer, we need to save the remaining
368 	 * time so we can restore it later on.
369 	 */
370 	unsigned long remaining_time;
371 
372 	/** @timeout_suspended: True if the job timeout was suspended. */
373 	bool timeout_suspended;
374 
375 	/**
376 	 * @doorbell_id: Doorbell assigned to this queue.
377 	 *
378 	 * Right now, all queues of a group share the same doorbell, and the
379 	 * doorbell ID is set to group_slot + 1 when the group is assigned a
380 	 * slot. But we might decide to provide fine-grained doorbell assignment
381 	 * at some point, so we don't have to wake up all queues in a group
382 	 * every time one of them is updated.
383 	 */
384 	u8 doorbell_id;
385 
386 	/**
387 	 * @priority: Priority of the queue inside the group.
388 	 *
389 	 * Must be less than 16 (Only 4 bits available).
390 	 */
391 	u8 priority;
392 #define CSF_MAX_QUEUE_PRIO	GENMASK(3, 0)
393 
394 	/** @ringbuf: Command stream ring-buffer. */
395 	struct panthor_kernel_bo *ringbuf;
396 
397 	/** @iface: Firmware interface. */
398 	struct {
399 		/** @mem: FW memory allocated for this interface. */
400 		struct panthor_kernel_bo *mem;
401 
402 		/** @input: Input interface. */
403 		struct panthor_fw_ringbuf_input_iface *input;
404 
405 		/** @output: Output interface. */
406 		const struct panthor_fw_ringbuf_output_iface *output;
407 
408 		/** @input_fw_va: FW virtual address of the input interface buffer. */
409 		u32 input_fw_va;
410 
411 		/** @output_fw_va: FW virtual address of the output interface buffer. */
412 		u32 output_fw_va;
413 	} iface;
414 
415 	/**
416 	 * @syncwait: Stores information about the synchronization object this
417 	 * queue is waiting on.
418 	 */
419 	struct {
420 		/** @gpu_va: GPU address of the synchronization object. */
421 		u64 gpu_va;
422 
423 		/** @ref: Reference value to compare against. */
424 		u64 ref;
425 
426 		/** @gt: True if this is a greater-than test. */
427 		bool gt;
428 
429 		/** @sync64: True if this is a 64-bit sync object. */
430 		bool sync64;
431 
432 		/** @obj: Buffer object holding the synchronization object. */
433 		struct drm_gem_object *obj;
434 
435 		/** @offset: Offset of the synchronization object inside @obj. */
436 		u64 offset;
437 
438 		/**
439 		 * @kmap: Kernel mapping of the buffer object holding the
440 		 * synchronization object.
441 		 */
442 		void *kmap;
443 	} syncwait;
444 
445 	/** @fence_ctx: Fence context fields. */
446 	struct {
447 		/** @lock: Used to protect access to all fences allocated by this context. */
448 		spinlock_t lock;
449 
450 		/**
451 		 * @id: Fence context ID.
452 		 *
453 		 * Allocated with dma_fence_context_alloc().
454 		 */
455 		u64 id;
456 
457 		/** @seqno: Sequence number of the last initialized fence. */
458 		atomic64_t seqno;
459 
460 		/**
461 		 * @last_fence: Fence of the last submitted job.
462 		 *
463 		 * We return this fence when we get an empty command stream. This
464 		 * way, we know all earlier jobs have completed once
465 		 * drm_sched_job::s_fence::finished signals, without having to feed
466 		 * the CS ring buffer with a dummy job (see the sketch after this struct).
467 		 */
468 		struct dma_fence *last_fence;
469 
470 		/**
471 		 * @in_flight_jobs: List containing all in-flight jobs.
472 		 *
473 		 * Used to keep track and signal panthor_job::done_fence when the
474 		 * synchronization object attached to the queue is signaled.
475 		 */
476 		struct list_head in_flight_jobs;
477 	} fence_ctx;
478 
479 	/** @profiling: Job profiling data slots and access information. */
480 	struct {
481 		/** @slots: Kernel BO holding the slots. */
482 		struct panthor_kernel_bo *slots;
483 
484 		/** @slot_count: Number of jobs the ring buffer can hold at once. */
485 		u32 slot_count;
486 
487 		/** @seqno: Index of the next available profiling information slot. */
488 		u32 seqno;
489 	} profiling;
490 };
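
/*
 * A minimal sketch of how @fence_ctx.last_fence can be used when a job comes
 * with an empty command stream, as described in the struct above. The actual
 * submission path (implemented later in this file) is more involved; the
 * helper below is hypothetical and glosses over initialization and locking
 * details.
 */
#if 0	/* Illustrative sketch, not built. */
static struct dma_fence *
queue_empty_job_fence_sketch(struct panthor_queue *queue)
{
	/* Nothing to push to the ring buffer: all previously submitted jobs
	 * are done once last_fence signals, so return an extra reference to
	 * it instead of queuing a dummy job.
	 */
	return dma_fence_get(queue->fence_ctx.last_fence);
}
#endif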
491 
492 /**
493  * enum panthor_group_state - Scheduling group state.
494  */
495 enum panthor_group_state {
496 	/** @PANTHOR_CS_GROUP_CREATED: Group was created, but not scheduled yet. */
497 	PANTHOR_CS_GROUP_CREATED,
498 
499 	/** @PANTHOR_CS_GROUP_ACTIVE: Group is currently scheduled. */
500 	PANTHOR_CS_GROUP_ACTIVE,
501 
502 	/**
503 	 * @PANTHOR_CS_GROUP_SUSPENDED: Group was scheduled at least once, but is
504 	 * inactive/suspended right now.
505 	 */
506 	PANTHOR_CS_GROUP_SUSPENDED,
507 
508 	/**
509 	 * @PANTHOR_CS_GROUP_TERMINATED: Group was terminated.
510 	 *
511 	 * Can no longer be scheduled. The only allowed action is destruction.
512 	 */
513 	PANTHOR_CS_GROUP_TERMINATED,
514 
515 	/**
516 	 * @PANTHOR_CS_GROUP_UNKNOWN_STATE: Group is in an unknown state.
517 	 *
518 	 * The FW returned an inconsistent state. The group is flagged unusable
519 	 * and can no longer be scheduled. The only allowed action is
520 	 * destruction.
521 	 *
522 	 * When that happens, we also schedule a FW reset, to start from a fresh
523 	 * state.
524 	 */
525 	PANTHOR_CS_GROUP_UNKNOWN_STATE,
526 };
527 
528 /**
529  * struct panthor_group - Scheduling group object
530  */
531 struct panthor_group {
532 	/** @refcount: Reference count */
533 	struct kref refcount;
534 
535 	/** @ptdev: Device. */
536 	struct panthor_device *ptdev;
537 
538 	/** @vm: VM bound to the group. */
539 	struct panthor_vm *vm;
540 
541 	/** @compute_core_mask: Mask of shader cores that can be used for compute jobs. */
542 	u64 compute_core_mask;
543 
544 	/** @fragment_core_mask: Mask of shader cores that can be used for fragment jobs. */
545 	u64 fragment_core_mask;
546 
547 	/** @tiler_core_mask: Mask of tiler cores that can be used for tiler jobs. */
548 	u64 tiler_core_mask;
549 
550 	/** @max_compute_cores: Maximum number of shader cores used for compute jobs. */
551 	u8 max_compute_cores;
552 
553 	/** @max_fragment_cores: Maximum number of shader cores used for fragment jobs. */
554 	u8 max_fragment_cores;
555 
556 	/** @max_tiler_cores: Maximum number of tiler cores used for tiler jobs. */
557 	u8 max_tiler_cores;
558 
559 	/** @priority: Group priority (check panthor_csg_priority). */
560 	u8 priority;
561 
562 	/** @blocked_queues: Bitmask reflecting the blocked queues. */
563 	u32 blocked_queues;
564 
565 	/** @idle_queues: Bitmask reflecting the idle queues. */
566 	u32 idle_queues;
567 
568 	/** @fatal_lock: Lock used to protect access to fatal fields. */
569 	spinlock_t fatal_lock;
570 
571 	/** @fatal_queues: Bitmask reflecting the queues that hit a fatal exception. */
572 	u32 fatal_queues;
573 
574 	/** @tiler_oom: Mask of queues that have a tiler OOM event to process. */
575 	atomic_t tiler_oom;
576 
577 	/** @queue_count: Number of queues in this group. */
578 	u32 queue_count;
579 
580 	/** @queues: Queues owned by this group. */
581 	struct panthor_queue *queues[MAX_CS_PER_CSG];
582 
583 	/**
584 	 * @csg_id: ID of the FW group slot.
585 	 *
586 	 * -1 when the group is not scheduled/active.
587 	 */
588 	int csg_id;
589 
590 	/**
591 	 * @destroyed: True when the group has been destroyed.
592 	 *
593 	 * If a group is destroyed it becomes useless: no further jobs can be submitted
594 	 * to its queues. We simply wait for all references to be dropped so we can
595 	 * release the group object.
596 	 */
597 	bool destroyed;
598 
599 	/**
600 	 * @timedout: True when a timeout occurred on any of the queues owned by
601 	 * this group.
602 	 *
603 	 * Timeouts can be reported by drm_sched or by the FW. If a reset is required,
604 	 * and the group can't be suspended, this also leads to a timeout. In any case,
605 	 * any timeout situation is unrecoverable, and the group becomes useless. We
606 	 * simply wait for all references to be dropped so we can release the group
607 	 * object.
608 	 */
609 	bool timedout;
610 
611 	/**
612 	 * @innocent: True when the group becomes unusable because the group suspension
613 	 * failed during a reset.
614 	 *
615 	 * Sometimes the FW is put in a bad state by other groups, causing the group
616 	 * suspension in the reset path to fail. In that case, we consider the
617 	 * group innocent.
618 	 */
619 	bool innocent;
620 
621 	/**
622 	 * @syncobjs: Pool of per-queue synchronization objects.
623 	 *
624 	 * One sync object per queue. The position of the sync object is
625 	 * determined by the queue index.
626 	 */
627 	struct panthor_kernel_bo *syncobjs;
628 
629 	/** @fdinfo: Per-file info exposed through /proc/<process>/fdinfo */
630 	struct {
631 		/** @data: Total sampled values for jobs in queues from this group. */
632 		struct panthor_gpu_usage data;
633 
634 		/**
635 		 * @fdinfo.lock: Spinlock to govern concurrent access from drm file's fdinfo
636 		 * callback and job post-completion processing function
637 		 */
638 		spinlock_t lock;
639 
640 		/** @fdinfo.kbo_sizes: Aggregate size of private kernel BO's held by the group. */
641 		size_t kbo_sizes;
642 	} fdinfo;
643 
644 	/** @task_info: Info of current->group_leader that created the group. */
645 	struct {
646 		/** @task_info.pid: pid of current->group_leader */
647 		pid_t pid;
648 
649 		/** @task_info.comm: comm of current->group_leader */
650 		char comm[TASK_COMM_LEN];
651 	} task_info;
652 
653 	/** @state: Group state. */
654 	enum panthor_group_state state;
655 
656 	/**
657 	 * @suspend_buf: Suspend buffer.
658 	 *
659 	 * Stores the state of the group and its queues when a group is suspended.
660 	 * Used at resume time to restore the group in its previous state.
661 	 *
662 	 * The size of the suspend buffer is exposed through the FW interface.
663 	 */
664 	struct panthor_kernel_bo *suspend_buf;
665 
666 	/**
667 	 * @protm_suspend_buf: Protection mode suspend buffer.
668 	 *
669 	 * Stores the state of the group and its queues when a group that's in
670 	 * protection mode is suspended.
671 	 *
672 	 * Used at resume time to restore the group in its previous state.
673 	 *
674 	 * The size of the protection mode suspend buffer is exposed through the
675 	 * FW interface.
676 	 */
677 	struct panthor_kernel_bo *protm_suspend_buf;
678 
679 	/** @sync_upd_work: Work used to check/signal job fences. */
680 	struct work_struct sync_upd_work;
681 
682 	/** @tiler_oom_work: Work used to process tiler OOM events happening on this group. */
683 	struct work_struct tiler_oom_work;
684 
685 	/** @term_work: Work used to finish the group termination procedure. */
686 	struct work_struct term_work;
687 
688 	/**
689 	 * @release_work: Work used to release group resources.
690 	 *
691 	 * We need to postpone the group release to avoid a deadlock when
692 	 * the last ref is released in the tick work.
693 	 */
694 	struct work_struct release_work;
695 
696 	/**
697 	 * @run_node: Node used to insert the group in the
698 	 * panthor_scheduler::groups::{runnable,idle} and
699 	 * panthor_scheduler::reset::stopped_groups lists.
700 	 */
701 	struct list_head run_node;
702 
703 	/**
704 	 * @wait_node: Node used to insert the group in the
705 	 * panthor_scheduler::groups::waiting list.
706 	 */
707 	struct list_head wait_node;
708 };
709 
710 struct panthor_job_profiling_data {
711 	struct {
712 		u64 before;
713 		u64 after;
714 	} cycles;
715 
716 	struct {
717 		u64 before;
718 		u64 after;
719 	} time;
720 };
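
/*
 * A minimal sketch of how the before/after samples above translate into
 * per-job costs, assuming the kernel-built call sequence samples the cycle
 * and timestamp counters right before and after the user command stream
 * call. The helper below is hypothetical; the real accounting lives in the
 * job post-processing path.
 */
#if 0	/* Illustrative sketch, not built. */
static void job_profiling_delta_sketch(const struct panthor_job_profiling_data *data,
				       u64 *cycles, u64 *timestamp)
{
	*cycles = data->cycles.after - data->cycles.before;
	*timestamp = data->time.after - data->time.before;
}
#endif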
721 
722 /**
723  * group_queue_work() - Queue a group work
724  * @group: Group to queue the work for.
725  * @wname: Work name.
726  *
727  * Grabs a ref and queues a work item to the scheduler workqueue. If
728  * the work was already queued, we release the reference we grabbed.
729  *
730  * Work callbacks must release the reference we grabbed here.
731  */
732 #define group_queue_work(group, wname) \
733 	do { \
734 		group_get(group); \
735 		if (!queue_work((group)->ptdev->scheduler->wq, &(group)->wname ## _work)) \
736 			group_put(group); \
737 	} while (0)
738 
739 /**
740  * sched_queue_work() - Queue a scheduler work.
741  * @sched: Scheduler object.
742  * @wname: Work name.
743  *
744  * Conditionally queues a scheduler work if no reset is pending/in-progress.
745  */
746 #define sched_queue_work(sched, wname) \
747 	do { \
748 		if (!atomic_read(&(sched)->reset.in_progress) && \
749 		    !panthor_device_reset_is_pending((sched)->ptdev)) \
750 			queue_work((sched)->wq, &(sched)->wname ## _work); \
751 	} while (0)
752 
753 /**
754  * sched_queue_delayed_work() - Queue a scheduler delayed work.
755  * @sched: Scheduler object.
756  * @wname: Work name.
757  * @delay: Work delay in jiffies.
758  *
759  * Conditionally queues a scheduler delayed work if no reset is
760  * pending/in-progress.
761  */
762 #define sched_queue_delayed_work(sched, wname, delay) \
763 	do { \
764 		if (!atomic_read(&sched->reset.in_progress) && \
765 		    !panthor_device_reset_is_pending((sched)->ptdev)) \
766 			mod_delayed_work((sched)->wq, &(sched)->wname ## _work, delay); \
767 	} while (0)
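
/*
 * Usage examples for the three helpers above, as found later in this file:
 *
 *	sched_queue_delayed_work(sched, tick, 0);
 *	group_queue_work(group, sync_upd);
 *	sched_queue_work(ptdev->scheduler, sync_upd);
 *
 * Note that group_queue_work() grabs a group reference that the work
 * handler is expected to release with group_put() when it's done.
 */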
768 
769 /*
770  * We currently set the maximum number of groups per file to an arbitrarily
771  * low value. But this can be updated if we need more.
772  */
773 #define MAX_GROUPS_PER_POOL 128
774 
775 /**
776  * struct panthor_group_pool - Group pool
777  *
778  * Each file gets assigned a group pool.
779  */
780 struct panthor_group_pool {
781 	/** @xa: Xarray used to manage group handles. */
782 	struct xarray xa;
783 };
784 
785 /**
786  * struct panthor_job - Used to manage a GPU job
787  */
788 struct panthor_job {
789 	/** @base: Inherit from drm_sched_job. */
790 	struct drm_sched_job base;
791 
792 	/** @refcount: Reference count. */
793 	struct kref refcount;
794 
795 	/** @group: Group of the queue this job will be pushed to. */
796 	struct panthor_group *group;
797 
798 	/** @queue_idx: Index of the queue inside @group. */
799 	u32 queue_idx;
800 
801 	/** @call_info: Information about the userspace command stream call. */
802 	struct {
803 		/** @start: GPU address of the userspace command stream. */
804 		u64 start;
805 
806 		/** @size: Size of the userspace command stream. */
807 		u32 size;
808 
809 		/**
810 		 * @latest_flush: Flush ID at the time the userspace command
811 		 * stream was built.
812 		 *
813 		 * Needed for the flush reduction mechanism.
814 		 */
815 		u32 latest_flush;
816 	} call_info;
817 
818 	/** @ringbuf: Position of this job in the ring buffer. */
819 	struct {
820 		/** @start: Start offset. */
821 		u64 start;
822 
823 		/** @end: End offset. */
824 		u64 end;
825 	} ringbuf;
826 
827 	/**
828 	 * @node: Used to insert the job in the panthor_queue::fence_ctx::in_flight_jobs
829 	 * list.
830 	 */
831 	struct list_head node;
832 
833 	/** @done_fence: Fence signaled when the job is finished or cancelled. */
834 	struct dma_fence *done_fence;
835 
836 	/** @profiling: Job profiling information. */
837 	struct {
838 		/** @mask: Current device job profiling enablement bitmask. */
839 		u32 mask;
840 
841 		/** @slot: Job index in the profiling slots BO. */
842 		u32 slot;
843 	} profiling;
844 };
845 
846 static void
847 panthor_queue_put_syncwait_obj(struct panthor_queue *queue)
848 {
849 	if (queue->syncwait.kmap) {
850 		struct iosys_map map = IOSYS_MAP_INIT_VADDR(queue->syncwait.kmap);
851 
852 		drm_gem_vunmap(queue->syncwait.obj, &map);
853 		queue->syncwait.kmap = NULL;
854 	}
855 
856 	drm_gem_object_put(queue->syncwait.obj);
857 	queue->syncwait.obj = NULL;
858 }
859 
860 static void *
861 panthor_queue_get_syncwait_obj(struct panthor_group *group, struct panthor_queue *queue)
862 {
863 	struct panthor_device *ptdev = group->ptdev;
864 	struct panthor_gem_object *bo;
865 	struct iosys_map map;
866 	int ret;
867 
868 	if (queue->syncwait.kmap)
869 		return queue->syncwait.kmap + queue->syncwait.offset;
870 
871 	bo = panthor_vm_get_bo_for_va(group->vm,
872 				      queue->syncwait.gpu_va,
873 				      &queue->syncwait.offset);
874 	if (drm_WARN_ON(&ptdev->base, IS_ERR_OR_NULL(bo)))
875 		goto err_put_syncwait_obj;
876 
877 	queue->syncwait.obj = &bo->base.base;
878 	ret = drm_gem_vmap(queue->syncwait.obj, &map);
879 	if (drm_WARN_ON(&ptdev->base, ret))
880 		goto err_put_syncwait_obj;
881 
882 	queue->syncwait.kmap = map.vaddr;
883 	if (drm_WARN_ON(&ptdev->base, !queue->syncwait.kmap))
884 		goto err_put_syncwait_obj;
885 
886 	return queue->syncwait.kmap + queue->syncwait.offset;
887 
888 err_put_syncwait_obj:
889 	panthor_queue_put_syncwait_obj(queue);
890 	return NULL;
891 }
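
/*
 * A minimal sketch of how the syncwait object obtained above can be
 * evaluated, assuming the GT condition means a strictly-greater-than test
 * and the other condition is a less-or-equal test (the exact encoding
 * follows CS_STATUS_WAIT_SYNC_COND_*). The helper below is hypothetical;
 * the real evaluation is done in the sync update path.
 */
#if 0	/* Illustrative sketch, not built. */
static bool queue_syncwait_signaled_sketch(struct panthor_group *group,
					   struct panthor_queue *queue)
{
	void *ptr = panthor_queue_get_syncwait_obj(group, queue);
	u64 value;

	if (!ptr)
		return false;

	if (queue->syncwait.sync64) {
		struct panthor_syncobj_64b *syncobj = ptr;

		value = syncobj->seqno;
	} else {
		struct panthor_syncobj_32b *syncobj = ptr;

		value = syncobj->seqno;
	}

	return queue->syncwait.gt ?
	       value > queue->syncwait.ref :
	       value <= queue->syncwait.ref;
}
#endif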
892 
893 static void group_free_queue(struct panthor_group *group, struct panthor_queue *queue)
894 {
895 	if (IS_ERR_OR_NULL(queue))
896 		return;
897 
898 	if (queue->entity.fence_context)
899 		drm_sched_entity_destroy(&queue->entity);
900 
901 	if (queue->scheduler.ops)
902 		drm_sched_fini(&queue->scheduler);
903 
904 	panthor_queue_put_syncwait_obj(queue);
905 
906 	panthor_kernel_bo_destroy(queue->ringbuf);
907 	panthor_kernel_bo_destroy(queue->iface.mem);
908 	panthor_kernel_bo_destroy(queue->profiling.slots);
909 
910 	/* Release the last_fence we were holding, if any. */
911 	dma_fence_put(queue->fence_ctx.last_fence);
912 
913 	kfree(queue);
914 }
915 
916 static void group_release_work(struct work_struct *work)
917 {
918 	struct panthor_group *group = container_of(work,
919 						   struct panthor_group,
920 						   release_work);
921 	u32 i;
922 
923 	for (i = 0; i < group->queue_count; i++)
924 		group_free_queue(group, group->queues[i]);
925 
926 	panthor_kernel_bo_destroy(group->suspend_buf);
927 	panthor_kernel_bo_destroy(group->protm_suspend_buf);
928 	panthor_kernel_bo_destroy(group->syncobjs);
929 
930 	panthor_vm_put(group->vm);
931 	kfree(group);
932 }
933 
934 static void group_release(struct kref *kref)
935 {
936 	struct panthor_group *group = container_of(kref,
937 						   struct panthor_group,
938 						   refcount);
939 	struct panthor_device *ptdev = group->ptdev;
940 
941 	drm_WARN_ON(&ptdev->base, group->csg_id >= 0);
942 	drm_WARN_ON(&ptdev->base, !list_empty(&group->run_node));
943 	drm_WARN_ON(&ptdev->base, !list_empty(&group->wait_node));
944 
945 	queue_work(panthor_cleanup_wq, &group->release_work);
946 }
947 
948 static void group_put(struct panthor_group *group)
949 {
950 	if (group)
951 		kref_put(&group->refcount, group_release);
952 }
953 
954 static struct panthor_group *
955 group_get(struct panthor_group *group)
956 {
957 	if (group)
958 		kref_get(&group->refcount);
959 
960 	return group;
961 }
962 
963 /**
964  * group_bind_locked() - Bind a group to a group slot
965  * @group: Group.
966  * @csg_id: Slot.
967  *
968  * Return: 0 on success, a negative error code otherwise.
969  */
970 static int
971 group_bind_locked(struct panthor_group *group, u32 csg_id)
972 {
973 	struct panthor_device *ptdev = group->ptdev;
974 	struct panthor_csg_slot *csg_slot;
975 	int ret;
976 
977 	lockdep_assert_held(&ptdev->scheduler->lock);
978 
979 	if (drm_WARN_ON(&ptdev->base, group->csg_id != -1 || csg_id >= MAX_CSGS ||
980 			ptdev->scheduler->csg_slots[csg_id].group))
981 		return -EINVAL;
982 
983 	ret = panthor_vm_active(group->vm);
984 	if (ret)
985 		return ret;
986 
987 	csg_slot = &ptdev->scheduler->csg_slots[csg_id];
988 	group_get(group);
989 	group->csg_id = csg_id;
990 
991 	/* Dummy doorbell allocation: doorbell is assigned to the group and
992 	 * all queues use the same doorbell.
993 	 *
994 	 * TODO: Implement LRU-based doorbell assignment, so the most often
995 	 * updated queues get their own doorbell, thus avoiding useless checks
996 	 * on queues belonging to the same group that are rarely updated.
997 	 */
998 	for (u32 i = 0; i < group->queue_count; i++)
999 		group->queues[i]->doorbell_id = csg_id + 1;
1000 
1001 	csg_slot->group = group;
1002 
1003 	return 0;
1004 }
1005 
1006 /**
1007  * group_unbind_locked() - Unbind a group from a slot.
1008  * @group: Group to unbind.
1009  *
1010  * Return: 0 on success, a negative error code otherwise.
1011  */
1012 static int
1013 group_unbind_locked(struct panthor_group *group)
1014 {
1015 	struct panthor_device *ptdev = group->ptdev;
1016 	struct panthor_csg_slot *slot;
1017 
1018 	lockdep_assert_held(&ptdev->scheduler->lock);
1019 
1020 	if (drm_WARN_ON(&ptdev->base, group->csg_id < 0 || group->csg_id >= MAX_CSGS))
1021 		return -EINVAL;
1022 
1023 	if (drm_WARN_ON(&ptdev->base, group->state == PANTHOR_CS_GROUP_ACTIVE))
1024 		return -EINVAL;
1025 
1026 	slot = &ptdev->scheduler->csg_slots[group->csg_id];
1027 	panthor_vm_idle(group->vm);
1028 	group->csg_id = -1;
1029 
1030 	/* Tiler OOM events will be re-issued next time the group is scheduled. */
1031 	atomic_set(&group->tiler_oom, 0);
1032 	cancel_work(&group->tiler_oom_work);
1033 
1034 	for (u32 i = 0; i < group->queue_count; i++)
1035 		group->queues[i]->doorbell_id = -1;
1036 
1037 	slot->group = NULL;
1038 
1039 	group_put(group);
1040 	return 0;
1041 }
1042 
1043 /**
1044  * cs_slot_prog_locked() - Program a queue slot
1045  * @ptdev: Device.
1046  * @csg_id: Group slot ID.
1047  * @cs_id: Queue slot ID.
1048  *
1049  * Program a queue slot with the queue information so things can start being
1050  * executed on this queue.
1051  *
1052  * The group slot must have a group bound to it already (group_bind_locked()).
1053  */
1054 static void
1055 cs_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
1056 {
1057 	struct panthor_queue *queue = ptdev->scheduler->csg_slots[csg_id].group->queues[cs_id];
1058 	struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1059 
1060 	lockdep_assert_held(&ptdev->scheduler->lock);
1061 
1062 	queue->iface.input->extract = queue->iface.output->extract;
1063 	drm_WARN_ON(&ptdev->base, queue->iface.input->insert < queue->iface.input->extract);
1064 
1065 	cs_iface->input->ringbuf_base = panthor_kernel_bo_gpuva(queue->ringbuf);
1066 	cs_iface->input->ringbuf_size = panthor_kernel_bo_size(queue->ringbuf);
1067 	cs_iface->input->ringbuf_input = queue->iface.input_fw_va;
1068 	cs_iface->input->ringbuf_output = queue->iface.output_fw_va;
1069 	cs_iface->input->config = CS_CONFIG_PRIORITY(queue->priority) |
1070 				  CS_CONFIG_DOORBELL(queue->doorbell_id);
1071 	cs_iface->input->ack_irq_mask = ~0;
1072 	panthor_fw_update_reqs(cs_iface, req,
1073 			       CS_IDLE_SYNC_WAIT |
1074 			       CS_IDLE_EMPTY |
1075 			       CS_STATE_START |
1076 			       CS_EXTRACT_EVENT,
1077 			       CS_IDLE_SYNC_WAIT |
1078 			       CS_IDLE_EMPTY |
1079 			       CS_STATE_MASK |
1080 			       CS_EXTRACT_EVENT);
1081 	if (queue->iface.input->insert != queue->iface.input->extract && queue->timeout_suspended) {
1082 		drm_sched_resume_timeout(&queue->scheduler, queue->remaining_time);
1083 		queue->timeout_suspended = false;
1084 	}
1085 }
1086 
1087 /**
1088  * cs_slot_reset_locked() - Reset a queue slot
1089  * @ptdev: Device.
1090  * @csg_id: Group slot.
1091  * @cs_id: Queue slot.
1092  *
1093  * Change the queue slot state to STOP and suspend the queue timeout if
1094  * the queue is not blocked.
1095  *
1096  * The group slot must have a group bound to it (group_bind_locked()).
1097  */
1098 static int
1099 cs_slot_reset_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
1100 {
1101 	struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1102 	struct panthor_group *group = ptdev->scheduler->csg_slots[csg_id].group;
1103 	struct panthor_queue *queue = group->queues[cs_id];
1104 
1105 	lockdep_assert_held(&ptdev->scheduler->lock);
1106 
1107 	panthor_fw_update_reqs(cs_iface, req,
1108 			       CS_STATE_STOP,
1109 			       CS_STATE_MASK);
1110 
1111 	/* If the queue is blocked, we want to keep the timeout running, so
1112 	 * we can detect unbounded waits and kill the group when that happens.
1113 	 */
1114 	if (!(group->blocked_queues & BIT(cs_id)) && !queue->timeout_suspended) {
1115 		queue->remaining_time = drm_sched_suspend_timeout(&queue->scheduler);
1116 		queue->timeout_suspended = true;
1117 		WARN_ON(queue->remaining_time > msecs_to_jiffies(JOB_TIMEOUT_MS));
1118 	}
1119 
1120 	return 0;
1121 }
1122 
1123 /**
1124  * csg_slot_sync_priority_locked() - Synchronize the group slot priority
1125  * @ptdev: Device.
1126  * @csg_id: Group slot ID.
1127  *
1128  * Group slot priority update happens asynchronously. When we receive a
1129  * %CSG_ENDPOINT_CONFIG, we know the update is effective, and can
1130  * reflect it to our panthor_csg_slot object.
1131  */
1132 static void
1133 csg_slot_sync_priority_locked(struct panthor_device *ptdev, u32 csg_id)
1134 {
1135 	struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1136 	struct panthor_fw_csg_iface *csg_iface;
1137 
1138 	lockdep_assert_held(&ptdev->scheduler->lock);
1139 
1140 	csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1141 	csg_slot->priority = (csg_iface->input->endpoint_req & CSG_EP_REQ_PRIORITY_MASK) >> 28;
1142 }
1143 
1144 /**
1145  * cs_slot_sync_queue_state_locked() - Synchronize the queue slot state
1146  * @ptdev: Device.
1147  * @csg_id: Group slot.
1148  * @cs_id: Queue slot.
1149  *
1150  * Queue state is updated on group suspend or STATUS_UPDATE event.
1151  */
1152 static void
1153 cs_slot_sync_queue_state_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
1154 {
1155 	struct panthor_group *group = ptdev->scheduler->csg_slots[csg_id].group;
1156 	struct panthor_queue *queue = group->queues[cs_id];
1157 	struct panthor_fw_cs_iface *cs_iface =
1158 		panthor_fw_get_cs_iface(group->ptdev, csg_id, cs_id);
1159 
1160 	u32 status_wait_cond;
1161 
1162 	switch (cs_iface->output->status_blocked_reason) {
1163 	case CS_STATUS_BLOCKED_REASON_UNBLOCKED:
1164 		if (queue->iface.input->insert == queue->iface.output->extract &&
1165 		    cs_iface->output->status_scoreboards == 0)
1166 			group->idle_queues |= BIT(cs_id);
1167 		break;
1168 
1169 	case CS_STATUS_BLOCKED_REASON_SYNC_WAIT:
1170 		if (list_empty(&group->wait_node)) {
1171 			list_move_tail(&group->wait_node,
1172 				       &group->ptdev->scheduler->groups.waiting);
1173 		}
1174 
1175 		/* The queue is only blocked if there's no deferred operation
1176 		 * pending, which can be checked through the scoreboard status.
1177 		 */
1178 		if (!cs_iface->output->status_scoreboards)
1179 			group->blocked_queues |= BIT(cs_id);
1180 
1181 		queue->syncwait.gpu_va = cs_iface->output->status_wait_sync_ptr;
1182 		queue->syncwait.ref = cs_iface->output->status_wait_sync_value;
1183 		status_wait_cond = cs_iface->output->status_wait & CS_STATUS_WAIT_SYNC_COND_MASK;
1184 		queue->syncwait.gt = status_wait_cond == CS_STATUS_WAIT_SYNC_COND_GT;
1185 		if (cs_iface->output->status_wait & CS_STATUS_WAIT_SYNC_64B) {
1186 			u64 sync_val_hi = cs_iface->output->status_wait_sync_value_hi;
1187 
1188 			queue->syncwait.sync64 = true;
1189 			queue->syncwait.ref |= sync_val_hi << 32;
1190 		} else {
1191 			queue->syncwait.sync64 = false;
1192 		}
1193 		break;
1194 
1195 	default:
1196 		/* Other reasons are not blocking. Consider the queue as runnable
1197 		 * in those cases.
1198 		 */
1199 		break;
1200 	}
1201 }
1202 
1203 static void
1204 csg_slot_sync_queues_state_locked(struct panthor_device *ptdev, u32 csg_id)
1205 {
1206 	struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1207 	struct panthor_group *group = csg_slot->group;
1208 	u32 i;
1209 
1210 	lockdep_assert_held(&ptdev->scheduler->lock);
1211 
1212 	group->idle_queues = 0;
1213 	group->blocked_queues = 0;
1214 
1215 	for (i = 0; i < group->queue_count; i++) {
1216 		if (group->queues[i])
1217 			cs_slot_sync_queue_state_locked(ptdev, csg_id, i);
1218 	}
1219 }
1220 
1221 static void
1222 csg_slot_sync_state_locked(struct panthor_device *ptdev, u32 csg_id)
1223 {
1224 	struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1225 	struct panthor_fw_csg_iface *csg_iface;
1226 	struct panthor_group *group;
1227 	enum panthor_group_state new_state, old_state;
1228 	u32 csg_state;
1229 
1230 	lockdep_assert_held(&ptdev->scheduler->lock);
1231 
1232 	csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1233 	group = csg_slot->group;
1234 
1235 	if (!group)
1236 		return;
1237 
1238 	old_state = group->state;
1239 	csg_state = csg_iface->output->ack & CSG_STATE_MASK;
1240 	switch (csg_state) {
1241 	case CSG_STATE_START:
1242 	case CSG_STATE_RESUME:
1243 		new_state = PANTHOR_CS_GROUP_ACTIVE;
1244 		break;
1245 	case CSG_STATE_TERMINATE:
1246 		new_state = PANTHOR_CS_GROUP_TERMINATED;
1247 		break;
1248 	case CSG_STATE_SUSPEND:
1249 		new_state = PANTHOR_CS_GROUP_SUSPENDED;
1250 		break;
1251 	default:
1252 		/* The unknown state might be caused by a FW state corruption,
1253 		 * which means the group metadata can't be trusted anymore, and
1254 		 * the SUSPEND operation might propagate the corruption to the
1255 		 * suspend buffers. Flag the group state as unknown to make
1256 		 * sure it's unusable after that point.
1257 		 */
1258 		drm_err(&ptdev->base, "Invalid state on CSG %d (state=%d)",
1259 			csg_id, csg_state);
1260 		new_state = PANTHOR_CS_GROUP_UNKNOWN_STATE;
1261 		break;
1262 	}
1263 
1264 	if (old_state == new_state)
1265 		return;
1266 
1267 	/* The unknown state might be caused by a FW issue, so reset the FW
1268 	 * to start from a fresh state.
1269 	 */
1270 	if (new_state == PANTHOR_CS_GROUP_UNKNOWN_STATE)
1271 		panthor_device_schedule_reset(ptdev);
1272 
1273 	if (new_state == PANTHOR_CS_GROUP_SUSPENDED)
1274 		csg_slot_sync_queues_state_locked(ptdev, csg_id);
1275 
1276 	if (old_state == PANTHOR_CS_GROUP_ACTIVE) {
1277 		u32 i;
1278 
1279 		/* Reset the queue slots so we start from a clean
1280 		 * state when starting/resuming a new group on this
1281 		 * CSG slot. No wait needed here, and no doorbell
1282 		 * ring either, since the CS slot will only be re-used
1283 		 * on the next CSG start operation.
1284 		 */
1285 		for (i = 0; i < group->queue_count; i++) {
1286 			if (group->queues[i])
1287 				cs_slot_reset_locked(ptdev, csg_id, i);
1288 		}
1289 	}
1290 
1291 	group->state = new_state;
1292 }
1293 
1294 static int
1295 csg_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 priority)
1296 {
1297 	struct panthor_fw_csg_iface *csg_iface;
1298 	struct panthor_csg_slot *csg_slot;
1299 	struct panthor_group *group;
1300 	u32 queue_mask = 0, i;
1301 
1302 	lockdep_assert_held(&ptdev->scheduler->lock);
1303 
1304 	if (priority > MAX_CSG_PRIO)
1305 		return -EINVAL;
1306 
1307 	if (drm_WARN_ON(&ptdev->base, csg_id >= MAX_CSGS))
1308 		return -EINVAL;
1309 
1310 	csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1311 	group = csg_slot->group;
1312 	if (!group || group->state == PANTHOR_CS_GROUP_ACTIVE)
1313 		return 0;
1314 
1315 	csg_iface = panthor_fw_get_csg_iface(group->ptdev, csg_id);
1316 
1317 	for (i = 0; i < group->queue_count; i++) {
1318 		if (group->queues[i]) {
1319 			cs_slot_prog_locked(ptdev, csg_id, i);
1320 			queue_mask |= BIT(i);
1321 		}
1322 	}
1323 
1324 	csg_iface->input->allow_compute = group->compute_core_mask;
1325 	csg_iface->input->allow_fragment = group->fragment_core_mask;
1326 	csg_iface->input->allow_other = group->tiler_core_mask;
1327 	csg_iface->input->endpoint_req = CSG_EP_REQ_COMPUTE(group->max_compute_cores) |
1328 					 CSG_EP_REQ_FRAGMENT(group->max_fragment_cores) |
1329 					 CSG_EP_REQ_TILER(group->max_tiler_cores) |
1330 					 CSG_EP_REQ_PRIORITY(priority);
1331 	csg_iface->input->config = panthor_vm_as(group->vm);
1332 
1333 	if (group->suspend_buf)
1334 		csg_iface->input->suspend_buf = panthor_kernel_bo_gpuva(group->suspend_buf);
1335 	else
1336 		csg_iface->input->suspend_buf = 0;
1337 
1338 	if (group->protm_suspend_buf) {
1339 		csg_iface->input->protm_suspend_buf =
1340 			panthor_kernel_bo_gpuva(group->protm_suspend_buf);
1341 	} else {
1342 		csg_iface->input->protm_suspend_buf = 0;
1343 	}
1344 
1345 	csg_iface->input->ack_irq_mask = ~0;
1346 	panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, queue_mask);
1347 	return 0;
1348 }
1349 
1350 static void
1351 cs_slot_process_fatal_event_locked(struct panthor_device *ptdev,
1352 				   u32 csg_id, u32 cs_id)
1353 {
1354 	struct panthor_scheduler *sched = ptdev->scheduler;
1355 	struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
1356 	struct panthor_group *group = csg_slot->group;
1357 	struct panthor_fw_cs_iface *cs_iface;
1358 	u32 fatal;
1359 	u64 info;
1360 
1361 	lockdep_assert_held(&sched->lock);
1362 
1363 	cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1364 	fatal = cs_iface->output->fatal;
1365 	info = cs_iface->output->fatal_info;
1366 
1367 	if (group) {
1368 		drm_warn(&ptdev->base, "CS_FATAL: pid=%d, comm=%s\n",
1369 			 group->task_info.pid, group->task_info.comm);
1370 
1371 		group->fatal_queues |= BIT(cs_id);
1372 	}
1373 
1374 	if (CS_EXCEPTION_TYPE(fatal) == DRM_PANTHOR_EXCEPTION_CS_UNRECOVERABLE) {
1375 		/* If this exception is unrecoverable, queue a reset, and make
1376 		 * sure we stop scheduling groups until the reset has happened.
1377 		 */
1378 		panthor_device_schedule_reset(ptdev);
1379 		cancel_delayed_work(&sched->tick_work);
1380 	} else {
1381 		sched_queue_delayed_work(sched, tick, 0);
1382 	}
1383 
1384 	drm_warn(&ptdev->base,
1385 		 "CSG slot %d CS slot: %d\n"
1386 		 "CS_FATAL.EXCEPTION_TYPE: 0x%x (%s)\n"
1387 		 "CS_FATAL.EXCEPTION_DATA: 0x%x\n"
1388 		 "CS_FATAL_INFO.EXCEPTION_DATA: 0x%llx\n",
1389 		 csg_id, cs_id,
1390 		 (unsigned int)CS_EXCEPTION_TYPE(fatal),
1391 		 panthor_exception_name(ptdev, CS_EXCEPTION_TYPE(fatal)),
1392 		 (unsigned int)CS_EXCEPTION_DATA(fatal),
1393 		 info);
1394 }
1395 
1396 static void
1397 cs_slot_process_fault_event_locked(struct panthor_device *ptdev,
1398 				   u32 csg_id, u32 cs_id)
1399 {
1400 	struct panthor_scheduler *sched = ptdev->scheduler;
1401 	struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
1402 	struct panthor_group *group = csg_slot->group;
1403 	struct panthor_queue *queue = group && cs_id < group->queue_count ?
1404 				      group->queues[cs_id] : NULL;
1405 	struct panthor_fw_cs_iface *cs_iface;
1406 	u32 fault;
1407 	u64 info;
1408 
1409 	lockdep_assert_held(&sched->lock);
1410 
1411 	cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1412 	fault = cs_iface->output->fault;
1413 	info = cs_iface->output->fault_info;
1414 
1415 	if (queue && CS_EXCEPTION_TYPE(fault) == DRM_PANTHOR_EXCEPTION_CS_INHERIT_FAULT) {
1416 		u64 cs_extract = queue->iface.output->extract;
1417 		struct panthor_job *job;
1418 
1419 		spin_lock(&queue->fence_ctx.lock);
1420 		list_for_each_entry(job, &queue->fence_ctx.in_flight_jobs, node) {
1421 			if (cs_extract >= job->ringbuf.end)
1422 				continue;
1423 
1424 			if (cs_extract < job->ringbuf.start)
1425 				break;
1426 
1427 			dma_fence_set_error(job->done_fence, -EINVAL);
1428 		}
1429 		spin_unlock(&queue->fence_ctx.lock);
1430 	}
1431 
1432 	if (group) {
1433 		drm_warn(&ptdev->base, "CS_FAULT: pid=%d, comm=%s\n",
1434 			 group->task_info.pid, group->task_info.comm);
1435 	}
1436 
1437 	drm_warn(&ptdev->base,
1438 		 "CSG slot %d CS slot: %d\n"
1439 		 "CS_FAULT.EXCEPTION_TYPE: 0x%x (%s)\n"
1440 		 "CS_FAULT.EXCEPTION_DATA: 0x%x\n"
1441 		 "CS_FAULT_INFO.EXCEPTION_DATA: 0x%llx\n",
1442 		 csg_id, cs_id,
1443 		 (unsigned int)CS_EXCEPTION_TYPE(fault),
1444 		 panthor_exception_name(ptdev, CS_EXCEPTION_TYPE(fault)),
1445 		 (unsigned int)CS_EXCEPTION_DATA(fault),
1446 		 info);
1447 }
1448 
1449 static int group_process_tiler_oom(struct panthor_group *group, u32 cs_id)
1450 {
1451 	struct panthor_device *ptdev = group->ptdev;
1452 	struct panthor_scheduler *sched = ptdev->scheduler;
1453 	u32 renderpasses_in_flight, pending_frag_count;
1454 	struct panthor_heap_pool *heaps = NULL;
1455 	u64 heap_address, new_chunk_va = 0;
1456 	u32 vt_start, vt_end, frag_end;
1457 	int ret, csg_id;
1458 
1459 	mutex_lock(&sched->lock);
1460 	csg_id = group->csg_id;
1461 	if (csg_id >= 0) {
1462 		struct panthor_fw_cs_iface *cs_iface;
1463 
1464 		cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1465 		heaps = panthor_vm_get_heap_pool(group->vm, false);
1466 		heap_address = cs_iface->output->heap_address;
1467 		vt_start = cs_iface->output->heap_vt_start;
1468 		vt_end = cs_iface->output->heap_vt_end;
1469 		frag_end = cs_iface->output->heap_frag_end;
1470 		renderpasses_in_flight = vt_start - frag_end;
1471 		pending_frag_count = vt_end - frag_end;
1472 	}
1473 	mutex_unlock(&sched->lock);
1474 
1475 	/* The group got scheduled out, so we stop here. We will get a new tiler
1476 	 * OOM event when it's scheduled again.
1477 	 */
1478 	if (unlikely(csg_id < 0))
1479 		return 0;
1480 
1481 	if (IS_ERR(heaps) || frag_end > vt_end || vt_end >= vt_start) {
1482 		ret = -EINVAL;
1483 	} else {
1484 		/* We do the allocation without holding the scheduler lock to avoid
1485 		 * blocking the scheduling.
1486 		 */
1487 		ret = panthor_heap_grow(heaps, heap_address,
1488 					renderpasses_in_flight,
1489 					pending_frag_count, &new_chunk_va);
1490 	}
1491 
1492 	/* If the heap context doesn't have memory for us, we want to let the
1493 	 * FW try to reclaim memory by waiting for fragment jobs to land or by
1494 	 * executing the tiler OOM exception handler, which is supposed to
1495 	 * implement incremental rendering.
1496 	 */
1497 	if (ret && ret != -ENOMEM) {
1498 		drm_warn(&ptdev->base, "Failed to extend the tiler heap\n");
1499 		group->fatal_queues |= BIT(cs_id);
1500 		sched_queue_delayed_work(sched, tick, 0);
1501 		goto out_put_heap_pool;
1502 	}
1503 
1504 	mutex_lock(&sched->lock);
1505 	csg_id = group->csg_id;
1506 	if (csg_id >= 0) {
1507 		struct panthor_fw_csg_iface *csg_iface;
1508 		struct panthor_fw_cs_iface *cs_iface;
1509 
1510 		csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1511 		cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1512 
1513 		cs_iface->input->heap_start = new_chunk_va;
1514 		cs_iface->input->heap_end = new_chunk_va;
1515 		panthor_fw_update_reqs(cs_iface, req, cs_iface->output->ack, CS_TILER_OOM);
1516 		panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, BIT(cs_id));
1517 		panthor_fw_ring_csg_doorbells(ptdev, BIT(csg_id));
1518 	}
1519 	mutex_unlock(&sched->lock);
1520 
1521 	/* We allocated a chunk, but couldn't link it to the heap
1522 	 * context because the group was scheduled out while we were
1523 	 * allocating memory. We need to return this chunk to the heap.
1524 	 */
1525 	if (unlikely(csg_id < 0 && new_chunk_va))
1526 		panthor_heap_return_chunk(heaps, heap_address, new_chunk_va);
1527 
1528 	ret = 0;
1529 
1530 out_put_heap_pool:
1531 	panthor_heap_pool_put(heaps);
1532 	return ret;
1533 }
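
/*
 * Worked example for the heap counters used above: with heap_vt_start = 10,
 * heap_vt_end = 8 and heap_frag_end = 5, there are 10 - 5 = 5 render passes
 * in flight, 8 - 5 = 3 of which have finished their vertex/tiler part and
 * are still waiting on fragment completion. Both counts are handed to
 * panthor_heap_grow() together with the heap address so it can decide how
 * to grow the tiler heap.
 */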
1534 
1535 static void group_tiler_oom_work(struct work_struct *work)
1536 {
1537 	struct panthor_group *group =
1538 		container_of(work, struct panthor_group, tiler_oom_work);
1539 	u32 tiler_oom = atomic_xchg(&group->tiler_oom, 0);
1540 
1541 	while (tiler_oom) {
1542 		u32 cs_id = ffs(tiler_oom) - 1;
1543 
1544 		group_process_tiler_oom(group, cs_id);
1545 		tiler_oom &= ~BIT(cs_id);
1546 	}
1547 
1548 	group_put(group);
1549 }
1550 
1551 static void
1552 cs_slot_process_tiler_oom_event_locked(struct panthor_device *ptdev,
1553 				       u32 csg_id, u32 cs_id)
1554 {
1555 	struct panthor_scheduler *sched = ptdev->scheduler;
1556 	struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
1557 	struct panthor_group *group = csg_slot->group;
1558 
1559 	lockdep_assert_held(&sched->lock);
1560 
1561 	if (drm_WARN_ON(&ptdev->base, !group))
1562 		return;
1563 
1564 	atomic_or(BIT(cs_id), &group->tiler_oom);
1565 
1566 	/* We don't use group_queue_work() here because we want to queue the
1567 	 * work item to the heap_alloc_wq.
1568 	 */
1569 	group_get(group);
1570 	if (!queue_work(sched->heap_alloc_wq, &group->tiler_oom_work))
1571 		group_put(group);
1572 }
1573 
1574 static bool cs_slot_process_irq_locked(struct panthor_device *ptdev,
1575 				       u32 csg_id, u32 cs_id)
1576 {
1577 	struct panthor_fw_cs_iface *cs_iface;
1578 	u32 req, ack, events;
1579 
1580 	lockdep_assert_held(&ptdev->scheduler->lock);
1581 
1582 	cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1583 	req = cs_iface->input->req;
1584 	ack = cs_iface->output->ack;
1585 	events = (req ^ ack) & CS_EVT_MASK;
1586 
1587 	if (events & CS_FATAL)
1588 		cs_slot_process_fatal_event_locked(ptdev, csg_id, cs_id);
1589 
1590 	if (events & CS_FAULT)
1591 		cs_slot_process_fault_event_locked(ptdev, csg_id, cs_id);
1592 
1593 	if (events & CS_TILER_OOM)
1594 		cs_slot_process_tiler_oom_event_locked(ptdev, csg_id, cs_id);
1595 
1596 	/* We don't acknowledge the TILER_OOM event since its handling is
1597 	 * deferred to a separate work.
1598 	 */
1599 	panthor_fw_update_reqs(cs_iface, req, ack, CS_FATAL | CS_FAULT);
1600 
1601 	return (events & (CS_FAULT | CS_TILER_OOM)) != 0;
1602 }
1603 
1604 static void csg_slot_sync_idle_state_locked(struct panthor_device *ptdev, u32 csg_id)
1605 {
1606 	struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1607 	struct panthor_fw_csg_iface *csg_iface;
1608 
1609 	lockdep_assert_held(&ptdev->scheduler->lock);
1610 
1611 	csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1612 	csg_slot->idle = csg_iface->output->status_state & CSG_STATUS_STATE_IS_IDLE;
1613 }
1614 
1615 static void csg_slot_process_idle_event_locked(struct panthor_device *ptdev, u32 csg_id)
1616 {
1617 	struct panthor_scheduler *sched = ptdev->scheduler;
1618 
1619 	lockdep_assert_held(&sched->lock);
1620 
1621 	sched->might_have_idle_groups = true;
1622 
1623 	/* Schedule a tick so we can evict idle groups and schedule non-idle
1624 	 * ones. This will also update runtime PM and devfreq busy/idle states,
1625 	 * so the device can lower its frequency or get suspended.
1626 	 */
1627 	sched_queue_delayed_work(sched, tick, 0);
1628 }
1629 
1630 static void csg_slot_sync_update_locked(struct panthor_device *ptdev,
1631 					u32 csg_id)
1632 {
1633 	struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1634 	struct panthor_group *group = csg_slot->group;
1635 
1636 	lockdep_assert_held(&ptdev->scheduler->lock);
1637 
1638 	if (group)
1639 		group_queue_work(group, sync_upd);
1640 
1641 	sched_queue_work(ptdev->scheduler, sync_upd);
1642 }
1643 
1644 static void
1645 csg_slot_process_progress_timer_event_locked(struct panthor_device *ptdev, u32 csg_id)
1646 {
1647 	struct panthor_scheduler *sched = ptdev->scheduler;
1648 	struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
1649 	struct panthor_group *group = csg_slot->group;
1650 
1651 	lockdep_assert_held(&sched->lock);
1652 
1653 	group = csg_slot->group;
1654 	if (!drm_WARN_ON(&ptdev->base, !group)) {
1655 		drm_warn(&ptdev->base, "CSG_PROGRESS_TIMER_EVENT: pid=%d, comm=%s\n",
1656 			 group->task_info.pid, group->task_info.comm);
1657 
1658 		group->timedout = true;
1659 	}
1660 
1661 	drm_warn(&ptdev->base, "CSG slot %d progress timeout\n", csg_id);
1662 
1663 	sched_queue_delayed_work(sched, tick, 0);
1664 }
1665 
1666 static void sched_process_csg_irq_locked(struct panthor_device *ptdev, u32 csg_id)
1667 {
1668 	u32 req, ack, cs_irq_req, cs_irq_ack, cs_irqs, csg_events;
1669 	struct panthor_fw_csg_iface *csg_iface;
1670 	u32 ring_cs_db_mask = 0;
1671 
1672 	lockdep_assert_held(&ptdev->scheduler->lock);
1673 
1674 	if (drm_WARN_ON(&ptdev->base, csg_id >= ptdev->scheduler->csg_slot_count))
1675 		return;
1676 
1677 	csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1678 	req = READ_ONCE(csg_iface->input->req);
1679 	ack = READ_ONCE(csg_iface->output->ack);
1680 	cs_irq_req = READ_ONCE(csg_iface->output->cs_irq_req);
1681 	cs_irq_ack = READ_ONCE(csg_iface->input->cs_irq_ack);
1682 	csg_events = (req ^ ack) & CSG_EVT_MASK;
1683 
1684 	/* There may not be any pending CSG/CS interrupts to process */
1685 	if (req == ack && cs_irq_req == cs_irq_ack)
1686 		return;
1687 
1688 	/* Immediately set IRQ_ACK bits to be same as the IRQ_REQ bits before
1689 	 * examining the CS_ACK & CS_REQ bits. This would ensure that Host
1690 	 * doesn't miss an interrupt for the CS in the race scenario where
1691 	 * whilst Host is servicing an interrupt for the CS, firmware sends
1692 	 * another interrupt for that CS.
1693 	 */
1694 	csg_iface->input->cs_irq_ack = cs_irq_req;
1695 
1696 	panthor_fw_update_reqs(csg_iface, req, ack,
1697 			       CSG_SYNC_UPDATE |
1698 			       CSG_IDLE |
1699 			       CSG_PROGRESS_TIMER_EVENT);
1700 
1701 	if (csg_events & CSG_IDLE)
1702 		csg_slot_process_idle_event_locked(ptdev, csg_id);
1703 
1704 	if (csg_events & CSG_PROGRESS_TIMER_EVENT)
1705 		csg_slot_process_progress_timer_event_locked(ptdev, csg_id);
1706 
1707 	cs_irqs = cs_irq_req ^ cs_irq_ack;
1708 	while (cs_irqs) {
1709 		u32 cs_id = ffs(cs_irqs) - 1;
1710 
1711 		if (cs_slot_process_irq_locked(ptdev, csg_id, cs_id))
1712 			ring_cs_db_mask |= BIT(cs_id);
1713 
1714 		cs_irqs &= ~BIT(cs_id);
1715 	}
1716 
1717 	if (csg_events & CSG_SYNC_UPDATE)
1718 		csg_slot_sync_update_locked(ptdev, csg_id);
1719 
1720 	if (ring_cs_db_mask)
1721 		panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, ring_cs_db_mask);
1722 
1723 	panthor_fw_ring_csg_doorbells(ptdev, BIT(csg_id));
1724 }
1725 
1726 static void sched_process_idle_event_locked(struct panthor_device *ptdev)
1727 {
1728 	struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
1729 
1730 	lockdep_assert_held(&ptdev->scheduler->lock);
1731 
1732 	/* Acknowledge the idle event and schedule a tick. */
1733 	panthor_fw_update_reqs(glb_iface, req, glb_iface->output->ack, GLB_IDLE);
1734 	sched_queue_delayed_work(ptdev->scheduler, tick, 0);
1735 }
1736 
1737 /**
1738  * sched_process_global_irq_locked() - Process the scheduling part of a global IRQ
1739  * @ptdev: Device.
1740  */
1741 static void sched_process_global_irq_locked(struct panthor_device *ptdev)
1742 {
1743 	struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
1744 	u32 req, ack, evts;
1745 
1746 	lockdep_assert_held(&ptdev->scheduler->lock);
1747 
1748 	req = READ_ONCE(glb_iface->input->req);
1749 	ack = READ_ONCE(glb_iface->output->ack);
1750 	evts = (req ^ ack) & GLB_EVT_MASK;
1751 
1752 	if (evts & GLB_IDLE)
1753 		sched_process_idle_event_locked(ptdev);
1754 }
1755 
1756 static void process_fw_events_work(struct work_struct *work)
1757 {
1758 	struct panthor_scheduler *sched = container_of(work, struct panthor_scheduler,
1759 						      fw_events_work);
1760 	u32 events = atomic_xchg(&sched->fw_events, 0);
1761 	struct panthor_device *ptdev = sched->ptdev;
1762 
1763 	mutex_lock(&sched->lock);
1764 
1765 	if (events & JOB_INT_GLOBAL_IF) {
1766 		sched_process_global_irq_locked(ptdev);
1767 		events &= ~JOB_INT_GLOBAL_IF;
1768 	}
1769 
1770 	while (events) {
1771 		u32 csg_id = ffs(events) - 1;
1772 
1773 		sched_process_csg_irq_locked(ptdev, csg_id);
1774 		events &= ~BIT(csg_id);
1775 	}
1776 
1777 	mutex_unlock(&sched->lock);
1778 }
1779 
1780 /**
1781  * panthor_sched_report_fw_events() - Report FW events to the scheduler.
 * @ptdev: Device.
 * @events: Bitmask of events to process (JOB_INT_GLOBAL_IF and/or per-CSG bits).
1782  */
1783 void panthor_sched_report_fw_events(struct panthor_device *ptdev, u32 events)
1784 {
1785 	if (!ptdev->scheduler)
1786 		return;
1787 
1788 	atomic_or(events, &ptdev->scheduler->fw_events);
1789 	sched_queue_work(ptdev->scheduler, fw_events);
1790 }
1791 
1792 static const char *fence_get_driver_name(struct dma_fence *fence)
1793 {
1794 	return "panthor";
1795 }
1796 
1797 static const char *queue_fence_get_timeline_name(struct dma_fence *fence)
1798 {
1799 	return "queue-fence";
1800 }
1801 
1802 static const struct dma_fence_ops panthor_queue_fence_ops = {
1803 	.get_driver_name = fence_get_driver_name,
1804 	.get_timeline_name = queue_fence_get_timeline_name,
1805 };
1806 
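/* CSG slot updates are batched through a panthor_csg_slots_upd_ctx. A caller
 * typically does something like:
 *
 *	csgs_upd_ctx_init(&upd_ctx);
 *	csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id, value, mask);
 *	...
 *	ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
 *
 * so that the doorbells of all updated slots are only rung once, and the
 * acks are waited for in a second pass.
 */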
1807 struct panthor_csg_slots_upd_ctx {
1808 	u32 update_mask;
1809 	u32 timedout_mask;
1810 	struct {
1811 		u32 value;
1812 		u32 mask;
1813 	} requests[MAX_CSGS];
1814 };
1815 
1816 static void csgs_upd_ctx_init(struct panthor_csg_slots_upd_ctx *ctx)
1817 {
1818 	memset(ctx, 0, sizeof(*ctx));
1819 }
1820 
1821 static void csgs_upd_ctx_queue_reqs(struct panthor_device *ptdev,
1822 				    struct panthor_csg_slots_upd_ctx *ctx,
1823 				    u32 csg_id, u32 value, u32 mask)
1824 {
1825 	if (drm_WARN_ON(&ptdev->base, !mask) ||
1826 	    drm_WARN_ON(&ptdev->base, csg_id >= ptdev->scheduler->csg_slot_count))
1827 		return;
1828 
1829 	ctx->requests[csg_id].value = (ctx->requests[csg_id].value & ~mask) | (value & mask);
1830 	ctx->requests[csg_id].mask |= mask;
1831 	ctx->update_mask |= BIT(csg_id);
1832 }
1833 
1834 static int csgs_upd_ctx_apply_locked(struct panthor_device *ptdev,
1835 				     struct panthor_csg_slots_upd_ctx *ctx)
1836 {
1837 	struct panthor_scheduler *sched = ptdev->scheduler;
1838 	u32 update_slots = ctx->update_mask;
1839 
1840 	lockdep_assert_held(&sched->lock);
1841 
1842 	if (!ctx->update_mask)
1843 		return 0;
1844 
1845 	while (update_slots) {
1846 		struct panthor_fw_csg_iface *csg_iface;
1847 		u32 csg_id = ffs(update_slots) - 1;
1848 
1849 		update_slots &= ~BIT(csg_id);
1850 		csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1851 		panthor_fw_update_reqs(csg_iface, req,
1852 				       ctx->requests[csg_id].value,
1853 				       ctx->requests[csg_id].mask);
1854 	}
1855 
1856 	panthor_fw_ring_csg_doorbells(ptdev, ctx->update_mask);
1857 
1858 	update_slots = ctx->update_mask;
1859 	while (update_slots) {
1860 		struct panthor_fw_csg_iface *csg_iface;
1861 		u32 csg_id = ffs(update_slots) - 1;
1862 		u32 req_mask = ctx->requests[csg_id].mask, acked;
1863 		int ret;
1864 
1865 		update_slots &= ~BIT(csg_id);
1866 		csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1867 
1868 		ret = panthor_fw_csg_wait_acks(ptdev, csg_id, req_mask, &acked, 100);
1869 
1870 		if (acked & CSG_ENDPOINT_CONFIG)
1871 			csg_slot_sync_priority_locked(ptdev, csg_id);
1872 
1873 		if (acked & CSG_STATE_MASK)
1874 			csg_slot_sync_state_locked(ptdev, csg_id);
1875 
1876 		if (acked & CSG_STATUS_UPDATE) {
1877 			csg_slot_sync_queues_state_locked(ptdev, csg_id);
1878 			csg_slot_sync_idle_state_locked(ptdev, csg_id);
1879 		}
1880 
1881 		if (ret && acked != req_mask &&
1882 		    ((csg_iface->input->req ^ csg_iface->output->ack) & req_mask) != 0) {
1883 			drm_err(&ptdev->base, "CSG %d update request timed out", csg_id);
1884 			ctx->timedout_mask |= BIT(csg_id);
1885 		}
1886 	}
1887 
1888 	if (ctx->timedout_mask)
1889 		return -ETIMEDOUT;
1890 
1891 	return 0;
1892 }
1893 
1894 struct panthor_sched_tick_ctx {
1895 	struct list_head old_groups[PANTHOR_CSG_PRIORITY_COUNT];
1896 	struct list_head groups[PANTHOR_CSG_PRIORITY_COUNT];
1897 	u32 idle_group_count;
1898 	u32 group_count;
1899 	enum panthor_csg_priority min_priority;
1900 	struct panthor_vm *vms[MAX_CS_PER_CSG];
1901 	u32 as_count;
1902 	bool immediate_tick;
1903 	u32 csg_upd_failed_mask;
1904 };
1905 
1906 static bool
1907 tick_ctx_is_full(const struct panthor_scheduler *sched,
1908 		 const struct panthor_sched_tick_ctx *ctx)
1909 {
1910 	return ctx->group_count == sched->csg_slot_count;
1911 }
1912 
1913 static bool
1914 group_is_idle(struct panthor_group *group)
1915 {
1916 	struct panthor_device *ptdev = group->ptdev;
1917 	u32 inactive_queues;
1918 
1919 	if (group->csg_id >= 0)
1920 		return ptdev->scheduler->csg_slots[group->csg_id].idle;
1921 
1922 	inactive_queues = group->idle_queues | group->blocked_queues;
1923 	return hweight32(inactive_queues) == group->queue_count;
1924 }
1925 
1926 static bool
1927 group_can_run(struct panthor_group *group)
1928 {
1929 	return group->state != PANTHOR_CS_GROUP_TERMINATED &&
1930 	       group->state != PANTHOR_CS_GROUP_UNKNOWN_STATE &&
1931 	       !group->destroyed && group->fatal_queues == 0 &&
1932 	       !group->timedout;
1933 }
1934 
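/* Pick groups from a list until the tick context is full. A group is skipped
 * if it can't run, if it's idle and skip_idle_groups is set, or if picking it
 * would require more address spaces than the GPU has: e.g. once ctx->vms[]
 * holds as many distinct VMs as there are AS slots, a group using yet
 * another VM is skipped, while a group sharing one of those VMs can still be
 * picked.
 */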
1935 static void
1936 tick_ctx_pick_groups_from_list(const struct panthor_scheduler *sched,
1937 			       struct panthor_sched_tick_ctx *ctx,
1938 			       struct list_head *queue,
1939 			       bool skip_idle_groups,
1940 			       bool owned_by_tick_ctx)
1941 {
1942 	struct panthor_group *group, *tmp;
1943 
1944 	if (tick_ctx_is_full(sched, ctx))
1945 		return;
1946 
1947 	list_for_each_entry_safe(group, tmp, queue, run_node) {
1948 		u32 i;
1949 
1950 		if (!group_can_run(group))
1951 			continue;
1952 
1953 		if (skip_idle_groups && group_is_idle(group))
1954 			continue;
1955 
1956 		for (i = 0; i < ctx->as_count; i++) {
1957 			if (ctx->vms[i] == group->vm)
1958 				break;
1959 		}
1960 
1961 		if (i == ctx->as_count && ctx->as_count == sched->as_slot_count)
1962 			continue;
1963 
1964 		if (!owned_by_tick_ctx)
1965 			group_get(group);
1966 
1967 		list_move_tail(&group->run_node, &ctx->groups[group->priority]);
1968 		ctx->group_count++;
1969 		if (group_is_idle(group))
1970 			ctx->idle_group_count++;
1971 
1972 		if (i == ctx->as_count)
1973 			ctx->vms[ctx->as_count++] = group->vm;
1974 
1975 		if (ctx->min_priority > group->priority)
1976 			ctx->min_priority = group->priority;
1977 
1978 		if (tick_ctx_is_full(sched, ctx))
1979 			return;
1980 	}
1981 }
1982 
1983 static void
1984 tick_ctx_insert_old_group(struct panthor_scheduler *sched,
1985 			  struct panthor_sched_tick_ctx *ctx,
1986 			  struct panthor_group *group,
1987 			  bool full_tick)
1988 {
1989 	struct panthor_csg_slot *csg_slot = &sched->csg_slots[group->csg_id];
1990 	struct panthor_group *other_group;
1991 
1992 	if (!full_tick) {
1993 		list_add_tail(&group->run_node, &ctx->old_groups[group->priority]);
1994 		return;
1995 	}
1996 
1997 	/* Rotate to make sure groups with lower CSG slot
1998 	 * priorities have a chance to get a higher CSG slot
1999 	 * priority next time they get picked. This priority
2000 	 * has an impact on resource request ordering, so it's
2001 	 * important to make sure we don't let one group starve
2002 	 * all other groups with the same group priority.
2003 	 */
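	/* For instance, a group that previously ran with the lowest CSG slot
	 * priority ends up at the head of the list, and since slots are
	 * re-programmed head first with decreasing priorities, it tends to
	 * get a higher slot priority on the next tick.
	 */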
2004 	list_for_each_entry(other_group,
2005 			    &ctx->old_groups[csg_slot->group->priority],
2006 			    run_node) {
2007 		struct panthor_csg_slot *other_csg_slot = &sched->csg_slots[other_group->csg_id];
2008 
2009 		if (other_csg_slot->priority > csg_slot->priority) {
2010 			list_add_tail(&csg_slot->group->run_node, &other_group->run_node);
2011 			return;
2012 		}
2013 	}
2014 
2015 	list_add_tail(&group->run_node, &ctx->old_groups[group->priority]);
2016 }
2017 
2018 static void
2019 tick_ctx_init(struct panthor_scheduler *sched,
2020 	      struct panthor_sched_tick_ctx *ctx,
2021 	      bool full_tick)
2022 {
2023 	struct panthor_device *ptdev = sched->ptdev;
2024 	struct panthor_csg_slots_upd_ctx upd_ctx;
2025 	int ret;
2026 	u32 i;
2027 
2028 	memset(ctx, 0, sizeof(*ctx));
2029 	csgs_upd_ctx_init(&upd_ctx);
2030 
2031 	ctx->min_priority = PANTHOR_CSG_PRIORITY_COUNT;
2032 	for (i = 0; i < ARRAY_SIZE(ctx->groups); i++) {
2033 		INIT_LIST_HEAD(&ctx->groups[i]);
2034 		INIT_LIST_HEAD(&ctx->old_groups[i]);
2035 	}
2036 
2037 	for (i = 0; i < sched->csg_slot_count; i++) {
2038 		struct panthor_csg_slot *csg_slot = &sched->csg_slots[i];
2039 		struct panthor_group *group = csg_slot->group;
2040 		struct panthor_fw_csg_iface *csg_iface;
2041 
2042 		if (!group)
2043 			continue;
2044 
2045 		csg_iface = panthor_fw_get_csg_iface(ptdev, i);
2046 		group_get(group);
2047 
2048 		/* If there were unhandled faults on the VM, force processing of
2049 		 * CSG IRQs, so we can flag the faulty queue.
2050 		 */
2051 		if (panthor_vm_has_unhandled_faults(group->vm)) {
2052 			sched_process_csg_irq_locked(ptdev, i);
2053 
2054 			/* If no fatal fault was reported, flag all queues as faulty. */
2055 			if (!group->fatal_queues)
2056 				group->fatal_queues |= GENMASK(group->queue_count - 1, 0);
2057 		}
2058 
2059 		tick_ctx_insert_old_group(sched, ctx, group, full_tick);
2060 		csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, i,
2061 					csg_iface->output->ack ^ CSG_STATUS_UPDATE,
2062 					CSG_STATUS_UPDATE);
2063 	}
2064 
2065 	ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
2066 	if (ret) {
2067 		panthor_device_schedule_reset(ptdev);
2068 		ctx->csg_upd_failed_mask |= upd_ctx.timedout_mask;
2069 	}
2070 }
2071 
2072 static void
2073 group_term_post_processing(struct panthor_group *group)
2074 {
2075 	struct panthor_job *job, *tmp;
2076 	LIST_HEAD(faulty_jobs);
2077 	bool cookie;
2078 	u32 i = 0;
2079 
2080 	if (drm_WARN_ON(&group->ptdev->base, group_can_run(group)))
2081 		return;
2082 
2083 	cookie = dma_fence_begin_signalling();
2084 	for (i = 0; i < group->queue_count; i++) {
2085 		struct panthor_queue *queue = group->queues[i];
2086 		struct panthor_syncobj_64b *syncobj;
2087 		int err;
2088 
2089 		if (group->fatal_queues & BIT(i))
2090 			err = -EINVAL;
2091 		else if (group->timedout)
2092 			err = -ETIMEDOUT;
2093 		else
2094 			err = -ECANCELED;
2095 
2096 		if (!queue)
2097 			continue;
2098 
2099 		spin_lock(&queue->fence_ctx.lock);
2100 		list_for_each_entry_safe(job, tmp, &queue->fence_ctx.in_flight_jobs, node) {
2101 			list_move_tail(&job->node, &faulty_jobs);
2102 			dma_fence_set_error(job->done_fence, err);
2103 			dma_fence_signal_locked(job->done_fence);
2104 		}
2105 		spin_unlock(&queue->fence_ctx.lock);
2106 
2107 		/* Manually update the syncobj seqno to unblock waiters. */
2108 		syncobj = group->syncobjs->kmap + (i * sizeof(*syncobj));
2109 		syncobj->status = ~0;
2110 		syncobj->seqno = atomic64_read(&queue->fence_ctx.seqno);
2111 		sched_queue_work(group->ptdev->scheduler, sync_upd);
2112 	}
2113 	dma_fence_end_signalling(cookie);
2114 
2115 	list_for_each_entry_safe(job, tmp, &faulty_jobs, node) {
2116 		list_del_init(&job->node);
2117 		panthor_job_put(&job->base);
2118 	}
2119 }
2120 
2121 static void group_term_work(struct work_struct *work)
2122 {
2123 	struct panthor_group *group =
2124 		container_of(work, struct panthor_group, term_work);
2125 
2126 	group_term_post_processing(group);
2127 	group_put(group);
2128 }
2129 
2130 static void
2131 tick_ctx_cleanup(struct panthor_scheduler *sched,
2132 		 struct panthor_sched_tick_ctx *ctx)
2133 {
2134 	struct panthor_device *ptdev = sched->ptdev;
2135 	struct panthor_group *group, *tmp;
2136 	u32 i;
2137 
2138 	for (i = 0; i < ARRAY_SIZE(ctx->old_groups); i++) {
2139 		list_for_each_entry_safe(group, tmp, &ctx->old_groups[i], run_node) {
2140 			/* If everything went fine, we should only have groups
2141 			 * to be terminated in the old_groups lists.
2142 			 */
2143 			drm_WARN_ON(&ptdev->base, !ctx->csg_upd_failed_mask &&
2144 				    group_can_run(group));
2145 
2146 			if (!group_can_run(group)) {
2147 				list_del_init(&group->run_node);
2148 				list_del_init(&group->wait_node);
2149 				group_queue_work(group, term);
2150 			} else if (group->csg_id >= 0) {
2151 				list_del_init(&group->run_node);
2152 			} else {
2153 				list_move(&group->run_node,
2154 					  group_is_idle(group) ?
2155 					  &sched->groups.idle[group->priority] :
2156 					  &sched->groups.runnable[group->priority]);
2157 			}
2158 			group_put(group);
2159 		}
2160 	}
2161 
2162 	for (i = 0; i < ARRAY_SIZE(ctx->groups); i++) {
2163 		/* If everything went fine, the groups to schedule lists should
2164 		 * be empty.
2165 		 */
2166 		drm_WARN_ON(&ptdev->base,
2167 			    !ctx->csg_upd_failed_mask && !list_empty(&ctx->groups[i]));
2168 
2169 		list_for_each_entry_safe(group, tmp, &ctx->groups[i], run_node) {
2170 			if (group->csg_id >= 0) {
2171 				list_del_init(&group->run_node);
2172 			} else {
2173 				list_move(&group->run_node,
2174 					  group_is_idle(group) ?
2175 					  &sched->groups.idle[group->priority] :
2176 					  &sched->groups.runnable[group->priority]);
2177 			}
2178 			group_put(group);
2179 		}
2180 	}
2181 }
2182 
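/* Apply a tick context: suspend or terminate the evicted groups, unbind
 * them, then bind and start/resume the newly picked ones. Each phase batches
 * its CSG slot updates in a panthor_csg_slots_upd_ctx, so the CSG doorbells
 * are only rung once per phase.
 */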
2183 static void
2184 tick_ctx_apply(struct panthor_scheduler *sched, struct panthor_sched_tick_ctx *ctx)
2185 {
2186 	struct panthor_group *group, *tmp;
2187 	struct panthor_device *ptdev = sched->ptdev;
2188 	struct panthor_csg_slot *csg_slot;
2189 	int prio, new_csg_prio = MAX_CSG_PRIO, i;
2190 	u32 free_csg_slots = 0;
2191 	struct panthor_csg_slots_upd_ctx upd_ctx;
2192 	int ret;
2193 
2194 	csgs_upd_ctx_init(&upd_ctx);
2195 
2196 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
2197 		/* Suspend or terminate evicted groups. */
2198 		list_for_each_entry(group, &ctx->old_groups[prio], run_node) {
2199 			bool term = !group_can_run(group);
2200 			int csg_id = group->csg_id;
2201 
2202 			if (drm_WARN_ON(&ptdev->base, csg_id < 0))
2203 				continue;
2204 
2205 			csg_slot = &sched->csg_slots[csg_id];
2206 			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
2207 						term ? CSG_STATE_TERMINATE : CSG_STATE_SUSPEND,
2208 						CSG_STATE_MASK);
2209 		}
2210 
2211 		/* Update priorities on already running groups. */
2212 		list_for_each_entry(group, &ctx->groups[prio], run_node) {
2213 			struct panthor_fw_csg_iface *csg_iface;
2214 			int csg_id = group->csg_id;
2215 
2216 			if (csg_id < 0) {
2217 				new_csg_prio--;
2218 				continue;
2219 			}
2220 
2221 			csg_slot = &sched->csg_slots[csg_id];
2222 			csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
2223 			if (csg_slot->priority == new_csg_prio) {
2224 				new_csg_prio--;
2225 				continue;
2226 			}
2227 
2228 			panthor_fw_update_reqs(csg_iface, endpoint_req,
2229 					       CSG_EP_REQ_PRIORITY(new_csg_prio),
2230 					       CSG_EP_REQ_PRIORITY_MASK);
2231 			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
2232 						csg_iface->output->ack ^ CSG_ENDPOINT_CONFIG,
2233 						CSG_ENDPOINT_CONFIG);
2234 			new_csg_prio--;
2235 		}
2236 	}
2237 
2238 	ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
2239 	if (ret) {
2240 		panthor_device_schedule_reset(ptdev);
2241 		ctx->csg_upd_failed_mask |= upd_ctx.timedout_mask;
2242 		return;
2243 	}
2244 
2245 	/* Unbind evicted groups. */
2246 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
2247 		list_for_each_entry(group, &ctx->old_groups[prio], run_node) {
2248 			/* This group is gone. Process any interrupts it may
2249 			 * still have pending before a new group takes over
2250 			 * the slot.
2251 			 */
2252 			if (group->csg_id >= 0)
2253 				sched_process_csg_irq_locked(ptdev, group->csg_id);
2254 
2255 			group_unbind_locked(group);
2256 		}
2257 	}
2258 
2259 	for (i = 0; i < sched->csg_slot_count; i++) {
2260 		if (!sched->csg_slots[i].group)
2261 			free_csg_slots |= BIT(i);
2262 	}
2263 
2264 	csgs_upd_ctx_init(&upd_ctx);
2265 	new_csg_prio = MAX_CSG_PRIO;
2266 
2267 	/* Start new groups. */
2268 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
2269 		list_for_each_entry(group, &ctx->groups[prio], run_node) {
2270 			int csg_id = group->csg_id;
2271 			struct panthor_fw_csg_iface *csg_iface;
2272 
2273 			if (csg_id >= 0) {
2274 				new_csg_prio--;
2275 				continue;
2276 			}
2277 
2278 			csg_id = ffs(free_csg_slots) - 1;
2279 			if (drm_WARN_ON(&ptdev->base, csg_id < 0))
2280 				break;
2281 
2282 			csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
2283 			csg_slot = &sched->csg_slots[csg_id];
2284 			group_bind_locked(group, csg_id);
2285 			csg_slot_prog_locked(ptdev, csg_id, new_csg_prio--);
2286 			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
2287 						group->state == PANTHOR_CS_GROUP_SUSPENDED ?
2288 						CSG_STATE_RESUME : CSG_STATE_START,
2289 						CSG_STATE_MASK);
2290 			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
2291 						csg_iface->output->ack ^ CSG_ENDPOINT_CONFIG,
2292 						CSG_ENDPOINT_CONFIG);
2293 			free_csg_slots &= ~BIT(csg_id);
2294 		}
2295 	}
2296 
2297 	ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
2298 	if (ret) {
2299 		panthor_device_schedule_reset(ptdev);
2300 		ctx->csg_upd_failed_mask |= upd_ctx.timedout_mask;
2301 		return;
2302 	}
2303 
2304 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
2305 		list_for_each_entry_safe(group, tmp, &ctx->groups[prio], run_node) {
2306 			list_del_init(&group->run_node);
2307 
2308 			/* If the group has been destroyed while we were
2309 			 * scheduling, ask for an immediate tick to
2310 			 * re-evaluate as soon as possible and get rid of
2311 			 * this dangling group.
2312 			 */
2313 			if (group->destroyed)
2314 				ctx->immediate_tick = true;
2315 			group_put(group);
2316 		}
2317 
2318 		/* Return evicted groups to the idle or run queues. Groups
2319 		 * that can no longer be run (because they've been destroyed
2320 		 * or experienced an unrecoverable error) will be scheduled
2321 		 * for destruction in tick_ctx_cleanup().
2322 		 */
2323 		list_for_each_entry_safe(group, tmp, &ctx->old_groups[prio], run_node) {
2324 			if (!group_can_run(group))
2325 				continue;
2326 
2327 			if (group_is_idle(group))
2328 				list_move_tail(&group->run_node, &sched->groups.idle[prio]);
2329 			else
2330 				list_move_tail(&group->run_node, &sched->groups.runnable[prio]);
2331 			group_put(group);
2332 		}
2333 	}
2334 
2335 	sched->used_csg_slot_count = ctx->group_count;
2336 	sched->might_have_idle_groups = ctx->idle_group_count > 0;
2337 }
2338 
2339 static u64
2340 tick_ctx_update_resched_target(struct panthor_scheduler *sched,
2341 			       const struct panthor_sched_tick_ctx *ctx)
2342 {
2343 	/* We had space left, no need to reschedule until some external event happens. */
2344 	if (!tick_ctx_is_full(sched, ctx))
2345 		goto no_tick;
2346 
2347 	/* If idle groups were scheduled, no need to wake up until some external
2348 	 * event happens (group unblocked, new job submitted, ...).
2349 	 */
2350 	if (ctx->idle_group_count)
2351 		goto no_tick;
2352 
2353 	if (drm_WARN_ON(&sched->ptdev->base, ctx->min_priority >= PANTHOR_CSG_PRIORITY_COUNT))
2354 		goto no_tick;
2355 
2356 	/* If there are groups of the same priority waiting, we need to
2357 	 * keep the scheduler ticking, otherwise, we'll just wait for
2358 	 * new groups with higher priority to be queued.
2359 	 */
2360 	if (!list_empty(&sched->groups.runnable[ctx->min_priority])) {
2361 		u64 resched_target = sched->last_tick + sched->tick_period;
2362 
2363 		if (time_before64(sched->resched_target, sched->last_tick) ||
2364 		    time_before64(resched_target, sched->resched_target))
2365 			sched->resched_target = resched_target;
2366 
2367 		return sched->resched_target - sched->last_tick;
2368 	}
2369 
2370 no_tick:
2371 	sched->resched_target = U64_MAX;
2372 	return U64_MAX;
2373 }
2374 
2375 static void tick_work(struct work_struct *work)
2376 {
2377 	struct panthor_scheduler *sched = container_of(work, struct panthor_scheduler,
2378 						      tick_work.work);
2379 	struct panthor_device *ptdev = sched->ptdev;
2380 	struct panthor_sched_tick_ctx ctx;
2381 	u64 remaining_jiffies = 0, resched_delay;
2382 	u64 now = get_jiffies_64();
2383 	int prio, ret, cookie;
2384 
2385 	if (!drm_dev_enter(&ptdev->base, &cookie))
2386 		return;
2387 
2388 	ret = panthor_device_resume_and_get(ptdev);
2389 	if (drm_WARN_ON(&ptdev->base, ret))
2390 		goto out_dev_exit;
2391 
2392 	if (time_before64(now, sched->resched_target))
2393 		remaining_jiffies = sched->resched_target - now;
2394 
2395 	mutex_lock(&sched->lock);
2396 	if (panthor_device_reset_is_pending(sched->ptdev))
2397 		goto out_unlock;
2398 
2399 	tick_ctx_init(sched, &ctx, remaining_jiffies != 0);
2400 	if (ctx.csg_upd_failed_mask)
2401 		goto out_cleanup_ctx;
2402 
2403 	if (remaining_jiffies) {
2404 		/* Scheduling forced in the middle of a tick. Only RT groups
2405 		 * can preempt non-RT ones. Currently running RT groups can't be
2406 		 * preempted.
2407 		 */
2408 		for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1;
2409 		     prio >= 0 && !tick_ctx_is_full(sched, &ctx);
2410 		     prio--) {
2411 			tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio],
2412 						       true, true);
2413 			if (prio == PANTHOR_CSG_PRIORITY_RT) {
2414 				tick_ctx_pick_groups_from_list(sched, &ctx,
2415 							       &sched->groups.runnable[prio],
2416 							       true, false);
2417 			}
2418 		}
2419 	}
2420 
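	/* Non-idle groups always win over idle ones: e.g. with 8 CSG slots,
	 * 6 runnable non-idle groups and 5 idle ones, the passes below fill
	 * 6 slots with the non-idle groups and only 2 idle groups get the
	 * remaining slots.
	 */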
2421 	/* First pick non-idle groups */
2422 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1;
2423 	     prio >= 0 && !tick_ctx_is_full(sched, &ctx);
2424 	     prio--) {
2425 		tick_ctx_pick_groups_from_list(sched, &ctx, &sched->groups.runnable[prio],
2426 					       true, false);
2427 		tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], true, true);
2428 	}
2429 
2430 	/* If we have free CSG slots left, pick idle groups */
2431 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1;
2432 	     prio >= 0 && !tick_ctx_is_full(sched, &ctx);
2433 	     prio--) {
2434 		/* Check the old_group queue first to avoid reprogramming the slots */
2435 		tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], false, true);
2436 		tick_ctx_pick_groups_from_list(sched, &ctx, &sched->groups.idle[prio],
2437 					       false, false);
2438 	}
2439 
2440 	tick_ctx_apply(sched, &ctx);
2441 	if (ctx.csg_upd_failed_mask)
2442 		goto out_cleanup_ctx;
2443 
2444 	if (ctx.idle_group_count == ctx.group_count) {
2445 		panthor_devfreq_record_idle(sched->ptdev);
2446 		if (sched->pm.has_ref) {
2447 			pm_runtime_put_autosuspend(ptdev->base.dev);
2448 			sched->pm.has_ref = false;
2449 		}
2450 	} else {
2451 		panthor_devfreq_record_busy(sched->ptdev);
2452 		if (!sched->pm.has_ref) {
2453 			pm_runtime_get(ptdev->base.dev);
2454 			sched->pm.has_ref = true;
2455 		}
2456 	}
2457 
2458 	sched->last_tick = now;
2459 	resched_delay = tick_ctx_update_resched_target(sched, &ctx);
2460 	if (ctx.immediate_tick)
2461 		resched_delay = 0;
2462 
2463 	if (resched_delay != U64_MAX)
2464 		sched_queue_delayed_work(sched, tick, resched_delay);
2465 
2466 out_cleanup_ctx:
2467 	tick_ctx_cleanup(sched, &ctx);
2468 
2469 out_unlock:
2470 	mutex_unlock(&sched->lock);
2471 	pm_runtime_mark_last_busy(ptdev->base.dev);
2472 	pm_runtime_put_autosuspend(ptdev->base.dev);
2473 
2474 out_dev_exit:
2475 	drm_dev_exit(cookie);
2476 }
2477 
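/* Evaluate the SYNC_WAIT condition a queue is blocked on. Returns 1 (and
 * releases the syncwait object) when the condition is satisfied, 0 when the
 * queue is still blocked, and a negative error code if the syncwait object
 * can't be retrieved.
 */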
2478 static int panthor_queue_eval_syncwait(struct panthor_group *group, u8 queue_idx)
2479 {
2480 	struct panthor_queue *queue = group->queues[queue_idx];
2481 	union {
2482 		struct panthor_syncobj_64b sync64;
2483 		struct panthor_syncobj_32b sync32;
2484 	} *syncobj;
2485 	bool result;
2486 	u64 value;
2487 
2488 	syncobj = panthor_queue_get_syncwait_obj(group, queue);
2489 	if (!syncobj)
2490 		return -EINVAL;
2491 
2492 	value = queue->syncwait.sync64 ?
2493 		syncobj->sync64.seqno :
2494 		syncobj->sync32.seqno;
2495 
2496 	if (queue->syncwait.gt)
2497 		result = value > queue->syncwait.ref;
2498 	else
2499 		result = value <= queue->syncwait.ref;
2500 
2501 	if (result)
2502 		panthor_queue_put_syncwait_obj(queue);
2503 
2504 	return result;
2505 }
2506 
2507 static void sync_upd_work(struct work_struct *work)
2508 {
2509 	struct panthor_scheduler *sched = container_of(work,
2510 						      struct panthor_scheduler,
2511 						      sync_upd_work);
2512 	struct panthor_group *group, *tmp;
2513 	bool immediate_tick = false;
2514 
2515 	mutex_lock(&sched->lock);
2516 	list_for_each_entry_safe(group, tmp, &sched->groups.waiting, wait_node) {
2517 		u32 tested_queues = group->blocked_queues;
2518 		u32 unblocked_queues = 0;
2519 
2520 		while (tested_queues) {
2521 			u32 cs_id = ffs(tested_queues) - 1;
2522 			int ret;
2523 
2524 			ret = panthor_queue_eval_syncwait(group, cs_id);
2525 			drm_WARN_ON(&group->ptdev->base, ret < 0);
2526 			if (ret)
2527 				unblocked_queues |= BIT(cs_id);
2528 
2529 			tested_queues &= ~BIT(cs_id);
2530 		}
2531 
2532 		if (unblocked_queues) {
2533 			group->blocked_queues &= ~unblocked_queues;
2534 
2535 			if (group->csg_id < 0) {
2536 				list_move(&group->run_node,
2537 					  &sched->groups.runnable[group->priority]);
2538 				if (group->priority == PANTHOR_CSG_PRIORITY_RT)
2539 					immediate_tick = true;
2540 			}
2541 		}
2542 
2543 		if (!group->blocked_queues)
2544 			list_del_init(&group->wait_node);
2545 	}
2546 	mutex_unlock(&sched->lock);
2547 
2548 	if (immediate_tick)
2549 		sched_queue_delayed_work(sched, tick, 0);
2550 }
2551 
2552 static void group_schedule_locked(struct panthor_group *group, u32 queue_mask)
2553 {
2554 	struct panthor_device *ptdev = group->ptdev;
2555 	struct panthor_scheduler *sched = ptdev->scheduler;
2556 	struct list_head *queue = &sched->groups.runnable[group->priority];
2557 	u64 delay_jiffies = 0;
2558 	bool was_idle;
2559 	u64 now;
2560 
2561 	if (!group_can_run(group))
2562 		return;
2563 
2564 	/* All updated queues are blocked, no need to wake up the scheduler. */
2565 	if ((queue_mask & group->blocked_queues) == queue_mask)
2566 		return;
2567 
2568 	was_idle = group_is_idle(group);
2569 	group->idle_queues &= ~queue_mask;
2570 
2571 	/* Don't mess with the lists if we're in the middle of a reset. */
2572 	if (atomic_read(&sched->reset.in_progress))
2573 		return;
2574 
2575 	if (was_idle && !group_is_idle(group))
2576 		list_move_tail(&group->run_node, queue);
2577 
2578 	/* RT groups are preemptive. */
2579 	if (group->priority == PANTHOR_CSG_PRIORITY_RT) {
2580 		sched_queue_delayed_work(sched, tick, 0);
2581 		return;
2582 	}
2583 
2584 	/* Some groups might be idle, force an immediate tick to
2585 	 * re-evaluate.
2586 	 */
2587 	if (sched->might_have_idle_groups) {
2588 		sched_queue_delayed_work(sched, tick, 0);
2589 		return;
2590 	}
2591 
2592 	/* Scheduler is ticking, nothing to do. */
2593 	if (sched->resched_target != U64_MAX) {
2594 		/* If there are free slots, force an immediate tick. */
2595 		if (sched->used_csg_slot_count < sched->csg_slot_count)
2596 			sched_queue_delayed_work(sched, tick, 0);
2597 
2598 		return;
2599 	}
2600 
2601 	/* Scheduler tick was off, recalculate the resched_target based on the
2602 	 * last tick event, and queue the scheduler work.
2603 	 */
2604 	now = get_jiffies_64();
2605 	sched->resched_target = sched->last_tick + sched->tick_period;
2606 	if (sched->used_csg_slot_count == sched->csg_slot_count &&
2607 	    time_before64(now, sched->resched_target))
2608 		delay_jiffies = min_t(unsigned long, sched->resched_target - now, ULONG_MAX);
2609 
2610 	sched_queue_delayed_work(sched, tick, delay_jiffies);
2611 }
2612 
2613 static void queue_stop(struct panthor_queue *queue,
2614 		       struct panthor_job *bad_job)
2615 {
2616 	drm_sched_stop(&queue->scheduler, bad_job ? &bad_job->base : NULL);
2617 }
2618 
2619 static void queue_start(struct panthor_queue *queue)
2620 {
2621 	struct panthor_job *job;
2622 
2623 	/* Re-assign the parent fences. */
2624 	list_for_each_entry(job, &queue->scheduler.pending_list, base.list)
2625 		job->base.s_fence->parent = dma_fence_get(job->done_fence);
2626 
2627 	drm_sched_start(&queue->scheduler, 0);
2628 }
2629 
2630 static void panthor_group_stop(struct panthor_group *group)
2631 {
2632 	struct panthor_scheduler *sched = group->ptdev->scheduler;
2633 
2634 	lockdep_assert_held(&sched->reset.lock);
2635 
2636 	for (u32 i = 0; i < group->queue_count; i++)
2637 		queue_stop(group->queues[i], NULL);
2638 
2639 	group_get(group);
2640 	list_move_tail(&group->run_node, &sched->reset.stopped_groups);
2641 }
2642 
2643 static void panthor_group_start(struct panthor_group *group)
2644 {
2645 	struct panthor_scheduler *sched = group->ptdev->scheduler;
2646 
2647 	lockdep_assert_held(&group->ptdev->scheduler->reset.lock);
2648 
2649 	for (u32 i = 0; i < group->queue_count; i++)
2650 		queue_start(group->queues[i]);
2651 
2652 	if (group_can_run(group)) {
2653 		list_move_tail(&group->run_node,
2654 			       group_is_idle(group) ?
2655 			       &sched->groups.idle[group->priority] :
2656 			       &sched->groups.runnable[group->priority]);
2657 	} else {
2658 		list_del_init(&group->run_node);
2659 		list_del_init(&group->wait_node);
2660 		group_queue_work(group, term);
2661 	}
2662 
2663 	group_put(group);
2664 }
2665 
2666 static void panthor_sched_immediate_tick(struct panthor_device *ptdev)
2667 {
2668 	struct panthor_scheduler *sched = ptdev->scheduler;
2669 
2670 	sched_queue_delayed_work(sched, tick, 0);
2671 }
2672 
2673 /**
2674  * panthor_sched_report_mmu_fault() - Report MMU faults to the scheduler.
 * @ptdev: Device.
2675  */
2676 void panthor_sched_report_mmu_fault(struct panthor_device *ptdev)
2677 {
2678 	/* Force a tick to immediately kill faulty groups. */
2679 	if (ptdev->scheduler)
2680 		panthor_sched_immediate_tick(ptdev);
2681 }
2682 
2683 void panthor_sched_resume(struct panthor_device *ptdev)
2684 {
2685 	/* Force a tick to re-evaluate after a resume. */
2686 	panthor_sched_immediate_tick(ptdev);
2687 }
2688 
2689 void panthor_sched_suspend(struct panthor_device *ptdev)
2690 {
2691 	struct panthor_scheduler *sched = ptdev->scheduler;
2692 	struct panthor_csg_slots_upd_ctx upd_ctx;
2693 	struct panthor_group *group;
2694 	u32 suspended_slots;
2695 	u32 i;
2696 
2697 	mutex_lock(&sched->lock);
2698 	csgs_upd_ctx_init(&upd_ctx);
2699 	for (i = 0; i < sched->csg_slot_count; i++) {
2700 		struct panthor_csg_slot *csg_slot = &sched->csg_slots[i];
2701 
2702 		if (csg_slot->group) {
2703 			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, i,
2704 						group_can_run(csg_slot->group) ?
2705 						CSG_STATE_SUSPEND : CSG_STATE_TERMINATE,
2706 						CSG_STATE_MASK);
2707 		}
2708 	}
2709 
2710 	suspended_slots = upd_ctx.update_mask;
2711 
2712 	csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
2713 	suspended_slots &= ~upd_ctx.timedout_mask;
2714 
2715 	if (upd_ctx.timedout_mask) {
2716 		u32 slot_mask = upd_ctx.timedout_mask;
2717 
2718 		drm_err(&ptdev->base, "CSG suspend failed, escalating to termination");
2719 		csgs_upd_ctx_init(&upd_ctx);
2720 		while (slot_mask) {
2721 			u32 csg_id = ffs(slot_mask) - 1;
2722 			struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
2723 
2724 			/* If the group was still usable before that point, we consider
2725 			 * it innocent.
2726 			 */
2727 			if (group_can_run(csg_slot->group))
2728 				csg_slot->group->innocent = true;
2729 
2730 			/* We consider group suspension failures as fatal and flag the
2731 			 * group as unusable by setting timedout=true.
2732 			 */
2733 			csg_slot->group->timedout = true;
2734 
2735 			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
2736 						CSG_STATE_TERMINATE,
2737 						CSG_STATE_MASK);
2738 			slot_mask &= ~BIT(csg_id);
2739 		}
2740 
2741 		csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
2742 
2743 		slot_mask = upd_ctx.timedout_mask;
2744 		while (slot_mask) {
2745 			u32 csg_id = ffs(slot_mask) - 1;
2746 			struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
2747 
2748 			/* The terminate command timed out, but the soft-reset
2749 			 * will automatically terminate all active groups, so
2750 			 * let's force the state to terminated here.
2751 			 */
2752 			if (csg_slot->group->state != PANTHOR_CS_GROUP_TERMINATED)
2753 				csg_slot->group->state = PANTHOR_CS_GROUP_TERMINATED;
2754 			slot_mask &= ~BIT(csg_id);
2755 		}
2756 	}
2757 
2758 	/* Flush L2 and LSC caches to make sure suspend state is up-to-date.
2759 	 * If the flush fails, flag all queues for termination.
2760 	 */
2761 	if (suspended_slots) {
2762 		bool flush_caches_failed = false;
2763 		u32 slot_mask = suspended_slots;
2764 
2765 		if (panthor_gpu_flush_caches(ptdev, CACHE_CLEAN, CACHE_CLEAN, 0))
2766 			flush_caches_failed = true;
2767 
2768 		while (slot_mask) {
2769 			u32 csg_id = ffs(slot_mask) - 1;
2770 			struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
2771 
2772 			if (flush_caches_failed)
2773 				csg_slot->group->state = PANTHOR_CS_GROUP_TERMINATED;
2774 			else
2775 				csg_slot_sync_update_locked(ptdev, csg_id);
2776 
2777 			slot_mask &= ~BIT(csg_id);
2778 		}
2779 	}
2780 
2781 	for (i = 0; i < sched->csg_slot_count; i++) {
2782 		struct panthor_csg_slot *csg_slot = &sched->csg_slots[i];
2783 
2784 		group = csg_slot->group;
2785 		if (!group)
2786 			continue;
2787 
2788 		group_get(group);
2789 
2790 		if (group->csg_id >= 0)
2791 			sched_process_csg_irq_locked(ptdev, group->csg_id);
2792 
2793 		group_unbind_locked(group);
2794 
2795 		drm_WARN_ON(&group->ptdev->base, !list_empty(&group->run_node));
2796 
2797 		if (group_can_run(group)) {
2798 			list_add(&group->run_node,
2799 				 &sched->groups.idle[group->priority]);
2800 		} else {
2801 			/* We don't bother stopping the scheduler if the group is
2802 			 * faulty, the group termination work will finish the job.
2803 			 */
2804 			list_del_init(&group->wait_node);
2805 			group_queue_work(group, term);
2806 		}
2807 		group_put(group);
2808 	}
2809 	mutex_unlock(&sched->lock);
2810 }
2811 
2812 void panthor_sched_pre_reset(struct panthor_device *ptdev)
2813 {
2814 	struct panthor_scheduler *sched = ptdev->scheduler;
2815 	struct panthor_group *group, *group_tmp;
2816 	u32 i;
2817 
2818 	mutex_lock(&sched->reset.lock);
2819 	atomic_set(&sched->reset.in_progress, true);
2820 
2821 	/* Cancel all scheduler works. Once this is done, these works can't be
2822 	 * scheduled again until the reset operation is complete.
2823 	 */
2824 	cancel_work_sync(&sched->sync_upd_work);
2825 	cancel_delayed_work_sync(&sched->tick_work);
2826 
2827 	panthor_sched_suspend(ptdev);
2828 
2829 	/* Stop all groups that might still accept jobs, so we don't get passed
2830 	 * new jobs while we're resetting.
2831 	 */
2832 	for (i = 0; i < ARRAY_SIZE(sched->groups.runnable); i++) {
2833 		/* All groups should be in the idle lists. */
2834 		drm_WARN_ON(&ptdev->base, !list_empty(&sched->groups.runnable[i]));
2835 		list_for_each_entry_safe(group, group_tmp, &sched->groups.runnable[i], run_node)
2836 			panthor_group_stop(group);
2837 	}
2838 
2839 	for (i = 0; i < ARRAY_SIZE(sched->groups.idle); i++) {
2840 		list_for_each_entry_safe(group, group_tmp, &sched->groups.idle[i], run_node)
2841 			panthor_group_stop(group);
2842 	}
2843 
2844 	mutex_unlock(&sched->reset.lock);
2845 }
2846 
2847 void panthor_sched_post_reset(struct panthor_device *ptdev, bool reset_failed)
2848 {
2849 	struct panthor_scheduler *sched = ptdev->scheduler;
2850 	struct panthor_group *group, *group_tmp;
2851 
2852 	mutex_lock(&sched->reset.lock);
2853 
2854 	list_for_each_entry_safe(group, group_tmp, &sched->reset.stopped_groups, run_node) {
2855 		/* Consider all previously running groups as terminated if the
2856 		 * reset failed.
2857 		 */
2858 		if (reset_failed)
2859 			group->state = PANTHOR_CS_GROUP_TERMINATED;
2860 
2861 		panthor_group_start(group);
2862 	}
2863 
2864 	/* We're done resetting the GPU, clear the reset.in_progress bit so we can
2865 	 * kick the scheduler.
2866 	 */
2867 	atomic_set(&sched->reset.in_progress, false);
2868 	mutex_unlock(&sched->reset.lock);
2869 
2870 	/* No need to queue a tick and update syncs if the reset failed. */
2871 	if (!reset_failed) {
2872 		sched_queue_delayed_work(sched, tick, 0);
2873 		sched_queue_work(sched, sync_upd);
2874 	}
2875 }
2876 
2877 static void update_fdinfo_stats(struct panthor_job *job)
2878 {
2879 	struct panthor_group *group = job->group;
2880 	struct panthor_queue *queue = group->queues[job->queue_idx];
2881 	struct panthor_gpu_usage *fdinfo = &group->fdinfo.data;
2882 	struct panthor_job_profiling_data *slots = queue->profiling.slots->kmap;
2883 	struct panthor_job_profiling_data *data = &slots[job->profiling.slot];
2884 
2885 	scoped_guard(spinlock, &group->fdinfo.lock) {
2886 		if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_CYCLES)
2887 			fdinfo->cycles += data->cycles.after - data->cycles.before;
2888 		if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_TIMESTAMP)
2889 			fdinfo->time += data->time.after - data->time.before;
2890 	}
2891 }
2892 
2893 void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile)
2894 {
2895 	struct panthor_group_pool *gpool = pfile->groups;
2896 	struct panthor_group *group;
2897 	unsigned long i;
2898 
2899 	if (IS_ERR_OR_NULL(gpool))
2900 		return;
2901 
2902 	xa_lock(&gpool->xa);
2903 	xa_for_each(&gpool->xa, i, group) {
2904 		guard(spinlock)(&group->fdinfo.lock);
2905 		pfile->stats.cycles += group->fdinfo.data.cycles;
2906 		pfile->stats.time += group->fdinfo.data.time;
2907 		group->fdinfo.data.cycles = 0;
2908 		group->fdinfo.data.time = 0;
2909 	}
2910 	xa_unlock(&gpool->xa);
2911 }
2912 
2913 static void group_sync_upd_work(struct work_struct *work)
2914 {
2915 	struct panthor_group *group =
2916 		container_of(work, struct panthor_group, sync_upd_work);
2917 	struct panthor_job *job, *job_tmp;
2918 	LIST_HEAD(done_jobs);
2919 	u32 queue_idx;
2920 	bool cookie;
2921 
2922 	cookie = dma_fence_begin_signalling();
2923 	for (queue_idx = 0; queue_idx < group->queue_count; queue_idx++) {
2924 		struct panthor_queue *queue = group->queues[queue_idx];
2925 		struct panthor_syncobj_64b *syncobj;
2926 
2927 		if (!queue)
2928 			continue;
2929 
2930 		syncobj = group->syncobjs->kmap + (queue_idx * sizeof(*syncobj));
2931 
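		/* The syncobj seqno is updated by the CS when a job completes,
		 * so e.g. a seqno of 5 lets us signal in-flight jobs whose
		 * done_fence seqnos are 3, 4 and 5, while a job with seqno 6
		 * stays in flight.
		 */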
2932 		spin_lock(&queue->fence_ctx.lock);
2933 		list_for_each_entry_safe(job, job_tmp, &queue->fence_ctx.in_flight_jobs, node) {
2934 			if (syncobj->seqno < job->done_fence->seqno)
2935 				break;
2936 
2937 			list_move_tail(&job->node, &done_jobs);
2938 			dma_fence_signal_locked(job->done_fence);
2939 		}
2940 		spin_unlock(&queue->fence_ctx.lock);
2941 	}
2942 	dma_fence_end_signalling(cookie);
2943 
2944 	list_for_each_entry_safe(job, job_tmp, &done_jobs, node) {
2945 		if (job->profiling.mask)
2946 			update_fdinfo_stats(job);
2947 		list_del_init(&job->node);
2948 		panthor_job_put(&job->base);
2949 	}
2950 
2951 	group_put(group);
2952 }
2953 
2954 struct panthor_job_ringbuf_instrs {
2955 	u64 buffer[MAX_INSTRS_PER_JOB];
2956 	u32 count;
2957 };
2958 
2959 struct panthor_job_instr {
2960 	u32 profile_mask;
2961 	u64 instr;
2962 };
2963 
2964 #define JOB_INSTR(__prof, __instr) \
2965 	{ \
2966 		.profile_mask = __prof, \
2967 		.instr = __instr, \
2968 	}
2969 
2970 static void
2971 copy_instrs_to_ringbuf(struct panthor_queue *queue,
2972 		       struct panthor_job *job,
2973 		       struct panthor_job_ringbuf_instrs *instrs)
2974 {
2975 	u64 ringbuf_size = panthor_kernel_bo_size(queue->ringbuf);
2976 	u64 start = job->ringbuf.start & (ringbuf_size - 1);
2977 	u64 size, written;
2978 
2979 	/*
2980 	 * We need to write a whole slot, including any trailing zeroes
2981 	 * that may come at the end of it. Also, because instrs.buffer has
2982 	 * been zero-initialised, there's no need to pad it with zeroes.
2983 	 */
2984 	instrs->count = ALIGN(instrs->count, NUM_INSTRS_PER_CACHE_LINE);
2985 	size = instrs->count * sizeof(u64);
2986 	WARN_ON(size > ringbuf_size);
2987 	written = min(ringbuf_size - start, size);
2988 
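	/* The copy below handles ring-buffer wrap-around: e.g. with a 4K ring
	 * buffer, start = 4064 and size = 64, the first memcpy() writes the
	 * last 32 bytes at the end of the buffer and the second one writes
	 * the remaining 32 bytes at offset 0.
	 */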
2989 	memcpy(queue->ringbuf->kmap + start, instrs->buffer, written);
2990 
2991 	if (written < size)
2992 		memcpy(queue->ringbuf->kmap,
2993 		       &instrs->buffer[written / sizeof(u64)],
2994 		       size - written);
2995 }
2996 
2997 struct panthor_job_cs_params {
2998 	u32 profile_mask;
2999 	u64 addr_reg; u64 val_reg;
3000 	u64 cycle_reg; u64 time_reg;
3001 	u64 sync_addr; u64 times_addr;
3002 	u64 cs_start; u64 cs_size;
3003 	u32 last_flush; u32 waitall_mask;
3004 };
3005 
3006 static void
3007 get_job_cs_params(struct panthor_job *job, struct panthor_job_cs_params *params)
3008 {
3009 	struct panthor_group *group = job->group;
3010 	struct panthor_queue *queue = group->queues[job->queue_idx];
3011 	struct panthor_device *ptdev = group->ptdev;
3012 	struct panthor_scheduler *sched = ptdev->scheduler;
3013 
3014 	params->addr_reg = ptdev->csif_info.cs_reg_count -
3015 			   ptdev->csif_info.unpreserved_cs_reg_count;
3016 	params->val_reg = params->addr_reg + 2;
3017 	params->cycle_reg = params->addr_reg;
3018 	params->time_reg = params->val_reg;
3019 
3020 	params->sync_addr = panthor_kernel_bo_gpuva(group->syncobjs) +
3021 			    job->queue_idx * sizeof(struct panthor_syncobj_64b);
3022 	params->times_addr = panthor_kernel_bo_gpuva(queue->profiling.slots) +
3023 			     (job->profiling.slot * sizeof(struct panthor_job_profiling_data));
3024 	params->waitall_mask = GENMASK(sched->sb_slot_count - 1, 0);
3025 
3026 	params->cs_start = job->call_info.start;
3027 	params->cs_size = job->call_info.size;
3028 	params->last_flush = job->call_info.latest_flush;
3029 
3030 	params->profile_mask = job->profiling.mask;
3031 }
3032 
3033 #define JOB_INSTR_ALWAYS(instr) \
3034 	JOB_INSTR(PANTHOR_DEVICE_PROFILING_DISABLED, (instr))
3035 #define JOB_INSTR_TIMESTAMP(instr) \
3036 	JOB_INSTR(PANTHOR_DEVICE_PROFILING_TIMESTAMP, (instr))
3037 #define JOB_INSTR_CYCLES(instr) \
3038 	JOB_INSTR(PANTHOR_DEVICE_PROFILING_CYCLES, (instr))
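/* JOB_INSTR_ALWAYS() entries are always copied to the ring buffer, while
 * JOB_INSTR_TIMESTAMP()/JOB_INSTR_CYCLES() entries are only kept when the
 * job's profiling mask enables the matching flag (see the filtering loop in
 * prepare_job_instrs()).
 */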
3039 
3040 static void
3041 prepare_job_instrs(const struct panthor_job_cs_params *params,
3042 		   struct panthor_job_ringbuf_instrs *instrs)
3043 {
3044 	const struct panthor_job_instr instr_seq[] = {
3045 		/* MOV32 rX+2, cs.latest_flush */
3046 		JOB_INSTR_ALWAYS((2ull << 56) | (params->val_reg << 48) | params->last_flush),
3047 		/* FLUSH_CACHE2.clean_inv_all.no_wait.signal(0) rX+2 */
3048 		JOB_INSTR_ALWAYS((36ull << 56) | (0ull << 48) | (params->val_reg << 40) |
3049 				 (0 << 16) | 0x233),
3050 		/* MOV48 rX:rX+1, cycles_offset */
3051 		JOB_INSTR_CYCLES((1ull << 56) | (params->cycle_reg << 48) |
3052 				 (params->times_addr +
3053 				  offsetof(struct panthor_job_profiling_data, cycles.before))),
3054 		/* STORE_STATE cycles */
3055 		JOB_INSTR_CYCLES((40ull << 56) | (params->cycle_reg << 40) | (1ll << 32)),
3056 		/* MOV48 rX:rX+1, time_offset */
3057 		JOB_INSTR_TIMESTAMP((1ull << 56) | (params->time_reg << 48) |
3058 				    (params->times_addr +
3059 				     offsetof(struct panthor_job_profiling_data, time.before))),
3060 		/* STORE_STATE timer */
3061 		JOB_INSTR_TIMESTAMP((40ull << 56) | (params->time_reg << 40) | (0ll << 32)),
3062 		/* MOV48 rX:rX+1, cs.start */
3063 		JOB_INSTR_ALWAYS((1ull << 56) | (params->addr_reg << 48) | params->cs_start),
3064 		/* MOV32 rX+2, cs.size */
3065 		JOB_INSTR_ALWAYS((2ull << 56) | (params->val_reg << 48) | params->cs_size),
3066 		/* WAIT(0) => waits for FLUSH_CACHE2 instruction */
3067 		JOB_INSTR_ALWAYS((3ull << 56) | (1 << 16)),
3068 		/* CALL rX:rX+1, rX+2 */
3069 		JOB_INSTR_ALWAYS((32ull << 56) | (params->addr_reg << 40) |
3070 				 (params->val_reg << 32)),
3071 		/* MOV48 rX:rX+1, cycles_offset */
3072 		JOB_INSTR_CYCLES((1ull << 56) | (params->cycle_reg << 48) |
3073 				 (params->times_addr +
3074 				  offsetof(struct panthor_job_profiling_data, cycles.after))),
3075 		/* STORE_STATE cycles */
3076 		JOB_INSTR_CYCLES((40ull << 56) | (params->cycle_reg << 40) | (1ll << 32)),
3077 		/* MOV48 rX:rX+1, time_offset */
3078 		JOB_INSTR_TIMESTAMP((1ull << 56) | (params->time_reg << 48) |
3079 			  (params->times_addr +
3080 			   offsetof(struct panthor_job_profiling_data, time.after))),
3081 		/* STORE_STATE timer */
3082 		JOB_INSTR_TIMESTAMP((40ull << 56) | (params->time_reg << 40) | (0ll << 32)),
3083 		/* MOV48 rX:rX+1, sync_addr */
3084 		JOB_INSTR_ALWAYS((1ull << 56) | (params->addr_reg << 48) | params->sync_addr),
3085 		/* MOV48 rX+2, #1 */
3086 		JOB_INSTR_ALWAYS((1ull << 56) | (params->val_reg << 48) | 1),
3087 		/* WAIT(all) */
3088 		JOB_INSTR_ALWAYS((3ull << 56) | (params->waitall_mask << 16)),
3089 		/* SYNC_ADD64.system_scope.propagate_err.nowait rX:rX+1, rX+2 */
3090 		JOB_INSTR_ALWAYS((51ull << 56) | (0ull << 48) | (params->addr_reg << 40) |
3091 				 (params->val_reg << 32) | (0 << 16) | 1),
3092 		/* ERROR_BARRIER, so we can recover from faults at job boundaries. */
3093 		JOB_INSTR_ALWAYS((47ull << 56)),
3094 	};
3095 	u32 pad;
3096 
3097 	instrs->count = 0;
3098 
3099 	/* Needs to be cacheline-aligned to please the prefetcher. */
3100 	static_assert(sizeof(instrs->buffer) % 64 == 0,
3101 		      "panthor_job_ringbuf_instrs::buffer is not aligned on a cacheline");
3102 
3103 	/* Make sure we have enough storage to store the whole sequence. */
3104 	static_assert(ALIGN(ARRAY_SIZE(instr_seq), NUM_INSTRS_PER_CACHE_LINE) ==
3105 		      ARRAY_SIZE(instrs->buffer),
3106 		      "instr_seq vs panthor_job_ringbuf_instrs::buffer size mismatch");
3107 
3108 	for (u32 i = 0; i < ARRAY_SIZE(instr_seq); i++) {
3109 		/* If the profile mask of this instruction is not enabled, skip it. */
3110 		if (instr_seq[i].profile_mask &&
3111 		    !(instr_seq[i].profile_mask & params->profile_mask))
3112 			continue;
3113 
3114 		instrs->buffer[instrs->count++] = instr_seq[i].instr;
3115 	}
3116 
3117 	pad = ALIGN(instrs->count, NUM_INSTRS_PER_CACHE_LINE);
3118 	memset(&instrs->buffer[instrs->count], 0,
3119 	       (pad - instrs->count) * sizeof(instrs->buffer[0]));
3120 	instrs->count = pad;
3121 }
3122 
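/* Job "credits" are simply the number of u64 instruction slots the job will
 * occupy in the ring buffer once the sequence has been cache-line aligned,
 * so a job with cycle/timestamp profiling enabled costs more credits than an
 * unprofiled one.
 */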
3123 static u32 calc_job_credits(u32 profile_mask)
3124 {
3125 	struct panthor_job_ringbuf_instrs instrs;
3126 	struct panthor_job_cs_params params = {
3127 		.profile_mask = profile_mask,
3128 	};
3129 
3130 	prepare_job_instrs(&params, &instrs);
3131 	return instrs.count;
3132 }
3133 
3134 static struct dma_fence *
3135 queue_run_job(struct drm_sched_job *sched_job)
3136 {
3137 	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
3138 	struct panthor_group *group = job->group;
3139 	struct panthor_queue *queue = group->queues[job->queue_idx];
3140 	struct panthor_device *ptdev = group->ptdev;
3141 	struct panthor_scheduler *sched = ptdev->scheduler;
3142 	struct panthor_job_ringbuf_instrs instrs;
3143 	struct panthor_job_cs_params cs_params;
3144 	struct dma_fence *done_fence;
3145 	int ret;
3146 
3147 	/* Stream size is zero, nothing to do except making sure all previously
3148 	 * submitted jobs are done before we signal the
3149 	 * drm_sched_job::s_fence::finished fence.
3150 	 */
3151 	if (!job->call_info.size) {
3152 		job->done_fence = dma_fence_get(queue->fence_ctx.last_fence);
3153 		return dma_fence_get(job->done_fence);
3154 	}
3155 
3156 	ret = panthor_device_resume_and_get(ptdev);
3157 	if (drm_WARN_ON(&ptdev->base, ret))
3158 		return ERR_PTR(ret);
3159 
3160 	mutex_lock(&sched->lock);
3161 	if (!group_can_run(group)) {
3162 		done_fence = ERR_PTR(-ECANCELED);
3163 		goto out_unlock;
3164 	}
3165 
3166 	dma_fence_init(job->done_fence,
3167 		       &panthor_queue_fence_ops,
3168 		       &queue->fence_ctx.lock,
3169 		       queue->fence_ctx.id,
3170 		       atomic64_inc_return(&queue->fence_ctx.seqno));
3171 
3172 	job->profiling.slot = queue->profiling.seqno++;
3173 	if (queue->profiling.seqno == queue->profiling.slot_count)
3174 		queue->profiling.seqno = 0;
3175 
3176 	job->ringbuf.start = queue->iface.input->insert;
3177 
3178 	get_job_cs_params(job, &cs_params);
3179 	prepare_job_instrs(&cs_params, &instrs);
3180 	copy_instrs_to_ringbuf(queue, job, &instrs);
3181 
3182 	job->ringbuf.end = job->ringbuf.start + (instrs.count * sizeof(u64));
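	/* ringbuf.start/end mirror the queue's insert pointer, which is a
	 * free-running counter: copy_instrs_to_ringbuf() masks it with
	 * (ringbuf_size - 1) to get the actual offset, so e.g. an insert
	 * value of 8448 on a 4K ring buffer maps to offset 256.
	 */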
3183 
3184 	panthor_job_get(&job->base);
3185 	spin_lock(&queue->fence_ctx.lock);
3186 	list_add_tail(&job->node, &queue->fence_ctx.in_flight_jobs);
3187 	spin_unlock(&queue->fence_ctx.lock);
3188 
3189 	/* Make sure the ring buffer is updated before the INSERT
3190 	 * register.
3191 	 */
3192 	wmb();
3193 
3194 	queue->iface.input->extract = queue->iface.output->extract;
3195 	queue->iface.input->insert = job->ringbuf.end;
3196 
3197 	if (group->csg_id < 0) {
3198 		/* If the queue is blocked, we want to keep the timeout running, so we
3199 		 * can detect unbounded waits and kill the group when that happens.
3200 		 * Otherwise, we suspend the timeout so the time we spend waiting for
3201 		 * a CSG slot is not counted.
3202 		 */
3203 		if (!(group->blocked_queues & BIT(job->queue_idx)) &&
3204 		    !queue->timeout_suspended) {
3205 			queue->remaining_time = drm_sched_suspend_timeout(&queue->scheduler);
3206 			queue->timeout_suspended = true;
3207 		}
3208 
3209 		group_schedule_locked(group, BIT(job->queue_idx));
3210 	} else {
3211 		gpu_write(ptdev, CSF_DOORBELL(queue->doorbell_id), 1);
3212 		if (!sched->pm.has_ref &&
3213 		    !(group->blocked_queues & BIT(job->queue_idx))) {
3214 			pm_runtime_get(ptdev->base.dev);
3215 			sched->pm.has_ref = true;
3216 		}
3217 		panthor_devfreq_record_busy(sched->ptdev);
3218 	}
3219 
3220 	/* Update the last fence. */
3221 	dma_fence_put(queue->fence_ctx.last_fence);
3222 	queue->fence_ctx.last_fence = dma_fence_get(job->done_fence);
3223 
3224 	done_fence = dma_fence_get(job->done_fence);
3225 
3226 out_unlock:
3227 	mutex_unlock(&sched->lock);
3228 	pm_runtime_mark_last_busy(ptdev->base.dev);
3229 	pm_runtime_put_autosuspend(ptdev->base.dev);
3230 
3231 	return done_fence;
3232 }
3233 
3234 static enum drm_gpu_sched_stat
3235 queue_timedout_job(struct drm_sched_job *sched_job)
3236 {
3237 	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
3238 	struct panthor_group *group = job->group;
3239 	struct panthor_device *ptdev = group->ptdev;
3240 	struct panthor_scheduler *sched = ptdev->scheduler;
3241 	struct panthor_queue *queue = group->queues[job->queue_idx];
3242 
3243 	drm_warn(&ptdev->base, "job timeout: pid=%d, comm=%s, seqno=%llu\n",
3244 		 group->task_info.pid, group->task_info.comm, job->done_fence->seqno);
3245 
3246 	drm_WARN_ON(&ptdev->base, atomic_read(&sched->reset.in_progress));
3247 
3248 	queue_stop(queue, job);
3249 
3250 	mutex_lock(&sched->lock);
3251 	group->timedout = true;
3252 	if (group->csg_id >= 0) {
3253 		sched_queue_delayed_work(ptdev->scheduler, tick, 0);
3254 	} else {
3255 		/* Remove from the run queues, so the scheduler can't
3256 		 * pick the group on the next tick.
3257 		 */
3258 		list_del_init(&group->run_node);
3259 		list_del_init(&group->wait_node);
3260 
3261 		group_queue_work(group, term);
3262 	}
3263 	mutex_unlock(&sched->lock);
3264 
3265 	queue_start(queue);
3266 
3267 	return DRM_GPU_SCHED_STAT_RESET;
3268 }
3269 
3270 static void queue_free_job(struct drm_sched_job *sched_job)
3271 {
3272 	drm_sched_job_cleanup(sched_job);
3273 	panthor_job_put(sched_job);
3274 }
3275 
3276 static const struct drm_sched_backend_ops panthor_queue_sched_ops = {
3277 	.run_job = queue_run_job,
3278 	.timedout_job = queue_timedout_job,
3279 	.free_job = queue_free_job,
3280 };
3281 
3282 static u32 calc_profiling_ringbuf_num_slots(struct panthor_device *ptdev,
3283 					    u32 cs_ringbuf_size)
3284 {
3285 	u32 min_profiled_job_instrs = U32_MAX;
3286 	u32 last_flag = fls(PANTHOR_DEVICE_PROFILING_ALL);
3287 
3288 	/*
3289 	 * We want to calculate the minimum size of a profiled job's CS,
3290 	 * because profiled jobs need additional instructions to sample
3291 	 * performance metrics and thus take up more room in the queue's
3292 	 * ring buffer than unprofiled ones. The number of profiling slots we
3293 	 * need to allocate is the maximum number of profiled jobs that can
3294 	 * sit in the ring buffer simultaneously, which is the ring buffer
3295 	 * size divided by the smallest profiled job size.
3296 	 * That minimum has to be evaluated for every single job profiling
3297 	 * flag, but not for the profiling-disabled case, since unprofiled
3298 	 * jobs don't need to keep track of this at all.
3300 	 */
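	/* For instance, if the smallest profiled sequence took 24 instructions
	 * (192 bytes; purely illustrative numbers), a 16K ring buffer would
	 * get DIV_ROUND_UP(SZ_16K, 192) = 86 profiling slots.
	 */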
3301 	for (u32 i = 0; i < last_flag; i++) {
3302 		min_profiled_job_instrs =
3303 			min(min_profiled_job_instrs, calc_job_credits(BIT(i)));
3304 	}
3305 
3306 	return DIV_ROUND_UP(cs_ringbuf_size, min_profiled_job_instrs * sizeof(u64));
3307 }
3308 
3309 static struct panthor_queue *
3310 group_create_queue(struct panthor_group *group,
3311 		   const struct drm_panthor_queue_create *args)
3312 {
3313 	const struct drm_sched_init_args sched_args = {
3314 		.ops = &panthor_queue_sched_ops,
3315 		.submit_wq = group->ptdev->scheduler->wq,
3316 		.num_rqs = 1,
3317 		/*
3318 		 * The credit limit argument tells us the total number of
3319 		 * instructions across all CS slots in the ringbuffer, with
3320 		 * some jobs requiring twice as many as others, depending on
3321 		 * their profiling status.
3322 		 */
3323 		.credit_limit = args->ringbuf_size / sizeof(u64),
3324 		.timeout = msecs_to_jiffies(JOB_TIMEOUT_MS),
3325 		.timeout_wq = group->ptdev->reset.wq,
3326 		.name = "panthor-queue",
3327 		.dev = group->ptdev->base.dev,
3328 	};
3329 	struct drm_gpu_scheduler *drm_sched;
3330 	struct panthor_queue *queue;
3331 	int ret;
3332 
3333 	if (args->pad[0] || args->pad[1] || args->pad[2])
3334 		return ERR_PTR(-EINVAL);
3335 
3336 	if (args->ringbuf_size < SZ_4K || args->ringbuf_size > SZ_64K ||
3337 	    !is_power_of_2(args->ringbuf_size))
3338 		return ERR_PTR(-EINVAL);
3339 
3340 	if (args->priority > CSF_MAX_QUEUE_PRIO)
3341 		return ERR_PTR(-EINVAL);
3342 
3343 	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
3344 	if (!queue)
3345 		return ERR_PTR(-ENOMEM);
3346 
3347 	queue->fence_ctx.id = dma_fence_context_alloc(1);
3348 	spin_lock_init(&queue->fence_ctx.lock);
3349 	INIT_LIST_HEAD(&queue->fence_ctx.in_flight_jobs);
3350 
3351 	queue->priority = args->priority;
3352 
3353 	queue->ringbuf = panthor_kernel_bo_create(group->ptdev, group->vm,
3354 						  args->ringbuf_size,
3355 						  DRM_PANTHOR_BO_NO_MMAP,
3356 						  DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
3357 						  DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
3358 						  PANTHOR_VM_KERNEL_AUTO_VA,
3359 						  "CS ring buffer");
3360 	if (IS_ERR(queue->ringbuf)) {
3361 		ret = PTR_ERR(queue->ringbuf);
3362 		goto err_free_queue;
3363 	}
3364 
3365 	ret = panthor_kernel_bo_vmap(queue->ringbuf);
3366 	if (ret)
3367 		goto err_free_queue;
3368 
3369 	queue->iface.mem = panthor_fw_alloc_queue_iface_mem(group->ptdev,
3370 							    &queue->iface.input,
3371 							    &queue->iface.output,
3372 							    &queue->iface.input_fw_va,
3373 							    &queue->iface.output_fw_va);
3374 	if (IS_ERR(queue->iface.mem)) {
3375 		ret = PTR_ERR(queue->iface.mem);
3376 		goto err_free_queue;
3377 	}
3378 
3379 	queue->profiling.slot_count =
3380 		calc_profiling_ringbuf_num_slots(group->ptdev, args->ringbuf_size);
3381 
3382 	queue->profiling.slots =
3383 		panthor_kernel_bo_create(group->ptdev, group->vm,
3384 					 queue->profiling.slot_count *
3385 					 sizeof(struct panthor_job_profiling_data),
3386 					 DRM_PANTHOR_BO_NO_MMAP,
3387 					 DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
3388 					 DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
3389 					 PANTHOR_VM_KERNEL_AUTO_VA,
3390 					 "Group job stats");
3391 
3392 	if (IS_ERR(queue->profiling.slots)) {
3393 		ret = PTR_ERR(queue->profiling.slots);
3394 		goto err_free_queue;
3395 	}
3396 
3397 	ret = panthor_kernel_bo_vmap(queue->profiling.slots);
3398 	if (ret)
3399 		goto err_free_queue;
3400 
3401 	ret = drm_sched_init(&queue->scheduler, &sched_args);
3402 	if (ret)
3403 		goto err_free_queue;
3404 
3405 	drm_sched = &queue->scheduler;
3406 	ret = drm_sched_entity_init(&queue->entity, 0, &drm_sched, 1, NULL);
3407 	if (ret)
		goto err_free_queue;

3408 	return queue;
3409 
3410 err_free_queue:
3411 	group_free_queue(group, queue);
3412 	return ERR_PTR(ret);
3413 }
3414 
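/* Record the PID and comm of the thread group leader that created the group. */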
3415 static void group_init_task_info(struct panthor_group *group)
3416 {
3417 	struct task_struct *task = current->group_leader;
3418 
3419 	group->task_info.pid = task->pid;
3420 	get_task_comm(group->task_info.comm, task);
3421 }
3422 
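/*
 * Accumulate the sizes of all kernel BOs backing a group (suspend buffers,
 * sync objects, and per-queue ring buffers, FW interfaces and profiling
 * slots) so they can be reported through fdinfo.
 */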
3423 static void add_group_kbo_sizes(struct panthor_device *ptdev,
3424 				struct panthor_group *group)
3425 {
3426 	struct panthor_queue *queue;
3427 	int i;
3428 
3429 	if (drm_WARN_ON(&ptdev->base, IS_ERR_OR_NULL(group)))
3430 		return;
3431 	if (drm_WARN_ON(&ptdev->base, ptdev != group->ptdev))
3432 		return;
3433 
3434 	group->fdinfo.kbo_sizes += group->suspend_buf->obj->size;
3435 	group->fdinfo.kbo_sizes += group->protm_suspend_buf->obj->size;
3436 	group->fdinfo.kbo_sizes += group->syncobjs->obj->size;
3437 
3438 	for (i = 0; i < group->queue_count; i++) {
3439 		queue = group->queues[i];
3440 		group->fdinfo.kbo_sizes += queue->ringbuf->obj->size;
3441 		group->fdinfo.kbo_sizes += queue->iface.mem->obj->size;
3442 		group->fdinfo.kbo_sizes += queue->profiling.slots->obj->size;
3443 	}
3444 }
3445 
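/* Maximum number of groups that can exist in a single file's pool. */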
3446 #define MAX_GROUPS_PER_POOL		128
3447 
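/**
 * panthor_group_create() - Create a scheduling group.
 * @pfile: File this group is created on.
 * @group_args: Group creation arguments.
 * @queue_args: Array of queue creation arguments, one per queue.
 *
 * Return: A positive group handle on success, a negative error code otherwise.
 */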
3448 int panthor_group_create(struct panthor_file *pfile,
3449 			 const struct drm_panthor_group_create *group_args,
3450 			 const struct drm_panthor_queue_create *queue_args)
3451 {
3452 	struct panthor_device *ptdev = pfile->ptdev;
3453 	struct panthor_group_pool *gpool = pfile->groups;
3454 	struct panthor_scheduler *sched = ptdev->scheduler;
3455 	struct panthor_fw_csg_iface *csg_iface = panthor_fw_get_csg_iface(ptdev, 0);
3456 	struct panthor_group *group = NULL;
3457 	u32 gid, i, suspend_size;
3458 	int ret;
3459 
3460 	if (group_args->pad)
3461 		return -EINVAL;
3462 
3463 	if (group_args->priority >= PANTHOR_CSG_PRIORITY_COUNT)
3464 		return -EINVAL;
3465 
3466 	if ((group_args->compute_core_mask & ~ptdev->gpu_info.shader_present) ||
3467 	    (group_args->fragment_core_mask & ~ptdev->gpu_info.shader_present) ||
3468 	    (group_args->tiler_core_mask & ~ptdev->gpu_info.tiler_present))
3469 		return -EINVAL;
3470 
3471 	if (hweight64(group_args->compute_core_mask) < group_args->max_compute_cores ||
3472 	    hweight64(group_args->fragment_core_mask) < group_args->max_fragment_cores ||
3473 	    hweight64(group_args->tiler_core_mask) < group_args->max_tiler_cores)
3474 		return -EINVAL;
3475 
3476 	group = kzalloc(sizeof(*group), GFP_KERNEL);
3477 	if (!group)
3478 		return -ENOMEM;
3479 
3480 	spin_lock_init(&group->fatal_lock);
3481 	kref_init(&group->refcount);
3482 	group->state = PANTHOR_CS_GROUP_CREATED;
3483 	group->csg_id = -1;
3484 
3485 	group->ptdev = ptdev;
3486 	group->max_compute_cores = group_args->max_compute_cores;
3487 	group->compute_core_mask = group_args->compute_core_mask;
3488 	group->max_fragment_cores = group_args->max_fragment_cores;
3489 	group->fragment_core_mask = group_args->fragment_core_mask;
3490 	group->max_tiler_cores = group_args->max_tiler_cores;
3491 	group->tiler_core_mask = group_args->tiler_core_mask;
3492 	group->priority = group_args->priority;
3493 
3494 	INIT_LIST_HEAD(&group->wait_node);
3495 	INIT_LIST_HEAD(&group->run_node);
3496 	INIT_WORK(&group->term_work, group_term_work);
3497 	INIT_WORK(&group->sync_upd_work, group_sync_upd_work);
3498 	INIT_WORK(&group->tiler_oom_work, group_tiler_oom_work);
3499 	INIT_WORK(&group->release_work, group_release_work);
3500 
3501 	group->vm = panthor_vm_pool_get_vm(pfile->vms, group_args->vm_id);
3502 	if (!group->vm) {
3503 		ret = -EINVAL;
3504 		goto err_put_group;
3505 	}
3506 
3507 	suspend_size = csg_iface->control->suspend_size;
3508 	group->suspend_buf = panthor_fw_alloc_suspend_buf_mem(ptdev, suspend_size);
3509 	if (IS_ERR(group->suspend_buf)) {
3510 		ret = PTR_ERR(group->suspend_buf);
3511 		group->suspend_buf = NULL;
3512 		goto err_put_group;
3513 	}
3514 
3515 	suspend_size = csg_iface->control->protm_suspend_size;
3516 	group->protm_suspend_buf = panthor_fw_alloc_suspend_buf_mem(ptdev, suspend_size);
3517 	if (IS_ERR(group->protm_suspend_buf)) {
3518 		ret = PTR_ERR(group->protm_suspend_buf);
3519 		group->protm_suspend_buf = NULL;
3520 		goto err_put_group;
3521 	}
3522 
3523 	group->syncobjs = panthor_kernel_bo_create(ptdev, group->vm,
3524 						   group_args->queues.count *
3525 						   sizeof(struct panthor_syncobj_64b),
3526 						   DRM_PANTHOR_BO_NO_MMAP,
3527 						   DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
3528 						   DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
3529 						   PANTHOR_VM_KERNEL_AUTO_VA,
3530 						   "Group sync objects");
3531 	if (IS_ERR(group->syncobjs)) {
3532 		ret = PTR_ERR(group->syncobjs);
3533 		goto err_put_group;
3534 	}
3535 
3536 	ret = panthor_kernel_bo_vmap(group->syncobjs);
3537 	if (ret)
3538 		goto err_put_group;
3539 
3540 	memset(group->syncobjs->kmap, 0,
3541 	       group_args->queues.count * sizeof(struct panthor_syncobj_64b));
3542 
3543 	for (i = 0; i < group_args->queues.count; i++) {
3544 		group->queues[i] = group_create_queue(group, &queue_args[i]);
3545 		if (IS_ERR(group->queues[i])) {
3546 			ret = PTR_ERR(group->queues[i]);
3547 			group->queues[i] = NULL;
3548 			goto err_put_group;
3549 		}
3550 
3551 		group->queue_count++;
3552 	}
3553 
3554 	group->idle_queues = GENMASK(group->queue_count - 1, 0);
3555 
3556 	ret = xa_alloc(&gpool->xa, &gid, group, XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL);
3557 	if (ret)
3558 		goto err_put_group;
3559 
3560 	mutex_lock(&sched->reset.lock);
3561 	if (atomic_read(&sched->reset.in_progress)) {
3562 		panthor_group_stop(group);
3563 	} else {
3564 		mutex_lock(&sched->lock);
3565 		list_add_tail(&group->run_node,
3566 			      &sched->groups.idle[group->priority]);
3567 		mutex_unlock(&sched->lock);
3568 	}
3569 	mutex_unlock(&sched->reset.lock);
3570 
3571 	add_group_kbo_sizes(group->ptdev, group);
3572 	spin_lock_init(&group->fdinfo.lock);
3573 
3574 	group_init_task_info(group);
3575 
3576 	return gid;
3577 
3578 err_put_group:
3579 	group_put(group);
3580 	return ret;
3581 }
3582 
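/**
 * panthor_group_destroy() - Destroy a scheduling group.
 * @pfile: File this group was created on.
 * @group_handle: Handle returned by panthor_group_create().
 *
 * The group is removed from the pool and its queue entities are destroyed.
 * Termination is either delegated to the scheduler tick (if the group is
 * currently assigned a CSG slot) or queued directly.
 *
 * Return: 0 on success, -EINVAL if the handle doesn't match a group.
 */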
3583 int panthor_group_destroy(struct panthor_file *pfile, u32 group_handle)
3584 {
3585 	struct panthor_group_pool *gpool = pfile->groups;
3586 	struct panthor_device *ptdev = pfile->ptdev;
3587 	struct panthor_scheduler *sched = ptdev->scheduler;
3588 	struct panthor_group *group;
3589 
3590 	group = xa_erase(&gpool->xa, group_handle);
3591 	if (!group)
3592 		return -EINVAL;
3593 
3594 	for (u32 i = 0; i < group->queue_count; i++) {
3595 		if (group->queues[i])
3596 			drm_sched_entity_destroy(&group->queues[i]->entity);
3597 	}
3598 
3599 	mutex_lock(&sched->reset.lock);
3600 	mutex_lock(&sched->lock);
3601 	group->destroyed = true;
3602 	if (group->csg_id >= 0) {
3603 		sched_queue_delayed_work(sched, tick, 0);
3604 	} else if (!atomic_read(&sched->reset.in_progress)) {
3605 		/* Remove from the run queues, so the scheduler can't
3606 		 * pick the group on the next tick.
3607 		 */
3608 		list_del_init(&group->run_node);
3609 		list_del_init(&group->wait_node);
3610 		group_queue_work(group, term);
3611 	}
3612 	mutex_unlock(&sched->lock);
3613 	mutex_unlock(&sched->reset.lock);
3614 
3615 	group_put(group);
3616 	return 0;
3617 }
3618 
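/*
 * Return the group matching a handle, with a reference held, or NULL if the
 * handle doesn't match any group in the pool.
 */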
3619 static struct panthor_group *group_from_handle(struct panthor_group_pool *pool,
3620 					       u32 group_handle)
3621 {
3622 	struct panthor_group *group;
3623 
3624 	xa_lock(&pool->xa);
3625 	group = group_get(xa_load(&pool->xa, group_handle));
3626 	xa_unlock(&pool->xa);
3627 
3628 	return group;
3629 }
3630 
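/**
 * panthor_group_get_state() - Collect the state bits of a group.
 * @pfile: File this group was created on.
 * @get_state: Argument/result buffer. The group_handle field selects the
 * group; the state and fatal_queues fields are filled on return.
 *
 * Return: 0 on success, a negative error code otherwise.
 */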
3631 int panthor_group_get_state(struct panthor_file *pfile,
3632 			    struct drm_panthor_group_get_state *get_state)
3633 {
3634 	struct panthor_group_pool *gpool = pfile->groups;
3635 	struct panthor_device *ptdev = pfile->ptdev;
3636 	struct panthor_scheduler *sched = ptdev->scheduler;
3637 	struct panthor_group *group;
3638 
3639 	if (get_state->pad)
3640 		return -EINVAL;
3641 
3642 	group = group_from_handle(gpool, get_state->group_handle);
3643 	if (!group)
3644 		return -EINVAL;
3645 
3646 	memset(get_state, 0, sizeof(*get_state));
3647 
3648 	mutex_lock(&sched->lock);
3649 	if (group->timedout)
3650 		get_state->state |= DRM_PANTHOR_GROUP_STATE_TIMEDOUT;
3651 	if (group->fatal_queues) {
3652 		get_state->state |= DRM_PANTHOR_GROUP_STATE_FATAL_FAULT;
3653 		get_state->fatal_queues = group->fatal_queues;
3654 	}
3655 	if (group->innocent)
3656 		get_state->state |= DRM_PANTHOR_GROUP_STATE_INNOCENT;
3657 	mutex_unlock(&sched->lock);
3658 
3659 	group_put(group);
3660 	return 0;
3661 }
3662 
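/**
 * panthor_group_pool_create() - Allocate the per-file group pool.
 * @pfile: File to attach the pool to.
 *
 * Return: 0 on success, -ENOMEM if the pool couldn't be allocated.
 */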
3663 int panthor_group_pool_create(struct panthor_file *pfile)
3664 {
3665 	struct panthor_group_pool *gpool;
3666 
3667 	gpool = kzalloc(sizeof(*gpool), GFP_KERNEL);
3668 	if (!gpool)
3669 		return -ENOMEM;
3670 
3671 	xa_init_flags(&gpool->xa, XA_FLAGS_ALLOC1);
3672 	pfile->groups = gpool;
3673 	return 0;
3674 }
3675 
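/**
 * panthor_group_pool_destroy() - Destroy all groups of a file and free its pool.
 * @pfile: File whose group pool should be released.
 */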
3676 void panthor_group_pool_destroy(struct panthor_file *pfile)
3677 {
3678 	struct panthor_group_pool *gpool = pfile->groups;
3679 	struct panthor_group *group;
3680 	unsigned long i;
3681 
3682 	if (IS_ERR_OR_NULL(gpool))
3683 		return;
3684 
3685 	xa_for_each(&gpool->xa, i, group)
3686 		panthor_group_destroy(pfile, i);
3687 
3688 	xa_destroy(&gpool->xa);
3689 	kfree(gpool);
3690 	pfile->groups = NULL;
3691 }
3692 
3693 /**
3694  * panthor_fdinfo_gather_group_mem_info() - Retrieve aggregate size of all private kernel BOs
3695  * belonging to all the groups owned by an open Panthor file
3696  * @pfile: File.
3697  * @stats: Memory statistics to be updated.
3698  *
3699  */
3700 void
3701 panthor_fdinfo_gather_group_mem_info(struct panthor_file *pfile,
3702 				     struct drm_memory_stats *stats)
3703 {
3704 	struct panthor_group_pool *gpool = pfile->groups;
3705 	struct panthor_group *group;
3706 	unsigned long i;
3707 
3708 	if (IS_ERR_OR_NULL(gpool))
3709 		return;
3710 
3711 	xa_lock(&gpool->xa);
3712 	xa_for_each(&gpool->xa, i, group) {
3713 		stats->resident += group->fdinfo.kbo_sizes;
3714 		if (group->csg_id >= 0)
3715 			stats->active += group->fdinfo.kbo_sizes;
3716 	}
3717 	xa_unlock(&gpool->xa);
3718 }
3719 
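/* kref release handler: called when the last reference to a job is dropped. */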
3720 static void job_release(struct kref *ref)
3721 {
3722 	struct panthor_job *job = container_of(ref, struct panthor_job, refcount);
3723 
3724 	drm_WARN_ON(&job->group->ptdev->base, !list_empty(&job->node));
3725 
3726 	if (job->base.s_fence)
3727 		drm_sched_job_cleanup(&job->base);
3728 
3729 	if (job->done_fence && job->done_fence->ops)
3730 		dma_fence_put(job->done_fence);
3731 	else
3732 		dma_fence_free(job->done_fence);
3733 
3734 	group_put(job->group);
3735 
3736 	kfree(job);
3737 }
3738 
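/**
 * panthor_job_get() - Get a reference on a job.
 * @sched_job: Job to get a reference on. Can be NULL.
 *
 * Return: @sched_job, for call chaining.
 */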
3739 struct drm_sched_job *panthor_job_get(struct drm_sched_job *sched_job)
3740 {
3741 	if (sched_job) {
3742 		struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
3743 
3744 		kref_get(&job->refcount);
3745 	}
3746 
3747 	return sched_job;
3748 }
3749 
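/**
 * panthor_job_put() - Release a job reference.
 * @sched_job: Job to release the reference on. Can be NULL.
 */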
3750 void panthor_job_put(struct drm_sched_job *sched_job)
3751 {
3752 	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
3753 
3754 	if (sched_job)
3755 		kref_put(&job->refcount, job_release);
3756 }
3757 
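/* Return the VM targeted by a job, which is the VM bound to its group. */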
3758 struct panthor_vm *panthor_job_vm(struct drm_sched_job *sched_job)
3759 {
3760 	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
3761 
3762 	return job->group->vm;
3763 }
3764 
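/**
 * panthor_job_create() - Create a job out of a queue submit request.
 * @pfile: File the job is submitted on.
 * @group_handle: Handle of the group owning the target queue.
 * @qsubmit: Queue submit arguments.
 * @drm_client_id: ID of the DRM client submitting the job.
 *
 * Return: A valid &drm_sched_job on success, an ERR_PTR() otherwise.
 */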
3765 struct drm_sched_job *
3766 panthor_job_create(struct panthor_file *pfile,
3767 		   u16 group_handle,
3768 		   const struct drm_panthor_queue_submit *qsubmit,
3769 		   u64 drm_client_id)
3770 {
3771 	struct panthor_group_pool *gpool = pfile->groups;
3772 	struct panthor_job *job;
3773 	u32 credits;
3774 	int ret;
3775 
3776 	if (qsubmit->pad)
3777 		return ERR_PTR(-EINVAL);
3778 
3779 	/* stream_addr and stream_size must either both be zero or both be non-zero. */
3780 	if ((qsubmit->stream_size == 0) != (qsubmit->stream_addr == 0))
3781 		return ERR_PTR(-EINVAL);
3782 
3783 	/* Make sure the address is aligned to 64 bytes (cacheline) and the size
3784 	 * to 8 bytes (instruction size).
3785 	 */
3786 	if ((qsubmit->stream_addr & 63) || (qsubmit->stream_size & 7))
3787 		return ERR_PTR(-EINVAL);
3788 
3789 	/* bits 24:30 must be zero. */
3790 	if (qsubmit->latest_flush & GENMASK(30, 24))
3791 		return ERR_PTR(-EINVAL);
3792 
3793 	job = kzalloc(sizeof(*job), GFP_KERNEL);
3794 	if (!job)
3795 		return ERR_PTR(-ENOMEM);
3796 
3797 	kref_init(&job->refcount);
3798 	job->queue_idx = qsubmit->queue_index;
3799 	job->call_info.size = qsubmit->stream_size;
3800 	job->call_info.start = qsubmit->stream_addr;
3801 	job->call_info.latest_flush = qsubmit->latest_flush;
3802 	INIT_LIST_HEAD(&job->node);
3803 
3804 	job->group = group_from_handle(gpool, group_handle);
3805 	if (!job->group) {
3806 		ret = -EINVAL;
3807 		goto err_put_job;
3808 	}
3809 
3810 	if (!group_can_run(job->group)) {
3811 		ret = -EINVAL;
3812 		goto err_put_job;
3813 	}
3814 
3815 	if (job->queue_idx >= job->group->queue_count ||
3816 	    !job->group->queues[job->queue_idx]) {
3817 		ret = -EINVAL;
3818 		goto err_put_job;
3819 	}
3820 
3821 	/* Empty command streams don't need a fence; they'll pick the one from
3822 	 * the previously submitted job.
3823 	 */
3824 	if (job->call_info.size) {
3825 		job->done_fence = kzalloc(sizeof(*job->done_fence), GFP_KERNEL);
3826 		if (!job->done_fence) {
3827 			ret = -ENOMEM;
3828 			goto err_put_job;
3829 		}
3830 	}
3831 
3832 	job->profiling.mask = pfile->ptdev->profile_mask;
3833 	credits = calc_job_credits(job->profiling.mask);
3834 	if (credits == 0) {
3835 		ret = -EINVAL;
3836 		goto err_put_job;
3837 	}
3838 
3839 	ret = drm_sched_job_init(&job->base,
3840 				 &job->group->queues[job->queue_idx]->entity,
3841 				 credits, job->group, drm_client_id);
3842 	if (ret)
3843 		goto err_put_job;
3844 
3845 	return &job->base;
3846 
3847 err_put_job:
3848 	panthor_job_put(&job->base);
3849 	return ERR_PTR(ret);
3850 }
3851 
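/* Add the job's finished fence to the dma-resv objects tracked by the group's VM. */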
3852 void panthor_job_update_resvs(struct drm_exec *exec, struct drm_sched_job *sched_job)
3853 {
3854 	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
3855 
3856 	panthor_vm_update_resvs(job->group->vm, exec, &sched_job->s_fence->finished,
3857 				DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
3858 }
3859 
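/**
 * panthor_sched_unplug() - Stop the scheduler on device unplug.
 * @ptdev: Device being unplugged.
 *
 * Cancels the scheduler tick and releases the scheduler's runtime PM
 * reference, if held.
 */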
3860 void panthor_sched_unplug(struct panthor_device *ptdev)
3861 {
3862 	struct panthor_scheduler *sched = ptdev->scheduler;
3863 
3864 	cancel_delayed_work_sync(&sched->tick_work);
3865 
3866 	mutex_lock(&sched->lock);
3867 	if (sched->pm.has_ref) {
3868 		pm_runtime_put(ptdev->base.dev);
3869 		sched->pm.has_ref = false;
3870 	}
3871 	mutex_unlock(&sched->lock);
3872 }
3873 
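/* drmm action releasing the scheduler resources at device teardown. */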
3874 static void panthor_sched_fini(struct drm_device *ddev, void *res)
3875 {
3876 	struct panthor_scheduler *sched = res;
3877 	int prio;
3878 
3879 	if (!sched || !sched->csg_slot_count)
3880 		return;
3881 
3882 	cancel_delayed_work_sync(&sched->tick_work);
3883 
3884 	if (sched->wq)
3885 		destroy_workqueue(sched->wq);
3886 
3887 	if (sched->heap_alloc_wq)
3888 		destroy_workqueue(sched->heap_alloc_wq);
3889 
3890 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
3891 		drm_WARN_ON(ddev, !list_empty(&sched->groups.runnable[prio]));
3892 		drm_WARN_ON(ddev, !list_empty(&sched->groups.idle[prio]));
3893 	}
3894 
3895 	drm_WARN_ON(ddev, !list_empty(&sched->groups.waiting));
3896 }
3897 
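/**
 * panthor_sched_init() - Initialize the FW-assisted scheduler.
 * @ptdev: Device.
 *
 * Queries the group/stream/scoreboard limits from the FW interface, sets up
 * the scheduler state and its workqueues, and registers a drmm action for
 * teardown.
 *
 * Return: 0 on success, a negative error code otherwise.
 */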
3898 int panthor_sched_init(struct panthor_device *ptdev)
3899 {
3900 	struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
3901 	struct panthor_fw_csg_iface *csg_iface = panthor_fw_get_csg_iface(ptdev, 0);
3902 	struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, 0, 0);
3903 	struct panthor_scheduler *sched;
3904 	u32 gpu_as_count, num_groups;
3905 	int prio, ret;
3906 
3907 	sched = drmm_kzalloc(&ptdev->base, sizeof(*sched), GFP_KERNEL);
3908 	if (!sched)
3909 		return -ENOMEM;
3910 
3911 	/* The highest bit in JOB_INT_* is reserved for global IRQs. That
3912 	 * leaves 31 bits for CSG IRQs, hence the MAX_CSGS clamp here.
3913 	 */
3914 	num_groups = min_t(u32, MAX_CSGS, glb_iface->control->group_num);
3915 
3916 	/* The FW-side scheduler might deadlock if two groups with the same
3917 	 * priority try to access overlapping sets of resources: part of the
3918 	 * resources gets allocated to one group and the rest to the other,
3919 	 * with both groups waiting for the remaining resources to be
3920 	 * allocated. To avoid that, it is recommended to assign each CSG a
3921 	 * different priority. In theory we could allow several groups to have
3922 	 * the same CSG priority if they don't request the same resources, but
3923 	 * that makes the scheduling logic more complicated, so let's clamp
3924 	 * the number of CSG slots to MAX_CSG_PRIO + 1 for now.
3925 	 */
3926 	num_groups = min_t(u32, MAX_CSG_PRIO + 1, num_groups);
3927 
3928 	/* We need at least one AS for the MCU and one for the GPU contexts. */
3929 	gpu_as_count = hweight32(ptdev->gpu_info.as_present & GENMASK(31, 1));
3930 	if (!gpu_as_count) {
3931 		drm_err(&ptdev->base, "Not enough AS (%d, expected at least 2)",
3932 			gpu_as_count + 1);
3933 		return -EINVAL;
3934 	}
3935 
3936 	sched->ptdev = ptdev;
3937 	sched->sb_slot_count = CS_FEATURES_SCOREBOARDS(cs_iface->control->features);
3938 	sched->csg_slot_count = num_groups;
3939 	sched->cs_slot_count = csg_iface->control->stream_num;
3940 	sched->as_slot_count = gpu_as_count;
3941 	ptdev->csif_info.csg_slot_count = sched->csg_slot_count;
3942 	ptdev->csif_info.cs_slot_count = sched->cs_slot_count;
3943 	ptdev->csif_info.scoreboard_slot_count = sched->sb_slot_count;
3944 
3945 	sched->last_tick = 0;
3946 	sched->resched_target = U64_MAX;
3947 	sched->tick_period = msecs_to_jiffies(10);
3948 	INIT_DELAYED_WORK(&sched->tick_work, tick_work);
3949 	INIT_WORK(&sched->sync_upd_work, sync_upd_work);
3950 	INIT_WORK(&sched->fw_events_work, process_fw_events_work);
3951 
3952 	ret = drmm_mutex_init(&ptdev->base, &sched->lock);
3953 	if (ret)
3954 		return ret;
3955 
3956 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
3957 		INIT_LIST_HEAD(&sched->groups.runnable[prio]);
3958 		INIT_LIST_HEAD(&sched->groups.idle[prio]);
3959 	}
3960 	INIT_LIST_HEAD(&sched->groups.waiting);
3961 
3962 	ret = drmm_mutex_init(&ptdev->base, &sched->reset.lock);
3963 	if (ret)
3964 		return ret;
3965 
3966 	INIT_LIST_HEAD(&sched->reset.stopped_groups);
3967 
3968 	/* sched->heap_alloc_wq will be used for heap chunk allocation on
3969 	 * tiler OOM events, which means we can't use the same workqueue for
3970 	 * the scheduler, because work items queued by the scheduler are in
3971 	 * the dma-signalling path. Allocate a dedicated heap_alloc_wq to
3972 	 * work around this limitation.
3973 	 *
3974 	 * FIXME: Ultimately, what we need is a failable/non-blocking GEM
3975 	 * allocation path that we can call when a heap OOM is reported. The
3976 	 * FW is smart enough to fall back on other methods if the kernel can't
3977 	 * allocate memory, and fail the tiling job if none of these
3978 	 * countermeasures worked.
3979 	 *
3980 	 * Set WQ_MEM_RECLAIM on sched->wq to unblock the situation when the
3981 	 * system is running out of memory.
3982 	 */
3983 	sched->heap_alloc_wq = alloc_workqueue("panthor-heap-alloc", WQ_UNBOUND, 0);
3984 	sched->wq = alloc_workqueue("panthor-csf-sched", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
3985 	if (!sched->wq || !sched->heap_alloc_wq) {
3986 		panthor_sched_fini(&ptdev->base, sched);
3987 		drm_err(&ptdev->base, "Failed to allocate the workqueues");
3988 		return -ENOMEM;
3989 	}
3990 
3991 	ret = drmm_add_action_or_reset(&ptdev->base, panthor_sched_fini, sched);
3992 	if (ret)
3993 		return ret;
3994 
3995 	ptdev->scheduler = sched;
3996 	return 0;
3997 }
3998