xref: /linux/drivers/gpu/drm/panthor/panthor_sched.c (revision 3027ce13e04eee76539ca65c2cb1028a01c8c508)
1 // SPDX-License-Identifier: GPL-2.0 or MIT
2 /* Copyright 2023 Collabora ltd. */
3 
4 #include <drm/drm_drv.h>
5 #include <drm/drm_exec.h>
6 #include <drm/drm_gem_shmem_helper.h>
7 #include <drm/drm_managed.h>
8 #include <drm/gpu_scheduler.h>
9 #include <drm/panthor_drm.h>
10 
11 #include <linux/build_bug.h>
12 #include <linux/clk.h>
13 #include <linux/delay.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/dma-resv.h>
16 #include <linux/firmware.h>
17 #include <linux/interrupt.h>
18 #include <linux/io.h>
19 #include <linux/iopoll.h>
20 #include <linux/iosys-map.h>
21 #include <linux/module.h>
22 #include <linux/platform_device.h>
23 #include <linux/pm_runtime.h>
24 
25 #include "panthor_devfreq.h"
26 #include "panthor_device.h"
27 #include "panthor_fw.h"
28 #include "panthor_gem.h"
29 #include "panthor_gpu.h"
30 #include "panthor_heap.h"
31 #include "panthor_mmu.h"
32 #include "panthor_regs.h"
33 #include "panthor_sched.h"
34 
35 /**
36  * DOC: Scheduler
37  *
38  * Mali CSF hardware adopts a firmware-assisted scheduling model, where
39  * the firmware takes care of scheduling aspects, to some extent.
40  *
41  * The scheduling happens at the scheduling group level; each group
42  * contains 1 to N queues (N is FW/hardware dependent, and exposed
43  * through the firmware interface). Each queue is assigned a command
44  * stream ring buffer, which serves as a way to get jobs submitted to
45  * the GPU, among other things.
46  *
47  * The firmware can schedule a maximum of M groups (M is FW/hardware
48  * dependent, and exposed through the firmware interface). Past
49  * this maximum number of groups, the kernel must take care of
50  * rotating the groups passed to the firmware so every group gets
51  * a chance to have its queues scheduled for execution.
52  *
53  * The current implementation only supports kernel-mode queues.
54  * In other words, userspace doesn't have access to the ring-buffer.
55  * Instead, userspace passes indirect command stream buffers that are
56  * called from the queue ring-buffer by the kernel using a pre-defined
57  * sequence of command stream instructions to ensure the userspace driver
58  * always gets consistent results (cache maintenance,
59  * synchronization, ...).
60  *
61  * We rely on the drm_gpu_scheduler framework to deal with job
62  * dependencies and submission. As any other driver dealing with a
63  * FW-scheduler, we use the 1:1 entity:scheduler mode, such that each
64  * entity has its own job scheduler. When a job is ready to be executed
65  * (all its dependencies are met), it is pushed to the appropriate
66  * queue ring-buffer, and the group is scheduled for execution if it
67  * wasn't already active.
68  *
69  * Kernel-side group scheduling is timeslice-based. When we have fewer
70  * groups than there are slots, the periodic tick is disabled and we
71  * just let the FW schedule the active groups. When there are more
72  * groups than slots, we give each group a chance to execute for
73  * a given amount of time, and then re-evaluate and pick new groups
74  * to schedule. The group selection algorithm is based on
75  * priority+round-robin.
76  *
77  * Even though user-mode queues are out of scope right now, the
78  * current design takes them into account by avoiding any guess on the
79  * group/queue state that would be based on information we wouldn't have
80  * if userspace was in charge of the ring-buffer. That's also one of the
81  * reasons we don't do 'cooperative' scheduling (encoding FW group slot
82  * reservation as dma_fence that would be returned from the
83  * drm_gpu_scheduler::prepare_job() hook, and treating group rotation as
84  * a queue of waiters, ordered by job submission order). This approach
85  * would work for kernel-mode queues, but would make user-mode queues a
86  * lot more complicated to retrofit.
87  */
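
/*
 * Illustration (made-up numbers): with 4 CSG slots exposed by the FW and
 * 6 runnable groups at the same priority, the tick work rotates groups
 * round-robin within the priority level: groups that waited are picked
 * first, and the evicted ones are re-queued, so every group eventually
 * gets slot time.
 */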
88 
89 #define JOB_TIMEOUT_MS				5000
90 
91 #define MIN_CS_PER_CSG				8
92 
93 #define MIN_CSGS				3
94 #define MAX_CSG_PRIO				0xf
95 
96 struct panthor_group;
97 
98 /**
99  * struct panthor_csg_slot - Command stream group slot
100  *
101  * This represents a FW slot for a scheduling group.
102  */
103 struct panthor_csg_slot {
104 	/** @group: Scheduling group bound to this slot. */
105 	struct panthor_group *group;
106 
107 	/** @priority: Group priority. */
108 	u8 priority;
109 
110 	/**
111 	 * @idle: True if the group bound to this slot is idle.
112 	 *
113 	 * A group is idle when it has nothing waiting for execution on
114 	 * all its queues, or when queues are blocked waiting for something
115 	 * to happen (synchronization object).
116 	 */
117 	bool idle;
118 };
119 
120 /**
121  * enum panthor_csg_priority - Group priority
122  */
123 enum panthor_csg_priority {
124 	/** @PANTHOR_CSG_PRIORITY_LOW: Low priority group. */
125 	PANTHOR_CSG_PRIORITY_LOW = 0,
126 
127 	/** @PANTHOR_CSG_PRIORITY_MEDIUM: Medium priority group. */
128 	PANTHOR_CSG_PRIORITY_MEDIUM,
129 
130 	/** @PANTHOR_CSG_PRIORITY_HIGH: High priority group. */
131 	PANTHOR_CSG_PRIORITY_HIGH,
132 
133 	/**
134 	 * @PANTHOR_CSG_PRIORITY_RT: Real-time priority group.
135 	 *
136 	 * Real-time priority allows one to preempt scheduling of other
137 	 * non-real-time groups. When such a group becomes executable,
138 	 * it will evict the group with the lowest non-rt priority if
139 	 * there's no free group slot available.
140 	 *
141 	 * Currently not exposed to userspace.
142 	 */
143 	PANTHOR_CSG_PRIORITY_RT,
144 
145 	/** @PANTHOR_CSG_PRIORITY_COUNT: Number of priority levels. */
146 	PANTHOR_CSG_PRIORITY_COUNT,
147 };
148 
149 /**
150  * struct panthor_scheduler - Object used to manage the scheduler
151  */
152 struct panthor_scheduler {
153 	/** @ptdev: Device. */
154 	struct panthor_device *ptdev;
155 
156 	/**
157 	 * @wq: Workqueue used by our internal scheduler logic and
158 	 * drm_gpu_scheduler.
159 	 *
160 	 * Used for the scheduler tick, group updates, or other kinds of FW
161 	 * event processing that can't be handled in the threaded interrupt
162 	 * path. Also passed to the drm_gpu_scheduler instances embedded
163 	 * in panthor_queue.
164 	 */
165 	struct workqueue_struct *wq;
166 
167 	/**
168 	 * @heap_alloc_wq: Workqueue used to schedule tiler_oom work items.
169 	 *
170 	 * We have a queue dedicated to heap chunk allocation works to avoid
171 	 * blocking the rest of the scheduler if the allocation tries to
172 	 * reclaim memory.
173 	 */
174 	struct workqueue_struct *heap_alloc_wq;
175 
176 	/** @tick_work: Work executed on a scheduling tick. */
177 	struct delayed_work tick_work;
178 
179 	/**
180 	 * @sync_upd_work: Work used to process synchronization object updates.
181 	 *
182 	 * We use this work to unblock queues/groups that were waiting on a
183 	 * synchronization object.
184 	 */
185 	struct work_struct sync_upd_work;
186 
187 	/**
188 	 * @fw_events_work: Work used to process FW events outside the interrupt path.
189 	 *
190 	 * Even if the interrupt is threaded, we need any event processing
191 	 * that requires taking the panthor_scheduler::lock to be processed
192 	 * outside the interrupt path so we don't block the tick logic when
193 	 * it calls panthor_fw_{csg,wait}_wait_acks(). Since most of the
194 	 * event processing requires taking this lock, we just delegate all
195 	 * FW event processing to the scheduler workqueue.
196 	 */
197 	struct work_struct fw_events_work;
198 
199 	/**
200 	 * @fw_events: Bitmask encoding pending FW events.
201 	 */
202 	atomic_t fw_events;
203 
204 	/**
205 	 * @resched_target: When the next tick should occur.
206 	 *
207 	 * Expressed in jiffies.
208 	 */
209 	u64 resched_target;
210 
211 	/**
212 	 * @last_tick: When the last tick occurred.
213 	 *
214 	 * Expressed in jiffies.
215 	 */
216 	u64 last_tick;
217 
218 	/** @tick_period: Tick period in jiffies. */
219 	u64 tick_period;
220 
221 	/**
222 	 * @lock: Lock protecting access to all the scheduler fields.
223 	 *
224 	 * Should be taken in the tick work, the irq handler, and anywhere the @groups
225 	 * fields are touched.
226 	 */
227 	struct mutex lock;
228 
229 	/** @groups: Various lists used to classify groups. */
230 	struct {
231 		/**
232 		 * @runnable: Runnable group lists.
233 		 *
234 		 * When a group has queues that want to execute something,
235 		 * its panthor_group::run_node should be inserted here.
236 		 *
237 		 * One list per priority.
238 		 */
239 		struct list_head runnable[PANTHOR_CSG_PRIORITY_COUNT];
240 
241 		/**
242 		 * @idle: Idle group lists.
243 		 *
244 		 * When all queues of a group are idle (either because they
245 		 * have nothing to execute, or because they are blocked), the
246 		 * panthor_group::run_node field should be inserted here.
247 		 *
248 		 * One list per priority.
249 		 */
250 		struct list_head idle[PANTHOR_CSG_PRIORITY_COUNT];
251 
252 		/**
253 		 * @waiting: List of groups whose queues are blocked on a
254 		 * synchronization object.
255 		 *
256 		 * Insert panthor_group::wait_node here when a group is waiting
257 		 * for synchronization objects to be signaled.
258 		 *
259 		 * This list is evaluated in the @sync_upd_work work.
260 		 */
261 		struct list_head waiting;
262 	} groups;
263 
264 	/**
265 	 * @csg_slots: FW command stream group slots.
266 	 */
267 	struct panthor_csg_slot csg_slots[MAX_CSGS];
268 
269 	/** @csg_slot_count: Number of command stream group slots exposed by the FW. */
270 	u32 csg_slot_count;
271 
272 	/** @cs_slot_count: Number of command stream slots per group slot exposed by the FW. */
273 	u32 cs_slot_count;
274 
275 	/** @as_slot_count: Number of address space slots supported by the MMU. */
276 	u32 as_slot_count;
277 
278 	/** @used_csg_slot_count: Number of command stream group slots currently used. */
279 	u32 used_csg_slot_count;
280 
281 	/** @sb_slot_count: Number of scoreboard slots. */
282 	u32 sb_slot_count;
283 
284 	/**
285 	 * @might_have_idle_groups: True if an active group might have become idle.
286 	 *
287 	 * This will force a tick, so other runnable groups can be scheduled if one
288 	 * or more active groups became idle.
289 	 */
290 	bool might_have_idle_groups;
291 
292 	/** @pm: Power management related fields. */
293 	struct {
294 		/** @has_ref: True if the scheduler owns a runtime PM reference. */
295 		bool has_ref;
296 	} pm;
297 
298 	/** @reset: Reset related fields. */
299 	struct {
300 		/** @lock: Lock protecting the other reset fields. */
301 		struct mutex lock;
302 
303 		/**
304 		 * @in_progress: True if a reset is in progress.
305 		 *
306 		 * Set to true in panthor_sched_pre_reset() and back to false in
307 		 * panthor_sched_post_reset().
308 		 */
309 		atomic_t in_progress;
310 
311 		/**
312 		 * @stopped_groups: List containing all groups that were stopped
313 		 * before a reset.
314 		 *
315 		 * Insert panthor_group::run_node in the pre_reset path.
316 		 */
317 		struct list_head stopped_groups;
318 	} reset;
319 };
320 
321 /**
322  * struct panthor_syncobj_32b - 32-bit FW synchronization object
323  */
324 struct panthor_syncobj_32b {
325 	/** @seqno: Sequence number. */
326 	u32 seqno;
327 
328 	/**
329 	 * @status: Status.
330 	 *
331 	 * Not zero on failure.
332 	 */
333 	u32 status;
334 };
335 
336 /**
337  * struct panthor_syncobj_64b - 64-bit FW synchronization object
338  */
339 struct panthor_syncobj_64b {
340 	/** @seqno: Sequence number. */
341 	u64 seqno;
342 
343 	/**
344 	 * @status: Status.
345 	 *
346 	 * Not zero on failure.
347 	 */
348 	u32 status;
349 
350 	/** @pad: MBZ. */
351 	u32 pad;
352 };
353 
354 /**
355  * struct panthor_queue - Execution queue
356  */
357 struct panthor_queue {
358 	/** @scheduler: DRM scheduler used for this queue. */
359 	struct drm_gpu_scheduler scheduler;
360 
361 	/** @entity: DRM scheduling entity used for this queue. */
362 	struct drm_sched_entity entity;
363 
364 	/**
365 	 * @remaining_time: Time remaining before the job timeout expires.
366 	 *
367 	 * The job timeout is suspended when the queue is not scheduled by the
368 	 * FW. Every time we suspend the timer, we need to save the remaining
369 	 * time so we can restore it later on.
370 	 */
371 	unsigned long remaining_time;
372 
373 	/** @timeout_suspended: True if the job timeout was suspended. */
374 	bool timeout_suspended;
375 
376 	/**
377 	 * @doorbell_id: Doorbell assigned to this queue.
378 	 *
379 	 * Right now, all groups share the same doorbell, and the doorbell ID
380 	 * is assigned to group_slot + 1 when the group is assigned a slot. But
381 	 * we might decide to provide fine grained doorbell assignment at some
382 	 * point, so don't have to wake up all queues in a group every time one
383 	 * of them is updated.
384 	 */
385 	u8 doorbell_id;
386 
387 	/**
388 	 * @priority: Priority of the queue inside the group.
389 	 *
390 	 * Must be less than 16 (Only 4 bits available).
391 	 */
392 	u8 priority;
393 #define CSF_MAX_QUEUE_PRIO	GENMASK(3, 0)
394 
395 	/** @ringbuf: Command stream ring-buffer. */
396 	struct panthor_kernel_bo *ringbuf;
397 
398 	/** @iface: Firmware interface. */
399 	struct {
400 		/** @mem: FW memory allocated for this interface. */
401 		struct panthor_kernel_bo *mem;
402 
403 		/** @input: Input interface. */
404 		struct panthor_fw_ringbuf_input_iface *input;
405 
406 		/** @output: Output interface. */
407 		const struct panthor_fw_ringbuf_output_iface *output;
408 
409 		/** @input_fw_va: FW virtual address of the input interface buffer. */
410 		u32 input_fw_va;
411 
412 		/** @output_fw_va: FW virtual address of the output interface buffer. */
413 		u32 output_fw_va;
414 	} iface;
415 
416 	/**
417 	 * @syncwait: Stores information about the synchronization object this
418 	 * queue is waiting on.
419 	 */
420 	struct {
421 		/** @gpu_va: GPU address of the synchronization object. */
422 		u64 gpu_va;
423 
424 		/** @ref: Reference value to compare against. */
425 		u64 ref;
426 
427 		/** @gt: True if this is a greater-than test. */
428 		bool gt;
429 
430 		/** @sync64: True if this is a 64-bit sync object. */
431 		bool sync64;
432 
433 		/** @obj: Buffer object holding the synchronization object. */
434 		struct drm_gem_object *obj;
435 
436 		/** @offset: Offset of the synchronization object inside @obj. */
437 		u64 offset;
438 
439 		/**
440 		 * @kmap: Kernel mapping of the buffer object holding the
441 		 * synchronization object.
442 		 */
443 		void *kmap;
444 	} syncwait;
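
	/*
	 * Sketch of the wait test implied by the fields above (the actual
	 * evaluation lives in the sync update path, outside this excerpt):
	 * a @gt wait unblocks once the sync object seqno is greater than
	 * @ref, otherwise presumably once it is less than or equal to @ref.
	 */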
445 
446 	/** @fence_ctx: Fence context fields. */
447 	struct {
448 		/** @lock: Used to protect access to all fences allocated by this context. */
449 		spinlock_t lock;
450 
451 		/**
452 		 * @id: Fence context ID.
453 		 *
454 		 * Allocated with dma_fence_context_alloc().
455 		 */
456 		u64 id;
457 
458 		/** @seqno: Sequence number of the last initialized fence. */
459 		atomic64_t seqno;
460 
461 		/**
462 		 * @in_flight_jobs: List containing all in-flight jobs.
463 		 *
464 		 * Used to track in-flight jobs and signal panthor_job::done_fence
465 		 * when the synchronization object attached to the queue is signaled.
466 		 */
467 		struct list_head in_flight_jobs;
468 	} fence_ctx;
469 };
470 
471 /**
472  * enum panthor_group_state - Scheduling group state.
473  */
474 enum panthor_group_state {
475 	/** @PANTHOR_CS_GROUP_CREATED: Group was created, but not scheduled yet. */
476 	PANTHOR_CS_GROUP_CREATED,
477 
478 	/** @PANTHOR_CS_GROUP_ACTIVE: Group is currently scheduled. */
479 	PANTHOR_CS_GROUP_ACTIVE,
480 
481 	/**
482 	 * @PANTHOR_CS_GROUP_SUSPENDED: Group was scheduled at least once, but is
483 	 * inactive/suspended right now.
484 	 */
485 	PANTHOR_CS_GROUP_SUSPENDED,
486 
487 	/**
488 	 * @PANTHOR_CS_GROUP_TERMINATED: Group was terminated.
489 	 *
490 	 * Can no longer be scheduled. The only allowed action is destruction.
491 	 */
492 	PANTHOR_CS_GROUP_TERMINATED,
493 };
494 
495 /**
496  * struct panthor_group - Scheduling group object
497  */
498 struct panthor_group {
499 	/** @refcount: Reference count */
500 	struct kref refcount;
501 
502 	/** @ptdev: Device. */
503 	struct panthor_device *ptdev;
504 
505 	/** @vm: VM bound to the group. */
506 	struct panthor_vm *vm;
507 
508 	/** @compute_core_mask: Mask of shader cores that can be used for compute jobs. */
509 	u64 compute_core_mask;
510 
511 	/** @fragment_core_mask: Mask of shader cores that can be used for fragment jobs. */
512 	u64 fragment_core_mask;
513 
514 	/** @tiler_core_mask: Mask of tiler cores that can be used for tiler jobs. */
515 	u64 tiler_core_mask;
516 
517 	/** @max_compute_cores: Maximum number of shader cores used for compute jobs. */
518 	u8 max_compute_cores;
519 
520 	/** @max_fragment_cores: Maximum number of shader cores used for fragment jobs. */
521 	u8 max_fragment_cores;
522 
523 	/** @max_tiler_cores: Maximum number of tiler cores used for tiler jobs. */
524 	u8 max_tiler_cores;
525 
526 	/** @priority: Group priority (check panthor_csg_priority). */
527 	u8 priority;
528 
529 	/** @blocked_queues: Bitmask reflecting the blocked queues. */
530 	u32 blocked_queues;
531 
532 	/** @idle_queues: Bitmask reflecting the idle queues. */
533 	u32 idle_queues;
534 
535 	/** @fatal_lock: Lock used to protect access to fatal fields. */
536 	spinlock_t fatal_lock;
537 
538 	/** @fatal_queues: Bitmask reflecting the queues that hit a fatal exception. */
539 	u32 fatal_queues;
540 
541 	/** @tiler_oom: Mask of queues that have a tiler OOM event to process. */
542 	atomic_t tiler_oom;
543 
544 	/** @queue_count: Number of queues in this group. */
545 	u32 queue_count;
546 
547 	/** @queues: Queues owned by this group. */
548 	struct panthor_queue *queues[MAX_CS_PER_CSG];
549 
550 	/**
551 	 * @csg_id: ID of the FW group slot.
552 	 *
553 	 * -1 when the group is not scheduled/active.
554 	 */
555 	int csg_id;
556 
557 	/**
558 	 * @destroyed: True when the group has been destroyed.
559 	 *
560 	 * If a group is destroyed it becomes useless: no further jobs can be submitted
561 	 * to its queues. We simply wait for all references to be dropped so we can
562 	 * release the group object.
563 	 */
564 	bool destroyed;
565 
566 	/**
567 	 * @timedout: True when a timeout occurred on any of the queues owned by
568 	 * this group.
569 	 *
570 	 * Timeouts can be reported by drm_sched or by the FW. In any case, any
571 	 * timeout situation is unrecoverable, and the group becomes useless.
572 	 * We simply wait for all references to be dropped so we can release the
573 	 * group object.
574 	 */
575 	bool timedout;
576 
577 	/**
578 	 * @syncobjs: Pool of per-queue synchronization objects.
579 	 *
580 	 * One sync object per queue. The position of the sync object is
581 	 * determined by the queue index.
582 	 */
583 	struct panthor_kernel_bo *syncobjs;
584 
585 	/** @state: Group state. */
586 	enum panthor_group_state state;
587 
588 	/**
589 	 * @suspend_buf: Suspend buffer.
590 	 *
591 	 * Stores the state of the group and its queues when a group is suspended.
592 	 * Used at resume time to restore the group in its previous state.
593 	 *
594 	 * The size of the suspend buffer is exposed through the FW interface.
595 	 */
596 	struct panthor_kernel_bo *suspend_buf;
597 
598 	/**
599 	 * @protm_suspend_buf: Protection mode suspend buffer.
600 	 *
601 	 * Stores the state of the group and its queues when a group that's in
602 	 * protection mode is suspended.
603 	 *
604 	 * Used at resume time to restore the group in its previous state.
605 	 *
606 	 * The size of the protection mode suspend buffer is exposed through the
607 	 * FW interface.
608 	 */
609 	struct panthor_kernel_bo *protm_suspend_buf;
610 
611 	/** @sync_upd_work: Work used to check/signal job fences. */
612 	struct work_struct sync_upd_work;
613 
614 	/** @tiler_oom_work: Work used to process tiler OOM events happening on this group. */
615 	struct work_struct tiler_oom_work;
616 
617 	/** @term_work: Work used to finish the group termination procedure. */
618 	struct work_struct term_work;
619 
620 	/**
621 	 * @release_work: Work used to release group resources.
622 	 *
623 	 * We need to postpone the group release to avoid a deadlock when
624 	 * the last ref is released in the tick work.
625 	 */
626 	struct work_struct release_work;
627 
628 	/**
629 	 * @run_node: Node used to insert the group in the
630 	 * panthor_scheduler::groups::{runnable,idle} and
631 	 * panthor_scheduler::reset.stopped_groups lists.
632 	 */
633 	struct list_head run_node;
634 
635 	/**
636 	 * @wait_node: Node used to insert the group in the
637 	 * panthor_scheduler::groups::waiting list.
638 	 */
639 	struct list_head wait_node;
640 };
641 
642 /**
643  * group_queue_work() - Queue a group work
644  * @group: Group to queue the work for.
645  * @wname: Work name.
646  *
647  * Grabs a ref and queues a work item to the scheduler workqueue. If
648  * the work was already queued, we release the reference we grabbed.
649  *
650  * Work callbacks must release the reference we grabbed here.
651  */
652 #define group_queue_work(group, wname) \
653 	do { \
654 		group_get(group); \
655 		if (!queue_work((group)->ptdev->scheduler->wq, &(group)->wname ## _work)) \
656 			group_put(group); \
657 	} while (0)
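
/*
 * Example: group_queue_work(group, sync_upd) grabs a group ref and queues
 * group->sync_upd_work (see csg_slot_sync_update_locked()).
 */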
658 
659 /**
660  * sched_queue_work() - Queue a scheduler work.
661  * @sched: Scheduler object.
662  * @wname: Work name.
663  *
664  * Conditionally queues a scheduler work if no reset is pending/in-progress.
665  */
666 #define sched_queue_work(sched, wname) \
667 	do { \
668 		if (!atomic_read(&(sched)->reset.in_progress) && \
669 		    !panthor_device_reset_is_pending((sched)->ptdev)) \
670 			queue_work((sched)->wq, &(sched)->wname ## _work); \
671 	} while (0)
672 
673 /**
674  * sched_queue_delayed_work() - Queue a scheduler delayed work.
675  * @sched: Scheduler object.
676  * @wname: Work name.
677  * @delay: Work delay in jiffies.
678  *
679  * Conditionally queues a scheduler delayed work if no reset is
680  * pending/in-progress.
681  */
682 #define sched_queue_delayed_work(sched, wname, delay) \
683 	do { \
684 		if (!atomic_read(&(sched)->reset.in_progress) && \
685 		    !panthor_device_reset_is_pending((sched)->ptdev)) \
686 			mod_delayed_work((sched)->wq, &(sched)->wname ## _work, delay); \
687 	} while (0)
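
/*
 * Example: sched_queue_delayed_work(sched, tick, 0) requests an immediate
 * scheduler tick, as done when idle or fatal events come in.
 */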
688 
689 /*
690  * We currently set the maximum number of groups per file to an arbitrarily
691  * low value. But this can be updated if we need more.
692  */
693 #define MAX_GROUPS_PER_POOL 128
694 
695 /**
696  * struct panthor_group_pool - Group pool
697  *
698  * Each file gets assigned a group pool.
699  */
700 struct panthor_group_pool {
701 	/** @xa: Xarray used to manage group handles. */
702 	struct xarray xa;
703 };
704 
705 /**
706  * struct panthor_job - Used to manage a GPU job
707  */
708 struct panthor_job {
709 	/** @base: Inherit from drm_sched_job. */
710 	struct drm_sched_job base;
711 
712 	/** @refcount: Reference count. */
713 	struct kref refcount;
714 
715 	/** @group: Group of the queue this job will be pushed to. */
716 	struct panthor_group *group;
717 
718 	/** @queue_idx: Index of the queue inside @group. */
719 	u32 queue_idx;
720 
721 	/** @call_info: Information about the userspace command stream call. */
722 	struct {
723 		/** @start: GPU address of the userspace command stream. */
724 		u64 start;
725 
726 		/** @size: Size of the userspace command stream. */
727 		u32 size;
728 
729 		/**
730 		 * @latest_flush: Flush ID at the time the userspace command
731 		 * stream was built.
732 		 *
733 		 * Needed for the flush reduction mechanism.
734 		 */
735 		u32 latest_flush;
736 	} call_info;
737 
738 	/** @ringbuf: Position of this job in the ring buffer. */
739 	struct {
740 		/** @start: Start offset. */
741 		u64 start;
742 
743 		/** @end: End offset. */
744 		u64 end;
745 	} ringbuf;
746 
747 	/**
748 	 * @node: Used to insert the job in the panthor_queue::fence_ctx::in_flight_jobs
749 	 * list.
750 	 */
751 	struct list_head node;
752 
753 	/** @done_fence: Fence signaled when the job is finished or cancelled. */
754 	struct dma_fence *done_fence;
755 };
756 
757 static void
758 panthor_queue_put_syncwait_obj(struct panthor_queue *queue)
759 {
760 	if (queue->syncwait.kmap) {
761 		struct iosys_map map = IOSYS_MAP_INIT_VADDR(queue->syncwait.kmap);
762 
763 		drm_gem_vunmap_unlocked(queue->syncwait.obj, &map);
764 		queue->syncwait.kmap = NULL;
765 	}
766 
767 	drm_gem_object_put(queue->syncwait.obj);
768 	queue->syncwait.obj = NULL;
769 }
770 
771 static void *
772 panthor_queue_get_syncwait_obj(struct panthor_group *group, struct panthor_queue *queue)
773 {
774 	struct panthor_device *ptdev = group->ptdev;
775 	struct panthor_gem_object *bo;
776 	struct iosys_map map;
777 	int ret;
778 
779 	if (queue->syncwait.kmap)
780 		return queue->syncwait.kmap + queue->syncwait.offset;
781 
782 	bo = panthor_vm_get_bo_for_va(group->vm,
783 				      queue->syncwait.gpu_va,
784 				      &queue->syncwait.offset);
785 	if (drm_WARN_ON(&ptdev->base, IS_ERR_OR_NULL(bo)))
786 		goto err_put_syncwait_obj;
787 
788 	queue->syncwait.obj = &bo->base.base;
789 	ret = drm_gem_vmap_unlocked(queue->syncwait.obj, &map);
790 	if (drm_WARN_ON(&ptdev->base, ret))
791 		goto err_put_syncwait_obj;
792 
793 	queue->syncwait.kmap = map.vaddr;
794 	if (drm_WARN_ON(&ptdev->base, !queue->syncwait.kmap))
795 		goto err_put_syncwait_obj;
796 
797 	return queue->syncwait.kmap + queue->syncwait.offset;
798 
799 err_put_syncwait_obj:
800 	panthor_queue_put_syncwait_obj(queue);
801 	return NULL;
802 }
803 
804 static void group_free_queue(struct panthor_group *group, struct panthor_queue *queue)
805 {
806 	if (IS_ERR_OR_NULL(queue))
807 		return;
808 
809 	if (queue->entity.fence_context)
810 		drm_sched_entity_destroy(&queue->entity);
811 
812 	if (queue->scheduler.ops)
813 		drm_sched_fini(&queue->scheduler);
814 
815 	panthor_queue_put_syncwait_obj(queue);
816 
817 	panthor_kernel_bo_destroy(group->vm, queue->ringbuf);
818 	panthor_kernel_bo_destroy(panthor_fw_vm(group->ptdev), queue->iface.mem);
819 
820 	kfree(queue);
821 }
822 
823 static void group_release_work(struct work_struct *work)
824 {
825 	struct panthor_group *group = container_of(work,
826 						   struct panthor_group,
827 						   release_work);
828 	struct panthor_device *ptdev = group->ptdev;
829 	u32 i;
830 
831 	for (i = 0; i < group->queue_count; i++)
832 		group_free_queue(group, group->queues[i]);
833 
834 	panthor_kernel_bo_destroy(panthor_fw_vm(ptdev), group->suspend_buf);
835 	panthor_kernel_bo_destroy(panthor_fw_vm(ptdev), group->protm_suspend_buf);
836 	panthor_kernel_bo_destroy(group->vm, group->syncobjs);
837 
838 	panthor_vm_put(group->vm);
839 	kfree(group);
840 }
841 
842 static void group_release(struct kref *kref)
843 {
844 	struct panthor_group *group = container_of(kref,
845 						   struct panthor_group,
846 						   refcount);
847 	struct panthor_device *ptdev = group->ptdev;
848 
849 	drm_WARN_ON(&ptdev->base, group->csg_id >= 0);
850 	drm_WARN_ON(&ptdev->base, !list_empty(&group->run_node));
851 	drm_WARN_ON(&ptdev->base, !list_empty(&group->wait_node));
852 
853 	queue_work(panthor_cleanup_wq, &group->release_work);
854 }
855 
856 static void group_put(struct panthor_group *group)
857 {
858 	if (group)
859 		kref_put(&group->refcount, group_release);
860 }
861 
862 static struct panthor_group *
863 group_get(struct panthor_group *group)
864 {
865 	if (group)
866 		kref_get(&group->refcount);
867 
868 	return group;
869 }
870 
871 /**
872  * group_bind_locked() - Bind a group to a group slot
873  * @group: Group.
874  * @csg_id: Slot.
875  *
876  * Return: 0 on success, a negative error code otherwise.
877  */
878 static int
879 group_bind_locked(struct panthor_group *group, u32 csg_id)
880 {
881 	struct panthor_device *ptdev = group->ptdev;
882 	struct panthor_csg_slot *csg_slot;
883 	int ret;
884 
885 	lockdep_assert_held(&ptdev->scheduler->lock);
886 
887 	if (drm_WARN_ON(&ptdev->base, group->csg_id != -1 || csg_id >= MAX_CSGS ||
888 			ptdev->scheduler->csg_slots[csg_id].group))
889 		return -EINVAL;
890 
891 	ret = panthor_vm_active(group->vm);
892 	if (ret)
893 		return ret;
894 
895 	csg_slot = &ptdev->scheduler->csg_slots[csg_id];
896 	group_get(group);
897 	group->csg_id = csg_id;
898 
899 	/* Dummy doorbell allocation: doorbell is assigned to the group and
900 	 * all queues use the same doorbell.
901 	 *
902 	 * TODO: Implement LRU-based doorbell assignment, so the most often
903 	 * updated queues get their own doorbell, thus avoiding useless checks
904 	 * on queues belonging to the same group that are rarely updated.
905 	 */
906 	for (u32 i = 0; i < group->queue_count; i++)
907 		group->queues[i]->doorbell_id = csg_id + 1;
908 
909 	csg_slot->group = group;
910 
911 	return 0;
912 }
913 
914 /**
915  * group_unbind_locked() - Unbind a group from a slot.
916  * @group: Group to unbind.
917  *
918  * Return: 0 on success, a negative error code otherwise.
919  */
920 static int
921 group_unbind_locked(struct panthor_group *group)
922 {
923 	struct panthor_device *ptdev = group->ptdev;
924 	struct panthor_csg_slot *slot;
925 
926 	lockdep_assert_held(&ptdev->scheduler->lock);
927 
928 	if (drm_WARN_ON(&ptdev->base, group->csg_id < 0 || group->csg_id >= MAX_CSGS))
929 		return -EINVAL;
930 
931 	if (drm_WARN_ON(&ptdev->base, group->state == PANTHOR_CS_GROUP_ACTIVE))
932 		return -EINVAL;
933 
934 	slot = &ptdev->scheduler->csg_slots[group->csg_id];
935 	panthor_vm_idle(group->vm);
936 	group->csg_id = -1;
937 
938 	/* Tiler OOM events will be re-issued next time the group is scheduled. */
939 	atomic_set(&group->tiler_oom, 0);
940 	cancel_work(&group->tiler_oom_work);
941 
942 	for (u32 i = 0; i < group->queue_count; i++)
943 		group->queues[i]->doorbell_id = -1;
944 
945 	slot->group = NULL;
946 
947 	group_put(group);
948 	return 0;
949 }
950 
951 /**
952  * cs_slot_prog_locked() - Program a queue slot
953  * @ptdev: Device.
954  * @csg_id: Group slot ID.
955  * @cs_id: Queue slot ID.
956  *
957  * Program a queue slot with the queue information so things can start being
958  * executed on this queue.
959  *
960  * The group slot must have a group bound to it already (group_bind_locked()).
961  */
962 static void
963 cs_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
964 {
965 	struct panthor_queue *queue = ptdev->scheduler->csg_slots[csg_id].group->queues[cs_id];
966 	struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
967 
968 	lockdep_assert_held(&ptdev->scheduler->lock);
969 
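	/* Seed the host-written extract pointer with the FW-reported one
	 * before (re)starting the queue; an insert pointer behind extract
	 * would indicate a corrupted ring-buffer state.
	 */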
970 	queue->iface.input->extract = queue->iface.output->extract;
971 	drm_WARN_ON(&ptdev->base, queue->iface.input->insert < queue->iface.input->extract);
972 
973 	cs_iface->input->ringbuf_base = panthor_kernel_bo_gpuva(queue->ringbuf);
974 	cs_iface->input->ringbuf_size = panthor_kernel_bo_size(queue->ringbuf);
975 	cs_iface->input->ringbuf_input = queue->iface.input_fw_va;
976 	cs_iface->input->ringbuf_output = queue->iface.output_fw_va;
977 	cs_iface->input->config = CS_CONFIG_PRIORITY(queue->priority) |
978 				  CS_CONFIG_DOORBELL(queue->doorbell_id);
979 	cs_iface->input->ack_irq_mask = ~0;
980 	panthor_fw_update_reqs(cs_iface, req,
981 			       CS_IDLE_SYNC_WAIT |
982 			       CS_IDLE_EMPTY |
983 			       CS_STATE_START |
984 			       CS_EXTRACT_EVENT,
985 			       CS_IDLE_SYNC_WAIT |
986 			       CS_IDLE_EMPTY |
987 			       CS_STATE_MASK |
988 			       CS_EXTRACT_EVENT);
989 	if (queue->iface.input->insert != queue->iface.input->extract && queue->timeout_suspended) {
990 		drm_sched_resume_timeout(&queue->scheduler, queue->remaining_time);
991 		queue->timeout_suspended = false;
992 	}
993 }
994 
995 /**
996  * cs_slot_reset_locked() - Reset a queue slot
997  * @ptdev: Device.
998  * @csg_id: Group slot.
999  * @cs_id: Queue slot.
1000  *
1001  * Change the queue slot state to STOP and suspend the queue timeout if
1002  * the queue is not blocked.
1003  *
1004  * The group slot must have a group bound to it (group_bind_locked()).
1005  */
1006 static int
1007 cs_slot_reset_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
1008 {
1009 	struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1010 	struct panthor_group *group = ptdev->scheduler->csg_slots[csg_id].group;
1011 	struct panthor_queue *queue = group->queues[cs_id];
1012 
1013 	lockdep_assert_held(&ptdev->scheduler->lock);
1014 
1015 	panthor_fw_update_reqs(cs_iface, req,
1016 			       CS_STATE_STOP,
1017 			       CS_STATE_MASK);
1018 
1019 	/* If the queue is blocked, we want to keep the timeout running, so
1020 	 * we can detect unbounded waits and kill the group when that happens.
1021 	 */
1022 	if (!(group->blocked_queues & BIT(cs_id)) && !queue->timeout_suspended) {
1023 		queue->remaining_time = drm_sched_suspend_timeout(&queue->scheduler);
1024 		queue->timeout_suspended = true;
1025 		WARN_ON(queue->remaining_time > msecs_to_jiffies(JOB_TIMEOUT_MS));
1026 	}
1027 
1028 	return 0;
1029 }
1030 
1031 /**
1032  * csg_slot_sync_priority_locked() - Synchronize the group slot priority
1033  * @ptdev: Device.
1034  * @csg_id: Group slot ID.
1035  *
1036  * Group slot priority update happens asynchronously. When we receive a
1037  * %CSG_ENDPOINT_CONFIG acknowledgment, we know the update is effective,
1038  * and can reflect it in our panthor_csg_slot object.
1039  */
1040 static void
1041 csg_slot_sync_priority_locked(struct panthor_device *ptdev, u32 csg_id)
1042 {
1043 	struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1044 	struct panthor_fw_csg_iface *csg_iface;
1045 
1046 	lockdep_assert_held(&ptdev->scheduler->lock);
1047 
1048 	csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
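	/* The priority is encoded in the top nibble of endpoint_req (bits
	 * [31:28], see CSG_EP_REQ_PRIORITY()), hence the shift by 28.
	 */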
1049 	csg_slot->priority = (csg_iface->input->endpoint_req & CSG_EP_REQ_PRIORITY_MASK) >> 28;
1050 }
1051 
1052 /**
1053  * cs_slot_sync_queue_state_locked() - Synchronize the queue slot priority
1054  * @ptdev: Device.
1055  * @csg_id: Group slot.
1056  * @cs_id: Queue slot.
1057  *
1058  * Queue state is updated on group suspend or STATUS_UPDATE event.
1059  */
1060 static void
1061 cs_slot_sync_queue_state_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
1062 {
1063 	struct panthor_group *group = ptdev->scheduler->csg_slots[csg_id].group;
1064 	struct panthor_queue *queue = group->queues[cs_id];
1065 	struct panthor_fw_cs_iface *cs_iface =
1066 		panthor_fw_get_cs_iface(group->ptdev, csg_id, cs_id);
1067 
1068 	u32 status_wait_cond;
1069 
1070 	switch (cs_iface->output->status_blocked_reason) {
1071 	case CS_STATUS_BLOCKED_REASON_UNBLOCKED:
1072 		if (queue->iface.input->insert == queue->iface.output->extract &&
1073 		    cs_iface->output->status_scoreboards == 0)
1074 			group->idle_queues |= BIT(cs_id);
1075 		break;
1076 
1077 	case CS_STATUS_BLOCKED_REASON_SYNC_WAIT:
1078 		if (list_empty(&group->wait_node)) {
1079 			list_move_tail(&group->wait_node,
1080 				       &group->ptdev->scheduler->groups.waiting);
1081 		}
1082 		group->blocked_queues |= BIT(cs_id);
1083 		queue->syncwait.gpu_va = cs_iface->output->status_wait_sync_ptr;
1084 		queue->syncwait.ref = cs_iface->output->status_wait_sync_value;
1085 		status_wait_cond = cs_iface->output->status_wait & CS_STATUS_WAIT_SYNC_COND_MASK;
1086 		queue->syncwait.gt = status_wait_cond == CS_STATUS_WAIT_SYNC_COND_GT;
1087 		if (cs_iface->output->status_wait & CS_STATUS_WAIT_SYNC_64B) {
1088 			u64 sync_val_hi = cs_iface->output->status_wait_sync_value_hi;
1089 
1090 			queue->syncwait.sync64 = true;
1091 			queue->syncwait.ref |= sync_val_hi << 32;
1092 		} else {
1093 			queue->syncwait.sync64 = false;
1094 		}
1095 		break;
1096 
1097 	default:
1098 		/* Other reasons are not blocking. Consider the queue as runnable
1099 		 * in those cases.
1100 		 */
1101 		break;
1102 	}
1103 }
1104 
1105 static void
1106 csg_slot_sync_queues_state_locked(struct panthor_device *ptdev, u32 csg_id)
1107 {
1108 	struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1109 	struct panthor_group *group = csg_slot->group;
1110 	u32 i;
1111 
1112 	lockdep_assert_held(&ptdev->scheduler->lock);
1113 
1114 	group->idle_queues = 0;
1115 	group->blocked_queues = 0;
1116 
1117 	for (i = 0; i < group->queue_count; i++) {
1118 		if (group->queues[i])
1119 			cs_slot_sync_queue_state_locked(ptdev, csg_id, i);
1120 	}
1121 }
1122 
1123 static void
1124 csg_slot_sync_state_locked(struct panthor_device *ptdev, u32 csg_id)
1125 {
1126 	struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1127 	struct panthor_fw_csg_iface *csg_iface;
1128 	struct panthor_group *group;
1129 	enum panthor_group_state new_state, old_state;
1130 
1131 	lockdep_assert_held(&ptdev->scheduler->lock);
1132 
1133 	csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1134 	group = csg_slot->group;
1135 
1136 	if (!group)
1137 		return;
1138 
1139 	old_state = group->state;
1140 	switch (csg_iface->output->ack & CSG_STATE_MASK) {
1141 	case CSG_STATE_START:
1142 	case CSG_STATE_RESUME:
1143 		new_state = PANTHOR_CS_GROUP_ACTIVE;
1144 		break;
1145 	case CSG_STATE_TERMINATE:
1146 		new_state = PANTHOR_CS_GROUP_TERMINATED;
1147 		break;
1148 	case CSG_STATE_SUSPEND:
1149 		new_state = PANTHOR_CS_GROUP_SUSPENDED;
1150 		break;
1151 	}
1152 
1153 	if (old_state == new_state)
1154 		return;
1155 
1156 	if (new_state == PANTHOR_CS_GROUP_SUSPENDED)
1157 		csg_slot_sync_queues_state_locked(ptdev, csg_id);
1158 
1159 	if (old_state == PANTHOR_CS_GROUP_ACTIVE) {
1160 		u32 i;
1161 
1162 		/* Reset the queue slots so we start from a clean
1163 		 * state when starting/resuming a new group on this
1164 		 * CSG slot. No wait needed here, and no ringbell
1165 		 * either, since the CS slot will only be re-used
1166 		 * on the next CSG start operation.
1167 		 */
1168 		for (i = 0; i < group->queue_count; i++) {
1169 			if (group->queues[i])
1170 				cs_slot_reset_locked(ptdev, csg_id, i);
1171 		}
1172 	}
1173 
1174 	group->state = new_state;
1175 }
1176 
1177 static int
1178 csg_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 priority)
1179 {
1180 	struct panthor_fw_csg_iface *csg_iface;
1181 	struct panthor_csg_slot *csg_slot;
1182 	struct panthor_group *group;
1183 	u32 queue_mask = 0, i;
1184 
1185 	lockdep_assert_held(&ptdev->scheduler->lock);
1186 
1187 	if (priority > MAX_CSG_PRIO)
1188 		return -EINVAL;
1189 
1190 	if (drm_WARN_ON(&ptdev->base, csg_id >= MAX_CSGS))
1191 		return -EINVAL;
1192 
1193 	csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1194 	group = csg_slot->group;
1195 	if (!group || group->state == PANTHOR_CS_GROUP_ACTIVE)
1196 		return 0;
1197 
1198 	csg_iface = panthor_fw_get_csg_iface(group->ptdev, csg_id);
1199 
1200 	for (i = 0; i < group->queue_count; i++) {
1201 		if (group->queues[i]) {
1202 			cs_slot_prog_locked(ptdev, csg_id, i);
1203 			queue_mask |= BIT(i);
1204 		}
1205 	}
1206 
1207 	csg_iface->input->allow_compute = group->compute_core_mask;
1208 	csg_iface->input->allow_fragment = group->fragment_core_mask;
1209 	csg_iface->input->allow_other = group->tiler_core_mask;
1210 	csg_iface->input->endpoint_req = CSG_EP_REQ_COMPUTE(group->max_compute_cores) |
1211 					 CSG_EP_REQ_FRAGMENT(group->max_fragment_cores) |
1212 					 CSG_EP_REQ_TILER(group->max_tiler_cores) |
1213 					 CSG_EP_REQ_PRIORITY(priority);
1214 	csg_iface->input->config = panthor_vm_as(group->vm);
1215 
1216 	if (group->suspend_buf)
1217 		csg_iface->input->suspend_buf = panthor_kernel_bo_gpuva(group->suspend_buf);
1218 	else
1219 		csg_iface->input->suspend_buf = 0;
1220 
1221 	if (group->protm_suspend_buf) {
1222 		csg_iface->input->protm_suspend_buf =
1223 			panthor_kernel_bo_gpuva(group->protm_suspend_buf);
1224 	} else {
1225 		csg_iface->input->protm_suspend_buf = 0;
1226 	}
1227 
1228 	csg_iface->input->ack_irq_mask = ~0;
1229 	panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, queue_mask);
1230 	return 0;
1231 }
1232 
1233 static void
1234 cs_slot_process_fatal_event_locked(struct panthor_device *ptdev,
1235 				   u32 csg_id, u32 cs_id)
1236 {
1237 	struct panthor_scheduler *sched = ptdev->scheduler;
1238 	struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
1239 	struct panthor_group *group = csg_slot->group;
1240 	struct panthor_fw_cs_iface *cs_iface;
1241 	u32 fatal;
1242 	u64 info;
1243 
1244 	lockdep_assert_held(&sched->lock);
1245 
1246 	cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1247 	fatal = cs_iface->output->fatal;
1248 	info = cs_iface->output->fatal_info;
1249 
1250 	if (group)
1251 		group->fatal_queues |= BIT(cs_id);
1252 
1253 	sched_queue_delayed_work(sched, tick, 0);
1254 	drm_warn(&ptdev->base,
1255 		 "CSG slot %d CS slot: %d\n"
1256 		 "CS_FATAL.EXCEPTION_TYPE: 0x%x (%s)\n"
1257 		 "CS_FATAL.EXCEPTION_DATA: 0x%x\n"
1258 		 "CS_FATAL_INFO.EXCEPTION_DATA: 0x%llx\n",
1259 		 csg_id, cs_id,
1260 		 (unsigned int)CS_EXCEPTION_TYPE(fatal),
1261 		 panthor_exception_name(ptdev, CS_EXCEPTION_TYPE(fatal)),
1262 		 (unsigned int)CS_EXCEPTION_DATA(fatal),
1263 		 info);
1264 }
1265 
1266 static void
1267 cs_slot_process_fault_event_locked(struct panthor_device *ptdev,
1268 				   u32 csg_id, u32 cs_id)
1269 {
1270 	struct panthor_scheduler *sched = ptdev->scheduler;
1271 	struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
1272 	struct panthor_group *group = csg_slot->group;
1273 	struct panthor_queue *queue = group && cs_id < group->queue_count ?
1274 				      group->queues[cs_id] : NULL;
1275 	struct panthor_fw_cs_iface *cs_iface;
1276 	u32 fault;
1277 	u64 info;
1278 
1279 	lockdep_assert_held(&sched->lock);
1280 
1281 	cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1282 	fault = cs_iface->output->fault;
1283 	info = cs_iface->output->fault_info;
1284 
1285 	if (queue && CS_EXCEPTION_TYPE(fault) == DRM_PANTHOR_EXCEPTION_CS_INHERIT_FAULT) {
1286 		u64 cs_extract = queue->iface.output->extract;
1287 		struct panthor_job *job;
1288 
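		/* Jobs entirely behind the CS extract pointer have already
		 * executed (skip them); jobs entirely ahead of it haven't
		 * started yet (stop there). Only the job the extract pointer
		 * currently sits in inherits the fault.
		 */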
1289 		spin_lock(&queue->fence_ctx.lock);
1290 		list_for_each_entry(job, &queue->fence_ctx.in_flight_jobs, node) {
1291 			if (cs_extract >= job->ringbuf.end)
1292 				continue;
1293 
1294 			if (cs_extract < job->ringbuf.start)
1295 				break;
1296 
1297 			dma_fence_set_error(job->done_fence, -EINVAL);
1298 		}
1299 		spin_unlock(&queue->fence_ctx.lock);
1300 	}
1301 
1302 	drm_warn(&ptdev->base,
1303 		 "CSG slot %d CS slot: %d\n"
1304 		 "CS_FAULT.EXCEPTION_TYPE: 0x%x (%s)\n"
1305 		 "CS_FAULT.EXCEPTION_DATA: 0x%x\n"
1306 		 "CS_FAULT_INFO.EXCEPTION_DATA: 0x%llx\n",
1307 		 csg_id, cs_id,
1308 		 (unsigned int)CS_EXCEPTION_TYPE(fault),
1309 		 panthor_exception_name(ptdev, CS_EXCEPTION_TYPE(fault)),
1310 		 (unsigned int)CS_EXCEPTION_DATA(fault),
1311 		 info);
1312 }
1313 
1314 static int group_process_tiler_oom(struct panthor_group *group, u32 cs_id)
1315 {
1316 	struct panthor_device *ptdev = group->ptdev;
1317 	struct panthor_scheduler *sched = ptdev->scheduler;
1318 	u32 renderpasses_in_flight, pending_frag_count;
1319 	struct panthor_heap_pool *heaps = NULL;
1320 	u64 heap_address, new_chunk_va = 0;
1321 	u32 vt_start, vt_end, frag_end;
1322 	int ret, csg_id;
1323 
1324 	mutex_lock(&sched->lock);
1325 	csg_id = group->csg_id;
1326 	if (csg_id >= 0) {
1327 		struct panthor_fw_cs_iface *cs_iface;
1328 
1329 		cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1330 		heaps = panthor_vm_get_heap_pool(group->vm, false);
1331 		heap_address = cs_iface->output->heap_address;
1332 		vt_start = cs_iface->output->heap_vt_start;
1333 		vt_end = cs_iface->output->heap_vt_end;
1334 		frag_end = cs_iface->output->heap_frag_end;
1335 		renderpasses_in_flight = vt_start - frag_end;
1336 		pending_frag_count = vt_end - frag_end;
1337 	}
1338 	mutex_unlock(&sched->lock);
1339 
1340 	/* The group got scheduled out, we stop here. We will get a new tiler OOM event
1341 	 * when it's scheduled again.
1342 	 */
1343 	if (unlikely(csg_id < 0))
1344 		return 0;
1345 
1346 	if (IS_ERR(heaps) || frag_end > vt_end || vt_end >= vt_start) {
1347 		ret = -EINVAL;
1348 	} else {
1349 		/* We do the allocation without holding the scheduler lock to avoid
1350 		 * blocking the scheduling.
1351 		 */
1352 		ret = panthor_heap_grow(heaps, heap_address,
1353 					renderpasses_in_flight,
1354 					pending_frag_count, &new_chunk_va);
1355 	}
1356 
1357 	if (ret && ret != -EBUSY) {
1358 		drm_warn(&ptdev->base, "Failed to extend the tiler heap\n");
1359 		group->fatal_queues |= BIT(cs_id);
1360 		sched_queue_delayed_work(sched, tick, 0);
1361 		goto out_put_heap_pool;
1362 	}
1363 
1364 	mutex_lock(&sched->lock);
1365 	csg_id = group->csg_id;
1366 	if (csg_id >= 0) {
1367 		struct panthor_fw_csg_iface *csg_iface;
1368 		struct panthor_fw_cs_iface *cs_iface;
1369 
1370 		csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1371 		cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1372 
1373 		cs_iface->input->heap_start = new_chunk_va;
1374 		cs_iface->input->heap_end = new_chunk_va;
1375 		panthor_fw_update_reqs(cs_iface, req, cs_iface->output->ack, CS_TILER_OOM);
1376 		panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, BIT(cs_id));
1377 		panthor_fw_ring_csg_doorbells(ptdev, BIT(csg_id));
1378 	}
1379 	mutex_unlock(&sched->lock);
1380 
1381 	/* We allocated a chunk, but couldn't link it to the heap
1382 	 * context because the group was scheduled out while we were
1383 	 * allocating memory. We need to return this chunk to the heap.
1384 	 */
1385 	if (unlikely(csg_id < 0 && new_chunk_va))
1386 		panthor_heap_return_chunk(heaps, heap_address, new_chunk_va);
1387 
1388 	ret = 0;
1389 
1390 out_put_heap_pool:
1391 	panthor_heap_pool_put(heaps);
1392 	return ret;
1393 }
1394 
1395 static void group_tiler_oom_work(struct work_struct *work)
1396 {
1397 	struct panthor_group *group =
1398 		container_of(work, struct panthor_group, tiler_oom_work);
1399 	u32 tiler_oom = atomic_xchg(&group->tiler_oom, 0);
1400 
1401 	while (tiler_oom) {
1402 		u32 cs_id = ffs(tiler_oom) - 1;
1403 
1404 		group_process_tiler_oom(group, cs_id);
1405 		tiler_oom &= ~BIT(cs_id);
1406 	}
1407 
1408 	group_put(group);
1409 }
1410 
1411 static void
1412 cs_slot_process_tiler_oom_event_locked(struct panthor_device *ptdev,
1413 				       u32 csg_id, u32 cs_id)
1414 {
1415 	struct panthor_scheduler *sched = ptdev->scheduler;
1416 	struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
1417 	struct panthor_group *group = csg_slot->group;
1418 
1419 	lockdep_assert_held(&sched->lock);
1420 
1421 	if (drm_WARN_ON(&ptdev->base, !group))
1422 		return;
1423 
1424 	atomic_or(BIT(cs_id), &group->tiler_oom);
1425 
1426 	/* We don't use group_queue_work() here because we want to queue the
1427 	 * work item to the heap_alloc_wq.
1428 	 */
1429 	group_get(group);
1430 	if (!queue_work(sched->heap_alloc_wq, &group->tiler_oom_work))
1431 		group_put(group);
1432 }
1433 
1434 static bool cs_slot_process_irq_locked(struct panthor_device *ptdev,
1435 				       u32 csg_id, u32 cs_id)
1436 {
1437 	struct panthor_fw_cs_iface *cs_iface;
1438 	u32 req, ack, events;
1439 
1440 	lockdep_assert_held(&ptdev->scheduler->lock);
1441 
1442 	cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1443 	req = cs_iface->input->req;
1444 	ack = cs_iface->output->ack;
1445 	events = (req ^ ack) & CS_EVT_MASK;
1446 
1447 	if (events & CS_FATAL)
1448 		cs_slot_process_fatal_event_locked(ptdev, csg_id, cs_id);
1449 
1450 	if (events & CS_FAULT)
1451 		cs_slot_process_fault_event_locked(ptdev, csg_id, cs_id);
1452 
1453 	if (events & CS_TILER_OOM)
1454 		cs_slot_process_tiler_oom_event_locked(ptdev, csg_id, cs_id);
1455 
1456 	/* We don't acknowledge the TILER_OOM event since its handling is
1457 	 * deferred to a separate work.
1458 	 */
1459 	panthor_fw_update_reqs(cs_iface, req, ack, CS_FATAL | CS_FAULT);
1460 
1461 	return (events & (CS_FAULT | CS_TILER_OOM)) != 0;
1462 }
1463 
1464 static void csg_slot_sync_idle_state_locked(struct panthor_device *ptdev, u32 csg_id)
1465 {
1466 	struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1467 	struct panthor_fw_csg_iface *csg_iface;
1468 
1469 	lockdep_assert_held(&ptdev->scheduler->lock);
1470 
1471 	csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1472 	csg_slot->idle = csg_iface->output->status_state & CSG_STATUS_STATE_IS_IDLE;
1473 }
1474 
1475 static void csg_slot_process_idle_event_locked(struct panthor_device *ptdev, u32 csg_id)
1476 {
1477 	struct panthor_scheduler *sched = ptdev->scheduler;
1478 
1479 	lockdep_assert_held(&sched->lock);
1480 
1481 	sched->might_have_idle_groups = true;
1482 
1483 	/* Schedule a tick so we can evict idle groups and schedule non-idle
1484 	 * ones. This will also update runtime PM and devfreq busy/idle states,
1485 	 * so the device can lower its frequency or get suspended.
1486 	 */
1487 	sched_queue_delayed_work(sched, tick, 0);
1488 }
1489 
1490 static void csg_slot_sync_update_locked(struct panthor_device *ptdev,
1491 					u32 csg_id)
1492 {
1493 	struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1494 	struct panthor_group *group = csg_slot->group;
1495 
1496 	lockdep_assert_held(&ptdev->scheduler->lock);
1497 
1498 	if (group)
1499 		group_queue_work(group, sync_upd);
1500 
1501 	sched_queue_work(ptdev->scheduler, sync_upd);
1502 }
1503 
1504 static void
1505 csg_slot_process_progress_timer_event_locked(struct panthor_device *ptdev, u32 csg_id)
1506 {
1507 	struct panthor_scheduler *sched = ptdev->scheduler;
1508 	struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
1509 	struct panthor_group *group = csg_slot->group;
1510 
1511 	lockdep_assert_held(&sched->lock);
1512 
1513 	drm_warn(&ptdev->base, "CSG slot %d progress timeout\n", csg_id);
1514 
1516 	if (!drm_WARN_ON(&ptdev->base, !group))
1517 		group->timedout = true;
1518 
1519 	sched_queue_delayed_work(sched, tick, 0);
1520 }
1521 
1522 static void sched_process_csg_irq_locked(struct panthor_device *ptdev, u32 csg_id)
1523 {
1524 	u32 req, ack, cs_irq_req, cs_irq_ack, cs_irqs, csg_events;
1525 	struct panthor_fw_csg_iface *csg_iface;
1526 	u32 ring_cs_db_mask = 0;
1527 
1528 	lockdep_assert_held(&ptdev->scheduler->lock);
1529 
1530 	if (drm_WARN_ON(&ptdev->base, csg_id >= ptdev->scheduler->csg_slot_count))
1531 		return;
1532 
1533 	csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1534 	req = READ_ONCE(csg_iface->input->req);
1535 	ack = READ_ONCE(csg_iface->output->ack);
1536 	cs_irq_req = READ_ONCE(csg_iface->output->cs_irq_req);
1537 	cs_irq_ack = READ_ONCE(csg_iface->input->cs_irq_ack);
1538 	csg_events = (req ^ ack) & CSG_EVT_MASK;
1539 
1540 	/* There may not be any pending CSG/CS interrupts to process */
1541 	if (req == ack && cs_irq_req == cs_irq_ack)
1542 		return;
1543 
1544 	/* Immediately set IRQ_ACK bits to be the same as the IRQ_REQ bits
1545 	 * before examining the CS_ACK & CS_REQ bits. This ensures the host
1546 	 * doesn't miss an interrupt for the CS in the race scenario where,
1547 	 * whilst the host is servicing an interrupt for the CS, the firmware
1548 	 * sends another interrupt for that CS.
1549 	 */
1550 	csg_iface->input->cs_irq_ack = cs_irq_req;
1551 
1552 	panthor_fw_update_reqs(csg_iface, req, ack,
1553 			       CSG_SYNC_UPDATE |
1554 			       CSG_IDLE |
1555 			       CSG_PROGRESS_TIMER_EVENT);
1556 
1557 	if (csg_events & CSG_IDLE)
1558 		csg_slot_process_idle_event_locked(ptdev, csg_id);
1559 
1560 	if (csg_events & CSG_PROGRESS_TIMER_EVENT)
1561 		csg_slot_process_progress_timer_event_locked(ptdev, csg_id);
1562 
1563 	cs_irqs = cs_irq_req ^ cs_irq_ack;
1564 	while (cs_irqs) {
1565 		u32 cs_id = ffs(cs_irqs) - 1;
1566 
1567 		if (cs_slot_process_irq_locked(ptdev, csg_id, cs_id))
1568 			ring_cs_db_mask |= BIT(cs_id);
1569 
1570 		cs_irqs &= ~BIT(cs_id);
1571 	}
1572 
1573 	if (csg_events & CSG_SYNC_UPDATE)
1574 		csg_slot_sync_update_locked(ptdev, csg_id);
1575 
1576 	if (ring_cs_db_mask)
1577 		panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, ring_cs_db_mask);
1578 
1579 	panthor_fw_ring_csg_doorbells(ptdev, BIT(csg_id));
1580 }
1581 
1582 static void sched_process_idle_event_locked(struct panthor_device *ptdev)
1583 {
1584 	struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
1585 
1586 	lockdep_assert_held(&ptdev->scheduler->lock);
1587 
1588 	/* Acknowledge the idle event and schedule a tick. */
1589 	panthor_fw_update_reqs(glb_iface, req, glb_iface->output->ack, GLB_IDLE);
1590 	sched_queue_delayed_work(ptdev->scheduler, tick, 0);
1591 }
1592 
1593 /**
1594  * sched_process_global_irq_locked() - Process the scheduling part of a global IRQ
1595  * @ptdev: Device.
1596  */
1597 static void sched_process_global_irq_locked(struct panthor_device *ptdev)
1598 {
1599 	struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
1600 	u32 req, ack, evts;
1601 
1602 	lockdep_assert_held(&ptdev->scheduler->lock);
1603 
1604 	req = READ_ONCE(glb_iface->input->req);
1605 	ack = READ_ONCE(glb_iface->output->ack);
1606 	evts = (req ^ ack) & GLB_EVT_MASK;
1607 
1608 	if (evts & GLB_IDLE)
1609 		sched_process_idle_event_locked(ptdev);
1610 }
1611 
1612 static void process_fw_events_work(struct work_struct *work)
1613 {
1614 	struct panthor_scheduler *sched = container_of(work, struct panthor_scheduler,
1615 						      fw_events_work);
1616 	u32 events = atomic_xchg(&sched->fw_events, 0);
1617 	struct panthor_device *ptdev = sched->ptdev;
1618 
1619 	mutex_lock(&sched->lock);
1620 
1621 	if (events & JOB_INT_GLOBAL_IF) {
1622 		sched_process_global_irq_locked(ptdev);
1623 		events &= ~JOB_INT_GLOBAL_IF;
1624 	}
1625 
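	/* All remaining event bits map 1:1 to CSG slot IDs. */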
1626 	while (events) {
1627 		u32 csg_id = ffs(events) - 1;
1628 
1629 		sched_process_csg_irq_locked(ptdev, csg_id);
1630 		events &= ~BIT(csg_id);
1631 	}
1632 
1633 	mutex_unlock(&sched->lock);
1634 }
1635 
1636 /**
1637  * panthor_sched_report_fw_events() - Report FW events to the scheduler.
 * @ptdev: Device.
 * @events: Events to report (JOB_INT_GLOBAL_IF and/or per-CSG slot bits).
1638  */
1639 void panthor_sched_report_fw_events(struct panthor_device *ptdev, u32 events)
1640 {
1641 	if (!ptdev->scheduler)
1642 		return;
1643 
1644 	atomic_or(events, &ptdev->scheduler->fw_events);
1645 	sched_queue_work(ptdev->scheduler, fw_events);
1646 }
1647 
1648 static const char *fence_get_driver_name(struct dma_fence *fence)
1649 {
1650 	return "panthor";
1651 }
1652 
1653 static const char *queue_fence_get_timeline_name(struct dma_fence *fence)
1654 {
1655 	return "queue-fence";
1656 }
1657 
1658 static const struct dma_fence_ops panthor_queue_fence_ops = {
1659 	.get_driver_name = fence_get_driver_name,
1660 	.get_timeline_name = queue_fence_get_timeline_name,
1661 };
1662 
1663 struct panthor_csg_slots_upd_ctx {
1664 	u32 update_mask;
1665 	u32 timedout_mask;
1666 	struct {
1667 		u32 value;
1668 		u32 mask;
1669 	} requests[MAX_CSGS];
1670 };
1671 
1672 static void csgs_upd_ctx_init(struct panthor_csg_slots_upd_ctx *ctx)
1673 {
1674 	memset(ctx, 0, sizeof(*ctx));
1675 }
1676 
1677 static void csgs_upd_ctx_queue_reqs(struct panthor_device *ptdev,
1678 				    struct panthor_csg_slots_upd_ctx *ctx,
1679 				    u32 csg_id, u32 value, u32 mask)
1680 {
1681 	if (drm_WARN_ON(&ptdev->base, !mask) ||
1682 	    drm_WARN_ON(&ptdev->base, csg_id >= ptdev->scheduler->csg_slot_count))
1683 		return;
1684 
1685 	ctx->requests[csg_id].value = (ctx->requests[csg_id].value & ~mask) | (value & mask);
1686 	ctx->requests[csg_id].mask |= mask;
1687 	ctx->update_mask |= BIT(csg_id);
1688 }
1689 
1690 static int csgs_upd_ctx_apply_locked(struct panthor_device *ptdev,
1691 				     struct panthor_csg_slots_upd_ctx *ctx)
1692 {
1693 	struct panthor_scheduler *sched = ptdev->scheduler;
1694 	u32 update_slots = ctx->update_mask;
1695 
1696 	lockdep_assert_held(&sched->lock);
1697 
1698 	if (!ctx->update_mask)
1699 		return 0;
1700 
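	/* First pass: commit all request updates, then ring the doorbells
	 * in one go. Acks are awaited in a second pass below, once all
	 * requests have been submitted.
	 */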
1701 	while (update_slots) {
1702 		struct panthor_fw_csg_iface *csg_iface;
1703 		u32 csg_id = ffs(update_slots) - 1;
1704 
1705 		update_slots &= ~BIT(csg_id);
1706 		csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1707 		panthor_fw_update_reqs(csg_iface, req,
1708 				       ctx->requests[csg_id].value,
1709 				       ctx->requests[csg_id].mask);
1710 	}
1711 
1712 	panthor_fw_ring_csg_doorbells(ptdev, ctx->update_mask);
1713 
1714 	update_slots = ctx->update_mask;
1715 	while (update_slots) {
1716 		struct panthor_fw_csg_iface *csg_iface;
1717 		u32 csg_id = ffs(update_slots) - 1;
1718 		u32 req_mask = ctx->requests[csg_id].mask, acked;
1719 		int ret;
1720 
1721 		update_slots &= ~BIT(csg_id);
1722 		csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1723 
1724 		ret = panthor_fw_csg_wait_acks(ptdev, csg_id, req_mask, &acked, 100);
1725 
1726 		if (acked & CSG_ENDPOINT_CONFIG)
1727 			csg_slot_sync_priority_locked(ptdev, csg_id);
1728 
1729 		if (acked & CSG_STATE_MASK)
1730 			csg_slot_sync_state_locked(ptdev, csg_id);
1731 
1732 		if (acked & CSG_STATUS_UPDATE) {
1733 			csg_slot_sync_queues_state_locked(ptdev, csg_id);
1734 			csg_slot_sync_idle_state_locked(ptdev, csg_id);
1735 		}
1736 
1737 		if (ret && acked != req_mask &&
1738 		    ((csg_iface->input->req ^ csg_iface->output->ack) & req_mask) != 0) {
			drm_err(&ptdev->base, "CSG %d update request timed out", csg_id);
1740 			ctx->timedout_mask |= BIT(csg_id);
1741 		}
1742 	}
1743 
1744 	if (ctx->timedout_mask)
1745 		return -ETIMEDOUT;
1746 
1747 	return 0;
1748 }
1749 
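/*
 * Context attached to a scheduler tick. @old_groups collects the groups
 * currently bound to CSG slots (the eviction candidates), @groups the
 * groups picked to run next, both indexed by group priority. @vms and
 * @as_count track the VMs referenced by the picked groups, so we never
 * pick more groups than we have address spaces for.
 */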
1750 struct panthor_sched_tick_ctx {
1751 	struct list_head old_groups[PANTHOR_CSG_PRIORITY_COUNT];
1752 	struct list_head groups[PANTHOR_CSG_PRIORITY_COUNT];
1753 	u32 idle_group_count;
1754 	u32 group_count;
1755 	enum panthor_csg_priority min_priority;
1756 	struct panthor_vm *vms[MAX_CS_PER_CSG];
1757 	u32 as_count;
1758 	bool immediate_tick;
1759 	u32 csg_upd_failed_mask;
1760 };
1761 
1762 static bool
1763 tick_ctx_is_full(const struct panthor_scheduler *sched,
1764 		 const struct panthor_sched_tick_ctx *ctx)
1765 {
1766 	return ctx->group_count == sched->csg_slot_count;
1767 }
1768 
1769 static bool
1770 group_is_idle(struct panthor_group *group)
1771 {
1772 	struct panthor_device *ptdev = group->ptdev;
1773 	u32 inactive_queues;
1774 
1775 	if (group->csg_id >= 0)
1776 		return ptdev->scheduler->csg_slots[group->csg_id].idle;
1777 
1778 	inactive_queues = group->idle_queues | group->blocked_queues;
1779 	return hweight32(inactive_queues) == group->queue_count;
1780 }
1781 
1782 static bool
1783 group_can_run(struct panthor_group *group)
1784 {
1785 	return group->state != PANTHOR_CS_GROUP_TERMINATED &&
1786 	       !group->destroyed && group->fatal_queues == 0 &&
1787 	       !group->timedout;
1788 }
1789 
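/*
 * Move runnable groups from @queue to the tick context until it's full.
 * A group is skipped if it can't run, if it's idle while
 * @skip_idle_groups is set, or if picking it would require an extra
 * address space when all AS slots are already claimed by other VMs.
 */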
1790 static void
1791 tick_ctx_pick_groups_from_list(const struct panthor_scheduler *sched,
1792 			       struct panthor_sched_tick_ctx *ctx,
1793 			       struct list_head *queue,
1794 			       bool skip_idle_groups,
1795 			       bool owned_by_tick_ctx)
1796 {
1797 	struct panthor_group *group, *tmp;
1798 
1799 	if (tick_ctx_is_full(sched, ctx))
1800 		return;
1801 
1802 	list_for_each_entry_safe(group, tmp, queue, run_node) {
1803 		u32 i;
1804 
1805 		if (!group_can_run(group))
1806 			continue;
1807 
1808 		if (skip_idle_groups && group_is_idle(group))
1809 			continue;
1810 
1811 		for (i = 0; i < ctx->as_count; i++) {
1812 			if (ctx->vms[i] == group->vm)
1813 				break;
1814 		}
1815 
1816 		if (i == ctx->as_count && ctx->as_count == sched->as_slot_count)
1817 			continue;
1818 
1819 		if (!owned_by_tick_ctx)
1820 			group_get(group);
1821 
1822 		list_move_tail(&group->run_node, &ctx->groups[group->priority]);
1823 		ctx->group_count++;
1824 		if (group_is_idle(group))
1825 			ctx->idle_group_count++;
1826 
1827 		if (i == ctx->as_count)
1828 			ctx->vms[ctx->as_count++] = group->vm;
1829 
1830 		if (ctx->min_priority > group->priority)
1831 			ctx->min_priority = group->priority;
1832 
1833 		if (tick_ctx_is_full(sched, ctx))
1834 			return;
1835 	}
1836 }
1837 
1838 static void
1839 tick_ctx_insert_old_group(struct panthor_scheduler *sched,
1840 			  struct panthor_sched_tick_ctx *ctx,
1841 			  struct panthor_group *group,
1842 			  bool full_tick)
1843 {
1844 	struct panthor_csg_slot *csg_slot = &sched->csg_slots[group->csg_id];
1845 	struct panthor_group *other_group;
1846 
1847 	if (!full_tick) {
1848 		list_add_tail(&group->run_node, &ctx->old_groups[group->priority]);
1849 		return;
1850 	}
1851 
1852 	/* Rotate to make sure groups with lower CSG slot
1853 	 * priorities have a chance to get a higher CSG slot
1854 	 * priority next time they get picked. This priority
1855 	 * has an impact on resource request ordering, so it's
1856 	 * important to make sure we don't let one group starve
1857 	 * all other groups with the same group priority.
1858 	 */
1859 	list_for_each_entry(other_group,
1860 			    &ctx->old_groups[csg_slot->group->priority],
1861 			    run_node) {
1862 		struct panthor_csg_slot *other_csg_slot = &sched->csg_slots[other_group->csg_id];
1863 
1864 		if (other_csg_slot->priority > csg_slot->priority) {
1865 			list_add_tail(&csg_slot->group->run_node, &other_group->run_node);
1866 			return;
1867 		}
1868 	}
1869 
1870 	list_add_tail(&group->run_node, &ctx->old_groups[group->priority]);
1871 }
1872 
1873 static void
1874 tick_ctx_init(struct panthor_scheduler *sched,
1875 	      struct panthor_sched_tick_ctx *ctx,
1876 	      bool full_tick)
1877 {
1878 	struct panthor_device *ptdev = sched->ptdev;
1879 	struct panthor_csg_slots_upd_ctx upd_ctx;
1880 	int ret;
1881 	u32 i;
1882 
1883 	memset(ctx, 0, sizeof(*ctx));
1884 	csgs_upd_ctx_init(&upd_ctx);
1885 
1886 	ctx->min_priority = PANTHOR_CSG_PRIORITY_COUNT;
1887 	for (i = 0; i < ARRAY_SIZE(ctx->groups); i++) {
1888 		INIT_LIST_HEAD(&ctx->groups[i]);
1889 		INIT_LIST_HEAD(&ctx->old_groups[i]);
1890 	}
1891 
1892 	for (i = 0; i < sched->csg_slot_count; i++) {
1893 		struct panthor_csg_slot *csg_slot = &sched->csg_slots[i];
1894 		struct panthor_group *group = csg_slot->group;
1895 		struct panthor_fw_csg_iface *csg_iface;
1896 
1897 		if (!group)
1898 			continue;
1899 
1900 		csg_iface = panthor_fw_get_csg_iface(ptdev, i);
1901 		group_get(group);
1902 
		/* If there were unhandled faults on the VM, force processing of
		 * CSG IRQs so we can flag the faulty queues.
		 */
1906 		if (panthor_vm_has_unhandled_faults(group->vm)) {
1907 			sched_process_csg_irq_locked(ptdev, i);
1908 
			/* If no fatal fault was reported, flag all queues as faulty. */
1910 			if (!group->fatal_queues)
1911 				group->fatal_queues |= GENMASK(group->queue_count - 1, 0);
1912 		}
1913 
1914 		tick_ctx_insert_old_group(sched, ctx, group, full_tick);
1915 		csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, i,
1916 					csg_iface->output->ack ^ CSG_STATUS_UPDATE,
1917 					CSG_STATUS_UPDATE);
1918 	}
1919 
1920 	ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
1921 	if (ret) {
1922 		panthor_device_schedule_reset(ptdev);
1923 		ctx->csg_upd_failed_mask |= upd_ctx.timedout_mask;
1924 	}
1925 }
1926 
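/*
 * Number of 64-bit instructions reserved per job in the ring buffer.
 * Must match the size of the call_instrs[] sequence emitted by
 * queue_run_job().
 */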
1927 #define NUM_INSTRS_PER_SLOT		16
1928 
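/*
 * Post-processing for a group that can no longer run: signal all
 * in-flight job fences with an error reflecting why the group died,
 * and manually bump the syncobjs so userspace waiters get unblocked.
 */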
1929 static void
1930 group_term_post_processing(struct panthor_group *group)
1931 {
1932 	struct panthor_job *job, *tmp;
1933 	LIST_HEAD(faulty_jobs);
1934 	bool cookie;
1935 	u32 i = 0;
1936 
1937 	if (drm_WARN_ON(&group->ptdev->base, group_can_run(group)))
1938 		return;
1939 
1940 	cookie = dma_fence_begin_signalling();
1941 	for (i = 0; i < group->queue_count; i++) {
1942 		struct panthor_queue *queue = group->queues[i];
1943 		struct panthor_syncobj_64b *syncobj;
1944 		int err;
1945 
1946 		if (group->fatal_queues & BIT(i))
1947 			err = -EINVAL;
1948 		else if (group->timedout)
1949 			err = -ETIMEDOUT;
1950 		else
1951 			err = -ECANCELED;
1952 
1953 		if (!queue)
1954 			continue;
1955 
1956 		spin_lock(&queue->fence_ctx.lock);
1957 		list_for_each_entry_safe(job, tmp, &queue->fence_ctx.in_flight_jobs, node) {
1958 			list_move_tail(&job->node, &faulty_jobs);
1959 			dma_fence_set_error(job->done_fence, err);
1960 			dma_fence_signal_locked(job->done_fence);
1961 		}
1962 		spin_unlock(&queue->fence_ctx.lock);
1963 
1964 		/* Manually update the syncobj seqno to unblock waiters. */
1965 		syncobj = group->syncobjs->kmap + (i * sizeof(*syncobj));
1966 		syncobj->status = ~0;
1967 		syncobj->seqno = atomic64_read(&queue->fence_ctx.seqno);
1968 		sched_queue_work(group->ptdev->scheduler, sync_upd);
1969 	}
1970 	dma_fence_end_signalling(cookie);
1971 
1972 	list_for_each_entry_safe(job, tmp, &faulty_jobs, node) {
1973 		list_del_init(&job->node);
1974 		panthor_job_put(&job->base);
1975 	}
1976 }
1977 
1978 static void group_term_work(struct work_struct *work)
1979 {
1980 	struct panthor_group *group =
1981 		container_of(work, struct panthor_group, term_work);
1982 
1983 	group_term_post_processing(group);
1984 	group_put(group);
1985 }
1986 
1987 static void
1988 tick_ctx_cleanup(struct panthor_scheduler *sched,
1989 		 struct panthor_sched_tick_ctx *ctx)
1990 {
1991 	struct panthor_group *group, *tmp;
1992 	u32 i;
1993 
1994 	for (i = 0; i < ARRAY_SIZE(ctx->old_groups); i++) {
1995 		list_for_each_entry_safe(group, tmp, &ctx->old_groups[i], run_node) {
1996 			/* If everything went fine, we should only have groups
1997 			 * to be terminated in the old_groups lists.
1998 			 */
1999 			drm_WARN_ON(&group->ptdev->base, !ctx->csg_upd_failed_mask &&
2000 				    group_can_run(group));
2001 
2002 			if (!group_can_run(group)) {
2003 				list_del_init(&group->run_node);
2004 				list_del_init(&group->wait_node);
2005 				group_queue_work(group, term);
2006 			} else if (group->csg_id >= 0) {
2007 				list_del_init(&group->run_node);
2008 			} else {
2009 				list_move(&group->run_node,
2010 					  group_is_idle(group) ?
2011 					  &sched->groups.idle[group->priority] :
2012 					  &sched->groups.runnable[group->priority]);
2013 			}
2014 			group_put(group);
2015 		}
2016 	}
2017 
2018 	for (i = 0; i < ARRAY_SIZE(ctx->groups); i++) {
2019 		/* If everything went fine, the groups to schedule lists should
2020 		 * be empty.
2021 		 */
		drm_WARN_ON(&sched->ptdev->base,
2023 			    !ctx->csg_upd_failed_mask && !list_empty(&ctx->groups[i]));
2024 
2025 		list_for_each_entry_safe(group, tmp, &ctx->groups[i], run_node) {
2026 			if (group->csg_id >= 0) {
2027 				list_del_init(&group->run_node);
2028 			} else {
2029 				list_move(&group->run_node,
2030 					  group_is_idle(group) ?
2031 					  &sched->groups.idle[group->priority] :
2032 					  &sched->groups.runnable[group->priority]);
2033 			}
2034 			group_put(group);
2035 		}
2036 	}
2037 }
2038 
2039 static void
2040 tick_ctx_apply(struct panthor_scheduler *sched, struct panthor_sched_tick_ctx *ctx)
2041 {
2042 	struct panthor_group *group, *tmp;
2043 	struct panthor_device *ptdev = sched->ptdev;
2044 	struct panthor_csg_slot *csg_slot;
2045 	int prio, new_csg_prio = MAX_CSG_PRIO, i;
2046 	u32 free_csg_slots = 0;
2047 	struct panthor_csg_slots_upd_ctx upd_ctx;
2048 	int ret;
2049 
2050 	csgs_upd_ctx_init(&upd_ctx);
2051 
2052 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
2053 		/* Suspend or terminate evicted groups. */
2054 		list_for_each_entry(group, &ctx->old_groups[prio], run_node) {
2055 			bool term = !group_can_run(group);
2056 			int csg_id = group->csg_id;
2057 
2058 			if (drm_WARN_ON(&ptdev->base, csg_id < 0))
2059 				continue;
2060 
2061 			csg_slot = &sched->csg_slots[csg_id];
2062 			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
2063 						term ? CSG_STATE_TERMINATE : CSG_STATE_SUSPEND,
2064 						CSG_STATE_MASK);
2065 		}
2066 
2067 		/* Update priorities on already running groups. */
2068 		list_for_each_entry(group, &ctx->groups[prio], run_node) {
2069 			struct panthor_fw_csg_iface *csg_iface;
2070 			int csg_id = group->csg_id;
2071 
2072 			if (csg_id < 0) {
2073 				new_csg_prio--;
2074 				continue;
2075 			}
2076 
2077 			csg_slot = &sched->csg_slots[csg_id];
2078 			csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
2079 			if (csg_slot->priority == new_csg_prio) {
2080 				new_csg_prio--;
2081 				continue;
2082 			}
2083 
2084 			panthor_fw_update_reqs(csg_iface, endpoint_req,
2085 					       CSG_EP_REQ_PRIORITY(new_csg_prio),
2086 					       CSG_EP_REQ_PRIORITY_MASK);
2087 			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
2088 						csg_iface->output->ack ^ CSG_ENDPOINT_CONFIG,
2089 						CSG_ENDPOINT_CONFIG);
2090 			new_csg_prio--;
2091 		}
2092 	}
2093 
2094 	ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
2095 	if (ret) {
2096 		panthor_device_schedule_reset(ptdev);
2097 		ctx->csg_upd_failed_mask |= upd_ctx.timedout_mask;
2098 		return;
2099 	}
2100 
2101 	/* Unbind evicted groups. */
2102 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
2103 		list_for_each_entry(group, &ctx->old_groups[prio], run_node) {
			/* This group is gone. Process any pending CSG IRQs
			 * now, so no stale interrupt leaks into the group
			 * that inherits the slot.
			 */
2108 			if (group->csg_id >= 0)
2109 				sched_process_csg_irq_locked(ptdev, group->csg_id);
2110 
2111 			group_unbind_locked(group);
2112 		}
2113 	}
2114 
2115 	for (i = 0; i < sched->csg_slot_count; i++) {
2116 		if (!sched->csg_slots[i].group)
2117 			free_csg_slots |= BIT(i);
2118 	}
2119 
2120 	csgs_upd_ctx_init(&upd_ctx);
2121 	new_csg_prio = MAX_CSG_PRIO;
2122 
2123 	/* Start new groups. */
2124 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
2125 		list_for_each_entry(group, &ctx->groups[prio], run_node) {
2126 			int csg_id = group->csg_id;
2127 			struct panthor_fw_csg_iface *csg_iface;
2128 
2129 			if (csg_id >= 0) {
2130 				new_csg_prio--;
2131 				continue;
2132 			}
2133 
2134 			csg_id = ffs(free_csg_slots) - 1;
2135 			if (drm_WARN_ON(&ptdev->base, csg_id < 0))
2136 				break;
2137 
2138 			csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
2139 			csg_slot = &sched->csg_slots[csg_id];
2140 			group_bind_locked(group, csg_id);
2141 			csg_slot_prog_locked(ptdev, csg_id, new_csg_prio--);
2142 			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
2143 						group->state == PANTHOR_CS_GROUP_SUSPENDED ?
2144 						CSG_STATE_RESUME : CSG_STATE_START,
2145 						CSG_STATE_MASK);
2146 			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
2147 						csg_iface->output->ack ^ CSG_ENDPOINT_CONFIG,
2148 						CSG_ENDPOINT_CONFIG);
2149 			free_csg_slots &= ~BIT(csg_id);
2150 		}
2151 	}
2152 
2153 	ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
2154 	if (ret) {
2155 		panthor_device_schedule_reset(ptdev);
2156 		ctx->csg_upd_failed_mask |= upd_ctx.timedout_mask;
2157 		return;
2158 	}
2159 
2160 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
2161 		list_for_each_entry_safe(group, tmp, &ctx->groups[prio], run_node) {
2162 			list_del_init(&group->run_node);
2163 
2164 			/* If the group has been destroyed while we were
2165 			 * scheduling, ask for an immediate tick to
2166 			 * re-evaluate as soon as possible and get rid of
2167 			 * this dangling group.
2168 			 */
2169 			if (group->destroyed)
2170 				ctx->immediate_tick = true;
2171 			group_put(group);
2172 		}
2173 
2174 		/* Return evicted groups to the idle or run queues. Groups
2175 		 * that can no longer be run (because they've been destroyed
2176 		 * or experienced an unrecoverable error) will be scheduled
2177 		 * for destruction in tick_ctx_cleanup().
2178 		 */
2179 		list_for_each_entry_safe(group, tmp, &ctx->old_groups[prio], run_node) {
2180 			if (!group_can_run(group))
2181 				continue;
2182 
2183 			if (group_is_idle(group))
2184 				list_move_tail(&group->run_node, &sched->groups.idle[prio]);
2185 			else
2186 				list_move_tail(&group->run_node, &sched->groups.runnable[prio]);
2187 			group_put(group);
2188 		}
2189 	}
2190 
2191 	sched->used_csg_slot_count = ctx->group_count;
2192 	sched->might_have_idle_groups = ctx->idle_group_count > 0;
2193 }
2194 
2195 static u64
2196 tick_ctx_update_resched_target(struct panthor_scheduler *sched,
2197 			       const struct panthor_sched_tick_ctx *ctx)
2198 {
2199 	/* We had space left, no need to reschedule until some external event happens. */
2200 	if (!tick_ctx_is_full(sched, ctx))
2201 		goto no_tick;
2202 
2203 	/* If idle groups were scheduled, no need to wake up until some external
2204 	 * event happens (group unblocked, new job submitted, ...).
2205 	 */
2206 	if (ctx->idle_group_count)
2207 		goto no_tick;
2208 
2209 	if (drm_WARN_ON(&sched->ptdev->base, ctx->min_priority >= PANTHOR_CSG_PRIORITY_COUNT))
2210 		goto no_tick;
2211 
	/* If there are groups of the same priority waiting, we need to
	 * keep the scheduler ticking; otherwise, we just wait for new
	 * groups with higher priority to be queued.
	 */
2216 	if (!list_empty(&sched->groups.runnable[ctx->min_priority])) {
2217 		u64 resched_target = sched->last_tick + sched->tick_period;
2218 
2219 		if (time_before64(sched->resched_target, sched->last_tick) ||
2220 		    time_before64(resched_target, sched->resched_target))
2221 			sched->resched_target = resched_target;
2222 
2223 		return sched->resched_target - sched->last_tick;
2224 	}
2225 
2226 no_tick:
2227 	sched->resched_target = U64_MAX;
2228 	return U64_MAX;
2229 }
2230 
2231 static void tick_work(struct work_struct *work)
2232 {
2233 	struct panthor_scheduler *sched = container_of(work, struct panthor_scheduler,
2234 						      tick_work.work);
2235 	struct panthor_device *ptdev = sched->ptdev;
2236 	struct panthor_sched_tick_ctx ctx;
2237 	u64 remaining_jiffies = 0, resched_delay;
2238 	u64 now = get_jiffies_64();
2239 	int prio, ret, cookie;
2240 
2241 	if (!drm_dev_enter(&ptdev->base, &cookie))
2242 		return;
2243 
2244 	ret = pm_runtime_resume_and_get(ptdev->base.dev);
2245 	if (drm_WARN_ON(&ptdev->base, ret))
2246 		goto out_dev_exit;
2247 
2248 	if (time_before64(now, sched->resched_target))
2249 		remaining_jiffies = sched->resched_target - now;
2250 
2251 	mutex_lock(&sched->lock);
2252 	if (panthor_device_reset_is_pending(sched->ptdev))
2253 		goto out_unlock;
2254 
2255 	tick_ctx_init(sched, &ctx, remaining_jiffies != 0);
2256 	if (ctx.csg_upd_failed_mask)
2257 		goto out_cleanup_ctx;
2258 
2259 	if (remaining_jiffies) {
2260 		/* Scheduling forced in the middle of a tick. Only RT groups
2261 		 * can preempt non-RT ones. Currently running RT groups can't be
2262 		 * preempted.
2263 		 */
2264 		for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1;
2265 		     prio >= 0 && !tick_ctx_is_full(sched, &ctx);
2266 		     prio--) {
2267 			tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio],
2268 						       true, true);
2269 			if (prio == PANTHOR_CSG_PRIORITY_RT) {
2270 				tick_ctx_pick_groups_from_list(sched, &ctx,
2271 							       &sched->groups.runnable[prio],
2272 							       true, false);
2273 			}
2274 		}
2275 	}
2276 
2277 	/* First pick non-idle groups */
2278 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1;
2279 	     prio >= 0 && !tick_ctx_is_full(sched, &ctx);
2280 	     prio--) {
2281 		tick_ctx_pick_groups_from_list(sched, &ctx, &sched->groups.runnable[prio],
2282 					       true, false);
2283 		tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], true, true);
2284 	}
2285 
2286 	/* If we have free CSG slots left, pick idle groups */
2287 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1;
2288 	     prio >= 0 && !tick_ctx_is_full(sched, &ctx);
2289 	     prio--) {
		/* Check the old_groups queue first to avoid reprogramming the slots. */
2291 		tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], false, true);
2292 		tick_ctx_pick_groups_from_list(sched, &ctx, &sched->groups.idle[prio],
2293 					       false, false);
2294 	}
2295 
2296 	tick_ctx_apply(sched, &ctx);
2297 	if (ctx.csg_upd_failed_mask)
2298 		goto out_cleanup_ctx;
2299 
2300 	if (ctx.idle_group_count == ctx.group_count) {
2301 		panthor_devfreq_record_idle(sched->ptdev);
2302 		if (sched->pm.has_ref) {
2303 			pm_runtime_put_autosuspend(ptdev->base.dev);
2304 			sched->pm.has_ref = false;
2305 		}
2306 	} else {
2307 		panthor_devfreq_record_busy(sched->ptdev);
2308 		if (!sched->pm.has_ref) {
2309 			pm_runtime_get(ptdev->base.dev);
2310 			sched->pm.has_ref = true;
2311 		}
2312 	}
2313 
2314 	sched->last_tick = now;
2315 	resched_delay = tick_ctx_update_resched_target(sched, &ctx);
2316 	if (ctx.immediate_tick)
2317 		resched_delay = 0;
2318 
2319 	if (resched_delay != U64_MAX)
2320 		sched_queue_delayed_work(sched, tick, resched_delay);
2321 
2322 out_cleanup_ctx:
2323 	tick_ctx_cleanup(sched, &ctx);
2324 
2325 out_unlock:
2326 	mutex_unlock(&sched->lock);
2327 	pm_runtime_mark_last_busy(ptdev->base.dev);
2328 	pm_runtime_put_autosuspend(ptdev->base.dev);
2329 
2330 out_dev_exit:
2331 	drm_dev_exit(cookie);
2332 }
2333 
2334 static int panthor_queue_eval_syncwait(struct panthor_group *group, u8 queue_idx)
2335 {
2336 	struct panthor_queue *queue = group->queues[queue_idx];
2337 	union {
2338 		struct panthor_syncobj_64b sync64;
2339 		struct panthor_syncobj_32b sync32;
2340 	} *syncobj;
2341 	bool result;
2342 	u64 value;
2343 
2344 	syncobj = panthor_queue_get_syncwait_obj(group, queue);
2345 	if (!syncobj)
2346 		return -EINVAL;
2347 
2348 	value = queue->syncwait.sync64 ?
2349 		syncobj->sync64.seqno :
2350 		syncobj->sync32.seqno;
2351 
2352 	if (queue->syncwait.gt)
2353 		result = value > queue->syncwait.ref;
2354 	else
2355 		result = value <= queue->syncwait.ref;
2356 
2357 	if (result)
2358 		panthor_queue_put_syncwait_obj(queue);
2359 
2360 	return result;
2361 }
2362 
2363 static void sync_upd_work(struct work_struct *work)
2364 {
2365 	struct panthor_scheduler *sched = container_of(work,
2366 						      struct panthor_scheduler,
2367 						      sync_upd_work);
2368 	struct panthor_group *group, *tmp;
2369 	bool immediate_tick = false;
2370 
2371 	mutex_lock(&sched->lock);
2372 	list_for_each_entry_safe(group, tmp, &sched->groups.waiting, wait_node) {
2373 		u32 tested_queues = group->blocked_queues;
2374 		u32 unblocked_queues = 0;
2375 
2376 		while (tested_queues) {
2377 			u32 cs_id = ffs(tested_queues) - 1;
2378 			int ret;
2379 
2380 			ret = panthor_queue_eval_syncwait(group, cs_id);
2381 			drm_WARN_ON(&group->ptdev->base, ret < 0);
2382 			if (ret)
2383 				unblocked_queues |= BIT(cs_id);
2384 
2385 			tested_queues &= ~BIT(cs_id);
2386 		}
2387 
2388 		if (unblocked_queues) {
2389 			group->blocked_queues &= ~unblocked_queues;
2390 
2391 			if (group->csg_id < 0) {
2392 				list_move(&group->run_node,
2393 					  &sched->groups.runnable[group->priority]);
2394 				if (group->priority == PANTHOR_CSG_PRIORITY_RT)
2395 					immediate_tick = true;
2396 			}
2397 		}
2398 
2399 		if (!group->blocked_queues)
2400 			list_del_init(&group->wait_node);
2401 	}
2402 	mutex_unlock(&sched->lock);
2403 
2404 	if (immediate_tick)
2405 		sched_queue_delayed_work(sched, tick, 0);
2406 }
2407 
2408 static void group_schedule_locked(struct panthor_group *group, u32 queue_mask)
2409 {
2410 	struct panthor_device *ptdev = group->ptdev;
2411 	struct panthor_scheduler *sched = ptdev->scheduler;
2412 	struct list_head *queue = &sched->groups.runnable[group->priority];
2413 	u64 delay_jiffies = 0;
2414 	bool was_idle;
2415 	u64 now;
2416 
2417 	if (!group_can_run(group))
2418 		return;
2419 
2420 	/* All updated queues are blocked, no need to wake up the scheduler. */
2421 	if ((queue_mask & group->blocked_queues) == queue_mask)
2422 		return;
2423 
2424 	was_idle = group_is_idle(group);
2425 	group->idle_queues &= ~queue_mask;
2426 
	/* Don't mess with the lists if we're in the middle of a reset. */
2428 	if (atomic_read(&sched->reset.in_progress))
2429 		return;
2430 
2431 	if (was_idle && !group_is_idle(group))
2432 		list_move_tail(&group->run_node, queue);
2433 
2434 	/* RT groups are preemptive. */
2435 	if (group->priority == PANTHOR_CSG_PRIORITY_RT) {
2436 		sched_queue_delayed_work(sched, tick, 0);
2437 		return;
2438 	}
2439 
2440 	/* Some groups might be idle, force an immediate tick to
2441 	 * re-evaluate.
2442 	 */
2443 	if (sched->might_have_idle_groups) {
2444 		sched_queue_delayed_work(sched, tick, 0);
2445 		return;
2446 	}
2447 
2448 	/* Scheduler is ticking, nothing to do. */
2449 	if (sched->resched_target != U64_MAX) {
		/* If there are free slots, force an immediate tick. */
2451 		if (sched->used_csg_slot_count < sched->csg_slot_count)
2452 			sched_queue_delayed_work(sched, tick, 0);
2453 
2454 		return;
2455 	}
2456 
2457 	/* Scheduler tick was off, recalculate the resched_target based on the
2458 	 * last tick event, and queue the scheduler work.
2459 	 */
2460 	now = get_jiffies_64();
2461 	sched->resched_target = sched->last_tick + sched->tick_period;
2462 	if (sched->used_csg_slot_count == sched->csg_slot_count &&
2463 	    time_before64(now, sched->resched_target))
2464 		delay_jiffies = min_t(unsigned long, sched->resched_target - now, ULONG_MAX);
2465 
2466 	sched_queue_delayed_work(sched, tick, delay_jiffies);
2467 }
2468 
2469 static void queue_stop(struct panthor_queue *queue,
2470 		       struct panthor_job *bad_job)
2471 {
2472 	drm_sched_stop(&queue->scheduler, bad_job ? &bad_job->base : NULL);
2473 }
2474 
2475 static void queue_start(struct panthor_queue *queue)
2476 {
2477 	struct panthor_job *job;
2478 
2479 	/* Re-assign the parent fences. */
2480 	list_for_each_entry(job, &queue->scheduler.pending_list, base.list)
2481 		job->base.s_fence->parent = dma_fence_get(job->done_fence);
2482 
2483 	drm_sched_start(&queue->scheduler, true);
2484 }
2485 
2486 static void panthor_group_stop(struct panthor_group *group)
2487 {
2488 	struct panthor_scheduler *sched = group->ptdev->scheduler;
2489 
2490 	lockdep_assert_held(&sched->reset.lock);
2491 
2492 	for (u32 i = 0; i < group->queue_count; i++)
2493 		queue_stop(group->queues[i], NULL);
2494 
2495 	group_get(group);
2496 	list_move_tail(&group->run_node, &sched->reset.stopped_groups);
2497 }
2498 
2499 static void panthor_group_start(struct panthor_group *group)
2500 {
2501 	struct panthor_scheduler *sched = group->ptdev->scheduler;
2502 
2503 	lockdep_assert_held(&group->ptdev->scheduler->reset.lock);
2504 
2505 	for (u32 i = 0; i < group->queue_count; i++)
2506 		queue_start(group->queues[i]);
2507 
2508 	if (group_can_run(group)) {
2509 		list_move_tail(&group->run_node,
2510 			       group_is_idle(group) ?
2511 			       &sched->groups.idle[group->priority] :
2512 			       &sched->groups.runnable[group->priority]);
2513 	} else {
2514 		list_del_init(&group->run_node);
2515 		list_del_init(&group->wait_node);
2516 		group_queue_work(group, term);
2517 	}
2518 
2519 	group_put(group);
2520 }
2521 
2522 static void panthor_sched_immediate_tick(struct panthor_device *ptdev)
2523 {
2524 	struct panthor_scheduler *sched = ptdev->scheduler;
2525 
2526 	sched_queue_delayed_work(sched, tick, 0);
2527 }
2528 
/**
 * panthor_sched_report_mmu_fault() - Report MMU faults to the scheduler.
 * @ptdev: Device.
 */
2532 void panthor_sched_report_mmu_fault(struct panthor_device *ptdev)
2533 {
2534 	/* Force a tick to immediately kill faulty groups. */
2535 	if (ptdev->scheduler)
2536 		panthor_sched_immediate_tick(ptdev);
2537 }
2538 
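/**
 * panthor_sched_resume() - Resume the scheduler.
 * @ptdev: Device.
 */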
2539 void panthor_sched_resume(struct panthor_device *ptdev)
2540 {
2541 	/* Force a tick to re-evaluate after a resume. */
2542 	panthor_sched_immediate_tick(ptdev);
2543 }
2544 
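/**
 * panthor_sched_suspend() - Suspend the scheduler.
 * @ptdev: Device.
 *
 * Suspend all active CSGs (escalating to termination if a suspend
 * request times out), flush the L2/LSC caches so the suspend state is
 * up-to-date, and unbind all groups from their CSG slots.
 */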
2545 void panthor_sched_suspend(struct panthor_device *ptdev)
2546 {
2547 	struct panthor_scheduler *sched = ptdev->scheduler;
2548 	struct panthor_csg_slots_upd_ctx upd_ctx;
2549 	u64 suspended_slots, faulty_slots;
2550 	struct panthor_group *group;
2551 	u32 i;
2552 
2553 	mutex_lock(&sched->lock);
2554 	csgs_upd_ctx_init(&upd_ctx);
2555 	for (i = 0; i < sched->csg_slot_count; i++) {
2556 		struct panthor_csg_slot *csg_slot = &sched->csg_slots[i];
2557 
2558 		if (csg_slot->group) {
2559 			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, i,
2560 						CSG_STATE_SUSPEND,
2561 						CSG_STATE_MASK);
2562 		}
2563 	}
2564 
2565 	suspended_slots = upd_ctx.update_mask;
2566 
2567 	csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
2568 	suspended_slots &= ~upd_ctx.timedout_mask;
2569 	faulty_slots = upd_ctx.timedout_mask;
2570 
2571 	if (faulty_slots) {
2572 		u32 slot_mask = faulty_slots;
2573 
2574 		drm_err(&ptdev->base, "CSG suspend failed, escalating to termination");
2575 		csgs_upd_ctx_init(&upd_ctx);
2576 		while (slot_mask) {
2577 			u32 csg_id = ffs(slot_mask) - 1;
2578 
2579 			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
2580 						CSG_STATE_TERMINATE,
2581 						CSG_STATE_MASK);
2582 			slot_mask &= ~BIT(csg_id);
2583 		}
2584 
2585 		csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
2586 
2587 		slot_mask = upd_ctx.timedout_mask;
2588 		while (slot_mask) {
2589 			u32 csg_id = ffs(slot_mask) - 1;
2590 			struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
2591 
			/* The terminate request timed out, but the soft-reset
			 * will automatically terminate all active groups, so
			 * let's force the state to terminated here.
			 */
2596 			if (csg_slot->group->state != PANTHOR_CS_GROUP_TERMINATED)
2597 				csg_slot->group->state = PANTHOR_CS_GROUP_TERMINATED;
2598 			slot_mask &= ~BIT(csg_id);
2599 		}
2600 	}
2601 
2602 	/* Flush L2 and LSC caches to make sure suspend state is up-to-date.
2603 	 * If the flush fails, flag all queues for termination.
2604 	 */
2605 	if (suspended_slots) {
2606 		bool flush_caches_failed = false;
2607 		u32 slot_mask = suspended_slots;
2608 
2609 		if (panthor_gpu_flush_caches(ptdev, CACHE_CLEAN, CACHE_CLEAN, 0))
2610 			flush_caches_failed = true;
2611 
2612 		while (slot_mask) {
2613 			u32 csg_id = ffs(slot_mask) - 1;
2614 			struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
2615 
2616 			if (flush_caches_failed)
2617 				csg_slot->group->state = PANTHOR_CS_GROUP_TERMINATED;
2618 			else
2619 				csg_slot_sync_update_locked(ptdev, csg_id);
2620 
2621 			slot_mask &= ~BIT(csg_id);
2622 		}
2623 
2624 		if (flush_caches_failed)
2625 			faulty_slots |= suspended_slots;
2626 	}
2627 
2628 	for (i = 0; i < sched->csg_slot_count; i++) {
2629 		struct panthor_csg_slot *csg_slot = &sched->csg_slots[i];
2630 
2631 		group = csg_slot->group;
2632 		if (!group)
2633 			continue;
2634 
2635 		group_get(group);
2636 
2637 		if (group->csg_id >= 0)
2638 			sched_process_csg_irq_locked(ptdev, group->csg_id);
2639 
2640 		group_unbind_locked(group);
2641 
2642 		drm_WARN_ON(&group->ptdev->base, !list_empty(&group->run_node));
2643 
2644 		if (group_can_run(group)) {
2645 			list_add(&group->run_node,
2646 				 &sched->groups.idle[group->priority]);
2647 		} else {
			/* We don't bother stopping the scheduler if the group is
			 * faulty; the group termination work will finish the job.
			 */
2651 			list_del_init(&group->wait_node);
2652 			group_queue_work(group, term);
2653 		}
2654 		group_put(group);
2655 	}
2656 	mutex_unlock(&sched->lock);
2657 }
2658 
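/**
 * panthor_sched_pre_reset() - Prepare the scheduler for a GPU reset.
 * @ptdev: Device.
 *
 * Cancel all scheduler works, suspend all groups and stop their
 * drm_gpu_schedulers, so no new job can reach the hardware while the
 * reset is in progress.
 */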
2659 void panthor_sched_pre_reset(struct panthor_device *ptdev)
2660 {
2661 	struct panthor_scheduler *sched = ptdev->scheduler;
2662 	struct panthor_group *group, *group_tmp;
2663 	u32 i;
2664 
2665 	mutex_lock(&sched->reset.lock);
2666 	atomic_set(&sched->reset.in_progress, true);
2667 
2668 	/* Cancel all scheduler works. Once this is done, these works can't be
2669 	 * scheduled again until the reset operation is complete.
2670 	 */
2671 	cancel_work_sync(&sched->sync_upd_work);
2672 	cancel_delayed_work_sync(&sched->tick_work);
2673 
2674 	panthor_sched_suspend(ptdev);
2675 
	/* Stop all groups that might still accept jobs, so no new jobs get
	 * passed to us while we're resetting.
	 */
2679 	for (i = 0; i < ARRAY_SIZE(sched->groups.runnable); i++) {
2680 		/* All groups should be in the idle lists. */
2681 		drm_WARN_ON(&ptdev->base, !list_empty(&sched->groups.runnable[i]));
2682 		list_for_each_entry_safe(group, group_tmp, &sched->groups.runnable[i], run_node)
2683 			panthor_group_stop(group);
2684 	}
2685 
2686 	for (i = 0; i < ARRAY_SIZE(sched->groups.idle); i++) {
2687 		list_for_each_entry_safe(group, group_tmp, &sched->groups.idle[i], run_node)
2688 			panthor_group_stop(group);
2689 	}
2690 
2691 	mutex_unlock(&sched->reset.lock);
2692 }
2693 
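/**
 * panthor_sched_post_reset() - Restart the scheduler after a GPU reset.
 * @ptdev: Device.
 *
 * Restart all groups stopped in panthor_sched_pre_reset(), then kick
 * the tick and sync-update works.
 */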
2694 void panthor_sched_post_reset(struct panthor_device *ptdev)
2695 {
2696 	struct panthor_scheduler *sched = ptdev->scheduler;
2697 	struct panthor_group *group, *group_tmp;
2698 
2699 	mutex_lock(&sched->reset.lock);
2700 
2701 	list_for_each_entry_safe(group, group_tmp, &sched->reset.stopped_groups, run_node)
2702 		panthor_group_start(group);
2703 
2704 	/* We're done resetting the GPU, clear the reset.in_progress bit so we can
2705 	 * kick the scheduler.
2706 	 */
2707 	atomic_set(&sched->reset.in_progress, false);
2708 	mutex_unlock(&sched->reset.lock);
2709 
2710 	sched_queue_delayed_work(sched, tick, 0);
2711 
2712 	sched_queue_work(sched, sync_upd);
2713 }
2714 
2715 static void group_sync_upd_work(struct work_struct *work)
2716 {
2717 	struct panthor_group *group =
2718 		container_of(work, struct panthor_group, sync_upd_work);
2719 	struct panthor_job *job, *job_tmp;
2720 	LIST_HEAD(done_jobs);
2721 	u32 queue_idx;
2722 	bool cookie;
2723 
2724 	cookie = dma_fence_begin_signalling();
2725 	for (queue_idx = 0; queue_idx < group->queue_count; queue_idx++) {
2726 		struct panthor_queue *queue = group->queues[queue_idx];
2727 		struct panthor_syncobj_64b *syncobj;
2728 
2729 		if (!queue)
2730 			continue;
2731 
2732 		syncobj = group->syncobjs->kmap + (queue_idx * sizeof(*syncobj));
2733 
2734 		spin_lock(&queue->fence_ctx.lock);
2735 		list_for_each_entry_safe(job, job_tmp, &queue->fence_ctx.in_flight_jobs, node) {
2736 			if (!job->call_info.size)
2737 				continue;
2738 
2739 			if (syncobj->seqno < job->done_fence->seqno)
2740 				break;
2741 
2742 			list_move_tail(&job->node, &done_jobs);
2743 			dma_fence_signal_locked(job->done_fence);
2744 		}
2745 		spin_unlock(&queue->fence_ctx.lock);
2746 	}
2747 	dma_fence_end_signalling(cookie);
2748 
2749 	list_for_each_entry_safe(job, job_tmp, &done_jobs, node) {
2750 		list_del_init(&job->node);
2751 		panthor_job_put(&job->base);
2752 	}
2753 
2754 	group_put(group);
2755 }
2756 
2757 static struct dma_fence *
2758 queue_run_job(struct drm_sched_job *sched_job)
2759 {
2760 	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
2761 	struct panthor_group *group = job->group;
2762 	struct panthor_queue *queue = group->queues[job->queue_idx];
2763 	struct panthor_device *ptdev = group->ptdev;
2764 	struct panthor_scheduler *sched = ptdev->scheduler;
2765 	u32 ringbuf_size = panthor_kernel_bo_size(queue->ringbuf);
2766 	u32 ringbuf_insert = queue->iface.input->insert & (ringbuf_size - 1);
2767 	u64 addr_reg = ptdev->csif_info.cs_reg_count -
2768 		       ptdev->csif_info.unpreserved_cs_reg_count;
2769 	u64 val_reg = addr_reg + 2;
2770 	u64 sync_addr = panthor_kernel_bo_gpuva(group->syncobjs) +
2771 			job->queue_idx * sizeof(struct panthor_syncobj_64b);
2772 	u32 waitall_mask = GENMASK(sched->sb_slot_count - 1, 0);
2773 	struct dma_fence *done_fence;
2774 	int ret;
2775 
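	/* Each job occupies a fixed NUM_INSTRS_PER_SLOT slot in the ring
	 * buffer: flush the caches, wait for the flush, CALL into the user
	 * command stream, then SYNC_ADD64 on the queue syncobj so
	 * group_sync_upd_work() can detect job completion, and finally an
	 * ERROR_BARRIER to recover from faults at job boundaries.
	 */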
2776 	u64 call_instrs[NUM_INSTRS_PER_SLOT] = {
2777 		/* MOV32 rX+2, cs.latest_flush */
2778 		(2ull << 56) | (val_reg << 48) | job->call_info.latest_flush,
2779 
2780 		/* FLUSH_CACHE2.clean_inv_all.no_wait.signal(0) rX+2 */
2781 		(36ull << 56) | (0ull << 48) | (val_reg << 40) | (0 << 16) | 0x233,
2782 
2783 		/* MOV48 rX:rX+1, cs.start */
2784 		(1ull << 56) | (addr_reg << 48) | job->call_info.start,
2785 
2786 		/* MOV32 rX+2, cs.size */
2787 		(2ull << 56) | (val_reg << 48) | job->call_info.size,
2788 
2789 		/* WAIT(0) => waits for FLUSH_CACHE2 instruction */
2790 		(3ull << 56) | (1 << 16),
2791 
2792 		/* CALL rX:rX+1, rX+2 */
2793 		(32ull << 56) | (addr_reg << 40) | (val_reg << 32),
2794 
2795 		/* MOV48 rX:rX+1, sync_addr */
2796 		(1ull << 56) | (addr_reg << 48) | sync_addr,
2797 
2798 		/* MOV48 rX+2, #1 */
2799 		(1ull << 56) | (val_reg << 48) | 1,
2800 
2801 		/* WAIT(all) */
2802 		(3ull << 56) | (waitall_mask << 16),
2803 
		/* SYNC_ADD64.system_scope.propagate_err.nowait rX:rX+1, rX+2 */
2805 		(51ull << 56) | (0ull << 48) | (addr_reg << 40) | (val_reg << 32) | (0 << 16) | 1,
2806 
2807 		/* ERROR_BARRIER, so we can recover from faults at job
2808 		 * boundaries.
2809 		 */
2810 		(47ull << 56),
2811 	};
2812 
2813 	/* Need to be cacheline aligned to please the prefetcher. */
	/* Needs to be cacheline-aligned to please the prefetcher. */
2815 		      "call_instrs is not aligned on a cacheline");
2816 
2817 	/* Stream size is zero, nothing to do => return a NULL fence and let
2818 	 * drm_sched signal the parent.
2819 	 */
2820 	if (!job->call_info.size)
2821 		return NULL;
2822 
2823 	ret = pm_runtime_resume_and_get(ptdev->base.dev);
2824 	if (drm_WARN_ON(&ptdev->base, ret))
2825 		return ERR_PTR(ret);
2826 
2827 	mutex_lock(&sched->lock);
2828 	if (!group_can_run(group)) {
2829 		done_fence = ERR_PTR(-ECANCELED);
2830 		goto out_unlock;
2831 	}
2832 
2833 	dma_fence_init(job->done_fence,
2834 		       &panthor_queue_fence_ops,
2835 		       &queue->fence_ctx.lock,
2836 		       queue->fence_ctx.id,
2837 		       atomic64_inc_return(&queue->fence_ctx.seqno));
2838 
2839 	memcpy(queue->ringbuf->kmap + ringbuf_insert,
2840 	       call_instrs, sizeof(call_instrs));
2841 
2842 	panthor_job_get(&job->base);
2843 	spin_lock(&queue->fence_ctx.lock);
2844 	list_add_tail(&job->node, &queue->fence_ctx.in_flight_jobs);
2845 	spin_unlock(&queue->fence_ctx.lock);
2846 
2847 	job->ringbuf.start = queue->iface.input->insert;
2848 	job->ringbuf.end = job->ringbuf.start + sizeof(call_instrs);
2849 
2850 	/* Make sure the ring buffer is updated before the INSERT
2851 	 * register.
2852 	 */
2853 	wmb();
2854 
2855 	queue->iface.input->extract = queue->iface.output->extract;
2856 	queue->iface.input->insert = job->ringbuf.end;
2857 
2858 	if (group->csg_id < 0) {
2859 		/* If the queue is blocked, we want to keep the timeout running, so we
2860 		 * can detect unbounded waits and kill the group when that happens.
2861 		 * Otherwise, we suspend the timeout so the time we spend waiting for
2862 		 * a CSG slot is not counted.
2863 		 */
2864 		if (!(group->blocked_queues & BIT(job->queue_idx)) &&
2865 		    !queue->timeout_suspended) {
2866 			queue->remaining_time = drm_sched_suspend_timeout(&queue->scheduler);
2867 			queue->timeout_suspended = true;
2868 		}
2869 
2870 		group_schedule_locked(group, BIT(job->queue_idx));
2871 	} else {
2872 		gpu_write(ptdev, CSF_DOORBELL(queue->doorbell_id), 1);
2873 		if (!sched->pm.has_ref &&
2874 		    !(group->blocked_queues & BIT(job->queue_idx))) {
2875 			pm_runtime_get(ptdev->base.dev);
2876 			sched->pm.has_ref = true;
2877 		}
2878 	}
2879 
2880 	done_fence = dma_fence_get(job->done_fence);
2881 
2882 out_unlock:
2883 	mutex_unlock(&sched->lock);
2884 	pm_runtime_mark_last_busy(ptdev->base.dev);
2885 	pm_runtime_put_autosuspend(ptdev->base.dev);
2886 
2887 	return done_fence;
2888 }
2889 
2890 static enum drm_gpu_sched_stat
2891 queue_timedout_job(struct drm_sched_job *sched_job)
2892 {
2893 	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
2894 	struct panthor_group *group = job->group;
2895 	struct panthor_device *ptdev = group->ptdev;
2896 	struct panthor_scheduler *sched = ptdev->scheduler;
2897 	struct panthor_queue *queue = group->queues[job->queue_idx];
2898 
2899 	drm_warn(&ptdev->base, "job timeout\n");
2900 
2901 	drm_WARN_ON(&ptdev->base, atomic_read(&sched->reset.in_progress));
2902 
2903 	queue_stop(queue, job);
2904 
2905 	mutex_lock(&sched->lock);
2906 	group->timedout = true;
2907 	if (group->csg_id >= 0) {
2908 		sched_queue_delayed_work(ptdev->scheduler, tick, 0);
2909 	} else {
2910 		/* Remove from the run queues, so the scheduler can't
2911 		 * pick the group on the next tick.
2912 		 */
2913 		list_del_init(&group->run_node);
2914 		list_del_init(&group->wait_node);
2915 
2916 		group_queue_work(group, term);
2917 	}
2918 	mutex_unlock(&sched->lock);
2919 
2920 	queue_start(queue);
2921 
2922 	return DRM_GPU_SCHED_STAT_NOMINAL;
2923 }
2924 
2925 static void queue_free_job(struct drm_sched_job *sched_job)
2926 {
2927 	drm_sched_job_cleanup(sched_job);
2928 	panthor_job_put(sched_job);
2929 }
2930 
2931 static const struct drm_sched_backend_ops panthor_queue_sched_ops = {
2932 	.run_job = queue_run_job,
2933 	.timedout_job = queue_timedout_job,
2934 	.free_job = queue_free_job,
2935 };
2936 
2937 static struct panthor_queue *
2938 group_create_queue(struct panthor_group *group,
2939 		   const struct drm_panthor_queue_create *args)
2940 {
2941 	struct drm_gpu_scheduler *drm_sched;
2942 	struct panthor_queue *queue;
2943 	int ret;
2944 
2945 	if (args->pad[0] || args->pad[1] || args->pad[2])
2946 		return ERR_PTR(-EINVAL);
2947 
2948 	if (args->ringbuf_size < SZ_4K || args->ringbuf_size > SZ_64K ||
2949 	    !is_power_of_2(args->ringbuf_size))
2950 		return ERR_PTR(-EINVAL);
2951 
2952 	if (args->priority > CSF_MAX_QUEUE_PRIO)
2953 		return ERR_PTR(-EINVAL);
2954 
2955 	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
2956 	if (!queue)
2957 		return ERR_PTR(-ENOMEM);
2958 
2959 	queue->fence_ctx.id = dma_fence_context_alloc(1);
2960 	spin_lock_init(&queue->fence_ctx.lock);
2961 	INIT_LIST_HEAD(&queue->fence_ctx.in_flight_jobs);
2962 
2963 	queue->priority = args->priority;
2964 
2965 	queue->ringbuf = panthor_kernel_bo_create(group->ptdev, group->vm,
2966 						  args->ringbuf_size,
2967 						  DRM_PANTHOR_BO_NO_MMAP,
2968 						  DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
2969 						  DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
2970 						  PANTHOR_VM_KERNEL_AUTO_VA);
2971 	if (IS_ERR(queue->ringbuf)) {
2972 		ret = PTR_ERR(queue->ringbuf);
2973 		goto err_free_queue;
2974 	}
2975 
2976 	ret = panthor_kernel_bo_vmap(queue->ringbuf);
2977 	if (ret)
2978 		goto err_free_queue;
2979 
2980 	queue->iface.mem = panthor_fw_alloc_queue_iface_mem(group->ptdev,
2981 							    &queue->iface.input,
2982 							    &queue->iface.output,
2983 							    &queue->iface.input_fw_va,
2984 							    &queue->iface.output_fw_va);
2985 	if (IS_ERR(queue->iface.mem)) {
2986 		ret = PTR_ERR(queue->iface.mem);
2987 		goto err_free_queue;
2988 	}
2989 
2990 	ret = drm_sched_init(&queue->scheduler, &panthor_queue_sched_ops,
2991 			     group->ptdev->scheduler->wq, 1,
2992 			     args->ringbuf_size / (NUM_INSTRS_PER_SLOT * sizeof(u64)),
2993 			     0, msecs_to_jiffies(JOB_TIMEOUT_MS),
2994 			     group->ptdev->reset.wq,
2995 			     NULL, "panthor-queue", group->ptdev->base.dev);
2996 	if (ret)
2997 		goto err_free_queue;
2998 
2999 	drm_sched = &queue->scheduler;
	ret = drm_sched_entity_init(&queue->entity, 0, &drm_sched, 1, NULL);
	if (ret)
		goto err_free_queue;
3001 
3002 	return queue;
3003 
3004 err_free_queue:
3005 	group_free_queue(group, queue);
3006 	return ERR_PTR(ret);
3007 }
3008 
3009 #define MAX_GROUPS_PER_POOL		128
3010 
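/**
 * panthor_group_create() - Create a scheduling group.
 * @pfile: File the group is created on.
 * @group_args: Group creation arguments.
 * @queue_args: Array of queue creation arguments, one entry per queue.
 *
 * Return: A positive group handle on success, a negative error code
 * otherwise.
 */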
3011 int panthor_group_create(struct panthor_file *pfile,
3012 			 const struct drm_panthor_group_create *group_args,
3013 			 const struct drm_panthor_queue_create *queue_args)
3014 {
3015 	struct panthor_device *ptdev = pfile->ptdev;
3016 	struct panthor_group_pool *gpool = pfile->groups;
3017 	struct panthor_scheduler *sched = ptdev->scheduler;
3018 	struct panthor_fw_csg_iface *csg_iface = panthor_fw_get_csg_iface(ptdev, 0);
3019 	struct panthor_group *group = NULL;
3020 	u32 gid, i, suspend_size;
3021 	int ret;
3022 
3023 	if (group_args->pad)
3024 		return -EINVAL;
3025 
3026 	if (group_args->priority > PANTHOR_CSG_PRIORITY_HIGH)
3027 		return -EINVAL;
3028 
3029 	if ((group_args->compute_core_mask & ~ptdev->gpu_info.shader_present) ||
3030 	    (group_args->fragment_core_mask & ~ptdev->gpu_info.shader_present) ||
3031 	    (group_args->tiler_core_mask & ~ptdev->gpu_info.tiler_present))
3032 		return -EINVAL;
3033 
3034 	if (hweight64(group_args->compute_core_mask) < group_args->max_compute_cores ||
3035 	    hweight64(group_args->fragment_core_mask) < group_args->max_fragment_cores ||
3036 	    hweight64(group_args->tiler_core_mask) < group_args->max_tiler_cores)
3037 		return -EINVAL;
3038 
3039 	group = kzalloc(sizeof(*group), GFP_KERNEL);
3040 	if (!group)
3041 		return -ENOMEM;
3042 
3043 	spin_lock_init(&group->fatal_lock);
3044 	kref_init(&group->refcount);
3045 	group->state = PANTHOR_CS_GROUP_CREATED;
3046 	group->csg_id = -1;
3047 
3048 	group->ptdev = ptdev;
3049 	group->max_compute_cores = group_args->max_compute_cores;
3050 	group->compute_core_mask = group_args->compute_core_mask;
3051 	group->max_fragment_cores = group_args->max_fragment_cores;
3052 	group->fragment_core_mask = group_args->fragment_core_mask;
3053 	group->max_tiler_cores = group_args->max_tiler_cores;
3054 	group->tiler_core_mask = group_args->tiler_core_mask;
3055 	group->priority = group_args->priority;
3056 
3057 	INIT_LIST_HEAD(&group->wait_node);
3058 	INIT_LIST_HEAD(&group->run_node);
3059 	INIT_WORK(&group->term_work, group_term_work);
3060 	INIT_WORK(&group->sync_upd_work, group_sync_upd_work);
3061 	INIT_WORK(&group->tiler_oom_work, group_tiler_oom_work);
3062 	INIT_WORK(&group->release_work, group_release_work);
3063 
3064 	group->vm = panthor_vm_pool_get_vm(pfile->vms, group_args->vm_id);
3065 	if (!group->vm) {
3066 		ret = -EINVAL;
3067 		goto err_put_group;
3068 	}
3069 
3070 	suspend_size = csg_iface->control->suspend_size;
3071 	group->suspend_buf = panthor_fw_alloc_suspend_buf_mem(ptdev, suspend_size);
3072 	if (IS_ERR(group->suspend_buf)) {
3073 		ret = PTR_ERR(group->suspend_buf);
3074 		group->suspend_buf = NULL;
3075 		goto err_put_group;
3076 	}
3077 
3078 	suspend_size = csg_iface->control->protm_suspend_size;
3079 	group->protm_suspend_buf = panthor_fw_alloc_suspend_buf_mem(ptdev, suspend_size);
3080 	if (IS_ERR(group->protm_suspend_buf)) {
3081 		ret = PTR_ERR(group->protm_suspend_buf);
3082 		group->protm_suspend_buf = NULL;
3083 		goto err_put_group;
3084 	}
3085 
3086 	group->syncobjs = panthor_kernel_bo_create(ptdev, group->vm,
3087 						   group_args->queues.count *
3088 						   sizeof(struct panthor_syncobj_64b),
3089 						   DRM_PANTHOR_BO_NO_MMAP,
3090 						   DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
3091 						   DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
3092 						   PANTHOR_VM_KERNEL_AUTO_VA);
3093 	if (IS_ERR(group->syncobjs)) {
3094 		ret = PTR_ERR(group->syncobjs);
3095 		goto err_put_group;
3096 	}
3097 
3098 	ret = panthor_kernel_bo_vmap(group->syncobjs);
3099 	if (ret)
3100 		goto err_put_group;
3101 
3102 	memset(group->syncobjs->kmap, 0,
3103 	       group_args->queues.count * sizeof(struct panthor_syncobj_64b));
3104 
3105 	for (i = 0; i < group_args->queues.count; i++) {
3106 		group->queues[i] = group_create_queue(group, &queue_args[i]);
3107 		if (IS_ERR(group->queues[i])) {
3108 			ret = PTR_ERR(group->queues[i]);
3109 			group->queues[i] = NULL;
3110 			goto err_put_group;
3111 		}
3112 
3113 		group->queue_count++;
3114 	}
3115 
3116 	group->idle_queues = GENMASK(group->queue_count - 1, 0);
3117 
3118 	ret = xa_alloc(&gpool->xa, &gid, group, XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL);
3119 	if (ret)
3120 		goto err_put_group;
3121 
3122 	mutex_lock(&sched->reset.lock);
3123 	if (atomic_read(&sched->reset.in_progress)) {
3124 		panthor_group_stop(group);
3125 	} else {
3126 		mutex_lock(&sched->lock);
3127 		list_add_tail(&group->run_node,
3128 			      &sched->groups.idle[group->priority]);
3129 		mutex_unlock(&sched->lock);
3130 	}
3131 	mutex_unlock(&sched->reset.lock);
3132 
3133 	return gid;
3134 
3135 err_put_group:
3136 	group_put(group);
3137 	return ret;
3138 }
3139 
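/**
 * panthor_group_destroy() - Destroy a scheduling group.
 * @pfile: File the group belongs to.
 * @group_handle: Handle of the group to destroy.
 *
 * Return: 0 on success, -EINVAL if the handle is invalid.
 */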
3140 int panthor_group_destroy(struct panthor_file *pfile, u32 group_handle)
3141 {
3142 	struct panthor_group_pool *gpool = pfile->groups;
3143 	struct panthor_device *ptdev = pfile->ptdev;
3144 	struct panthor_scheduler *sched = ptdev->scheduler;
3145 	struct panthor_group *group;
3146 
3147 	group = xa_erase(&gpool->xa, group_handle);
3148 	if (!group)
3149 		return -EINVAL;
3150 
3151 	for (u32 i = 0; i < group->queue_count; i++) {
3152 		if (group->queues[i])
3153 			drm_sched_entity_destroy(&group->queues[i]->entity);
3154 	}
3155 
3156 	mutex_lock(&sched->reset.lock);
3157 	mutex_lock(&sched->lock);
3158 	group->destroyed = true;
3159 	if (group->csg_id >= 0) {
3160 		sched_queue_delayed_work(sched, tick, 0);
3161 	} else if (!atomic_read(&sched->reset.in_progress)) {
3162 		/* Remove from the run queues, so the scheduler can't
3163 		 * pick the group on the next tick.
3164 		 */
3165 		list_del_init(&group->run_node);
3166 		list_del_init(&group->wait_node);
3167 		group_queue_work(group, term);
3168 	}
3169 	mutex_unlock(&sched->lock);
3170 	mutex_unlock(&sched->reset.lock);
3171 
3172 	group_put(group);
3173 	return 0;
3174 }
3175 
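/**
 * panthor_group_get_state() - Report the state of a group to userspace.
 * @pfile: File the group belongs to.
 * @get_state: State query arguments; filled with the timedout and
 * fatal-fault state of the group.
 *
 * Return: 0 on success, a negative error code otherwise.
 */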
3176 int panthor_group_get_state(struct panthor_file *pfile,
3177 			    struct drm_panthor_group_get_state *get_state)
3178 {
3179 	struct panthor_group_pool *gpool = pfile->groups;
3180 	struct panthor_device *ptdev = pfile->ptdev;
3181 	struct panthor_scheduler *sched = ptdev->scheduler;
3182 	struct panthor_group *group;
3183 
3184 	if (get_state->pad)
3185 		return -EINVAL;
3186 
3187 	group = group_get(xa_load(&gpool->xa, get_state->group_handle));
3188 	if (!group)
3189 		return -EINVAL;
3190 
3191 	memset(get_state, 0, sizeof(*get_state));
3192 
3193 	mutex_lock(&sched->lock);
3194 	if (group->timedout)
3195 		get_state->state |= DRM_PANTHOR_GROUP_STATE_TIMEDOUT;
3196 	if (group->fatal_queues) {
3197 		get_state->state |= DRM_PANTHOR_GROUP_STATE_FATAL_FAULT;
3198 		get_state->fatal_queues = group->fatal_queues;
3199 	}
3200 	mutex_unlock(&sched->lock);
3201 
3202 	group_put(group);
3203 	return 0;
3204 }
3205 
3206 int panthor_group_pool_create(struct panthor_file *pfile)
3207 {
3208 	struct panthor_group_pool *gpool;
3209 
3210 	gpool = kzalloc(sizeof(*gpool), GFP_KERNEL);
3211 	if (!gpool)
3212 		return -ENOMEM;
3213 
3214 	xa_init_flags(&gpool->xa, XA_FLAGS_ALLOC1);
3215 	pfile->groups = gpool;
3216 	return 0;
3217 }
3218 
3219 void panthor_group_pool_destroy(struct panthor_file *pfile)
3220 {
3221 	struct panthor_group_pool *gpool = pfile->groups;
3222 	struct panthor_group *group;
3223 	unsigned long i;
3224 
3225 	if (IS_ERR_OR_NULL(gpool))
3226 		return;
3227 
3228 	xa_for_each(&gpool->xa, i, group)
3229 		panthor_group_destroy(pfile, i);
3230 
3231 	xa_destroy(&gpool->xa);
3232 	kfree(gpool);
3233 	pfile->groups = NULL;
3234 }
3235 
3236 static void job_release(struct kref *ref)
3237 {
3238 	struct panthor_job *job = container_of(ref, struct panthor_job, refcount);
3239 
3240 	drm_WARN_ON(&job->group->ptdev->base, !list_empty(&job->node));
3241 
3242 	if (job->base.s_fence)
3243 		drm_sched_job_cleanup(&job->base);
3244 
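	/* A fence that was never initialized has no ops and can't go
	 * through the regular put path, so free it directly instead.
	 */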
3245 	if (job->done_fence && job->done_fence->ops)
3246 		dma_fence_put(job->done_fence);
3247 	else
3248 		dma_fence_free(job->done_fence);
3249 
3250 	group_put(job->group);
3251 
3252 	kfree(job);
3253 }
3254 
3255 struct drm_sched_job *panthor_job_get(struct drm_sched_job *sched_job)
3256 {
3257 	if (sched_job) {
3258 		struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
3259 
3260 		kref_get(&job->refcount);
3261 	}
3262 
3263 	return sched_job;
3264 }
3265 
3266 void panthor_job_put(struct drm_sched_job *sched_job)
3267 {
3268 	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
3269 
3270 	if (sched_job)
3271 		kref_put(&job->refcount, job_release);
3272 }
3273 
3274 struct panthor_vm *panthor_job_vm(struct drm_sched_job *sched_job)
3275 {
3276 	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
3277 
3278 	return job->group->vm;
3279 }
3280 
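/**
 * panthor_job_create() - Create a job out of a queue-submit request.
 * @pfile: File the job is submitted on.
 * @group_handle: Handle of the group owning the target queue.
 * @qsubmit: Queue submission arguments.
 *
 * Return: A valid drm_sched_job pointer on success, an ERR_PTR()
 * otherwise.
 */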
3281 struct drm_sched_job *
3282 panthor_job_create(struct panthor_file *pfile,
3283 		   u16 group_handle,
3284 		   const struct drm_panthor_queue_submit *qsubmit)
3285 {
3286 	struct panthor_group_pool *gpool = pfile->groups;
3287 	struct panthor_job *job;
3288 	int ret;
3289 
3290 	if (qsubmit->pad)
3291 		return ERR_PTR(-EINVAL);
3292 
	/* stream_addr and stream_size must either both be zero or both be non-zero. */
3294 	if ((qsubmit->stream_size == 0) != (qsubmit->stream_addr == 0))
3295 		return ERR_PTR(-EINVAL);
3296 
	/* Make sure the address is aligned on 64 bytes (cacheline) and the
	 * size on 8 bytes (instruction size).
	 */
3300 	if ((qsubmit->stream_addr & 63) || (qsubmit->stream_size & 7))
3301 		return ERR_PTR(-EINVAL);
3302 
3303 	/* bits 24:30 must be zero. */
3304 	if (qsubmit->latest_flush & GENMASK(30, 24))
3305 		return ERR_PTR(-EINVAL);
3306 
3307 	job = kzalloc(sizeof(*job), GFP_KERNEL);
3308 	if (!job)
3309 		return ERR_PTR(-ENOMEM);
3310 
3311 	kref_init(&job->refcount);
3312 	job->queue_idx = qsubmit->queue_index;
3313 	job->call_info.size = qsubmit->stream_size;
3314 	job->call_info.start = qsubmit->stream_addr;
3315 	job->call_info.latest_flush = qsubmit->latest_flush;
3316 	INIT_LIST_HEAD(&job->node);
3317 
3318 	job->group = group_get(xa_load(&gpool->xa, group_handle));
3319 	if (!job->group) {
3320 		ret = -EINVAL;
3321 		goto err_put_job;
3322 	}
3323 
3324 	if (job->queue_idx >= job->group->queue_count ||
3325 	    !job->group->queues[job->queue_idx]) {
3326 		ret = -EINVAL;
3327 		goto err_put_job;
3328 	}
3329 
3330 	job->done_fence = kzalloc(sizeof(*job->done_fence), GFP_KERNEL);
3331 	if (!job->done_fence) {
3332 		ret = -ENOMEM;
3333 		goto err_put_job;
3334 	}
3335 
3336 	ret = drm_sched_job_init(&job->base,
3337 				 &job->group->queues[job->queue_idx]->entity,
3338 				 1, job->group);
3339 	if (ret)
3340 		goto err_put_job;
3341 
3342 	return &job->base;
3343 
3344 err_put_job:
3345 	panthor_job_put(&job->base);
3346 	return ERR_PTR(ret);
3347 }
3348 
3349 void panthor_job_update_resvs(struct drm_exec *exec, struct drm_sched_job *sched_job)
3350 {
3351 	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
3352 
3353 	/* Still not sure why we want USAGE_WRITE for external objects, since I
3354 	 * was assuming this would be handled through explicit syncs being imported
3355 	 * to external BOs with DMA_BUF_IOCTL_IMPORT_SYNC_FILE, but other drivers
3356 	 * seem to pass DMA_RESV_USAGE_WRITE, so there must be a good reason.
3357 	 */
3358 	panthor_vm_update_resvs(job->group->vm, exec, &sched_job->s_fence->finished,
3359 				DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_WRITE);
3360 }
3361 
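/**
 * panthor_sched_unplug() - Stop the scheduler on device unplug.
 * @ptdev: Device.
 */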
3362 void panthor_sched_unplug(struct panthor_device *ptdev)
3363 {
3364 	struct panthor_scheduler *sched = ptdev->scheduler;
3365 
3366 	cancel_delayed_work_sync(&sched->tick_work);
3367 
3368 	mutex_lock(&sched->lock);
3369 	if (sched->pm.has_ref) {
3370 		pm_runtime_put(ptdev->base.dev);
3371 		sched->pm.has_ref = false;
3372 	}
3373 	mutex_unlock(&sched->lock);
3374 }
3375 
3376 static void panthor_sched_fini(struct drm_device *ddev, void *res)
3377 {
3378 	struct panthor_scheduler *sched = res;
3379 	int prio;
3380 
3381 	if (!sched || !sched->csg_slot_count)
3382 		return;
3383 
3384 	cancel_delayed_work_sync(&sched->tick_work);
3385 
3386 	if (sched->wq)
3387 		destroy_workqueue(sched->wq);
3388 
3389 	if (sched->heap_alloc_wq)
3390 		destroy_workqueue(sched->heap_alloc_wq);
3391 
3392 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
3393 		drm_WARN_ON(ddev, !list_empty(&sched->groups.runnable[prio]));
3394 		drm_WARN_ON(ddev, !list_empty(&sched->groups.idle[prio]));
3395 	}
3396 
3397 	drm_WARN_ON(ddev, !list_empty(&sched->groups.waiting));
3398 }
3399 
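/**
 * panthor_sched_init() - Initialize the scheduler.
 * @ptdev: Device.
 *
 * Return: 0 on success, a negative error code otherwise.
 */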
3400 int panthor_sched_init(struct panthor_device *ptdev)
3401 {
3402 	struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
3403 	struct panthor_fw_csg_iface *csg_iface = panthor_fw_get_csg_iface(ptdev, 0);
3404 	struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, 0, 0);
3405 	struct panthor_scheduler *sched;
3406 	u32 gpu_as_count, num_groups;
3407 	int prio, ret;
3408 
3409 	sched = drmm_kzalloc(&ptdev->base, sizeof(*sched), GFP_KERNEL);
3410 	if (!sched)
3411 		return -ENOMEM;
3412 
	/* The highest bit in JOB_INT_* is reserved for global IRQs. That
3414 	 * leaves 31 bits for CSG IRQs, hence the MAX_CSGS clamp here.
3415 	 */
3416 	num_groups = min_t(u32, MAX_CSGS, glb_iface->control->group_num);
3417 
3418 	/* The FW-side scheduler might deadlock if two groups with the same
3419 	 * priority try to access a set of resources that overlaps, with part
3420 	 * of the resources being allocated to one group and the other part to
3421 	 * the other group, both groups waiting for the remaining resources to
3422 	 * be allocated. To avoid that, it is recommended to assign each CSG a
3423 	 * different priority. In theory we could allow several groups to have
3424 	 * the same CSG priority if they don't request the same resources, but
3425 	 * that makes the scheduling logic more complicated, so let's clamp
3426 	 * the number of CSG slots to MAX_CSG_PRIO + 1 for now.
3427 	 */
3428 	num_groups = min_t(u32, MAX_CSG_PRIO + 1, num_groups);
3429 
3430 	/* We need at least one AS for the MCU and one for the GPU contexts. */
3431 	gpu_as_count = hweight32(ptdev->gpu_info.as_present & GENMASK(31, 1));
3432 	if (!gpu_as_count) {
3433 		drm_err(&ptdev->base, "Not enough AS (%d, expected at least 2)",
3434 			gpu_as_count + 1);
3435 		return -EINVAL;
3436 	}
3437 
3438 	sched->ptdev = ptdev;
3439 	sched->sb_slot_count = CS_FEATURES_SCOREBOARDS(cs_iface->control->features);
3440 	sched->csg_slot_count = num_groups;
3441 	sched->cs_slot_count = csg_iface->control->stream_num;
3442 	sched->as_slot_count = gpu_as_count;
3443 	ptdev->csif_info.csg_slot_count = sched->csg_slot_count;
3444 	ptdev->csif_info.cs_slot_count = sched->cs_slot_count;
3445 	ptdev->csif_info.scoreboard_slot_count = sched->sb_slot_count;
3446 
3447 	sched->last_tick = 0;
3448 	sched->resched_target = U64_MAX;
3449 	sched->tick_period = msecs_to_jiffies(10);
3450 	INIT_DELAYED_WORK(&sched->tick_work, tick_work);
3451 	INIT_WORK(&sched->sync_upd_work, sync_upd_work);
3452 	INIT_WORK(&sched->fw_events_work, process_fw_events_work);
3453 
3454 	ret = drmm_mutex_init(&ptdev->base, &sched->lock);
3455 	if (ret)
3456 		return ret;
3457 
3458 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
3459 		INIT_LIST_HEAD(&sched->groups.runnable[prio]);
3460 		INIT_LIST_HEAD(&sched->groups.idle[prio]);
3461 	}
3462 	INIT_LIST_HEAD(&sched->groups.waiting);
3463 
3464 	ret = drmm_mutex_init(&ptdev->base, &sched->reset.lock);
3465 	if (ret)
3466 		return ret;
3467 
3468 	INIT_LIST_HEAD(&sched->reset.stopped_groups);
3469 
3470 	/* sched->heap_alloc_wq will be used for heap chunk allocation on
3471 	 * tiler OOM events, which means we can't use the same workqueue for
3472 	 * the scheduler because works queued by the scheduler are in
3473 	 * the dma-signalling path. Allocate a dedicated heap_alloc_wq to
3474 	 * work around this limitation.
3475 	 *
3476 	 * FIXME: Ultimately, what we need is a failable/non-blocking GEM
3477 	 * allocation path that we can call when a heap OOM is reported. The
3478 	 * FW is smart enough to fall back on other methods if the kernel can't
3479 	 * allocate memory, and fail the tiling job if none of these
3480 	 * countermeasures worked.
3481 	 *
3482 	 * Set WQ_MEM_RECLAIM on sched->wq to unblock the situation when the
3483 	 * system is running out of memory.
3484 	 */
3485 	sched->heap_alloc_wq = alloc_workqueue("panthor-heap-alloc", WQ_UNBOUND, 0);
3486 	sched->wq = alloc_workqueue("panthor-csf-sched", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
3487 	if (!sched->wq || !sched->heap_alloc_wq) {
3488 		panthor_sched_fini(&ptdev->base, sched);
3489 		drm_err(&ptdev->base, "Failed to allocate the workqueues");
3490 		return -ENOMEM;
3491 	}
3492 
3493 	ret = drmm_add_action_or_reset(&ptdev->base, panthor_sched_fini, sched);
3494 	if (ret)
3495 		return ret;
3496 
3497 	ptdev->scheduler = sched;
3498 	return 0;
3499 }
3500