xref: /linux/drivers/gpu/drm/panthor/panthor_sched.c (revision 2c1ed907520c50326b8f604907a8478b27881a2e)
1 // SPDX-License-Identifier: GPL-2.0 or MIT
2 /* Copyright 2023 Collabora ltd. */
3 
4 #include <drm/drm_drv.h>
5 #include <drm/drm_exec.h>
6 #include <drm/drm_gem_shmem_helper.h>
7 #include <drm/drm_managed.h>
8 #include <drm/gpu_scheduler.h>
9 #include <drm/panthor_drm.h>
10 
11 #include <linux/build_bug.h>
12 #include <linux/clk.h>
13 #include <linux/delay.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/dma-resv.h>
16 #include <linux/firmware.h>
17 #include <linux/interrupt.h>
18 #include <linux/io.h>
19 #include <linux/iopoll.h>
20 #include <linux/iosys-map.h>
21 #include <linux/module.h>
22 #include <linux/platform_device.h>
23 #include <linux/pm_runtime.h>
24 
25 #include "panthor_devfreq.h"
26 #include "panthor_device.h"
27 #include "panthor_fw.h"
28 #include "panthor_gem.h"
29 #include "panthor_gpu.h"
30 #include "panthor_heap.h"
31 #include "panthor_mmu.h"
32 #include "panthor_regs.h"
33 #include "panthor_sched.h"
34 
35 /**
36  * DOC: Scheduler
37  *
38  * Mali CSF hardware adopts a firmware-assisted scheduling model, where
39  * the firmware takes care of scheduling aspects, to some extent.
40  *
41  * The scheduling happens at the scheduling group level; each group
42  * contains 1 to N queues (N is FW/hardware dependent, and exposed
43  * through the firmware interface). Each queue is assigned a command
44  * stream ring buffer, which serves as a way to get jobs submitted to
45  * the GPU, among other things.
46  *
47  * The firmware can schedule a maximum of M groups (M is FW/hardware
48  * dependent, and exposed through the firmware interface). Past
49  * this maximum number of groups, the kernel must take care of
50  * rotating the groups passed to the firmware so every group gets
51  * a chance to have its queues scheduled for execution.
52  *
53  * The current implementation only supports kernel-mode queues.
54  * In other words, userspace doesn't have access to the ring-buffer.
55  * Instead, userspace passes indirect command stream buffers that are
56  * called from the queue ring-buffer by the kernel using a pre-defined
57  * sequence of command stream instructions to ensure the userspace driver
58  * always gets consistent results (cache maintenance,
59  * synchronization, ...).
60  *
61  * We rely on the drm_gpu_scheduler framework to deal with job
62  * dependencies and submission. As any other driver dealing with a
63  * FW-scheduler, we use the 1:1 entity:scheduler mode, such that each
64  * entity has its own job scheduler. When a job is ready to be executed
65  * (all its dependencies are met), it is pushed to the appropriate
66  * queue ring-buffer, and the group is scheduled for execution if it
67  * wasn't already active.
68  *
69  * Kernel-side group scheduling is timeslice-based. When we have fewer
70  * groups than there are slots, the periodic tick is disabled and we
71  * just let the FW schedule the active groups. When there are more
72  * groups than slots, we give each group a chance to execute for
73  * a given amount of time, and then re-evaluate and pick new groups
74  * to schedule. The group selection algorithm is based on
75  * priority+round-robin.
76  *
77  * Even though user-mode queues are out of scope right now, the
78  * current design takes them into account by avoiding any guess on the
79  * group/queue state that would be based on information we wouldn't have
80  * if userspace was in charge of the ring-buffer. That's also one of the
81  * reasons we don't do 'cooperative' scheduling (encoding FW group slot
82  * reservation as dma_fence that would be returned from the
83  * drm_gpu_scheduler::prepare_job() hook, and treating group rotation as
84  * a queue of waiters, ordered by job submission order). This approach
85  * would work for kernel-mode queues, but would make user-mode queues a
86  * lot more complicated to retrofit.
87  */
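/*
 * Editor's illustrative sketch (not driver code): the timeslice logic
 * described above roughly boils down to deciding, at the end of a tick,
 * whether another tick is needed. Assuming a placeholder condition
 * 'all_runnable_groups_fit_in_csg_slots', it looks like:
 *
 *	if (all_runnable_groups_fit_in_csg_slots) {
 *		sched->resched_target = U64_MAX;	// no tick, FW schedules on its own
 *	} else {
 *		sched->resched_target = now + sched->tick_period;
 *		sched_queue_delayed_work(sched, tick, sched->tick_period);
 *	}
 *
 * 'sched' is the struct panthor_scheduler instance defined below.
 */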
88 
89 #define JOB_TIMEOUT_MS				5000
90 
91 #define MIN_CS_PER_CSG				8
92 
93 #define MIN_CSGS				3
94 #define MAX_CSG_PRIO				0xf
95 
96 #define NUM_INSTRS_PER_CACHE_LINE		(64 / sizeof(u64))
97 #define MAX_INSTRS_PER_JOB			24
98 
99 struct panthor_group;
100 
101 /**
102  * struct panthor_csg_slot - Command stream group slot
103  *
104  * This represents a FW slot for a scheduling group.
105  */
106 struct panthor_csg_slot {
107 	/** @group: Scheduling group bound to this slot. */
108 	struct panthor_group *group;
109 
110 	/** @priority: Group priority. */
111 	u8 priority;
112 
113 	/**
114 	 * @idle: True if the group bound to this slot is idle.
115 	 *
116 	 * A group is idle when it has nothing waiting for execution on
117 	 * all its queues, or when queues are blocked waiting for something
118 	 * to happen (synchronization object).
119 	 */
120 	bool idle;
121 };
122 
123 /**
124  * enum panthor_csg_priority - Group priority
125  */
126 enum panthor_csg_priority {
127 	/** @PANTHOR_CSG_PRIORITY_LOW: Low priority group. */
128 	PANTHOR_CSG_PRIORITY_LOW = 0,
129 
130 	/** @PANTHOR_CSG_PRIORITY_MEDIUM: Medium priority group. */
131 	PANTHOR_CSG_PRIORITY_MEDIUM,
132 
133 	/** @PANTHOR_CSG_PRIORITY_HIGH: High priority group. */
134 	PANTHOR_CSG_PRIORITY_HIGH,
135 
136 	/**
137 	 * @PANTHOR_CSG_PRIORITY_RT: Real-time priority group.
138 	 *
139 	 * Real-time priority allows one to preempt scheduling of other
140 	 * non-real-time groups. When such a group becomes executable,
141 	 * it will evict the group with the lowest non-rt priority if
142 	 * there's no free group slot available.
143 	 */
144 	PANTHOR_CSG_PRIORITY_RT,
145 
146 	/** @PANTHOR_CSG_PRIORITY_COUNT: Number of priority levels. */
147 	PANTHOR_CSG_PRIORITY_COUNT,
148 };
149 
150 /**
151  * struct panthor_scheduler - Object used to manage the scheduler
152  */
153 struct panthor_scheduler {
154 	/** @ptdev: Device. */
155 	struct panthor_device *ptdev;
156 
157 	/**
158 	 * @wq: Workqueue used by our internal scheduler logic and
159 	 * drm_gpu_scheduler.
160 	 *
161 	 * Used for the scheduler tick, group update or other kind of FW
162 	 * event processing that can't be handled in the threaded interrupt
163 	 * path. Also passed to the drm_gpu_scheduler instances embedded
164 	 * in panthor_queue.
165 	 */
166 	struct workqueue_struct *wq;
167 
168 	/**
169 	 * @heap_alloc_wq: Workqueue used to schedule tiler_oom works.
170 	 *
171 	 * We have a queue dedicated to heap chunk allocation works to avoid
172 	 * blocking the rest of the scheduler if the allocation tries to
173 	 * reclaim memory.
174 	 */
175 	struct workqueue_struct *heap_alloc_wq;
176 
177 	/** @tick_work: Work executed on a scheduling tick. */
178 	struct delayed_work tick_work;
179 
180 	/**
181 	 * @sync_upd_work: Work used to process synchronization object updates.
182 	 *
183 	 * We use this work to unblock queues/groups that were waiting on a
184 	 * synchronization object.
185 	 */
186 	struct work_struct sync_upd_work;
187 
188 	/**
189 	 * @fw_events_work: Work used to process FW events outside the interrupt path.
190 	 *
191 	 * Even if the interrupt is threaded, we need any event processing
192 	 * that requires taking the panthor_scheduler::lock to be processed
193 	 * outside the interrupt path so we don't block the tick logic when
194 	 * it calls panthor_fw_{csg,wait}_wait_acks(). Since most of the
195 	 * event processing requires taking this lock, we just delegate all
196 	 * FW event processing to the scheduler workqueue.
197 	 */
198 	struct work_struct fw_events_work;
199 
200 	/**
201 	 * @fw_events: Bitmask encoding pending FW events.
202 	 */
203 	atomic_t fw_events;
204 
205 	/**
206 	 * @resched_target: When the next tick should occur.
207 	 *
208 	 * Expressed in jiffies.
209 	 */
210 	u64 resched_target;
211 
212 	/**
213 	 * @last_tick: When the last tick occurred.
214 	 *
215 	 * Expressed in jiffies.
216 	 */
217 	u64 last_tick;
218 
219 	/** @tick_period: Tick period in jiffies. */
220 	u64 tick_period;
221 
222 	/**
223 	 * @lock: Lock protecting access to all the scheduler fields.
224 	 *
225 	 * Should be taken in the tick work, the irq handler, and anywhere the @groups
226 	 * fields are touched.
227 	 */
228 	struct mutex lock;
229 
230 	/** @groups: Various lists used to classify groups. */
231 	struct {
232 		/**
233 		 * @runnable: Runnable group lists.
234 		 *
235 		 * When a group has queues that want to execute something,
236 		 * its panthor_group::run_node should be inserted here.
237 		 *
238 		 * One list per-priority.
239 		 */
240 		struct list_head runnable[PANTHOR_CSG_PRIORITY_COUNT];
241 
242 		/**
243 		 * @idle: Idle group lists.
244 		 *
245 		 * When all queues of a group are idle (either because they
246 		 * have nothing to execute, or because they are blocked), the
247 		 * panthor_group::run_node field should be inserted here.
248 		 *
249 		 * One list per-priority.
250 		 */
251 		struct list_head idle[PANTHOR_CSG_PRIORITY_COUNT];
252 
253 		/**
254 		 * @waiting: List of groups whose queues are blocked on a
255 		 * synchronization object.
256 		 *
257 		 * Insert panthor_group::wait_node here when a group is waiting
258 		 * for synchronization objects to be signaled.
259 		 *
260 		 * This list is evaluated in the @sync_upd_work work.
261 		 */
262 		struct list_head waiting;
263 	} groups;
264 
265 	/**
266 	 * @csg_slots: FW command stream group slots.
267 	 */
268 	struct panthor_csg_slot csg_slots[MAX_CSGS];
269 
270 	/** @csg_slot_count: Number of command stream group slots exposed by the FW. */
271 	u32 csg_slot_count;
272 
273 	/** @cs_slot_count: Number of command stream slots per group slot exposed by the FW. */
274 	u32 cs_slot_count;
275 
276 	/** @as_slot_count: Number of address space slots supported by the MMU. */
277 	u32 as_slot_count;
278 
279 	/** @used_csg_slot_count: Number of command stream group slots currently used. */
280 	u32 used_csg_slot_count;
281 
282 	/** @sb_slot_count: Number of scoreboard slots. */
283 	u32 sb_slot_count;
284 
285 	/**
286 	 * @might_have_idle_groups: True if an active group might have become idle.
287 	 *
288 	 * This will force a tick, so other runnable groups can be scheduled if one
289 	 * or more active groups became idle.
290 	 */
291 	bool might_have_idle_groups;
292 
293 	/** @pm: Power management related fields. */
294 	struct {
295 		/** @has_ref: True if the scheduler owns a runtime PM reference. */
296 		bool has_ref;
297 	} pm;
298 
299 	/** @reset: Reset related fields. */
300 	struct {
301 		/** @lock: Lock protecting the other reset fields. */
302 		struct mutex lock;
303 
304 		/**
305 		 * @in_progress: True if a reset is in progress.
306 		 *
307 		 * Set to true in panthor_sched_pre_reset() and back to false in
308 		 * panthor_sched_post_reset().
309 		 */
310 		atomic_t in_progress;
311 
312 		/**
313 		 * @stopped_groups: List containing all groups that were stopped
314 		 * before a reset.
315 		 *
316 		 * Insert panthor_group::run_node in the pre_reset path.
317 		 */
318 		struct list_head stopped_groups;
319 	} reset;
320 };
321 
322 /**
323  * struct panthor_syncobj_32b - 32-bit FW synchronization object
324  */
325 struct panthor_syncobj_32b {
326 	/** @seqno: Sequence number. */
327 	u32 seqno;
328 
329 	/**
330 	 * @status: Status.
331 	 *
332 	 * Not zero on failure.
333 	 */
334 	u32 status;
335 };
336 
337 /**
338  * struct panthor_syncobj_64b - 64-bit FW synchronization object
339  */
340 struct panthor_syncobj_64b {
341 	/** @seqno: Sequence number. */
342 	u64 seqno;
343 
344 	/**
345 	 * @status: Status.
346 	 *
347 	 * Not zero on failure.
348 	 */
349 	u32 status;
350 
351 	/** @pad: MBZ. */
352 	u32 pad;
353 };
354 
355 /**
356  * struct panthor_queue - Execution queue
357  */
358 struct panthor_queue {
359 	/** @scheduler: DRM scheduler used for this queue. */
360 	struct drm_gpu_scheduler scheduler;
361 
362 	/** @entity: DRM scheduling entity used for this queue. */
363 	struct drm_sched_entity entity;
364 
365 	/**
366 	 * @remaining_time: Time remaining before the job timeout expires.
367 	 *
368 	 * The job timeout is suspended when the queue is not scheduled by the
369 	 * FW. Every time we suspend the timer, we need to save the remaining
370 	 * time so we can restore it later on.
371 	 */
372 	unsigned long remaining_time;
373 
374 	/** @timeout_suspended: True if the job timeout was suspended. */
375 	bool timeout_suspended;
376 
377 	/**
378 	 * @doorbell_id: Doorbell assigned to this queue.
379 	 *
380 	 * Right now, all groups share the same doorbell, and the doorbell ID
381 	 * is assigned to group_slot + 1 when the group is assigned a slot. But
382 	 * we might decide to provide fine-grained doorbell assignment at some
383 	 * point, so we don't have to wake up all queues in a group every time one
384 	 * of them is updated.
385 	 */
386 	u8 doorbell_id;
387 
388 	/**
389 	 * @priority: Priority of the queue inside the group.
390 	 *
391 	 * Must be less than 16 (Only 4 bits available).
392 	 */
393 	u8 priority;
394 #define CSF_MAX_QUEUE_PRIO	GENMASK(3, 0)
395 
396 	/** @ringbuf: Command stream ring-buffer. */
397 	struct panthor_kernel_bo *ringbuf;
398 
399 	/** @iface: Firmware interface. */
400 	struct {
401 		/** @mem: FW memory allocated for this interface. */
402 		struct panthor_kernel_bo *mem;
403 
404 		/** @input: Input interface. */
405 		struct panthor_fw_ringbuf_input_iface *input;
406 
407 		/** @output: Output interface. */
408 		const struct panthor_fw_ringbuf_output_iface *output;
409 
410 		/** @input_fw_va: FW virtual address of the input interface buffer. */
411 		u32 input_fw_va;
412 
413 		/** @output_fw_va: FW virtual address of the output interface buffer. */
414 		u32 output_fw_va;
415 	} iface;
416 
417 	/**
418 	 * @syncwait: Stores information about the synchronization object this
419 	 * queue is waiting on.
420 	 */
421 	struct {
422 		/** @gpu_va: GPU address of the synchronization object. */
423 		u64 gpu_va;
424 
425 		/** @ref: Reference value to compare against. */
426 		u64 ref;
427 
428 		/** @gt: True if this is a greater-than test. */
429 		bool gt;
430 
431 		/** @sync64: True if this is a 64-bit sync object. */
432 		bool sync64;
433 
434 		/** @obj: Buffer object holding the synchronization object. */
435 		struct drm_gem_object *obj;
436 
437 		/** @offset: Offset of the synchronization object inside @obj. */
438 		u64 offset;
439 
440 		/**
441 		 * @kmap: Kernel mapping of the buffer object holding the
442 		 * synchronization object.
443 		 */
444 		void *kmap;
445 	} syncwait;
446 
447 	/** @fence_ctx: Fence context fields. */
448 	struct {
449 		/** @lock: Used to protect access to all fences allocated by this context. */
450 		spinlock_t lock;
451 
452 		/**
453 		 * @id: Fence context ID.
454 		 *
455 		 * Allocated with dma_fence_context_alloc().
456 		 */
457 		u64 id;
458 
459 		/** @seqno: Sequence number of the last initialized fence. */
460 		atomic64_t seqno;
461 
462 		/**
463 		 * @last_fence: Fence of the last submitted job.
464 		 *
465 		 * We return this fence when we get an empty command stream.
466 		 * This way, we are guaranteed that all earlier jobs have completed
467 		 * when drm_sched_job::s_fence::finished is signaled, without having to
468 		 * feed the CS ring buffer with a dummy job that only signals the fence.
469 		 */
470 		struct dma_fence *last_fence;
471 
472 		/**
473 		 * @in_flight_jobs: List containing all in-flight jobs.
474 		 *
475 		 * Used to keep track and signal panthor_job::done_fence when the
476 		 * synchronization object attached to the queue is signaled.
477 		 */
478 		struct list_head in_flight_jobs;
479 	} fence_ctx;
480 
481 	/** @profiling: Job profiling data slots and access information. */
482 	struct {
483 		/** @slots: Kernel BO holding the slots. */
484 		struct panthor_kernel_bo *slots;
485 
486 		/** @slot_count: Number of jobs the ring buffer can hold at once. */
487 		u32 slot_count;
488 
489 		/** @seqno: Index of the next available profiling information slot. */
490 		u32 seqno;
491 	} profiling;
492 };
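/*
 * Editor's illustrative sketch (assumptions marked): the fence_ctx fields
 * above are the usual ingredients for minting job done-fences. A queue fence
 * would typically be initialized along these lines, where
 * 'panthor_queue_fence_ops' is a hypothetical dma_fence_ops instance:
 *
 *	dma_fence_init(&fence->base, &panthor_queue_fence_ops,
 *		       &queue->fence_ctx.lock, queue->fence_ctx.id,
 *		       atomic64_inc_return(&queue->fence_ctx.seqno));
 */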
493 
494 /**
495  * enum panthor_group_state - Scheduling group state.
496  */
497 enum panthor_group_state {
498 	/** @PANTHOR_CS_GROUP_CREATED: Group was created, but not scheduled yet. */
499 	PANTHOR_CS_GROUP_CREATED,
500 
501 	/** @PANTHOR_CS_GROUP_ACTIVE: Group is currently scheduled. */
502 	PANTHOR_CS_GROUP_ACTIVE,
503 
504 	/**
505 	 * @PANTHOR_CS_GROUP_SUSPENDED: Group was scheduled at least once, but is
506 	 * inactive/suspended right now.
507 	 */
508 	PANTHOR_CS_GROUP_SUSPENDED,
509 
510 	/**
511 	 * @PANTHOR_CS_GROUP_TERMINATED: Group was terminated.
512 	 *
513 	 * Can no longer be scheduled. The only allowed action is a destruction.
514 	 */
515 	PANTHOR_CS_GROUP_TERMINATED,
516 
517 	/**
518 	 * @PANTHOR_CS_GROUP_UNKNOWN_STATE: Group is in an unknown state.
519 	 *
520 	 * The FW returned an inconsistent state. The group is flagged unusable
521 	 * and can no longer be scheduled. The only allowed action is a
522 	 * destruction.
523 	 *
524 	 * When that happens, we also schedule a FW reset, to start from a fresh
525 	 * state.
526 	 */
527 	PANTHOR_CS_GROUP_UNKNOWN_STATE,
528 };
529 
530 /**
531  * struct panthor_group - Scheduling group object
532  */
533 struct panthor_group {
534 	/** @refcount: Reference count */
535 	struct kref refcount;
536 
537 	/** @ptdev: Device. */
538 	struct panthor_device *ptdev;
539 
540 	/** @vm: VM bound to the group. */
541 	struct panthor_vm *vm;
542 
543 	/** @compute_core_mask: Mask of shader cores that can be used for compute jobs. */
544 	u64 compute_core_mask;
545 
546 	/** @fragment_core_mask: Mask of shader cores that can be used for fragment jobs. */
547 	u64 fragment_core_mask;
548 
549 	/** @tiler_core_mask: Mask of tiler cores that can be used for tiler jobs. */
550 	u64 tiler_core_mask;
551 
552 	/** @max_compute_cores: Maximum number of shader cores used for compute jobs. */
553 	u8 max_compute_cores;
554 
555 	/** @max_fragment_cores: Maximum number of shader cores used for fragment jobs. */
556 	u8 max_fragment_cores;
557 
558 	/** @max_tiler_cores: Maximum number of tiler cores used for tiler jobs. */
559 	u8 max_tiler_cores;
560 
561 	/** @priority: Group priority (see enum panthor_csg_priority). */
562 	u8 priority;
563 
564 	/** @blocked_queues: Bitmask reflecting the blocked queues. */
565 	u32 blocked_queues;
566 
567 	/** @idle_queues: Bitmask reflecting the idle queues. */
568 	u32 idle_queues;
569 
570 	/** @fatal_lock: Lock used to protect access to fatal fields. */
571 	spinlock_t fatal_lock;
572 
573 	/** @fatal_queues: Bitmask reflecting the queues that hit a fatal exception. */
574 	u32 fatal_queues;
575 
576 	/** @tiler_oom: Mask of queues that have a tiler OOM event to process. */
577 	atomic_t tiler_oom;
578 
579 	/** @queue_count: Number of queues in this group. */
580 	u32 queue_count;
581 
582 	/** @queues: Queues owned by this group. */
583 	struct panthor_queue *queues[MAX_CS_PER_CSG];
584 
585 	/**
586 	 * @csg_id: ID of the FW group slot.
587 	 *
588 	 * -1 when the group is not scheduled/active.
589 	 */
590 	int csg_id;
591 
592 	/**
593 	 * @destroyed: True when the group has been destroyed.
594 	 *
595 	 * If a group is destroyed it becomes useless: no further jobs can be submitted
596 	 * to its queues. We simply wait for all references to be dropped so we can
597 	 * release the group object.
598 	 */
599 	bool destroyed;
600 
601 	/**
602 	 * @timedout: True when a timeout occurred on any of the queues owned by
603 	 * this group.
604 	 *
605 	 * Timeouts can be reported by drm_sched or by the FW. If a reset is required,
606 	 * and the group can't be suspended, this also leads to a timeout. In any case,
607 	 * any timeout situation is unrecoverable, and the group becomes useless. We
608 	 * simply wait for all references to be dropped so we can release the group
609 	 * object.
610 	 */
611 	bool timedout;
612 
613 	/**
614 	 * @innocent: True when the group becomes unusable because the group suspension
615 	 * failed during a reset.
616 	 *
617 	 * Sometimes the FW was put in a bad state by other groups, causing the group
618 	 * suspension happening in the reset path to fail. In that case, we consider the
619 	 * group innocent.
620 	 */
621 	bool innocent;
622 
623 	/**
624 	 * @syncobjs: Pool of per-queue synchronization objects.
625 	 *
626 	 * One sync object per queue. The position of the sync object is
627 	 * determined by the queue index.
628 	 */
629 	struct panthor_kernel_bo *syncobjs;
630 
631 	/** @fdinfo: Per-file total cycle and timestamp values reference. */
632 	struct {
633 		/** @data: Total sampled values for jobs in queues from this group. */
634 		struct panthor_gpu_usage data;
635 
636 		/**
637 		 * @lock: Mutex to govern concurrent access from drm file's fdinfo callback
638 		 * and job post-completion processing function
639 		 */
640 		struct mutex lock;
641 	} fdinfo;
642 
643 	/** @state: Group state. */
644 	enum panthor_group_state state;
645 
646 	/**
647 	 * @suspend_buf: Suspend buffer.
648 	 *
649 	 * Stores the state of the group and its queues when a group is suspended.
650 	 * Used at resume time to restore the group in its previous state.
651 	 *
652 	 * The size of the suspend buffer is exposed through the FW interface.
653 	 */
654 	struct panthor_kernel_bo *suspend_buf;
655 
656 	/**
657 	 * @protm_suspend_buf: Protection mode suspend buffer.
658 	 *
659 	 * Stores the state of the group and its queues when a group that's in
660 	 * protection mode is suspended.
661 	 *
662 	 * Used at resume time to restore the group in its previous state.
663 	 *
664 	 * The size of the protection mode suspend buffer is exposed through the
665 	 * FW interface.
666 	 */
667 	struct panthor_kernel_bo *protm_suspend_buf;
668 
669 	/** @sync_upd_work: Work used to check/signal job fences. */
670 	struct work_struct sync_upd_work;
671 
672 	/** @tiler_oom_work: Work used to process tiler OOM events happening on this group. */
673 	struct work_struct tiler_oom_work;
674 
675 	/** @term_work: Work used to finish the group termination procedure. */
676 	struct work_struct term_work;
677 
678 	/**
679 	 * @release_work: Work used to release group resources.
680 	 *
681 	 * We need to postpone the group release to avoid a deadlock when
682 	 * the last ref is released in the tick work.
683 	 */
684 	struct work_struct release_work;
685 
686 	/**
687 	 * @run_node: Node used to insert the group in the
688 	 * panthor_scheduler::groups::{runnable,idle} and
689 	 * panthor_scheduler::reset::stopped_groups lists.
690 	 */
691 	struct list_head run_node;
692 
693 	/**
694 	 * @wait_node: Node used to insert the group in the
695 	 * panthor_scheduler::groups::waiting list.
696 	 */
697 	struct list_head wait_node;
698 };
699 
700 struct panthor_job_profiling_data {
701 	struct {
702 		u64 before;
703 		u64 after;
704 	} cycles;
705 
706 	struct {
707 		u64 before;
708 		u64 after;
709 	} time;
710 };
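/*
 * Editor's illustrative sketch: a consumer of the profiling slots (e.g.
 * fdinfo accounting) would typically derive per-job costs from the
 * before/after samples stored above:
 *
 *	u64 job_cycles = data->cycles.after - data->cycles.before;
 *	u64 job_time   = data->time.after - data->time.before;
 */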
711 
712 /**
713  * group_queue_work() - Queue a group work
714  * @group: Group to queue the work for.
715  * @wname: Work name.
716  *
717  * Grabs a ref and queues a work item to the scheduler workqueue. If
718  * the work was already queued, we release the reference we grabbed.
719  *
720  * Work callbacks must release the reference we grabbed here.
721  */
722 #define group_queue_work(group, wname) \
723 	do { \
724 		group_get(group); \
725 		if (!queue_work((group)->ptdev->scheduler->wq, &(group)->wname ## _work)) \
726 			group_put(group); \
727 	} while (0)
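/*
 * Editor's illustrative usage: group_queue_work(group, sync_upd) expands to a
 * ref-counted queue_work() on &group->sync_upd_work, dropping the reference
 * again if the work was already pending:
 *
 *	group_get(group);
 *	if (!queue_work(group->ptdev->scheduler->wq, &group->sync_upd_work))
 *		group_put(group);
 */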
728 
729 /**
730  * sched_queue_work() - Queue a scheduler work.
731  * @sched: Scheduler object.
732  * @wname: Work name.
733  *
734  * Conditionally queues a scheduler work if no reset is pending/in-progress.
735  */
736 #define sched_queue_work(sched, wname) \
737 	do { \
738 		if (!atomic_read(&(sched)->reset.in_progress) && \
739 		    !panthor_device_reset_is_pending((sched)->ptdev)) \
740 			queue_work((sched)->wq, &(sched)->wname ## _work); \
741 	} while (0)
742 
743 /**
744  * sched_queue_delayed_work() - Queue a scheduler delayed work.
745  * @sched: Scheduler object.
746  * @wname: Work name.
747  * @delay: Work delay in jiffies.
748  *
749  * Conditionally queues a scheduler delayed work if no reset is
750  * pending/in-progress.
751  */
752 #define sched_queue_delayed_work(sched, wname, delay) \
753 	do { \
754 		if (!atomic_read(&sched->reset.in_progress) && \
755 		    !panthor_device_reset_is_pending((sched)->ptdev)) \
756 			mod_delayed_work((sched)->wq, &(sched)->wname ## _work, delay); \
757 	} while (0)
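/*
 * Editor's illustrative usage: the tick work is typically (re)armed through
 * this helper, e.g.:
 *
 *	sched_queue_delayed_work(sched, tick, 0);			// tick ASAP
 *	sched_queue_delayed_work(sched, tick, msecs_to_jiffies(10));	// tick later
 *
 * Both calls are no-ops when a reset is pending or in progress.
 */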
758 
759 /*
760  * We currently set the maximum number of groups per file to an arbitrarily
761  * low value, but this can be updated if we need more.
762  */
763 #define MAX_GROUPS_PER_POOL 128
764 
765 /**
766  * struct panthor_group_pool - Group pool
767  *
768  * Each file gets assigned a group pool.
769  */
770 struct panthor_group_pool {
771 	/** @xa: Xarray used to manage group handles. */
772 	struct xarray xa;
773 };
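/*
 * Editor's illustrative sketch (not necessarily the exact ioctl path): group
 * handles returned to userspace would typically be allocated from the pool
 * xarray, bounded by MAX_GROUPS_PER_POOL:
 *
 *	u32 gid;
 *	int ret = xa_alloc(&pool->xa, &gid, group,
 *			   XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL);
 */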
774 
775 /**
776  * struct panthor_job - Used to manage GPU job
777  */
778 struct panthor_job {
779 	/** @base: Inherit from drm_sched_job. */
780 	struct drm_sched_job base;
781 
782 	/** @refcount: Reference count. */
783 	struct kref refcount;
784 
785 	/** @group: Group of the queue this job will be pushed to. */
786 	struct panthor_group *group;
787 
788 	/** @queue_idx: Index of the queue inside @group. */
789 	u32 queue_idx;
790 
791 	/** @call_info: Information about the userspace command stream call. */
792 	struct {
793 		/** @start: GPU address of the userspace command stream. */
794 		u64 start;
795 
796 		/** @size: Size of the userspace command stream. */
797 		u32 size;
798 
799 		/**
800 		 * @latest_flush: Flush ID at the time the userspace command
801 		 * stream was built.
802 		 *
803 		 * Needed for the flush reduction mechanism.
804 		 */
805 		u32 latest_flush;
806 	} call_info;
807 
808 	/** @ringbuf: Position of this job in the ring buffer. */
809 	struct {
810 		/** @start: Start offset. */
811 		u64 start;
812 
813 		/** @end: End offset. */
814 		u64 end;
815 	} ringbuf;
816 
817 	/**
818 	 * @node: Used to insert the job in the panthor_queue::fence_ctx::in_flight_jobs
819 	 * list.
820 	 */
821 	struct list_head node;
822 
823 	/** @done_fence: Fence signaled when the job is finished or cancelled. */
824 	struct dma_fence *done_fence;
825 
826 	/** @profiling: Job profiling information. */
827 	struct {
828 		/** @mask: Current device job profiling enablement bitmask. */
829 		u32 mask;
830 
831 		/** @slot: Job index in the profiling slots BO. */
832 		u32 slot;
833 	} profiling;
834 };
835 
836 static void
837 panthor_queue_put_syncwait_obj(struct panthor_queue *queue)
838 {
839 	if (queue->syncwait.kmap) {
840 		struct iosys_map map = IOSYS_MAP_INIT_VADDR(queue->syncwait.kmap);
841 
842 		drm_gem_vunmap_unlocked(queue->syncwait.obj, &map);
843 		queue->syncwait.kmap = NULL;
844 	}
845 
846 	drm_gem_object_put(queue->syncwait.obj);
847 	queue->syncwait.obj = NULL;
848 }
849 
850 static void *
851 panthor_queue_get_syncwait_obj(struct panthor_group *group, struct panthor_queue *queue)
852 {
853 	struct panthor_device *ptdev = group->ptdev;
854 	struct panthor_gem_object *bo;
855 	struct iosys_map map;
856 	int ret;
857 
858 	if (queue->syncwait.kmap)
859 		return queue->syncwait.kmap + queue->syncwait.offset;
860 
861 	bo = panthor_vm_get_bo_for_va(group->vm,
862 				      queue->syncwait.gpu_va,
863 				      &queue->syncwait.offset);
864 	if (drm_WARN_ON(&ptdev->base, IS_ERR_OR_NULL(bo)))
865 		goto err_put_syncwait_obj;
866 
867 	queue->syncwait.obj = &bo->base.base;
868 	ret = drm_gem_vmap_unlocked(queue->syncwait.obj, &map);
869 	if (drm_WARN_ON(&ptdev->base, ret))
870 		goto err_put_syncwait_obj;
871 
872 	queue->syncwait.kmap = map.vaddr;
873 	if (drm_WARN_ON(&ptdev->base, !queue->syncwait.kmap))
874 		goto err_put_syncwait_obj;
875 
876 	return queue->syncwait.kmap + queue->syncwait.offset;
877 
878 err_put_syncwait_obj:
879 	panthor_queue_put_syncwait_obj(queue);
880 	return NULL;
881 }
882 
883 static void group_free_queue(struct panthor_group *group, struct panthor_queue *queue)
884 {
885 	if (IS_ERR_OR_NULL(queue))
886 		return;
887 
888 	if (queue->entity.fence_context)
889 		drm_sched_entity_destroy(&queue->entity);
890 
891 	if (queue->scheduler.ops)
892 		drm_sched_fini(&queue->scheduler);
893 
894 	panthor_queue_put_syncwait_obj(queue);
895 
896 	panthor_kernel_bo_destroy(queue->ringbuf);
897 	panthor_kernel_bo_destroy(queue->iface.mem);
898 	panthor_kernel_bo_destroy(queue->profiling.slots);
899 
900 	/* Release the last_fence we were holding, if any. */
901 	dma_fence_put(queue->fence_ctx.last_fence);
902 
903 	kfree(queue);
904 }
905 
906 static void group_release_work(struct work_struct *work)
907 {
908 	struct panthor_group *group = container_of(work,
909 						   struct panthor_group,
910 						   release_work);
911 	u32 i;
912 
913 	mutex_destroy(&group->fdinfo.lock);
914 
915 	for (i = 0; i < group->queue_count; i++)
916 		group_free_queue(group, group->queues[i]);
917 
918 	panthor_kernel_bo_destroy(group->suspend_buf);
919 	panthor_kernel_bo_destroy(group->protm_suspend_buf);
920 	panthor_kernel_bo_destroy(group->syncobjs);
921 
922 	panthor_vm_put(group->vm);
923 	kfree(group);
924 }
925 
926 static void group_release(struct kref *kref)
927 {
928 	struct panthor_group *group = container_of(kref,
929 						   struct panthor_group,
930 						   refcount);
931 	struct panthor_device *ptdev = group->ptdev;
932 
933 	drm_WARN_ON(&ptdev->base, group->csg_id >= 0);
934 	drm_WARN_ON(&ptdev->base, !list_empty(&group->run_node));
935 	drm_WARN_ON(&ptdev->base, !list_empty(&group->wait_node));
936 
937 	queue_work(panthor_cleanup_wq, &group->release_work);
938 }
939 
940 static void group_put(struct panthor_group *group)
941 {
942 	if (group)
943 		kref_put(&group->refcount, group_release);
944 }
945 
946 static struct panthor_group *
947 group_get(struct panthor_group *group)
948 {
949 	if (group)
950 		kref_get(&group->refcount);
951 
952 	return group;
953 }
954 
955 /**
956  * group_bind_locked() - Bind a group to a group slot
957  * @group: Group.
958  * @csg_id: Slot.
959  *
960  * Return: 0 on success, a negative error code otherwise.
961  */
962 static int
963 group_bind_locked(struct panthor_group *group, u32 csg_id)
964 {
965 	struct panthor_device *ptdev = group->ptdev;
966 	struct panthor_csg_slot *csg_slot;
967 	int ret;
968 
969 	lockdep_assert_held(&ptdev->scheduler->lock);
970 
971 	if (drm_WARN_ON(&ptdev->base, group->csg_id != -1 || csg_id >= MAX_CSGS ||
972 			ptdev->scheduler->csg_slots[csg_id].group))
973 		return -EINVAL;
974 
975 	ret = panthor_vm_active(group->vm);
976 	if (ret)
977 		return ret;
978 
979 	csg_slot = &ptdev->scheduler->csg_slots[csg_id];
980 	group_get(group);
981 	group->csg_id = csg_id;
982 
983 	/* Dummy doorbell allocation: doorbell is assigned to the group and
984 	 * all queues use the same doorbell.
985 	 *
986 	 * TODO: Implement LRU-based doorbell assignment, so the most often
987 	 * updated queues get their own doorbell, thus avoiding useless checks
988 	 * on queues belonging to the same group that are rarely updated.
989 	 */
990 	for (u32 i = 0; i < group->queue_count; i++)
991 		group->queues[i]->doorbell_id = csg_id + 1;
992 
993 	csg_slot->group = group;
994 
995 	return 0;
996 }
997 
998 /**
999  * group_unbind_locked() - Unbind a group from a slot.
1000  * @group: Group to unbind.
1001  *
1002  * Return: 0 on success, a negative error code otherwise.
1003  */
1004 static int
1005 group_unbind_locked(struct panthor_group *group)
1006 {
1007 	struct panthor_device *ptdev = group->ptdev;
1008 	struct panthor_csg_slot *slot;
1009 
1010 	lockdep_assert_held(&ptdev->scheduler->lock);
1011 
1012 	if (drm_WARN_ON(&ptdev->base, group->csg_id < 0 || group->csg_id >= MAX_CSGS))
1013 		return -EINVAL;
1014 
1015 	if (drm_WARN_ON(&ptdev->base, group->state == PANTHOR_CS_GROUP_ACTIVE))
1016 		return -EINVAL;
1017 
1018 	slot = &ptdev->scheduler->csg_slots[group->csg_id];
1019 	panthor_vm_idle(group->vm);
1020 	group->csg_id = -1;
1021 
1022 	/* Tiler OOM events will be re-issued next time the group is scheduled. */
1023 	atomic_set(&group->tiler_oom, 0);
1024 	cancel_work(&group->tiler_oom_work);
1025 
1026 	for (u32 i = 0; i < group->queue_count; i++)
1027 		group->queues[i]->doorbell_id = -1;
1028 
1029 	slot->group = NULL;
1030 
1031 	group_put(group);
1032 	return 0;
1033 }
1034 
1035 /**
1036  * cs_slot_prog_locked() - Program a queue slot
1037  * @ptdev: Device.
1038  * @csg_id: Group slot ID.
1039  * @cs_id: Queue slot ID.
1040  *
1041  * Program a queue slot with the queue information so things can start being
1042  * executed on this queue.
1043  *
1044  * The group slot must have a group bound to it already (group_bind_locked()).
1045  */
1046 static void
1047 cs_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
1048 {
1049 	struct panthor_queue *queue = ptdev->scheduler->csg_slots[csg_id].group->queues[cs_id];
1050 	struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1051 
1052 	lockdep_assert_held(&ptdev->scheduler->lock);
1053 
1054 	queue->iface.input->extract = queue->iface.output->extract;
1055 	drm_WARN_ON(&ptdev->base, queue->iface.input->insert < queue->iface.input->extract);
1056 
1057 	cs_iface->input->ringbuf_base = panthor_kernel_bo_gpuva(queue->ringbuf);
1058 	cs_iface->input->ringbuf_size = panthor_kernel_bo_size(queue->ringbuf);
1059 	cs_iface->input->ringbuf_input = queue->iface.input_fw_va;
1060 	cs_iface->input->ringbuf_output = queue->iface.output_fw_va;
1061 	cs_iface->input->config = CS_CONFIG_PRIORITY(queue->priority) |
1062 				  CS_CONFIG_DOORBELL(queue->doorbell_id);
1063 	cs_iface->input->ack_irq_mask = ~0;
1064 	panthor_fw_update_reqs(cs_iface, req,
1065 			       CS_IDLE_SYNC_WAIT |
1066 			       CS_IDLE_EMPTY |
1067 			       CS_STATE_START |
1068 			       CS_EXTRACT_EVENT,
1069 			       CS_IDLE_SYNC_WAIT |
1070 			       CS_IDLE_EMPTY |
1071 			       CS_STATE_MASK |
1072 			       CS_EXTRACT_EVENT);
1073 	if (queue->iface.input->insert != queue->iface.input->extract && queue->timeout_suspended) {
1074 		drm_sched_resume_timeout(&queue->scheduler, queue->remaining_time);
1075 		queue->timeout_suspended = false;
1076 	}
1077 }
1078 
1079 /**
1080  * cs_slot_reset_locked() - Reset a queue slot
1081  * @ptdev: Device.
1082  * @csg_id: Group slot.
1083  * @cs_id: Queue slot.
1084  *
1085  * Change the queue slot state to STOP and suspend the queue timeout if
1086  * the queue is not blocked.
1087  *
1088  * The group slot must have a group bound to it (group_bind_locked()).
1089  */
1090 static int
1091 cs_slot_reset_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
1092 {
1093 	struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1094 	struct panthor_group *group = ptdev->scheduler->csg_slots[csg_id].group;
1095 	struct panthor_queue *queue = group->queues[cs_id];
1096 
1097 	lockdep_assert_held(&ptdev->scheduler->lock);
1098 
1099 	panthor_fw_update_reqs(cs_iface, req,
1100 			       CS_STATE_STOP,
1101 			       CS_STATE_MASK);
1102 
1103 	/* If the queue is blocked, we want to keep the timeout running, so
1104 	 * we can detect unbounded waits and kill the group when that happens.
1105 	 */
1106 	if (!(group->blocked_queues & BIT(cs_id)) && !queue->timeout_suspended) {
1107 		queue->remaining_time = drm_sched_suspend_timeout(&queue->scheduler);
1108 		queue->timeout_suspended = true;
1109 		WARN_ON(queue->remaining_time > msecs_to_jiffies(JOB_TIMEOUT_MS));
1110 	}
1111 
1112 	return 0;
1113 }
1114 
1115 /**
1116  * csg_slot_sync_priority_locked() - Synchronize the group slot priority
1117  * @ptdev: Device.
1118  * @csg_id: Group slot ID.
1119  *
1120  * Group slot priority update happens asynchronously. When we receive a
1121  * %CSG_ENDPOINT_CONFIG, we know the update is effective, and can
1122  * reflect it to our panthor_csg_slot object.
1123  */
1124 static void
1125 csg_slot_sync_priority_locked(struct panthor_device *ptdev, u32 csg_id)
1126 {
1127 	struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1128 	struct panthor_fw_csg_iface *csg_iface;
1129 
1130 	lockdep_assert_held(&ptdev->scheduler->lock);
1131 
1132 	csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1133 	csg_slot->priority = (csg_iface->input->endpoint_req & CSG_EP_REQ_PRIORITY_MASK) >> 28;
1134 }
1135 
1136 /**
1137  * cs_slot_sync_queue_state_locked() - Synchronize the queue slot state
1138  * @ptdev: Device.
1139  * @csg_id: Group slot.
1140  * @cs_id: Queue slot.
1141  *
1142  * Queue state is updated on group suspend or STATUS_UPDATE event.
1143  */
1144 static void
1145 cs_slot_sync_queue_state_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
1146 {
1147 	struct panthor_group *group = ptdev->scheduler->csg_slots[csg_id].group;
1148 	struct panthor_queue *queue = group->queues[cs_id];
1149 	struct panthor_fw_cs_iface *cs_iface =
1150 		panthor_fw_get_cs_iface(group->ptdev, csg_id, cs_id);
1151 
1152 	u32 status_wait_cond;
1153 
1154 	switch (cs_iface->output->status_blocked_reason) {
1155 	case CS_STATUS_BLOCKED_REASON_UNBLOCKED:
1156 		if (queue->iface.input->insert == queue->iface.output->extract &&
1157 		    cs_iface->output->status_scoreboards == 0)
1158 			group->idle_queues |= BIT(cs_id);
1159 		break;
1160 
1161 	case CS_STATUS_BLOCKED_REASON_SYNC_WAIT:
1162 		if (list_empty(&group->wait_node)) {
1163 			list_move_tail(&group->wait_node,
1164 				       &group->ptdev->scheduler->groups.waiting);
1165 		}
1166 
1167 		/* The queue is only blocked if there's no deferred operation
1168 		 * pending, which can be checked through the scoreboard status.
1169 		 */
1170 		if (!cs_iface->output->status_scoreboards)
1171 			group->blocked_queues |= BIT(cs_id);
1172 
1173 		queue->syncwait.gpu_va = cs_iface->output->status_wait_sync_ptr;
1174 		queue->syncwait.ref = cs_iface->output->status_wait_sync_value;
1175 		status_wait_cond = cs_iface->output->status_wait & CS_STATUS_WAIT_SYNC_COND_MASK;
1176 		queue->syncwait.gt = status_wait_cond == CS_STATUS_WAIT_SYNC_COND_GT;
1177 		if (cs_iface->output->status_wait & CS_STATUS_WAIT_SYNC_64B) {
1178 			u64 sync_val_hi = cs_iface->output->status_wait_sync_value_hi;
1179 
1180 			queue->syncwait.sync64 = true;
1181 			queue->syncwait.ref |= sync_val_hi << 32;
1182 		} else {
1183 			queue->syncwait.sync64 = false;
1184 		}
1185 		break;
1186 
1187 	default:
1188 		/* Other reasons are not blocking. Consider the queue as runnable
1189 		 * in those cases.
1190 		 */
1191 		break;
1192 	}
1193 }
1194 
1195 static void
1196 csg_slot_sync_queues_state_locked(struct panthor_device *ptdev, u32 csg_id)
1197 {
1198 	struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1199 	struct panthor_group *group = csg_slot->group;
1200 	u32 i;
1201 
1202 	lockdep_assert_held(&ptdev->scheduler->lock);
1203 
1204 	group->idle_queues = 0;
1205 	group->blocked_queues = 0;
1206 
1207 	for (i = 0; i < group->queue_count; i++) {
1208 		if (group->queues[i])
1209 			cs_slot_sync_queue_state_locked(ptdev, csg_id, i);
1210 	}
1211 }
1212 
1213 static void
1214 csg_slot_sync_state_locked(struct panthor_device *ptdev, u32 csg_id)
1215 {
1216 	struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1217 	struct panthor_fw_csg_iface *csg_iface;
1218 	struct panthor_group *group;
1219 	enum panthor_group_state new_state, old_state;
1220 	u32 csg_state;
1221 
1222 	lockdep_assert_held(&ptdev->scheduler->lock);
1223 
1224 	csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1225 	group = csg_slot->group;
1226 
1227 	if (!group)
1228 		return;
1229 
1230 	old_state = group->state;
1231 	csg_state = csg_iface->output->ack & CSG_STATE_MASK;
1232 	switch (csg_state) {
1233 	case CSG_STATE_START:
1234 	case CSG_STATE_RESUME:
1235 		new_state = PANTHOR_CS_GROUP_ACTIVE;
1236 		break;
1237 	case CSG_STATE_TERMINATE:
1238 		new_state = PANTHOR_CS_GROUP_TERMINATED;
1239 		break;
1240 	case CSG_STATE_SUSPEND:
1241 		new_state = PANTHOR_CS_GROUP_SUSPENDED;
1242 		break;
1243 	default:
1244 		/* The unknown state might be caused by a FW state corruption,
1245 		 * which means the group metadata can't be trusted anymore, and
1246 		 * the SUSPEND operation might propagate the corruption to the
1247 		 * suspend buffers. Flag the group state as unknown to make
1248 		 * sure it's unusable after that point.
1249 		 */
1250 		drm_err(&ptdev->base, "Invalid state on CSG %d (state=%d)",
1251 			csg_id, csg_state);
1252 		new_state = PANTHOR_CS_GROUP_UNKNOWN_STATE;
1253 		break;
1254 	}
1255 
1256 	if (old_state == new_state)
1257 		return;
1258 
1259 	/* The unknown state might be caused by a FW issue, reset the FW to
1260 	 * take a fresh start.
1261 	 */
1262 	if (new_state == PANTHOR_CS_GROUP_UNKNOWN_STATE)
1263 		panthor_device_schedule_reset(ptdev);
1264 
1265 	if (new_state == PANTHOR_CS_GROUP_SUSPENDED)
1266 		csg_slot_sync_queues_state_locked(ptdev, csg_id);
1267 
1268 	if (old_state == PANTHOR_CS_GROUP_ACTIVE) {
1269 		u32 i;
1270 
1271 		/* Reset the queue slots so we start from a clean
1272 		 * state when starting/resuming a new group on this
1273 		 * CSG slot. No wait needed here, and no doorbell ring
1274 		 * either, since the CS slot will only be re-used
1275 		 * on the next CSG start operation.
1276 		 */
1277 		for (i = 0; i < group->queue_count; i++) {
1278 			if (group->queues[i])
1279 				cs_slot_reset_locked(ptdev, csg_id, i);
1280 		}
1281 	}
1282 
1283 	group->state = new_state;
1284 }
1285 
1286 static int
1287 csg_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 priority)
1288 {
1289 	struct panthor_fw_csg_iface *csg_iface;
1290 	struct panthor_csg_slot *csg_slot;
1291 	struct panthor_group *group;
1292 	u32 queue_mask = 0, i;
1293 
1294 	lockdep_assert_held(&ptdev->scheduler->lock);
1295 
1296 	if (priority > MAX_CSG_PRIO)
1297 		return -EINVAL;
1298 
1299 	if (drm_WARN_ON(&ptdev->base, csg_id >= MAX_CSGS))
1300 		return -EINVAL;
1301 
1302 	csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1303 	group = csg_slot->group;
1304 	if (!group || group->state == PANTHOR_CS_GROUP_ACTIVE)
1305 		return 0;
1306 
1307 	csg_iface = panthor_fw_get_csg_iface(group->ptdev, csg_id);
1308 
1309 	for (i = 0; i < group->queue_count; i++) {
1310 		if (group->queues[i]) {
1311 			cs_slot_prog_locked(ptdev, csg_id, i);
1312 			queue_mask |= BIT(i);
1313 		}
1314 	}
1315 
1316 	csg_iface->input->allow_compute = group->compute_core_mask;
1317 	csg_iface->input->allow_fragment = group->fragment_core_mask;
1318 	csg_iface->input->allow_other = group->tiler_core_mask;
1319 	csg_iface->input->endpoint_req = CSG_EP_REQ_COMPUTE(group->max_compute_cores) |
1320 					 CSG_EP_REQ_FRAGMENT(group->max_fragment_cores) |
1321 					 CSG_EP_REQ_TILER(group->max_tiler_cores) |
1322 					 CSG_EP_REQ_PRIORITY(priority);
1323 	csg_iface->input->config = panthor_vm_as(group->vm);
1324 
1325 	if (group->suspend_buf)
1326 		csg_iface->input->suspend_buf = panthor_kernel_bo_gpuva(group->suspend_buf);
1327 	else
1328 		csg_iface->input->suspend_buf = 0;
1329 
1330 	if (group->protm_suspend_buf) {
1331 		csg_iface->input->protm_suspend_buf =
1332 			panthor_kernel_bo_gpuva(group->protm_suspend_buf);
1333 	} else {
1334 		csg_iface->input->protm_suspend_buf = 0;
1335 	}
1336 
1337 	csg_iface->input->ack_irq_mask = ~0;
1338 	panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, queue_mask);
1339 	return 0;
1340 }
1341 
1342 static void
1343 cs_slot_process_fatal_event_locked(struct panthor_device *ptdev,
1344 				   u32 csg_id, u32 cs_id)
1345 {
1346 	struct panthor_scheduler *sched = ptdev->scheduler;
1347 	struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
1348 	struct panthor_group *group = csg_slot->group;
1349 	struct panthor_fw_cs_iface *cs_iface;
1350 	u32 fatal;
1351 	u64 info;
1352 
1353 	lockdep_assert_held(&sched->lock);
1354 
1355 	cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1356 	fatal = cs_iface->output->fatal;
1357 	info = cs_iface->output->fatal_info;
1358 
1359 	if (group)
1360 		group->fatal_queues |= BIT(cs_id);
1361 
1362 	if (CS_EXCEPTION_TYPE(fatal) == DRM_PANTHOR_EXCEPTION_CS_UNRECOVERABLE) {
1363 		/* If this exception is unrecoverable, queue a reset, and make
1364 		 * sure we stop scheduling groups until the reset has happened.
1365 		 */
1366 		panthor_device_schedule_reset(ptdev);
1367 		cancel_delayed_work(&sched->tick_work);
1368 	} else {
1369 		sched_queue_delayed_work(sched, tick, 0);
1370 	}
1371 
1372 	drm_warn(&ptdev->base,
1373 		 "CSG slot %d CS slot: %d\n"
1374 		 "CS_FATAL.EXCEPTION_TYPE: 0x%x (%s)\n"
1375 		 "CS_FATAL.EXCEPTION_DATA: 0x%x\n"
1376 		 "CS_FATAL_INFO.EXCEPTION_DATA: 0x%llx\n",
1377 		 csg_id, cs_id,
1378 		 (unsigned int)CS_EXCEPTION_TYPE(fatal),
1379 		 panthor_exception_name(ptdev, CS_EXCEPTION_TYPE(fatal)),
1380 		 (unsigned int)CS_EXCEPTION_DATA(fatal),
1381 		 info);
1382 }
1383 
1384 static void
1385 cs_slot_process_fault_event_locked(struct panthor_device *ptdev,
1386 				   u32 csg_id, u32 cs_id)
1387 {
1388 	struct panthor_scheduler *sched = ptdev->scheduler;
1389 	struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
1390 	struct panthor_group *group = csg_slot->group;
1391 	struct panthor_queue *queue = group && cs_id < group->queue_count ?
1392 				      group->queues[cs_id] : NULL;
1393 	struct panthor_fw_cs_iface *cs_iface;
1394 	u32 fault;
1395 	u64 info;
1396 
1397 	lockdep_assert_held(&sched->lock);
1398 
1399 	cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1400 	fault = cs_iface->output->fault;
1401 	info = cs_iface->output->fault_info;
1402 
1403 	if (queue && CS_EXCEPTION_TYPE(fault) == DRM_PANTHOR_EXCEPTION_CS_INHERIT_FAULT) {
1404 		u64 cs_extract = queue->iface.output->extract;
1405 		struct panthor_job *job;
1406 
1407 		spin_lock(&queue->fence_ctx.lock);
1408 		list_for_each_entry(job, &queue->fence_ctx.in_flight_jobs, node) {
1409 			if (cs_extract >= job->ringbuf.end)
1410 				continue;
1411 
1412 			if (cs_extract < job->ringbuf.start)
1413 				break;
1414 
1415 			dma_fence_set_error(job->done_fence, -EINVAL);
1416 		}
1417 		spin_unlock(&queue->fence_ctx.lock);
1418 	}
1419 
1420 	drm_warn(&ptdev->base,
1421 		 "CSG slot %d CS slot: %d\n"
1422 		 "CS_FAULT.EXCEPTION_TYPE: 0x%x (%s)\n"
1423 		 "CS_FAULT.EXCEPTION_DATA: 0x%x\n"
1424 		 "CS_FAULT_INFO.EXCEPTION_DATA: 0x%llx\n",
1425 		 csg_id, cs_id,
1426 		 (unsigned int)CS_EXCEPTION_TYPE(fault),
1427 		 panthor_exception_name(ptdev, CS_EXCEPTION_TYPE(fault)),
1428 		 (unsigned int)CS_EXCEPTION_DATA(fault),
1429 		 info);
1430 }
1431 
1432 static int group_process_tiler_oom(struct panthor_group *group, u32 cs_id)
1433 {
1434 	struct panthor_device *ptdev = group->ptdev;
1435 	struct panthor_scheduler *sched = ptdev->scheduler;
1436 	u32 renderpasses_in_flight, pending_frag_count;
1437 	struct panthor_heap_pool *heaps = NULL;
1438 	u64 heap_address, new_chunk_va = 0;
1439 	u32 vt_start, vt_end, frag_end;
1440 	int ret, csg_id;
1441 
1442 	mutex_lock(&sched->lock);
1443 	csg_id = group->csg_id;
1444 	if (csg_id >= 0) {
1445 		struct panthor_fw_cs_iface *cs_iface;
1446 
1447 		cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1448 		heaps = panthor_vm_get_heap_pool(group->vm, false);
1449 		heap_address = cs_iface->output->heap_address;
1450 		vt_start = cs_iface->output->heap_vt_start;
1451 		vt_end = cs_iface->output->heap_vt_end;
1452 		frag_end = cs_iface->output->heap_frag_end;
1453 		renderpasses_in_flight = vt_start - frag_end;
1454 		pending_frag_count = vt_end - frag_end;
1455 	}
1456 	mutex_unlock(&sched->lock);
1457 
1458 	/* The group got scheduled out, we stop here. We will get a new tiler OOM event
1459 	 * when it's scheduled again.
1460 	 */
1461 	if (unlikely(csg_id < 0))
1462 		return 0;
1463 
1464 	if (IS_ERR(heaps) || frag_end > vt_end || vt_end >= vt_start) {
1465 		ret = -EINVAL;
1466 	} else {
1467 		/* We do the allocation without holding the scheduler lock to avoid
1468 		 * blocking the scheduling.
1469 		 */
1470 		ret = panthor_heap_grow(heaps, heap_address,
1471 					renderpasses_in_flight,
1472 					pending_frag_count, &new_chunk_va);
1473 	}
1474 
1475 	/* If the heap context doesn't have memory for us, we want to let the
1476 	 * FW try to reclaim memory by waiting for fragment jobs to land or by
1477 	 * executing the tiler OOM exception handler, which is supposed to
1478 	 * implement incremental rendering.
1479 	 */
1480 	if (ret && ret != -ENOMEM) {
1481 		drm_warn(&ptdev->base, "Failed to extend the tiler heap\n");
1482 		group->fatal_queues |= BIT(cs_id);
1483 		sched_queue_delayed_work(sched, tick, 0);
1484 		goto out_put_heap_pool;
1485 	}
1486 
1487 	mutex_lock(&sched->lock);
1488 	csg_id = group->csg_id;
1489 	if (csg_id >= 0) {
1490 		struct panthor_fw_csg_iface *csg_iface;
1491 		struct panthor_fw_cs_iface *cs_iface;
1492 
1493 		csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1494 		cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1495 
1496 		cs_iface->input->heap_start = new_chunk_va;
1497 		cs_iface->input->heap_end = new_chunk_va;
1498 		panthor_fw_update_reqs(cs_iface, req, cs_iface->output->ack, CS_TILER_OOM);
1499 		panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, BIT(cs_id));
1500 		panthor_fw_ring_csg_doorbells(ptdev, BIT(csg_id));
1501 	}
1502 	mutex_unlock(&sched->lock);
1503 
1504 	/* We allocated a chunk, but couldn't link it to the heap
1505 	 * context because the group was scheduled out while we were
1506 	 * allocating memory. We need to return this chunk to the heap.
1507 	 */
1508 	if (unlikely(csg_id < 0 && new_chunk_va))
1509 		panthor_heap_return_chunk(heaps, heap_address, new_chunk_va);
1510 
1511 	ret = 0;
1512 
1513 out_put_heap_pool:
1514 	panthor_heap_pool_put(heaps);
1515 	return ret;
1516 }
1517 
1518 static void group_tiler_oom_work(struct work_struct *work)
1519 {
1520 	struct panthor_group *group =
1521 		container_of(work, struct panthor_group, tiler_oom_work);
1522 	u32 tiler_oom = atomic_xchg(&group->tiler_oom, 0);
1523 
1524 	while (tiler_oom) {
1525 		u32 cs_id = ffs(tiler_oom) - 1;
1526 
1527 		group_process_tiler_oom(group, cs_id);
1528 		tiler_oom &= ~BIT(cs_id);
1529 	}
1530 
1531 	group_put(group);
1532 }
1533 
1534 static void
1535 cs_slot_process_tiler_oom_event_locked(struct panthor_device *ptdev,
1536 				       u32 csg_id, u32 cs_id)
1537 {
1538 	struct panthor_scheduler *sched = ptdev->scheduler;
1539 	struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
1540 	struct panthor_group *group = csg_slot->group;
1541 
1542 	lockdep_assert_held(&sched->lock);
1543 
1544 	if (drm_WARN_ON(&ptdev->base, !group))
1545 		return;
1546 
1547 	atomic_or(BIT(cs_id), &group->tiler_oom);
1548 
1549 	/* We don't use group_queue_work() here because we want to queue the
1550 	 * work item to the heap_alloc_wq.
1551 	 */
1552 	group_get(group);
1553 	if (!queue_work(sched->heap_alloc_wq, &group->tiler_oom_work))
1554 		group_put(group);
1555 }
1556 
1557 static bool cs_slot_process_irq_locked(struct panthor_device *ptdev,
1558 				       u32 csg_id, u32 cs_id)
1559 {
1560 	struct panthor_fw_cs_iface *cs_iface;
1561 	u32 req, ack, events;
1562 
1563 	lockdep_assert_held(&ptdev->scheduler->lock);
1564 
1565 	cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1566 	req = cs_iface->input->req;
1567 	ack = cs_iface->output->ack;
1568 	events = (req ^ ack) & CS_EVT_MASK;
1569 
1570 	if (events & CS_FATAL)
1571 		cs_slot_process_fatal_event_locked(ptdev, csg_id, cs_id);
1572 
1573 	if (events & CS_FAULT)
1574 		cs_slot_process_fault_event_locked(ptdev, csg_id, cs_id);
1575 
1576 	if (events & CS_TILER_OOM)
1577 		cs_slot_process_tiler_oom_event_locked(ptdev, csg_id, cs_id);
1578 
1579 	/* We don't acknowledge the TILER_OOM event since its handling is
1580 	 * deferred to a separate work.
1581 	 */
1582 	panthor_fw_update_reqs(cs_iface, req, ack, CS_FATAL | CS_FAULT);
1583 
1584 	return (events & (CS_FAULT | CS_TILER_OOM)) != 0;
1585 }
1586 
1587 static void csg_slot_sync_idle_state_locked(struct panthor_device *ptdev, u32 csg_id)
1588 {
1589 	struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1590 	struct panthor_fw_csg_iface *csg_iface;
1591 
1592 	lockdep_assert_held(&ptdev->scheduler->lock);
1593 
1594 	csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1595 	csg_slot->idle = csg_iface->output->status_state & CSG_STATUS_STATE_IS_IDLE;
1596 }
1597 
1598 static void csg_slot_process_idle_event_locked(struct panthor_device *ptdev, u32 csg_id)
1599 {
1600 	struct panthor_scheduler *sched = ptdev->scheduler;
1601 
1602 	lockdep_assert_held(&sched->lock);
1603 
1604 	sched->might_have_idle_groups = true;
1605 
1606 	/* Schedule a tick so we can evict idle groups and schedule non-idle
1607 	 * ones. This will also update runtime PM and devfreq busy/idle states,
1608 	 * so the device can lower its frequency or get suspended.
1609 	 */
1610 	sched_queue_delayed_work(sched, tick, 0);
1611 }
1612 
1613 static void csg_slot_sync_update_locked(struct panthor_device *ptdev,
1614 					u32 csg_id)
1615 {
1616 	struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1617 	struct panthor_group *group = csg_slot->group;
1618 
1619 	lockdep_assert_held(&ptdev->scheduler->lock);
1620 
1621 	if (group)
1622 		group_queue_work(group, sync_upd);
1623 
1624 	sched_queue_work(ptdev->scheduler, sync_upd);
1625 }
1626 
1627 static void
1628 csg_slot_process_progress_timer_event_locked(struct panthor_device *ptdev, u32 csg_id)
1629 {
1630 	struct panthor_scheduler *sched = ptdev->scheduler;
1631 	struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
1632 	struct panthor_group *group = csg_slot->group;
1633 
1634 	lockdep_assert_held(&sched->lock);
1635 
1636 	drm_warn(&ptdev->base, "CSG slot %d progress timeout\n", csg_id);
1637 
1638 	group = csg_slot->group;
1639 	if (!drm_WARN_ON(&ptdev->base, !group))
1640 		group->timedout = true;
1641 
1642 	sched_queue_delayed_work(sched, tick, 0);
1643 }
1644 
1645 static void sched_process_csg_irq_locked(struct panthor_device *ptdev, u32 csg_id)
1646 {
1647 	u32 req, ack, cs_irq_req, cs_irq_ack, cs_irqs, csg_events;
1648 	struct panthor_fw_csg_iface *csg_iface;
1649 	u32 ring_cs_db_mask = 0;
1650 
1651 	lockdep_assert_held(&ptdev->scheduler->lock);
1652 
1653 	if (drm_WARN_ON(&ptdev->base, csg_id >= ptdev->scheduler->csg_slot_count))
1654 		return;
1655 
1656 	csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1657 	req = READ_ONCE(csg_iface->input->req);
1658 	ack = READ_ONCE(csg_iface->output->ack);
1659 	cs_irq_req = READ_ONCE(csg_iface->output->cs_irq_req);
1660 	cs_irq_ack = READ_ONCE(csg_iface->input->cs_irq_ack);
1661 	csg_events = (req ^ ack) & CSG_EVT_MASK;
1662 
1663 	/* There may not be any pending CSG/CS interrupts to process */
1664 	if (req == ack && cs_irq_req == cs_irq_ack)
1665 		return;
1666 
1667 	/* Immediately set IRQ_ACK bits to be same as the IRQ_REQ bits before
1668 	 * examining the CS_ACK & CS_REQ bits. This would ensure that Host
1669 	 * doesn't miss an interrupt for the CS in the race scenario where
1670 	 * whilst Host is servicing an interrupt for the CS, firmware sends
1671 	 * another interrupt for that CS.
1672 	 */
1673 	csg_iface->input->cs_irq_ack = cs_irq_req;
1674 
1675 	panthor_fw_update_reqs(csg_iface, req, ack,
1676 			       CSG_SYNC_UPDATE |
1677 			       CSG_IDLE |
1678 			       CSG_PROGRESS_TIMER_EVENT);
1679 
1680 	if (csg_events & CSG_IDLE)
1681 		csg_slot_process_idle_event_locked(ptdev, csg_id);
1682 
1683 	if (csg_events & CSG_PROGRESS_TIMER_EVENT)
1684 		csg_slot_process_progress_timer_event_locked(ptdev, csg_id);
1685 
1686 	cs_irqs = cs_irq_req ^ cs_irq_ack;
1687 	while (cs_irqs) {
1688 		u32 cs_id = ffs(cs_irqs) - 1;
1689 
1690 		if (cs_slot_process_irq_locked(ptdev, csg_id, cs_id))
1691 			ring_cs_db_mask |= BIT(cs_id);
1692 
1693 		cs_irqs &= ~BIT(cs_id);
1694 	}
1695 
1696 	if (csg_events & CSG_SYNC_UPDATE)
1697 		csg_slot_sync_update_locked(ptdev, csg_id);
1698 
1699 	if (ring_cs_db_mask)
1700 		panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, ring_cs_db_mask);
1701 
1702 	panthor_fw_ring_csg_doorbells(ptdev, BIT(csg_id));
1703 }
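
/* A rough illustrative trace of the req/ack toggle protocol handled above
 * (bit positions are hypothetical): assume CSG_IDLE is bit 4 and that, after
 * the last acknowledgment, req and ack agree on that bit (say both 0). When
 * the FW wants to report the event it flips its ack bit, so ack = 0x10 while
 * req = 0x00, and (req ^ ack) & CSG_EVT_MASK now has CSG_IDLE set. The
 * panthor_fw_update_reqs() call copies the ack value back into req for the
 * handled bits, making req == ack again, which is how the event is marked as
 * processed.
 */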
1704 
1705 static void sched_process_idle_event_locked(struct panthor_device *ptdev)
1706 {
1707 	struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
1708 
1709 	lockdep_assert_held(&ptdev->scheduler->lock);
1710 
1711 	/* Acknowledge the idle event and schedule a tick. */
1712 	panthor_fw_update_reqs(glb_iface, req, glb_iface->output->ack, GLB_IDLE);
1713 	sched_queue_delayed_work(ptdev->scheduler, tick, 0);
1714 }
1715 
1716 /**
1717  * sched_process_global_irq_locked() - Process the scheduling part of a global IRQ
1718  * @ptdev: Device.
1719  */
1720 static void sched_process_global_irq_locked(struct panthor_device *ptdev)
1721 {
1722 	struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
1723 	u32 req, ack, evts;
1724 
1725 	lockdep_assert_held(&ptdev->scheduler->lock);
1726 
1727 	req = READ_ONCE(glb_iface->input->req);
1728 	ack = READ_ONCE(glb_iface->output->ack);
1729 	evts = (req ^ ack) & GLB_EVT_MASK;
1730 
1731 	if (evts & GLB_IDLE)
1732 		sched_process_idle_event_locked(ptdev);
1733 }
1734 
1735 static void process_fw_events_work(struct work_struct *work)
1736 {
1737 	struct panthor_scheduler *sched = container_of(work, struct panthor_scheduler,
1738 						      fw_events_work);
1739 	u32 events = atomic_xchg(&sched->fw_events, 0);
1740 	struct panthor_device *ptdev = sched->ptdev;
1741 
1742 	mutex_lock(&sched->lock);
1743 
1744 	if (events & JOB_INT_GLOBAL_IF) {
1745 		sched_process_global_irq_locked(ptdev);
1746 		events &= ~JOB_INT_GLOBAL_IF;
1747 	}
1748 
1749 	while (events) {
1750 		u32 csg_id = ffs(events) - 1;
1751 
1752 		sched_process_csg_irq_locked(ptdev, csg_id);
1753 		events &= ~BIT(csg_id);
1754 	}
1755 
1756 	mutex_unlock(&sched->lock);
1757 }
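
/* Worked example of the ffs()-based walk above, with a made-up event mask:
 * if events = 0x0a (bits 1 and 3 set), the first iteration computes
 * ffs(0x0a) - 1 = 1, processes CSG slot 1 and clears bit 1, leaving 0x08;
 * the second iteration computes ffs(0x08) - 1 = 3, processes CSG slot 3 and
 * leaves 0, which terminates the loop. The global-interface bit is handled
 * separately before the loop, so only per-CSG bits remain at this point.
 */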
1758 
1759 /**
1760  * panthor_sched_report_fw_events() - Report FW events to the scheduler.
1761  */
1762 void panthor_sched_report_fw_events(struct panthor_device *ptdev, u32 events)
1763 {
1764 	if (!ptdev->scheduler)
1765 		return;
1766 
1767 	atomic_or(events, &ptdev->scheduler->fw_events);
1768 	sched_queue_work(ptdev->scheduler, fw_events);
1769 }
1770 
1771 static const char *fence_get_driver_name(struct dma_fence *fence)
1772 {
1773 	return "panthor";
1774 }
1775 
1776 static const char *queue_fence_get_timeline_name(struct dma_fence *fence)
1777 {
1778 	return "queue-fence";
1779 }
1780 
1781 static const struct dma_fence_ops panthor_queue_fence_ops = {
1782 	.get_driver_name = fence_get_driver_name,
1783 	.get_timeline_name = queue_fence_get_timeline_name,
1784 };
1785 
1786 struct panthor_csg_slots_upd_ctx {
1787 	u32 update_mask;
1788 	u32 timedout_mask;
1789 	struct {
1790 		u32 value;
1791 		u32 mask;
1792 	} requests[MAX_CSGS];
1793 };
1794 
1795 static void csgs_upd_ctx_init(struct panthor_csg_slots_upd_ctx *ctx)
1796 {
1797 	memset(ctx, 0, sizeof(*ctx));
1798 }
1799 
1800 static void csgs_upd_ctx_queue_reqs(struct panthor_device *ptdev,
1801 				    struct panthor_csg_slots_upd_ctx *ctx,
1802 				    u32 csg_id, u32 value, u32 mask)
1803 {
1804 	if (drm_WARN_ON(&ptdev->base, !mask) ||
1805 	    drm_WARN_ON(&ptdev->base, csg_id >= ptdev->scheduler->csg_slot_count))
1806 		return;
1807 
1808 	ctx->requests[csg_id].value = (ctx->requests[csg_id].value & ~mask) | (value & mask);
1809 	ctx->requests[csg_id].mask |= mask;
1810 	ctx->update_mask |= BIT(csg_id);
1811 }
1812 
1813 static int csgs_upd_ctx_apply_locked(struct panthor_device *ptdev,
1814 				     struct panthor_csg_slots_upd_ctx *ctx)
1815 {
1816 	struct panthor_scheduler *sched = ptdev->scheduler;
1817 	u32 update_slots = ctx->update_mask;
1818 
1819 	lockdep_assert_held(&sched->lock);
1820 
1821 	if (!ctx->update_mask)
1822 		return 0;
1823 
1824 	while (update_slots) {
1825 		struct panthor_fw_csg_iface *csg_iface;
1826 		u32 csg_id = ffs(update_slots) - 1;
1827 
1828 		update_slots &= ~BIT(csg_id);
1829 		csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1830 		panthor_fw_update_reqs(csg_iface, req,
1831 				       ctx->requests[csg_id].value,
1832 				       ctx->requests[csg_id].mask);
1833 	}
1834 
1835 	panthor_fw_ring_csg_doorbells(ptdev, ctx->update_mask);
1836 
1837 	update_slots = ctx->update_mask;
1838 	while (update_slots) {
1839 		struct panthor_fw_csg_iface *csg_iface;
1840 		u32 csg_id = ffs(update_slots) - 1;
1841 		u32 req_mask = ctx->requests[csg_id].mask, acked;
1842 		int ret;
1843 
1844 		update_slots &= ~BIT(csg_id);
1845 		csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1846 
1847 		ret = panthor_fw_csg_wait_acks(ptdev, csg_id, req_mask, &acked, 100);
1848 
1849 		if (acked & CSG_ENDPOINT_CONFIG)
1850 			csg_slot_sync_priority_locked(ptdev, csg_id);
1851 
1852 		if (acked & CSG_STATE_MASK)
1853 			csg_slot_sync_state_locked(ptdev, csg_id);
1854 
1855 		if (acked & CSG_STATUS_UPDATE) {
1856 			csg_slot_sync_queues_state_locked(ptdev, csg_id);
1857 			csg_slot_sync_idle_state_locked(ptdev, csg_id);
1858 		}
1859 
1860 		if (ret && acked != req_mask &&
1861 		    ((csg_iface->input->req ^ csg_iface->output->ack) & req_mask) != 0) {
1862 			drm_err(&ptdev->base, "CSG %d update request timedout", csg_id);
1863 			ctx->timedout_mask |= BIT(csg_id);
1864 		}
1865 	}
1866 
1867 	if (ctx->timedout_mask)
1868 		return -ETIMEDOUT;
1869 
1870 	return 0;
1871 }
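
/* Typical usage of the batching helpers above, as done in tick_ctx_init()
 * further down (a sketch with hypothetical locals csg_iface0/csg_iface2):
 *
 *	struct panthor_csg_slots_upd_ctx upd_ctx;
 *	int ret;
 *
 *	csgs_upd_ctx_init(&upd_ctx);
 *	csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, 0,
 *				csg_iface0->output->ack ^ CSG_STATUS_UPDATE,
 *				CSG_STATUS_UPDATE);
 *	csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, 2,
 *				csg_iface2->output->ack ^ CSG_STATUS_UPDATE,
 *				CSG_STATUS_UPDATE);
 *	ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
 *
 * All queued requests are written to the CSG interfaces, the doorbells are
 * rung once for the whole batch, and the acks are then awaited, with any
 * slot that times out reported in upd_ctx.timedout_mask.
 */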
1872 
1873 struct panthor_sched_tick_ctx {
1874 	struct list_head old_groups[PANTHOR_CSG_PRIORITY_COUNT];
1875 	struct list_head groups[PANTHOR_CSG_PRIORITY_COUNT];
1876 	u32 idle_group_count;
1877 	u32 group_count;
1878 	enum panthor_csg_priority min_priority;
1879 	struct panthor_vm *vms[MAX_CS_PER_CSG];
1880 	u32 as_count;
1881 	bool immediate_tick;
1882 	u32 csg_upd_failed_mask;
1883 };
1884 
1885 static bool
1886 tick_ctx_is_full(const struct panthor_scheduler *sched,
1887 		 const struct panthor_sched_tick_ctx *ctx)
1888 {
1889 	return ctx->group_count == sched->csg_slot_count;
1890 }
1891 
1892 static bool
1893 group_is_idle(struct panthor_group *group)
1894 {
1895 	struct panthor_device *ptdev = group->ptdev;
1896 	u32 inactive_queues;
1897 
1898 	if (group->csg_id >= 0)
1899 		return ptdev->scheduler->csg_slots[group->csg_id].idle;
1900 
1901 	inactive_queues = group->idle_queues | group->blocked_queues;
1902 	return hweight32(inactive_queues) == group->queue_count;
1903 }
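
/* Worked example for the off-slot case above, with made-up masks: a group
 * with queue_count = 3, idle_queues = 0x1 and blocked_queues = 0x2 has
 * inactive_queues = 0x3, i.e. hweight32() = 2, so it is not considered
 * idle; only once all three queues are idle or blocked (hweight32() = 3)
 * does the group count as idle.
 */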
1904 
1905 static bool
1906 group_can_run(struct panthor_group *group)
1907 {
1908 	return group->state != PANTHOR_CS_GROUP_TERMINATED &&
1909 	       group->state != PANTHOR_CS_GROUP_UNKNOWN_STATE &&
1910 	       !group->destroyed && group->fatal_queues == 0 &&
1911 	       !group->timedout;
1912 }
1913 
1914 static void
1915 tick_ctx_pick_groups_from_list(const struct panthor_scheduler *sched,
1916 			       struct panthor_sched_tick_ctx *ctx,
1917 			       struct list_head *queue,
1918 			       bool skip_idle_groups,
1919 			       bool owned_by_tick_ctx)
1920 {
1921 	struct panthor_group *group, *tmp;
1922 
1923 	if (tick_ctx_is_full(sched, ctx))
1924 		return;
1925 
1926 	list_for_each_entry_safe(group, tmp, queue, run_node) {
1927 		u32 i;
1928 
1929 		if (!group_can_run(group))
1930 			continue;
1931 
1932 		if (skip_idle_groups && group_is_idle(group))
1933 			continue;
1934 
1935 		for (i = 0; i < ctx->as_count; i++) {
1936 			if (ctx->vms[i] == group->vm)
1937 				break;
1938 		}
1939 
1940 		if (i == ctx->as_count && ctx->as_count == sched->as_slot_count)
1941 			continue;
1942 
1943 		if (!owned_by_tick_ctx)
1944 			group_get(group);
1945 
1946 		list_move_tail(&group->run_node, &ctx->groups[group->priority]);
1947 		ctx->group_count++;
1948 		if (group_is_idle(group))
1949 			ctx->idle_group_count++;
1950 
1951 		if (i == ctx->as_count)
1952 			ctx->vms[ctx->as_count++] = group->vm;
1953 
1954 		if (ctx->min_priority > group->priority)
1955 			ctx->min_priority = group->priority;
1956 
1957 		if (tick_ctx_is_full(sched, ctx))
1958 			return;
1959 	}
1960 }
1961 
1962 static void
1963 tick_ctx_insert_old_group(struct panthor_scheduler *sched,
1964 			  struct panthor_sched_tick_ctx *ctx,
1965 			  struct panthor_group *group,
1966 			  bool full_tick)
1967 {
1968 	struct panthor_csg_slot *csg_slot = &sched->csg_slots[group->csg_id];
1969 	struct panthor_group *other_group;
1970 
1971 	if (!full_tick) {
1972 		list_add_tail(&group->run_node, &ctx->old_groups[group->priority]);
1973 		return;
1974 	}
1975 
1976 	/* Rotate to make sure groups with lower CSG slot
1977 	 * priorities have a chance to get a higher CSG slot
1978 	 * priority next time they get picked. This priority
1979 	 * has an impact on resource request ordering, so it's
1980 	 * important to make sure we don't let one group starve
1981 	 * all other groups with the same group priority.
1982 	 */
1983 	list_for_each_entry(other_group,
1984 			    &ctx->old_groups[csg_slot->group->priority],
1985 			    run_node) {
1986 		struct panthor_csg_slot *other_csg_slot = &sched->csg_slots[other_group->csg_id];
1987 
1988 		if (other_csg_slot->priority > csg_slot->priority) {
1989 			list_add_tail(&csg_slot->group->run_node, &other_group->run_node);
1990 			return;
1991 		}
1992 	}
1993 
1994 	list_add_tail(&group->run_node, &ctx->old_groups[group->priority]);
1995 }
1996 
1997 static void
1998 tick_ctx_init(struct panthor_scheduler *sched,
1999 	      struct panthor_sched_tick_ctx *ctx,
2000 	      bool full_tick)
2001 {
2002 	struct panthor_device *ptdev = sched->ptdev;
2003 	struct panthor_csg_slots_upd_ctx upd_ctx;
2004 	int ret;
2005 	u32 i;
2006 
2007 	memset(ctx, 0, sizeof(*ctx));
2008 	csgs_upd_ctx_init(&upd_ctx);
2009 
2010 	ctx->min_priority = PANTHOR_CSG_PRIORITY_COUNT;
2011 	for (i = 0; i < ARRAY_SIZE(ctx->groups); i++) {
2012 		INIT_LIST_HEAD(&ctx->groups[i]);
2013 		INIT_LIST_HEAD(&ctx->old_groups[i]);
2014 	}
2015 
2016 	for (i = 0; i < sched->csg_slot_count; i++) {
2017 		struct panthor_csg_slot *csg_slot = &sched->csg_slots[i];
2018 		struct panthor_group *group = csg_slot->group;
2019 		struct panthor_fw_csg_iface *csg_iface;
2020 
2021 		if (!group)
2022 			continue;
2023 
2024 		csg_iface = panthor_fw_get_csg_iface(ptdev, i);
2025 		group_get(group);
2026 
2027 		/* If there were unhandled faults on the VM, force processing of
2028 		 * CSG IRQs, so we can flag the faulty queue.
2029 		 */
2030 		if (panthor_vm_has_unhandled_faults(group->vm)) {
2031 			sched_process_csg_irq_locked(ptdev, i);
2032 
2033 			/* No fatal fault reported, flag all queues as faulty. */
2034 			if (!group->fatal_queues)
2035 				group->fatal_queues |= GENMASK(group->queue_count - 1, 0);
2036 		}
2037 
2038 		tick_ctx_insert_old_group(sched, ctx, group, full_tick);
2039 		csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, i,
2040 					csg_iface->output->ack ^ CSG_STATUS_UPDATE,
2041 					CSG_STATUS_UPDATE);
2042 	}
2043 
2044 	ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
2045 	if (ret) {
2046 		panthor_device_schedule_reset(ptdev);
2047 		ctx->csg_upd_failed_mask |= upd_ctx.timedout_mask;
2048 	}
2049 }
2050 
2051 static void
2052 group_term_post_processing(struct panthor_group *group)
2053 {
2054 	struct panthor_job *job, *tmp;
2055 	LIST_HEAD(faulty_jobs);
2056 	bool cookie;
2057 	u32 i = 0;
2058 
2059 	if (drm_WARN_ON(&group->ptdev->base, group_can_run(group)))
2060 		return;
2061 
2062 	cookie = dma_fence_begin_signalling();
2063 	for (i = 0; i < group->queue_count; i++) {
2064 		struct panthor_queue *queue = group->queues[i];
2065 		struct panthor_syncobj_64b *syncobj;
2066 		int err;
2067 
2068 		if (group->fatal_queues & BIT(i))
2069 			err = -EINVAL;
2070 		else if (group->timedout)
2071 			err = -ETIMEDOUT;
2072 		else
2073 			err = -ECANCELED;
2074 
2075 		if (!queue)
2076 			continue;
2077 
2078 		spin_lock(&queue->fence_ctx.lock);
2079 		list_for_each_entry_safe(job, tmp, &queue->fence_ctx.in_flight_jobs, node) {
2080 			list_move_tail(&job->node, &faulty_jobs);
2081 			dma_fence_set_error(job->done_fence, err);
2082 			dma_fence_signal_locked(job->done_fence);
2083 		}
2084 		spin_unlock(&queue->fence_ctx.lock);
2085 
2086 		/* Manually update the syncobj seqno to unblock waiters. */
2087 		syncobj = group->syncobjs->kmap + (i * sizeof(*syncobj));
2088 		syncobj->status = ~0;
2089 		syncobj->seqno = atomic64_read(&queue->fence_ctx.seqno);
2090 		sched_queue_work(group->ptdev->scheduler, sync_upd);
2091 	}
2092 	dma_fence_end_signalling(cookie);
2093 
2094 	list_for_each_entry_safe(job, tmp, &faulty_jobs, node) {
2095 		list_del_init(&job->node);
2096 		panthor_job_put(&job->base);
2097 	}
2098 }
2099 
2100 static void group_term_work(struct work_struct *work)
2101 {
2102 	struct panthor_group *group =
2103 		container_of(work, struct panthor_group, term_work);
2104 
2105 	group_term_post_processing(group);
2106 	group_put(group);
2107 }
2108 
2109 static void
2110 tick_ctx_cleanup(struct panthor_scheduler *sched,
2111 		 struct panthor_sched_tick_ctx *ctx)
2112 {
2113 	struct panthor_device *ptdev = sched->ptdev;
2114 	struct panthor_group *group, *tmp;
2115 	u32 i;
2116 
2117 	for (i = 0; i < ARRAY_SIZE(ctx->old_groups); i++) {
2118 		list_for_each_entry_safe(group, tmp, &ctx->old_groups[i], run_node) {
2119 			/* If everything went fine, we should only have groups
2120 			 * to be terminated in the old_groups lists.
2121 			 */
2122 			drm_WARN_ON(&ptdev->base, !ctx->csg_upd_failed_mask &&
2123 				    group_can_run(group));
2124 
2125 			if (!group_can_run(group)) {
2126 				list_del_init(&group->run_node);
2127 				list_del_init(&group->wait_node);
2128 				group_queue_work(group, term);
2129 			} else if (group->csg_id >= 0) {
2130 				list_del_init(&group->run_node);
2131 			} else {
2132 				list_move(&group->run_node,
2133 					  group_is_idle(group) ?
2134 					  &sched->groups.idle[group->priority] :
2135 					  &sched->groups.runnable[group->priority]);
2136 			}
2137 			group_put(group);
2138 		}
2139 	}
2140 
2141 	for (i = 0; i < ARRAY_SIZE(ctx->groups); i++) {
2142 		/* If everything went fine, the groups to schedule lists should
2143 		 * be empty.
2144 		 */
2145 		drm_WARN_ON(&ptdev->base,
2146 			    !ctx->csg_upd_failed_mask && !list_empty(&ctx->groups[i]));
2147 
2148 		list_for_each_entry_safe(group, tmp, &ctx->groups[i], run_node) {
2149 			if (group->csg_id >= 0) {
2150 				list_del_init(&group->run_node);
2151 			} else {
2152 				list_move(&group->run_node,
2153 					  group_is_idle(group) ?
2154 					  &sched->groups.idle[group->priority] :
2155 					  &sched->groups.runnable[group->priority]);
2156 			}
2157 			group_put(group);
2158 		}
2159 	}
2160 }
2161 
2162 static void
2163 tick_ctx_apply(struct panthor_scheduler *sched, struct panthor_sched_tick_ctx *ctx)
2164 {
2165 	struct panthor_group *group, *tmp;
2166 	struct panthor_device *ptdev = sched->ptdev;
2167 	struct panthor_csg_slot *csg_slot;
2168 	int prio, new_csg_prio = MAX_CSG_PRIO, i;
2169 	u32 free_csg_slots = 0;
2170 	struct panthor_csg_slots_upd_ctx upd_ctx;
2171 	int ret;
2172 
2173 	csgs_upd_ctx_init(&upd_ctx);
2174 
2175 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
2176 		/* Suspend or terminate evicted groups. */
2177 		list_for_each_entry(group, &ctx->old_groups[prio], run_node) {
2178 			bool term = !group_can_run(group);
2179 			int csg_id = group->csg_id;
2180 
2181 			if (drm_WARN_ON(&ptdev->base, csg_id < 0))
2182 				continue;
2183 
2184 			csg_slot = &sched->csg_slots[csg_id];
2185 			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
2186 						term ? CSG_STATE_TERMINATE : CSG_STATE_SUSPEND,
2187 						CSG_STATE_MASK);
2188 		}
2189 
2190 		/* Update priorities on already running groups. */
2191 		list_for_each_entry(group, &ctx->groups[prio], run_node) {
2192 			struct panthor_fw_csg_iface *csg_iface;
2193 			int csg_id = group->csg_id;
2194 
2195 			if (csg_id < 0) {
2196 				new_csg_prio--;
2197 				continue;
2198 			}
2199 
2200 			csg_slot = &sched->csg_slots[csg_id];
2201 			csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
2202 			if (csg_slot->priority == new_csg_prio) {
2203 				new_csg_prio--;
2204 				continue;
2205 			}
2206 
2207 			panthor_fw_update_reqs(csg_iface, endpoint_req,
2208 					       CSG_EP_REQ_PRIORITY(new_csg_prio),
2209 					       CSG_EP_REQ_PRIORITY_MASK);
2210 			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
2211 						csg_iface->output->ack ^ CSG_ENDPOINT_CONFIG,
2212 						CSG_ENDPOINT_CONFIG);
2213 			new_csg_prio--;
2214 		}
2215 	}
2216 
2217 	ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
2218 	if (ret) {
2219 		panthor_device_schedule_reset(ptdev);
2220 		ctx->csg_upd_failed_mask |= upd_ctx.timedout_mask;
2221 		return;
2222 	}
2223 
2224 	/* Unbind evicted groups. */
2225 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
2226 		list_for_each_entry(group, &ctx->old_groups[prio], run_node) {
2227 			/* This group is gone. Process its pending interrupts
2228 			 * so they are cleared before we start the new
2229 			 * group.
2230 			 */
2231 			if (group->csg_id >= 0)
2232 				sched_process_csg_irq_locked(ptdev, group->csg_id);
2233 
2234 			group_unbind_locked(group);
2235 		}
2236 	}
2237 
2238 	for (i = 0; i < sched->csg_slot_count; i++) {
2239 		if (!sched->csg_slots[i].group)
2240 			free_csg_slots |= BIT(i);
2241 	}
2242 
2243 	csgs_upd_ctx_init(&upd_ctx);
2244 	new_csg_prio = MAX_CSG_PRIO;
2245 
2246 	/* Start new groups. */
2247 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
2248 		list_for_each_entry(group, &ctx->groups[prio], run_node) {
2249 			int csg_id = group->csg_id;
2250 			struct panthor_fw_csg_iface *csg_iface;
2251 
2252 			if (csg_id >= 0) {
2253 				new_csg_prio--;
2254 				continue;
2255 			}
2256 
2257 			csg_id = ffs(free_csg_slots) - 1;
2258 			if (drm_WARN_ON(&ptdev->base, csg_id < 0))
2259 				break;
2260 
2261 			csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
2262 			csg_slot = &sched->csg_slots[csg_id];
2263 			group_bind_locked(group, csg_id);
2264 			csg_slot_prog_locked(ptdev, csg_id, new_csg_prio--);
2265 			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
2266 						group->state == PANTHOR_CS_GROUP_SUSPENDED ?
2267 						CSG_STATE_RESUME : CSG_STATE_START,
2268 						CSG_STATE_MASK);
2269 			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
2270 						csg_iface->output->ack ^ CSG_ENDPOINT_CONFIG,
2271 						CSG_ENDPOINT_CONFIG);
2272 			free_csg_slots &= ~BIT(csg_id);
2273 		}
2274 	}
2275 
2276 	ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
2277 	if (ret) {
2278 		panthor_device_schedule_reset(ptdev);
2279 		ctx->csg_upd_failed_mask |= upd_ctx.timedout_mask;
2280 		return;
2281 	}
2282 
2283 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
2284 		list_for_each_entry_safe(group, tmp, &ctx->groups[prio], run_node) {
2285 			list_del_init(&group->run_node);
2286 
2287 			/* If the group has been destroyed while we were
2288 			 * scheduling, ask for an immediate tick to
2289 			 * re-evaluate as soon as possible and get rid of
2290 			 * this dangling group.
2291 			 */
2292 			if (group->destroyed)
2293 				ctx->immediate_tick = true;
2294 			group_put(group);
2295 		}
2296 
2297 		/* Return evicted groups to the idle or run queues. Groups
2298 		 * that can no longer be run (because they've been destroyed
2299 		 * or experienced an unrecoverable error) will be scheduled
2300 		 * for destruction in tick_ctx_cleanup().
2301 		 */
2302 		list_for_each_entry_safe(group, tmp, &ctx->old_groups[prio], run_node) {
2303 			if (!group_can_run(group))
2304 				continue;
2305 
2306 			if (group_is_idle(group))
2307 				list_move_tail(&group->run_node, &sched->groups.idle[prio]);
2308 			else
2309 				list_move_tail(&group->run_node, &sched->groups.runnable[prio]);
2310 			group_put(group);
2311 		}
2312 	}
2313 
2314 	sched->used_csg_slot_count = ctx->group_count;
2315 	sched->might_have_idle_groups = ctx->idle_group_count > 0;
2316 }
2317 
2318 static u64
2319 tick_ctx_update_resched_target(struct panthor_scheduler *sched,
2320 			       const struct panthor_sched_tick_ctx *ctx)
2321 {
2322 	/* We had space left, no need to reschedule until some external event happens. */
2323 	if (!tick_ctx_is_full(sched, ctx))
2324 		goto no_tick;
2325 
2326 	/* If idle groups were scheduled, no need to wake up until some external
2327 	 * event happens (group unblocked, new job submitted, ...).
2328 	 */
2329 	if (ctx->idle_group_count)
2330 		goto no_tick;
2331 
2332 	if (drm_WARN_ON(&sched->ptdev->base, ctx->min_priority >= PANTHOR_CSG_PRIORITY_COUNT))
2333 		goto no_tick;
2334 
2335 	/* If there are groups of the same priority waiting, we need to
2336 	 * keep the scheduler ticking, otherwise, we'll just wait for
2337 	 * new groups with higher priority to be queued.
2338 	 */
2339 	if (!list_empty(&sched->groups.runnable[ctx->min_priority])) {
2340 		u64 resched_target = sched->last_tick + sched->tick_period;
2341 
2342 		if (time_before64(sched->resched_target, sched->last_tick) ||
2343 		    time_before64(resched_target, sched->resched_target))
2344 			sched->resched_target = resched_target;
2345 
2346 		return sched->resched_target - sched->last_tick;
2347 	}
2348 
2349 no_tick:
2350 	sched->resched_target = U64_MAX;
2351 	return U64_MAX;
2352 }
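
/* Worked example of the resched computation above, with hypothetical
 * jiffies values: with last_tick = 1000 and tick_period = 500, the
 * candidate resched_target is 1500. If the previous target was stale
 * (before last_tick) or later than 1500, it is pulled in to 1500, and the
 * function returns 1500 - 1000 = 500 jiffies as the delay until the next
 * tick. When no same-priority group is waiting, U64_MAX is returned and
 * the periodic tick is effectively disabled.
 */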
2353 
2354 static void tick_work(struct work_struct *work)
2355 {
2356 	struct panthor_scheduler *sched = container_of(work, struct panthor_scheduler,
2357 						      tick_work.work);
2358 	struct panthor_device *ptdev = sched->ptdev;
2359 	struct panthor_sched_tick_ctx ctx;
2360 	u64 remaining_jiffies = 0, resched_delay;
2361 	u64 now = get_jiffies_64();
2362 	int prio, ret, cookie;
2363 
2364 	if (!drm_dev_enter(&ptdev->base, &cookie))
2365 		return;
2366 
2367 	ret = panthor_device_resume_and_get(ptdev);
2368 	if (drm_WARN_ON(&ptdev->base, ret))
2369 		goto out_dev_exit;
2370 
2371 	if (time_before64(now, sched->resched_target))
2372 		remaining_jiffies = sched->resched_target - now;
2373 
2374 	mutex_lock(&sched->lock);
2375 	if (panthor_device_reset_is_pending(sched->ptdev))
2376 		goto out_unlock;
2377 
2378 	tick_ctx_init(sched, &ctx, remaining_jiffies != 0);
2379 	if (ctx.csg_upd_failed_mask)
2380 		goto out_cleanup_ctx;
2381 
2382 	if (remaining_jiffies) {
2383 		/* Scheduling forced in the middle of a tick. Only RT groups
2384 		 * can preempt non-RT ones. Currently running RT groups can't be
2385 		 * preempted.
2386 		 */
2387 		for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1;
2388 		     prio >= 0 && !tick_ctx_is_full(sched, &ctx);
2389 		     prio--) {
2390 			tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio],
2391 						       true, true);
2392 			if (prio == PANTHOR_CSG_PRIORITY_RT) {
2393 				tick_ctx_pick_groups_from_list(sched, &ctx,
2394 							       &sched->groups.runnable[prio],
2395 							       true, false);
2396 			}
2397 		}
2398 	}
2399 
2400 	/* First pick non-idle groups */
2401 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1;
2402 	     prio >= 0 && !tick_ctx_is_full(sched, &ctx);
2403 	     prio--) {
2404 		tick_ctx_pick_groups_from_list(sched, &ctx, &sched->groups.runnable[prio],
2405 					       true, false);
2406 		tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], true, true);
2407 	}
2408 
2409 	/* If we have free CSG slots left, pick idle groups */
2410 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1;
2411 	     prio >= 0 && !tick_ctx_is_full(sched, &ctx);
2412 	     prio--) {
2413 		/* Check the old_groups lists first to avoid reprogramming the slots */
2414 		tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], false, true);
2415 		tick_ctx_pick_groups_from_list(sched, &ctx, &sched->groups.idle[prio],
2416 					       false, false);
2417 	}
2418 
2419 	tick_ctx_apply(sched, &ctx);
2420 	if (ctx.csg_upd_failed_mask)
2421 		goto out_cleanup_ctx;
2422 
2423 	if (ctx.idle_group_count == ctx.group_count) {
2424 		panthor_devfreq_record_idle(sched->ptdev);
2425 		if (sched->pm.has_ref) {
2426 			pm_runtime_put_autosuspend(ptdev->base.dev);
2427 			sched->pm.has_ref = false;
2428 		}
2429 	} else {
2430 		panthor_devfreq_record_busy(sched->ptdev);
2431 		if (!sched->pm.has_ref) {
2432 			pm_runtime_get(ptdev->base.dev);
2433 			sched->pm.has_ref = true;
2434 		}
2435 	}
2436 
2437 	sched->last_tick = now;
2438 	resched_delay = tick_ctx_update_resched_target(sched, &ctx);
2439 	if (ctx.immediate_tick)
2440 		resched_delay = 0;
2441 
2442 	if (resched_delay != U64_MAX)
2443 		sched_queue_delayed_work(sched, tick, resched_delay);
2444 
2445 out_cleanup_ctx:
2446 	tick_ctx_cleanup(sched, &ctx);
2447 
2448 out_unlock:
2449 	mutex_unlock(&sched->lock);
2450 	pm_runtime_mark_last_busy(ptdev->base.dev);
2451 	pm_runtime_put_autosuspend(ptdev->base.dev);
2452 
2453 out_dev_exit:
2454 	drm_dev_exit(cookie);
2455 }
2456 
2457 static int panthor_queue_eval_syncwait(struct panthor_group *group, u8 queue_idx)
2458 {
2459 	struct panthor_queue *queue = group->queues[queue_idx];
2460 	union {
2461 		struct panthor_syncobj_64b sync64;
2462 		struct panthor_syncobj_32b sync32;
2463 	} *syncobj;
2464 	bool result;
2465 	u64 value;
2466 
2467 	syncobj = panthor_queue_get_syncwait_obj(group, queue);
2468 	if (!syncobj)
2469 		return -EINVAL;
2470 
2471 	value = queue->syncwait.sync64 ?
2472 		syncobj->sync64.seqno :
2473 		syncobj->sync32.seqno;
2474 
2475 	if (queue->syncwait.gt)
2476 		result = value > queue->syncwait.ref;
2477 	else
2478 		result = value <= queue->syncwait.ref;
2479 
2480 	if (result)
2481 		panthor_queue_put_syncwait_obj(queue);
2482 
2483 	return result;
2484 }
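
/* Worked example of the syncwait evaluation above, with made-up values:
 * for a wait on a 64-bit sync object with syncwait.gt set and
 * syncwait.ref = 41, a seqno of 42 in the syncobj satisfies the condition
 * (42 > 41), the syncwait object is released and 1 is returned so the
 * queue can be unblocked; a seqno of 41 or less keeps the queue blocked.
 */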
2485 
2486 static void sync_upd_work(struct work_struct *work)
2487 {
2488 	struct panthor_scheduler *sched = container_of(work,
2489 						      struct panthor_scheduler,
2490 						      sync_upd_work);
2491 	struct panthor_group *group, *tmp;
2492 	bool immediate_tick = false;
2493 
2494 	mutex_lock(&sched->lock);
2495 	list_for_each_entry_safe(group, tmp, &sched->groups.waiting, wait_node) {
2496 		u32 tested_queues = group->blocked_queues;
2497 		u32 unblocked_queues = 0;
2498 
2499 		while (tested_queues) {
2500 			u32 cs_id = ffs(tested_queues) - 1;
2501 			int ret;
2502 
2503 			ret = panthor_queue_eval_syncwait(group, cs_id);
2504 			drm_WARN_ON(&group->ptdev->base, ret < 0);
2505 			if (ret)
2506 				unblocked_queues |= BIT(cs_id);
2507 
2508 			tested_queues &= ~BIT(cs_id);
2509 		}
2510 
2511 		if (unblocked_queues) {
2512 			group->blocked_queues &= ~unblocked_queues;
2513 
2514 			if (group->csg_id < 0) {
2515 				list_move(&group->run_node,
2516 					  &sched->groups.runnable[group->priority]);
2517 				if (group->priority == PANTHOR_CSG_PRIORITY_RT)
2518 					immediate_tick = true;
2519 			}
2520 		}
2521 
2522 		if (!group->blocked_queues)
2523 			list_del_init(&group->wait_node);
2524 	}
2525 	mutex_unlock(&sched->lock);
2526 
2527 	if (immediate_tick)
2528 		sched_queue_delayed_work(sched, tick, 0);
2529 }
2530 
2531 static void group_schedule_locked(struct panthor_group *group, u32 queue_mask)
2532 {
2533 	struct panthor_device *ptdev = group->ptdev;
2534 	struct panthor_scheduler *sched = ptdev->scheduler;
2535 	struct list_head *queue = &sched->groups.runnable[group->priority];
2536 	u64 delay_jiffies = 0;
2537 	bool was_idle;
2538 	u64 now;
2539 
2540 	if (!group_can_run(group))
2541 		return;
2542 
2543 	/* All updated queues are blocked, no need to wake up the scheduler. */
2544 	if ((queue_mask & group->blocked_queues) == queue_mask)
2545 		return;
2546 
2547 	was_idle = group_is_idle(group);
2548 	group->idle_queues &= ~queue_mask;
2549 
2550 	/* Don't mess with the lists if we're in the middle of a reset. */
2551 	if (atomic_read(&sched->reset.in_progress))
2552 		return;
2553 
2554 	if (was_idle && !group_is_idle(group))
2555 		list_move_tail(&group->run_node, queue);
2556 
2557 	/* RT groups are preemptive. */
2558 	if (group->priority == PANTHOR_CSG_PRIORITY_RT) {
2559 		sched_queue_delayed_work(sched, tick, 0);
2560 		return;
2561 	}
2562 
2563 	/* Some groups might be idle, force an immediate tick to
2564 	 * re-evaluate.
2565 	 */
2566 	if (sched->might_have_idle_groups) {
2567 		sched_queue_delayed_work(sched, tick, 0);
2568 		return;
2569 	}
2570 
2571 	/* Scheduler is ticking, nothing to do. */
2572 	if (sched->resched_target != U64_MAX) {
2573 		/* If there are free slots, force an immediate tick. */
2574 		if (sched->used_csg_slot_count < sched->csg_slot_count)
2575 			sched_queue_delayed_work(sched, tick, 0);
2576 
2577 		return;
2578 	}
2579 
2580 	/* Scheduler tick was off, recalculate the resched_target based on the
2581 	 * last tick event, and queue the scheduler work.
2582 	 */
2583 	now = get_jiffies_64();
2584 	sched->resched_target = sched->last_tick + sched->tick_period;
2585 	if (sched->used_csg_slot_count == sched->csg_slot_count &&
2586 	    time_before64(now, sched->resched_target))
2587 		delay_jiffies = min_t(unsigned long, sched->resched_target - now, ULONG_MAX);
2588 
2589 	sched_queue_delayed_work(sched, tick, delay_jiffies);
2590 }
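
/* Worked example of the delay computation at the end of
 * group_schedule_locked(), with hypothetical jiffies values: if the tick
 * was off, last_tick = 1000, tick_period = 500 and now = 1200, the new
 * resched_target is 1500; when all CSG slots are in use the tick is
 * delayed by 1500 - 1200 = 300 jiffies, otherwise delay_jiffies stays 0
 * and the tick is scheduled immediately so the group can grab a free slot.
 */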
2591 
2592 static void queue_stop(struct panthor_queue *queue,
2593 		       struct panthor_job *bad_job)
2594 {
2595 	drm_sched_stop(&queue->scheduler, bad_job ? &bad_job->base : NULL);
2596 }
2597 
2598 static void queue_start(struct panthor_queue *queue)
2599 {
2600 	struct panthor_job *job;
2601 
2602 	/* Re-assign the parent fences. */
2603 	list_for_each_entry(job, &queue->scheduler.pending_list, base.list)
2604 		job->base.s_fence->parent = dma_fence_get(job->done_fence);
2605 
2606 	drm_sched_start(&queue->scheduler, 0);
2607 }
2608 
2609 static void panthor_group_stop(struct panthor_group *group)
2610 {
2611 	struct panthor_scheduler *sched = group->ptdev->scheduler;
2612 
2613 	lockdep_assert_held(&sched->reset.lock);
2614 
2615 	for (u32 i = 0; i < group->queue_count; i++)
2616 		queue_stop(group->queues[i], NULL);
2617 
2618 	group_get(group);
2619 	list_move_tail(&group->run_node, &sched->reset.stopped_groups);
2620 }
2621 
2622 static void panthor_group_start(struct panthor_group *group)
2623 {
2624 	struct panthor_scheduler *sched = group->ptdev->scheduler;
2625 
2626 	lockdep_assert_held(&group->ptdev->scheduler->reset.lock);
2627 
2628 	for (u32 i = 0; i < group->queue_count; i++)
2629 		queue_start(group->queues[i]);
2630 
2631 	if (group_can_run(group)) {
2632 		list_move_tail(&group->run_node,
2633 			       group_is_idle(group) ?
2634 			       &sched->groups.idle[group->priority] :
2635 			       &sched->groups.runnable[group->priority]);
2636 	} else {
2637 		list_del_init(&group->run_node);
2638 		list_del_init(&group->wait_node);
2639 		group_queue_work(group, term);
2640 	}
2641 
2642 	group_put(group);
2643 }
2644 
2645 static void panthor_sched_immediate_tick(struct panthor_device *ptdev)
2646 {
2647 	struct panthor_scheduler *sched = ptdev->scheduler;
2648 
2649 	sched_queue_delayed_work(sched, tick, 0);
2650 }
2651 
2652 /**
2653  * panthor_sched_report_mmu_fault() - Report MMU faults to the scheduler.
2654  */
2655 void panthor_sched_report_mmu_fault(struct panthor_device *ptdev)
2656 {
2657 	/* Force a tick to immediately kill faulty groups. */
2658 	if (ptdev->scheduler)
2659 		panthor_sched_immediate_tick(ptdev);
2660 }
2661 
2662 void panthor_sched_resume(struct panthor_device *ptdev)
2663 {
2664 	/* Force a tick to re-evaluate after a resume. */
2665 	panthor_sched_immediate_tick(ptdev);
2666 }
2667 
2668 void panthor_sched_suspend(struct panthor_device *ptdev)
2669 {
2670 	struct panthor_scheduler *sched = ptdev->scheduler;
2671 	struct panthor_csg_slots_upd_ctx upd_ctx;
2672 	struct panthor_group *group;
2673 	u32 suspended_slots;
2674 	u32 i;
2675 
2676 	mutex_lock(&sched->lock);
2677 	csgs_upd_ctx_init(&upd_ctx);
2678 	for (i = 0; i < sched->csg_slot_count; i++) {
2679 		struct panthor_csg_slot *csg_slot = &sched->csg_slots[i];
2680 
2681 		if (csg_slot->group) {
2682 			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, i,
2683 						group_can_run(csg_slot->group) ?
2684 						CSG_STATE_SUSPEND : CSG_STATE_TERMINATE,
2685 						CSG_STATE_MASK);
2686 		}
2687 	}
2688 
2689 	suspended_slots = upd_ctx.update_mask;
2690 
2691 	csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
2692 	suspended_slots &= ~upd_ctx.timedout_mask;
2693 
2694 	if (upd_ctx.timedout_mask) {
2695 		u32 slot_mask = upd_ctx.timedout_mask;
2696 
2697 		drm_err(&ptdev->base, "CSG suspend failed, escalating to termination");
2698 		csgs_upd_ctx_init(&upd_ctx);
2699 		while (slot_mask) {
2700 			u32 csg_id = ffs(slot_mask) - 1;
2701 			struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
2702 
2703 			/* If the group was still usable before that point, we consider
2704 			 * it innocent.
2705 			 */
2706 			if (group_can_run(csg_slot->group))
2707 				csg_slot->group->innocent = true;
2708 
2709 			/* We consider group suspension failures as fatal and flag the
2710 			 * group as unusable by setting timedout=true.
2711 			 */
2712 			csg_slot->group->timedout = true;
2713 
2714 			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
2715 						CSG_STATE_TERMINATE,
2716 						CSG_STATE_MASK);
2717 			slot_mask &= ~BIT(csg_id);
2718 		}
2719 
2720 		csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
2721 
2722 		slot_mask = upd_ctx.timedout_mask;
2723 		while (slot_mask) {
2724 			u32 csg_id = ffs(slot_mask) - 1;
2725 			struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
2726 
2727 			/* The terminate command timed out, but the soft-reset will
2728 			 * automatically terminate all active groups, so let's
2729 			 * force the state to terminated here.
2730 			 */
2731 			if (csg_slot->group->state != PANTHOR_CS_GROUP_TERMINATED)
2732 				csg_slot->group->state = PANTHOR_CS_GROUP_TERMINATED;
2733 			slot_mask &= ~BIT(csg_id);
2734 		}
2735 	}
2736 
2737 	/* Flush L2 and LSC caches to make sure suspend state is up-to-date.
2738 	 * If the flush fails, flag all queues for termination.
2739 	 */
2740 	if (suspended_slots) {
2741 		bool flush_caches_failed = false;
2742 		u32 slot_mask = suspended_slots;
2743 
2744 		if (panthor_gpu_flush_caches(ptdev, CACHE_CLEAN, CACHE_CLEAN, 0))
2745 			flush_caches_failed = true;
2746 
2747 		while (slot_mask) {
2748 			u32 csg_id = ffs(slot_mask) - 1;
2749 			struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
2750 
2751 			if (flush_caches_failed)
2752 				csg_slot->group->state = PANTHOR_CS_GROUP_TERMINATED;
2753 			else
2754 				csg_slot_sync_update_locked(ptdev, csg_id);
2755 
2756 			slot_mask &= ~BIT(csg_id);
2757 		}
2758 	}
2759 
2760 	for (i = 0; i < sched->csg_slot_count; i++) {
2761 		struct panthor_csg_slot *csg_slot = &sched->csg_slots[i];
2762 
2763 		group = csg_slot->group;
2764 		if (!group)
2765 			continue;
2766 
2767 		group_get(group);
2768 
2769 		if (group->csg_id >= 0)
2770 			sched_process_csg_irq_locked(ptdev, group->csg_id);
2771 
2772 		group_unbind_locked(group);
2773 
2774 		drm_WARN_ON(&group->ptdev->base, !list_empty(&group->run_node));
2775 
2776 		if (group_can_run(group)) {
2777 			list_add(&group->run_node,
2778 				 &sched->groups.idle[group->priority]);
2779 		} else {
2780 			/* We don't bother stopping the scheduler if the group is
2781 			 * faulty; the group termination work will finish the job.
2782 			 */
2783 			list_del_init(&group->wait_node);
2784 			group_queue_work(group, term);
2785 		}
2786 		group_put(group);
2787 	}
2788 	mutex_unlock(&sched->lock);
2789 }
2790 
2791 void panthor_sched_pre_reset(struct panthor_device *ptdev)
2792 {
2793 	struct panthor_scheduler *sched = ptdev->scheduler;
2794 	struct panthor_group *group, *group_tmp;
2795 	u32 i;
2796 
2797 	mutex_lock(&sched->reset.lock);
2798 	atomic_set(&sched->reset.in_progress, true);
2799 
2800 	/* Cancel all scheduler works. Once this is done, these works can't be
2801 	 * scheduled again until the reset operation is complete.
2802 	 */
2803 	cancel_work_sync(&sched->sync_upd_work);
2804 	cancel_delayed_work_sync(&sched->tick_work);
2805 
2806 	panthor_sched_suspend(ptdev);
2807 
2808 	/* Stop all groups that might still accept jobs, so we don't get passed
2809 	 * new jobs while we're resetting.
2810 	 */
2811 	for (i = 0; i < ARRAY_SIZE(sched->groups.runnable); i++) {
2812 		/* All groups should be in the idle lists. */
2813 		drm_WARN_ON(&ptdev->base, !list_empty(&sched->groups.runnable[i]));
2814 		list_for_each_entry_safe(group, group_tmp, &sched->groups.runnable[i], run_node)
2815 			panthor_group_stop(group);
2816 	}
2817 
2818 	for (i = 0; i < ARRAY_SIZE(sched->groups.idle); i++) {
2819 		list_for_each_entry_safe(group, group_tmp, &sched->groups.idle[i], run_node)
2820 			panthor_group_stop(group);
2821 	}
2822 
2823 	mutex_unlock(&sched->reset.lock);
2824 }
2825 
2826 void panthor_sched_post_reset(struct panthor_device *ptdev, bool reset_failed)
2827 {
2828 	struct panthor_scheduler *sched = ptdev->scheduler;
2829 	struct panthor_group *group, *group_tmp;
2830 
2831 	mutex_lock(&sched->reset.lock);
2832 
2833 	list_for_each_entry_safe(group, group_tmp, &sched->reset.stopped_groups, run_node) {
2834 		/* Consider all previously running groups as terminated if the
2835 		 * reset failed.
2836 		 */
2837 		if (reset_failed)
2838 			group->state = PANTHOR_CS_GROUP_TERMINATED;
2839 
2840 		panthor_group_start(group);
2841 	}
2842 
2843 	/* We're done resetting the GPU, clear the reset.in_progress bit so we can
2844 	 * kick the scheduler.
2845 	 */
2846 	atomic_set(&sched->reset.in_progress, false);
2847 	mutex_unlock(&sched->reset.lock);
2848 
2849 	/* No need to queue a tick and update syncs if the reset failed. */
2850 	if (!reset_failed) {
2851 		sched_queue_delayed_work(sched, tick, 0);
2852 		sched_queue_work(sched, sync_upd);
2853 	}
2854 }
2855 
2856 static void update_fdinfo_stats(struct panthor_job *job)
2857 {
2858 	struct panthor_group *group = job->group;
2859 	struct panthor_queue *queue = group->queues[job->queue_idx];
2860 	struct panthor_gpu_usage *fdinfo = &group->fdinfo.data;
2861 	struct panthor_job_profiling_data *slots = queue->profiling.slots->kmap;
2862 	struct panthor_job_profiling_data *data = &slots[job->profiling.slot];
2863 
2864 	mutex_lock(&group->fdinfo.lock);
2865 	if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_CYCLES)
2866 		fdinfo->cycles += data->cycles.after - data->cycles.before;
2867 	if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_TIMESTAMP)
2868 		fdinfo->time += data->time.after - data->time.before;
2869 	mutex_unlock(&group->fdinfo.lock);
2870 }
2871 
2872 void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile)
2873 {
2874 	struct panthor_group_pool *gpool = pfile->groups;
2875 	struct panthor_group *group;
2876 	unsigned long i;
2877 
2878 	if (IS_ERR_OR_NULL(gpool))
2879 		return;
2880 
2881 	xa_for_each(&gpool->xa, i, group) {
2882 		mutex_lock(&group->fdinfo.lock);
2883 		pfile->stats.cycles += group->fdinfo.data.cycles;
2884 		pfile->stats.time += group->fdinfo.data.time;
2885 		group->fdinfo.data.cycles = 0;
2886 		group->fdinfo.data.time = 0;
2887 		mutex_unlock(&group->fdinfo.lock);
2888 	}
2889 }
2890 
2891 static void group_sync_upd_work(struct work_struct *work)
2892 {
2893 	struct panthor_group *group =
2894 		container_of(work, struct panthor_group, sync_upd_work);
2895 	struct panthor_job *job, *job_tmp;
2896 	LIST_HEAD(done_jobs);
2897 	u32 queue_idx;
2898 	bool cookie;
2899 
2900 	cookie = dma_fence_begin_signalling();
2901 	for (queue_idx = 0; queue_idx < group->queue_count; queue_idx++) {
2902 		struct panthor_queue *queue = group->queues[queue_idx];
2903 		struct panthor_syncobj_64b *syncobj;
2904 
2905 		if (!queue)
2906 			continue;
2907 
2908 		syncobj = group->syncobjs->kmap + (queue_idx * sizeof(*syncobj));
2909 
2910 		spin_lock(&queue->fence_ctx.lock);
2911 		list_for_each_entry_safe(job, job_tmp, &queue->fence_ctx.in_flight_jobs, node) {
2912 			if (syncobj->seqno < job->done_fence->seqno)
2913 				break;
2914 
2915 			list_move_tail(&job->node, &done_jobs);
2916 			dma_fence_signal_locked(job->done_fence);
2917 		}
2918 		spin_unlock(&queue->fence_ctx.lock);
2919 	}
2920 	dma_fence_end_signalling(cookie);
2921 
2922 	list_for_each_entry_safe(job, job_tmp, &done_jobs, node) {
2923 		if (job->profiling.mask)
2924 			update_fdinfo_stats(job);
2925 		list_del_init(&job->node);
2926 		panthor_job_put(&job->base);
2927 	}
2928 
2929 	group_put(group);
2930 }
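
/* Worked example of the seqno gating in group_sync_upd_work(), with
 * made-up values: if a queue has in-flight jobs whose done_fence seqnos
 * are 5, 6 and 7 and the FW-updated syncobj reports seqno = 6, the loop
 * signals jobs 5 and 6 and stops at 7 (since 6 < 7), leaving it in the
 * in_flight_jobs list until a later sync update raises the syncobj seqno.
 */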
2931 
2932 struct panthor_job_ringbuf_instrs {
2933 	u64 buffer[MAX_INSTRS_PER_JOB];
2934 	u32 count;
2935 };
2936 
2937 struct panthor_job_instr {
2938 	u32 profile_mask;
2939 	u64 instr;
2940 };
2941 
2942 #define JOB_INSTR(__prof, __instr) \
2943 	{ \
2944 		.profile_mask = __prof, \
2945 		.instr = __instr, \
2946 	}
2947 
2948 static void
2949 copy_instrs_to_ringbuf(struct panthor_queue *queue,
2950 		       struct panthor_job *job,
2951 		       struct panthor_job_ringbuf_instrs *instrs)
2952 {
2953 	u64 ringbuf_size = panthor_kernel_bo_size(queue->ringbuf);
2954 	u64 start = job->ringbuf.start & (ringbuf_size - 1);
2955 	u64 size, written;
2956 
2957 	/*
2958 	 * We need to write a whole slot, including any trailing zeroes
2959 	 * that may come at the end of it. Also, because instrs.buffer has
2960 	 * been zero-initialised, there's no need to pad it with 0's.
2961 	 */
2962 	instrs->count = ALIGN(instrs->count, NUM_INSTRS_PER_CACHE_LINE);
2963 	size = instrs->count * sizeof(u64);
2964 	WARN_ON(size > ringbuf_size);
2965 	written = min(ringbuf_size - start, size);
2966 
2967 	memcpy(queue->ringbuf->kmap + start, instrs->buffer, written);
2968 
2969 	if (written < size)
2970 		memcpy(queue->ringbuf->kmap,
2971 		       &instrs->buffer[written / sizeof(u64)],
2972 		       size - written);
2973 }
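
/* Worked example of the wrap-around copy above, with hypothetical sizes:
 * with a 4096-byte ring buffer, a slot starting at offset 4064 and a
 * 64-byte instruction sequence, written = min(4096 - 4064, 64) = 32, so
 * the first 32 bytes land at the end of the buffer and the remaining
 * 32 bytes are copied to offset 0 by the second memcpy().
 */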
2974 
2975 struct panthor_job_cs_params {
2976 	u32 profile_mask;
2977 	u64 addr_reg; u64 val_reg;
2978 	u64 cycle_reg; u64 time_reg;
2979 	u64 sync_addr; u64 times_addr;
2980 	u64 cs_start; u64 cs_size;
2981 	u32 last_flush; u32 waitall_mask;
2982 };
2983 
2984 static void
2985 get_job_cs_params(struct panthor_job *job, struct panthor_job_cs_params *params)
2986 {
2987 	struct panthor_group *group = job->group;
2988 	struct panthor_queue *queue = group->queues[job->queue_idx];
2989 	struct panthor_device *ptdev = group->ptdev;
2990 	struct panthor_scheduler *sched = ptdev->scheduler;
2991 
2992 	params->addr_reg = ptdev->csif_info.cs_reg_count -
2993 			   ptdev->csif_info.unpreserved_cs_reg_count;
2994 	params->val_reg = params->addr_reg + 2;
2995 	params->cycle_reg = params->addr_reg;
2996 	params->time_reg = params->val_reg;
2997 
2998 	params->sync_addr = panthor_kernel_bo_gpuva(group->syncobjs) +
2999 			    job->queue_idx * sizeof(struct panthor_syncobj_64b);
3000 	params->times_addr = panthor_kernel_bo_gpuva(queue->profiling.slots) +
3001 			     (job->profiling.slot * sizeof(struct panthor_job_profiling_data));
3002 	params->waitall_mask = GENMASK(sched->sb_slot_count - 1, 0);
3003 
3004 	params->cs_start = job->call_info.start;
3005 	params->cs_size = job->call_info.size;
3006 	params->last_flush = job->call_info.latest_flush;
3007 
3008 	params->profile_mask = job->profiling.mask;
3009 }
3010 
3011 #define JOB_INSTR_ALWAYS(instr) \
3012 	JOB_INSTR(PANTHOR_DEVICE_PROFILING_DISABLED, (instr))
3013 #define JOB_INSTR_TIMESTAMP(instr) \
3014 	JOB_INSTR(PANTHOR_DEVICE_PROFILING_TIMESTAMP, (instr))
3015 #define JOB_INSTR_CYCLES(instr) \
3016 	JOB_INSTR(PANTHOR_DEVICE_PROFILING_CYCLES, (instr))
3017 
3018 static void
3019 prepare_job_instrs(const struct panthor_job_cs_params *params,
3020 		   struct panthor_job_ringbuf_instrs *instrs)
3021 {
3022 	const struct panthor_job_instr instr_seq[] = {
3023 		/* MOV32 rX+2, cs.latest_flush */
3024 		JOB_INSTR_ALWAYS((2ull << 56) | (params->val_reg << 48) | params->last_flush),
3025 		/* FLUSH_CACHE2.clean_inv_all.no_wait.signal(0) rX+2 */
3026 		JOB_INSTR_ALWAYS((36ull << 56) | (0ull << 48) | (params->val_reg << 40) |
3027 				 (0 << 16) | 0x233),
3028 		/* MOV48 rX:rX+1, cycles_offset */
3029 		JOB_INSTR_CYCLES((1ull << 56) | (params->cycle_reg << 48) |
3030 				 (params->times_addr +
3031 				  offsetof(struct panthor_job_profiling_data, cycles.before))),
3032 		/* STORE_STATE cycles */
3033 		JOB_INSTR_CYCLES((40ull << 56) | (params->cycle_reg << 40) | (1ll << 32)),
3034 		/* MOV48 rX:rX+1, time_offset */
3035 		JOB_INSTR_TIMESTAMP((1ull << 56) | (params->time_reg << 48) |
3036 				    (params->times_addr +
3037 				     offsetof(struct panthor_job_profiling_data, time.before))),
3038 		/* STORE_STATE timer */
3039 		JOB_INSTR_TIMESTAMP((40ull << 56) | (params->time_reg << 40) | (0ll << 32)),
3040 		/* MOV48 rX:rX+1, cs.start */
3041 		JOB_INSTR_ALWAYS((1ull << 56) | (params->addr_reg << 48) | params->cs_start),
3042 		/* MOV32 rX+2, cs.size */
3043 		JOB_INSTR_ALWAYS((2ull << 56) | (params->val_reg << 48) | params->cs_size),
3044 		/* WAIT(0) => waits for FLUSH_CACHE2 instruction */
3045 		JOB_INSTR_ALWAYS((3ull << 56) | (1 << 16)),
3046 		/* CALL rX:rX+1, rX+2 */
3047 		JOB_INSTR_ALWAYS((32ull << 56) | (params->addr_reg << 40) |
3048 				 (params->val_reg << 32)),
3049 		/* MOV48 rX:rX+1, cycles_offset */
3050 		JOB_INSTR_CYCLES((1ull << 56) | (params->cycle_reg << 48) |
3051 				 (params->times_addr +
3052 				  offsetof(struct panthor_job_profiling_data, cycles.after))),
3053 		/* STORE_STATE cycles */
3054 		JOB_INSTR_CYCLES((40ull << 56) | (params->cycle_reg << 40) | (1ll << 32)),
3055 		/* MOV48 rX:rX+1, time_offset */
3056 		JOB_INSTR_TIMESTAMP((1ull << 56) | (params->time_reg << 48) |
3057 			  (params->times_addr +
3058 			   offsetof(struct panthor_job_profiling_data, time.after))),
3059 		/* STORE_STATE timer */
3060 		JOB_INSTR_TIMESTAMP((40ull << 56) | (params->time_reg << 40) | (0ll << 32)),
3061 		/* MOV48 rX:rX+1, sync_addr */
3062 		JOB_INSTR_ALWAYS((1ull << 56) | (params->addr_reg << 48) | params->sync_addr),
3063 		/* MOV48 rX+2, #1 */
3064 		JOB_INSTR_ALWAYS((1ull << 56) | (params->val_reg << 48) | 1),
3065 		/* WAIT(all) */
3066 		JOB_INSTR_ALWAYS((3ull << 56) | (params->waitall_mask << 16)),
3067 		/* SYNC_ADD64.system_scope.propagate_err.nowait rX:rX+1, rX+2 */
3068 		JOB_INSTR_ALWAYS((51ull << 56) | (0ull << 48) | (params->addr_reg << 40) |
3069 				 (params->val_reg << 32) | (0 << 16) | 1),
3070 		/* ERROR_BARRIER, so we can recover from faults at job boundaries. */
3071 		JOB_INSTR_ALWAYS((47ull << 56)),
3072 	};
3073 	u32 pad;
3074 
3075 	instrs->count = 0;
3076 
3077 	/* NEEDS to be cacheline aligned to please the prefetcher. */
3078 	static_assert(sizeof(instrs->buffer) % 64 == 0,
3079 		      "panthor_job_ringbuf_instrs::buffer is not aligned on a cacheline");
3080 
3081 	/* Make sure we have enough storage to store the whole sequence. */
3082 	static_assert(ALIGN(ARRAY_SIZE(instr_seq), NUM_INSTRS_PER_CACHE_LINE) ==
3083 		      ARRAY_SIZE(instrs->buffer),
3084 		      "instr_seq vs panthor_job_ringbuf_instrs::buffer size mismatch");
3085 
3086 	for (u32 i = 0; i < ARRAY_SIZE(instr_seq); i++) {
3087 		/* If the profile mask of this instruction is not enabled, skip it. */
3088 		if (instr_seq[i].profile_mask &&
3089 		    !(instr_seq[i].profile_mask & params->profile_mask))
3090 			continue;
3091 
3092 		instrs->buffer[instrs->count++] = instr_seq[i].instr;
3093 	}
3094 
3095 	pad = ALIGN(instrs->count, NUM_INSTRS_PER_CACHE_LINE);
3096 	memset(&instrs->buffer[instrs->count], 0,
3097 	       (pad - instrs->count) * sizeof(instrs->buffer[0]));
3098 	instrs->count = pad;
3099 }
3100 
3101 static u32 calc_job_credits(u32 profile_mask)
3102 {
3103 	struct panthor_job_ringbuf_instrs instrs;
3104 	struct panthor_job_cs_params params = {
3105 		.profile_mask = profile_mask,
3106 	};
3107 
3108 	prepare_job_instrs(&params, &instrs);
3109 	return instrs.count;
3110 }
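
/* Rough credit figures for the sequence above, assuming an 8-instruction
 * cache line (NUM_INSTRS_PER_CACHE_LINE == 64 / sizeof(u64)): with
 * profiling disabled only the JOB_INSTR_ALWAYS entries are kept, and the
 * count is padded up to the next cache-line multiple; enabling cycle or
 * timestamp sampling adds four instructions each, which may or may not
 * spill into another cache line. The exact numbers depend on the
 * instruction sequence, which is why the credits are computed by running
 * prepare_job_instrs() on zeroed CS parameters rather than hard-coded.
 */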
3111 
3112 static struct dma_fence *
3113 queue_run_job(struct drm_sched_job *sched_job)
3114 {
3115 	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
3116 	struct panthor_group *group = job->group;
3117 	struct panthor_queue *queue = group->queues[job->queue_idx];
3118 	struct panthor_device *ptdev = group->ptdev;
3119 	struct panthor_scheduler *sched = ptdev->scheduler;
3120 	struct panthor_job_ringbuf_instrs instrs;
3121 	struct panthor_job_cs_params cs_params;
3122 	struct dma_fence *done_fence;
3123 	int ret;
3124 
3125 	/* Stream size is zero, nothing to do except making sure all previously
3126 	 * submitted jobs are done before we signal the
3127 	 * drm_sched_job::s_fence::finished fence.
3128 	 */
3129 	if (!job->call_info.size) {
3130 		job->done_fence = dma_fence_get(queue->fence_ctx.last_fence);
3131 		return dma_fence_get(job->done_fence);
3132 	}
3133 
3134 	ret = panthor_device_resume_and_get(ptdev);
3135 	if (drm_WARN_ON(&ptdev->base, ret))
3136 		return ERR_PTR(ret);
3137 
3138 	mutex_lock(&sched->lock);
3139 	if (!group_can_run(group)) {
3140 		done_fence = ERR_PTR(-ECANCELED);
3141 		goto out_unlock;
3142 	}
3143 
3144 	dma_fence_init(job->done_fence,
3145 		       &panthor_queue_fence_ops,
3146 		       &queue->fence_ctx.lock,
3147 		       queue->fence_ctx.id,
3148 		       atomic64_inc_return(&queue->fence_ctx.seqno));
3149 
3150 	job->profiling.slot = queue->profiling.seqno++;
3151 	if (queue->profiling.seqno == queue->profiling.slot_count)
3152 		queue->profiling.seqno = 0;
3153 
3154 	job->ringbuf.start = queue->iface.input->insert;
3155 
3156 	get_job_cs_params(job, &cs_params);
3157 	prepare_job_instrs(&cs_params, &instrs);
3158 	copy_instrs_to_ringbuf(queue, job, &instrs);
3159 
3160 	job->ringbuf.end = job->ringbuf.start + (instrs.count * sizeof(u64));
3161 
3162 	panthor_job_get(&job->base);
3163 	spin_lock(&queue->fence_ctx.lock);
3164 	list_add_tail(&job->node, &queue->fence_ctx.in_flight_jobs);
3165 	spin_unlock(&queue->fence_ctx.lock);
3166 
3167 	/* Make sure the ring buffer is updated before the INSERT
3168 	 * register.
3169 	 */
3170 	wmb();
3171 
3172 	queue->iface.input->extract = queue->iface.output->extract;
3173 	queue->iface.input->insert = job->ringbuf.end;
3174 
3175 	if (group->csg_id < 0) {
3176 		/* If the queue is blocked, we want to keep the timeout running, so we
3177 		 * can detect unbounded waits and kill the group when that happens.
3178 		 * Otherwise, we suspend the timeout so the time we spend waiting for
3179 		 * a CSG slot is not counted.
3180 		 */
3181 		if (!(group->blocked_queues & BIT(job->queue_idx)) &&
3182 		    !queue->timeout_suspended) {
3183 			queue->remaining_time = drm_sched_suspend_timeout(&queue->scheduler);
3184 			queue->timeout_suspended = true;
3185 		}
3186 
3187 		group_schedule_locked(group, BIT(job->queue_idx));
3188 	} else {
3189 		gpu_write(ptdev, CSF_DOORBELL(queue->doorbell_id), 1);
3190 		if (!sched->pm.has_ref &&
3191 		    !(group->blocked_queues & BIT(job->queue_idx))) {
3192 			pm_runtime_get(ptdev->base.dev);
3193 			sched->pm.has_ref = true;
3194 		}
3195 		panthor_devfreq_record_busy(sched->ptdev);
3196 	}
3197 
3198 	/* Update the last fence. */
3199 	dma_fence_put(queue->fence_ctx.last_fence);
3200 	queue->fence_ctx.last_fence = dma_fence_get(job->done_fence);
3201 
3202 	done_fence = dma_fence_get(job->done_fence);
3203 
3204 out_unlock:
3205 	mutex_unlock(&sched->lock);
3206 	pm_runtime_mark_last_busy(ptdev->base.dev);
3207 	pm_runtime_put_autosuspend(ptdev->base.dev);
3208 
3209 	return done_fence;
3210 }
3211 
3212 static enum drm_gpu_sched_stat
3213 queue_timedout_job(struct drm_sched_job *sched_job)
3214 {
3215 	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
3216 	struct panthor_group *group = job->group;
3217 	struct panthor_device *ptdev = group->ptdev;
3218 	struct panthor_scheduler *sched = ptdev->scheduler;
3219 	struct panthor_queue *queue = group->queues[job->queue_idx];
3220 
3221 	drm_warn(&ptdev->base, "job timeout\n");
3222 
3223 	drm_WARN_ON(&ptdev->base, atomic_read(&sched->reset.in_progress));
3224 
3225 	queue_stop(queue, job);
3226 
3227 	mutex_lock(&sched->lock);
3228 	group->timedout = true;
3229 	if (group->csg_id >= 0) {
3230 		sched_queue_delayed_work(ptdev->scheduler, tick, 0);
3231 	} else {
3232 		/* Remove from the run queues, so the scheduler can't
3233 		 * pick the group on the next tick.
3234 		 */
3235 		list_del_init(&group->run_node);
3236 		list_del_init(&group->wait_node);
3237 
3238 		group_queue_work(group, term);
3239 	}
3240 	mutex_unlock(&sched->lock);
3241 
3242 	queue_start(queue);
3243 
3244 	return DRM_GPU_SCHED_STAT_NOMINAL;
3245 }
3246 
3247 static void queue_free_job(struct drm_sched_job *sched_job)
3248 {
3249 	drm_sched_job_cleanup(sched_job);
3250 	panthor_job_put(sched_job);
3251 }
3252 
3253 static const struct drm_sched_backend_ops panthor_queue_sched_ops = {
3254 	.run_job = queue_run_job,
3255 	.timedout_job = queue_timedout_job,
3256 	.free_job = queue_free_job,
3257 };
3258 
3259 static u32 calc_profiling_ringbuf_num_slots(struct panthor_device *ptdev,
3260 					    u32 cs_ringbuf_size)
3261 {
3262 	u32 min_profiled_job_instrs = U32_MAX;
3263 	u32 last_flag = fls(PANTHOR_DEVICE_PROFILING_ALL);
3264 
3265 	/*
3266 	 * We want to know the minimum size of a profiled job's CS: since
3267 	 * profiled jobs need additional instructions to sample performance
3268 	 * metrics, they can take up more slots in the queue's ring buffer,
3269 	 * which means fewer of them fit at once and we don't need as many
3270 	 * job slots to keep track of their profiling information. What we
3271 	 * need is the maximum number of slots we should allocate to this end,
3272 	 * which matches the maximum number of profiled jobs we can place
3273 	 * simultaneously in the queue's ring buffer.
3274 	 * That has to be calculated separately for every job profiling flag,
3275 	 * but not when job profiling is disabled, since unprofiled jobs don't
3276 	 * need to keep track of this at all.
3277 	 */
3278 	for (u32 i = 0; i < last_flag; i++) {
3279 		min_profiled_job_instrs =
3280 			min(min_profiled_job_instrs, calc_job_credits(BIT(i)));
3281 	}
3282 
3283 	return DIV_ROUND_UP(cs_ringbuf_size, min_profiled_job_instrs * sizeof(u64));
3284 }
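
/*
 * Illustrative sketch (the numbers are assumptions, not values queried
 * from the FW): if the cheapest profiled job variant costs 16 u64
 * instructions and the ring buffer is 16 KiB, then at most
 *
 *	DIV_ROUND_UP(SZ_16K, 16 * sizeof(u64)) = 128
 *
 * profiled jobs can sit in the ring buffer simultaneously, so 128
 * profiling slots get allocated for the queue.
 */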
3285 
3286 static struct panthor_queue *
3287 group_create_queue(struct panthor_group *group,
3288 		   const struct drm_panthor_queue_create *args)
3289 {
3290 	struct drm_gpu_scheduler *drm_sched;
3291 	struct panthor_queue *queue;
3292 	int ret;
3293 
3294 	if (args->pad[0] || args->pad[1] || args->pad[2])
3295 		return ERR_PTR(-EINVAL);
3296 
3297 	if (args->ringbuf_size < SZ_4K || args->ringbuf_size > SZ_64K ||
3298 	    !is_power_of_2(args->ringbuf_size))
3299 		return ERR_PTR(-EINVAL);
3300 
3301 	if (args->priority > CSF_MAX_QUEUE_PRIO)
3302 		return ERR_PTR(-EINVAL);
3303 
3304 	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
3305 	if (!queue)
3306 		return ERR_PTR(-ENOMEM);
3307 
3308 	queue->fence_ctx.id = dma_fence_context_alloc(1);
3309 	spin_lock_init(&queue->fence_ctx.lock);
3310 	INIT_LIST_HEAD(&queue->fence_ctx.in_flight_jobs);
3311 
3312 	queue->priority = args->priority;
3313 
3314 	queue->ringbuf = panthor_kernel_bo_create(group->ptdev, group->vm,
3315 						  args->ringbuf_size,
3316 						  DRM_PANTHOR_BO_NO_MMAP,
3317 						  DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
3318 						  DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
3319 						  PANTHOR_VM_KERNEL_AUTO_VA);
3320 	if (IS_ERR(queue->ringbuf)) {
3321 		ret = PTR_ERR(queue->ringbuf);
3322 		goto err_free_queue;
3323 	}
3324 
3325 	ret = panthor_kernel_bo_vmap(queue->ringbuf);
3326 	if (ret)
3327 		goto err_free_queue;
3328 
3329 	queue->iface.mem = panthor_fw_alloc_queue_iface_mem(group->ptdev,
3330 							    &queue->iface.input,
3331 							    &queue->iface.output,
3332 							    &queue->iface.input_fw_va,
3333 							    &queue->iface.output_fw_va);
3334 	if (IS_ERR(queue->iface.mem)) {
3335 		ret = PTR_ERR(queue->iface.mem);
3336 		goto err_free_queue;
3337 	}
3338 
3339 	queue->profiling.slot_count =
3340 		calc_profiling_ringbuf_num_slots(group->ptdev, args->ringbuf_size);
3341 
3342 	queue->profiling.slots =
3343 		panthor_kernel_bo_create(group->ptdev, group->vm,
3344 					 queue->profiling.slot_count *
3345 					 sizeof(struct panthor_job_profiling_data),
3346 					 DRM_PANTHOR_BO_NO_MMAP,
3347 					 DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
3348 					 DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
3349 					 PANTHOR_VM_KERNEL_AUTO_VA);
3350 
3351 	if (IS_ERR(queue->profiling.slots)) {
3352 		ret = PTR_ERR(queue->profiling.slots);
3353 		goto err_free_queue;
3354 	}
3355 
3356 	ret = panthor_kernel_bo_vmap(queue->profiling.slots);
3357 	if (ret)
3358 		goto err_free_queue;
3359 
3360 	/*
3361 	 * Credit limit argument tells us the total number of instructions
3362 	 * across all CS slots in the ringbuffer, with some jobs requiring
3363 	 * twice as many as others, depending on their profiling status.
3364 	 */
3365 	ret = drm_sched_init(&queue->scheduler, &panthor_queue_sched_ops,
3366 			     group->ptdev->scheduler->wq, 1,
3367 			     args->ringbuf_size / sizeof(u64),
3368 			     0, msecs_to_jiffies(JOB_TIMEOUT_MS),
3369 			     group->ptdev->reset.wq,
3370 			     NULL, "panthor-queue", group->ptdev->base.dev);
3371 	if (ret)
3372 		goto err_free_queue;
3373 
3374 	drm_sched = &queue->scheduler;
3375 	ret = drm_sched_entity_init(&queue->entity, 0, &drm_sched, 1, NULL);
	if (ret)
		goto err_free_queue;
3376 
3377 	return queue;
3378 
3379 err_free_queue:
3380 	group_free_queue(group, queue);
3381 	return ERR_PTR(ret);
3382 }
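
/*
 * Illustrative sketch of the credit accounting used above (the ring-buffer
 * size is hypothetical): the credit limit passed to drm_sched_init() is
 * args->ringbuf_size / sizeof(u64), i.e. the number of instruction slots
 * in the ring buffer. A 4 KiB ring buffer therefore gives
 *
 *	SZ_4K / sizeof(u64) = 512 credits,
 *
 * and each job consumes calc_job_credits(profiling_mask) of them, so
 * drm_sched never pushes more instructions than the ring buffer can hold.
 */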
3383 
3384 #define MAX_GROUPS_PER_POOL		128
3385 
3386 int panthor_group_create(struct panthor_file *pfile,
3387 			 const struct drm_panthor_group_create *group_args,
3388 			 const struct drm_panthor_queue_create *queue_args)
3389 {
3390 	struct panthor_device *ptdev = pfile->ptdev;
3391 	struct panthor_group_pool *gpool = pfile->groups;
3392 	struct panthor_scheduler *sched = ptdev->scheduler;
3393 	struct panthor_fw_csg_iface *csg_iface = panthor_fw_get_csg_iface(ptdev, 0);
3394 	struct panthor_group *group = NULL;
3395 	u32 gid, i, suspend_size;
3396 	int ret;
3397 
3398 	if (group_args->pad)
3399 		return -EINVAL;
3400 
3401 	if (group_args->priority >= PANTHOR_CSG_PRIORITY_COUNT)
3402 		return -EINVAL;
3403 
3404 	if ((group_args->compute_core_mask & ~ptdev->gpu_info.shader_present) ||
3405 	    (group_args->fragment_core_mask & ~ptdev->gpu_info.shader_present) ||
3406 	    (group_args->tiler_core_mask & ~ptdev->gpu_info.tiler_present))
3407 		return -EINVAL;
3408 
3409 	if (hweight64(group_args->compute_core_mask) < group_args->max_compute_cores ||
3410 	    hweight64(group_args->fragment_core_mask) < group_args->max_fragment_cores ||
3411 	    hweight64(group_args->tiler_core_mask) < group_args->max_tiler_cores)
3412 		return -EINVAL;
3413 
3414 	group = kzalloc(sizeof(*group), GFP_KERNEL);
3415 	if (!group)
3416 		return -ENOMEM;
3417 
3418 	spin_lock_init(&group->fatal_lock);
3419 	kref_init(&group->refcount);
3420 	group->state = PANTHOR_CS_GROUP_CREATED;
3421 	group->csg_id = -1;
3422 
3423 	group->ptdev = ptdev;
3424 	group->max_compute_cores = group_args->max_compute_cores;
3425 	group->compute_core_mask = group_args->compute_core_mask;
3426 	group->max_fragment_cores = group_args->max_fragment_cores;
3427 	group->fragment_core_mask = group_args->fragment_core_mask;
3428 	group->max_tiler_cores = group_args->max_tiler_cores;
3429 	group->tiler_core_mask = group_args->tiler_core_mask;
3430 	group->priority = group_args->priority;
3431 
3432 	INIT_LIST_HEAD(&group->wait_node);
3433 	INIT_LIST_HEAD(&group->run_node);
3434 	INIT_WORK(&group->term_work, group_term_work);
3435 	INIT_WORK(&group->sync_upd_work, group_sync_upd_work);
3436 	INIT_WORK(&group->tiler_oom_work, group_tiler_oom_work);
3437 	INIT_WORK(&group->release_work, group_release_work);
3438 
3439 	group->vm = panthor_vm_pool_get_vm(pfile->vms, group_args->vm_id);
3440 	if (!group->vm) {
3441 		ret = -EINVAL;
3442 		goto err_put_group;
3443 	}
3444 
3445 	suspend_size = csg_iface->control->suspend_size;
3446 	group->suspend_buf = panthor_fw_alloc_suspend_buf_mem(ptdev, suspend_size);
3447 	if (IS_ERR(group->suspend_buf)) {
3448 		ret = PTR_ERR(group->suspend_buf);
3449 		group->suspend_buf = NULL;
3450 		goto err_put_group;
3451 	}
3452 
3453 	suspend_size = csg_iface->control->protm_suspend_size;
3454 	group->protm_suspend_buf = panthor_fw_alloc_suspend_buf_mem(ptdev, suspend_size);
3455 	if (IS_ERR(group->protm_suspend_buf)) {
3456 		ret = PTR_ERR(group->protm_suspend_buf);
3457 		group->protm_suspend_buf = NULL;
3458 		goto err_put_group;
3459 	}
3460 
3461 	group->syncobjs = panthor_kernel_bo_create(ptdev, group->vm,
3462 						   group_args->queues.count *
3463 						   sizeof(struct panthor_syncobj_64b),
3464 						   DRM_PANTHOR_BO_NO_MMAP,
3465 						   DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
3466 						   DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
3467 						   PANTHOR_VM_KERNEL_AUTO_VA);
3468 	if (IS_ERR(group->syncobjs)) {
3469 		ret = PTR_ERR(group->syncobjs);
3470 		goto err_put_group;
3471 	}
3472 
3473 	ret = panthor_kernel_bo_vmap(group->syncobjs);
3474 	if (ret)
3475 		goto err_put_group;
3476 
3477 	memset(group->syncobjs->kmap, 0,
3478 	       group_args->queues.count * sizeof(struct panthor_syncobj_64b));
3479 
3480 	for (i = 0; i < group_args->queues.count; i++) {
3481 		group->queues[i] = group_create_queue(group, &queue_args[i]);
3482 		if (IS_ERR(group->queues[i])) {
3483 			ret = PTR_ERR(group->queues[i]);
3484 			group->queues[i] = NULL;
3485 			goto err_put_group;
3486 		}
3487 
3488 		group->queue_count++;
3489 	}
3490 
3491 	group->idle_queues = GENMASK(group->queue_count - 1, 0);
3492 
3493 	ret = xa_alloc(&gpool->xa, &gid, group, XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL);
3494 	if (ret)
3495 		goto err_put_group;
3496 
3497 	mutex_lock(&sched->reset.lock);
3498 	if (atomic_read(&sched->reset.in_progress)) {
3499 		panthor_group_stop(group);
3500 	} else {
3501 		mutex_lock(&sched->lock);
3502 		list_add_tail(&group->run_node,
3503 			      &sched->groups.idle[group->priority]);
3504 		mutex_unlock(&sched->lock);
3505 	}
3506 	mutex_unlock(&sched->reset.lock);
3507 
3508 	mutex_init(&group->fdinfo.lock);
3509 
3510 	return gid;
3511 
3512 err_put_group:
3513 	group_put(group);
3514 	return ret;
3515 }
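
/*
 * A note on handles, as implied by the xa_alloc() call above: group
 * handles are taken from XA_LIMIT(1, MAX_GROUPS_PER_POOL), so valid
 * handles range from 1 to 128 and 0 is never handed out. The returned
 * gid is the handle userspace later passes to panthor_group_destroy(),
 * panthor_group_get_state() and panthor_job_create().
 */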
3516 
3517 int panthor_group_destroy(struct panthor_file *pfile, u32 group_handle)
3518 {
3519 	struct panthor_group_pool *gpool = pfile->groups;
3520 	struct panthor_device *ptdev = pfile->ptdev;
3521 	struct panthor_scheduler *sched = ptdev->scheduler;
3522 	struct panthor_group *group;
3523 
3524 	group = xa_erase(&gpool->xa, group_handle);
3525 	if (!group)
3526 		return -EINVAL;
3527 
3528 	for (u32 i = 0; i < group->queue_count; i++) {
3529 		if (group->queues[i])
3530 			drm_sched_entity_destroy(&group->queues[i]->entity);
3531 	}
3532 
3533 	mutex_lock(&sched->reset.lock);
3534 	mutex_lock(&sched->lock);
3535 	group->destroyed = true;
3536 	if (group->csg_id >= 0) {
3537 		sched_queue_delayed_work(sched, tick, 0);
3538 	} else if (!atomic_read(&sched->reset.in_progress)) {
3539 		/* Remove from the run queues, so the scheduler can't
3540 		 * pick the group on the next tick.
3541 		 */
3542 		list_del_init(&group->run_node);
3543 		list_del_init(&group->wait_node);
3544 		group_queue_work(group, term);
3545 	}
3546 	mutex_unlock(&sched->lock);
3547 	mutex_unlock(&sched->reset.lock);
3548 
3549 	group_put(group);
3550 	return 0;
3551 }
3552 
3553 static struct panthor_group *group_from_handle(struct panthor_group_pool *pool,
3554 					       u32 group_handle)
3555 {
3556 	struct panthor_group *group;
3557 
3558 	xa_lock(&pool->xa);
3559 	group = group_get(xa_load(&pool->xa, group_handle));
3560 	xa_unlock(&pool->xa);
3561 
3562 	return group;
3563 }
3564 
3565 int panthor_group_get_state(struct panthor_file *pfile,
3566 			    struct drm_panthor_group_get_state *get_state)
3567 {
3568 	struct panthor_group_pool *gpool = pfile->groups;
3569 	struct panthor_device *ptdev = pfile->ptdev;
3570 	struct panthor_scheduler *sched = ptdev->scheduler;
3571 	struct panthor_group *group;
3572 
3573 	if (get_state->pad)
3574 		return -EINVAL;
3575 
3576 	group = group_from_handle(gpool, get_state->group_handle);
3577 	if (!group)
3578 		return -EINVAL;
3579 
3580 	memset(get_state, 0, sizeof(*get_state));
3581 
3582 	mutex_lock(&sched->lock);
3583 	if (group->timedout)
3584 		get_state->state |= DRM_PANTHOR_GROUP_STATE_TIMEDOUT;
3585 	if (group->fatal_queues) {
3586 		get_state->state |= DRM_PANTHOR_GROUP_STATE_FATAL_FAULT;
3587 		get_state->fatal_queues = group->fatal_queues;
3588 	}
3589 	if (group->innocent)
3590 		get_state->state |= DRM_PANTHOR_GROUP_STATE_INNOCENT;
3591 	mutex_unlock(&sched->lock);
3592 
3593 	group_put(group);
3594 	return 0;
3595 }
3596 
3597 int panthor_group_pool_create(struct panthor_file *pfile)
3598 {
3599 	struct panthor_group_pool *gpool;
3600 
3601 	gpool = kzalloc(sizeof(*gpool), GFP_KERNEL);
3602 	if (!gpool)
3603 		return -ENOMEM;
3604 
3605 	xa_init_flags(&gpool->xa, XA_FLAGS_ALLOC1);
3606 	pfile->groups = gpool;
3607 	return 0;
3608 }
3609 
3610 void panthor_group_pool_destroy(struct panthor_file *pfile)
3611 {
3612 	struct panthor_group_pool *gpool = pfile->groups;
3613 	struct panthor_group *group;
3614 	unsigned long i;
3615 
3616 	if (IS_ERR_OR_NULL(gpool))
3617 		return;
3618 
3619 	xa_for_each(&gpool->xa, i, group)
3620 		panthor_group_destroy(pfile, i);
3621 
3622 	xa_destroy(&gpool->xa);
3623 	kfree(gpool);
3624 	pfile->groups = NULL;
3625 }
3626 
3627 static void job_release(struct kref *ref)
3628 {
3629 	struct panthor_job *job = container_of(ref, struct panthor_job, refcount);
3630 
3631 	drm_WARN_ON(&job->group->ptdev->base, !list_empty(&job->node));
3632 
3633 	if (job->base.s_fence)
3634 		drm_sched_job_cleanup(&job->base);
3635 
3636 	if (job->done_fence && job->done_fence->ops)
3637 		dma_fence_put(job->done_fence);
3638 	else
3639 		dma_fence_free(job->done_fence);
3640 
3641 	group_put(job->group);
3642 
3643 	kfree(job);
3644 }
3645 
3646 struct drm_sched_job *panthor_job_get(struct drm_sched_job *sched_job)
3647 {
3648 	if (sched_job) {
3649 		struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
3650 
3651 		kref_get(&job->refcount);
3652 	}
3653 
3654 	return sched_job;
3655 }
3656 
3657 void panthor_job_put(struct drm_sched_job *sched_job)
3658 {
3659 	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
3660 
3661 	if (sched_job)
3662 		kref_put(&job->refcount, job_release);
3663 }
3664 
3665 struct panthor_vm *panthor_job_vm(struct drm_sched_job *sched_job)
3666 {
3667 	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
3668 
3669 	return job->group->vm;
3670 }
3671 
3672 struct drm_sched_job *
3673 panthor_job_create(struct panthor_file *pfile,
3674 		   u16 group_handle,
3675 		   const struct drm_panthor_queue_submit *qsubmit)
3676 {
3677 	struct panthor_group_pool *gpool = pfile->groups;
3678 	struct panthor_job *job;
3679 	u32 credits;
3680 	int ret;
3681 
3682 	if (qsubmit->pad)
3683 		return ERR_PTR(-EINVAL);
3684 
3685 	/* If stream_addr is zero, stream_size must be zero too (and vice versa). */
3686 	if ((qsubmit->stream_size == 0) != (qsubmit->stream_addr == 0))
3687 		return ERR_PTR(-EINVAL);
3688 
3689 	/* Make sure the address is aligned on a 64-byte (cache line) boundary and
3690 	 * the size is a multiple of 8 bytes (instruction size).
3691 	 */
3692 	if ((qsubmit->stream_addr & 63) || (qsubmit->stream_size & 7))
3693 		return ERR_PTR(-EINVAL);
3694 
3695 	/* bits 24:30 must be zero. */
3696 	if (qsubmit->latest_flush & GENMASK(30, 24))
3697 		return ERR_PTR(-EINVAL);
3698 
3699 	job = kzalloc(sizeof(*job), GFP_KERNEL);
3700 	if (!job)
3701 		return ERR_PTR(-ENOMEM);
3702 
3703 	kref_init(&job->refcount);
3704 	job->queue_idx = qsubmit->queue_index;
3705 	job->call_info.size = qsubmit->stream_size;
3706 	job->call_info.start = qsubmit->stream_addr;
3707 	job->call_info.latest_flush = qsubmit->latest_flush;
3708 	INIT_LIST_HEAD(&job->node);
3709 
3710 	job->group = group_from_handle(gpool, group_handle);
3711 	if (!job->group) {
3712 		ret = -EINVAL;
3713 		goto err_put_job;
3714 	}
3715 
3716 	if (!group_can_run(job->group)) {
3717 		ret = -EINVAL;
3718 		goto err_put_job;
3719 	}
3720 
3721 	if (job->queue_idx >= job->group->queue_count ||
3722 	    !job->group->queues[job->queue_idx]) {
3723 		ret = -EINVAL;
3724 		goto err_put_job;
3725 	}
3726 
3727 	/* Empty command streams don't need a fence; they'll pick up the one from
3728 	 * the previously submitted job.
3729 	 */
3730 	if (job->call_info.size) {
3731 		job->done_fence = kzalloc(sizeof(*job->done_fence), GFP_KERNEL);
3732 		if (!job->done_fence) {
3733 			ret = -ENOMEM;
3734 			goto err_put_job;
3735 		}
3736 	}
3737 
3738 	job->profiling.mask = pfile->ptdev->profile_mask;
3739 	credits = calc_job_credits(job->profiling.mask);
3740 	if (credits == 0) {
3741 		ret = -EINVAL;
3742 		goto err_put_job;
3743 	}
3744 
3745 	ret = drm_sched_job_init(&job->base,
3746 				 &job->group->queues[job->queue_idx]->entity,
3747 				 credits, job->group);
3748 	if (ret)
3749 		goto err_put_job;
3750 
3751 	return &job->base;
3752 
3753 err_put_job:
3754 	panthor_job_put(&job->base);
3755 	return ERR_PTR(ret);
3756 }
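
/*
 * Illustrative sketch of the submit validation above (addresses are made
 * up): a command stream at GPU VA 0x800040 with a 24-byte size passes,
 * because
 *
 *	0x800040 & 63 == 0	(64-byte aligned start)
 *	24 & 7 == 0		(three 8-byte instructions)
 *
 * while a stream at 0x800020, or one with a 20-byte size, is rejected
 * with -EINVAL before any drm_sched job is initialized.
 */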
3757 
3758 void panthor_job_update_resvs(struct drm_exec *exec, struct drm_sched_job *sched_job)
3759 {
3760 	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
3761 
3762 	panthor_vm_update_resvs(job->group->vm, exec, &sched_job->s_fence->finished,
3763 				DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
3764 }
3765 
3766 void panthor_sched_unplug(struct panthor_device *ptdev)
3767 {
3768 	struct panthor_scheduler *sched = ptdev->scheduler;
3769 
3770 	cancel_delayed_work_sync(&sched->tick_work);
3771 
3772 	mutex_lock(&sched->lock);
3773 	if (sched->pm.has_ref) {
3774 		pm_runtime_put(ptdev->base.dev);
3775 		sched->pm.has_ref = false;
3776 	}
3777 	mutex_unlock(&sched->lock);
3778 }
3779 
3780 static void panthor_sched_fini(struct drm_device *ddev, void *res)
3781 {
3782 	struct panthor_scheduler *sched = res;
3783 	int prio;
3784 
3785 	if (!sched || !sched->csg_slot_count)
3786 		return;
3787 
3788 	cancel_delayed_work_sync(&sched->tick_work);
3789 
3790 	if (sched->wq)
3791 		destroy_workqueue(sched->wq);
3792 
3793 	if (sched->heap_alloc_wq)
3794 		destroy_workqueue(sched->heap_alloc_wq);
3795 
3796 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
3797 		drm_WARN_ON(ddev, !list_empty(&sched->groups.runnable[prio]));
3798 		drm_WARN_ON(ddev, !list_empty(&sched->groups.idle[prio]));
3799 	}
3800 
3801 	drm_WARN_ON(ddev, !list_empty(&sched->groups.waiting));
3802 }
3803 
3804 int panthor_sched_init(struct panthor_device *ptdev)
3805 {
3806 	struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
3807 	struct panthor_fw_csg_iface *csg_iface = panthor_fw_get_csg_iface(ptdev, 0);
3808 	struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, 0, 0);
3809 	struct panthor_scheduler *sched;
3810 	u32 gpu_as_count, num_groups;
3811 	int prio, ret;
3812 
3813 	sched = drmm_kzalloc(&ptdev->base, sizeof(*sched), GFP_KERNEL);
3814 	if (!sched)
3815 		return -ENOMEM;
3816 
3817 	/* The highest bit in JOB_INT_* is reserved for global IRQs. That
3818 	 * leaves 31 bits for CSG IRQs, hence the MAX_CSGS clamp here.
3819 	 */
3820 	num_groups = min_t(u32, MAX_CSGS, glb_iface->control->group_num);
3821 
3822 	/* The FW-side scheduler might deadlock if two groups with the same
3823 	 * priority try to access a set of resources that overlaps, with part
3824 	 * of the resources being allocated to one group and the other part to
3825 	 * the other group, both groups waiting for the remaining resources to
3826 	 * be allocated. To avoid that, it is recommended to assign each CSG a
3827 	 * different priority. In theory we could allow several groups to have
3828 	 * the same CSG priority if they don't request the same resources, but
3829 	 * that makes the scheduling logic more complicated, so let's clamp
3830 	 * the number of CSG slots to MAX_CSG_PRIO + 1 for now.
3831 	 */
3832 	num_groups = min_t(u32, MAX_CSG_PRIO + 1, num_groups);
3833 
3834 	/* We need at least one AS for the MCU and one for the GPU contexts. */
3835 	gpu_as_count = hweight32(ptdev->gpu_info.as_present & GENMASK(31, 1));
3836 	if (!gpu_as_count) {
3837 		drm_err(&ptdev->base, "Not enough AS (%d, expected at least 2)",
3838 			gpu_as_count + 1);
3839 		return -EINVAL;
3840 	}
3841 
3842 	sched->ptdev = ptdev;
3843 	sched->sb_slot_count = CS_FEATURES_SCOREBOARDS(cs_iface->control->features);
3844 	sched->csg_slot_count = num_groups;
3845 	sched->cs_slot_count = csg_iface->control->stream_num;
3846 	sched->as_slot_count = gpu_as_count;
3847 	ptdev->csif_info.csg_slot_count = sched->csg_slot_count;
3848 	ptdev->csif_info.cs_slot_count = sched->cs_slot_count;
3849 	ptdev->csif_info.scoreboard_slot_count = sched->sb_slot_count;
3850 
3851 	sched->last_tick = 0;
3852 	sched->resched_target = U64_MAX;
3853 	sched->tick_period = msecs_to_jiffies(10);
3854 	INIT_DELAYED_WORK(&sched->tick_work, tick_work);
3855 	INIT_WORK(&sched->sync_upd_work, sync_upd_work);
3856 	INIT_WORK(&sched->fw_events_work, process_fw_events_work);
3857 
3858 	ret = drmm_mutex_init(&ptdev->base, &sched->lock);
3859 	if (ret)
3860 		return ret;
3861 
3862 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
3863 		INIT_LIST_HEAD(&sched->groups.runnable[prio]);
3864 		INIT_LIST_HEAD(&sched->groups.idle[prio]);
3865 	}
3866 	INIT_LIST_HEAD(&sched->groups.waiting);
3867 
3868 	ret = drmm_mutex_init(&ptdev->base, &sched->reset.lock);
3869 	if (ret)
3870 		return ret;
3871 
3872 	INIT_LIST_HEAD(&sched->reset.stopped_groups);
3873 
3874 	/* sched->heap_alloc_wq will be used for heap chunk allocation on
3875 	 * tiler OOM events, which means we can't use the same workqueue for
3876 	 * the scheduler because work items queued by the scheduler are in
3877 	 * the dma-signalling path. Allocate a dedicated heap_alloc_wq to
3878 	 * work around this limitation.
3879 	 *
3880 	 * FIXME: Ultimately, what we need is a failable/non-blocking GEM
3881 	 * allocation path that we can call when a heap OOM is reported. The
3882 	 * FW is smart enough to fall back on other methods if the kernel can't
3883 	 * allocate memory, and fail the tiling job if none of these
3884 	 * countermeasures worked.
3885 	 *
3886 	 * Set WQ_MEM_RECLAIM on sched->wq to unblock the situation when the
3887 	 * system is running out of memory.
3888 	 */
3889 	sched->heap_alloc_wq = alloc_workqueue("panthor-heap-alloc", WQ_UNBOUND, 0);
3890 	sched->wq = alloc_workqueue("panthor-csf-sched", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
3891 	if (!sched->wq || !sched->heap_alloc_wq) {
3892 		panthor_sched_fini(&ptdev->base, sched);
3893 		drm_err(&ptdev->base, "Failed to allocate the workqueues");
3894 		return -ENOMEM;
3895 	}
3896 
3897 	ret = drmm_add_action_or_reset(&ptdev->base, panthor_sched_fini, sched);
3898 	if (ret)
3899 		return ret;
3900 
3901 	ptdev->scheduler = sched;
3902 	return 0;
3903 }
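
/*
 * Illustrative sketch of the AS accounting in panthor_sched_init() (the
 * mask value is hypothetical): with gpu_info.as_present == 0xff, AS 0 is
 * kept for the MCU and
 *
 *	hweight32(0xff & GENMASK(31, 1)) = 7
 *
 * address spaces are left for GPU contexts, so as_slot_count ends up
 * being 7. A mask of 0x1 (only AS 0 present) trips the "Not enough AS"
 * error above.
 */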
3904