xref: /linux/drivers/gpu/drm/panthor/panthor_sched.c (revision dfb31428444b00824b161d8c0741d4868552813a)
1 // SPDX-License-Identifier: GPL-2.0 or MIT
2 /* Copyright 2023 Collabora ltd. */
3 
4 #include <drm/drm_drv.h>
5 #include <drm/drm_exec.h>
6 #include <drm/drm_gem_shmem_helper.h>
7 #include <drm/drm_managed.h>
8 #include <drm/drm_print.h>
9 #include <drm/gpu_scheduler.h>
10 #include <drm/panthor_drm.h>
11 
12 #include <linux/build_bug.h>
13 #include <linux/cleanup.h>
14 #include <linux/clk.h>
15 #include <linux/delay.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/dma-resv.h>
18 #include <linux/firmware.h>
19 #include <linux/interrupt.h>
20 #include <linux/io.h>
21 #include <linux/iopoll.h>
22 #include <linux/iosys-map.h>
23 #include <linux/module.h>
24 #include <linux/platform_device.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/rcupdate.h>
27 
28 #include "panthor_devfreq.h"
29 #include "panthor_device.h"
30 #include "panthor_fw.h"
31 #include "panthor_gem.h"
32 #include "panthor_gpu.h"
33 #include "panthor_heap.h"
34 #include "panthor_mmu.h"
35 #include "panthor_regs.h"
36 #include "panthor_sched.h"
37 
38 /**
39  * DOC: Scheduler
40  *
41  * Mali CSF hardware adopts a firmware-assisted scheduling model, where
42  * the firmware takes care of scheduling aspects, to some extent.
43  *
44  * The scheduling happens at the scheduling group level; each group
45  * contains 1 to N queues (N is FW/hardware dependent, and exposed
46  * through the firmware interface). Each queue is assigned a command
47  * stream ring buffer, which serves as a way to get jobs submitted to
48  * the GPU, among other things.
49  *
50  * The firmware can schedule a maximum of M groups (M is FW/hardware
51  * dependent, and exposed through the firmware interface). Past
52  * this maximum number of groups, the kernel must take care of
53  * rotating the groups passed to the firmware so every group gets
54  * a chance to have its queues scheduled for execution.
55  *
56  * The current implementation only supports kernel-mode queues.
57  * In other words, userspace doesn't have access to the ring-buffer.
58  * Instead, userspace passes indirect command stream buffers that are
59  * called from the queue ring-buffer by the kernel using a pre-defined
60  * sequence of command stream instructions to ensure the userspace driver
61  * always gets consistent results (cache maintenance,
62  * synchronization, ...).
63  *
64  * We rely on the drm_gpu_scheduler framework to deal with job
65  * dependencies and submission. As any other driver dealing with a
66  * FW-scheduler, we use the 1:1 entity:scheduler mode, such that each
67  * entity has its own job scheduler. When a job is ready to be executed
68  * (all its dependencies are met), it is pushed to the appropriate
69  * queue ring-buffer, and the group is scheduled for execution if it
70  * wasn't already active.
71  *
72  * Kernel-side group scheduling is timeslice-based. When we have fewer
73  * groups than there are slots, the periodic tick is disabled and we
74  * just let the FW schedule the active groups. When there are more
75  * groups than slots, we give each group a chance to execute work for
76  * a given amount of time, and then re-evaluate and pick new groups
77  * to schedule. The group selection algorithm is based on
78  * priority+round-robin.
79  *
80  * Even though user-mode queues are out of scope right now, the
81  * current design takes them into account by avoiding any guess on the
82  * group/queue state that would be based on information we wouldn't have
83  * if userspace was in charge of the ring-buffer. That's also one of the
84  * reasons we don't do 'cooperative' scheduling (encoding FW group slot
85  * reservation as dma_fence that would be returned from the
86  * drm_gpu_scheduler::prepare_job() hook, and treating group rotation as
87  * a queue of waiters, ordered by job submission order). This approach
88  * would work for kernel-mode queues, but would make user-mode queues a
89  * lot more complicated to retrofit.
90  */
91 
92 #define JOB_TIMEOUT_MS				5000
93 
94 #define MAX_CSG_PRIO				0xf
95 
96 #define NUM_INSTRS_PER_CACHE_LINE		(64 / sizeof(u64))
97 #define MAX_INSTRS_PER_JOB			24
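/* With 64-byte cache lines and 64-bit CS instructions, NUM_INSTRS_PER_CACHE_LINE
 * evaluates to 8. MAX_INSTRS_PER_JOB bounds the number of CS instructions the
 * kernel may emit in the ring buffer for a single submitted job.
 */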
98 
99 struct panthor_group;
100 
101 /**
102  * struct panthor_csg_slot - Command stream group slot
103  *
104  * This represents a FW slot for a scheduling group.
105  */
106 struct panthor_csg_slot {
107 	/** @group: Scheduling group bound to this slot. */
108 	struct panthor_group *group;
109 
110 	/** @priority: Group priority. */
111 	u8 priority;
112 };
113 
114 /**
115  * enum panthor_csg_priority - Group priority
116  */
117 enum panthor_csg_priority {
118 	/** @PANTHOR_CSG_PRIORITY_LOW: Low priority group. */
119 	PANTHOR_CSG_PRIORITY_LOW = 0,
120 
121 	/** @PANTHOR_CSG_PRIORITY_MEDIUM: Medium priority group. */
122 	PANTHOR_CSG_PRIORITY_MEDIUM,
123 
124 	/** @PANTHOR_CSG_PRIORITY_HIGH: High priority group. */
125 	PANTHOR_CSG_PRIORITY_HIGH,
126 
127 	/**
128 	 * @PANTHOR_CSG_PRIORITY_RT: Real-time priority group.
129 	 *
130 	 * Real-time priority allows one to preempt scheduling of other
131 	 * non-real-time groups. When such a group becomes executable,
132 	 * it will evict the group with the lowest non-rt priority if
133 	 * there's no free group slot available.
134 	 */
135 	PANTHOR_CSG_PRIORITY_RT,
136 
137 	/** @PANTHOR_CSG_PRIORITY_COUNT: Number of priority levels. */
138 	PANTHOR_CSG_PRIORITY_COUNT,
139 };
140 
141 /**
142  * struct panthor_scheduler - Object used to manage the scheduler
143  */
144 struct panthor_scheduler {
145 	/** @ptdev: Device. */
146 	struct panthor_device *ptdev;
147 
148 	/**
149 	 * @wq: Workqueue used by our internal scheduler logic and
150 	 * drm_gpu_scheduler.
151 	 *
152 	 * Used for the scheduler tick, group update or other kinds of FW
153 	 * event processing that can't be handled in the threaded interrupt
154 	 * path. Also passed to the drm_gpu_scheduler instances embedded
155 	 * in panthor_queue.
156 	 */
157 	struct workqueue_struct *wq;
158 
159 	/**
160 	 * @heap_alloc_wq: Workqueue used to schedule tiler_oom works.
161 	 *
162 	 * We have a queue dedicated to heap chunk allocation works to avoid
163 	 * blocking the rest of the scheduler if the allocation tries to
164 	 * reclaim memory.
165 	 */
166 	struct workqueue_struct *heap_alloc_wq;
167 
168 	/** @tick_work: Work executed on a scheduling tick. */
169 	struct delayed_work tick_work;
170 
171 	/**
172 	 * @sync_upd_work: Work used to process synchronization object updates.
173 	 *
174 	 * We use this work to unblock queues/groups that were waiting on a
175 	 * synchronization object.
176 	 */
177 	struct work_struct sync_upd_work;
178 
179 	/**
180 	 * @fw_events_work: Work used to process FW events outside the interrupt path.
181 	 *
182 	 * Even if the interrupt is threaded, we need any event processing
183 	 * that requires taking the panthor_scheduler::lock to be processed
184 	 * outside the interrupt path so we don't block the tick logic when
185 	 * it calls panthor_fw_{csg,wait}_wait_acks(). Since most of the
186 	 * event processing requires taking this lock, we just delegate all
187 	 * FW event processing to the scheduler workqueue.
188 	 */
189 	struct work_struct fw_events_work;
190 
191 	/**
192 	 * @fw_events: Bitmask encoding pending FW events.
193 	 */
194 	atomic_t fw_events;
195 
196 	/**
197 	 * @resched_target: When the next tick should occur.
198 	 *
199 	 * Expressed in jiffies.
200 	 */
201 	u64 resched_target;
202 
203 	/**
204 	 * @last_tick: When the last tick occurred.
205 	 *
206 	 * Expressed in jiffies.
207 	 */
208 	u64 last_tick;
209 
210 	/** @tick_period: Tick period in jiffies. */
211 	u64 tick_period;
212 
213 	/**
214 	 * @lock: Lock protecting access to all the scheduler fields.
215 	 *
216 	 * Should be taken in the tick work, the irq handler, and anywhere the @groups
217 	 * fields are touched.
218 	 */
219 	struct mutex lock;
220 
221 	/** @groups: Various lists used to classify groups. */
222 	struct {
223 		/**
224 		 * @runnable: Runnable group lists.
225 		 *
226 		 * When a group has queues that want to execute something,
227 		 * its panthor_group::run_node should be inserted here.
228 		 *
229 		 * One list per-priority.
230 		 */
231 		struct list_head runnable[PANTHOR_CSG_PRIORITY_COUNT];
232 
233 		/**
234 		 * @idle: Idle group lists.
235 		 *
236 		 * When all queues of a group are idle (either because they
237 		 * have nothing to execute, or because they are blocked), the
238 		 * panthor_group::run_node field should be inserted here.
239 		 *
240 		 * One list per-priority.
241 		 */
242 		struct list_head idle[PANTHOR_CSG_PRIORITY_COUNT];
243 
244 		/**
245 		 * @waiting: List of groups whose queues are blocked on a
246 		 * synchronization object.
247 		 *
248 		 * Insert panthor_group::wait_node here when a group is waiting
249 		 * for synchronization objects to be signaled.
250 		 *
251 		 * This list is evaluated in the @sync_upd_work work.
252 		 */
253 		struct list_head waiting;
254 	} groups;
255 
256 	/**
257 	 * @csg_slots: FW command stream group slots.
258 	 */
259 	struct panthor_csg_slot csg_slots[MAX_CSGS];
260 
261 	/** @csg_slot_count: Number of command stream group slots exposed by the FW. */
262 	u32 csg_slot_count;
263 
264 	/** @cs_slot_count: Number of command stream slots per group slot exposed by the FW. */
265 	u32 cs_slot_count;
266 
267 	/** @as_slot_count: Number of address space slots supported by the MMU. */
268 	u32 as_slot_count;
269 
270 	/** @used_csg_slot_count: Number of command stream group slots currently used. */
271 	u32 used_csg_slot_count;
272 
273 	/** @sb_slot_count: Number of scoreboard slots. */
274 	u32 sb_slot_count;
275 
276 	/**
277 	 * @might_have_idle_groups: True if an active group might have become idle.
278 	 *
279 	 * This will force a tick, so other runnable groups can be scheduled if one
280 	 * or more active groups became idle.
281 	 */
282 	bool might_have_idle_groups;
283 
284 	/** @pm: Power management related fields. */
285 	struct {
286 		/** @has_ref: True if the scheduler owns a runtime PM reference. */
287 		bool has_ref;
288 	} pm;
289 
290 	/** @reset: Reset related fields. */
291 	struct {
292 		/** @lock: Lock protecting the other reset fields. */
293 		struct mutex lock;
294 
295 		/**
296 		 * @in_progress: True if a reset is in progress.
297 		 *
298 		 * Set to true in panthor_sched_pre_reset() and back to false in
299 		 * panthor_sched_post_reset().
300 		 */
301 		atomic_t in_progress;
302 
303 		/**
304 		 * @stopped_groups: List containing all groups that were stopped
305 		 * before a reset.
306 		 *
307 		 * Insert panthor_group::run_node in the pre_reset path.
308 		 */
309 		struct list_head stopped_groups;
310 	} reset;
311 };
312 
313 /**
314  * struct panthor_syncobj_32b - 32-bit FW synchronization object
315  */
316 struct panthor_syncobj_32b {
317 	/** @seqno: Sequence number. */
318 	u32 seqno;
319 
320 	/**
321 	 * @status: Status.
322 	 *
323 	 * Not zero on failure.
324 	 */
325 	u32 status;
326 };
327 
328 /**
329  * struct panthor_syncobj_64b - 64-bit FW synchronization object
330  */
331 struct panthor_syncobj_64b {
332 	/** @seqno: Sequence number. */
333 	u64 seqno;
334 
335 	/**
336 	 * @status: Status.
337 	 *
338 	 * Not zero on failure.
339 	 */
340 	u32 status;
341 
342 	/** @pad: MBZ. */
343 	u32 pad;
344 };
345 
346 /**
347  * struct panthor_queue - Execution queue
348  */
349 struct panthor_queue {
350 	/** @scheduler: DRM scheduler used for this queue. */
351 	struct drm_gpu_scheduler scheduler;
352 
353 	/** @entity: DRM scheduling entity used for this queue. */
354 	struct drm_sched_entity entity;
355 
356 	/** @name: DRM scheduler name for this queue. */
357 	char *name;
358 
359 	/** @timeout: Queue timeout related fields. */
360 	struct {
361 		/** @timeout.work: Work executed when a queue timeout occurs. */
362 		struct delayed_work work;
363 
364 		/**
365 		 * @timeout.remaining: Time remaining before a queue timeout.
366 		 *
367 		 * When the timer is running, this value is set to MAX_SCHEDULE_TIMEOUT.
368 		 * When the timer is suspended, it's set to the time remaining when the
369 		 * timer was suspended.
370 		 */
371 		unsigned long remaining;
372 	} timeout;
373 
374 	/**
375 	 * @doorbell_id: Doorbell assigned to this queue.
376 	 *
377 	 * Right now, all groups share the same doorbell, and the doorbell ID
378 	 * is assigned to group_slot + 1 when the group is assigned a slot. But
379 	 * we might decide to provide fine grained doorbell assignment at some
380 	 * point, so we don't have to wake up all queues in a group every time one
381 	 * of them is updated.
382 	 */
383 	u8 doorbell_id;
384 
385 	/**
386 	 * @priority: Priority of the queue inside the group.
387 	 *
388 	 * Must be less than 16 (Only 4 bits available).
389 	 */
390 	u8 priority;
391 #define CSF_MAX_QUEUE_PRIO	GENMASK(3, 0)
392 
393 	/** @ringbuf: Command stream ring-buffer. */
394 	struct panthor_kernel_bo *ringbuf;
395 
396 	/** @iface: Firmware interface. */
397 	struct {
398 		/** @mem: FW memory allocated for this interface. */
399 		struct panthor_kernel_bo *mem;
400 
401 		/** @input: Input interface. */
402 		struct panthor_fw_ringbuf_input_iface *input;
403 
404 		/** @output: Output interface. */
405 		const struct panthor_fw_ringbuf_output_iface *output;
406 
407 		/** @input_fw_va: FW virtual address of the input interface buffer. */
408 		u32 input_fw_va;
409 
410 		/** @output_fw_va: FW virtual address of the output interface buffer. */
411 		u32 output_fw_va;
412 	} iface;
413 
414 	/**
415 	 * @syncwait: Stores information about the synchronization object this
416 	 * queue is waiting on.
417 	 */
418 	struct {
419 		/** @gpu_va: GPU address of the synchronization object. */
420 		u64 gpu_va;
421 
422 		/** @ref: Reference value to compare against. */
423 		u64 ref;
424 
425 		/** @gt: True if this is a greater-than test. */
426 		bool gt;
427 
428 		/** @sync64: True if this is a 64-bit sync object. */
429 		bool sync64;
430 
431 		/** @obj: Buffer object holding the synchronization object. */
432 		struct drm_gem_object *obj;
433 
434 		/** @offset: Offset of the synchronization object inside @obj. */
435 		u64 offset;
436 
437 		/**
438 		 * @kmap: Kernel mapping of the buffer object holding the
439 		 * synchronization object.
440 		 */
441 		void *kmap;
442 	} syncwait;
443 
444 	/** @fence_ctx: Fence context fields. */
445 	struct {
446 		/** @lock: Used to protect access to all fences allocated by this context. */
447 		spinlock_t lock;
448 
449 		/**
450 		 * @id: Fence context ID.
451 		 *
452 		 * Allocated with dma_fence_context_alloc().
453 		 */
454 		u64 id;
455 
456 		/** @seqno: Sequence number of the last initialized fence. */
457 		atomic64_t seqno;
458 
459 		/**
460 		 * @last_fence: Fence of the last submitted job.
461 		 *
462 		 * We return this fence when we get an empty command stream.
463 		 * This way, we are guaranteed that all earlier jobs have completed
464 		 * when drm_sched_job::s_fence::finished is signaled, without having to
465 		 * feed the CS ring buffer with a dummy job that only signals the fence.
466 		 */
467 		struct dma_fence *last_fence;
468 
469 		/**
470 		 * @in_flight_jobs: List containing all in-flight jobs.
471 		 *
472 		 * Used to keep track and signal panthor_job::done_fence when the
473 		 * synchronization object attached to the queue is signaled.
474 		 */
475 		struct list_head in_flight_jobs;
476 	} fence_ctx;
477 
478 	/** @profiling: Job profiling data slots and access information. */
479 	struct {
480 		/** @slots: Kernel BO holding the slots. */
481 		struct panthor_kernel_bo *slots;
482 
483 		/** @slot_count: Number of jobs the ring buffer can hold at once. */
484 		u32 slot_count;
485 
486 		/** @seqno: Index of the next available profiling information slot. */
487 		u32 seqno;
488 	} profiling;
489 };
490 
491 /**
492  * enum panthor_group_state - Scheduling group state.
493  */
494 enum panthor_group_state {
495 	/** @PANTHOR_CS_GROUP_CREATED: Group was created, but not scheduled yet. */
496 	PANTHOR_CS_GROUP_CREATED,
497 
498 	/** @PANTHOR_CS_GROUP_ACTIVE: Group is currently scheduled. */
499 	PANTHOR_CS_GROUP_ACTIVE,
500 
501 	/**
502 	 * @PANTHOR_CS_GROUP_SUSPENDED: Group was scheduled at least once, but is
503 	 * inactive/suspended right now.
504 	 */
505 	PANTHOR_CS_GROUP_SUSPENDED,
506 
507 	/**
508 	 * @PANTHOR_CS_GROUP_TERMINATED: Group was terminated.
509 	 *
510 	 * Can no longer be scheduled. The only allowed action is a destruction.
511 	 */
512 	PANTHOR_CS_GROUP_TERMINATED,
513 
514 	/**
515 	 * @PANTHOR_CS_GROUP_UNKNOWN_STATE: Group is in an unknown state.
516 	 *
517 	 * The FW returned an inconsistent state. The group is flagged unusable
518 	 * and can no longer be scheduled. The only allowed action is a
519 	 * destruction.
520 	 *
521 	 * When that happens, we also schedule a FW reset, to start from a fresh
522 	 * state.
523 	 */
524 	PANTHOR_CS_GROUP_UNKNOWN_STATE,
525 };
526 
527 /**
528  * struct panthor_group - Scheduling group object
529  */
530 struct panthor_group {
531 	/** @refcount: Reference count */
532 	struct kref refcount;
533 
534 	/** @ptdev: Device. */
535 	struct panthor_device *ptdev;
536 
537 	/** @vm: VM bound to the group. */
538 	struct panthor_vm *vm;
539 
540 	/** @compute_core_mask: Mask of shader cores that can be used for compute jobs. */
541 	u64 compute_core_mask;
542 
543 	/** @fragment_core_mask: Mask of shader cores that can be used for fragment jobs. */
544 	u64 fragment_core_mask;
545 
546 	/** @tiler_core_mask: Mask of tiler cores that can be used for tiler jobs. */
547 	u64 tiler_core_mask;
548 
549 	/** @max_compute_cores: Maximum number of shader cores used for compute jobs. */
550 	u8 max_compute_cores;
551 
552 	/** @max_fragment_cores: Maximum number of shader cores used for fragment jobs. */
553 	u8 max_fragment_cores;
554 
555 	/** @max_tiler_cores: Maximum number of tiler cores used for tiler jobs. */
556 	u8 max_tiler_cores;
557 
558 	/** @priority: Group priority (check panthor_csg_priority). */
559 	u8 priority;
560 
561 	/** @blocked_queues: Bitmask reflecting the blocked queues. */
562 	u32 blocked_queues;
563 
564 	/** @idle_queues: Bitmask reflecting the idle queues. */
565 	u32 idle_queues;
566 
567 	/** @fatal_lock: Lock used to protect access to fatal fields. */
568 	spinlock_t fatal_lock;
569 
570 	/** @fatal_queues: Bitmask reflecting the queues that hit a fatal exception. */
571 	u32 fatal_queues;
572 
573 	/** @tiler_oom: Mask of queues that have a tiler OOM event to process. */
574 	atomic_t tiler_oom;
575 
576 	/** @queue_count: Number of queues in this group. */
577 	u32 queue_count;
578 
579 	/** @queues: Queues owned by this group. */
580 	struct panthor_queue *queues[MAX_CS_PER_CSG];
581 
582 	/**
583 	 * @csg_id: ID of the FW group slot.
584 	 *
585 	 * -1 when the group is not scheduled/active.
586 	 */
587 	int csg_id;
588 
589 	/**
590 	 * @destroyed: True when the group has been destroyed.
591 	 *
592 	 * If a group is destroyed it becomes useless: no further jobs can be submitted
593 	 * to its queues. We simply wait for all references to be dropped so we can
594 	 * release the group object.
595 	 */
596 	bool destroyed;
597 
598 	/**
599 	 * @timedout: True when a timeout occurred on any of the queues owned by
600 	 * this group.
601 	 *
602 	 * Timeouts can be reported by drm_sched or by the FW. If a reset is required,
603 	 * and the group can't be suspended, this also leads to a timeout. In any case,
604 	 * any timeout situation is unrecoverable, and the group becomes useless. We
605 	 * simply wait for all references to be dropped so we can release the group
606 	 * object.
607 	 */
608 	bool timedout;
609 
610 	/**
611 	 * @innocent: True when the group becomes unusable because the group suspension
612 	 * failed during a reset.
613 	 *
614 	 * Sometimes the FW was put in a bad state by other groups, causing the group
615 	 * suspension happening in the reset path to fail. In that case, we consider the
616 	 * group innocent.
617 	 */
618 	bool innocent;
619 
620 	/**
621 	 * @syncobjs: Pool of per-queue synchronization objects.
622 	 *
623 	 * One sync object per queue. The position of the sync object is
624 	 * determined by the queue index.
625 	 */
626 	struct panthor_kernel_bo *syncobjs;
627 
628 	/** @fdinfo: Per-file info exposed through /proc/<process>/fdinfo */
629 	struct {
630 		/** @data: Total sampled values for jobs in queues from this group. */
631 		struct panthor_gpu_usage data;
632 
633 		/**
634 		 * @fdinfo.lock: Spinlock to govern concurrent access from drm file's fdinfo
635 		 * callback and job post-completion processing function
636 		 */
637 		spinlock_t lock;
638 
639 		/** @fdinfo.kbo_sizes: Aggregate size of private kernel BO's held by the group. */
640 		size_t kbo_sizes;
641 	} fdinfo;
642 
643 	/** @task_info: Info of current->group_leader that created the group. */
644 	struct {
645 		/** @task_info.pid: pid of current->group_leader */
646 		pid_t pid;
647 
648 		/** @task_info.comm: comm of current->group_leader */
649 		char comm[TASK_COMM_LEN];
650 	} task_info;
651 
652 	/** @state: Group state. */
653 	enum panthor_group_state state;
654 
655 	/**
656 	 * @suspend_buf: Suspend buffer.
657 	 *
658 	 * Stores the state of the group and its queues when a group is suspended.
659 	 * Used at resume time to restore the group in its previous state.
660 	 *
661 	 * The size of the suspend buffer is exposed through the FW interface.
662 	 */
663 	struct panthor_kernel_bo *suspend_buf;
664 
665 	/**
666 	 * @protm_suspend_buf: Protection mode suspend buffer.
667 	 *
668 	 * Stores the state of the group and its queues when a group that's in
669 	 * protection mode is suspended.
670 	 *
671 	 * Used at resume time to restore the group in its previous state.
672 	 *
673 	 * The size of the protection mode suspend buffer is exposed through the
674 	 * FW interface.
675 	 */
676 	struct panthor_kernel_bo *protm_suspend_buf;
677 
678 	/** @sync_upd_work: Work used to check/signal job fences. */
679 	struct work_struct sync_upd_work;
680 
681 	/** @tiler_oom_work: Work used to process tiler OOM events happening on this group. */
682 	struct work_struct tiler_oom_work;
683 
684 	/** @term_work: Work used to finish the group termination procedure. */
685 	struct work_struct term_work;
686 
687 	/**
688 	 * @release_work: Work used to release group resources.
689 	 *
690 	 * We need to postpone the group release to avoid a deadlock when
691 	 * the last ref is released in the tick work.
692 	 */
693 	struct work_struct release_work;
694 
695 	/**
696 	 * @run_node: Node used to insert the group in the
697 	 * panthor_group::groups::{runnable,idle} and
698 	 * panthor_group::reset.stopped_groups lists.
699 	 */
700 	struct list_head run_node;
701 
702 	/**
703 	 * @wait_node: Node used to insert the group in the
704 	 * panthor_group::groups::waiting list.
705 	 */
706 	struct list_head wait_node;
707 };
708 
709 struct panthor_job_profiling_data {
710 	struct {
711 		u64 before;
712 		u64 after;
713 	} cycles;
714 
715 	struct {
716 		u64 before;
717 		u64 after;
718 	} time;
719 };
720 
721 /**
722  * group_queue_work() - Queue a group work
723  * @group: Group to queue the work for.
724  * @wname: Work name.
725  *
726  * Grabs a ref and queues a work item to the scheduler workqueue. If
727  * the work was already queued, we release the reference we grabbed.
728  *
729  * Work callbacks must release the reference we grabbed here.
730  */
731 #define group_queue_work(group, wname) \
732 	do { \
733 		group_get(group); \
734 		if (!queue_work((group)->ptdev->scheduler->wq, &(group)->wname ## _work)) \
735 			group_put(group); \
736 	} while (0)
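/*
 * For example, group_queue_work(group, sync_upd) (used below) grabs a group
 * reference and queues &group->sync_upd_work on the scheduler workqueue,
 * thanks to the wname ## _work token pasting.
 */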
737 
738 /**
739  * sched_queue_work() - Queue a scheduler work.
740  * @sched: Scheduler object.
741  * @wname: Work name.
742  *
743  * Conditionally queues a scheduler work if no reset is pending/in-progress.
744  */
745 #define sched_queue_work(sched, wname) \
746 	do { \
747 		if (!atomic_read(&(sched)->reset.in_progress) && \
748 		    !panthor_device_reset_is_pending((sched)->ptdev)) \
749 			queue_work((sched)->wq, &(sched)->wname ## _work); \
750 	} while (0)
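/*
 * For example, sched_queue_work(ptdev->scheduler, sync_upd) (used below)
 * queues &sched->sync_upd_work, unless a reset is pending or in progress.
 */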
751 
752 /**
753  * sched_queue_delayed_work() - Queue a scheduler delayed work.
754  * @sched: Scheduler object.
755  * @wname: Work name.
756  * @delay: Work delay in jiffies.
757  *
758  * Conditionally queues a scheduler delayed work if no reset is
759  * pending/in-progress.
760  */
761 #define sched_queue_delayed_work(sched, wname, delay) \
762 	do { \
763 		if (!atomic_read(&sched->reset.in_progress) && \
764 		    !panthor_device_reset_is_pending((sched)->ptdev)) \
765 			mod_delayed_work((sched)->wq, &(sched)->wname ## _work, delay); \
766 	} while (0)
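/*
 * For example, sched_queue_delayed_work(sched, tick, 0) (used below) schedules
 * an immediate scheduler tick, unless a reset is pending or in progress.
 */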
767 
768 /*
769  * We currently set the maximum number of groups per file to an arbitrarily low value.
770  * But this can be updated if we need more.
771  */
772 #define MAX_GROUPS_PER_POOL 128
773 
774 /*
775  * Mark added on an entry of group pool Xarray to identify if the group has
776  * been fully initialized and can be accessed elsewhere in the driver code.
777  */
778 #define GROUP_REGISTERED XA_MARK_1
779 
780 /**
781  * struct panthor_group_pool - Group pool
782  *
783  * Each file gets assigned a group pool.
784  */
785 struct panthor_group_pool {
786 	/** @xa: Xarray used to manage group handles. */
787 	struct xarray xa;
788 };
789 
790 /**
791  * struct panthor_job - Used to manage a GPU job
792  */
793 struct panthor_job {
794 	/** @base: Inherit from drm_sched_job. */
795 	struct drm_sched_job base;
796 
797 	/** @refcount: Reference count. */
798 	struct kref refcount;
799 
800 	/** @group: Group of the queue this job will be pushed to. */
801 	struct panthor_group *group;
802 
803 	/** @queue_idx: Index of the queue inside @group. */
804 	u32 queue_idx;
805 
806 	/** @call_info: Information about the userspace command stream call. */
807 	struct {
808 		/** @start: GPU address of the userspace command stream. */
809 		u64 start;
810 
811 		/** @size: Size of the userspace command stream. */
812 		u32 size;
813 
814 		/**
815 		 * @latest_flush: Flush ID at the time the userspace command
816 		 * stream was built.
817 		 *
818 		 * Needed for the flush reduction mechanism.
819 		 */
820 		u32 latest_flush;
821 	} call_info;
822 
823 	/** @ringbuf: Position of this job in the ring buffer. */
824 	struct {
825 		/** @start: Start offset. */
826 		u64 start;
827 
828 		/** @end: End offset. */
829 		u64 end;
830 	} ringbuf;
831 
832 	/**
833 	 * @node: Used to insert the job in the panthor_queue::fence_ctx::in_flight_jobs
834 	 * list.
835 	 */
836 	struct list_head node;
837 
838 	/** @done_fence: Fence signaled when the job is finished or cancelled. */
839 	struct dma_fence *done_fence;
840 
841 	/** @profiling: Job profiling information. */
842 	struct {
843 		/** @mask: Current device job profiling enablement bitmask. */
844 		u32 mask;
845 
846 		/** @slot: Job index in the profiling slots BO. */
847 		u32 slot;
848 	} profiling;
849 };
850 
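/* Drop the CPU mapping and the GEM object reference backing the sync object
 * this queue was waiting on, if any.
 */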
851 static void
852 panthor_queue_put_syncwait_obj(struct panthor_queue *queue)
853 {
854 	if (queue->syncwait.kmap) {
855 		struct iosys_map map = IOSYS_MAP_INIT_VADDR(queue->syncwait.kmap);
856 
857 		drm_gem_vunmap(queue->syncwait.obj, &map);
858 		queue->syncwait.kmap = NULL;
859 	}
860 
861 	drm_gem_object_put(queue->syncwait.obj);
862 	queue->syncwait.obj = NULL;
863 }
864 
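/* Return a CPU pointer to the sync object this queue is waiting on, mapping
 * the backing GEM object on first use and syncing CPU caches so the seqno
 * read by the caller is up-to-date. Returns NULL on failure.
 */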
865 static void *
866 panthor_queue_get_syncwait_obj(struct panthor_group *group, struct panthor_queue *queue)
867 {
868 	struct panthor_device *ptdev = group->ptdev;
869 	struct panthor_gem_object *bo;
870 	struct iosys_map map;
871 	int ret;
872 
873 	if (queue->syncwait.kmap) {
874 		bo = container_of(queue->syncwait.obj,
875 				  struct panthor_gem_object, base.base);
876 		goto out_sync;
877 	}
878 
879 	bo = panthor_vm_get_bo_for_va(group->vm,
880 				      queue->syncwait.gpu_va,
881 				      &queue->syncwait.offset);
882 	if (drm_WARN_ON(&ptdev->base, IS_ERR_OR_NULL(bo)))
883 		goto err_put_syncwait_obj;
884 
885 	queue->syncwait.obj = &bo->base.base;
886 	ret = drm_gem_vmap(queue->syncwait.obj, &map);
887 	if (drm_WARN_ON(&ptdev->base, ret))
888 		goto err_put_syncwait_obj;
889 
890 	queue->syncwait.kmap = map.vaddr;
891 	if (drm_WARN_ON(&ptdev->base, !queue->syncwait.kmap))
892 		goto err_put_syncwait_obj;
893 
894 out_sync:
895 	/* Make sure the CPU caches are invalidated before the seqno is read.
896 	 * panthor_gem_sync() is a NOP if map_wc=true, so no need to check
897 	 * it here.
898 	 */
899 	panthor_gem_sync(&bo->base.base,
900 			 DRM_PANTHOR_BO_SYNC_CPU_CACHE_FLUSH_AND_INVALIDATE,
901 			 queue->syncwait.offset,
902 			 queue->syncwait.sync64 ?
903 			 sizeof(struct panthor_syncobj_64b) :
904 			 sizeof(struct panthor_syncobj_32b));
905 
906 	return queue->syncwait.kmap + queue->syncwait.offset;
907 
908 err_put_syncwait_obj:
909 	panthor_queue_put_syncwait_obj(queue);
910 	return NULL;
911 }
912 
913 static void group_free_queue(struct panthor_group *group, struct panthor_queue *queue)
914 {
915 	if (IS_ERR_OR_NULL(queue))
916 		return;
917 
918 	/* Disable the timeout before tearing down drm_sched components. */
919 	disable_delayed_work_sync(&queue->timeout.work);
920 
921 	if (queue->entity.fence_context)
922 		drm_sched_entity_destroy(&queue->entity);
923 
924 	if (queue->scheduler.ops)
925 		drm_sched_fini(&queue->scheduler);
926 
927 	kfree(queue->name);
928 
929 	panthor_queue_put_syncwait_obj(queue);
930 
931 	panthor_kernel_bo_destroy(queue->ringbuf);
932 	panthor_kernel_bo_destroy(queue->iface.mem);
933 	panthor_kernel_bo_destroy(queue->profiling.slots);
934 
935 	/* Release the last_fence we were holding, if any. */
936 	dma_fence_put(queue->fence_ctx.last_fence);
937 
938 	kfree(queue);
939 }
940 
941 static void group_release_work(struct work_struct *work)
942 {
943 	struct panthor_group *group = container_of(work,
944 						   struct panthor_group,
945 						   release_work);
946 	u32 i;
947 
948 	/* dma-fences may still be accessing group->queues under rcu lock. */
949 	synchronize_rcu();
950 
951 	for (i = 0; i < group->queue_count; i++)
952 		group_free_queue(group, group->queues[i]);
953 
954 	panthor_kernel_bo_destroy(group->suspend_buf);
955 	panthor_kernel_bo_destroy(group->protm_suspend_buf);
956 	panthor_kernel_bo_destroy(group->syncobjs);
957 
958 	panthor_vm_put(group->vm);
959 	kfree(group);
960 }
961 
962 static void group_release(struct kref *kref)
963 {
964 	struct panthor_group *group = container_of(kref,
965 						   struct panthor_group,
966 						   refcount);
967 	struct panthor_device *ptdev = group->ptdev;
968 
969 	drm_WARN_ON(&ptdev->base, group->csg_id >= 0);
970 	drm_WARN_ON(&ptdev->base, !list_empty(&group->run_node));
971 	drm_WARN_ON(&ptdev->base, !list_empty(&group->wait_node));
972 
973 	queue_work(panthor_cleanup_wq, &group->release_work);
974 }
975 
976 static void group_put(struct panthor_group *group)
977 {
978 	if (group)
979 		kref_put(&group->refcount, group_release);
980 }
981 
982 static struct panthor_group *
983 group_get(struct panthor_group *group)
984 {
985 	if (group)
986 		kref_get(&group->refcount);
987 
988 	return group;
989 }
990 
991 /**
992  * group_bind_locked() - Bind a group to a group slot
993  * @group: Group.
994  * @csg_id: Slot.
995  *
996  * Return: 0 on success, a negative error code otherwise.
997  */
998 static int
999 group_bind_locked(struct panthor_group *group, u32 csg_id)
1000 {
1001 	struct panthor_device *ptdev = group->ptdev;
1002 	struct panthor_csg_slot *csg_slot;
1003 	int ret;
1004 
1005 	lockdep_assert_held(&ptdev->scheduler->lock);
1006 
1007 	if (drm_WARN_ON(&ptdev->base, group->csg_id != -1 || csg_id >= MAX_CSGS ||
1008 			ptdev->scheduler->csg_slots[csg_id].group))
1009 		return -EINVAL;
1010 
1011 	ret = panthor_vm_active(group->vm);
1012 	if (ret)
1013 		return ret;
1014 
1015 	csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1016 	group_get(group);
1017 	group->csg_id = csg_id;
1018 
1019 	/* Dummy doorbell allocation: doorbell is assigned to the group and
1020 	 * all queues use the same doorbell.
1021 	 *
1022 	 * TODO: Implement LRU-based doorbell assignment, so the most often
1023 	 * updated queues get their own doorbell, thus avoiding useless checks
1024 	 * on queues belonging to the same group that are rarely updated.
1025 	 */
1026 	for (u32 i = 0; i < group->queue_count; i++)
1027 		group->queues[i]->doorbell_id = csg_id + 1;
1028 
1029 	csg_slot->group = group;
1030 
1031 	return 0;
1032 }
1033 
1034 /**
1035  * group_unbind_locked() - Unbind a group from a slot.
1036  * @group: Group to unbind.
1037  *
1038  * Return: 0 on success, a negative error code otherwise.
1039  */
1040 static int
1041 group_unbind_locked(struct panthor_group *group)
1042 {
1043 	struct panthor_device *ptdev = group->ptdev;
1044 	struct panthor_csg_slot *slot;
1045 
1046 	lockdep_assert_held(&ptdev->scheduler->lock);
1047 
1048 	if (drm_WARN_ON(&ptdev->base, group->csg_id < 0 || group->csg_id >= MAX_CSGS))
1049 		return -EINVAL;
1050 
1051 	if (drm_WARN_ON(&ptdev->base, group->state == PANTHOR_CS_GROUP_ACTIVE))
1052 		return -EINVAL;
1053 
1054 	slot = &ptdev->scheduler->csg_slots[group->csg_id];
1055 	panthor_vm_idle(group->vm);
1056 	group->csg_id = -1;
1057 
1058 	/* Tiler OOM events will be re-issued next time the group is scheduled. */
1059 	atomic_set(&group->tiler_oom, 0);
1060 	cancel_work(&group->tiler_oom_work);
1061 
1062 	for (u32 i = 0; i < group->queue_count; i++)
1063 		group->queues[i]->doorbell_id = -1;
1064 
1065 	slot->group = NULL;
1066 
1067 	group_put(group);
1068 	return 0;
1069 }
1070 
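/* A group is considered idle when every one of its queues is either idle or
 * blocked on a synchronization object.
 */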
1071 static bool
1072 group_is_idle(struct panthor_group *group)
1073 {
1074 	u32 inactive_queues = group->idle_queues | group->blocked_queues;
1075 
1076 	return hweight32(inactive_queues) == group->queue_count;
1077 }
1078 
1079 static bool
1080 group_can_run(struct panthor_group *group)
1081 {
1082 	return group->state != PANTHOR_CS_GROUP_TERMINATED &&
1083 	       group->state != PANTHOR_CS_GROUP_UNKNOWN_STATE &&
1084 	       !group->destroyed && group->fatal_queues == 0 &&
1085 	       !group->timedout;
1086 }
1087 
1088 static bool
1089 queue_timeout_is_suspended(struct panthor_queue *queue)
1090 {
1091 	/* When running, the remaining time is set to MAX_SCHEDULE_TIMEOUT. */
1092 	return queue->timeout.remaining != MAX_SCHEDULE_TIMEOUT;
1093 }
1094 
1095 static void
1096 queue_reset_timeout_locked(struct panthor_queue *queue)
1097 {
1098 	lockdep_assert_held(&queue->fence_ctx.lock);
1099 
1100 	if (!queue_timeout_is_suspended(queue)) {
1101 		mod_delayed_work(queue->scheduler.timeout_wq,
1102 				 &queue->timeout.work,
1103 				 msecs_to_jiffies(JOB_TIMEOUT_MS));
1104 	}
1105 }
1106 
1107 static void
1108 queue_suspend_timeout_locked(struct panthor_queue *queue)
1109 {
1110 	unsigned long qtimeout, now;
1111 	struct panthor_group *group;
1112 	struct panthor_job *job;
1113 	bool timer_was_active;
1114 
1115 	lockdep_assert_held(&queue->fence_ctx.lock);
1116 
1117 	/* Already suspended, nothing to do. */
1118 	if (queue_timeout_is_suspended(queue))
1119 		return;
1120 
1121 	job = list_first_entry_or_null(&queue->fence_ctx.in_flight_jobs,
1122 				       struct panthor_job, node);
1123 	group = job ? job->group : NULL;
1124 
1125 	/* If the queue is blocked and the group is idle, we want the timer to
1126 	 * keep running because the group can't be unblocked by other queues,
1127 	 * so it has to come from an external source, and we want to timebox
1128 	 * this external signalling.
1129 	 */
1130 	if (group && group_can_run(group) &&
1131 	    (group->blocked_queues & BIT(job->queue_idx)) &&
1132 	    group_is_idle(group))
1133 		return;
1134 
1135 	now = jiffies;
1136 	qtimeout = queue->timeout.work.timer.expires;
1137 
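	/* Compute how much of the timeout budget is left: if the timer wasn't
	 * pending or there's no in-flight job, keep the full budget; otherwise
	 * keep the unelapsed portion, clamped to JOB_TIMEOUT_MS. The timer is
	 * re-armed with this value in queue_resume_timeout().
	 */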
1138 	/* Cancel the timer. */
1139 	timer_was_active = cancel_delayed_work(&queue->timeout.work);
1140 	if (!timer_was_active || !job)
1141 		queue->timeout.remaining = msecs_to_jiffies(JOB_TIMEOUT_MS);
1142 	else if (time_after(qtimeout, now))
1143 		queue->timeout.remaining = qtimeout - now;
1144 	else
1145 		queue->timeout.remaining = 0;
1146 
1147 	if (WARN_ON_ONCE(queue->timeout.remaining > msecs_to_jiffies(JOB_TIMEOUT_MS)))
1148 		queue->timeout.remaining = msecs_to_jiffies(JOB_TIMEOUT_MS);
1149 }
1150 
1151 static void
1152 queue_suspend_timeout(struct panthor_queue *queue)
1153 {
1154 	spin_lock(&queue->fence_ctx.lock);
1155 	queue_suspend_timeout_locked(queue);
1156 	spin_unlock(&queue->fence_ctx.lock);
1157 }
1158 
1159 static void
1160 queue_resume_timeout(struct panthor_queue *queue)
1161 {
1162 	spin_lock(&queue->fence_ctx.lock);
1163 
1164 	if (queue_timeout_is_suspended(queue)) {
1165 		mod_delayed_work(queue->scheduler.timeout_wq,
1166 				 &queue->timeout.work,
1167 				 queue->timeout.remaining);
1168 
1169 		queue->timeout.remaining = MAX_SCHEDULE_TIMEOUT;
1170 	}
1171 
1172 	spin_unlock(&queue->fence_ctx.lock);
1173 }
1174 
1175 /**
1176  * cs_slot_prog_locked() - Program a queue slot
1177  * @ptdev: Device.
1178  * @csg_id: Group slot ID.
1179  * @cs_id: Queue slot ID.
1180  *
1181  * Program a queue slot with the queue information so things can start being
1182  * executed on this queue.
1183  *
1184  * The group slot must have a group bound to it already (group_bind_locked()).
1185  */
1186 static void
1187 cs_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
1188 {
1189 	struct panthor_queue *queue = ptdev->scheduler->csg_slots[csg_id].group->queues[cs_id];
1190 	struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1191 
1192 	lockdep_assert_held(&ptdev->scheduler->lock);
1193 
1194 	queue->iface.input->extract = queue->iface.output->extract;
1195 	drm_WARN_ON(&ptdev->base, queue->iface.input->insert < queue->iface.input->extract);
1196 
1197 	cs_iface->input->ringbuf_base = panthor_kernel_bo_gpuva(queue->ringbuf);
1198 	cs_iface->input->ringbuf_size = panthor_kernel_bo_size(queue->ringbuf);
1199 	cs_iface->input->ringbuf_input = queue->iface.input_fw_va;
1200 	cs_iface->input->ringbuf_output = queue->iface.output_fw_va;
1201 	cs_iface->input->config = CS_CONFIG_PRIORITY(queue->priority) |
1202 				  CS_CONFIG_DOORBELL(queue->doorbell_id);
1203 	cs_iface->input->ack_irq_mask = ~0;
1204 	panthor_fw_update_reqs(cs_iface, req,
1205 			       CS_IDLE_SYNC_WAIT |
1206 			       CS_IDLE_EMPTY |
1207 			       CS_STATE_START,
1208 			       CS_IDLE_SYNC_WAIT |
1209 			       CS_IDLE_EMPTY |
1210 			       CS_STATE_MASK);
1211 	if (queue->iface.input->insert != queue->iface.input->extract)
1212 		queue_resume_timeout(queue);
1213 }
1214 
1215 /**
1216  * cs_slot_reset_locked() - Reset a queue slot
1217  * @ptdev: Device.
1218  * @csg_id: Group slot.
1219  * @cs_id: Queue slot.
1220  *
1221  * Change the queue slot state to STOP and suspend the queue timeout if
1222  * the queue is not blocked.
1223  *
1224  * The group slot must have a group bound to it (group_bind_locked()).
1225  */
1226 static int
1227 cs_slot_reset_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
1228 {
1229 	struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1230 	struct panthor_group *group = ptdev->scheduler->csg_slots[csg_id].group;
1231 	struct panthor_queue *queue = group->queues[cs_id];
1232 
1233 	lockdep_assert_held(&ptdev->scheduler->lock);
1234 
1235 	panthor_fw_update_reqs(cs_iface, req,
1236 			       CS_STATE_STOP,
1237 			       CS_STATE_MASK);
1238 
1239 	queue_suspend_timeout(queue);
1240 
1241 	return 0;
1242 }
1243 
1244 /**
1245  * csg_slot_sync_priority_locked() - Synchronize the group slot priority
1246  * @ptdev: Device.
1247  * @csg_id: Group slot ID.
1248  *
1249  * Group slot priority update happens asynchronously. When we receive a
1250  * %CSG_ENDPOINT_CONFIG, we know the update is effective, and can
1251  * reflect it to our panthor_csg_slot object.
1252  */
1253 static void
1254 csg_slot_sync_priority_locked(struct panthor_device *ptdev, u32 csg_id)
1255 {
1256 	struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1257 	struct panthor_fw_csg_iface *csg_iface;
1258 	u64 endpoint_req;
1259 
1260 	lockdep_assert_held(&ptdev->scheduler->lock);
1261 
1262 	csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1263 	endpoint_req = panthor_fw_csg_endpoint_req_get(ptdev, csg_iface);
1264 	csg_slot->priority = CSG_EP_REQ_PRIORITY_GET(endpoint_req);
1265 }
1266 
1267 /**
1268  * cs_slot_sync_queue_state_locked() - Synchronize the queue slot state
1269  * @ptdev: Device.
1270  * @csg_id: Group slot.
1271  * @cs_id: Queue slot.
1272  *
1273  * Queue state is updated on group suspend or STATUS_UPDATE event.
1274  */
1275 static void
1276 cs_slot_sync_queue_state_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
1277 {
1278 	struct panthor_group *group = ptdev->scheduler->csg_slots[csg_id].group;
1279 	struct panthor_queue *queue = group->queues[cs_id];
1280 	struct panthor_fw_cs_iface *cs_iface =
1281 		panthor_fw_get_cs_iface(group->ptdev, csg_id, cs_id);
1282 
1283 	u32 status_wait_cond;
1284 
1285 	switch (cs_iface->output->status_blocked_reason) {
1286 	case CS_STATUS_BLOCKED_REASON_UNBLOCKED:
1287 		if (queue->iface.input->insert == queue->iface.output->extract &&
1288 		    cs_iface->output->status_scoreboards == 0)
1289 			group->idle_queues |= BIT(cs_id);
1290 		break;
1291 
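	/* Snapshot the SYNC_WAIT condition (sync object address, reference
	 * value and comparison type) so the kernel can re-evaluate it when
	 * sync objects are updated and unblock the queue once the condition
	 * is met.
	 */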
1292 	case CS_STATUS_BLOCKED_REASON_SYNC_WAIT:
1293 		if (list_empty(&group->wait_node)) {
1294 			list_move_tail(&group->wait_node,
1295 				       &group->ptdev->scheduler->groups.waiting);
1296 		}
1297 
1298 		/* The queue is only blocked if there's no deferred operation
1299 		 * pending, which can be checked through the scoreboard status.
1300 		 */
1301 		if (!cs_iface->output->status_scoreboards)
1302 			group->blocked_queues |= BIT(cs_id);
1303 
1304 		queue->syncwait.gpu_va = cs_iface->output->status_wait_sync_ptr;
1305 		queue->syncwait.ref = cs_iface->output->status_wait_sync_value;
1306 		status_wait_cond = cs_iface->output->status_wait & CS_STATUS_WAIT_SYNC_COND_MASK;
1307 		queue->syncwait.gt = status_wait_cond == CS_STATUS_WAIT_SYNC_COND_GT;
1308 		if (cs_iface->output->status_wait & CS_STATUS_WAIT_SYNC_64B) {
1309 			u64 sync_val_hi = cs_iface->output->status_wait_sync_value_hi;
1310 
1311 			queue->syncwait.sync64 = true;
1312 			queue->syncwait.ref |= sync_val_hi << 32;
1313 		} else {
1314 			queue->syncwait.sync64 = false;
1315 		}
1316 		break;
1317 
1318 	default:
1319 		/* Other reasons are not blocking. Consider the queue as runnable
1320 		 * in those cases.
1321 		 */
1322 		break;
1323 	}
1324 }
1325 
1326 static void
1327 csg_slot_sync_queues_state_locked(struct panthor_device *ptdev, u32 csg_id)
1328 {
1329 	struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1330 	struct panthor_group *group = csg_slot->group;
1331 	u32 i;
1332 
1333 	lockdep_assert_held(&ptdev->scheduler->lock);
1334 
1335 	group->idle_queues = 0;
1336 	group->blocked_queues = 0;
1337 
1338 	for (i = 0; i < group->queue_count; i++) {
1339 		if (group->queues[i])
1340 			cs_slot_sync_queue_state_locked(ptdev, csg_id, i);
1341 	}
1342 }
1343 
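/* Reflect the FW-reported CSG state (ack & CSG_STATE_MASK) into
 * panthor_group::state. When an active group leaves the ACTIVE state, its CS
 * slots are reset so the next CSG start operation begins from a clean state.
 */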
1344 static void
1345 csg_slot_sync_state_locked(struct panthor_device *ptdev, u32 csg_id)
1346 {
1347 	struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1348 	struct panthor_fw_csg_iface *csg_iface;
1349 	struct panthor_group *group;
1350 	enum panthor_group_state new_state, old_state;
1351 	u32 csg_state;
1352 
1353 	lockdep_assert_held(&ptdev->scheduler->lock);
1354 
1355 	csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1356 	group = csg_slot->group;
1357 
1358 	if (!group)
1359 		return;
1360 
1361 	old_state = group->state;
1362 	csg_state = csg_iface->output->ack & CSG_STATE_MASK;
1363 	switch (csg_state) {
1364 	case CSG_STATE_START:
1365 	case CSG_STATE_RESUME:
1366 		new_state = PANTHOR_CS_GROUP_ACTIVE;
1367 		break;
1368 	case CSG_STATE_TERMINATE:
1369 		new_state = PANTHOR_CS_GROUP_TERMINATED;
1370 		break;
1371 	case CSG_STATE_SUSPEND:
1372 		new_state = PANTHOR_CS_GROUP_SUSPENDED;
1373 		break;
1374 	default:
1375 		/* The unknown state might be caused by a FW state corruption,
1376 		 * which means the group metadata can't be trusted anymore, and
1377 		 * the SUSPEND operation might propagate the corruption to the
1378 		 * suspend buffers. Flag the group state as unknown to make
1379 		 * sure it's unusable after that point.
1380 		 */
1381 		drm_err(&ptdev->base, "Invalid state on CSG %d (state=%d)",
1382 			csg_id, csg_state);
1383 		new_state = PANTHOR_CS_GROUP_UNKNOWN_STATE;
1384 		break;
1385 	}
1386 
1387 	if (old_state == new_state)
1388 		return;
1389 
1390 	/* The unknown state might be caused by a FW issue, reset the FW to
1391 	 * take a fresh start.
1392 	 */
1393 	if (new_state == PANTHOR_CS_GROUP_UNKNOWN_STATE)
1394 		panthor_device_schedule_reset(ptdev);
1395 
1396 	if (new_state == PANTHOR_CS_GROUP_SUSPENDED)
1397 		csg_slot_sync_queues_state_locked(ptdev, csg_id);
1398 
1399 	if (old_state == PANTHOR_CS_GROUP_ACTIVE) {
1400 		u32 i;
1401 
1402 		/* Reset the queue slots so we start from a clean
1403 		 * state when starting/resuming a new group on this
1404 		 * CSG slot. No wait needed here, and no doorbell ring
1405 		 * either, since the CS slot will only be re-used
1406 		 * on the next CSG start operation.
1407 		 */
1408 		for (i = 0; i < group->queue_count; i++) {
1409 			if (group->queues[i])
1410 				cs_slot_reset_locked(ptdev, csg_id, i);
1411 		}
1412 	}
1413 
1414 	group->state = new_state;
1415 }
1416 
1417 static int
1418 csg_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 priority)
1419 {
1420 	struct panthor_fw_csg_iface *csg_iface;
1421 	struct panthor_csg_slot *csg_slot;
1422 	struct panthor_group *group;
1423 	u32 queue_mask = 0, i;
1424 	u64 endpoint_req;
1425 
1426 	lockdep_assert_held(&ptdev->scheduler->lock);
1427 
1428 	if (priority > MAX_CSG_PRIO)
1429 		return -EINVAL;
1430 
1431 	if (drm_WARN_ON(&ptdev->base, csg_id >= MAX_CSGS))
1432 		return -EINVAL;
1433 
1434 	csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1435 	group = csg_slot->group;
1436 	if (!group || group->state == PANTHOR_CS_GROUP_ACTIVE)
1437 		return 0;
1438 
1439 	csg_iface = panthor_fw_get_csg_iface(group->ptdev, csg_id);
1440 
1441 	for (i = 0; i < group->queue_count; i++) {
1442 		if (group->queues[i]) {
1443 			cs_slot_prog_locked(ptdev, csg_id, i);
1444 			queue_mask |= BIT(i);
1445 		}
1446 	}
1447 
1448 	csg_iface->input->allow_compute = group->compute_core_mask;
1449 	csg_iface->input->allow_fragment = group->fragment_core_mask;
1450 	csg_iface->input->allow_other = group->tiler_core_mask;
1451 	endpoint_req = CSG_EP_REQ_COMPUTE(group->max_compute_cores) |
1452 		       CSG_EP_REQ_FRAGMENT(group->max_fragment_cores) |
1453 		       CSG_EP_REQ_TILER(group->max_tiler_cores) |
1454 		       CSG_EP_REQ_PRIORITY(priority);
1455 	panthor_fw_csg_endpoint_req_set(ptdev, csg_iface, endpoint_req);
1456 
1457 	csg_iface->input->config = panthor_vm_as(group->vm);
1458 
1459 	if (group->suspend_buf)
1460 		csg_iface->input->suspend_buf = panthor_kernel_bo_gpuva(group->suspend_buf);
1461 	else
1462 		csg_iface->input->suspend_buf = 0;
1463 
1464 	if (group->protm_suspend_buf) {
1465 		csg_iface->input->protm_suspend_buf =
1466 			panthor_kernel_bo_gpuva(group->protm_suspend_buf);
1467 	} else {
1468 		csg_iface->input->protm_suspend_buf = 0;
1469 	}
1470 
1471 	csg_iface->input->ack_irq_mask = ~0;
1472 	panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, queue_mask);
1473 	return 0;
1474 }
1475 
1476 static void
1477 cs_slot_process_fatal_event_locked(struct panthor_device *ptdev,
1478 				   u32 csg_id, u32 cs_id)
1479 {
1480 	struct panthor_scheduler *sched = ptdev->scheduler;
1481 	struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
1482 	struct panthor_group *group = csg_slot->group;
1483 	struct panthor_fw_cs_iface *cs_iface;
1484 	u32 fatal;
1485 	u64 info;
1486 
1487 	lockdep_assert_held(&sched->lock);
1488 
1489 	cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1490 	fatal = cs_iface->output->fatal;
1491 	info = cs_iface->output->fatal_info;
1492 
1493 	if (group) {
1494 		drm_warn(&ptdev->base, "CS_FATAL: pid=%d, comm=%s\n",
1495 			 group->task_info.pid, group->task_info.comm);
1496 
1497 		group->fatal_queues |= BIT(cs_id);
1498 	}
1499 
1500 	if (CS_EXCEPTION_TYPE(fatal) == DRM_PANTHOR_EXCEPTION_CS_UNRECOVERABLE) {
1501 		/* If this exception is unrecoverable, queue a reset, and make
1502 		 * sure we stop scheduling groups until the reset has happened.
1503 		 */
1504 		panthor_device_schedule_reset(ptdev);
1505 		cancel_delayed_work(&sched->tick_work);
1506 	} else {
1507 		sched_queue_delayed_work(sched, tick, 0);
1508 	}
1509 
1510 	drm_warn(&ptdev->base,
1511 		 "CSG slot %d CS slot: %d\n"
1512 		 "CS_FATAL.EXCEPTION_TYPE: 0x%x (%s)\n"
1513 		 "CS_FATAL.EXCEPTION_DATA: 0x%x\n"
1514 		 "CS_FATAL_INFO.EXCEPTION_DATA: 0x%llx\n",
1515 		 csg_id, cs_id,
1516 		 (unsigned int)CS_EXCEPTION_TYPE(fatal),
1517 		 panthor_exception_name(ptdev, CS_EXCEPTION_TYPE(fatal)),
1518 		 (unsigned int)CS_EXCEPTION_DATA(fatal),
1519 		 info);
1520 }
1521 
1522 static void
1523 cs_slot_process_fault_event_locked(struct panthor_device *ptdev,
1524 				   u32 csg_id, u32 cs_id)
1525 {
1526 	struct panthor_scheduler *sched = ptdev->scheduler;
1527 	struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
1528 	struct panthor_group *group = csg_slot->group;
1529 	struct panthor_queue *queue = group && cs_id < group->queue_count ?
1530 				      group->queues[cs_id] : NULL;
1531 	struct panthor_fw_cs_iface *cs_iface;
1532 	u32 fault;
1533 	u64 info;
1534 
1535 	lockdep_assert_held(&sched->lock);
1536 
1537 	cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1538 	fault = cs_iface->output->fault;
1539 	info = cs_iface->output->fault_info;
1540 
1541 	if (queue) {
1542 		u64 cs_extract = queue->iface.output->extract;
1543 		struct panthor_job *job;
1544 
1545 		spin_lock(&queue->fence_ctx.lock);
1546 		list_for_each_entry(job, &queue->fence_ctx.in_flight_jobs, node) {
1547 			if (cs_extract >= job->ringbuf.end)
1548 				continue;
1549 
1550 			if (cs_extract < job->ringbuf.start)
1551 				break;
1552 
1553 			dma_fence_set_error(job->done_fence, -EINVAL);
1554 		}
1555 		spin_unlock(&queue->fence_ctx.lock);
1556 	}
1557 
1558 	if (group) {
1559 		drm_warn(&ptdev->base, "CS_FAULT: pid=%d, comm=%s\n",
1560 			 group->task_info.pid, group->task_info.comm);
1561 	}
1562 
1563 	drm_warn(&ptdev->base,
1564 		 "CSG slot %d CS slot: %d\n"
1565 		 "CS_FAULT.EXCEPTION_TYPE: 0x%x (%s)\n"
1566 		 "CS_FAULT.EXCEPTION_DATA: 0x%x\n"
1567 		 "CS_FAULT_INFO.EXCEPTION_DATA: 0x%llx\n",
1568 		 csg_id, cs_id,
1569 		 (unsigned int)CS_EXCEPTION_TYPE(fault),
1570 		 panthor_exception_name(ptdev, CS_EXCEPTION_TYPE(fault)),
1571 		 (unsigned int)CS_EXCEPTION_DATA(fault),
1572 		 info);
1573 }
1574 
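/* Grow the tiler heap of a queue that raised a TILER_OOM event.
 *
 * The FW interface is sampled under the scheduler lock, the new chunk is
 * allocated with the lock dropped (the allocation might reclaim memory), and
 * the result is handed back to the FW only if the group is still resident on
 * a CSG slot.
 */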
1575 static int group_process_tiler_oom(struct panthor_group *group, u32 cs_id)
1576 {
1577 	struct panthor_device *ptdev = group->ptdev;
1578 	struct panthor_scheduler *sched = ptdev->scheduler;
1579 	u32 renderpasses_in_flight, pending_frag_count;
1580 	struct panthor_heap_pool *heaps = NULL;
1581 	u64 heap_address, new_chunk_va = 0;
1582 	u32 vt_start, vt_end, frag_end;
1583 	int ret, csg_id;
1584 
1585 	mutex_lock(&sched->lock);
1586 	csg_id = group->csg_id;
1587 	if (csg_id >= 0) {
1588 		struct panthor_fw_cs_iface *cs_iface;
1589 
1590 		cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1591 		heaps = panthor_vm_get_heap_pool(group->vm, false);
1592 		heap_address = cs_iface->output->heap_address;
1593 		vt_start = cs_iface->output->heap_vt_start;
1594 		vt_end = cs_iface->output->heap_vt_end;
1595 		frag_end = cs_iface->output->heap_frag_end;
1596 		renderpasses_in_flight = vt_start - frag_end;
1597 		pending_frag_count = vt_end - frag_end;
1598 	}
1599 	mutex_unlock(&sched->lock);
1600 
1601 	/* The group got scheduled out, we stop here. We will get a new tiler OOM event
1602 	 * when it's scheduled again.
1603 	 */
1604 	if (unlikely(csg_id < 0))
1605 		return 0;
1606 
1607 	if (IS_ERR(heaps) || frag_end > vt_end || vt_end >= vt_start) {
1608 		ret = -EINVAL;
1609 	} else {
1610 		/* We do the allocation without holding the scheduler lock to avoid
1611 		 * blocking the scheduling.
1612 		 */
1613 		ret = panthor_heap_grow(heaps, heap_address,
1614 					renderpasses_in_flight,
1615 					pending_frag_count, &new_chunk_va);
1616 	}
1617 
1618 	/* If the heap context doesn't have memory for us, we want to let the
1619 	 * FW try to reclaim memory by waiting for fragment jobs to land or by
1620 	 * executing the tiler OOM exception handler, which is supposed to
1621 	 * implement incremental rendering.
1622 	 */
1623 	if (ret && ret != -ENOMEM) {
1624 		drm_warn(&ptdev->base, "Failed to extend the tiler heap\n");
1625 		group->fatal_queues |= BIT(cs_id);
1626 		sched_queue_delayed_work(sched, tick, 0);
1627 		goto out_put_heap_pool;
1628 	}
1629 
1630 	mutex_lock(&sched->lock);
1631 	csg_id = group->csg_id;
1632 	if (csg_id >= 0) {
1633 		struct panthor_fw_csg_iface *csg_iface;
1634 		struct panthor_fw_cs_iface *cs_iface;
1635 
1636 		csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1637 		cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1638 
1639 		cs_iface->input->heap_start = new_chunk_va;
1640 		cs_iface->input->heap_end = new_chunk_va;
1641 		panthor_fw_update_reqs(cs_iface, req, cs_iface->output->ack, CS_TILER_OOM);
1642 		panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, BIT(cs_id));
1643 		panthor_fw_ring_csg_doorbells(ptdev, BIT(csg_id));
1644 	}
1645 	mutex_unlock(&sched->lock);
1646 
1647 	/* We allocated a chunk, but couldn't link it to the heap
1648 	 * context because the group was scheduled out while we were
1649 	 * allocating memory. We need to return this chunk to the heap.
1650 	 */
1651 	if (unlikely(csg_id < 0 && new_chunk_va))
1652 		panthor_heap_return_chunk(heaps, heap_address, new_chunk_va);
1653 
1654 	ret = 0;
1655 
1656 out_put_heap_pool:
1657 	panthor_heap_pool_put(heaps);
1658 	return ret;
1659 }
1660 
1661 static void group_tiler_oom_work(struct work_struct *work)
1662 {
1663 	struct panthor_group *group =
1664 		container_of(work, struct panthor_group, tiler_oom_work);
1665 	u32 tiler_oom = atomic_xchg(&group->tiler_oom, 0);
1666 
1667 	while (tiler_oom) {
1668 		u32 cs_id = ffs(tiler_oom) - 1;
1669 
1670 		group_process_tiler_oom(group, cs_id);
1671 		tiler_oom &= ~BIT(cs_id);
1672 	}
1673 
1674 	group_put(group);
1675 }
1676 
1677 static void
1678 cs_slot_process_tiler_oom_event_locked(struct panthor_device *ptdev,
1679 				       u32 csg_id, u32 cs_id)
1680 {
1681 	struct panthor_scheduler *sched = ptdev->scheduler;
1682 	struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
1683 	struct panthor_group *group = csg_slot->group;
1684 
1685 	lockdep_assert_held(&sched->lock);
1686 
1687 	if (drm_WARN_ON(&ptdev->base, !group))
1688 		return;
1689 
1690 	atomic_or(BIT(cs_id), &group->tiler_oom);
1691 
1692 	/* We don't use group_queue_work() here because we want to queue the
1693 	 * work item to the heap_alloc_wq.
1694 	 */
1695 	group_get(group);
1696 	if (!queue_work(sched->heap_alloc_wq, &group->tiler_oom_work))
1697 		group_put(group);
1698 }
1699 
1700 static bool cs_slot_process_irq_locked(struct panthor_device *ptdev,
1701 				       u32 csg_id, u32 cs_id)
1702 {
1703 	struct panthor_fw_cs_iface *cs_iface;
1704 	u32 req, ack, events;
1705 
1706 	lockdep_assert_held(&ptdev->scheduler->lock);
1707 
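	/* CS events follow the usual CSF req/ack convention: the FW toggles
	 * bits in the output ack word to signal events, and an event stays
	 * pending as long as the corresponding req and ack bits differ. The
	 * host acknowledges by updating req to match ack.
	 */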
1708 	cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1709 	req = cs_iface->input->req;
1710 	ack = cs_iface->output->ack;
1711 	events = (req ^ ack) & CS_EVT_MASK;
1712 
1713 	if (events & CS_FATAL)
1714 		cs_slot_process_fatal_event_locked(ptdev, csg_id, cs_id);
1715 
1716 	if (events & CS_FAULT)
1717 		cs_slot_process_fault_event_locked(ptdev, csg_id, cs_id);
1718 
1719 	if (events & CS_TILER_OOM)
1720 		cs_slot_process_tiler_oom_event_locked(ptdev, csg_id, cs_id);
1721 
1722 	/* We don't acknowledge the TILER_OOM event since its handling is
1723 	 * deferred to a separate work.
1724 	 */
1725 	panthor_fw_update_reqs(cs_iface, req, ack, CS_FATAL | CS_FAULT);
1726 
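	/* Tell the caller whether the CS doorbell should be rung, which is
	 * the case for faults and tiler OOMs (presumably so the FW re-examines
	 * the stream once the event has been handled/acknowledged).
	 */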
1727 	return (events & (CS_FAULT | CS_TILER_OOM)) != 0;
1728 }
1729 
1730 static void csg_slot_process_idle_event_locked(struct panthor_device *ptdev, u32 csg_id)
1731 {
1732 	struct panthor_scheduler *sched = ptdev->scheduler;
1733 
1734 	lockdep_assert_held(&sched->lock);
1735 
1736 	sched->might_have_idle_groups = true;
1737 
1738 	/* Schedule a tick so we can evict idle groups and schedule non-idle
1739 	 * ones. This will also update runtime PM and devfreq busy/idle states,
1740 	 * so the device can lower its frequency or get suspended.
1741 	 */
1742 	sched_queue_delayed_work(sched, tick, 0);
1743 }
1744 
1745 static void csg_slot_sync_update_locked(struct panthor_device *ptdev,
1746 					u32 csg_id)
1747 {
1748 	struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1749 	struct panthor_group *group = csg_slot->group;
1750 
1751 	lockdep_assert_held(&ptdev->scheduler->lock);
1752 
1753 	if (group)
1754 		group_queue_work(group, sync_upd);
1755 
1756 	sched_queue_work(ptdev->scheduler, sync_upd);
1757 }
1758 
1759 static void
1760 csg_slot_process_progress_timer_event_locked(struct panthor_device *ptdev, u32 csg_id)
1761 {
1762 	struct panthor_scheduler *sched = ptdev->scheduler;
1763 	struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
1764 	struct panthor_group *group = csg_slot->group;
1765 
1766 	lockdep_assert_held(&sched->lock);
1767 
1768 	group = csg_slot->group;
1769 	if (!drm_WARN_ON(&ptdev->base, !group)) {
1770 		drm_warn(&ptdev->base, "CSG_PROGRESS_TIMER_EVENT: pid=%d, comm=%s\n",
1771 			 group->task_info.pid, group->task_info.comm);
1772 
1773 		group->timedout = true;
1774 	}
1775 
1776 	drm_warn(&ptdev->base, "CSG slot %d progress timeout\n", csg_id);
1777 
1778 	sched_queue_delayed_work(sched, tick, 0);
1779 }
1780 
1781 static void sched_process_csg_irq_locked(struct panthor_device *ptdev, u32 csg_id)
1782 {
1783 	u32 req, ack, cs_irq_req, cs_irq_ack, cs_irqs, csg_events;
1784 	struct panthor_fw_csg_iface *csg_iface;
1785 	u32 ring_cs_db_mask = 0;
1786 
1787 	lockdep_assert_held(&ptdev->scheduler->lock);
1788 
1789 	if (drm_WARN_ON(&ptdev->base, csg_id >= ptdev->scheduler->csg_slot_count))
1790 		return;
1791 
1792 	csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1793 	req = READ_ONCE(csg_iface->input->req);
1794 	ack = READ_ONCE(csg_iface->output->ack);
1795 	cs_irq_req = READ_ONCE(csg_iface->output->cs_irq_req);
1796 	cs_irq_ack = READ_ONCE(csg_iface->input->cs_irq_ack);
1797 	csg_events = (req ^ ack) & CSG_EVT_MASK;
1798 
1799 	/* There may not be any pending CSG/CS interrupts to process */
1800 	if (req == ack && cs_irq_req == cs_irq_ack)
1801 		return;
1802 
1803 	/* Immediately set IRQ_ACK bits to be same as the IRQ_REQ bits before
1804 	 * examining the CS_ACK & CS_REQ bits. This would ensure that Host
1805 	 * doesn't miss an interrupt for the CS in the race scenario where
1806 	 * whilst Host is servicing an interrupt for the CS, firmware sends
1807 	 * another interrupt for that CS.
1808 	 */
1809 	csg_iface->input->cs_irq_ack = cs_irq_req;
1810 
1811 	panthor_fw_update_reqs(csg_iface, req, ack,
1812 			       CSG_SYNC_UPDATE |
1813 			       CSG_IDLE |
1814 			       CSG_PROGRESS_TIMER_EVENT);
1815 
1816 	if (csg_events & CSG_IDLE)
1817 		csg_slot_process_idle_event_locked(ptdev, csg_id);
1818 
1819 	if (csg_events & CSG_PROGRESS_TIMER_EVENT)
1820 		csg_slot_process_progress_timer_event_locked(ptdev, csg_id);
1821 
1822 	cs_irqs = cs_irq_req ^ cs_irq_ack;
1823 	while (cs_irqs) {
1824 		u32 cs_id = ffs(cs_irqs) - 1;
1825 
1826 		if (cs_slot_process_irq_locked(ptdev, csg_id, cs_id))
1827 			ring_cs_db_mask |= BIT(cs_id);
1828 
1829 		cs_irqs &= ~BIT(cs_id);
1830 	}
1831 
1832 	if (csg_events & CSG_SYNC_UPDATE)
1833 		csg_slot_sync_update_locked(ptdev, csg_id);
1834 
1835 	if (ring_cs_db_mask)
1836 		panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, ring_cs_db_mask);
1837 
1838 	panthor_fw_ring_csg_doorbells(ptdev, BIT(csg_id));
1839 }
1840 
1841 static void sched_process_idle_event_locked(struct panthor_device *ptdev)
1842 {
1843 	struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
1844 
1845 	lockdep_assert_held(&ptdev->scheduler->lock);
1846 
1847 	/* Acknowledge the idle event and schedule a tick. */
1848 	panthor_fw_update_reqs(glb_iface, req, glb_iface->output->ack, GLB_IDLE);
1849 	sched_queue_delayed_work(ptdev->scheduler, tick, 0);
1850 }
1851 
1852 /**
1853  * sched_process_global_irq_locked() - Process the scheduling part of a global IRQ
1854  * @ptdev: Device.
1855  */
1856 static void sched_process_global_irq_locked(struct panthor_device *ptdev)
1857 {
1858 	struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
1859 	u32 req, ack, evts;
1860 
1861 	lockdep_assert_held(&ptdev->scheduler->lock);
1862 
1863 	req = READ_ONCE(glb_iface->input->req);
1864 	ack = READ_ONCE(glb_iface->output->ack);
1865 	evts = (req ^ ack) & GLB_EVT_MASK;
1866 
1867 	if (evts & GLB_IDLE)
1868 		sched_process_idle_event_locked(ptdev);
1869 }
1870 
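/* Dispatch the FW event bits latched by panthor_sched_report_fw_events():
 * the JOB_INT_GLOBAL_IF bit selects the global interface, every other bit
 * identifies a CSG slot whose interface needs servicing.
 */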
1871 static void process_fw_events_work(struct work_struct *work)
1872 {
1873 	struct panthor_scheduler *sched = container_of(work, struct panthor_scheduler,
1874 						      fw_events_work);
1875 	u32 events = atomic_xchg(&sched->fw_events, 0);
1876 	struct panthor_device *ptdev = sched->ptdev;
1877 
1878 	mutex_lock(&sched->lock);
1879 
1880 	if (events & JOB_INT_GLOBAL_IF) {
1881 		sched_process_global_irq_locked(ptdev);
1882 		events &= ~JOB_INT_GLOBAL_IF;
1883 	}
1884 
1885 	while (events) {
1886 		u32 csg_id = ffs(events) - 1;
1887 
1888 		sched_process_csg_irq_locked(ptdev, csg_id);
1889 		events &= ~BIT(csg_id);
1890 	}
1891 
1892 	mutex_unlock(&sched->lock);
1893 }
1894 
1895 /**
1896  * panthor_sched_report_fw_events() - Report FW events to the scheduler.
 * @ptdev: Device.
 * @events: Events to report.
1897  */
1898 void panthor_sched_report_fw_events(struct panthor_device *ptdev, u32 events)
1899 {
1900 	if (!ptdev->scheduler)
1901 		return;
1902 
1903 	atomic_or(events, &ptdev->scheduler->fw_events);
1904 	sched_queue_work(ptdev->scheduler, fw_events);
1905 }
1906 
1907 static const char *fence_get_driver_name(struct dma_fence *fence)
1908 {
1909 	return "panthor";
1910 }
1911 
1912 static const char *queue_fence_get_timeline_name(struct dma_fence *fence)
1913 {
1914 	return "queue-fence";
1915 }
1916 
1917 static const struct dma_fence_ops panthor_queue_fence_ops = {
1918 	.get_driver_name = fence_get_driver_name,
1919 	.get_timeline_name = queue_fence_get_timeline_name,
1920 };
1921 
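/* Context used to batch CSG slot updates: requests are accumulated per slot
 * with csgs_upd_ctx_queue_reqs(), then csgs_upd_ctx_apply_locked() writes
 * them out, rings the doorbells and waits for the FW acks, recording the
 * slots that didn't answer in time in timedout_mask.
 */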
1922 struct panthor_csg_slots_upd_ctx {
1923 	u32 update_mask;
1924 	u32 timedout_mask;
1925 	struct {
1926 		u32 value;
1927 		u32 mask;
1928 	} requests[MAX_CSGS];
1929 };
1930 
1931 static void csgs_upd_ctx_init(struct panthor_csg_slots_upd_ctx *ctx)
1932 {
1933 	memset(ctx, 0, sizeof(*ctx));
1934 }
1935 
1936 static void csgs_upd_ctx_queue_reqs(struct panthor_device *ptdev,
1937 				    struct panthor_csg_slots_upd_ctx *ctx,
1938 				    u32 csg_id, u32 value, u32 mask)
1939 {
1940 	if (drm_WARN_ON(&ptdev->base, !mask) ||
1941 	    drm_WARN_ON(&ptdev->base, csg_id >= ptdev->scheduler->csg_slot_count))
1942 		return;
1943 
1944 	ctx->requests[csg_id].value = (ctx->requests[csg_id].value & ~mask) | (value & mask);
1945 	ctx->requests[csg_id].mask |= mask;
1946 	ctx->update_mask |= BIT(csg_id);
1947 }
1948 
1949 static int csgs_upd_ctx_apply_locked(struct panthor_device *ptdev,
1950 				     struct panthor_csg_slots_upd_ctx *ctx)
1951 {
1952 	struct panthor_scheduler *sched = ptdev->scheduler;
1953 	u32 update_slots = ctx->update_mask;
1954 
1955 	lockdep_assert_held(&sched->lock);
1956 
1957 	if (!ctx->update_mask)
1958 		return 0;
1959 
1960 	while (update_slots) {
1961 		struct panthor_fw_csg_iface *csg_iface;
1962 		u32 csg_id = ffs(update_slots) - 1;
1963 
1964 		update_slots &= ~BIT(csg_id);
1965 		csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1966 		panthor_fw_update_reqs(csg_iface, req,
1967 				       ctx->requests[csg_id].value,
1968 				       ctx->requests[csg_id].mask);
1969 	}
1970 
1971 	panthor_fw_ring_csg_doorbells(ptdev, ctx->update_mask);
1972 
1973 	update_slots = ctx->update_mask;
1974 	while (update_slots) {
1975 		struct panthor_fw_csg_iface *csg_iface;
1976 		u32 csg_id = ffs(update_slots) - 1;
1977 		u32 req_mask = ctx->requests[csg_id].mask, acked;
1978 		int ret;
1979 
1980 		update_slots &= ~BIT(csg_id);
1981 		csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1982 
1983 		ret = panthor_fw_csg_wait_acks(ptdev, csg_id, req_mask, &acked, 100);
1984 
1985 		if (acked & CSG_ENDPOINT_CONFIG)
1986 			csg_slot_sync_priority_locked(ptdev, csg_id);
1987 
1988 		if (acked & CSG_STATE_MASK)
1989 			csg_slot_sync_state_locked(ptdev, csg_id);
1990 
1991 		if (acked & CSG_STATUS_UPDATE)
1992 			csg_slot_sync_queues_state_locked(ptdev, csg_id);
1993 
1994 		if (ret && acked != req_mask &&
1995 		    ((csg_iface->input->req ^ csg_iface->output->ack) & req_mask) != 0) {
1996 			drm_err(&ptdev->base, "CSG %d update request timed out", csg_id);
1997 			ctx->timedout_mask |= BIT(csg_id);
1998 		}
1999 	}
2000 
2001 	if (ctx->timedout_mask)
2002 		return -ETIMEDOUT;
2003 
2004 	return 0;
2005 }
2006 
2007 struct panthor_sched_tick_ctx {
2008 	struct list_head old_groups[PANTHOR_CSG_PRIORITY_COUNT];
2009 	struct list_head groups[PANTHOR_CSG_PRIORITY_COUNT];
2010 	u32 idle_group_count;
2011 	u32 group_count;
2012 	struct panthor_vm *vms[MAX_CS_PER_CSG];
2013 	u32 as_count;
2014 	bool immediate_tick;
2015 	bool stop_tick;
2016 	u32 csg_upd_failed_mask;
2017 };
2018 
2019 static bool
2020 tick_ctx_is_full(const struct panthor_scheduler *sched,
2021 		 const struct panthor_sched_tick_ctx *ctx)
2022 {
2023 	return ctx->group_count == sched->csg_slot_count;
2024 }
2025 
2026 static void
2027 tick_ctx_pick_groups_from_list(const struct panthor_scheduler *sched,
2028 			       struct panthor_sched_tick_ctx *ctx,
2029 			       struct list_head *queue,
2030 			       bool skip_idle_groups,
2031 			       bool owned_by_tick_ctx)
2032 {
2033 	struct panthor_group *group, *tmp;
2034 
2035 	if (tick_ctx_is_full(sched, ctx))
2036 		return;
2037 
2038 	list_for_each_entry_safe(group, tmp, queue, run_node) {
2039 		u32 i;
2040 
2041 		if (!group_can_run(group))
2042 			continue;
2043 
2044 		if (skip_idle_groups && group_is_idle(group))
2045 			continue;
2046 
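		/* Every scheduled group needs an AS slot for its VM, and
		 * groups sharing a VM can share that slot: keep track of the
		 * distinct VMs picked so far and skip groups whose VM
		 * wouldn't get an AS slot.
		 */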
2047 		for (i = 0; i < ctx->as_count; i++) {
2048 			if (ctx->vms[i] == group->vm)
2049 				break;
2050 		}
2051 
2052 		if (i == ctx->as_count && ctx->as_count == sched->as_slot_count)
2053 			continue;
2054 
2055 		if (!owned_by_tick_ctx)
2056 			group_get(group);
2057 
2058 		ctx->group_count++;
2059 
2060 		/* If we have more than one active group with the same priority,
2061 		 * we need to keep ticking to rotate the CSG priority.
2062 		 */
2063 		if (group_is_idle(group))
2064 			ctx->idle_group_count++;
2065 		else if (!list_empty(&ctx->groups[group->priority]))
2066 			ctx->stop_tick = false;
2067 
2068 		list_move_tail(&group->run_node, &ctx->groups[group->priority]);
2069 
2070 		if (i == ctx->as_count)
2071 			ctx->vms[ctx->as_count++] = group->vm;
2072 
2073 		if (tick_ctx_is_full(sched, ctx))
2074 			return;
2075 	}
2076 }
2077 
2078 static void
2079 tick_ctx_insert_old_group(struct panthor_scheduler *sched,
2080 			  struct panthor_sched_tick_ctx *ctx,
2081 			  struct panthor_group *group)
2082 {
2083 	struct panthor_csg_slot *csg_slot = &sched->csg_slots[group->csg_id];
2084 	struct panthor_group *other_group;
2085 
2086 	/* Sort groups in descending priority order so we can easily rotate. */
2087 	list_for_each_entry(other_group,
2088 			    &ctx->old_groups[csg_slot->group->priority],
2089 			    run_node) {
2090 		struct panthor_csg_slot *other_csg_slot = &sched->csg_slots[other_group->csg_id];
2091 
2092 		/* Our group has a higher prio than the one we're testing against,
2093 		 * so place it just before.
2094 		 */
2095 		if (csg_slot->priority > other_csg_slot->priority) {
2096 			list_add_tail(&group->run_node, &other_group->run_node);
2097 			return;
2098 		}
2099 	}
2100 
2101 	list_add_tail(&group->run_node, &ctx->old_groups[group->priority]);
2102 }
2103 
2104 static void
2105 tick_ctx_init(struct panthor_scheduler *sched,
2106 	      struct panthor_sched_tick_ctx *ctx)
2107 {
2108 	struct panthor_device *ptdev = sched->ptdev;
2109 	struct panthor_csg_slots_upd_ctx upd_ctx;
2110 	int ret;
2111 	u32 i;
2112 
2113 	memset(ctx, 0, sizeof(*ctx));
2114 	csgs_upd_ctx_init(&upd_ctx);
2115 
2116 	ctx->stop_tick = true;
2117 	for (i = 0; i < ARRAY_SIZE(ctx->groups); i++) {
2118 		INIT_LIST_HEAD(&ctx->groups[i]);
2119 		INIT_LIST_HEAD(&ctx->old_groups[i]);
2120 	}
2121 
2122 	for (i = 0; i < sched->csg_slot_count; i++) {
2123 		struct panthor_csg_slot *csg_slot = &sched->csg_slots[i];
2124 		struct panthor_group *group = csg_slot->group;
2125 		struct panthor_fw_csg_iface *csg_iface;
2126 
2127 		if (!group)
2128 			continue;
2129 
2130 		csg_iface = panthor_fw_get_csg_iface(ptdev, i);
2131 		group_get(group);
2132 
2133 		/* If there were unhandled faults on the VM, force processing of
2134 		 * CSG IRQs, so we can flag the faulty queue.
2135 		 */
2136 		if (panthor_vm_has_unhandled_faults(group->vm)) {
2137 			sched_process_csg_irq_locked(ptdev, i);
2138 
2139 			/* No fatal fault reported, flag all queues as faulty. */
2140 			if (!group->fatal_queues)
2141 				group->fatal_queues |= GENMASK(group->queue_count - 1, 0);
2142 		}
2143 
2144 		tick_ctx_insert_old_group(sched, ctx, group);
2145 		csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, i,
2146 					csg_iface->output->ack ^ CSG_STATUS_UPDATE,
2147 					CSG_STATUS_UPDATE);
2148 	}
2149 
2150 	ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
2151 	if (ret) {
2152 		panthor_device_schedule_reset(ptdev);
2153 		ctx->csg_upd_failed_mask |= upd_ctx.timedout_mask;
2154 	}
2155 }
2156 
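/* Post-termination cleanup: signal all in-flight fences of a group that can
 * no longer run, with an error reflecting why (fatal fault, timeout or plain
 * cancellation), and bump the syncobjs so userspace waiters don't get stuck.
 */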
2157 static void
2158 group_term_post_processing(struct panthor_group *group)
2159 {
2160 	struct panthor_job *job, *tmp;
2161 	LIST_HEAD(faulty_jobs);
2162 	bool cookie;
2163 	u32 i = 0;
2164 
2165 	if (drm_WARN_ON(&group->ptdev->base, group_can_run(group)))
2166 		return;
2167 
2168 	cookie = dma_fence_begin_signalling();
2169 	for (i = 0; i < group->queue_count; i++) {
2170 		struct panthor_queue *queue = group->queues[i];
2171 		struct panthor_syncobj_64b *syncobj;
2172 		int err;
2173 
2174 		if (group->fatal_queues & BIT(i))
2175 			err = -EINVAL;
2176 		else if (group->timedout)
2177 			err = -ETIMEDOUT;
2178 		else
2179 			err = -ECANCELED;
2180 
2181 		if (!queue)
2182 			continue;
2183 
2184 		spin_lock(&queue->fence_ctx.lock);
2185 		list_for_each_entry_safe(job, tmp, &queue->fence_ctx.in_flight_jobs, node) {
2186 			list_move_tail(&job->node, &faulty_jobs);
2187 			dma_fence_set_error(job->done_fence, err);
2188 			dma_fence_signal_locked(job->done_fence);
2189 		}
2190 		spin_unlock(&queue->fence_ctx.lock);
2191 
2192 		/* Manually update the syncobj seqno to unblock waiters. */
2193 		syncobj = group->syncobjs->kmap + (i * sizeof(*syncobj));
2194 		syncobj->status = ~0;
2195 		syncobj->seqno = atomic64_read(&queue->fence_ctx.seqno);
2196 		sched_queue_work(group->ptdev->scheduler, sync_upd);
2197 	}
2198 	dma_fence_end_signalling(cookie);
2199 
2200 	list_for_each_entry_safe(job, tmp, &faulty_jobs, node) {
2201 		list_del_init(&job->node);
2202 		panthor_job_put(&job->base);
2203 	}
2204 }
2205 
2206 static void group_term_work(struct work_struct *work)
2207 {
2208 	struct panthor_group *group =
2209 		container_of(work, struct panthor_group, term_work);
2210 
2211 	group_term_post_processing(group);
2212 	group_put(group);
2213 }
2214 
2215 static void
2216 tick_ctx_cleanup(struct panthor_scheduler *sched,
2217 		 struct panthor_sched_tick_ctx *ctx)
2218 {
2219 	struct panthor_device *ptdev = sched->ptdev;
2220 	struct panthor_group *group, *tmp;
2221 	u32 i;
2222 
2223 	for (i = 0; i < ARRAY_SIZE(ctx->old_groups); i++) {
2224 		list_for_each_entry_safe(group, tmp, &ctx->old_groups[i], run_node) {
2225 			/* If everything went fine, we should only have groups
2226 			 * to be terminated in the old_groups lists.
2227 			 */
2228 			drm_WARN_ON(&ptdev->base, !ctx->csg_upd_failed_mask &&
2229 				    group_can_run(group));
2230 
2231 			if (!group_can_run(group)) {
2232 				list_del_init(&group->run_node);
2233 				list_del_init(&group->wait_node);
2234 				group_queue_work(group, term);
2235 			} else if (group->csg_id >= 0) {
2236 				list_del_init(&group->run_node);
2237 			} else {
2238 				list_move(&group->run_node,
2239 					  group_is_idle(group) ?
2240 					  &sched->groups.idle[group->priority] :
2241 					  &sched->groups.runnable[group->priority]);
2242 			}
2243 			group_put(group);
2244 		}
2245 	}
2246 
2247 	for (i = 0; i < ARRAY_SIZE(ctx->groups); i++) {
2248 		/* If everything went fine, the groups to schedule lists should
2249 		 * be empty.
2250 		 */
2251 		drm_WARN_ON(&ptdev->base,
2252 			    !ctx->csg_upd_failed_mask && !list_empty(&ctx->groups[i]));
2253 
2254 		list_for_each_entry_safe(group, tmp, &ctx->groups[i], run_node) {
2255 			if (group->csg_id >= 0) {
2256 				list_del_init(&group->run_node);
2257 			} else {
2258 				list_move(&group->run_node,
2259 					  group_is_idle(group) ?
2260 					  &sched->groups.idle[group->priority] :
2261 					  &sched->groups.runnable[group->priority]);
2262 			}
2263 			group_put(group);
2264 		}
2265 	}
2266 }
2267 
2268 static void
2269 tick_ctx_apply(struct panthor_scheduler *sched, struct panthor_sched_tick_ctx *ctx)
2270 {
2271 	struct panthor_group *group, *tmp;
2272 	struct panthor_device *ptdev = sched->ptdev;
2273 	struct panthor_csg_slot *csg_slot;
2274 	int prio, new_csg_prio = MAX_CSG_PRIO, i;
2275 	u32 free_csg_slots = 0;
2276 	struct panthor_csg_slots_upd_ctx upd_ctx;
2277 	int ret;
2278 
2279 	csgs_upd_ctx_init(&upd_ctx);
2280 
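	/* First pass: ask the FW to suspend/terminate the evicted groups and
	 * adjust the priority of the groups that keep their slot.
	 */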
2281 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
2282 		/* Suspend or terminate evicted groups. */
2283 		list_for_each_entry(group, &ctx->old_groups[prio], run_node) {
2284 			bool term = !group_can_run(group);
2285 			int csg_id = group->csg_id;
2286 
2287 			if (drm_WARN_ON(&ptdev->base, csg_id < 0))
2288 				continue;
2289 
2290 			csg_slot = &sched->csg_slots[csg_id];
2291 			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
2292 						term ? CSG_STATE_TERMINATE : CSG_STATE_SUSPEND,
2293 						CSG_STATE_MASK);
2294 		}
2295 
2296 		/* Update priorities on already running groups. */
2297 		list_for_each_entry(group, &ctx->groups[prio], run_node) {
2298 			struct panthor_fw_csg_iface *csg_iface;
2299 			int csg_id = group->csg_id;
2300 
2301 			if (csg_id < 0) {
2302 				new_csg_prio--;
2303 				continue;
2304 			}
2305 
2306 			csg_slot = &sched->csg_slots[csg_id];
2307 			csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
2308 			if (csg_slot->priority == new_csg_prio) {
2309 				new_csg_prio--;
2310 				continue;
2311 			}
2312 
2313 			panthor_fw_csg_endpoint_req_update(ptdev, csg_iface,
2314 							   CSG_EP_REQ_PRIORITY(new_csg_prio),
2315 							   CSG_EP_REQ_PRIORITY_MASK);
2316 			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
2317 						csg_iface->output->ack ^ CSG_ENDPOINT_CONFIG,
2318 						CSG_ENDPOINT_CONFIG);
2319 			new_csg_prio--;
2320 		}
2321 	}
2322 
2323 	ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
2324 	if (ret) {
2325 		panthor_device_schedule_reset(ptdev);
2326 		ctx->csg_upd_failed_mask |= upd_ctx.timedout_mask;
2327 		return;
2328 	}
2329 
2330 	/* Unbind evicted groups. */
2331 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
2332 		list_for_each_entry(group, &ctx->old_groups[prio], run_node) {
2333 			/* This group is gone. Process interrupts to clear
2334 			 * any pending interrupts before we start the new
2335 			 * group.
2336 			 */
2337 			if (group->csg_id >= 0)
2338 				sched_process_csg_irq_locked(ptdev, group->csg_id);
2339 
2340 			group_unbind_locked(group);
2341 		}
2342 	}
2343 
2344 	for (i = 0; i < sched->csg_slot_count; i++) {
2345 		if (!sched->csg_slots[i].group)
2346 			free_csg_slots |= BIT(i);
2347 	}
2348 
2349 	csgs_upd_ctx_init(&upd_ctx);
2350 	new_csg_prio = MAX_CSG_PRIO;
2351 
2352 	/* Start new groups. */
2353 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
2354 		list_for_each_entry(group, &ctx->groups[prio], run_node) {
2355 			int csg_id = group->csg_id;
2356 			struct panthor_fw_csg_iface *csg_iface;
2357 
2358 			if (csg_id >= 0) {
2359 				new_csg_prio--;
2360 				continue;
2361 			}
2362 
2363 			csg_id = ffs(free_csg_slots) - 1;
2364 			if (drm_WARN_ON(&ptdev->base, csg_id < 0))
2365 				break;
2366 
2367 			csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
2368 			csg_slot = &sched->csg_slots[csg_id];
2369 			group_bind_locked(group, csg_id);
2370 			csg_slot_prog_locked(ptdev, csg_id, new_csg_prio--);
2371 			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
2372 						group->state == PANTHOR_CS_GROUP_SUSPENDED ?
2373 						CSG_STATE_RESUME : CSG_STATE_START,
2374 						CSG_STATE_MASK);
2375 			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
2376 						csg_iface->output->ack ^ CSG_ENDPOINT_CONFIG,
2377 						CSG_ENDPOINT_CONFIG);
2378 			free_csg_slots &= ~BIT(csg_id);
2379 		}
2380 	}
2381 
2382 	ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
2383 	if (ret) {
2384 		panthor_device_schedule_reset(ptdev);
2385 		ctx->csg_upd_failed_mask |= upd_ctx.timedout_mask;
2386 		return;
2387 	}
2388 
2389 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
2390 		list_for_each_entry_safe(group, tmp, &ctx->groups[prio], run_node) {
2391 			list_del_init(&group->run_node);
2392 
2393 			/* If the group has been destroyed while we were
2394 			 * scheduling, ask for an immediate tick to
2395 			 * re-evaluate as soon as possible and get rid of
2396 			 * this dangling group.
2397 			 */
2398 			if (group->destroyed)
2399 				ctx->immediate_tick = true;
2400 			group_put(group);
2401 		}
2402 
2403 		/* Return evicted groups to the idle or run queues. Groups
2404 		 * that can no longer be run (because they've been destroyed
2405 		 * or experienced an unrecoverable error) will be scheduled
2406 		 * for destruction in tick_ctx_cleanup().
2407 		 */
2408 		list_for_each_entry_safe(group, tmp, &ctx->old_groups[prio], run_node) {
2409 			if (!group_can_run(group))
2410 				continue;
2411 
2412 			if (group_is_idle(group))
2413 				list_move_tail(&group->run_node, &sched->groups.idle[prio]);
2414 			else
2415 				list_move_tail(&group->run_node, &sched->groups.runnable[prio]);
2416 			group_put(group);
2417 		}
2418 	}
2419 
2420 	sched->used_csg_slot_count = ctx->group_count;
2421 	sched->might_have_idle_groups = ctx->idle_group_count > 0;
2422 }
2423 
2424 static u64
2425 tick_ctx_update_resched_target(struct panthor_scheduler *sched,
2426 			       const struct panthor_sched_tick_ctx *ctx)
2427 {
2428 	u64 resched_target;
2429 
2430 	if (ctx->stop_tick)
2431 		goto no_tick;
2432 
2433 	resched_target = sched->last_tick + sched->tick_period;
2434 
2435 	if (time_before64(sched->resched_target, sched->last_tick) ||
2436 	    time_before64(resched_target, sched->resched_target))
2437 		sched->resched_target = resched_target;
2438 
2439 	return sched->resched_target - sched->last_tick;
2440 
2441 no_tick:
2442 	sched->resched_target = U64_MAX;
2443 	return U64_MAX;
2444 }
2445 
2446 static void tick_work(struct work_struct *work)
2447 {
2448 	struct panthor_scheduler *sched = container_of(work, struct panthor_scheduler,
2449 						      tick_work.work);
2450 	struct panthor_device *ptdev = sched->ptdev;
2451 	struct panthor_sched_tick_ctx ctx;
2452 	u64 resched_target = sched->resched_target;
2453 	u64 remaining_jiffies = 0, resched_delay;
2454 	u64 now = get_jiffies_64();
2455 	int prio, ret, cookie;
2456 	bool full_tick;
2457 
2458 	if (!drm_dev_enter(&ptdev->base, &cookie))
2459 		return;
2460 
2461 	ret = panthor_device_resume_and_get(ptdev);
2462 	if (drm_WARN_ON(&ptdev->base, ret))
2463 		goto out_dev_exit;
2464 
2465 	/* If the tick is stopped, calculate when the next tick would be */
2466 	if (resched_target == U64_MAX)
2467 		resched_target = sched->last_tick + sched->tick_period;
2468 
2469 	if (time_before64(now, resched_target))
2470 		remaining_jiffies = resched_target - now;
2471 
2472 	full_tick = remaining_jiffies == 0;
2473 
2474 	mutex_lock(&sched->lock);
2475 	if (panthor_device_reset_is_pending(sched->ptdev))
2476 		goto out_unlock;
2477 
2478 	tick_ctx_init(sched, &ctx);
2479 	if (ctx.csg_upd_failed_mask)
2480 		goto out_cleanup_ctx;
2481 
2482 	if (!full_tick) {
2483 		/* Scheduling forced in the middle of a tick. Only RT groups
2484 		 * can preempt non-RT ones. Currently running RT groups can't be
2485 		 * preempted.
2486 		 */
2487 		for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1;
2488 		     prio >= 0 && !tick_ctx_is_full(sched, &ctx);
2489 		     prio--) {
2490 			tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio],
2491 						       true, true);
2492 			if (prio == PANTHOR_CSG_PRIORITY_RT) {
2493 				tick_ctx_pick_groups_from_list(sched, &ctx,
2494 							       &sched->groups.runnable[prio],
2495 							       true, false);
2496 			}
2497 		}
2498 	}
2499 
2500 	/* First pick non-idle groups */
2501 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1;
2502 	     prio >= 0 && !tick_ctx_is_full(sched, &ctx);
2503 	     prio--) {
2504 		struct panthor_group *old_highest_prio_group =
2505 			list_first_entry_or_null(&ctx.old_groups[prio],
2506 						 struct panthor_group, run_node);
2507 
2508 		/* Pull out the group with the highest prio for rotation. */
2509 		if (old_highest_prio_group)
2510 			list_del(&old_highest_prio_group->run_node);
2511 
2512 		/* Re-insert old active groups so they get a chance to run with higher prio. */
2513 		tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], true, true);
2514 
2515 		/* Fill the remaining slots with runnable groups. */
2516 		tick_ctx_pick_groups_from_list(sched, &ctx, &sched->groups.runnable[prio],
2517 					       true, false);
2518 
2519 		/* Re-insert the old group with the highest prio, and give it a chance to be
2520 		 * scheduled again (but with a lower prio) if there's room left.
2521 		 */
2522 		if (old_highest_prio_group) {
2523 			list_add_tail(&old_highest_prio_group->run_node, &ctx.old_groups[prio]);
2524 			tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio],
2525 						       true, true);
2526 		}
2527 	}
2528 
2529 	/* If we have free CSG slots left, pick idle groups */
2530 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1;
2531 	     prio >= 0 && !tick_ctx_is_full(sched, &ctx);
2532 	     prio--) {
2533 		/* Check the old_group queue first to avoid reprogramming the slots */
2534 		tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], false, true);
2535 		tick_ctx_pick_groups_from_list(sched, &ctx, &sched->groups.idle[prio],
2536 					       false, false);
2537 	}
2538 
2539 	tick_ctx_apply(sched, &ctx);
2540 	if (ctx.csg_upd_failed_mask)
2541 		goto out_cleanup_ctx;
2542 
2543 	if (ctx.idle_group_count == ctx.group_count) {
2544 		panthor_devfreq_record_idle(sched->ptdev);
2545 		if (sched->pm.has_ref) {
2546 			pm_runtime_put_autosuspend(ptdev->base.dev);
2547 			sched->pm.has_ref = false;
2548 		}
2549 	} else {
2550 		panthor_devfreq_record_busy(sched->ptdev);
2551 		if (!sched->pm.has_ref) {
2552 			pm_runtime_get(ptdev->base.dev);
2553 			sched->pm.has_ref = true;
2554 		}
2555 	}
2556 
2557 	sched->last_tick = now;
2558 	resched_delay = tick_ctx_update_resched_target(sched, &ctx);
2559 	if (ctx.immediate_tick)
2560 		resched_delay = 0;
2561 
2562 	if (resched_delay != U64_MAX)
2563 		sched_queue_delayed_work(sched, tick, resched_delay);
2564 
2565 out_cleanup_ctx:
2566 	tick_ctx_cleanup(sched, &ctx);
2567 
2568 out_unlock:
2569 	mutex_unlock(&sched->lock);
2570 	pm_runtime_mark_last_busy(ptdev->base.dev);
2571 	pm_runtime_put_autosuspend(ptdev->base.dev);
2572 
2573 out_dev_exit:
2574 	drm_dev_exit(cookie);
2575 }
2576 
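/* Evaluate the SYNC_WAIT condition a blocked queue is waiting on. Returns a
 * positive value if the condition is now met (the syncwait object is released
 * in that case), 0 if the queue is still blocked, or a negative error code.
 */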
2577 static int panthor_queue_eval_syncwait(struct panthor_group *group, u8 queue_idx)
2578 {
2579 	struct panthor_queue *queue = group->queues[queue_idx];
2580 	union {
2581 		struct panthor_syncobj_64b sync64;
2582 		struct panthor_syncobj_32b sync32;
2583 	} *syncobj;
2584 	bool result;
2585 	u64 value;
2586 
2587 	syncobj = panthor_queue_get_syncwait_obj(group, queue);
2588 	if (!syncobj)
2589 		return -EINVAL;
2590 
2591 	value = queue->syncwait.sync64 ?
2592 		syncobj->sync64.seqno :
2593 		syncobj->sync32.seqno;
2594 
2595 	if (queue->syncwait.gt)
2596 		result = value > queue->syncwait.ref;
2597 	else
2598 		result = value <= queue->syncwait.ref;
2599 
2600 	if (result)
2601 		panthor_queue_put_syncwait_obj(queue);
2602 
2603 	return result;
2604 }
2605 
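/* Re-evaluate the wait conditions of all blocked queues and move the groups
 * whose queues got unblocked back to the runnable lists, scheduling an
 * immediate tick when a realtime group is involved.
 */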
2606 static void sync_upd_work(struct work_struct *work)
2607 {
2608 	struct panthor_scheduler *sched = container_of(work,
2609 						      struct panthor_scheduler,
2610 						      sync_upd_work);
2611 	struct panthor_group *group, *tmp;
2612 	bool immediate_tick = false;
2613 
2614 	mutex_lock(&sched->lock);
2615 	list_for_each_entry_safe(group, tmp, &sched->groups.waiting, wait_node) {
2616 		u32 tested_queues = group->blocked_queues;
2617 		u32 unblocked_queues = 0;
2618 
2619 		while (tested_queues) {
2620 			u32 cs_id = ffs(tested_queues) - 1;
2621 			int ret;
2622 
2623 			ret = panthor_queue_eval_syncwait(group, cs_id);
2624 			drm_WARN_ON(&group->ptdev->base, ret < 0);
2625 			if (ret)
2626 				unblocked_queues |= BIT(cs_id);
2627 
2628 			tested_queues &= ~BIT(cs_id);
2629 		}
2630 
2631 		if (unblocked_queues) {
2632 			group->blocked_queues &= ~unblocked_queues;
2633 
2634 			if (group->csg_id < 0) {
2635 				list_move(&group->run_node,
2636 					  &sched->groups.runnable[group->priority]);
2637 				if (group->priority == PANTHOR_CSG_PRIORITY_RT)
2638 					immediate_tick = true;
2639 			}
2640 		}
2641 
2642 		if (!group->blocked_queues)
2643 			list_del_init(&group->wait_node);
2644 	}
2645 	mutex_unlock(&sched->lock);
2646 
2647 	if (immediate_tick)
2648 		sched_queue_delayed_work(sched, tick, 0);
2649 }
2650 
2651 static void sched_resume_tick(struct panthor_device *ptdev)
2652 {
2653 	struct panthor_scheduler *sched = ptdev->scheduler;
2654 	u64 delay_jiffies, now;
2655 
2656 	drm_WARN_ON(&ptdev->base, sched->resched_target != U64_MAX);
2657 
2658 	/* Scheduler tick was off, recalculate the resched_target based on the
2659 	 * last tick event, and queue the scheduler work.
2660 	 */
2661 	now = get_jiffies_64();
2662 	sched->resched_target = sched->last_tick + sched->tick_period;
2663 	if (sched->used_csg_slot_count == sched->csg_slot_count &&
2664 	    time_before64(now, sched->resched_target))
2665 		delay_jiffies = min_t(unsigned long, sched->resched_target - now, ULONG_MAX);
2666 	else
2667 		delay_jiffies = 0;
2668 
2669 	sched_queue_delayed_work(sched, tick, delay_jiffies);
2670 }
2671 
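/* Called (with the scheduler lock held) when new work shows up on the queues
 * in @queue_mask: make sure the scheduler takes @group into account, kicking
 * an immediate tick when that may get the group scheduled right away (RT
 * preemption, idle groups to evict, or free CSG slots).
 */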
2672 static void group_schedule_locked(struct panthor_group *group, u32 queue_mask)
2673 {
2674 	struct panthor_device *ptdev = group->ptdev;
2675 	struct panthor_scheduler *sched = ptdev->scheduler;
2676 	struct list_head *queue = &sched->groups.runnable[group->priority];
2677 	bool was_idle;
2678 
2679 	if (!group_can_run(group))
2680 		return;
2681 
2682 	/* All updated queues are blocked, no need to wake up the scheduler. */
2683 	if ((queue_mask & group->blocked_queues) == queue_mask)
2684 		return;
2685 
2686 	was_idle = group_is_idle(group);
2687 	group->idle_queues &= ~queue_mask;
2688 
2689 	/* Don't mess with the lists if we're in the middle of a reset. */
2690 	if (atomic_read(&sched->reset.in_progress))
2691 		return;
2692 
2693 	if (was_idle && !group_is_idle(group))
2694 		list_move_tail(&group->run_node, queue);
2695 
2696 	/* RT groups are preemptive. */
2697 	if (group->priority == PANTHOR_CSG_PRIORITY_RT) {
2698 		sched_queue_delayed_work(sched, tick, 0);
2699 		return;
2700 	}
2701 
2702 	/* Some groups might be idle, force an immediate tick to
2703 	 * re-evaluate.
2704 	 */
2705 	if (sched->might_have_idle_groups) {
2706 		sched_queue_delayed_work(sched, tick, 0);
2707 		return;
2708 	}
2709 
2710 	/* Scheduler is ticking, nothing to do. */
2711 	if (sched->resched_target != U64_MAX) {
2712 		/* If there are free slots, force an immediate tick. */
2713 		if (sched->used_csg_slot_count < sched->csg_slot_count)
2714 			sched_queue_delayed_work(sched, tick, 0);
2715 
2716 		return;
2717 	}
2718 
2719 	/* Scheduler tick was off, recalculate the resched_target based on the
2720 	 * last tick event, and queue the scheduler work.
2721 	 */
2722 	sched_resume_tick(ptdev);
2723 }
2724 
2725 static void queue_stop(struct panthor_queue *queue,
2726 		       struct panthor_job *bad_job)
2727 {
2728 	disable_delayed_work_sync(&queue->timeout.work);
2729 	drm_sched_stop(&queue->scheduler, bad_job ? &bad_job->base : NULL);
2730 }
2731 
2732 static void queue_start(struct panthor_queue *queue)
2733 {
2734 	struct panthor_job *job;
2735 
2736 	/* Re-assign the parent fences. */
2737 	list_for_each_entry(job, &queue->scheduler.pending_list, base.list)
2738 		job->base.s_fence->parent = dma_fence_get(job->done_fence);
2739 
2740 	enable_delayed_work(&queue->timeout.work);
2741 	drm_sched_start(&queue->scheduler, 0);
2742 }
2743 
2744 static void panthor_group_stop(struct panthor_group *group)
2745 {
2746 	struct panthor_scheduler *sched = group->ptdev->scheduler;
2747 
2748 	lockdep_assert_held(&sched->reset.lock);
2749 
2750 	for (u32 i = 0; i < group->queue_count; i++)
2751 		queue_stop(group->queues[i], NULL);
2752 
2753 	group_get(group);
2754 	list_move_tail(&group->run_node, &sched->reset.stopped_groups);
2755 }
2756 
2757 static void panthor_group_start(struct panthor_group *group)
2758 {
2759 	struct panthor_scheduler *sched = group->ptdev->scheduler;
2760 
2761 	lockdep_assert_held(&group->ptdev->scheduler->reset.lock);
2762 
2763 	for (u32 i = 0; i < group->queue_count; i++)
2764 		queue_start(group->queues[i]);
2765 
2766 	if (group_can_run(group)) {
2767 		list_move_tail(&group->run_node,
2768 			       group_is_idle(group) ?
2769 			       &sched->groups.idle[group->priority] :
2770 			       &sched->groups.runnable[group->priority]);
2771 	} else {
2772 		list_del_init(&group->run_node);
2773 		list_del_init(&group->wait_node);
2774 		group_queue_work(group, term);
2775 	}
2776 
2777 	group_put(group);
2778 }
2779 
2780 /**
2781  * panthor_sched_report_mmu_fault() - Report MMU faults to the scheduler.
 * @ptdev: Device.
2782  */
2783 void panthor_sched_report_mmu_fault(struct panthor_device *ptdev)
2784 {
2785 	/* Force a tick to immediately kill faulty groups. */
2786 	if (ptdev->scheduler)
2787 		sched_queue_delayed_work(ptdev->scheduler, tick, 0);
2788 }
2789 
2790 void panthor_sched_prepare_for_vm_destruction(struct panthor_device *ptdev)
2791 {
2792 	/* FW can write out internal state, like the heap context, during CSG
2793 	 * suspend. It is therefore important that the scheduler has fully
2794 	 * evicted any pending and related groups before VM destruction can
2795 	 * safely continue. Failure to do so can lead to GPU page faults.
2796 	 * A controlled termination of a Panthor instance involves destroying
2797 	 * the group(s) before the VM. This means any relevant group eviction
2798 	 * has already been initiated by this point, and we just need to
2799 	 * ensure that any pending tick_work() has been completed.
2800 	 */
2801 	flush_work(&ptdev->scheduler->tick_work.work);
2802 }
2803 
2804 void panthor_sched_resume(struct panthor_device *ptdev)
2805 {
2806 	/* Force a tick to re-evaluate after a resume. */
2807 	sched_queue_delayed_work(ptdev->scheduler, tick, 0);
2808 }
2809 
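/* Suspend all groups currently bound to a CSG slot: runnable groups get a
 * SUSPEND request, unrunnable ones a TERMINATE, timeouts are escalated to
 * termination, caches are flushed so the suspend state is coherent, and every
 * group is finally unbound from its slot.
 */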
2810 void panthor_sched_suspend(struct panthor_device *ptdev)
2811 {
2812 	struct panthor_scheduler *sched = ptdev->scheduler;
2813 	struct panthor_csg_slots_upd_ctx upd_ctx;
2814 	u32 suspended_slots;
2815 	u32 i;
2816 
2817 	mutex_lock(&sched->lock);
2818 	csgs_upd_ctx_init(&upd_ctx);
2819 	for (i = 0; i < sched->csg_slot_count; i++) {
2820 		struct panthor_csg_slot *csg_slot = &sched->csg_slots[i];
2821 
2822 		if (csg_slot->group) {
2823 			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, i,
2824 						group_can_run(csg_slot->group) ?
2825 						CSG_STATE_SUSPEND : CSG_STATE_TERMINATE,
2826 						CSG_STATE_MASK);
2827 		}
2828 	}
2829 
2830 	suspended_slots = upd_ctx.update_mask;
2831 
2832 	csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
2833 	suspended_slots &= ~upd_ctx.timedout_mask;
2834 
2835 	if (upd_ctx.timedout_mask) {
2836 		u32 slot_mask = upd_ctx.timedout_mask;
2837 
2838 		drm_err(&ptdev->base, "CSG suspend failed, escalating to termination");
2839 		csgs_upd_ctx_init(&upd_ctx);
2840 		while (slot_mask) {
2841 			u32 csg_id = ffs(slot_mask) - 1;
2842 			struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
2843 
2844 			/* If the group was still usable before that point, we consider
2845 			 * it innocent.
2846 			 */
2847 			if (group_can_run(csg_slot->group))
2848 				csg_slot->group->innocent = true;
2849 
2850 			/* We consider group suspension failures as fatal and flag the
2851 			 * group as unusable by setting timedout=true.
2852 			 */
2853 			csg_slot->group->timedout = true;
2854 
2855 			csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
2856 						CSG_STATE_TERMINATE,
2857 						CSG_STATE_MASK);
2858 			slot_mask &= ~BIT(csg_id);
2859 		}
2860 
2861 		csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
2862 
2863 		slot_mask = upd_ctx.timedout_mask;
2864 		while (slot_mask) {
2865 			u32 csg_id = ffs(slot_mask) - 1;
2866 			struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
2867 			struct panthor_group *group = csg_slot->group;
2868 
2869 			/* The terminate command timed out, but the soft-reset will
2870 			 * automatically terminate all active groups, so let's
2871 			 * force the state to terminated here.
2872 			 */
2873 			if (group->state != PANTHOR_CS_GROUP_TERMINATED) {
2874 				group->state = PANTHOR_CS_GROUP_TERMINATED;
2875 
2876 				/* Reset the queue slots manually if the termination
2877 				 * request failed.
2878 				 */
2879 				for (i = 0; i < group->queue_count; i++) {
2880 					if (group->queues[i])
2881 						cs_slot_reset_locked(ptdev, csg_id, i);
2882 				}
2883 			}
2884 			slot_mask &= ~BIT(csg_id);
2885 		}
2886 	}
2887 
2888 	/* Flush L2 and LSC caches to make sure suspend state is up-to-date.
2889 	 * If the flush fails, flag all queues for termination.
2890 	 */
2891 	if (suspended_slots) {
2892 		bool flush_caches_failed = false;
2893 		u32 slot_mask = suspended_slots;
2894 
2895 		if (panthor_gpu_flush_caches(ptdev, CACHE_CLEAN, CACHE_CLEAN, 0))
2896 			flush_caches_failed = true;
2897 
2898 		while (slot_mask) {
2899 			u32 csg_id = ffs(slot_mask) - 1;
2900 			struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
2901 
2902 			if (flush_caches_failed)
2903 				csg_slot->group->state = PANTHOR_CS_GROUP_TERMINATED;
2904 			else
2905 				csg_slot_sync_update_locked(ptdev, csg_id);
2906 
2907 			slot_mask &= ~BIT(csg_id);
2908 		}
2909 	}
2910 
2911 	for (i = 0; i < sched->csg_slot_count; i++) {
2912 		struct panthor_csg_slot *csg_slot = &sched->csg_slots[i];
2913 		struct panthor_group *group = csg_slot->group;
2914 
2915 		if (!group)
2916 			continue;
2917 
2918 		group_get(group);
2919 
2920 		if (group->csg_id >= 0)
2921 			sched_process_csg_irq_locked(ptdev, group->csg_id);
2922 
2923 		group_unbind_locked(group);
2924 
2925 		drm_WARN_ON(&group->ptdev->base, !list_empty(&group->run_node));
2926 
2927 		if (group_can_run(group)) {
2928 			list_add(&group->run_node,
2929 				 &sched->groups.idle[group->priority]);
2930 		} else {
2931 			/* We don't bother stopping the scheduler if the group is
2932 			 * faulty, the group termination work will finish the job.
2933 			 */
2934 			list_del_init(&group->wait_node);
2935 			group_queue_work(group, term);
2936 		}
2937 		group_put(group);
2938 	}
2939 	mutex_unlock(&sched->lock);
2940 }
2941 
2942 void panthor_sched_pre_reset(struct panthor_device *ptdev)
2943 {
2944 	struct panthor_scheduler *sched = ptdev->scheduler;
2945 	struct panthor_group *group, *group_tmp;
2946 	u32 i;
2947 
2948 	mutex_lock(&sched->reset.lock);
2949 	atomic_set(&sched->reset.in_progress, true);
2950 
2951 	/* Cancel all scheduler works. Once this is done, these works can't be
2952 	 * scheduled again until the reset operation is complete.
2953 	 */
2954 	cancel_work_sync(&sched->sync_upd_work);
2955 	cancel_delayed_work_sync(&sched->tick_work);
2956 
2957 	panthor_sched_suspend(ptdev);
2958 
2959 	/* Stop all groups that might still accept jobs, so we don't get passed
2960 	 * new jobs while we're resetting.
2961 	 */
2962 	for (i = 0; i < ARRAY_SIZE(sched->groups.runnable); i++) {
2963 		list_for_each_entry_safe(group, group_tmp, &sched->groups.runnable[i], run_node)
2964 			panthor_group_stop(group);
2965 	}
2966 
2967 	for (i = 0; i < ARRAY_SIZE(sched->groups.idle); i++) {
2968 		list_for_each_entry_safe(group, group_tmp, &sched->groups.idle[i], run_node)
2969 			panthor_group_stop(group);
2970 	}
2971 
2972 	mutex_unlock(&sched->reset.lock);
2973 }
2974 
2975 void panthor_sched_post_reset(struct panthor_device *ptdev, bool reset_failed)
2976 {
2977 	struct panthor_scheduler *sched = ptdev->scheduler;
2978 	struct panthor_group *group, *group_tmp;
2979 
2980 	mutex_lock(&sched->reset.lock);
2981 
2982 	list_for_each_entry_safe(group, group_tmp, &sched->reset.stopped_groups, run_node) {
2983 		/* Consider all previously running groups as terminated if the
2984 		 * reset failed.
2985 		 */
2986 		if (reset_failed)
2987 			group->state = PANTHOR_CS_GROUP_TERMINATED;
2988 
2989 		panthor_group_start(group);
2990 	}
2991 
2992 	/* We're done resetting the GPU, clear the reset.in_progress bit so we can
2993 	 * kick the scheduler.
2994 	 */
2995 	atomic_set(&sched->reset.in_progress, false);
2996 	mutex_unlock(&sched->reset.lock);
2997 
2998 	/* No need to queue a tick and update syncs if the reset failed. */
2999 	if (!reset_failed) {
3000 		sched_queue_delayed_work(sched, tick, 0);
3001 		sched_queue_work(sched, sync_upd);
3002 	}
3003 }
3004 
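/* Fold the cycle/timestamp deltas sampled around the job (see the
 * STORE_STATE instructions emitted by prepare_job_instrs()) into the group
 * fdinfo counters, which panthor_fdinfo_gather_group_samples() later drains.
 */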
3005 static void update_fdinfo_stats(struct panthor_job *job)
3006 {
3007 	struct panthor_group *group = job->group;
3008 	struct panthor_queue *queue = group->queues[job->queue_idx];
3009 	struct panthor_gpu_usage *fdinfo = &group->fdinfo.data;
3010 	struct panthor_job_profiling_data *slots = queue->profiling.slots->kmap;
3011 	struct panthor_job_profiling_data *data = &slots[job->profiling.slot];
3012 
3013 	scoped_guard(spinlock, &group->fdinfo.lock) {
3014 		if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_CYCLES)
3015 			fdinfo->cycles += data->cycles.after - data->cycles.before;
3016 		if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_TIMESTAMP)
3017 			fdinfo->time += data->time.after - data->time.before;
3018 	}
3019 }
3020 
3021 void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile)
3022 {
3023 	struct panthor_group_pool *gpool = pfile->groups;
3024 	struct panthor_group *group;
3025 	unsigned long i;
3026 
3027 	if (IS_ERR_OR_NULL(gpool))
3028 		return;
3029 
3030 	xa_lock(&gpool->xa);
3031 	xa_for_each_marked(&gpool->xa, i, group, GROUP_REGISTERED) {
3032 		guard(spinlock)(&group->fdinfo.lock);
3033 		pfile->stats.cycles += group->fdinfo.data.cycles;
3034 		pfile->stats.time += group->fdinfo.data.time;
3035 		group->fdinfo.data.cycles = 0;
3036 		group->fdinfo.data.time = 0;
3037 	}
3038 	xa_unlock(&gpool->xa);
3039 }
3040 
3041 static bool queue_check_job_completion(struct panthor_queue *queue)
3042 {
3043 	struct panthor_syncobj_64b *syncobj = NULL;
3044 	struct panthor_job *job, *job_tmp;
3045 	bool cookie, progress = false;
3046 	LIST_HEAD(done_jobs);
3047 
3048 	cookie = dma_fence_begin_signalling();
3049 	spin_lock(&queue->fence_ctx.lock);
3050 	list_for_each_entry_safe(job, job_tmp, &queue->fence_ctx.in_flight_jobs, node) {
3051 		if (!syncobj) {
3052 			struct panthor_group *group = job->group;
3053 
3054 			syncobj = group->syncobjs->kmap +
3055 				  (job->queue_idx * sizeof(*syncobj));
3056 		}
3057 
3058 		if (syncobj->seqno < job->done_fence->seqno)
3059 			break;
3060 
3061 		list_move_tail(&job->node, &done_jobs);
3062 		dma_fence_signal_locked(job->done_fence);
3063 	}
3064 
3065 	if (list_empty(&queue->fence_ctx.in_flight_jobs)) {
3066 		/* If we have no job left, we cancel the timer, and reset remaining
3067 		 * time to its default so it can be restarted next time
3068 		 * queue_resume_timeout() is called.
3069 		 */
3070 		queue_suspend_timeout_locked(queue);
3071 
3072 		/* If there's no job pending, we consider it progress to avoid a
3073 		 * spurious timeout if the timeout handler and the sync update
3074 		 * handler raced.
3075 		 */
3076 		progress = true;
3077 	} else if (!list_empty(&done_jobs)) {
3078 		queue_reset_timeout_locked(queue);
3079 		progress = true;
3080 	}
3081 	spin_unlock(&queue->fence_ctx.lock);
3082 	dma_fence_end_signalling(cookie);
3083 
3084 	list_for_each_entry_safe(job, job_tmp, &done_jobs, node) {
3085 		if (job->profiling.mask)
3086 			update_fdinfo_stats(job);
3087 		list_del_init(&job->node);
3088 		panthor_job_put(&job->base);
3089 	}
3090 
3091 	return progress;
3092 }
3093 
3094 static void group_sync_upd_work(struct work_struct *work)
3095 {
3096 	struct panthor_group *group =
3097 		container_of(work, struct panthor_group, sync_upd_work);
3098 	u32 queue_idx;
3099 	bool cookie;
3100 
3101 	cookie = dma_fence_begin_signalling();
3102 	for (queue_idx = 0; queue_idx < group->queue_count; queue_idx++) {
3103 		struct panthor_queue *queue = group->queues[queue_idx];
3104 
3105 		if (!queue)
3106 			continue;
3107 
3108 		queue_check_job_completion(queue);
3109 	}
3110 	dma_fence_end_signalling(cookie);
3111 
3112 	group_put(group);
3113 }
3114 
3115 struct panthor_job_ringbuf_instrs {
3116 	u64 buffer[MAX_INSTRS_PER_JOB];
3117 	u32 count;
3118 };
3119 
3120 struct panthor_job_instr {
3121 	u32 profile_mask;
3122 	u64 instr;
3123 };
3124 
3125 #define JOB_INSTR(__prof, __instr) \
3126 	{ \
3127 		.profile_mask = __prof, \
3128 		.instr = __instr, \
3129 	}
3130 
3131 static void
3132 copy_instrs_to_ringbuf(struct panthor_queue *queue,
3133 		       struct panthor_job *job,
3134 		       struct panthor_job_ringbuf_instrs *instrs)
3135 {
3136 	u64 ringbuf_size = panthor_kernel_bo_size(queue->ringbuf);
3137 	u64 start = job->ringbuf.start & (ringbuf_size - 1);
3138 	u64 size, written;
3139 
3140 	/*
3141 	 * We need to write a whole slot, including any trailing zeroes
3142 	 * that may come at the end of it. Also, because instrs.buffer has
3143 	 * been zero-initialised, there's no need to pad it with 0's
3144 	 */
3145 	instrs->count = ALIGN(instrs->count, NUM_INSTRS_PER_CACHE_LINE);
3146 	size = instrs->count * sizeof(u64);
3147 	WARN_ON(size > ringbuf_size);
3148 	written = min(ringbuf_size - start, size);
3149 
3150 	memcpy(queue->ringbuf->kmap + start, instrs->buffer, written);
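	/* Copy up to the end of the ring buffer, then wrap around to the
	 * beginning for whatever is left.
	 */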
3151 
3152 	if (written < size)
3153 		memcpy(queue->ringbuf->kmap,
3154 		       &instrs->buffer[written / sizeof(u64)],
3155 		       size - written);
3156 }
3157 
3158 struct panthor_job_cs_params {
3159 	u32 profile_mask;
3160 	u64 addr_reg; u64 val_reg;
3161 	u64 cycle_reg; u64 time_reg;
3162 	u64 sync_addr; u64 times_addr;
3163 	u64 cs_start; u64 cs_size;
3164 	u32 last_flush; u32 waitall_mask;
3165 };
3166 
3167 static void
3168 get_job_cs_params(struct panthor_job *job, struct panthor_job_cs_params *params)
3169 {
3170 	struct panthor_group *group = job->group;
3171 	struct panthor_queue *queue = group->queues[job->queue_idx];
3172 	struct panthor_device *ptdev = group->ptdev;
3173 	struct panthor_scheduler *sched = ptdev->scheduler;
3174 
3175 	params->addr_reg = ptdev->csif_info.cs_reg_count -
3176 			   ptdev->csif_info.unpreserved_cs_reg_count;
3177 	params->val_reg = params->addr_reg + 2;
3178 	params->cycle_reg = params->addr_reg;
3179 	params->time_reg = params->val_reg;
3180 
3181 	params->sync_addr = panthor_kernel_bo_gpuva(group->syncobjs) +
3182 			    job->queue_idx * sizeof(struct panthor_syncobj_64b);
3183 	params->times_addr = panthor_kernel_bo_gpuva(queue->profiling.slots) +
3184 			     (job->profiling.slot * sizeof(struct panthor_job_profiling_data));
3185 	params->waitall_mask = GENMASK(sched->sb_slot_count - 1, 0);
3186 
3187 	params->cs_start = job->call_info.start;
3188 	params->cs_size = job->call_info.size;
3189 	params->last_flush = job->call_info.latest_flush;
3190 
3191 	params->profile_mask = job->profiling.mask;
3192 }
3193 
3194 #define JOB_INSTR_ALWAYS(instr) \
3195 	JOB_INSTR(PANTHOR_DEVICE_PROFILING_DISABLED, (instr))
3196 #define JOB_INSTR_TIMESTAMP(instr) \
3197 	JOB_INSTR(PANTHOR_DEVICE_PROFILING_TIMESTAMP, (instr))
3198 #define JOB_INSTR_CYCLES(instr) \
3199 	JOB_INSTR(PANTHOR_DEVICE_PROFILING_CYCLES, (instr))
3200 
3201 static void
3202 prepare_job_instrs(const struct panthor_job_cs_params *params,
3203 		   struct panthor_job_ringbuf_instrs *instrs)
3204 {
3205 	const struct panthor_job_instr instr_seq[] = {
3206 		/* MOV32 rX+2, cs.latest_flush */
3207 		JOB_INSTR_ALWAYS((2ull << 56) | (params->val_reg << 48) | params->last_flush),
3208 		/* FLUSH_CACHE2.clean_inv_all.no_wait.signal(0) rX+2 */
3209 		JOB_INSTR_ALWAYS((36ull << 56) | (0ull << 48) | (params->val_reg << 40) |
3210 				 (0 << 16) | 0x233),
3211 		/* MOV48 rX:rX+1, cycles_offset */
3212 		JOB_INSTR_CYCLES((1ull << 56) | (params->cycle_reg << 48) |
3213 				 (params->times_addr +
3214 				  offsetof(struct panthor_job_profiling_data, cycles.before))),
3215 		/* STORE_STATE cycles */
3216 		JOB_INSTR_CYCLES((40ull << 56) | (params->cycle_reg << 40) | (1ll << 32)),
3217 		/* MOV48 rX:rX+1, time_offset */
3218 		JOB_INSTR_TIMESTAMP((1ull << 56) | (params->time_reg << 48) |
3219 				    (params->times_addr +
3220 				     offsetof(struct panthor_job_profiling_data, time.before))),
3221 		/* STORE_STATE timer */
3222 		JOB_INSTR_TIMESTAMP((40ull << 56) | (params->time_reg << 40) | (0ll << 32)),
3223 		/* MOV48 rX:rX+1, cs.start */
3224 		JOB_INSTR_ALWAYS((1ull << 56) | (params->addr_reg << 48) | params->cs_start),
3225 		/* MOV32 rX+2, cs.size */
3226 		JOB_INSTR_ALWAYS((2ull << 56) | (params->val_reg << 48) | params->cs_size),
3227 		/* WAIT(0) => waits for FLUSH_CACHE2 instruction */
3228 		JOB_INSTR_ALWAYS((3ull << 56) | (1 << 16)),
3229 		/* CALL rX:rX+1, rX+2 */
3230 		JOB_INSTR_ALWAYS((32ull << 56) | (params->addr_reg << 40) |
3231 				 (params->val_reg << 32)),
3232 		/* MOV48 rX:rX+1, cycles_offset */
3233 		JOB_INSTR_CYCLES((1ull << 56) | (params->cycle_reg << 48) |
3234 				 (params->times_addr +
3235 				  offsetof(struct panthor_job_profiling_data, cycles.after))),
3236 		/* STORE_STATE cycles */
3237 		JOB_INSTR_CYCLES((40ull << 56) | (params->cycle_reg << 40) | (1ll << 32)),
3238 		/* MOV48 rX:rX+1, time_offset */
3239 		JOB_INSTR_TIMESTAMP((1ull << 56) | (params->time_reg << 48) |
3240 			  (params->times_addr +
3241 			   offsetof(struct panthor_job_profiling_data, time.after))),
3242 		/* STORE_STATE timer */
3243 		JOB_INSTR_TIMESTAMP((40ull << 56) | (params->time_reg << 40) | (0ll << 32)),
3244 		/* MOV48 rX:rX+1, sync_addr */
3245 		JOB_INSTR_ALWAYS((1ull << 56) | (params->addr_reg << 48) | params->sync_addr),
3246 		/* MOV48 rX+2, #1 */
3247 		JOB_INSTR_ALWAYS((1ull << 56) | (params->val_reg << 48) | 1),
3248 		/* WAIT(all) */
3249 		JOB_INSTR_ALWAYS((3ull << 56) | (params->waitall_mask << 16)),
3250 		/* SYNC_ADD64.system_scope.propagate_err.nowait rX:rX+1, rX+2 */
3251 		JOB_INSTR_ALWAYS((51ull << 56) | (0ull << 48) | (params->addr_reg << 40) |
3252 				 (params->val_reg << 32) | (0 << 16) | 1),
3253 		/* ERROR_BARRIER, so we can recover from faults at job boundaries. */
3254 		JOB_INSTR_ALWAYS((47ull << 56)),
3255 	};
3256 	u32 pad;
3257 
3258 	instrs->count = 0;
3259 
3260 	/* NEED to be cacheline aligned to please the prefetcher. */
3261 	static_assert(sizeof(instrs->buffer) % 64 == 0,
3262 		      "panthor_job_ringbuf_instrs::buffer is not aligned on a cacheline");
3263 
3264 	/* Make sure we have enough storage to store the whole sequence. */
3265 	static_assert(ALIGN(ARRAY_SIZE(instr_seq), NUM_INSTRS_PER_CACHE_LINE) ==
3266 		      ARRAY_SIZE(instrs->buffer),
3267 		      "instr_seq vs panthor_job_ringbuf_instrs::buffer size mismatch");
3268 
3269 	for (u32 i = 0; i < ARRAY_SIZE(instr_seq); i++) {
3270 		/* If the profile mask of this instruction is not enabled, skip it. */
3271 		if (instr_seq[i].profile_mask &&
3272 		    !(instr_seq[i].profile_mask & params->profile_mask))
3273 			continue;
3274 
3275 		instrs->buffer[instrs->count++] = instr_seq[i].instr;
3276 	}
3277 
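	/*
	 * Illustrative example: assuming NUM_INSTRS_PER_CACHE_LINE == 8
	 * (64-byte cache line, 8-byte instructions), a 19-instruction
	 * sequence gets padded with 5 zeroed slots so the next job starts
	 * on a cache-line boundary.
	 */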
3278 	pad = ALIGN(instrs->count, NUM_INSTRS_PER_CACHE_LINE);
3279 	memset(&instrs->buffer[instrs->count], 0,
3280 	       (pad - instrs->count) * sizeof(instrs->buffer[0]));
3281 	instrs->count = pad;
3282 }
3283 
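/*
 * Number of ring buffer slots (64-bit instructions) a job with the given
 * profiling mask consumes. This is used both as the drm_sched credit count
 * for the job and to size the per-queue profiling slot array.
 */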
3284 static u32 calc_job_credits(u32 profile_mask)
3285 {
3286 	struct panthor_job_ringbuf_instrs instrs;
3287 	struct panthor_job_cs_params params = {
3288 		.profile_mask = profile_mask,
3289 	};
3290 
3291 	prepare_job_instrs(&params, &instrs);
3292 	return instrs.count;
3293 }
3294 
3295 static struct dma_fence *
3296 queue_run_job(struct drm_sched_job *sched_job)
3297 {
3298 	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
3299 	struct panthor_group *group = job->group;
3300 	struct panthor_queue *queue = group->queues[job->queue_idx];
3301 	struct panthor_device *ptdev = group->ptdev;
3302 	struct panthor_scheduler *sched = ptdev->scheduler;
3303 	struct panthor_job_ringbuf_instrs instrs;
3304 	struct panthor_job_cs_params cs_params;
3305 	struct dma_fence *done_fence;
3306 	int ret;
3307 
3308 	/* Stream size is zero; nothing to do except make sure all previously
3309 	 * submitted jobs are done before we signal the
3310 	 * drm_sched_job::s_fence::finished fence.
3311 	 */
3312 	if (!job->call_info.size) {
3313 		job->done_fence = dma_fence_get(queue->fence_ctx.last_fence);
3314 		return dma_fence_get(job->done_fence);
3315 	}
3316 
3317 	ret = panthor_device_resume_and_get(ptdev);
3318 	if (drm_WARN_ON(&ptdev->base, ret))
3319 		return ERR_PTR(ret);
3320 
3321 	mutex_lock(&sched->lock);
3322 	if (!group_can_run(group)) {
3323 		done_fence = ERR_PTR(-ECANCELED);
3324 		goto out_unlock;
3325 	}
3326 
3327 	dma_fence_init(job->done_fence,
3328 		       &panthor_queue_fence_ops,
3329 		       &queue->fence_ctx.lock,
3330 		       queue->fence_ctx.id,
3331 		       atomic64_inc_return(&queue->fence_ctx.seqno));
3332 
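	/*
	 * Grab the next profiling slot. Slots are reused in a ring fashion;
	 * slot_count was sized (see calc_profiling_ringbuf_num_slots()) so the
	 * ring buffer can't hold more profiled jobs than there are slots.
	 */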
3333 	job->profiling.slot = queue->profiling.seqno++;
3334 	if (queue->profiling.seqno == queue->profiling.slot_count)
3335 		queue->profiling.seqno = 0;
3336 
3337 	job->ringbuf.start = queue->iface.input->insert;
3338 
3339 	get_job_cs_params(job, &cs_params);
3340 	prepare_job_instrs(&cs_params, &instrs);
3341 	copy_instrs_to_ringbuf(queue, job, &instrs);
3342 
3343 	job->ringbuf.end = job->ringbuf.start + (instrs.count * sizeof(u64));
3344 
3345 	panthor_job_get(&job->base);
3346 	spin_lock(&queue->fence_ctx.lock);
3347 	list_add_tail(&job->node, &queue->fence_ctx.in_flight_jobs);
3348 	spin_unlock(&queue->fence_ctx.lock);
3349 
3350 	/* Make sure the ring buffer is updated before the INSERT
3351 	 * register.
3352 	 */
3353 	wmb();
3354 
3355 	queue->iface.input->extract = queue->iface.output->extract;
3356 	queue->iface.input->insert = job->ringbuf.end;
3357 
3358 	if (group->csg_id < 0) {
3359 		group_schedule_locked(group, BIT(job->queue_idx));
3360 	} else {
3361 		u32 queue_mask = BIT(job->queue_idx);
3362 		bool resume_tick = group_is_idle(group) &&
3363 				   (group->idle_queues & queue_mask) &&
3364 				   !(group->blocked_queues & queue_mask) &&
3365 				   sched->resched_target == U64_MAX;
3366 
3367 		/* We just added something to the queue, so it's no longer idle. */
3368 		group->idle_queues &= ~queue_mask;
3369 
3370 		if (resume_tick)
3371 			sched_resume_tick(ptdev);
3372 
3373 		gpu_write(ptdev, CSF_DOORBELL(queue->doorbell_id), 1);
3374 		if (!sched->pm.has_ref &&
3375 		    !(group->blocked_queues & BIT(job->queue_idx))) {
3376 			pm_runtime_get(ptdev->base.dev);
3377 			sched->pm.has_ref = true;
3378 		}
3379 		queue_resume_timeout(queue);
3380 		panthor_devfreq_record_busy(sched->ptdev);
3381 	}
3382 
3383 	/* Update the last fence. */
3384 	dma_fence_put(queue->fence_ctx.last_fence);
3385 	queue->fence_ctx.last_fence = dma_fence_get(job->done_fence);
3386 
3387 	done_fence = dma_fence_get(job->done_fence);
3388 
3389 out_unlock:
3390 	mutex_unlock(&sched->lock);
3391 	pm_runtime_mark_last_busy(ptdev->base.dev);
3392 	pm_runtime_put_autosuspend(ptdev->base.dev);
3393 
3394 	return done_fence;
3395 }
3396 
3397 static enum drm_gpu_sched_stat
3398 queue_timedout_job(struct drm_sched_job *sched_job)
3399 {
3400 	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
3401 	struct panthor_group *group = job->group;
3402 	struct panthor_device *ptdev = group->ptdev;
3403 	struct panthor_scheduler *sched = ptdev->scheduler;
3404 	struct panthor_queue *queue = group->queues[job->queue_idx];
3405 
3406 	drm_warn(&ptdev->base, "job timeout: pid=%d, comm=%s, seqno=%llu\n",
3407 		 group->task_info.pid, group->task_info.comm, job->done_fence->seqno);
3408 
3409 	drm_WARN_ON(&ptdev->base, atomic_read(&sched->reset.in_progress));
3410 
3411 	queue_stop(queue, job);
3412 
3413 	mutex_lock(&sched->lock);
3414 	group->timedout = true;
3415 	if (group->csg_id >= 0) {
3416 		sched_queue_delayed_work(ptdev->scheduler, tick, 0);
3417 	} else {
3418 		/* Remove from the run queues, so the scheduler can't
3419 		 * pick the group on the next tick.
3420 		 */
3421 		list_del_init(&group->run_node);
3422 		list_del_init(&group->wait_node);
3423 
3424 		group_queue_work(group, term);
3425 	}
3426 	mutex_unlock(&sched->lock);
3427 
3428 	queue_start(queue);
3429 	return DRM_GPU_SCHED_STAT_RESET;
3430 }
3431 
3432 static void queue_free_job(struct drm_sched_job *sched_job)
3433 {
3434 	drm_sched_job_cleanup(sched_job);
3435 	panthor_job_put(sched_job);
3436 }
3437 
3438 static const struct drm_sched_backend_ops panthor_queue_sched_ops = {
3439 	.run_job = queue_run_job,
3440 	.timedout_job = queue_timedout_job,
3441 	.free_job = queue_free_job,
3442 };
3443 
3444 static u32 calc_profiling_ringbuf_num_slots(struct panthor_device *ptdev,
3445 					    u32 cs_ringbuf_size)
3446 {
3447 	u32 min_profiled_job_instrs = U32_MAX;
3448 	u32 last_flag = fls(PANTHOR_DEVICE_PROFILING_ALL);
3449 
3450 	/*
3451 	 * We want to know the size of the smallest profiled job CS:
3452 	 * profiled jobs need extra instructions to sample performance
3453 	 * metrics, so they take up more slots in the queue's ring buffer,
3454 	 * which in turn means fewer of them can be in flight at once.
3455 	 * The number of profiling slots to allocate is therefore the
3456 	 * maximum number of profiled jobs that can sit in the ring buffer
3457 	 * simultaneously, i.e. the ring buffer size divided by the size
3458 	 * of the smallest profiled job.
3459 	 * This has to be computed separately for every job profiling flag,
3460 	 * but not for the profiling-disabled case, since unprofiled jobs
3461 	 * don't need to keep track of any of this.
3462 	 */
3463 	for (u32 i = 0; i < last_flag; i++) {
3464 		min_profiled_job_instrs =
3465 			min(min_profiled_job_instrs, calc_job_credits(BIT(i)));
3466 	}
3467 
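	/*
	 * Illustrative numbers: with a 16KiB ring buffer and a smallest
	 * profiled sequence of 16 instructions (128 bytes), this yields
	 * 128 profiling slots.
	 */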
3468 	return DIV_ROUND_UP(cs_ringbuf_size, min_profiled_job_instrs * sizeof(u64));
3469 }
3470 
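/*
 * Delayed work checking whether the queue made forward progress: if no job
 * completed since the check was (re)armed, a fault is reported to the
 * drm_sched so its timeout handling kicks in.
 */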
3471 static void queue_timeout_work(struct work_struct *work)
3472 {
3473 	struct panthor_queue *queue = container_of(work, struct panthor_queue,
3474 						   timeout.work.work);
3475 	bool progress;
3476 
3477 	progress = queue_check_job_completion(queue);
3478 	if (!progress)
3479 		drm_sched_fault(&queue->scheduler);
3480 }
3481 
3482 static struct panthor_queue *
3483 group_create_queue(struct panthor_group *group,
3484 		   const struct drm_panthor_queue_create *args,
3485 		   u64 drm_client_id, u32 gid, u32 qid)
3486 {
3487 	struct drm_sched_init_args sched_args = {
3488 		.ops = &panthor_queue_sched_ops,
3489 		.submit_wq = group->ptdev->scheduler->wq,
3490 		.num_rqs = 1,
3491 		/*
3492 		 * The credit limit argument tells us the total number of
3493 		 * instructions across all CS slots in the ringbuffer, with
3494 		 * some jobs requiring twice as many as others, depending on
3495 		 * their profiling status.
3496 		 */
3497 		.credit_limit = args->ringbuf_size / sizeof(u64),
3498 		.timeout = MAX_SCHEDULE_TIMEOUT,
3499 		.timeout_wq = group->ptdev->reset.wq,
3500 		.dev = group->ptdev->base.dev,
3501 	};
3502 	struct drm_gpu_scheduler *drm_sched;
3503 	struct panthor_queue *queue;
3504 	int ret;
3505 
3506 	if (args->pad[0] || args->pad[1] || args->pad[2])
3507 		return ERR_PTR(-EINVAL);
3508 
3509 	if (args->ringbuf_size < SZ_4K || args->ringbuf_size > SZ_64K ||
3510 	    !is_power_of_2(args->ringbuf_size))
3511 		return ERR_PTR(-EINVAL);
3512 
3513 	if (args->priority > CSF_MAX_QUEUE_PRIO)
3514 		return ERR_PTR(-EINVAL);
3515 
3516 	queue = kzalloc_obj(*queue);
3517 	if (!queue)
3518 		return ERR_PTR(-ENOMEM);
3519 
3520 	queue->timeout.remaining = msecs_to_jiffies(JOB_TIMEOUT_MS);
3521 	INIT_DELAYED_WORK(&queue->timeout.work, queue_timeout_work);
3522 	queue->fence_ctx.id = dma_fence_context_alloc(1);
3523 	spin_lock_init(&queue->fence_ctx.lock);
3524 	INIT_LIST_HEAD(&queue->fence_ctx.in_flight_jobs);
3525 
3526 	queue->priority = args->priority;
3527 
3528 	queue->ringbuf = panthor_kernel_bo_create(group->ptdev, group->vm,
3529 						  args->ringbuf_size,
3530 						  DRM_PANTHOR_BO_NO_MMAP,
3531 						  DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
3532 						  DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
3533 						  PANTHOR_VM_KERNEL_AUTO_VA,
3534 						  "CS ring buffer");
3535 	if (IS_ERR(queue->ringbuf)) {
3536 		ret = PTR_ERR(queue->ringbuf);
3537 		goto err_free_queue;
3538 	}
3539 
3540 	ret = panthor_kernel_bo_vmap(queue->ringbuf);
3541 	if (ret)
3542 		goto err_free_queue;
3543 
3544 	queue->iface.mem = panthor_fw_alloc_queue_iface_mem(group->ptdev,
3545 							    &queue->iface.input,
3546 							    &queue->iface.output,
3547 							    &queue->iface.input_fw_va,
3548 							    &queue->iface.output_fw_va);
3549 	if (IS_ERR(queue->iface.mem)) {
3550 		ret = PTR_ERR(queue->iface.mem);
3551 		goto err_free_queue;
3552 	}
3553 
3554 	queue->profiling.slot_count =
3555 		calc_profiling_ringbuf_num_slots(group->ptdev, args->ringbuf_size);
3556 
3557 	queue->profiling.slots =
3558 		panthor_kernel_bo_create(group->ptdev, group->vm,
3559 					 queue->profiling.slot_count *
3560 					 sizeof(struct panthor_job_profiling_data),
3561 					 DRM_PANTHOR_BO_NO_MMAP,
3562 					 DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
3563 					 DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
3564 					 PANTHOR_VM_KERNEL_AUTO_VA,
3565 					 "Group job stats");
3566 
3567 	if (IS_ERR(queue->profiling.slots)) {
3568 		ret = PTR_ERR(queue->profiling.slots);
3569 		goto err_free_queue;
3570 	}
3571 
3572 	ret = panthor_kernel_bo_vmap(queue->profiling.slots);
3573 	if (ret)
3574 		goto err_free_queue;
3575 
3576 	/* assign a unique name */
3577 	queue->name = kasprintf(GFP_KERNEL, "panthor-queue-%llu-%u-%u", drm_client_id, gid, qid);
3578 	if (!queue->name) {
3579 		ret = -ENOMEM;
3580 		goto err_free_queue;
3581 	}
3582 
3583 	sched_args.name = queue->name;
3584 
3585 	ret = drm_sched_init(&queue->scheduler, &sched_args);
3586 	if (ret)
3587 		goto err_free_queue;
3588 
3589 	drm_sched = &queue->scheduler;
3590 	ret = drm_sched_entity_init(&queue->entity, 0, &drm_sched, 1, NULL);
3591 	if (ret)
3592 		goto err_free_queue;
3593 
3594 	return queue;
3595 
3596 err_free_queue:
3597 	group_free_queue(group, queue);
3598 	return ERR_PTR(ret);
3599 }
3600 
3601 static void group_init_task_info(struct panthor_group *group)
3602 {
3603 	struct task_struct *task = current->group_leader;
3604 
3605 	group->task_info.pid = task->pid;
3606 	get_task_comm(group->task_info.comm, task);
3607 }
3608 
3609 static void add_group_kbo_sizes(struct panthor_device *ptdev,
3610 				struct panthor_group *group)
3611 {
3612 	struct panthor_queue *queue;
3613 	int i;
3614 
3615 	if (drm_WARN_ON(&ptdev->base, IS_ERR_OR_NULL(group)))
3616 		return;
3617 	if (drm_WARN_ON(&ptdev->base, ptdev != group->ptdev))
3618 		return;
3619 
3620 	group->fdinfo.kbo_sizes += group->suspend_buf->obj->size;
3621 	group->fdinfo.kbo_sizes += group->protm_suspend_buf->obj->size;
3622 	group->fdinfo.kbo_sizes += group->syncobjs->obj->size;
3623 
3624 	for (i = 0; i < group->queue_count; i++) {
3625 		queue = group->queues[i];
3626 		group->fdinfo.kbo_sizes += queue->ringbuf->obj->size;
3627 		group->fdinfo.kbo_sizes += queue->iface.mem->obj->size;
3628 		group->fdinfo.kbo_sizes += queue->profiling.slots->obj->size;
3629 	}
3630 }
3631 
3632 #define MAX_GROUPS_PER_POOL		128
3633 
3634 int panthor_group_create(struct panthor_file *pfile,
3635 			 const struct drm_panthor_group_create *group_args,
3636 			 const struct drm_panthor_queue_create *queue_args,
3637 			 u64 drm_client_id)
3638 {
3639 	struct panthor_device *ptdev = pfile->ptdev;
3640 	struct panthor_group_pool *gpool = pfile->groups;
3641 	struct panthor_scheduler *sched = ptdev->scheduler;
3642 	struct panthor_fw_csg_iface *csg_iface = panthor_fw_get_csg_iface(ptdev, 0);
3643 	struct panthor_group *group = NULL;
3644 	u32 gid, i, suspend_size;
3645 	int ret;
3646 
3647 	if (group_args->pad)
3648 		return -EINVAL;
3649 
3650 	if (group_args->priority >= PANTHOR_CSG_PRIORITY_COUNT)
3651 		return -EINVAL;
3652 
3653 	if ((group_args->compute_core_mask & ~ptdev->gpu_info.shader_present) ||
3654 	    (group_args->fragment_core_mask & ~ptdev->gpu_info.shader_present) ||
3655 	    (group_args->tiler_core_mask & ~ptdev->gpu_info.tiler_present))
3656 		return -EINVAL;
3657 
3658 	if (hweight64(group_args->compute_core_mask) < group_args->max_compute_cores ||
3659 	    hweight64(group_args->fragment_core_mask) < group_args->max_fragment_cores ||
3660 	    hweight64(group_args->tiler_core_mask) < group_args->max_tiler_cores)
3661 		return -EINVAL;
3662 
3663 	group = kzalloc_obj(*group);
3664 	if (!group)
3665 		return -ENOMEM;
3666 
3667 	spin_lock_init(&group->fatal_lock);
3668 	kref_init(&group->refcount);
3669 	group->state = PANTHOR_CS_GROUP_CREATED;
3670 	group->csg_id = -1;
3671 
3672 	group->ptdev = ptdev;
3673 	group->max_compute_cores = group_args->max_compute_cores;
3674 	group->compute_core_mask = group_args->compute_core_mask;
3675 	group->max_fragment_cores = group_args->max_fragment_cores;
3676 	group->fragment_core_mask = group_args->fragment_core_mask;
3677 	group->max_tiler_cores = group_args->max_tiler_cores;
3678 	group->tiler_core_mask = group_args->tiler_core_mask;
3679 	group->priority = group_args->priority;
3680 
3681 	INIT_LIST_HEAD(&group->wait_node);
3682 	INIT_LIST_HEAD(&group->run_node);
3683 	INIT_WORK(&group->term_work, group_term_work);
3684 	INIT_WORK(&group->sync_upd_work, group_sync_upd_work);
3685 	INIT_WORK(&group->tiler_oom_work, group_tiler_oom_work);
3686 	INIT_WORK(&group->release_work, group_release_work);
3687 
3688 	group->vm = panthor_vm_pool_get_vm(pfile->vms, group_args->vm_id);
3689 	if (!group->vm) {
3690 		ret = -EINVAL;
3691 		goto err_put_group;
3692 	}
3693 
3694 	suspend_size = csg_iface->control->suspend_size;
3695 	group->suspend_buf = panthor_fw_alloc_suspend_buf_mem(ptdev, suspend_size);
3696 	if (IS_ERR(group->suspend_buf)) {
3697 		ret = PTR_ERR(group->suspend_buf);
3698 		group->suspend_buf = NULL;
3699 		goto err_put_group;
3700 	}
3701 
3702 	suspend_size = csg_iface->control->protm_suspend_size;
3703 	group->protm_suspend_buf = panthor_fw_alloc_suspend_buf_mem(ptdev, suspend_size);
3704 	if (IS_ERR(group->protm_suspend_buf)) {
3705 		ret = PTR_ERR(group->protm_suspend_buf);
3706 		group->protm_suspend_buf = NULL;
3707 		goto err_put_group;
3708 	}
3709 
3710 	group->syncobjs = panthor_kernel_bo_create(ptdev, group->vm,
3711 						   group_args->queues.count *
3712 						   sizeof(struct panthor_syncobj_64b),
3713 						   DRM_PANTHOR_BO_NO_MMAP,
3714 						   DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
3715 						   DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
3716 						   PANTHOR_VM_KERNEL_AUTO_VA,
3717 						   "Group sync objects");
3718 	if (IS_ERR(group->syncobjs)) {
3719 		ret = PTR_ERR(group->syncobjs);
3720 		goto err_put_group;
3721 	}
3722 
3723 	ret = panthor_kernel_bo_vmap(group->syncobjs);
3724 	if (ret)
3725 		goto err_put_group;
3726 
3727 	memset(group->syncobjs->kmap, 0,
3728 	       group_args->queues.count * sizeof(struct panthor_syncobj_64b));
3729 
3730 	ret = xa_alloc(&gpool->xa, &gid, group, XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL);
3731 	if (ret)
3732 		goto err_put_group;
3733 
3734 	for (i = 0; i < group_args->queues.count; i++) {
3735 		group->queues[i] = group_create_queue(group, &queue_args[i], drm_client_id, gid, i);
3736 		if (IS_ERR(group->queues[i])) {
3737 			ret = PTR_ERR(group->queues[i]);
3738 			group->queues[i] = NULL;
3739 			goto err_erase_gid;
3740 		}
3741 
3742 		group->queue_count++;
3743 	}
3744 
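	/* All queues start out idle until their first job is submitted. */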
3745 	group->idle_queues = GENMASK(group->queue_count - 1, 0);
3746 
3747 	mutex_lock(&sched->reset.lock);
3748 	if (atomic_read(&sched->reset.in_progress)) {
3749 		panthor_group_stop(group);
3750 	} else {
3751 		mutex_lock(&sched->lock);
3752 		list_add_tail(&group->run_node,
3753 			      &sched->groups.idle[group->priority]);
3754 		mutex_unlock(&sched->lock);
3755 	}
3756 	mutex_unlock(&sched->reset.lock);
3757 
3758 	add_group_kbo_sizes(group->ptdev, group);
3759 	spin_lock_init(&group->fdinfo.lock);
3760 
3761 	group_init_task_info(group);
3762 
3763 	xa_set_mark(&gpool->xa, gid, GROUP_REGISTERED);
3764 
3765 	return gid;
3766 
3767 err_erase_gid:
3768 	xa_erase(&gpool->xa, gid);
3769 
3770 err_put_group:
3771 	group_put(group);
3772 	return ret;
3773 }
3774 
3775 int panthor_group_destroy(struct panthor_file *pfile, u32 group_handle)
3776 {
3777 	struct panthor_group_pool *gpool = pfile->groups;
3778 	struct panthor_device *ptdev = pfile->ptdev;
3779 	struct panthor_scheduler *sched = ptdev->scheduler;
3780 	struct panthor_group *group;
3781 
3782 	if (!xa_get_mark(&gpool->xa, group_handle, GROUP_REGISTERED))
3783 		return -EINVAL;
3784 
3785 	group = xa_erase(&gpool->xa, group_handle);
3786 	if (!group)
3787 		return -EINVAL;
3788 
3789 	mutex_lock(&sched->reset.lock);
3790 	mutex_lock(&sched->lock);
3791 	group->destroyed = true;
3792 	if (group->csg_id >= 0) {
3793 		sched_queue_delayed_work(sched, tick, 0);
3794 	} else if (!atomic_read(&sched->reset.in_progress)) {
3795 		/* Remove from the run queues, so the scheduler can't
3796 		 * pick the group on the next tick.
3797 		 */
3798 		list_del_init(&group->run_node);
3799 		list_del_init(&group->wait_node);
3800 		group_queue_work(group, term);
3801 	}
3802 	mutex_unlock(&sched->lock);
3803 	mutex_unlock(&sched->reset.lock);
3804 
3805 	group_put(group);
3806 	return 0;
3807 }
3808 
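/*
 * Look up a group by handle and grab a reference. Only groups that have been
 * marked GROUP_REGISTERED are returned.
 */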
3809 static struct panthor_group *group_from_handle(struct panthor_group_pool *pool,
3810 					       unsigned long group_handle)
3811 {
3812 	struct panthor_group *group;
3813 
3814 	xa_lock(&pool->xa);
3815 	group = group_get(xa_find(&pool->xa, &group_handle, group_handle, GROUP_REGISTERED));
3816 	xa_unlock(&pool->xa);
3817 
3818 	return group;
3819 }
3820 
3821 int panthor_group_get_state(struct panthor_file *pfile,
3822 			    struct drm_panthor_group_get_state *get_state)
3823 {
3824 	struct panthor_group_pool *gpool = pfile->groups;
3825 	struct panthor_device *ptdev = pfile->ptdev;
3826 	struct panthor_scheduler *sched = ptdev->scheduler;
3827 	struct panthor_group *group;
3828 
3829 	if (get_state->pad)
3830 		return -EINVAL;
3831 
3832 	group = group_from_handle(gpool, get_state->group_handle);
3833 	if (!group)
3834 		return -EINVAL;
3835 
3836 	memset(get_state, 0, sizeof(*get_state));
3837 
3838 	mutex_lock(&sched->lock);
3839 	if (group->timedout)
3840 		get_state->state |= DRM_PANTHOR_GROUP_STATE_TIMEDOUT;
3841 	if (group->fatal_queues) {
3842 		get_state->state |= DRM_PANTHOR_GROUP_STATE_FATAL_FAULT;
3843 		get_state->fatal_queues = group->fatal_queues;
3844 	}
3845 	if (group->innocent)
3846 		get_state->state |= DRM_PANTHOR_GROUP_STATE_INNOCENT;
3847 	mutex_unlock(&sched->lock);
3848 
3849 	group_put(group);
3850 	return 0;
3851 }
3852 
3853 int panthor_group_pool_create(struct panthor_file *pfile)
3854 {
3855 	struct panthor_group_pool *gpool;
3856 
3857 	gpool = kzalloc_obj(*gpool);
3858 	if (!gpool)
3859 		return -ENOMEM;
3860 
3861 	xa_init_flags(&gpool->xa, XA_FLAGS_ALLOC1);
3862 	pfile->groups = gpool;
3863 	return 0;
3864 }
3865 
3866 void panthor_group_pool_destroy(struct panthor_file *pfile)
3867 {
3868 	struct panthor_group_pool *gpool = pfile->groups;
3869 	struct panthor_group *group;
3870 	unsigned long i;
3871 
3872 	if (IS_ERR_OR_NULL(gpool))
3873 		return;
3874 
3875 	xa_for_each(&gpool->xa, i, group)
3876 		panthor_group_destroy(pfile, i);
3877 
3878 	xa_destroy(&gpool->xa);
3879 	kfree(gpool);
3880 	pfile->groups = NULL;
3881 }
3882 
3883 /**
3884  * panthor_fdinfo_gather_group_mem_info() - Retrieve aggregate size of all private kernel BOs
3885  * belonging to all the groups owned by an open Panthor file
3886  * @pfile: File.
3887  * @stats: Memory statistics to be updated.
3888  *
3889  */
3890 void
3891 panthor_fdinfo_gather_group_mem_info(struct panthor_file *pfile,
3892 				     struct drm_memory_stats *stats)
3893 {
3894 	struct panthor_group_pool *gpool = pfile->groups;
3895 	struct panthor_group *group;
3896 	unsigned long i;
3897 
3898 	if (IS_ERR_OR_NULL(gpool))
3899 		return;
3900 
3901 	xa_lock(&gpool->xa);
3902 	xa_for_each_marked(&gpool->xa, i, group, GROUP_REGISTERED) {
3903 		stats->resident += group->fdinfo.kbo_sizes;
3904 		if (group->csg_id >= 0)
3905 			stats->active += group->fdinfo.kbo_sizes;
3906 	}
3907 	xa_unlock(&gpool->xa);
3908 }
3909 
3910 static void job_release(struct kref *ref)
3911 {
3912 	struct panthor_job *job = container_of(ref, struct panthor_job, refcount);
3913 
3914 	drm_WARN_ON(&job->group->ptdev->base, !list_empty(&job->node));
3915 
3916 	if (job->base.s_fence)
3917 		drm_sched_job_cleanup(&job->base);
3918 
3919 	if (job->done_fence && job->done_fence->ops)
3920 		dma_fence_put(job->done_fence);
3921 	else
3922 		dma_fence_free(job->done_fence);
3923 
3924 	group_put(job->group);
3925 
3926 	kfree(job);
3927 }
3928 
3929 struct drm_sched_job *panthor_job_get(struct drm_sched_job *sched_job)
3930 {
3931 	if (sched_job) {
3932 		struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
3933 
3934 		kref_get(&job->refcount);
3935 	}
3936 
3937 	return sched_job;
3938 }
3939 
3940 void panthor_job_put(struct drm_sched_job *sched_job)
3941 {
3942 	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
3943 
3944 	if (sched_job)
3945 		kref_put(&job->refcount, job_release);
3946 }
3947 
3948 struct panthor_vm *panthor_job_vm(struct drm_sched_job *sched_job)
3949 {
3950 	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
3951 
3952 	return job->group->vm;
3953 }
3954 
3955 struct drm_sched_job *
3956 panthor_job_create(struct panthor_file *pfile,
3957 		   u16 group_handle,
3958 		   const struct drm_panthor_queue_submit *qsubmit,
3959 		   u64 drm_client_id)
3960 {
3961 	struct panthor_group_pool *gpool = pfile->groups;
3962 	struct panthor_job *job;
3963 	u32 credits;
3964 	int ret;
3965 
3966 	if (qsubmit->pad)
3967 		return ERR_PTR(-EINVAL);
3968 
3969 	/* If stream_addr is zero, stream_size must be zero too. */
3970 	if ((qsubmit->stream_size == 0) != (qsubmit->stream_addr == 0))
3971 		return ERR_PTR(-EINVAL);
3972 
3973 	/* Make sure the address is aligned on a 64-byte (cache line) boundary and
3974 	 * the size is a multiple of 8 bytes (instruction size).
3975 	 */
3976 	if ((qsubmit->stream_addr & 63) || (qsubmit->stream_size & 7))
3977 		return ERR_PTR(-EINVAL);
3978 
3979 	/* bits 30:24 must be zero. */
3980 	if (qsubmit->latest_flush & GENMASK(30, 24))
3981 		return ERR_PTR(-EINVAL);
3982 
3983 	job = kzalloc_obj(*job);
3984 	if (!job)
3985 		return ERR_PTR(-ENOMEM);
3986 
3987 	kref_init(&job->refcount);
3988 	job->queue_idx = qsubmit->queue_index;
3989 	job->call_info.size = qsubmit->stream_size;
3990 	job->call_info.start = qsubmit->stream_addr;
3991 	job->call_info.latest_flush = qsubmit->latest_flush;
3992 	INIT_LIST_HEAD(&job->node);
3993 
3994 	job->group = group_from_handle(gpool, group_handle);
3995 	if (!job->group) {
3996 		ret = -EINVAL;
3997 		goto err_put_job;
3998 	}
3999 
4000 	if (!group_can_run(job->group)) {
4001 		ret = -EINVAL;
4002 		goto err_put_job;
4003 	}
4004 
4005 	if (job->queue_idx >= job->group->queue_count ||
4006 	    !job->group->queues[job->queue_idx]) {
4007 		ret = -EINVAL;
4008 		goto err_put_job;
4009 	}
4010 
4011 	/* Empty command streams don't need a fence; they'll pick the one from
4012 	 * the previously submitted job.
4013 	 */
4014 	if (job->call_info.size) {
4015 		job->done_fence = kzalloc_obj(*job->done_fence);
4016 		if (!job->done_fence) {
4017 			ret = -ENOMEM;
4018 			goto err_put_job;
4019 		}
4020 	}
4021 
4022 	job->profiling.mask = pfile->ptdev->profile_mask;
4023 	credits = calc_job_credits(job->profiling.mask);
4024 	if (credits == 0) {
4025 		ret = -EINVAL;
4026 		goto err_put_job;
4027 	}
4028 
4029 	ret = drm_sched_job_init(&job->base,
4030 				 &job->group->queues[job->queue_idx]->entity,
4031 				 credits, job->group, drm_client_id);
4032 	if (ret)
4033 		goto err_put_job;
4034 
4035 	return &job->base;
4036 
4037 err_put_job:
4038 	panthor_job_put(&job->base);
4039 	return ERR_PTR(ret);
4040 }
4041 
4042 void panthor_job_update_resvs(struct drm_exec *exec, struct drm_sched_job *sched_job)
4043 {
4044 	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
4045 
4046 	panthor_vm_update_resvs(job->group->vm, exec, &sched_job->s_fence->finished,
4047 				DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
4048 }
4049 
4050 void panthor_sched_unplug(struct panthor_device *ptdev)
4051 {
4052 	struct panthor_scheduler *sched = ptdev->scheduler;
4053 
4054 	disable_delayed_work_sync(&sched->tick_work);
4055 	disable_work_sync(&sched->fw_events_work);
4056 	disable_work_sync(&sched->sync_upd_work);
4057 
4058 	mutex_lock(&sched->lock);
4059 	if (sched->pm.has_ref) {
4060 		pm_runtime_put(ptdev->base.dev);
4061 		sched->pm.has_ref = false;
4062 	}
4063 	mutex_unlock(&sched->lock);
4064 }
4065 
4066 static void panthor_sched_fini(struct drm_device *ddev, void *res)
4067 {
4068 	struct panthor_scheduler *sched = res;
4069 	int prio;
4070 
4071 	if (!sched || !sched->csg_slot_count)
4072 		return;
4073 
4074 	if (sched->wq)
4075 		destroy_workqueue(sched->wq);
4076 
4077 	if (sched->heap_alloc_wq)
4078 		destroy_workqueue(sched->heap_alloc_wq);
4079 
4080 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
4081 		drm_WARN_ON(ddev, !list_empty(&sched->groups.runnable[prio]));
4082 		drm_WARN_ON(ddev, !list_empty(&sched->groups.idle[prio]));
4083 	}
4084 
4085 	drm_WARN_ON(ddev, !list_empty(&sched->groups.waiting));
4086 }
4087 
4088 int panthor_sched_init(struct panthor_device *ptdev)
4089 {
4090 	struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
4091 	struct panthor_fw_csg_iface *csg_iface = panthor_fw_get_csg_iface(ptdev, 0);
4092 	struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, 0, 0);
4093 	struct panthor_scheduler *sched;
4094 	u32 gpu_as_count, num_groups;
4095 	int prio, ret;
4096 
4097 	sched = drmm_kzalloc(&ptdev->base, sizeof(*sched), GFP_KERNEL);
4098 	if (!sched)
4099 		return -ENOMEM;
4100 
4101 	/* The highest bit in JOB_INT_* is reserved for global IRQs. That
4102 	 * leaves 31 bits for CSG IRQs, hence the MAX_CSGS clamp here.
4103 	 */
4104 	num_groups = min_t(u32, MAX_CSGS, glb_iface->control->group_num);
4105 
4106 	/* The FW-side scheduler might deadlock if two groups with the same
4107 	 * priority try to access a set of resources that overlaps, with part
4108 	 * of the resources being allocated to one group and the other part to
4109 	 * the other group, both groups waiting for the remaining resources to
4110 	 * be allocated. To avoid that, it is recommended to assign each CSG a
4111 	 * different priority. In theory we could allow several groups to have
4112 	 * the same CSG priority if they don't request the same resources, but
4113 	 * that makes the scheduling logic more complicated, so let's clamp
4114 	 * the number of CSG slots to MAX_CSG_PRIO + 1 for now.
4115 	 */
4116 	num_groups = min_t(u32, MAX_CSG_PRIO + 1, num_groups);
4117 
4118 	/* We need at least one AS for the MCU and one for the GPU contexts. */
4119 	gpu_as_count = hweight32(ptdev->gpu_info.as_present & GENMASK(31, 1));
4120 	if (!gpu_as_count) {
4121 		drm_err(&ptdev->base, "Not enough AS (%d, expected at least 2)",
4122 			gpu_as_count + 1);
4123 		return -EINVAL;
4124 	}
4125 
4126 	sched->ptdev = ptdev;
4127 	sched->sb_slot_count = CS_FEATURES_SCOREBOARDS(cs_iface->control->features);
4128 	sched->csg_slot_count = num_groups;
4129 	sched->cs_slot_count = csg_iface->control->stream_num;
4130 	sched->as_slot_count = gpu_as_count;
4131 	ptdev->csif_info.csg_slot_count = sched->csg_slot_count;
4132 	ptdev->csif_info.cs_slot_count = sched->cs_slot_count;
4133 	ptdev->csif_info.scoreboard_slot_count = sched->sb_slot_count;
4134 
4135 	sched->last_tick = 0;
4136 	sched->resched_target = U64_MAX;
4137 	sched->tick_period = msecs_to_jiffies(10);
4138 	INIT_DELAYED_WORK(&sched->tick_work, tick_work);
4139 	INIT_WORK(&sched->sync_upd_work, sync_upd_work);
4140 	INIT_WORK(&sched->fw_events_work, process_fw_events_work);
4141 
4142 	ret = drmm_mutex_init(&ptdev->base, &sched->lock);
4143 	if (ret)
4144 		return ret;
4145 
4146 	for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
4147 		INIT_LIST_HEAD(&sched->groups.runnable[prio]);
4148 		INIT_LIST_HEAD(&sched->groups.idle[prio]);
4149 	}
4150 	INIT_LIST_HEAD(&sched->groups.waiting);
4151 
4152 	ret = drmm_mutex_init(&ptdev->base, &sched->reset.lock);
4153 	if (ret)
4154 		return ret;
4155 
4156 	INIT_LIST_HEAD(&sched->reset.stopped_groups);
4157 
4158 	/* sched->heap_alloc_wq will be used for heap chunk allocation on
4159 	 * tiler OOM events, which means we can't use the same workqueue for
4160 	 * the scheduler because work items queued by the scheduler are in
4161 	 * the dma-signalling path. Allocate a dedicated heap_alloc_wq to
4162 	 * work around this limitation.
4163 	 *
4164 	 * FIXME: Ultimately, what we need is a failable/non-blocking GEM
4165 	 * allocation path that we can call when a heap OOM is reported. The
4166 	 * FW is smart enough to fall back on other methods if the kernel can't
4167 	 * allocate memory, and fail the tiling job if none of these
4168 	 * countermeasures worked.
4169 	 *
4170 	 * Set WQ_MEM_RECLAIM on sched->wq to unblock the situation when the
4171 	 * system is running out of memory.
4172 	 */
4173 	sched->heap_alloc_wq = alloc_workqueue("panthor-heap-alloc", WQ_UNBOUND, 0);
4174 	sched->wq = alloc_workqueue("panthor-csf-sched", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
4175 	if (!sched->wq || !sched->heap_alloc_wq) {
4176 		panthor_sched_fini(&ptdev->base, sched);
4177 		drm_err(&ptdev->base, "Failed to allocate the workqueues");
4178 		return -ENOMEM;
4179 	}
4180 
4181 	ret = drmm_add_action_or_reset(&ptdev->base, panthor_sched_fini, sched);
4182 	if (ret)
4183 		return ret;
4184 
4185 	ptdev->scheduler = sched;
4186 	return 0;
4187 }
4188