1 // SPDX-License-Identifier: GPL-2.0 or MIT
2 /* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
3 /* Copyright 2023 Collabora ltd. */
4 
5 #include <drm/drm_debugfs.h>
6 #include <drm/drm_drv.h>
7 #include <drm/drm_exec.h>
8 #include <drm/drm_gpuvm.h>
9 #include <drm/drm_managed.h>
10 #include <drm/gpu_scheduler.h>
11 #include <drm/panthor_drm.h>
12 
13 #include <linux/atomic.h>
14 #include <linux/bitfield.h>
15 #include <linux/delay.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/interrupt.h>
18 #include <linux/io.h>
19 #include <linux/iopoll.h>
20 #include <linux/io-pgtable.h>
21 #include <linux/iommu.h>
22 #include <linux/kmemleak.h>
23 #include <linux/platform_device.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/rwsem.h>
26 #include <linux/sched.h>
27 #include <linux/shmem_fs.h>
28 #include <linux/sizes.h>
29 
30 #include "panthor_device.h"
31 #include "panthor_gem.h"
32 #include "panthor_heap.h"
33 #include "panthor_mmu.h"
34 #include "panthor_regs.h"
35 #include "panthor_sched.h"
36 
37 #define MAX_AS_SLOTS			32
38 
39 struct panthor_vm;
40 
41 /**
42  * struct panthor_as_slot - Address space slot
43  */
44 struct panthor_as_slot {
45 	/** @vm: VM bound to this slot. NULL if no VM is bound. */
46 	struct panthor_vm *vm;
47 };
48 
49 /**
50  * struct panthor_mmu - MMU related data
51  */
52 struct panthor_mmu {
53 	/** @irq: The MMU irq. */
54 	struct panthor_irq irq;
55 
56 	/**
57 	 * @as: Address space related fields.
58 	 *
59 	 * The GPU has a limited number of address space (AS) slots, forcing
60 	 * us to re-assign them on demand.
61 	 */
62 	struct {
63 		/** @as.slots_lock: Lock protecting access to all other AS fields. */
64 		struct mutex slots_lock;
65 
66 		/** @as.alloc_mask: Bitmask encoding the allocated slots. */
67 		unsigned long alloc_mask;
68 
69 		/** @as.faulty_mask: Bitmask encoding the faulty slots. */
70 		unsigned long faulty_mask;
71 
72 		/** @as.slots: VMs currently bound to the AS slots. */
73 		struct panthor_as_slot slots[MAX_AS_SLOTS];
74 
75 		/**
76 		 * @as.lru_list: List of least recently used VMs.
77 		 *
78 		 * We use this list to pick a VM to evict when all slots are
79 		 * used.
80 		 *
81 		 * There should be no more active VMs than there are AS slots,
82 		 * so this LRU is just here to keep VMs bound until there's
83 		 * a need to release a slot, thus avoiding unnecessary TLB/cache
84 		 * flushes.
85 		 */
86 		struct list_head lru_list;
87 	} as;
88 
89 	/** @vm: VMs management fields */
90 	struct {
91 		/** @vm.lock: Lock protecting access to @vm.list. */
92 		struct mutex lock;
93 
94 		/** @vm.list: List containing all VMs. */
95 		struct list_head list;
96 
97 		/** @vm.reset_in_progress: True if a reset is in progress. */
98 		bool reset_in_progress;
99 
100 		/** @vm.wq: Workqueue used for the VM_BIND queues. */
101 		struct workqueue_struct *wq;
102 	} vm;
103 };
104 
105 /**
106  * struct panthor_vm_pool - VM pool object
107  */
108 struct panthor_vm_pool {
109 	/** @xa: Array used for VM handle tracking. */
110 	struct xarray xa;
111 };
112 
113 /**
114  * struct panthor_vma - GPU mapping object
115  *
116  * This is used to track GEM mappings in GPU space.
117  */
118 struct panthor_vma {
119 	/** @base: Inherits from drm_gpuva. */
120 	struct drm_gpuva base;
121 
122 	/** @node: Used to implement deferred release of VMAs. */
123 	struct list_head node;
124 
125 	/**
126 	 * @flags: Combination of drm_panthor_vm_bind_op_flags.
127 	 *
128 	 * Only map related flags are accepted.
129 	 */
130 	u32 flags;
131 };
132 
133 /**
134  * struct panthor_vm_op_ctx - VM operation context
135  *
136  * With VM operations potentially taking place in a dma-signaling path, we
137  * need to make sure everything that might require resource allocation is
138  * pre-allocated upfront. This is what this operation context is for.
139  *
140  * We also collect resources that have been freed, so we can release them
141  * asynchronously, and let the VM_BIND scheduler process the next VM_BIND
142  * request.
143  */
144 struct panthor_vm_op_ctx {
145 	/** @rsvd_page_tables: Pages reserved for the MMU page table update. */
146 	struct {
147 		/** @rsvd_page_tables.count: Number of pages reserved. */
148 		u32 count;
149 
150 		/** @rsvd_page_tables.ptr: Index of the first unused page in the @pages table. */
151 		u32 ptr;
152 
153 		/**
154 		 * @rsvd_page_tables.pages: Array of pages to be used for an MMU page table update.
155 		 *
156 		 * After a VM operation, there might be free pages left in this array.
157 		 * They should be returned to the pt_cache as part of the op_ctx cleanup.
158 		 */
159 		void **pages;
160 	} rsvd_page_tables;
161 
162 	/**
163 	 * @preallocated_vmas: Pre-allocated VMAs to handle the remap case.
164 	 *
165 	 * Partial unmap requests or map requests overlapping existing mappings will
166 	 * trigger a remap call, which needs to register up to three panthor_vma objects
167 	 * (one for the new mapping, and two for the previous and next mappings).
168 	 */
169 	struct panthor_vma *preallocated_vmas[3];
170 
171 	/** @flags: Combination of drm_panthor_vm_bind_op_flags. */
172 	u32 flags;
173 
174 	/** @va: Virtual range targeted by the VM operation. */
175 	struct {
176 		/** @va.addr: Start address. */
177 		u64 addr;
178 
179 		/** @va.range: Range size. */
180 		u64 range;
181 	} va;
182 
183 	/**
184 	 * @returned_vmas: List of panthor_vma objects returned after a VM operation.
185 	 *
186 	 * For unmap operations, this will contain all VMAs that were covered by the
187 	 * specified VA range.
188 	 *
189 	 * For map operations, this will contain all VMAs that previously mapped to
190 	 * the specified VA range.
191 	 *
192 	 * Those VMAs, and the resources they point to, will be released as part of
193 	 * the op_ctx cleanup operation.
194 	 */
195 	struct list_head returned_vmas;
196 
197 	/** @map: Fields specific to a map operation. */
198 	struct {
199 		/** @map.vm_bo: Buffer object to map. */
200 		struct drm_gpuvm_bo *vm_bo;
201 
202 		/** @map.bo_offset: Offset in the buffer object. */
203 		u64 bo_offset;
204 
205 		/**
206 		 * @map.sgt: sg-table pointing to pages backing the GEM object.
207 		 *
208 		 * This is gathered at job creation time, such that we don't have
209 		 * to allocate in ::run_job().
210 		 */
211 		struct sg_table *sgt;
212 
213 		/**
214 		 * @map.new_vma: The new VMA object that will be inserted to the VA tree.
215 		 */
216 		struct panthor_vma *new_vma;
217 	} map;
218 };
219 
220 /**
221  * struct panthor_vm - VM object
222  *
223  * A VM is an object representing a GPU (or MCU) virtual address space.
224  * It embeds the MMU page table for this address space, a tree containing
225  * all the virtual mappings of GEM objects, and other things needed to manage
226  * the VM.
227  *
228  * Except for the MCU VM, which is managed by the kernel, all other VMs are
229  * created by userspace and mostly managed by userspace, using the
230  * %DRM_IOCTL_PANTHOR_VM_BIND ioctl.
231  *
232  * A portion of the virtual address space is reserved for kernel objects,
233  * like heap chunks, and userspace gets to decide how much of the virtual
234  * address space is left to the kernel (half of the virtual address space
235  * by default).
236  */
237 struct panthor_vm {
238 	/**
239 	 * @base: Inherit from drm_gpuvm.
240 	 *
241 	 * We delegate all the VA management to the common drm_gpuvm framework
242 	 * and only implement hooks to update the MMU page table.
243 	 */
244 	struct drm_gpuvm base;
245 
246 	/**
247 	 * @sched: Scheduler used for asynchronous VM_BIND requests.
248 	 *
249 	 * We use a 1:1 scheduler here.
250 	 */
251 	struct drm_gpu_scheduler sched;
252 
253 	/**
254 	 * @entity: Scheduling entity representing the VM_BIND queue.
255 	 *
256 	 * There's currently one bind queue per VM. It doesn't make sense to
257 	 * allow more given the VM operations are serialized anyway.
258 	 */
259 	struct drm_sched_entity entity;
260 
261 	/** @ptdev: Device. */
262 	struct panthor_device *ptdev;
263 
264 	/** @memattr: Value to program to the AS_MEMATTR register. */
265 	u64 memattr;
266 
267 	/** @pgtbl_ops: Page table operations. */
268 	struct io_pgtable_ops *pgtbl_ops;
269 
270 	/** @root_page_table: Stores the root page table pointer. */
271 	void *root_page_table;
272 
273 	/**
274 	 * @op_lock: Lock used to serialize operations on a VM.
275 	 *
276 	 * The serialization of jobs queued to the VM_BIND queue is already
277 	 * taken care of by drm_sched, but we need to serialize synchronous
278 	 * and asynchronous VM_BIND requests. This is what this lock is for.
279 	 */
280 	struct mutex op_lock;
281 
282 	/**
283 	 * @op_ctx: The context attached to the currently executing VM operation.
284 	 *
285 	 * NULL when no operation is in progress.
286 	 */
287 	struct panthor_vm_op_ctx *op_ctx;
288 
289 	/**
290 	 * @mm: Memory management object representing the auto-VA/kernel-VA.
291 	 *
292 	 * Used to auto-allocate VA space for kernel-managed objects (tiler
293 	 * heaps, ...).
294 	 *
295 	 * For the MCU VM, this is managing the VA range that's used to map
296 	 * all shared interfaces.
297 	 *
298 	 * For user VMs, the range is specified by userspace, and must not
299 	 * exceed half of the addressable VA space.
300 	 */
301 	struct drm_mm mm;
302 
303 	/** @mm_lock: Lock protecting the @mm field. */
304 	struct mutex mm_lock;
305 
306 	/** @kernel_auto_va: Automatic VA-range for kernel BOs. */
307 	struct {
308 		/** @kernel_auto_va.start: Start of the automatic VA-range for kernel BOs. */
309 		u64 start;
310 
311 		/** @kernel_auto_va.end: End of the automatic VA-range for kernel BOs. */
312 		u64 end;
313 	} kernel_auto_va;
314 
315 	/** @as: Address space related fields. */
316 	struct {
317 		/**
318 		 * @as.id: ID of the address space this VM is bound to.
319 		 *
320 		 * A value of -1 means the VM is inactive/not bound.
321 		 */
322 		int id;
323 
324 		/** @as.active_cnt: Number of active users of this VM. */
325 		refcount_t active_cnt;
326 
327 		/**
328 		 * @as.lru_node: Used to insert the VM in the panthor_mmu::as::lru_list.
329 		 *
330 		 * Active VMs should not be inserted in the LRU list.
331 		 */
332 		struct list_head lru_node;
333 	} as;
334 
335 	/**
336 	 * @heaps: Tiler heap related fields.
337 	 */
338 	struct {
339 		/**
340 		 * @heaps.pool: The heap pool attached to this VM.
341 		 *
342 		 * Will stay NULL until someone creates a heap context on this VM.
343 		 */
344 		struct panthor_heap_pool *pool;
345 
346 		/** @heaps.lock: Lock used to protect access to @pool. */
347 		struct mutex lock;
348 	} heaps;
349 
350 	/** @node: Used to insert the VM in the panthor_mmu::vm::list. */
351 	struct list_head node;
352 
353 	/** @for_mcu: True if this is the MCU VM. */
354 	bool for_mcu;
355 
356 	/**
357 	 * @destroyed: True if the VM was destroyed.
358 	 *
359 	 * No further bind requests should be queued to a destroyed VM.
360 	 */
361 	bool destroyed;
362 
363 	/**
364 	 * @unusable: True if the VM has turned unusable because something
365 	 * bad happened during an asynchronous request.
366 	 *
367 	 * We don't try to recover from such failures, because this implies
368 	 * informing userspace about the specific operation that failed, and
369 	 * hoping the userspace driver can replay things from there. This all
370 	 * sounds very complicated for little gain.
371 	 *
372 	 * Instead, we should just flag the VM as unusable, and fail any
373 	 * further request targeting this VM.
374 	 *
375 	 * We also provide a way to query a VM state, so userspace can destroy
376 	 * it and create a new one.
377 	 *
378 	 * As an analogy, this would be mapped to a VK_ERROR_DEVICE_LOST
379 	 * situation, where the logical device needs to be re-created.
380 	 */
381 	bool unusable;
382 
383 	/**
384 	 * @unhandled_fault: Unhandled fault happened.
385 	 *
386 	 * This should be reported to the scheduler, and the queue/group be
387 	 * flagged as faulty as a result.
388 	 */
389 	bool unhandled_fault;
390 };
391 
392 /**
393  * struct panthor_vm_bind_job - VM bind job
394  */
395 struct panthor_vm_bind_job {
396 	/** @base: Inherit from drm_sched_job. */
397 	struct drm_sched_job base;
398 
399 	/** @refcount: Reference count. */
400 	struct kref refcount;
401 
402 	/** @cleanup_op_ctx_work: Work used to cleanup the VM operation context. */
403 	struct work_struct cleanup_op_ctx_work;
404 
405 	/** @vm: VM targeted by the VM operation. */
406 	struct panthor_vm *vm;
407 
408 	/** @ctx: Operation context. */
409 	struct panthor_vm_op_ctx ctx;
410 };
411 
412 /*
413  * @pt_cache: Cache used to allocate MMU page tables.
414  *
415  * The pre-allocation pattern forces us to over-allocate to plan for
416  * the worst case scenario, and return the pages we didn't use.
417  *
418  * Having a kmem_cache allows us to speed up allocations.
419  */
420 static struct kmem_cache *pt_cache;
421 
422 /**
423  * alloc_pt() - Custom page table allocator
424  * @cookie: Cookie passed at page table allocation time.
425  * @size: Size of the page table. This size should be fixed,
426  * and determined at creation time based on the granule size.
427  * @gfp: GFP flags.
428  *
429  * We want a custom allocator so we can use a cache for page table
430  * allocations and amortize the cost of the over-reservation that's
431  * done to allow asynchronous VM operations.
432  *
433  * Return: non-NULL on success, NULL if the allocation failed for any
434  * reason.
435  */
436 static void *alloc_pt(void *cookie, size_t size, gfp_t gfp)
437 {
438 	struct panthor_vm *vm = cookie;
439 	void *page;
440 
441 	/* Allocation of the root page table happens during init. */
442 	if (unlikely(!vm->root_page_table)) {
443 		struct page *p;
444 
445 		drm_WARN_ON(&vm->ptdev->base, vm->op_ctx);
446 		p = alloc_pages_node(dev_to_node(vm->ptdev->base.dev),
447 				     gfp | __GFP_ZERO, get_order(size));
448 		page = p ? page_address(p) : NULL;
449 		vm->root_page_table = page;
450 		return page;
451 	}
452 
453 	/* We're not supposed to have anything bigger than 4k here, because we picked a
454 	 * 4k granule size at init time.
455 	 */
456 	if (drm_WARN_ON(&vm->ptdev->base, size != SZ_4K))
457 		return NULL;
458 
459 	/* We must have some op_ctx attached to the VM and it must have at least one
460 	 * free page.
461 	 */
462 	if (drm_WARN_ON(&vm->ptdev->base, !vm->op_ctx) ||
463 	    drm_WARN_ON(&vm->ptdev->base,
464 			vm->op_ctx->rsvd_page_tables.ptr >= vm->op_ctx->rsvd_page_tables.count))
465 		return NULL;
466 
467 	page = vm->op_ctx->rsvd_page_tables.pages[vm->op_ctx->rsvd_page_tables.ptr++];
468 	memset(page, 0, SZ_4K);
469 
470 	/* Page table entries don't use virtual addresses, which trips out
471 	 * kmemleak. kmemleak_alloc_phys() might work, but physical addresses
472 	 * are mixed with other fields, and I fear kmemleak won't detect that
473 	 * either.
474 	 *
475 	 * Let's just ignore memory passed to the page-table driver for now.
476 	 */
477 	kmemleak_ignore(page);
478 	return page;
479 }
480 
481 /**
482  * free_pt() - Custom page table free function
483  * @cookie: Cookie passed at page table allocation time.
484  * @data: Page table to free.
485  * @size: Size of the page table. This size should be fixed,
486  * and determined at creation time based on the granule size.
487  */
488 static void free_pt(void *cookie, void *data, size_t size)
489 {
490 	struct panthor_vm *vm = cookie;
491 
492 	if (unlikely(vm->root_page_table == data)) {
493 		free_pages((unsigned long)data, get_order(size));
494 		vm->root_page_table = NULL;
495 		return;
496 	}
497 
498 	if (drm_WARN_ON(&vm->ptdev->base, size != SZ_4K))
499 		return;
500 
501 	/* Return the page to the pt_cache. */
502 	kmem_cache_free(pt_cache, data);
503 }
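
/*
 * A minimal sketch of the reservation scheme alloc_pt()/free_pt() rely on
 * (illustrative only, with made-up numbers): the op context bulk-allocates
 * the worst-case page table count from pt_cache at prepare time, so the
 * dma-signaling path never allocates.
 *
 *	void *pages[4];
 *	int got = kmem_cache_alloc_bulk(pt_cache, GFP_KERNEL,
 *					ARRAY_SIZE(pages), pages);
 *
 * alloc_pt() then pops pages[ptr++] from the reserved set, and any pages
 * left over after the VM operation are handed back with
 * kmem_cache_free_bulk() in panthor_vm_cleanup_op_ctx().
 */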
504 
505 static int wait_ready(struct panthor_device *ptdev, u32 as_nr)
506 {
507 	int ret;
508 	u32 val;
509 
510 	/* Wait for the MMU status to indicate there is no active command, in
511 	 * case one is pending.
512 	 */
513 	ret = readl_relaxed_poll_timeout_atomic(ptdev->iomem + AS_STATUS(as_nr),
514 						val, !(val & AS_STATUS_AS_ACTIVE),
515 						10, 100000);
516 
517 	if (ret) {
518 		panthor_device_schedule_reset(ptdev);
519 		drm_err(&ptdev->base, "AS_ACTIVE bit stuck\n");
520 	}
521 
522 	return ret;
523 }
524 
525 static int write_cmd(struct panthor_device *ptdev, u32 as_nr, u32 cmd)
526 {
527 	int status;
528 
529 	/* write AS_COMMAND when MMU is ready to accept another command */
530 	status = wait_ready(ptdev, as_nr);
531 	if (!status)
532 		gpu_write(ptdev, AS_COMMAND(as_nr), cmd);
533 
534 	return status;
535 }
536 
537 static void lock_region(struct panthor_device *ptdev, u32 as_nr,
538 			u64 region_start, u64 size)
539 {
540 	u8 region_width;
541 	u64 region;
542 	u64 region_end = region_start + size;
543 
544 	if (!size)
545 		return;
546 
547 	/*
548 	 * The locked region is a naturally aligned power of 2 block encoded as
549 	 * its log2 size minus 1.
550 	 * Calculate the desired start/end and look for the highest bit which
551 	 * differs. The smallest naturally aligned block must include this bit
552 	 * change, the desired region starts with this bit (and subsequent bits)
553 	 * zeroed and ends with the bit (and subsequent bits) set to one.
554 	 */
555 	region_width = max(fls64(region_start ^ (region_end - 1)),
556 			   const_ilog2(AS_LOCK_REGION_MIN_SIZE)) - 1;
557 
558 	/*
559 	 * Mask off the low bits of region_start (which would be ignored by
560 	 * the hardware anyway)
561 	 */
562 	region_start &= GENMASK_ULL(63, region_width);
563 
564 	region = region_width | region_start;
565 
566 	/* Lock the region that needs to be updated */
567 	gpu_write(ptdev, AS_LOCKADDR_LO(as_nr), lower_32_bits(region));
568 	gpu_write(ptdev, AS_LOCKADDR_HI(as_nr), upper_32_bits(region));
569 	write_cmd(ptdev, as_nr, AS_COMMAND_LOCK);
570 }
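
/*
 * Worked example (assuming AS_LOCK_REGION_MIN_SIZE is 32K): locking
 * region_start = 0x200000 with size = 0x10000 gives region_end = 0x210000,
 * fls64(0x200000 ^ 0x20ffff) = fls64(0xffff) = 16, and thus
 * region_width = max(16, 15) - 1 = 15, i.e. a naturally aligned 64K block.
 * region_start already has bits [14:0] clear, so the value programmed in
 * AS_LOCKADDR is 0x200000 | 15.
 */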
571 
572 static int mmu_hw_do_operation_locked(struct panthor_device *ptdev, int as_nr,
573 				      u64 iova, u64 size, u32 op)
574 {
575 	lockdep_assert_held(&ptdev->mmu->as.slots_lock);
576 
577 	if (as_nr < 0)
578 		return 0;
579 
580 	/*
581 	 * If the AS number is valid (>= 0), then we can be sure
582 	 * the device is up and running, so we don't need to explicitly
583 	 * power it up
584 	 */
585 
586 	if (op != AS_COMMAND_UNLOCK)
587 		lock_region(ptdev, as_nr, iova, size);
588 
589 	/* Run the MMU operation */
590 	write_cmd(ptdev, as_nr, op);
591 
592 	/* Wait for the flush to complete */
593 	return wait_ready(ptdev, as_nr);
594 }
595 
596 static int mmu_hw_do_operation(struct panthor_vm *vm,
597 			       u64 iova, u64 size, u32 op)
598 {
599 	struct panthor_device *ptdev = vm->ptdev;
600 	int ret;
601 
602 	mutex_lock(&ptdev->mmu->as.slots_lock);
603 	ret = mmu_hw_do_operation_locked(ptdev, vm->as.id, iova, size, op);
604 	mutex_unlock(&ptdev->mmu->as.slots_lock);
605 
606 	return ret;
607 }
608 
609 static int panthor_mmu_as_enable(struct panthor_device *ptdev, u32 as_nr,
610 				 u64 transtab, u64 transcfg, u64 memattr)
611 {
612 	int ret;
613 
614 	ret = mmu_hw_do_operation_locked(ptdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
615 	if (ret)
616 		return ret;
617 
618 	gpu_write(ptdev, AS_TRANSTAB_LO(as_nr), lower_32_bits(transtab));
619 	gpu_write(ptdev, AS_TRANSTAB_HI(as_nr), upper_32_bits(transtab));
620 
621 	gpu_write(ptdev, AS_MEMATTR_LO(as_nr), lower_32_bits(memattr));
622 	gpu_write(ptdev, AS_MEMATTR_HI(as_nr), upper_32_bits(memattr));
623 
624 	gpu_write(ptdev, AS_TRANSCFG_LO(as_nr), lower_32_bits(transcfg));
625 	gpu_write(ptdev, AS_TRANSCFG_HI(as_nr), upper_32_bits(transcfg));
626 
627 	return write_cmd(ptdev, as_nr, AS_COMMAND_UPDATE);
628 }
629 
630 static int panthor_mmu_as_disable(struct panthor_device *ptdev, u32 as_nr)
631 {
632 	int ret;
633 
634 	ret = mmu_hw_do_operation_locked(ptdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
635 	if (ret)
636 		return ret;
637 
638 	gpu_write(ptdev, AS_TRANSTAB_LO(as_nr), 0);
639 	gpu_write(ptdev, AS_TRANSTAB_HI(as_nr), 0);
640 
641 	gpu_write(ptdev, AS_MEMATTR_LO(as_nr), 0);
642 	gpu_write(ptdev, AS_MEMATTR_HI(as_nr), 0);
643 
644 	gpu_write(ptdev, AS_TRANSCFG_LO(as_nr), AS_TRANSCFG_ADRMODE_UNMAPPED);
645 	gpu_write(ptdev, AS_TRANSCFG_HI(as_nr), 0);
646 
647 	return write_cmd(ptdev, as_nr, AS_COMMAND_UPDATE);
648 }
649 
650 static u32 panthor_mmu_fault_mask(struct panthor_device *ptdev, u32 value)
651 {
652 	/* Bits 16 to 31 mean REQ_COMPLETE. */
653 	return value & GENMASK(15, 0);
654 }
655 
656 static u32 panthor_mmu_as_fault_mask(struct panthor_device *ptdev, u32 as)
657 {
658 	return BIT(as);
659 }
660 
661 /**
662  * panthor_vm_has_unhandled_faults() - Check if a VM has unhandled faults
663  * @vm: VM to check.
664  *
665  * Return: true if the VM has unhandled faults, false otherwise.
666  */
667 bool panthor_vm_has_unhandled_faults(struct panthor_vm *vm)
668 {
669 	return vm->unhandled_fault;
670 }
671 
672 /**
673  * panthor_vm_is_unusable() - Check if the VM is still usable
674  * @vm: VM to check.
675  *
676  * Return: true if the VM is unusable, false otherwise.
677  */
678 bool panthor_vm_is_unusable(struct panthor_vm *vm)
679 {
680 	return vm->unusable;
681 }
682 
683 static void panthor_vm_release_as_locked(struct panthor_vm *vm)
684 {
685 	struct panthor_device *ptdev = vm->ptdev;
686 
687 	lockdep_assert_held(&ptdev->mmu->as.slots_lock);
688 
689 	if (drm_WARN_ON(&ptdev->base, vm->as.id < 0))
690 		return;
691 
692 	ptdev->mmu->as.slots[vm->as.id].vm = NULL;
693 	clear_bit(vm->as.id, &ptdev->mmu->as.alloc_mask);
694 	refcount_set(&vm->as.active_cnt, 0);
695 	list_del_init(&vm->as.lru_node);
696 	vm->as.id = -1;
697 }
698 
699 /**
700  * panthor_vm_active() - Flag a VM as active
701  * @vm: VM to flag as active.
702  *
703  * Assigns an address space to a VM so it can be used by the GPU/MCU.
704  *
705  * Return: 0 on success, a negative error code otherwise.
706  */
707 int panthor_vm_active(struct panthor_vm *vm)
708 {
709 	struct panthor_device *ptdev = vm->ptdev;
710 	u32 va_bits = GPU_MMU_FEATURES_VA_BITS(ptdev->gpu_info.mmu_features);
711 	struct io_pgtable_cfg *cfg = &io_pgtable_ops_to_pgtable(vm->pgtbl_ops)->cfg;
712 	int ret = 0, as, cookie;
713 	u64 transtab, transcfg;
714 
715 	if (!drm_dev_enter(&ptdev->base, &cookie))
716 		return -ENODEV;
717 
718 	if (refcount_inc_not_zero(&vm->as.active_cnt))
719 		goto out_dev_exit;
720 
721 	mutex_lock(&ptdev->mmu->as.slots_lock);
722 
723 	if (refcount_inc_not_zero(&vm->as.active_cnt))
724 		goto out_unlock;
725 
726 	as = vm->as.id;
727 	if (as >= 0) {
728 		/* Unhandled pagefault on this AS, the MMU was disabled. We need to
729 		 * re-enable the MMU after clearing+unmasking the AS interrupts.
730 		 */
731 		if (ptdev->mmu->as.faulty_mask & panthor_mmu_as_fault_mask(ptdev, as))
732 			goto out_enable_as;
733 
734 		goto out_make_active;
735 	}
736 
737 	/* Check for a free AS */
738 	if (vm->for_mcu) {
739 		drm_WARN_ON(&ptdev->base, ptdev->mmu->as.alloc_mask & BIT(0));
740 		as = 0;
741 	} else {
742 		as = ffz(ptdev->mmu->as.alloc_mask | BIT(0));
743 	}
744 
745 	if (!(BIT(as) & ptdev->gpu_info.as_present)) {
746 		struct panthor_vm *lru_vm;
747 
748 		lru_vm = list_first_entry_or_null(&ptdev->mmu->as.lru_list,
749 						  struct panthor_vm,
750 						  as.lru_node);
751 		if (drm_WARN_ON(&ptdev->base, !lru_vm)) {
752 			ret = -EBUSY;
753 			goto out_unlock;
754 		}
755 
756 		drm_WARN_ON(&ptdev->base, refcount_read(&lru_vm->as.active_cnt));
757 		as = lru_vm->as.id;
758 		panthor_vm_release_as_locked(lru_vm);
759 	}
760 
761 	/* Assign the free or reclaimed AS to the FD */
762 	vm->as.id = as;
763 	set_bit(as, &ptdev->mmu->as.alloc_mask);
764 	ptdev->mmu->as.slots[as].vm = vm;
765 
766 out_enable_as:
767 	transtab = cfg->arm_lpae_s1_cfg.ttbr;
768 	transcfg = AS_TRANSCFG_PTW_MEMATTR_WB |
769 		   AS_TRANSCFG_PTW_RA |
770 		   AS_TRANSCFG_ADRMODE_AARCH64_4K |
771 		   AS_TRANSCFG_INA_BITS(55 - va_bits);
772 	if (ptdev->coherent)
773 		transcfg |= AS_TRANSCFG_PTW_SH_OS;
774 
775 	/* If the VM is re-activated, we clear the fault. */
776 	vm->unhandled_fault = false;
777 
778 	/* Unhandled pagefault on this AS, clear the fault and re-enable interrupts
779 	 * before enabling the AS.
780 	 */
781 	if (ptdev->mmu->as.faulty_mask & panthor_mmu_as_fault_mask(ptdev, as)) {
782 		gpu_write(ptdev, MMU_INT_CLEAR, panthor_mmu_as_fault_mask(ptdev, as));
783 		ptdev->mmu->as.faulty_mask &= ~panthor_mmu_as_fault_mask(ptdev, as);
784 		ptdev->mmu->irq.mask |= panthor_mmu_as_fault_mask(ptdev, as);
785 		gpu_write(ptdev, MMU_INT_MASK, ~ptdev->mmu->as.faulty_mask);
786 	}
787 
788 	ret = panthor_mmu_as_enable(vm->ptdev, vm->as.id, transtab, transcfg, vm->memattr);
789 
790 out_make_active:
791 	if (!ret) {
792 		refcount_set(&vm->as.active_cnt, 1);
793 		list_del_init(&vm->as.lru_node);
794 	}
795 
796 out_unlock:
797 	mutex_unlock(&ptdev->mmu->as.slots_lock);
798 
799 out_dev_exit:
800 	drm_dev_exit(cookie);
801 	return ret;
802 }
803 
804 /**
805  * panthor_vm_idle() - Flag a VM idle
806  * @vm: VM to flag as idle.
807  *
808  * When we know the GPU is done with the VM (no more jobs to process),
809  * we can relinquish the AS slot attached to this VM, if any.
810  *
811  * We don't release the slot immediately, but instead place the VM in
812  * the LRU list, so it can be evicted if another VM needs an AS slot.
813  * This way, VMs stay attached to the AS they were given until we run
814  * out of free slots, limiting the number of MMU operations (TLB flush
815  * and other AS updates).
816  */
817 void panthor_vm_idle(struct panthor_vm *vm)
818 {
819 	struct panthor_device *ptdev = vm->ptdev;
820 
821 	if (!refcount_dec_and_mutex_lock(&vm->as.active_cnt, &ptdev->mmu->as.slots_lock))
822 		return;
823 
824 	if (!drm_WARN_ON(&ptdev->base, vm->as.id == -1 || !list_empty(&vm->as.lru_node)))
825 		list_add_tail(&vm->as.lru_node, &ptdev->mmu->as.lru_list);
826 
827 	refcount_set(&vm->as.active_cnt, 0);
828 	mutex_unlock(&ptdev->mmu->as.slots_lock);
829 }
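
/*
 * Typical pairing of the two functions above (hypothetical caller sketch,
 * error handling elided):
 *
 *	ret = panthor_vm_active(vm);
 *	if (ret)
 *		return ret;
 *	... submit GPU jobs targeting vm ...
 *	panthor_vm_idle(vm);
 *
 * Note that the AS slot stays assigned after panthor_vm_idle(): the VM is
 * only placed on the LRU list, and loses its slot when another VM needs one.
 */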
830 
831 u32 panthor_vm_page_size(struct panthor_vm *vm)
832 {
833 	const struct io_pgtable *pgt = io_pgtable_ops_to_pgtable(vm->pgtbl_ops);
834 	u32 pg_shift = ffs(pgt->cfg.pgsize_bitmap) - 1;
835 
836 	return 1u << pg_shift;
837 }
838 
839 static void panthor_vm_stop(struct panthor_vm *vm)
840 {
841 	drm_sched_stop(&vm->sched, NULL);
842 }
843 
844 static void panthor_vm_start(struct panthor_vm *vm)
845 {
846 	drm_sched_start(&vm->sched, 0);
847 }
848 
849 /**
850  * panthor_vm_as() - Get the AS slot attached to a VM
851  * @vm: VM to get the AS slot of.
852  *
853  * Return: -1 if the VM is not assigned an AS slot yet, >= 0 otherwise.
854  */
855 int panthor_vm_as(struct panthor_vm *vm)
856 {
857 	return vm->as.id;
858 }
859 
860 static size_t get_pgsize(u64 addr, size_t size, size_t *count)
861 {
862 	/*
863 	 * io-pgtable only operates on multiple pages within a single table
864 	 * entry, so we need to split at boundaries of the table size, i.e.
865 	 * the next block size up. The distance from address A to the next
866 	 * boundary of block size B is logically B - A % B, but in unsigned
867 	 * two's complement where B is a power of two we get the equivalence
868 	 * B - A % B == (B - A) % B == (n * B - A) % B, and choose n = 0 :)
869 	 */
870 	size_t blk_offset = -addr % SZ_2M;
871 
872 	if (blk_offset || size < SZ_2M) {
873 		*count = min_not_zero(blk_offset, size) / SZ_4K;
874 		return SZ_4K;
875 	}
876 	blk_offset = -addr % SZ_1G ?: SZ_1G;
877 	*count = min(blk_offset, size) / SZ_2M;
878 	return SZ_2M;
879 }
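
/*
 * Worked example: for addr = 0x201000 and size = 0x400000, the first call
 * computes blk_offset = -0x201000 % SZ_2M = 0x1ff000, so it returns
 * pgsize = SZ_4K with *count = 0x1ff000 / SZ_4K = 511, which maps exactly
 * up to the next 2M boundary. The follow-up call at addr = 0x400000
 * returns pgsize = SZ_2M for the whole 2M blocks that remain.
 */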
880 
881 static int panthor_vm_flush_range(struct panthor_vm *vm, u64 iova, u64 size)
882 {
883 	struct panthor_device *ptdev = vm->ptdev;
884 	int ret = 0, cookie;
885 
886 	if (vm->as.id < 0)
887 		return 0;
888 
889 	/* If the device is unplugged, we just silently skip the flush. */
890 	if (!drm_dev_enter(&ptdev->base, &cookie))
891 		return 0;
892 
893 	ret = mmu_hw_do_operation(vm, iova, size, AS_COMMAND_FLUSH_PT);
894 
895 	drm_dev_exit(cookie);
896 	return ret;
897 }
898 
899 /**
900  * panthor_vm_flush_all() - Flush L2 caches for the entirety of a VM's AS
901  * @vm: VM whose cache to flush
902  *
903  * Return: 0 on success, a negative error code if flush failed.
904  */
905 int panthor_vm_flush_all(struct panthor_vm *vm)
906 {
907 	return panthor_vm_flush_range(vm, vm->base.mm_start, vm->base.mm_range);
908 }
909 
910 static int panthor_vm_unmap_pages(struct panthor_vm *vm, u64 iova, u64 size)
911 {
912 	struct panthor_device *ptdev = vm->ptdev;
913 	struct io_pgtable_ops *ops = vm->pgtbl_ops;
914 	u64 offset = 0;
915 
916 	drm_dbg(&ptdev->base, "unmap: as=%d, iova=%llx, len=%llx", vm->as.id, iova, size);
917 
918 	while (offset < size) {
919 		size_t unmapped_sz = 0, pgcount;
920 		size_t pgsize = get_pgsize(iova + offset, size - offset, &pgcount);
921 
922 		unmapped_sz = ops->unmap_pages(ops, iova + offset, pgsize, pgcount, NULL);
923 
924 		if (drm_WARN_ON(&ptdev->base, unmapped_sz != pgsize * pgcount)) {
925 			drm_err(&ptdev->base, "failed to unmap range %llx-%llx (requested range %llx-%llx)\n",
926 				iova + offset + unmapped_sz,
927 				iova + offset + pgsize * pgcount,
928 				iova, iova + size);
929 			panthor_vm_flush_range(vm, iova, offset + unmapped_sz);
930 			return -EINVAL;
931 		}
932 		offset += unmapped_sz;
933 	}
934 
935 	return panthor_vm_flush_range(vm, iova, size);
936 }
937 
938 static int
939 panthor_vm_map_pages(struct panthor_vm *vm, u64 iova, int prot,
940 		     struct sg_table *sgt, u64 offset, u64 size)
941 {
942 	struct panthor_device *ptdev = vm->ptdev;
943 	unsigned int count;
944 	struct scatterlist *sgl;
945 	struct io_pgtable_ops *ops = vm->pgtbl_ops;
946 	u64 start_iova = iova;
947 	int ret;
948 
949 	if (!size)
950 		return 0;
951 
952 	for_each_sgtable_dma_sg(sgt, sgl, count) {
953 		dma_addr_t paddr = sg_dma_address(sgl);
954 		size_t len = sg_dma_len(sgl);
955 
956 		if (len <= offset) {
957 			offset -= len;
958 			continue;
959 		}
960 
961 		paddr += offset;
962 		len -= offset;
963 		len = min_t(size_t, len, size);
964 		size -= len;
965 
966 		drm_dbg(&ptdev->base, "map: as=%d, iova=%llx, paddr=%pad, len=%zx",
967 			vm->as.id, iova, &paddr, len);
968 
969 		while (len) {
970 			size_t pgcount, mapped = 0;
971 			size_t pgsize = get_pgsize(iova | paddr, len, &pgcount);
972 
973 			ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,
974 					     GFP_KERNEL, &mapped);
975 			iova += mapped;
976 			paddr += mapped;
977 			len -= mapped;
978 
979 			if (drm_WARN_ON(&ptdev->base, !ret && !mapped))
980 				ret = -ENOMEM;
981 
982 			if (ret) {
983 				/* If something failed, unmap what we've already mapped before
984 				 * returning. The unmap call is not supposed to fail.
985 				 */
986 				drm_WARN_ON(&ptdev->base,
987 					    panthor_vm_unmap_pages(vm, start_iova,
988 								   iova - start_iova));
989 				return ret;
990 			}
991 		}
992 
993 		if (!size)
994 			break;
995 
996 		offset = 0;
997 	}
998 
999 	return panthor_vm_flush_range(vm, start_iova, iova - start_iova);
1000 }
1001 
1002 static int flags_to_prot(u32 flags)
1003 {
1004 	int prot = 0;
1005 
1006 	if (flags & DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC)
1007 		prot |= IOMMU_NOEXEC;
1008 
1009 	if (!(flags & DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED))
1010 		prot |= IOMMU_CACHE;
1011 
1012 	if (flags & DRM_PANTHOR_VM_BIND_OP_MAP_READONLY)
1013 		prot |= IOMMU_READ;
1014 	else
1015 		prot |= IOMMU_READ | IOMMU_WRITE;
1016 
1017 	return prot;
1018 }
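
/*
 * For instance, DRM_PANTHOR_VM_BIND_OP_MAP_READONLY |
 * DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC translates to
 * IOMMU_READ | IOMMU_NOEXEC | IOMMU_CACHE, while a map with no flags set
 * yields IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE.
 */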
1019 
1020 /**
1021  * panthor_vm_alloc_va() - Allocate a region in the auto-va space
1022  * @vm: VM to allocate a region on.
1023  * @va: start of the VA range. Can be PANTHOR_VM_KERNEL_AUTO_VA if the user
1024  * wants the VA to be automatically allocated from the auto-VA range.
1025  * @size: size of the VA range.
1026  * @va_node: drm_mm_node to initialize. Must be zero-initialized.
1027  *
1028  * Some GPU objects, like heap chunks, are fully managed by the kernel and
1029  * need to be mapped to the userspace VM, in the region reserved for kernel
1030  * objects.
1031  *
1032  * This function takes care of allocating a region in the kernel auto-VA space.
1033  *
1034  * Return: 0 on success, an error code otherwise.
1035  */
1036 int
1037 panthor_vm_alloc_va(struct panthor_vm *vm, u64 va, u64 size,
1038 		    struct drm_mm_node *va_node)
1039 {
1040 	ssize_t vm_pgsz = panthor_vm_page_size(vm);
1041 	int ret;
1042 
1043 	if (!size || !IS_ALIGNED(size, vm_pgsz))
1044 		return -EINVAL;
1045 
1046 	if (va != PANTHOR_VM_KERNEL_AUTO_VA && !IS_ALIGNED(va, vm_pgsz))
1047 		return -EINVAL;
1048 
1049 	mutex_lock(&vm->mm_lock);
1050 	if (va != PANTHOR_VM_KERNEL_AUTO_VA) {
1051 		va_node->start = va;
1052 		va_node->size = size;
1053 		ret = drm_mm_reserve_node(&vm->mm, va_node);
1054 	} else {
1055 		ret = drm_mm_insert_node_in_range(&vm->mm, va_node, size,
1056 						  size >= SZ_2M ? SZ_2M : SZ_4K,
1057 						  0, vm->kernel_auto_va.start,
1058 						  vm->kernel_auto_va.end,
1059 						  DRM_MM_INSERT_BEST);
1060 	}
1061 	mutex_unlock(&vm->mm_lock);
1062 
1063 	return ret;
1064 }
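
/*
 * Usage sketch (hypothetical caller, mirroring what heap chunk mapping
 * does): reserve a kernel VA region, map an object there, and release the
 * region when the mapping is gone.
 *
 *	struct drm_mm_node va_node = {};
 *	int ret;
 *
 *	ret = panthor_vm_alloc_va(vm, PANTHOR_VM_KERNEL_AUTO_VA,
 *				  SZ_2M, &va_node);
 *	if (ret)
 *		return ret;
 *	... map the object at va_node.start ...
 *	panthor_vm_free_va(vm, &va_node);
 */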
1065 
1066 /**
1067  * panthor_vm_free_va() - Free a region allocated with panthor_vm_alloc_va()
1068  * @vm: VM to free the region on.
1069  * @va_node: Memory node representing the region to free.
1070  */
1071 void panthor_vm_free_va(struct panthor_vm *vm, struct drm_mm_node *va_node)
1072 {
1073 	mutex_lock(&vm->mm_lock);
1074 	drm_mm_remove_node(va_node);
1075 	mutex_unlock(&vm->mm_lock);
1076 }
1077 
1078 static void panthor_vm_bo_put(struct drm_gpuvm_bo *vm_bo)
1079 {
1080 	struct panthor_gem_object *bo = to_panthor_bo(vm_bo->obj);
1081 	struct drm_gpuvm *vm = vm_bo->vm;
1082 	bool unpin;
1083 
1084 	/* We must retain the GEM before calling drm_gpuvm_bo_put(),
1085 	 * otherwise the mutex might be destroyed while we hold it.
1086 	 * Same goes for the VM, since we take the VM resv lock.
1087 	 */
1088 	drm_gem_object_get(&bo->base.base);
1089 	drm_gpuvm_get(vm);
1090 
1091 	/* We take the resv lock to protect against concurrent accesses to the
1092 	 * gpuvm evicted/extobj lists that are modified in
1093 	 * drm_gpuvm_bo_destroy(), which is called if drm_gpuvm_bo_put()
1094 	 * releases the last vm_bo reference.
1095 	 * We take the BO GPUVA list lock to protect the vm_bo removal from the
1096 	 * GEM vm_bo list.
1097 	 */
1098 	dma_resv_lock(drm_gpuvm_resv(vm), NULL);
1099 	mutex_lock(&bo->gpuva_list_lock);
1100 	unpin = drm_gpuvm_bo_put(vm_bo);
1101 	mutex_unlock(&bo->gpuva_list_lock);
1102 	dma_resv_unlock(drm_gpuvm_resv(vm));
1103 
1104 	/* If the vm_bo object was destroyed, release the pin reference that
1105 	 * was held by this object.
1106 	 */
1107 	if (unpin && !drm_gem_is_imported(&bo->base.base))
1108 		drm_gem_shmem_unpin(&bo->base);
1109 
1110 	drm_gpuvm_put(vm);
1111 	drm_gem_object_put(&bo->base.base);
1112 }
1113 
1114 static void panthor_vm_cleanup_op_ctx(struct panthor_vm_op_ctx *op_ctx,
1115 				      struct panthor_vm *vm)
1116 {
1117 	struct panthor_vma *vma, *tmp_vma;
1118 
1119 	u32 remaining_pt_count = op_ctx->rsvd_page_tables.count -
1120 				 op_ctx->rsvd_page_tables.ptr;
1121 
1122 	if (remaining_pt_count) {
1123 		kmem_cache_free_bulk(pt_cache, remaining_pt_count,
1124 				     op_ctx->rsvd_page_tables.pages +
1125 				     op_ctx->rsvd_page_tables.ptr);
1126 	}
1127 
1128 	kfree(op_ctx->rsvd_page_tables.pages);
1129 
1130 	if (op_ctx->map.vm_bo)
1131 		panthor_vm_bo_put(op_ctx->map.vm_bo);
1132 
1133 	for (u32 i = 0; i < ARRAY_SIZE(op_ctx->preallocated_vmas); i++)
1134 		kfree(op_ctx->preallocated_vmas[i]);
1135 
1136 	list_for_each_entry_safe(vma, tmp_vma, &op_ctx->returned_vmas, node) {
1137 		list_del(&vma->node);
1138 		panthor_vm_bo_put(vma->base.vm_bo);
1139 		kfree(vma);
1140 	}
1141 }
1142 
1143 static struct panthor_vma *
1144 panthor_vm_op_ctx_get_vma(struct panthor_vm_op_ctx *op_ctx)
1145 {
1146 	for (u32 i = 0; i < ARRAY_SIZE(op_ctx->preallocated_vmas); i++) {
1147 		struct panthor_vma *vma = op_ctx->preallocated_vmas[i];
1148 
1149 		if (vma) {
1150 			op_ctx->preallocated_vmas[i] = NULL;
1151 			return vma;
1152 		}
1153 	}
1154 
1155 	return NULL;
1156 }
1157 
1158 static int
1159 panthor_vm_op_ctx_prealloc_vmas(struct panthor_vm_op_ctx *op_ctx)
1160 {
1161 	u32 vma_count;
1162 
1163 	switch (op_ctx->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) {
1164 	case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP:
1165 		/* One VMA for the new mapping, and two more VMAs for the remap case
1166 		 * which might contain both a prev and next VA.
1167 		 */
1168 		vma_count = 3;
1169 		break;
1170 
1171 	case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
1172 		/* Partial unmaps might trigger a remap with either a prev or a next VA,
1173 		 * but not both.
1174 		 */
1175 		vma_count = 1;
1176 		break;
1177 
1178 	default:
1179 		return 0;
1180 	}
1181 
1182 	for (u32 i = 0; i < vma_count; i++) {
1183 		struct panthor_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
1184 
1185 		if (!vma)
1186 			return -ENOMEM;
1187 
1188 		op_ctx->preallocated_vmas[i] = vma;
1189 	}
1190 
1191 	return 0;
1192 }
1193 
1194 #define PANTHOR_VM_BIND_OP_MAP_FLAGS \
1195 	(DRM_PANTHOR_VM_BIND_OP_MAP_READONLY | \
1196 	 DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC | \
1197 	 DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED | \
1198 	 DRM_PANTHOR_VM_BIND_OP_TYPE_MASK)
1199 
1200 static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
1201 					 struct panthor_vm *vm,
1202 					 struct panthor_gem_object *bo,
1203 					 u64 offset,
1204 					 u64 size, u64 va,
1205 					 u32 flags)
1206 {
1207 	struct drm_gpuvm_bo *preallocated_vm_bo;
1208 	struct sg_table *sgt = NULL;
1209 	u64 pt_count;
1210 	int ret;
1211 
1212 	if (!bo)
1213 		return -EINVAL;
1214 
1215 	if ((flags & ~PANTHOR_VM_BIND_OP_MAP_FLAGS) ||
1216 	    (flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) != DRM_PANTHOR_VM_BIND_OP_TYPE_MAP)
1217 		return -EINVAL;
1218 
1219 	/* Make sure the VA and size are aligned and in-bounds. */
1220 	if (size > bo->base.base.size || offset > bo->base.base.size - size)
1221 		return -EINVAL;
1222 
1223 	/* If the BO has an exclusive VM attached, it can't be mapped to other VMs. */
1224 	if (bo->exclusive_vm_root_gem &&
1225 	    bo->exclusive_vm_root_gem != panthor_vm_root_gem(vm))
1226 		return -EINVAL;
1227 
1228 	memset(op_ctx, 0, sizeof(*op_ctx));
1229 	INIT_LIST_HEAD(&op_ctx->returned_vmas);
1230 	op_ctx->flags = flags;
1231 	op_ctx->va.range = size;
1232 	op_ctx->va.addr = va;
1233 
1234 	ret = panthor_vm_op_ctx_prealloc_vmas(op_ctx);
1235 	if (ret)
1236 		goto err_cleanup;
1237 
1238 	if (!drm_gem_is_imported(&bo->base.base)) {
1239 		/* Pre-reserve the BO pages, so the map operation doesn't have to
1240 		 * allocate.
1241 		 */
1242 		ret = drm_gem_shmem_pin(&bo->base);
1243 		if (ret)
1244 			goto err_cleanup;
1245 	}
1246 
1247 	sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
1248 	if (IS_ERR(sgt)) {
1249 		if (!drm_gem_is_imported(&bo->base.base))
1250 			drm_gem_shmem_unpin(&bo->base);
1251 
1252 		ret = PTR_ERR(sgt);
1253 		goto err_cleanup;
1254 	}
1255 
1256 	op_ctx->map.sgt = sgt;
1257 
1258 	preallocated_vm_bo = drm_gpuvm_bo_create(&vm->base, &bo->base.base);
1259 	if (!preallocated_vm_bo) {
1260 		if (!drm_gem_is_imported(&bo->base.base))
1261 			drm_gem_shmem_unpin(&bo->base);
1262 
1263 		ret = -ENOMEM;
1264 		goto err_cleanup;
1265 	}
1266 
1267 	/* drm_gpuvm_bo_obtain_prealloc() will call drm_gpuvm_bo_put() on our
1268 	 * pre-allocated BO if the <BO,VM> association exists. Given we
1269 	 * only have one ref on preallocated_vm_bo, drm_gpuvm_bo_destroy() will
1270 	 * be called immediately, and we have to hold the VM resv lock when
1271 	 * calling this function.
1272 	 */
1273 	dma_resv_lock(panthor_vm_resv(vm), NULL);
1274 	mutex_lock(&bo->gpuva_list_lock);
1275 	op_ctx->map.vm_bo = drm_gpuvm_bo_obtain_prealloc(preallocated_vm_bo);
1276 	mutex_unlock(&bo->gpuva_list_lock);
1277 	dma_resv_unlock(panthor_vm_resv(vm));
1278 
1279 	/* If a vm_bo for this <VM,BO> combination exists, it already
1280 	 * retains a pin ref, and we can release the one we took earlier.
1281 	 *
1282 	 * If our pre-allocated vm_bo is picked, it now retains the pin ref,
1283 	 * which will be released in panthor_vm_bo_put().
1284 	 */
1285 	if (preallocated_vm_bo != op_ctx->map.vm_bo &&
1286 	    !drm_gem_is_imported(&bo->base.base))
1287 		drm_gem_shmem_unpin(&bo->base);
1288 
1289 	op_ctx->map.bo_offset = offset;
1290 
1291 	/* L1, L2 and L3 page tables.
1292 	 * We could optimize L3 allocation by iterating over the sgt and merging
1293 	 * 2M contiguous blocks, but it's simpler to over-provision and return
1294 	 * the pages if they're not used.
1295 	 */
1296 	pt_count = ((ALIGN(va + size, 1ull << 39) - ALIGN_DOWN(va, 1ull << 39)) >> 39) +
1297 		   ((ALIGN(va + size, 1ull << 30) - ALIGN_DOWN(va, 1ull << 30)) >> 30) +
1298 		   ((ALIGN(va + size, 1ull << 21) - ALIGN_DOWN(va, 1ull << 21)) >> 21);
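
	/* Worked example: va = 0, size = SZ_4M reserves one L1 table (a
	 * single 512G span), one L2 table (a single 1G span) and two L3
	 * tables (two 2M spans), i.e. pt_count = 4.
	 */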
1299 
1300 	op_ctx->rsvd_page_tables.pages = kcalloc(pt_count,
1301 						 sizeof(*op_ctx->rsvd_page_tables.pages),
1302 						 GFP_KERNEL);
1303 	if (!op_ctx->rsvd_page_tables.pages) {
1304 		ret = -ENOMEM;
1305 		goto err_cleanup;
1306 	}
1307 
1308 	ret = kmem_cache_alloc_bulk(pt_cache, GFP_KERNEL, pt_count,
1309 				    op_ctx->rsvd_page_tables.pages);
1310 	op_ctx->rsvd_page_tables.count = ret;
1311 	if (ret != pt_count) {
1312 		ret = -ENOMEM;
1313 		goto err_cleanup;
1314 	}
1315 
1316 	/* Insert BO into the extobj list last, when we know nothing can fail. */
1317 	dma_resv_lock(panthor_vm_resv(vm), NULL);
1318 	drm_gpuvm_bo_extobj_add(op_ctx->map.vm_bo);
1319 	dma_resv_unlock(panthor_vm_resv(vm));
1320 
1321 	return 0;
1322 
1323 err_cleanup:
1324 	panthor_vm_cleanup_op_ctx(op_ctx, vm);
1325 	return ret;
1326 }
1327 
1328 static int panthor_vm_prepare_unmap_op_ctx(struct panthor_vm_op_ctx *op_ctx,
1329 					   struct panthor_vm *vm,
1330 					   u64 va, u64 size)
1331 {
1332 	u32 pt_count = 0;
1333 	int ret;
1334 
1335 	memset(op_ctx, 0, sizeof(*op_ctx));
1336 	INIT_LIST_HEAD(&op_ctx->returned_vmas);
1337 	op_ctx->va.range = size;
1338 	op_ctx->va.addr = va;
1339 	op_ctx->flags = DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP;
1340 
1341 	/* Pre-allocate L3 page tables to account for the split-2M-block
1342 	 * situation on unmap.
1343 	 */
1344 	if (va != ALIGN(va, SZ_2M))
1345 		pt_count++;
1346 
1347 	if (va + size != ALIGN(va + size, SZ_2M) &&
1348 	    ALIGN(va + size, SZ_2M) != ALIGN(va, SZ_2M))
1349 		pt_count++;
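
	/* Worked example: unmapping va = SZ_2M + SZ_4K with
	 * size = SZ_4M - SZ_8K ends at 3 * SZ_2M - SZ_4K. Both the start and
	 * the end fall inside different 2M blocks, so two L3 tables are
	 * reserved to split those blocks.
	 */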
1350 
1351 	ret = panthor_vm_op_ctx_prealloc_vmas(op_ctx);
1352 	if (ret)
1353 		goto err_cleanup;
1354 
1355 	if (pt_count) {
1356 		op_ctx->rsvd_page_tables.pages = kcalloc(pt_count,
1357 							 sizeof(*op_ctx->rsvd_page_tables.pages),
1358 							 GFP_KERNEL);
1359 		if (!op_ctx->rsvd_page_tables.pages) {
1360 			ret = -ENOMEM;
1361 			goto err_cleanup;
1362 		}
1363 
1364 		ret = kmem_cache_alloc_bulk(pt_cache, GFP_KERNEL, pt_count,
1365 					    op_ctx->rsvd_page_tables.pages);
1366 		if (ret != pt_count) {
1367 			ret = -ENOMEM;
1368 			goto err_cleanup;
1369 		}
1370 		op_ctx->rsvd_page_tables.count = pt_count;
1371 	}
1372 
1373 	return 0;
1374 
1375 err_cleanup:
1376 	panthor_vm_cleanup_op_ctx(op_ctx, vm);
1377 	return ret;
1378 }
1379 
1380 static void panthor_vm_prepare_sync_only_op_ctx(struct panthor_vm_op_ctx *op_ctx,
1381 						struct panthor_vm *vm)
1382 {
1383 	memset(op_ctx, 0, sizeof(*op_ctx));
1384 	INIT_LIST_HEAD(&op_ctx->returned_vmas);
1385 	op_ctx->flags = DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY;
1386 }
1387 
1388 /**
1389  * panthor_vm_get_bo_for_va() - Get the GEM object mapped at a virtual address
1390  * @vm: VM to look into.
1391  * @va: Virtual address to search for.
1392  * @bo_offset: Offset of the GEM object mapped at this virtual address.
1393  * Only valid on success.
1394  *
1395  * The object returned by this function might no longer be mapped when the
1396  * function returns. It's the caller's responsibility to ensure there's no
1397  * concurrent map/unmap operations making the returned value invalid, or
1398  * make sure it doesn't matter if the object is no longer mapped.
1399  *
1400  * Return: A valid pointer on success, an ERR_PTR() otherwise.
1401  */
1402 struct panthor_gem_object *
1403 panthor_vm_get_bo_for_va(struct panthor_vm *vm, u64 va, u64 *bo_offset)
1404 {
1405 	struct panthor_gem_object *bo = ERR_PTR(-ENOENT);
1406 	struct drm_gpuva *gpuva;
1407 	struct panthor_vma *vma;
1408 
1409 	/* Take the VM lock to prevent concurrent map/unmap operations. */
1410 	mutex_lock(&vm->op_lock);
1411 	gpuva = drm_gpuva_find_first(&vm->base, va, 1);
1412 	vma = gpuva ? container_of(gpuva, struct panthor_vma, base) : NULL;
1413 	if (vma && vma->base.gem.obj) {
1414 		drm_gem_object_get(vma->base.gem.obj);
1415 		bo = to_panthor_bo(vma->base.gem.obj);
1416 		*bo_offset = vma->base.gem.offset + (va - vma->base.va.addr);
1417 	}
1418 	mutex_unlock(&vm->op_lock);
1419 
1420 	return bo;
1421 }
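
/*
 * Usage sketch (hypothetical): resolving the BO backing a faulting GPU
 * address, e.g. when building a debug dump. The reference taken by
 * panthor_vm_get_bo_for_va() must be released by the caller.
 *
 *	u64 bo_offset;
 *	struct panthor_gem_object *bo;
 *
 *	bo = panthor_vm_get_bo_for_va(vm, fault_addr, &bo_offset);
 *	if (!IS_ERR(bo)) {
 *		... inspect bo ...
 *		drm_gem_object_put(&bo->base.base);
 *	}
 */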
1422 
1423 #define PANTHOR_VM_MIN_KERNEL_VA_SIZE	SZ_256M
1424 
1425 static u64
1426 panthor_vm_create_get_user_va_range(const struct drm_panthor_vm_create *args,
1427 				    u64 full_va_range)
1428 {
1429 	u64 user_va_range;
1430 
1431 	/* Make sure we have a minimum amount of VA space for kernel objects. */
1432 	if (full_va_range < PANTHOR_VM_MIN_KERNEL_VA_SIZE)
1433 		return 0;
1434 
1435 	if (args->user_va_range) {
1436 		/* Use the user provided value if != 0. */
1437 		user_va_range = args->user_va_range;
1438 	} else if (TASK_SIZE_OF(current) < full_va_range) {
1439 		/* If the task VM size is smaller than the GPU VA range, pick this
1440 		 * as our default user VA range, so userspace can CPU/GPU map buffers
1441 		 * at the same address.
1442 		 */
1443 		user_va_range = TASK_SIZE_OF(current);
1444 	} else {
1445 		/* If the GPU VA range is smaller than the task VM size, we
1446 		 * just have to live with the fact we won't be able to map
1447 		 * all buffers at the same GPU/CPU address.
1448 		 *
1449 		 * If the GPU VA range is bigger than 4G (more than 32-bit of
1450 		 * VA), we split the range in two, and assign half of it to
1451 		 * the user and the other half to the kernel, if it's not, we
1452 		 * keep the kernel VA space as small as possible.
1453 		 */
1454 		user_va_range = full_va_range > SZ_4G ?
1455 				full_va_range / 2 :
1456 				full_va_range - PANTHOR_VM_MIN_KERNEL_VA_SIZE;
1457 	}
1458 
1459 	if (full_va_range - PANTHOR_VM_MIN_KERNEL_VA_SIZE < user_va_range)
1460 		user_va_range = full_va_range - PANTHOR_VM_MIN_KERNEL_VA_SIZE;
1461 
1462 	return user_va_range;
1463 }
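
/*
 * Worked example (illustrative): with va_bits = 48, full_va_range is 2^48.
 * If userspace passes user_va_range = 0 and the CPU task size is 2^47, the
 * default user range is 2^47, leaving the upper half to the kernel. If the
 * full GPU VA range were only 4G, the user would get
 * 4G - PANTHOR_VM_MIN_KERNEL_VA_SIZE instead.
 */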
1464 
1465 #define PANTHOR_VM_CREATE_FLAGS		0
1466 
1467 static int
1468 panthor_vm_create_check_args(const struct panthor_device *ptdev,
1469 			     const struct drm_panthor_vm_create *args,
1470 			     u64 *kernel_va_start, u64 *kernel_va_range)
1471 {
1472 	u32 va_bits = GPU_MMU_FEATURES_VA_BITS(ptdev->gpu_info.mmu_features);
1473 	u64 full_va_range = 1ull << va_bits;
1474 	u64 user_va_range;
1475 
1476 	if (args->flags & ~PANTHOR_VM_CREATE_FLAGS)
1477 		return -EINVAL;
1478 
1479 	user_va_range = panthor_vm_create_get_user_va_range(args, full_va_range);
1480 	if (!user_va_range || (args->user_va_range && args->user_va_range > user_va_range))
1481 		return -EINVAL;
1482 
1483 	/* Pick a kernel VA range that's a power of two, to have a clear split. */
1484 	*kernel_va_range = rounddown_pow_of_two(full_va_range - user_va_range);
1485 	*kernel_va_start = full_va_range - *kernel_va_range;
1486 	return 0;
1487 }
1488 
1489 /*
1490  * Only 32 VMs per open file. If that becomes a limiting factor, we can
1491  * increase this number.
1492  */
1493 #define PANTHOR_MAX_VMS_PER_FILE	32
1494 
1495 /**
1496  * panthor_vm_pool_create_vm() - Create a VM
1497  * @ptdev: The panthor device
1498  * @pool: The VM pool to create this VM in.
1499  * @args: VM creation args.
1500  *
1501  * Return: a positive VM ID on success, a negative error code otherwise.
1502  */
1503 int panthor_vm_pool_create_vm(struct panthor_device *ptdev,
1504 			      struct panthor_vm_pool *pool,
1505 			      struct drm_panthor_vm_create *args)
1506 {
1507 	u64 kernel_va_start, kernel_va_range;
1508 	struct panthor_vm *vm;
1509 	int ret;
1510 	u32 id;
1511 
1512 	ret = panthor_vm_create_check_args(ptdev, args, &kernel_va_start, &kernel_va_range);
1513 	if (ret)
1514 		return ret;
1515 
1516 	vm = panthor_vm_create(ptdev, false, kernel_va_start, kernel_va_range,
1517 			       kernel_va_start, kernel_va_range);
1518 	if (IS_ERR(vm))
1519 		return PTR_ERR(vm);
1520 
1521 	ret = xa_alloc(&pool->xa, &id, vm,
1522 		       XA_LIMIT(1, PANTHOR_MAX_VMS_PER_FILE), GFP_KERNEL);
1523 
1524 	if (ret) {
1525 		panthor_vm_put(vm);
1526 		return ret;
1527 	}
1528 
1529 	args->user_va_range = kernel_va_start;
1530 	return id;
1531 }
1532 
1533 static void panthor_vm_destroy(struct panthor_vm *vm)
1534 {
1535 	if (!vm)
1536 		return;
1537 
1538 	vm->destroyed = true;
1539 
1540 	mutex_lock(&vm->heaps.lock);
1541 	panthor_heap_pool_destroy(vm->heaps.pool);
1542 	vm->heaps.pool = NULL;
1543 	mutex_unlock(&vm->heaps.lock);
1544 
1545 	drm_WARN_ON(&vm->ptdev->base,
1546 		    panthor_vm_unmap_range(vm, vm->base.mm_start, vm->base.mm_range));
1547 	panthor_vm_put(vm);
1548 }
1549 
1550 /**
1551  * panthor_vm_pool_destroy_vm() - Destroy a VM.
1552  * @pool: VM pool.
1553  * @handle: VM handle.
1554  *
1555  * This function doesn't free the VM object or its resources, it just kills
1556  * all mappings, and makes sure nothing can be mapped after that point.
1557  *
1558  * If there were any active jobs at the time this function is called, these
1559  * jobs should experience page faults and be killed as a result.
1560  *
1561  * The VM resources are freed when the last reference on the VM object is
1562  * dropped.
1563  *
1564  * Return: %0 for success, negative errno value for failure
1565  */
1566 int panthor_vm_pool_destroy_vm(struct panthor_vm_pool *pool, u32 handle)
1567 {
1568 	struct panthor_vm *vm;
1569 
1570 	vm = xa_erase(&pool->xa, handle);
1571 
1572 	panthor_vm_destroy(vm);
1573 
1574 	return vm ? 0 : -EINVAL;
1575 }
1576 
1577 /**
1578  * panthor_vm_pool_get_vm() - Retrieve VM object bound to a VM handle
1579  * @pool: VM pool to check.
1580  * @handle: Handle of the VM to retrieve.
1581  *
1582  * Return: A valid pointer if the VM exists, NULL otherwise.
1583  */
1584 struct panthor_vm *
1585 panthor_vm_pool_get_vm(struct panthor_vm_pool *pool, u32 handle)
1586 {
1587 	struct panthor_vm *vm;
1588 
1589 	xa_lock(&pool->xa);
1590 	vm = panthor_vm_get(xa_load(&pool->xa, handle));
1591 	xa_unlock(&pool->xa);
1592 
1593 	return vm;
1594 }
1595 
1596 /**
1597  * panthor_vm_pool_destroy() - Destroy a VM pool.
1598  * @pfile: File.
1599  *
1600  * Destroy all VMs in the pool, and release the pool resources.
1601  *
1602  * Note that VMs can outlive the pool they were created from if other
1603  * objects hold a reference to these VMs.
1604  */
1605 void panthor_vm_pool_destroy(struct panthor_file *pfile)
1606 {
1607 	struct panthor_vm *vm;
1608 	unsigned long i;
1609 
1610 	if (!pfile->vms)
1611 		return;
1612 
1613 	xa_for_each(&pfile->vms->xa, i, vm)
1614 		panthor_vm_destroy(vm);
1615 
1616 	xa_destroy(&pfile->vms->xa);
1617 	kfree(pfile->vms);
1618 }
1619 
1620 /**
1621  * panthor_vm_pool_create() - Create a VM pool
1622  * @pfile: File.
1623  *
1624  * Return: 0 on success, a negative error code otherwise.
1625  */
1626 int panthor_vm_pool_create(struct panthor_file *pfile)
1627 {
1628 	pfile->vms = kzalloc(sizeof(*pfile->vms), GFP_KERNEL);
1629 	if (!pfile->vms)
1630 		return -ENOMEM;
1631 
1632 	xa_init_flags(&pfile->vms->xa, XA_FLAGS_ALLOC1);
1633 	return 0;
1634 }
1635 
1636 /* dummy TLB ops, the real TLB flush happens in panthor_vm_flush_range() */
1637 static void mmu_tlb_flush_all(void *cookie)
1638 {
1639 }
1640 
1641 static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule, void *cookie)
1642 {
1643 }
1644 
1645 static const struct iommu_flush_ops mmu_tlb_ops = {
1646 	.tlb_flush_all = mmu_tlb_flush_all,
1647 	.tlb_flush_walk = mmu_tlb_flush_walk,
1648 };
1649 
1650 static const char *access_type_name(struct panthor_device *ptdev,
1651 				    u32 fault_status)
1652 {
1653 	switch (fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) {
1654 	case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC:
1655 		return "ATOMIC";
1656 	case AS_FAULTSTATUS_ACCESS_TYPE_READ:
1657 		return "READ";
1658 	case AS_FAULTSTATUS_ACCESS_TYPE_WRITE:
1659 		return "WRITE";
1660 	case AS_FAULTSTATUS_ACCESS_TYPE_EX:
1661 		return "EXECUTE";
1662 	default:
1663 		drm_WARN_ON(&ptdev->base, 1);
1664 		return NULL;
1665 	}
1666 }
1667 
1668 static void panthor_mmu_irq_handler(struct panthor_device *ptdev, u32 status)
1669 {
1670 	bool has_unhandled_faults = false;
1671 
1672 	status = panthor_mmu_fault_mask(ptdev, status);
1673 	while (status) {
1674 		u32 as = ffs(status | (status >> 16)) - 1;
1675 		u32 mask = panthor_mmu_as_fault_mask(ptdev, as);
1676 		u32 new_int_mask;
1677 		u64 addr;
1678 		u32 fault_status;
1679 		u32 exception_type;
1680 		u32 access_type;
1681 		u32 source_id;
1682 
1683 		fault_status = gpu_read(ptdev, AS_FAULTSTATUS(as));
1684 		addr = gpu_read(ptdev, AS_FAULTADDRESS_LO(as));
1685 		addr |= (u64)gpu_read(ptdev, AS_FAULTADDRESS_HI(as)) << 32;
1686 
1687 		/* decode the fault status */
1688 		exception_type = fault_status & 0xFF;
1689 		access_type = (fault_status >> 8) & 0x3;
1690 		source_id = (fault_status >> 16);
1691 
1692 		mutex_lock(&ptdev->mmu->as.slots_lock);
1693 
1694 		ptdev->mmu->as.faulty_mask |= mask;
1695 		new_int_mask =
1696 			panthor_mmu_fault_mask(ptdev, ~ptdev->mmu->as.faulty_mask);
1697 
1698 		/* terminal fault, print info about the fault */
1699 		drm_err(&ptdev->base,
1700 			"Unhandled Page fault in AS%d at VA 0x%016llX\n"
1701 			"raw fault status: 0x%X\n"
1702 			"decoded fault status: %s\n"
1703 			"exception type 0x%X: %s\n"
1704 			"access type 0x%X: %s\n"
1705 			"source id 0x%X\n",
1706 			as, addr,
1707 			fault_status,
1708 			(fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
1709 			exception_type, panthor_exception_name(ptdev, exception_type),
1710 			access_type, access_type_name(ptdev, fault_status),
1711 			source_id);
1712 
1713 		/* We don't handle VM faults at the moment, so let's just clear the
1714 		 * interrupt and let the writer/reader crash.
1715 		 * Note that COMPLETED irqs are never cleared, but this is fine
1716 		 * because they are always masked.
1717 		 */
1718 		gpu_write(ptdev, MMU_INT_CLEAR, mask);
1719 
1720 		/* Ignore MMU interrupts on this AS until it's been
1721 		 * re-enabled.
1722 		 */
1723 		ptdev->mmu->irq.mask = new_int_mask;
1724 
1725 		if (ptdev->mmu->as.slots[as].vm)
1726 			ptdev->mmu->as.slots[as].vm->unhandled_fault = true;
1727 
1728 		/* Disable the MMU to kill jobs on this AS. */
1729 		panthor_mmu_as_disable(ptdev, as);
1730 		mutex_unlock(&ptdev->mmu->as.slots_lock);
1731 
1732 		status &= ~mask;
1733 		has_unhandled_faults = true;
1734 	}
1735 
1736 	if (has_unhandled_faults)
1737 		panthor_sched_report_mmu_fault(ptdev);
1738 }
1739 PANTHOR_IRQ_HANDLER(mmu, MMU, panthor_mmu_irq_handler);
1740 
1741 /**
1742  * panthor_mmu_suspend() - Suspend the MMU logic
1743  * @ptdev: Device.
1744  *
1745  * All we do here is de-assign the AS slots on all active VMs, so things
1746  * get flushed to main memory, and no further accesses to these VMs are
1747  * possible.
1748  *
1749  * We also suspend the MMU IRQ.
1750  */
1751 void panthor_mmu_suspend(struct panthor_device *ptdev)
1752 {
1753 	mutex_lock(&ptdev->mmu->as.slots_lock);
1754 	for (u32 i = 0; i < ARRAY_SIZE(ptdev->mmu->as.slots); i++) {
1755 		struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm;
1756 
1757 		if (vm) {
1758 			drm_WARN_ON(&ptdev->base, panthor_mmu_as_disable(ptdev, i));
1759 			panthor_vm_release_as_locked(vm);
1760 		}
1761 	}
1762 	mutex_unlock(&ptdev->mmu->as.slots_lock);
1763 
1764 	panthor_mmu_irq_suspend(&ptdev->mmu->irq);
1765 }
1766 
1767 /**
1768  * panthor_mmu_resume() - Resume the MMU logic
1769  * @ptdev: Device.
1770  *
1771  * Resume the IRQ.
1772  *
1773  * We don't re-enable previously active VMs. We assume other parts of the
1774  * driver will call panthor_vm_active() on the VMs they intend to use.
1775  */
1776 void panthor_mmu_resume(struct panthor_device *ptdev)
1777 {
1778 	mutex_lock(&ptdev->mmu->as.slots_lock);
1779 	ptdev->mmu->as.alloc_mask = 0;
1780 	ptdev->mmu->as.faulty_mask = 0;
1781 	mutex_unlock(&ptdev->mmu->as.slots_lock);
1782 
1783 	panthor_mmu_irq_resume(&ptdev->mmu->irq, panthor_mmu_fault_mask(ptdev, ~0));
1784 }
1785 
1786 /**
1787  * panthor_mmu_pre_reset() - Prepare for a reset
1788  * @ptdev: Device.
1789  *
1790  * Suspend the IRQ, and make sure all VM_BIND queues are stopped, so we
1791  * don't get asked to do a VM operation while the GPU is down.
1792  *
1793  * We don't cleanly shut down the AS slots here, because the reset might
1794  * come from an AS_ACTIVE_BIT stuck situation.
1795  */
1796 void panthor_mmu_pre_reset(struct panthor_device *ptdev)
1797 {
1798 	struct panthor_vm *vm;
1799 
1800 	panthor_mmu_irq_suspend(&ptdev->mmu->irq);
1801 
1802 	mutex_lock(&ptdev->mmu->vm.lock);
1803 	ptdev->mmu->vm.reset_in_progress = true;
1804 	list_for_each_entry(vm, &ptdev->mmu->vm.list, node)
1805 		panthor_vm_stop(vm);
1806 	mutex_unlock(&ptdev->mmu->vm.lock);
1807 }
1808 
1809 /**
1810  * panthor_mmu_post_reset() - Restore things after a reset
1811  * @ptdev: Device.
1812  *
1813  * Put the MMU logic back in action after a reset. That implies resuming the
1814  * IRQ and re-enabling the VM_BIND queues.
1815  */
1816 void panthor_mmu_post_reset(struct panthor_device *ptdev)
1817 {
1818 	struct panthor_vm *vm;
1819 
1820 	mutex_lock(&ptdev->mmu->as.slots_lock);
1821 
1822 	/* Now that the reset is effective, we can assume that none of the
1823 	 * AS slots are set up, and clear the faulty flags too.
1824 	 */
1825 	ptdev->mmu->as.alloc_mask = 0;
1826 	ptdev->mmu->as.faulty_mask = 0;
1827 
1828 	for (u32 i = 0; i < ARRAY_SIZE(ptdev->mmu->as.slots); i++) {
1829 		struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm;
1830 
1831 		if (vm)
1832 			panthor_vm_release_as_locked(vm);
1833 	}
1834 
1835 	mutex_unlock(&ptdev->mmu->as.slots_lock);
1836 
1837 	panthor_mmu_irq_resume(&ptdev->mmu->irq, panthor_mmu_fault_mask(ptdev, ~0));
1838 
1839 	/* Restart the VM_BIND queues. */
1840 	mutex_lock(&ptdev->mmu->vm.lock);
1841 	list_for_each_entry(vm, &ptdev->mmu->vm.list, node) {
1842 		panthor_vm_start(vm);
1843 	}
1844 	ptdev->mmu->vm.reset_in_progress = false;
1845 	mutex_unlock(&ptdev->mmu->vm.lock);
1846 }
1847 
1848 static void panthor_vm_free(struct drm_gpuvm *gpuvm)
1849 {
1850 	struct panthor_vm *vm = container_of(gpuvm, struct panthor_vm, base);
1851 	struct panthor_device *ptdev = vm->ptdev;
1852 
1853 	mutex_lock(&vm->heaps.lock);
1854 	if (drm_WARN_ON(&ptdev->base, vm->heaps.pool))
1855 		panthor_heap_pool_destroy(vm->heaps.pool);
1856 	mutex_unlock(&vm->heaps.lock);
1857 	mutex_destroy(&vm->heaps.lock);
1858 
1859 	mutex_lock(&ptdev->mmu->vm.lock);
1860 	list_del(&vm->node);
1861 	/* Restore the scheduler state so we can call drm_sched_entity_destroy()
1862 	 * and drm_sched_fini(). If we get there, it means we have no jobs left
1863 	 * and no new jobs can be queued, so we can start the scheduler without
1864 	 * risking interfering with the reset.
1865 	 */
1866 	if (ptdev->mmu->vm.reset_in_progress)
1867 		panthor_vm_start(vm);
1868 	mutex_unlock(&ptdev->mmu->vm.lock);
1869 
1870 	drm_sched_entity_destroy(&vm->entity);
1871 	drm_sched_fini(&vm->sched);
1872 
1873 	mutex_lock(&ptdev->mmu->as.slots_lock);
1874 	if (vm->as.id >= 0) {
1875 		int cookie;
1876 
1877 		if (drm_dev_enter(&ptdev->base, &cookie)) {
1878 			panthor_mmu_as_disable(ptdev, vm->as.id);
1879 			drm_dev_exit(cookie);
1880 		}
1881 
1882 		ptdev->mmu->as.slots[vm->as.id].vm = NULL;
1883 		clear_bit(vm->as.id, &ptdev->mmu->as.alloc_mask);
1884 		list_del(&vm->as.lru_node);
1885 	}
1886 	mutex_unlock(&ptdev->mmu->as.slots_lock);
1887 
1888 	free_io_pgtable_ops(vm->pgtbl_ops);
1889 
1890 	drm_mm_takedown(&vm->mm);
1891 	kfree(vm);
1892 }
1893 
1894 /**
1895  * panthor_vm_put() - Release a reference on a VM
1896  * @vm: VM to release the reference on. Can be NULL.
1897  */
1898 void panthor_vm_put(struct panthor_vm *vm)
1899 {
1900 	drm_gpuvm_put(vm ? &vm->base : NULL);
1901 }
1902 
1903 /**
1904  * panthor_vm_get() - Get a VM reference
1905  * @vm: VM to get the reference on. Can be NULL.
1906  *
1907  * Return: @vm value.
1908  */
1909 struct panthor_vm *panthor_vm_get(struct panthor_vm *vm)
1910 {
1911 	if (vm)
1912 		drm_gpuvm_get(&vm->base);
1913 
1914 	return vm;
1915 }
1916 
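/*
 * Illustrative sketch (not part of the driver): the typical pattern for a
 * caller caching a VM pointer. Both helpers are NULL-safe, so the old
 * reference can be dropped unconditionally.
 */
static void __maybe_unused example_swap_cached_vm(struct panthor_vm **cached,
						  struct panthor_vm *vm)
{
	/* Drop the old reference (NULL-safe), then take a new one. */
	panthor_vm_put(*cached);
	*cached = panthor_vm_get(vm);
}
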
1917 /**
1918  * panthor_vm_get_heap_pool() - Get the heap pool attached to a VM
1919  * @vm: VM to query the heap pool on.
1920  * @create: True if the heap pool should be created when it doesn't exist.
1921  *
1922  * Heap pools are per-VM. This function allows one to retrieve the heap pool
1923  * attached to a VM.
1924  *
1925  * If no heap pool exists yet, and @create is true, we create one.
1926  *
1927  * The returned panthor_heap_pool should be released with panthor_heap_pool_put().
1928  *
1929  * Return: A valid pointer on success, an ERR_PTR() otherwise.
1930  */
1931 struct panthor_heap_pool *panthor_vm_get_heap_pool(struct panthor_vm *vm, bool create)
1932 {
1933 	struct panthor_heap_pool *pool;
1934 
1935 	mutex_lock(&vm->heaps.lock);
1936 	if (!vm->heaps.pool && create) {
1937 		if (vm->destroyed)
1938 			pool = ERR_PTR(-EINVAL);
1939 		else
1940 			pool = panthor_heap_pool_create(vm->ptdev, vm);
1941 
1942 		if (!IS_ERR(pool))
1943 			vm->heaps.pool = panthor_heap_pool_get(pool);
1944 	} else {
1945 		pool = panthor_heap_pool_get(vm->heaps.pool);
1946 		if (!pool)
1947 			pool = ERR_PTR(-ENOENT);
1948 	}
1949 	mutex_unlock(&vm->heaps.lock);
1950 
1951 	return pool;
1952 }
1953 
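/*
 * Illustrative sketch (not part of the driver): lazily creating the heap
 * pool and releasing the reference once done. panthor_heap_pool_put() is
 * assumed to be the release counterpart declared in panthor_heap.h.
 */
static int __maybe_unused example_with_heap_pool(struct panthor_vm *vm)
{
	struct panthor_heap_pool *pool = panthor_vm_get_heap_pool(vm, true);

	if (IS_ERR(pool))
		return PTR_ERR(pool);

	/* ... allocate/free heap chunks through the pool ... */

	panthor_heap_pool_put(pool);
	return 0;
}
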
1954 /**
1955  * panthor_vm_heaps_sizes() - Calculate the size of all heap chunks across
1956  * all heap pools attached to the VMs of a file
1957  * @pfile: File.
1958  * @stats: Memory stats to be updated.
1959  *
1960  * Sum the sizes of all heap chunks in the heap pools bound to the VMs owned
1961  * by @pfile. If a VM is active, its size is also recorded as active.
1962  */
1963 void panthor_vm_heaps_sizes(struct panthor_file *pfile, struct drm_memory_stats *stats)
1964 {
1965 	struct panthor_vm *vm;
1966 	unsigned long i;
1967 
1968 	if (!pfile->vms)
1969 		return;
1970 
1971 	xa_lock(&pfile->vms->xa);
1972 	xa_for_each(&pfile->vms->xa, i, vm) {
1973 		size_t size = panthor_heap_pool_size(vm->heaps.pool);
1974 		stats->resident += size;
1975 		if (vm->as.id >= 0)
1976 			stats->active += size;
1977 	}
1978 	xa_unlock(&pfile->vms->xa);
1979 }
1980 
1981 static u64 mair_to_memattr(u64 mair, bool coherent)
1982 {
1983 	u64 memattr = 0;
1984 	u32 i;
1985 
1986 	for (i = 0; i < 8; i++) {
1987 		u8 in_attr = mair >> (8 * i), out_attr;
1988 		u8 outer = in_attr >> 4, inner = in_attr & 0xf;
1989 
1990 		/* For caching to be enabled, the inner and outer caching policies
1991 		 * both have to be write-back. If either of them is write-through
1992 		 * or non-cacheable, we just choose non-cacheable. Device
1993 		 * memory is also translated to non-cacheable.
1994 		 */
1995 		if (!(outer & 3) || !(outer & 4) || !(inner & 4)) {
1996 			out_attr = AS_MEMATTR_AARCH64_INNER_OUTER_NC |
1997 				   AS_MEMATTR_AARCH64_SH_MIDGARD_INNER |
1998 				   AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(false, false);
1999 		} else {
2000 			out_attr = AS_MEMATTR_AARCH64_INNER_OUTER_WB |
2001 				   AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(inner & 1, inner & 2);
2002 			/* Use SH_MIDGARD_INNER mode when device isn't coherent,
2003 			 * so SH_IS, which is used when IOMMU_CACHE is set, maps
2004 			 * to Mali's internal-shareable mode. As per the Mali
2005 			 * Spec, inner and outer-shareable modes aren't allowed
2006 			 * for WB memory when coherency is disabled.
2007 			 * Use SH_CPU_INNER mode when coherency is enabled, so
2008 			 * that SH_IS actually maps to the standard definition of
2009 			 * inner-shareable.
2010 			 */
2011 			if (!coherent)
2012 				out_attr |= AS_MEMATTR_AARCH64_SH_MIDGARD_INNER;
2013 			else
2014 				out_attr |= AS_MEMATTR_AARCH64_SH_CPU_INNER;
2015 		}
2016 
2017 		memattr |= (u64)out_attr << (8 * i);
2018 	}
2019 
2020 	return memattr;
2021 }
2022 
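/*
 * Worked example: a MAIR attribute of 0xff (inner/outer write-back,
 * read/write-allocate) keeps caching enabled and maps to the write-back
 * MEMATTR encoding with both allocation hints set, while 0x44 (inner/outer
 * non-cacheable) and 0x04 (device memory) both fall through to the
 * non-cacheable encoding.
 */
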
2023 static void panthor_vma_link(struct panthor_vm *vm,
2024 			     struct panthor_vma *vma,
2025 			     struct drm_gpuvm_bo *vm_bo)
2026 {
2027 	struct panthor_gem_object *bo = to_panthor_bo(vma->base.gem.obj);
2028 
2029 	mutex_lock(&bo->gpuva_list_lock);
2030 	drm_gpuva_link(&vma->base, vm_bo);
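	/* drm_gpuva_link() took its own vm_bo reference, so the put below
	 * must not be the last one; warn if it was.
	 */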
2031 	drm_WARN_ON(&vm->ptdev->base, drm_gpuvm_bo_put(vm_bo));
2032 	mutex_unlock(&bo->gpuva_list_lock);
2033 }
2034 
2035 static void panthor_vma_unlink(struct panthor_vm *vm,
2036 			       struct panthor_vma *vma)
2037 {
2038 	struct panthor_gem_object *bo = to_panthor_bo(vma->base.gem.obj);
2039 	struct drm_gpuvm_bo *vm_bo = drm_gpuvm_bo_get(vma->base.vm_bo);
2040 
2041 	mutex_lock(&bo->gpuva_list_lock);
2042 	drm_gpuva_unlink(&vma->base);
2043 	mutex_unlock(&bo->gpuva_list_lock);
2044 
2045 	/* drm_gpuva_unlink() releases the vm_bo, but we manually retained it
2046 	 * when entering this function, so we can implement deferred VMA
2047 	 * destruction. Re-assign it here.
2048 	 */
2049 	vma->base.vm_bo = vm_bo;
2050 	list_add_tail(&vma->node, &vm->op_ctx->returned_vmas);
2051 }
2052 
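/*
 * Note: deferred release matters because these callbacks run in the
 * dma-fence signalling path, where dropping GEM/vm_bo references (which may
 * take dma-resv locks) isn't allowed. The VMAs parked on
 * op_ctx->returned_vmas are released later, in panthor_vm_cleanup_op_ctx().
 */
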
2053 static void panthor_vma_init(struct panthor_vma *vma, u32 flags)
2054 {
2055 	INIT_LIST_HEAD(&vma->node);
2056 	vma->flags = flags;
2057 }
2058 
2059 #define PANTHOR_VM_MAP_FLAGS \
2060 	(DRM_PANTHOR_VM_BIND_OP_MAP_READONLY | \
2061 	 DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC | \
2062 	 DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED)
2063 
2064 static int panthor_gpuva_sm_step_map(struct drm_gpuva_op *op, void *priv)
2065 {
2066 	struct panthor_vm *vm = priv;
2067 	struct panthor_vm_op_ctx *op_ctx = vm->op_ctx;
2068 	struct panthor_vma *vma = panthor_vm_op_ctx_get_vma(op_ctx);
2069 	int ret;
2070 
2071 	if (!vma)
2072 		return -EINVAL;
2073 
2074 	panthor_vma_init(vma, op_ctx->flags & PANTHOR_VM_MAP_FLAGS);
2075 
2076 	ret = panthor_vm_map_pages(vm, op->map.va.addr, flags_to_prot(vma->flags),
2077 				   op_ctx->map.sgt, op->map.gem.offset,
2078 				   op->map.va.range);
2079 	if (ret)
2080 		return ret;
2081 
2082 	/* The ref is owned by the mapping now. Clear the op_ctx vm_bo field so
2083 	 * we don't release the pinning/obj ref behind GPUVA's back.
2084 	 */
2085 	drm_gpuva_map(&vm->base, &vma->base, &op->map);
2086 	panthor_vma_link(vm, vma, op_ctx->map.vm_bo);
2087 	op_ctx->map.vm_bo = NULL;
2088 	return 0;
2089 }
2090 
2091 static int panthor_gpuva_sm_step_remap(struct drm_gpuva_op *op,
2092 				       void *priv)
2093 {
2094 	struct panthor_vma *unmap_vma = container_of(op->remap.unmap->va, struct panthor_vma, base);
2095 	struct panthor_vm *vm = priv;
2096 	struct panthor_vm_op_ctx *op_ctx = vm->op_ctx;
2097 	struct panthor_vma *prev_vma = NULL, *next_vma = NULL;
2098 	u64 unmap_start, unmap_range;
2099 	int ret;
2100 
2101 	drm_gpuva_op_remap_to_unmap_range(&op->remap, &unmap_start, &unmap_range);
2102 	ret = panthor_vm_unmap_pages(vm, unmap_start, unmap_range);
2103 	if (ret)
2104 		return ret;
2105 
2106 	if (op->remap.prev) {
2107 		prev_vma = panthor_vm_op_ctx_get_vma(op_ctx);
2108 		panthor_vma_init(prev_vma, unmap_vma->flags);
2109 	}
2110 
2111 	if (op->remap.next) {
2112 		next_vma = panthor_vm_op_ctx_get_vma(op_ctx);
2113 		panthor_vma_init(next_vma, unmap_vma->flags);
2114 	}
2115 
2116 	drm_gpuva_remap(prev_vma ? &prev_vma->base : NULL,
2117 			next_vma ? &next_vma->base : NULL,
2118 			&op->remap);
2119 
2120 	if (prev_vma) {
2121 		/* panthor_vma_link() transfers the vm_bo ownership to
2122 		 * the VMA object. Since the vm_bo we're passing is still
2123 		 * owned by the old mapping which will be released when this
2124 		 * mapping is destroyed, we need to grab a ref here.
2125 		 */
2126 		panthor_vma_link(vm, prev_vma,
2127 				 drm_gpuvm_bo_get(op->remap.unmap->va->vm_bo));
2128 	}
2129 
2130 	if (next_vma) {
2131 		panthor_vma_link(vm, next_vma,
2132 				 drm_gpuvm_bo_get(op->remap.unmap->va->vm_bo));
2133 	}
2134 
2135 	panthor_vma_unlink(vm, unmap_vma);
2136 	return 0;
2137 }
2138 
2139 static int panthor_gpuva_sm_step_unmap(struct drm_gpuva_op *op,
2140 				       void *priv)
2141 {
2142 	struct panthor_vma *unmap_vma = container_of(op->unmap.va, struct panthor_vma, base);
2143 	struct panthor_vm *vm = priv;
2144 	int ret;
2145 
2146 	ret = panthor_vm_unmap_pages(vm, unmap_vma->base.va.addr,
2147 				     unmap_vma->base.va.range);
2148 	if (drm_WARN_ON(&vm->ptdev->base, ret))
2149 		return ret;
2150 
2151 	drm_gpuva_unmap(&op->unmap);
2152 	panthor_vma_unlink(vm, unmap_vma);
2153 	return 0;
2154 }
2155 
2156 static const struct drm_gpuvm_ops panthor_gpuvm_ops = {
2157 	.vm_free = panthor_vm_free,
2158 	.sm_step_map = panthor_gpuva_sm_step_map,
2159 	.sm_step_remap = panthor_gpuva_sm_step_remap,
2160 	.sm_step_unmap = panthor_gpuva_sm_step_unmap,
2161 };
2162 
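/*
 * Note: drm_gpuvm_sm_map()/drm_gpuvm_sm_unmap() drive the three sm_step
 * callbacks above. For instance, unmapping the middle of an existing VMA
 * produces a single remap step carrying both a prev and a next sub-range,
 * while unmapping a whole VMA produces a plain unmap step.
 */
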
2163 /**
2164  * panthor_vm_resv() - Get the dma_resv object attached to a VM.
2165  * @vm: VM to get the dma_resv of.
2166  *
2167  * Return: A dma_resv object.
2168  */
2169 struct dma_resv *panthor_vm_resv(struct panthor_vm *vm)
2170 {
2171 	return drm_gpuvm_resv(&vm->base);
2172 }
2173 
2174 struct drm_gem_object *panthor_vm_root_gem(struct panthor_vm *vm)
2175 {
2176 	if (!vm)
2177 		return NULL;
2178 
2179 	return vm->base.r_obj;
2180 }
2181 
2182 static int
2183 panthor_vm_exec_op(struct panthor_vm *vm, struct panthor_vm_op_ctx *op,
2184 		   bool flag_vm_unusable_on_failure)
2185 {
2186 	u32 op_type = op->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK;
2187 	int ret;
2188 
2189 	if (op_type == DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY)
2190 		return 0;
2191 
2192 	mutex_lock(&vm->op_lock);
2193 	vm->op_ctx = op;
2194 	switch (op_type) {
2195 	case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP:
2196 		if (vm->unusable) {
2197 			ret = -EINVAL;
2198 			break;
2199 		}
2200 
2201 		ret = drm_gpuvm_sm_map(&vm->base, vm, op->va.addr, op->va.range,
2202 				       op->map.vm_bo->obj, op->map.bo_offset);
2203 		break;
2204 
2205 	case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
2206 		ret = drm_gpuvm_sm_unmap(&vm->base, vm, op->va.addr, op->va.range);
2207 		break;
2208 
2209 	default:
2210 		ret = -EINVAL;
2211 		break;
2212 	}
2213 
2214 	if (ret && flag_vm_unusable_on_failure)
2215 		vm->unusable = true;
2216 
2217 	vm->op_ctx = NULL;
2218 	mutex_unlock(&vm->op_lock);
2219 
2220 	return ret;
2221 }
2222 
2223 static struct dma_fence *
2224 panthor_vm_bind_run_job(struct drm_sched_job *sched_job)
2225 {
2226 	struct panthor_vm_bind_job *job = container_of(sched_job, struct panthor_vm_bind_job, base);
2227 	bool cookie;
2228 	int ret;
2229 
2230 	/* Not only do we report an error whose result is propagated to the
2231 	 * drm_sched finished fence, but we also flag the VM as unusable, because
2232 	 * a failure in an async VM_BIND leaves the VM in an inconsistent state:
2233 	 * it needs to be destroyed and recreated.
2234 	 */
2235 	cookie = dma_fence_begin_signalling();
2236 	ret = panthor_vm_exec_op(job->vm, &job->ctx, true);
2237 	dma_fence_end_signalling(cookie);
2238 
2239 	return ret ? ERR_PTR(ret) : NULL;
2240 }
2241 
2242 static void panthor_vm_bind_job_release(struct kref *kref)
2243 {
2244 	struct panthor_vm_bind_job *job = container_of(kref, struct panthor_vm_bind_job, refcount);
2245 
2246 	if (job->base.s_fence)
2247 		drm_sched_job_cleanup(&job->base);
2248 
2249 	panthor_vm_cleanup_op_ctx(&job->ctx, job->vm);
2250 	panthor_vm_put(job->vm);
2251 	kfree(job);
2252 }
2253 
2254 /**
2255  * panthor_vm_bind_job_put() - Release a VM_BIND job reference
2256  * @sched_job: Job to release the reference on.
2257  */
2258 void panthor_vm_bind_job_put(struct drm_sched_job *sched_job)
2259 {
2260 	struct panthor_vm_bind_job *job =
2261 		container_of(sched_job, struct panthor_vm_bind_job, base);
2262 
2263 	if (sched_job)
2264 		kref_put(&job->refcount, panthor_vm_bind_job_release);
2265 }
2266 
2267 static void
2268 panthor_vm_bind_free_job(struct drm_sched_job *sched_job)
2269 {
2270 	struct panthor_vm_bind_job *job =
2271 		container_of(sched_job, struct panthor_vm_bind_job, base);
2272 
2273 	drm_sched_job_cleanup(sched_job);
2274 
2275 	/* Do the heavy cleanups asynchronously, so we're out of the
2276 	 * dma-signaling path and can acquire dma-resv locks safely.
2277 	 */
2278 	queue_work(panthor_cleanup_wq, &job->cleanup_op_ctx_work);
2279 }
2280 
2281 static enum drm_gpu_sched_stat
2282 panthor_vm_bind_timedout_job(struct drm_sched_job *sched_job)
2283 {
2284 	WARN(1, "VM_BIND ops are synchronous for now, there should be no timeout!");
2285 	return DRM_GPU_SCHED_STAT_NOMINAL;
2286 }
2287 
2288 static const struct drm_sched_backend_ops panthor_vm_bind_ops = {
2289 	.run_job = panthor_vm_bind_run_job,
2290 	.free_job = panthor_vm_bind_free_job,
2291 	.timedout_job = panthor_vm_bind_timedout_job,
2292 };
2293 
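/*
 * Note: run_job executes in the dma-fence signalling path, which is why the
 * op contexts pre-allocate what they need up front and why the heavy
 * cleanup is deferred to a workqueue (see panthor_vm_bind_free_job() above).
 */
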
2294 /**
2295  * panthor_vm_create() - Create a VM
2296  * @ptdev: Device.
2297  * @for_mcu: True if this is the FW MCU VM.
2298  * @kernel_va_start: Start of the range reserved for kernel BO mapping.
2299  * @kernel_va_size: Size of the range reserved for kernel BO mapping.
2300  * @auto_kernel_va_start: Start of the auto-VA kernel range.
2301  * @auto_kernel_va_size: Size of the auto-VA kernel range.
2302  *
2303  * Return: A valid pointer on success, an ERR_PTR() otherwise.
2304  */
2305 struct panthor_vm *
2306 panthor_vm_create(struct panthor_device *ptdev, bool for_mcu,
2307 		  u64 kernel_va_start, u64 kernel_va_size,
2308 		  u64 auto_kernel_va_start, u64 auto_kernel_va_size)
2309 {
2310 	u32 va_bits = GPU_MMU_FEATURES_VA_BITS(ptdev->gpu_info.mmu_features);
2311 	u32 pa_bits = GPU_MMU_FEATURES_PA_BITS(ptdev->gpu_info.mmu_features);
2312 	u64 full_va_range = 1ull << va_bits;
2313 	struct drm_gem_object *dummy_gem;
2314 	struct drm_gpu_scheduler *sched;
2315 	const struct drm_sched_init_args sched_args = {
2316 		.ops = &panthor_vm_bind_ops,
2317 		.submit_wq = ptdev->mmu->vm.wq,
2318 		.num_rqs = 1,
2319 		.credit_limit = 1,
2320 		/* Bind operations are synchronous for now, no timeout needed. */
2321 		.timeout = MAX_SCHEDULE_TIMEOUT,
2322 		.name = "panthor-vm-bind",
2323 		.dev = ptdev->base.dev,
2324 	};
2325 	struct io_pgtable_cfg pgtbl_cfg;
2326 	u64 mair, min_va, va_range;
2327 	struct panthor_vm *vm;
2328 	int ret;
2329 
2330 	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
2331 	if (!vm)
2332 		return ERR_PTR(-ENOMEM);
2333 
2334 	/* We allocate a dummy GEM for the VM. */
2335 	dummy_gem = drm_gpuvm_resv_object_alloc(&ptdev->base);
2336 	if (!dummy_gem) {
2337 		ret = -ENOMEM;
2338 		goto err_free_vm;
2339 	}
2340 
2341 	mutex_init(&vm->heaps.lock);
2342 	vm->for_mcu = for_mcu;
2343 	vm->ptdev = ptdev;
2344 	mutex_init(&vm->op_lock);
2345 
2346 	if (for_mcu) {
2347 		/* The CSF MCU is a Cortex-M7, and can only address 4G */
2348 		min_va = 0;
2349 		va_range = SZ_4G;
2350 	} else {
2351 		min_va = 0;
2352 		va_range = full_va_range;
2353 	}
2354 
2355 	mutex_init(&vm->mm_lock);
2356 	drm_mm_init(&vm->mm, kernel_va_start, kernel_va_size);
2357 	vm->kernel_auto_va.start = auto_kernel_va_start;
2358 	vm->kernel_auto_va.end = vm->kernel_auto_va.start + auto_kernel_va_size - 1;
2359 
2360 	INIT_LIST_HEAD(&vm->node);
2361 	INIT_LIST_HEAD(&vm->as.lru_node);
2362 	vm->as.id = -1;
2363 	refcount_set(&vm->as.active_cnt, 0);
2364 
2365 	pgtbl_cfg = (struct io_pgtable_cfg) {
2366 		.pgsize_bitmap	= SZ_4K | SZ_2M,
2367 		.ias		= va_bits,
2368 		.oas		= pa_bits,
2369 		.coherent_walk	= ptdev->coherent,
2370 		.tlb		= &mmu_tlb_ops,
2371 		.iommu_dev	= ptdev->base.dev,
2372 		.alloc		= alloc_pt,
2373 		.free		= free_pt,
2374 	};
2375 
2376 	vm->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &pgtbl_cfg, vm);
2377 	if (!vm->pgtbl_ops) {
2378 		ret = -EINVAL;
2379 		goto err_mm_takedown;
2380 	}
2381 
2382 	ret = drm_sched_init(&vm->sched, &sched_args);
2383 	if (ret)
2384 		goto err_free_io_pgtable;
2385 
2386 	sched = &vm->sched;
2387 	ret = drm_sched_entity_init(&vm->entity, 0, &sched, 1, NULL);
2388 	if (ret)
2389 		goto err_sched_fini;
2390 
2391 	mair = io_pgtable_ops_to_pgtable(vm->pgtbl_ops)->cfg.arm_lpae_s1_cfg.mair;
2392 	vm->memattr = mair_to_memattr(mair, ptdev->coherent);
2393 
2394 	mutex_lock(&ptdev->mmu->vm.lock);
2395 	list_add_tail(&vm->node, &ptdev->mmu->vm.list);
2396 
2397 	/* If a reset is in progress, stop the scheduler. */
2398 	if (ptdev->mmu->vm.reset_in_progress)
2399 		panthor_vm_stop(vm);
2400 	mutex_unlock(&ptdev->mmu->vm.lock);
2401 
2402 	/* We intentionally leave the reserved range at zero, because we want kernel VMAs
2403 	 * to be handled the same way user VMAs are.
2404 	 */
2405 	drm_gpuvm_init(&vm->base, for_mcu ? "panthor-MCU-VM" : "panthor-GPU-VM",
2406 		       DRM_GPUVM_RESV_PROTECTED, &ptdev->base, dummy_gem,
2407 		       min_va, va_range, 0, 0, &panthor_gpuvm_ops);
2408 	drm_gem_object_put(dummy_gem);
2409 	return vm;
2410 
2411 err_sched_fini:
2412 	drm_sched_fini(&vm->sched);
2413 
2414 err_free_io_pgtable:
2415 	free_io_pgtable_ops(vm->pgtbl_ops);
2416 
2417 err_mm_takedown:
2418 	drm_mm_takedown(&vm->mm);
2419 	drm_gem_object_put(dummy_gem);
2420 
2421 err_free_vm:
2422 	kfree(vm);
2423 	return ERR_PTR(ret);
2424 }
2425 
2426 static int
2427 panthor_vm_bind_prepare_op_ctx(struct drm_file *file,
2428 			       struct panthor_vm *vm,
2429 			       const struct drm_panthor_vm_bind_op *op,
2430 			       struct panthor_vm_op_ctx *op_ctx)
2431 {
2432 	ssize_t vm_pgsz = panthor_vm_page_size(vm);
2433 	struct drm_gem_object *gem;
2434 	int ret;
2435 
2436 	/* VA and size must both be aligned on the VM page size. */
2437 	if (!IS_ALIGNED(op->va | op->size, vm_pgsz))
2438 		return -EINVAL;
2439 
2440 	switch (op->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) {
2441 	case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP:
2442 		gem = drm_gem_object_lookup(file, op->bo_handle);
2443 		ret = panthor_vm_prepare_map_op_ctx(op_ctx, vm,
2444 						    gem ? to_panthor_bo(gem) : NULL,
2445 						    op->bo_offset,
2446 						    op->size,
2447 						    op->va,
2448 						    op->flags);
2449 		drm_gem_object_put(gem);
2450 		return ret;
2451 
2452 	case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
2453 		if (op->flags & ~DRM_PANTHOR_VM_BIND_OP_TYPE_MASK)
2454 			return -EINVAL;
2455 
2456 		if (op->bo_handle || op->bo_offset)
2457 			return -EINVAL;
2458 
2459 		return panthor_vm_prepare_unmap_op_ctx(op_ctx, vm, op->va, op->size);
2460 
2461 	case DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY:
2462 		if (op->flags & ~DRM_PANTHOR_VM_BIND_OP_TYPE_MASK)
2463 			return -EINVAL;
2464 
2465 		if (op->bo_handle || op->bo_offset)
2466 			return -EINVAL;
2467 
2468 		if (op->va || op->size)
2469 			return -EINVAL;
2470 
2471 		if (!op->syncs.count)
2472 			return -EINVAL;
2473 
2474 		panthor_vm_prepare_sync_only_op_ctx(op_ctx, vm);
2475 		return 0;
2476 
2477 	default:
2478 		return -EINVAL;
2479 	}
2480 }
2481 
2482 static void panthor_vm_bind_job_cleanup_op_ctx_work(struct work_struct *work)
2483 {
2484 	struct panthor_vm_bind_job *job =
2485 		container_of(work, struct panthor_vm_bind_job, cleanup_op_ctx_work);
2486 
2487 	panthor_vm_bind_job_put(&job->base);
2488 }
2489 
2490 /**
2491  * panthor_vm_bind_job_create() - Create a VM_BIND job
2492  * @file: File.
2493  * @vm: VM targeted by the VM_BIND job.
2494  * @op: VM operation data.
2495  *
2496  * Return: A valid pointer on success, an ERR_PTR() otherwise.
2497  */
2498 struct drm_sched_job *
2499 panthor_vm_bind_job_create(struct drm_file *file,
2500 			   struct panthor_vm *vm,
2501 			   const struct drm_panthor_vm_bind_op *op)
2502 {
2503 	struct panthor_vm_bind_job *job;
2504 	int ret;
2505 
2506 	if (!vm)
2507 		return ERR_PTR(-EINVAL);
2508 
2509 	if (vm->destroyed || vm->unusable)
2510 		return ERR_PTR(-EINVAL);
2511 
2512 	job = kzalloc(sizeof(*job), GFP_KERNEL);
2513 	if (!job)
2514 		return ERR_PTR(-ENOMEM);
2515 
2516 	ret = panthor_vm_bind_prepare_op_ctx(file, vm, op, &job->ctx);
2517 	if (ret) {
2518 		kfree(job);
2519 		return ERR_PTR(ret);
2520 	}
2521 
2522 	INIT_WORK(&job->cleanup_op_ctx_work, panthor_vm_bind_job_cleanup_op_ctx_work);
2523 	kref_init(&job->refcount);
2524 	job->vm = panthor_vm_get(vm);
2525 
2526 	ret = drm_sched_job_init(&job->base, &vm->entity, 1, vm);
2527 	if (ret)
2528 		goto err_put_job;
2529 
2530 	return &job->base;
2531 
2532 err_put_job:
2533 	panthor_vm_bind_job_put(&job->base);
2534 	return ERR_PTR(ret);
2535 }
2536 
2537 /**
2538  * panthor_vm_bind_job_prepare_resvs() - Prepare VM_BIND job dma_resvs
2539  * @exec: The locking/preparation context.
2540  * @sched_job: The job to prepare resvs on.
2541  *
2542  * Locks and prepares the VM resv.
2543  *
2544  * If this is a map operation, locks and prepares the GEM resv.
2545  *
2546  * Return: 0 on success, a negative error code otherwise.
2547  */
2548 int panthor_vm_bind_job_prepare_resvs(struct drm_exec *exec,
2549 				      struct drm_sched_job *sched_job)
2550 {
2551 	struct panthor_vm_bind_job *job = container_of(sched_job, struct panthor_vm_bind_job, base);
2552 	int ret;
2553 
2554 	/* Acquire the VM lock and reserve a slot for this VM bind job. */
2555 	ret = drm_gpuvm_prepare_vm(&job->vm->base, exec, 1);
2556 	if (ret)
2557 		return ret;
2558 
2559 	if (job->ctx.map.vm_bo) {
2560 		/* Lock/prepare the GEM being mapped. */
2561 		ret = drm_exec_prepare_obj(exec, job->ctx.map.vm_bo->obj, 1);
2562 		if (ret)
2563 			return ret;
2564 	}
2565 
2566 	return 0;
2567 }
2568 
2569 /**
2570  * panthor_vm_bind_job_update_resvs() - Update the resv objects touched by a job
2571  * @exec: drm_exec context.
2572  * @sched_job: Job to update the resvs on.
2573  */
2574 void panthor_vm_bind_job_update_resvs(struct drm_exec *exec,
2575 				      struct drm_sched_job *sched_job)
2576 {
2577 	struct panthor_vm_bind_job *job = container_of(sched_job, struct panthor_vm_bind_job, base);
2578 
2579 	/* Explicit sync => we just register our job's finished fence as bookkeep. */
2580 	drm_gpuvm_resv_add_fence(&job->vm->base, exec,
2581 				 &sched_job->s_fence->finished,
2582 				 DMA_RESV_USAGE_BOOKKEEP,
2583 				 DMA_RESV_USAGE_BOOKKEEP);
2584 }
2585 
2586 void panthor_vm_update_resvs(struct panthor_vm *vm, struct drm_exec *exec,
2587 			     struct dma_fence *fence,
2588 			     enum dma_resv_usage private_usage,
2589 			     enum dma_resv_usage extobj_usage)
2590 {
2591 	drm_gpuvm_resv_add_fence(&vm->base, exec, fence, private_usage, extobj_usage);
2592 }
2593 
2594 /**
2595  * panthor_vm_bind_exec_sync_op() - Execute a VM_BIND operation synchronously.
2596  * @file: File.
2597  * @vm: VM targeted by the VM operation.
2598  * @op: Data describing the VM operation.
2599  *
2600  * Return: 0 on success, a negative error code otherwise.
2601  */
2602 int panthor_vm_bind_exec_sync_op(struct drm_file *file,
2603 				 struct panthor_vm *vm,
2604 				 struct drm_panthor_vm_bind_op *op)
2605 {
2606 	struct panthor_vm_op_ctx op_ctx;
2607 	int ret;
2608 
2609 	/* No sync objects allowed on synchronous operations. */
2610 	if (op->syncs.count)
2611 		return -EINVAL;
2612 
2613 	if (!op->size)
2614 		return 0;
2615 
2616 	ret = panthor_vm_bind_prepare_op_ctx(file, vm, op, &op_ctx);
2617 	if (ret)
2618 		return ret;
2619 
2620 	ret = panthor_vm_exec_op(vm, &op_ctx, false);
2621 	panthor_vm_cleanup_op_ctx(&op_ctx, vm);
2622 
2623 	return ret;
2624 }
2625 
2626 /**
2627  * panthor_vm_map_bo_range() - Map a GEM object range to a VM
2628  * @vm: VM to map the GEM to.
2629  * @bo: GEM object to map.
2630  * @offset: Offset in the GEM object.
2631  * @size: Size to map.
2632  * @va: Virtual address to map the object to.
2633  * @flags: Combination of drm_panthor_vm_bind_op_flags flags.
2634  * Only map-related flags are valid.
2635  *
2636  * Internal use only. For userspace requests, use
2637  * panthor_vm_bind_exec_sync_op() instead.
2638  *
2639  * Return: 0 on success, a negative error code otherwise.
2640  */
2641 int panthor_vm_map_bo_range(struct panthor_vm *vm, struct panthor_gem_object *bo,
2642 			    u64 offset, u64 size, u64 va, u32 flags)
2643 {
2644 	struct panthor_vm_op_ctx op_ctx;
2645 	int ret;
2646 
2647 	ret = panthor_vm_prepare_map_op_ctx(&op_ctx, vm, bo, offset, size, va, flags);
2648 	if (ret)
2649 		return ret;
2650 
2651 	ret = panthor_vm_exec_op(vm, &op_ctx, false);
2652 	panthor_vm_cleanup_op_ctx(&op_ctx, vm);
2653 
2654 	return ret;
2655 }
2656 
2657 /**
2658  * panthor_vm_unmap_range() - Unmap a portion of the VA space
2659  * @vm: VM to unmap the region from.
2660  * @va: Virtual address to unmap. Must be 4k aligned.
2661  * @size: Size of the region to unmap. Must be 4k aligned.
2662  *
2663  * Internal use only. For userspace requests, use
2664  * panthor_vm_bind_exec_sync_op() instead.
2665  *
2666  * Return: 0 on success, a negative error code otherwise.
2667  */
2668 int panthor_vm_unmap_range(struct panthor_vm *vm, u64 va, u64 size)
2669 {
2670 	struct panthor_vm_op_ctx op_ctx;
2671 	int ret;
2672 
2673 	ret = panthor_vm_prepare_unmap_op_ctx(&op_ctx, vm, va, size);
2674 	if (ret)
2675 		return ret;
2676 
2677 	ret = panthor_vm_exec_op(vm, &op_ctx, false);
2678 	panthor_vm_cleanup_op_ctx(&op_ctx, vm);
2679 
2680 	return ret;
2681 }
2682 
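/*
 * Illustrative sketch (not part of the driver): pairing the two helpers
 * above to create and tear down a kernel-internal mapping. The BO, VA and
 * size are assumed to be provided by the caller and properly aligned.
 */
static int __maybe_unused example_map_then_unmap(struct panthor_vm *vm,
						 struct panthor_gem_object *bo,
						 u64 va, u64 size)
{
	int ret;

	ret = panthor_vm_map_bo_range(vm, bo, 0, size, va,
				      DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC);
	if (ret)
		return ret;

	/* ... access the mapping from the GPU ... */

	return panthor_vm_unmap_range(vm, va, size);
}
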
2683 /**
2684  * panthor_vm_prepare_mapped_bos_resvs() - Prepare resvs on VM BOs.
2685  * @exec: Locking/preparation context.
2686  * @vm: VM targeted by the GPU job.
2687  * @slot_count: Number of slots to reserve.
2688  *
2689  * GPU jobs assume all BOs bound to the VM at the time the job is submitted
2690  * are available when the job is executed. In order to guarantee that, we
2691  * need to reserve a slot on all BOs mapped to a VM and update this slot with
2692  * the job fence after its submission.
2693  *
2694  * Return: 0 on success, a negative error code otherwise.
2695  */
2696 int panthor_vm_prepare_mapped_bos_resvs(struct drm_exec *exec, struct panthor_vm *vm,
2697 					u32 slot_count)
2698 {
2699 	int ret;
2700 
2701 	/* Acquire the VM lock and reserve a slot for this GPU job. */
2702 	ret = drm_gpuvm_prepare_vm(&vm->base, exec, slot_count);
2703 	if (ret)
2704 		return ret;
2705 
2706 	return drm_gpuvm_prepare_objects(&vm->base, exec, slot_count);
2707 }
2708 
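/*
 * Illustrative sketch (not part of the driver): how a job-submission path
 * might drive the helper above from a drm_exec retry loop before adding the
 * job fence to the reserved slots.
 */
static int __maybe_unused example_lock_vm_bos(struct drm_exec *exec,
					      struct panthor_vm *vm)
{
	int ret;

	drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
	drm_exec_until_all_locked(exec) {
		ret = panthor_vm_prepare_mapped_bos_resvs(exec, vm, 1);
		drm_exec_retry_on_contention(exec);
		if (ret)
			goto err_fini;
	}

	/* All BOs are locked with a slot reserved; the caller adds the job
	 * fence and then calls drm_exec_fini() to drop the locks.
	 */
	return 0;

err_fini:
	drm_exec_fini(exec);
	return ret;
}
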
2709 /**
2710  * panthor_mmu_unplug() - Unplug the MMU logic
2711  * @ptdev: Device.
2712  *
2713  * No access to the MMU regs should be done after this function is called.
2714  * We suspend the IRQ and disable all VMs to guarantee that.
2715  */
2716 void panthor_mmu_unplug(struct panthor_device *ptdev)
2717 {
2718 	if (!IS_ENABLED(CONFIG_PM) || pm_runtime_active(ptdev->base.dev))
2719 		panthor_mmu_irq_suspend(&ptdev->mmu->irq);
2720 
2721 	mutex_lock(&ptdev->mmu->as.slots_lock);
2722 	for (u32 i = 0; i < ARRAY_SIZE(ptdev->mmu->as.slots); i++) {
2723 		struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm;
2724 
2725 		if (vm) {
2726 			drm_WARN_ON(&ptdev->base, panthor_mmu_as_disable(ptdev, i));
2727 			panthor_vm_release_as_locked(vm);
2728 		}
2729 	}
2730 	mutex_unlock(&ptdev->mmu->as.slots_lock);
2731 }
2732 
2733 static void panthor_mmu_release_wq(struct drm_device *ddev, void *res)
2734 {
2735 	destroy_workqueue(res);
2736 }
2737 
2738 /**
2739  * panthor_mmu_init() - Initialize the MMU logic.
2740  * @ptdev: Device.
2741  *
2742  * Return: 0 on success, a negative error code otherwise.
2743  */
2744 int panthor_mmu_init(struct panthor_device *ptdev)
2745 {
2746 	u32 va_bits = GPU_MMU_FEATURES_VA_BITS(ptdev->gpu_info.mmu_features);
2747 	struct panthor_mmu *mmu;
2748 	int ret, irq;
2749 
2750 	mmu = drmm_kzalloc(&ptdev->base, sizeof(*mmu), GFP_KERNEL);
2751 	if (!mmu)
2752 		return -ENOMEM;
2753 
2754 	INIT_LIST_HEAD(&mmu->as.lru_list);
2755 
2756 	ret = drmm_mutex_init(&ptdev->base, &mmu->as.slots_lock);
2757 	if (ret)
2758 		return ret;
2759 
2760 	INIT_LIST_HEAD(&mmu->vm.list);
2761 	ret = drmm_mutex_init(&ptdev->base, &mmu->vm.lock);
2762 	if (ret)
2763 		return ret;
2764 
2765 	ptdev->mmu = mmu;
2766 
2767 	irq = platform_get_irq_byname(to_platform_device(ptdev->base.dev), "mmu");
2768 	if (irq <= 0)
2769 		return -ENODEV;
2770 
2771 	ret = panthor_request_mmu_irq(ptdev, &mmu->irq, irq,
2772 				      panthor_mmu_fault_mask(ptdev, ~0));
2773 	if (ret)
2774 		return ret;
2775 
2776 	mmu->vm.wq = alloc_workqueue("panthor-vm-bind", WQ_UNBOUND, 0);
2777 	if (!mmu->vm.wq)
2778 		return -ENOMEM;
2779 
2780 	/* On 32-bit kernels, the VA space is limited by the io_pgtable_ops abstraction,
2781 	 * which passes iova as an unsigned long. Patch the mmu_features to reflect this
2782 	 * limitation: e.g. a GPU reporting 48 VA bits ends up advertised as 32-bit VA.
2783 	 */
2784 	if (va_bits > BITS_PER_LONG) {
2785 		ptdev->gpu_info.mmu_features &= ~GENMASK(7, 0);
2786 		ptdev->gpu_info.mmu_features |= BITS_PER_LONG;
2787 	}
2788 
2789 	return drmm_add_action_or_reset(&ptdev->base, panthor_mmu_release_wq, mmu->vm.wq);
2790 }
2791 
2792 #ifdef CONFIG_DEBUG_FS
2793 static int show_vm_gpuvas(struct panthor_vm *vm, struct seq_file *m)
2794 {
2795 	int ret;
2796 
2797 	mutex_lock(&vm->op_lock);
2798 	ret = drm_debugfs_gpuva_info(m, &vm->base);
2799 	mutex_unlock(&vm->op_lock);
2800 
2801 	return ret;
2802 }
2803 
2804 static int show_each_vm(struct seq_file *m, void *arg)
2805 {
2806 	struct drm_info_node *node = (struct drm_info_node *)m->private;
2807 	struct drm_device *ddev = node->minor->dev;
2808 	struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);
2809 	int (*show)(struct panthor_vm *, struct seq_file *) = node->info_ent->data;
2810 	struct panthor_vm *vm;
2811 	int ret = 0;
2812 
2813 	mutex_lock(&ptdev->mmu->vm.lock);
2814 	list_for_each_entry(vm, &ptdev->mmu->vm.list, node) {
2815 		ret = show(vm, m);
2816 		if (ret < 0)
2817 			break;
2818 
2819 		seq_puts(m, "\n");
2820 	}
2821 	mutex_unlock(&ptdev->mmu->vm.lock);
2822 
2823 	return ret;
2824 }
2825 
2826 static struct drm_info_list panthor_mmu_debugfs_list[] = {
2827 	DRM_DEBUGFS_GPUVA_INFO(show_each_vm, show_vm_gpuvas),
2828 };
2829 
2830 /**
2831  * panthor_mmu_debugfs_init() - Initialize MMU debugfs entries
2832  * @minor: Minor.
2833  */
2834 void panthor_mmu_debugfs_init(struct drm_minor *minor)
2835 {
2836 	drm_debugfs_create_files(panthor_mmu_debugfs_list,
2837 				 ARRAY_SIZE(panthor_mmu_debugfs_list),
2838 				 minor->debugfs_root, minor);
2839 }
2840 #endif /* CONFIG_DEBUG_FS */
2841 
2842 /**
2843  * panthor_mmu_pt_cache_init() - Initialize the page table cache.
2844  *
2845  * Return: 0 on success, a negative error code otherwise.
2846  */
2847 int panthor_mmu_pt_cache_init(void)
2848 {
2849 	pt_cache = kmem_cache_create("panthor-mmu-pt", SZ_4K, SZ_4K, 0, NULL);
2850 	if (!pt_cache)
2851 		return -ENOMEM;
2852 
2853 	return 0;
2854 }
2855 
2856 /**
2857  * panthor_mmu_pt_cache_fini() - Destroy the page table cache.
2858  */
2859 void panthor_mmu_pt_cache_fini(void)
2860 {
2861 	kmem_cache_destroy(pt_cache);
2862 }
2863