xref: /linux/drivers/gpu/drm/panthor/panthor_mmu.c (revision cf950766e96e36c90871d955cfd2a2c1feddba37)
1 // SPDX-License-Identifier: GPL-2.0 or MIT
2 /* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
3 /* Copyright 2023 Collabora ltd. */
4 
5 #include <drm/drm_debugfs.h>
6 #include <drm/drm_drv.h>
7 #include <drm/drm_exec.h>
8 #include <drm/drm_gpuvm.h>
9 #include <drm/drm_managed.h>
10 #include <drm/drm_print.h>
11 #include <drm/gpu_scheduler.h>
12 #include <drm/panthor_drm.h>
13 
14 #include <linux/atomic.h>
15 #include <linux/bitfield.h>
16 #include <linux/delay.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/interrupt.h>
19 #include <linux/io.h>
20 #include <linux/iopoll.h>
21 #include <linux/io-pgtable.h>
22 #include <linux/iommu.h>
23 #include <linux/kmemleak.h>
24 #include <linux/platform_device.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/rwsem.h>
27 #include <linux/sched.h>
28 #include <linux/shmem_fs.h>
29 #include <linux/sizes.h>
30 
31 #include "panthor_device.h"
32 #include "panthor_gem.h"
33 #include "panthor_gpu.h"
34 #include "panthor_heap.h"
35 #include "panthor_mmu.h"
36 #include "panthor_regs.h"
37 #include "panthor_sched.h"
38 
39 #define MAX_AS_SLOTS			32
40 
41 struct panthor_vm;
42 
43 /**
44  * struct panthor_as_slot - Address space slot
45  */
46 struct panthor_as_slot {
47 	/** @vm: VM bound to this slot. NULL if no VM is bound. */
48 	struct panthor_vm *vm;
49 };
50 
51 /**
52  * struct panthor_mmu - MMU related data
53  */
54 struct panthor_mmu {
55 	/** @irq: The MMU irq. */
56 	struct panthor_irq irq;
57 
58 	/**
59 	 * @as: Address space related fields.
60 	 *
61 	 * The GPU has a limited number of address space (AS) slots, forcing
62 	 * us to re-assign slots on demand.
63 	 */
64 	struct {
65 		/** @as.slots_lock: Lock protecting access to all other AS fields. */
66 		struct mutex slots_lock;
67 
68 		/** @as.alloc_mask: Bitmask encoding the allocated slots. */
69 		unsigned long alloc_mask;
70 
71 		/** @as.faulty_mask: Bitmask encoding the faulty slots. */
72 		unsigned long faulty_mask;
73 
74 		/** @as.slots: VMs currently bound to the AS slots. */
75 		struct panthor_as_slot slots[MAX_AS_SLOTS];
76 
77 		/**
78 		 * @as.lru_list: List of least recently used VMs.
79 		 *
80 		 * We use this list to pick a VM to evict when all slots are
81 		 * used.
82 		 *
83 		 * There should be no more active VMs than there are AS slots,
84 		 * so this LRU is just here to keep VMs bound until there's
85 	 * a need to release a slot, thus avoiding unnecessary TLB/cache
86 		 * flushes.
87 		 */
88 		struct list_head lru_list;
89 	} as;
90 
91 	/** @vm: VMs management fields */
92 	struct {
93 		/** @vm.lock: Lock protecting access to list. */
94 		struct mutex lock;
95 
96 		/** @vm.list: List containing all VMs. */
97 		struct list_head list;
98 
99 		/** @vm.reset_in_progress: True if a reset is in progress. */
100 		bool reset_in_progress;
101 
102 		/** @vm.wq: Workqueue used for the VM_BIND queues. */
103 		struct workqueue_struct *wq;
104 	} vm;
105 };
106 
107 /**
108  * struct panthor_vm_pool - VM pool object
109  */
110 struct panthor_vm_pool {
111 	/** @xa: Array used for VM handle tracking. */
112 	struct xarray xa;
113 };
114 
115 /**
116  * struct panthor_vma - GPU mapping object
117  *
118  * This is used to track GEM mappings in GPU space.
119  */
120 struct panthor_vma {
121 	/** @base: Inherits from drm_gpuva. */
122 	struct drm_gpuva base;
123 
124 	/** @node: Used to implement deferred release of VMAs. */
125 	struct list_head node;
126 
127 	/**
128 	 * @flags: Combination of drm_panthor_vm_bind_op_flags.
129 	 *
130 	 * Only map related flags are accepted.
131 	 */
132 	u32 flags;
133 };
134 
135 /**
136  * struct panthor_vm_op_ctx - VM operation context
137  *
138  * With VM operations potentially taking place in a dma-signaling path, we
139  * need to make sure everything that might require resource allocation is
140  * pre-allocated upfront. This is what this operation context is for.
141  *
142  * We also collect resources that have been freed, so we can release them
143  * asynchronously, and let the VM_BIND scheduler process the next VM_BIND
144  * request.
145  */
146 struct panthor_vm_op_ctx {
147 	/** @rsvd_page_tables: Pages reserved for the MMU page table update. */
148 	struct {
149 		/** @rsvd_page_tables.count: Number of pages reserved. */
150 		u32 count;
151 
152 		/** @rsvd_page_tables.ptr: Index of the first unused page in the @pages table. */
153 		u32 ptr;
154 
155 		/**
156 		 * @rsvd_page_tables.pages: Array of pages to be used for an MMU page table update.
157 		 *
158 		 * After a VM operation, there might be free pages left in this array.
159 		 * They should be returned to the pt_cache as part of the op_ctx cleanup.
160 		 */
161 		void **pages;
162 	} rsvd_page_tables;
163 
164 	/**
165 	 * @preallocated_vmas: Pre-allocated VMAs to handle the remap case.
166 	 *
167 	 * Partial unmap requests or map requests overlapping existing mappings will
168 	 * trigger a remap call, which needs to register up to three panthor_vma objects
169 	 * (one for the new mapping, and two for the previous and next mappings).
170 	 */
171 	struct panthor_vma *preallocated_vmas[3];
172 
173 	/** @flags: Combination of drm_panthor_vm_bind_op_flags. */
174 	u32 flags;
175 
176 	/** @va: Virtual range targeted by the VM operation. */
177 	struct {
178 		/** @va.addr: Start address. */
179 		u64 addr;
180 
181 		/** @va.range: Range size. */
182 		u64 range;
183 	} va;
184 
185 	/** @map: Fields specific to a map operation. */
186 	struct {
187 		/** @map.vm_bo: Buffer object to map. */
188 		struct drm_gpuvm_bo *vm_bo;
189 
190 		/** @map.bo_offset: Offset in the buffer object. */
191 		u64 bo_offset;
192 
193 		/**
194 		 * @map.sgt: sg-table pointing to pages backing the GEM object.
195 		 *
196 		 * This is gathered at job creation time, such that we don't have
197 		 * to allocate in ::run_job().
198 		 */
199 		struct sg_table *sgt;
200 
201 		/**
202 		 * @map.new_vma: The new VMA object that will be inserted to the VA tree.
203 		 */
204 		struct panthor_vma *new_vma;
205 	} map;
206 };
207 
208 /**
209  * struct panthor_vm - VM object
210  *
211  * A VM is an object representing a GPU (or MCU) virtual address space.
212  * It embeds the MMU page table for this address space, a tree containing
213  * all the virtual mappings of GEM objects, and other things needed to manage
214  * the VM.
215  *
216  * Except for the MCU VM, which is managed by the kernel, all other VMs are
217  * created by userspace and mostly managed by userspace, using the
218  * %DRM_IOCTL_PANTHOR_VM_BIND ioctl.
219  *
220  * A portion of the virtual address space is reserved for kernel objects,
221  * like heap chunks, and userspace gets to decide how much of the virtual
222  * address space is left to the kernel (half of the virtual address space
223  * by default).
224  */
225 struct panthor_vm {
226 	/**
227 	 * @base: Inherit from drm_gpuvm.
228 	 *
229 	 * We delegate all the VA management to the common drm_gpuvm framework
230 	 * and only implement hooks to update the MMU page table.
231 	 */
232 	struct drm_gpuvm base;
233 
234 	/**
235 	 * @sched: Scheduler used for asynchronous VM_BIND requests.
236 	 *
237 	 * We use a 1:1 scheduler here.
238 	 */
239 	struct drm_gpu_scheduler sched;
240 
241 	/**
242 	 * @entity: Scheduling entity representing the VM_BIND queue.
243 	 *
244 	 * There's currently one bind queue per VM. It doesn't make sense to
245 	 * allow more given the VM operations are serialized anyway.
246 	 */
247 	struct drm_sched_entity entity;
248 
249 	/** @ptdev: Device. */
250 	struct panthor_device *ptdev;
251 
252 	/** @memattr: Value to program to the AS_MEMATTR register. */
253 	u64 memattr;
254 
255 	/** @pgtbl_ops: Page table operations. */
256 	struct io_pgtable_ops *pgtbl_ops;
257 
258 	/** @root_page_table: Stores the root page table pointer. */
259 	void *root_page_table;
260 
261 	/**
262 	 * @op_lock: Lock used to serialize operations on a VM.
263 	 *
264 	 * The serialization of jobs queued to the VM_BIND queue is already
265 	 * taken care of by drm_sched, but we need to serialize synchronous
266 	 * and asynchronous VM_BIND requests. This is what this lock is for.
267 	 */
268 	struct mutex op_lock;
269 
270 	/**
271 	 * @op_ctx: The context attached to the currently executing VM operation.
272 	 *
273 	 * NULL when no operation is in progress.
274 	 */
275 	struct panthor_vm_op_ctx *op_ctx;
276 
277 	/**
278 	 * @mm: Memory management object representing the auto-VA/kernel-VA.
279 	 *
280 	 * Used to auto-allocate VA space for kernel-managed objects (tiler
281 	 * heaps, ...).
282 	 *
283 	 * For the MCU VM, this is managing the VA range that's used to map
284 	 * all shared interfaces.
285 	 *
286 	 * For user VMs, the range is specified by userspace, and must not
287 	 * exceed half of the addressable VA space.
288 	 */
289 	struct drm_mm mm;
290 
291 	/** @mm_lock: Lock protecting the @mm field. */
292 	struct mutex mm_lock;
293 
294 	/** @kernel_auto_va: Automatic VA-range for kernel BOs. */
295 	struct {
296 		/** @kernel_auto_va.start: Start of the automatic VA-range for kernel BOs. */
297 		u64 start;
298 
299 		/** @kernel_auto_va.end: End of the automatic VA-range for kernel BOs. */
300 		u64 end;
301 	} kernel_auto_va;
302 
303 	/** @as: Address space related fields. */
304 	struct {
305 		/**
306 		 * @as.id: ID of the address space this VM is bound to.
307 		 *
308 		 * A value of -1 means the VM is inactive/not bound.
309 		 */
310 		int id;
311 
312 		/** @as.active_cnt: Number of active users of this VM. */
313 		refcount_t active_cnt;
314 
315 		/**
316 		 * @as.lru_node: Used to insert the VM in the panthor_mmu::as::lru_list.
317 		 *
318 		 * Active VMs should not be inserted in the LRU list.
319 		 */
320 		struct list_head lru_node;
321 	} as;
322 
323 	/**
324 	 * @heaps: Tiler heap related fields.
325 	 */
326 	struct {
327 		/**
328 		 * @heaps.pool: The heap pool attached to this VM.
329 		 *
330 		 * Will stay NULL until someone creates a heap context on this VM.
331 		 */
332 		struct panthor_heap_pool *pool;
333 
334 		/** @heaps.lock: Lock used to protect access to @pool. */
335 		struct mutex lock;
336 	} heaps;
337 
338 	/** @node: Used to insert the VM in the panthor_mmu::vm::list. */
339 	struct list_head node;
340 
341 	/** @for_mcu: True if this is the MCU VM. */
342 	bool for_mcu;
343 
344 	/**
345 	 * @destroyed: True if the VM was destroyed.
346 	 *
347 	 * No further bind requests should be queued to a destroyed VM.
348 	 */
349 	bool destroyed;
350 
351 	/**
352 	 * @unusable: True if the VM has turned unusable because something
353 	 * bad happened during an asynchronous request.
354 	 *
355 	 * We don't try to recover from such failures, because this implies
356 	 * informing userspace about the specific operation that failed, and
357 	 * hoping the userspace driver can replay things from there. This all
358 	 * sounds very complicated for little gain.
359 	 *
360 	 * Instead, we should just flag the VM as unusable, and fail any
361 	 * further request targeting this VM.
362 	 *
363 	 * We also provide a way to query a VM state, so userspace can destroy
364 	 * it and create a new one.
365 	 *
366 	 * As an analogy, this would be mapped to a VK_ERROR_DEVICE_LOST
367 	 * situation, where the logical device needs to be re-created.
368 	 */
369 	bool unusable;
370 
371 	/**
372 	 * @unhandled_fault: Unhandled fault happened.
373 	 *
374 	 * This should be reported to the scheduler, and the queue/group be
375 	 * flagged as faulty as a result.
376 	 */
377 	bool unhandled_fault;
378 
379 	/** @locked_region: Information about the currently locked region. */
380 	struct {
381 		/** @locked_region.start: Start of the locked region. */
382 		u64 start;
383 
384 		/** @locked_region.size: Size of the locked region. */
385 		u64 size;
386 	} locked_region;
387 };
388 
389 /**
390  * struct panthor_vm_bind_job - VM bind job
391  */
392 struct panthor_vm_bind_job {
393 	/** @base: Inherit from drm_sched_job. */
394 	struct drm_sched_job base;
395 
396 	/** @refcount: Reference count. */
397 	struct kref refcount;
398 
399 	/** @cleanup_op_ctx_work: Work used to cleanup the VM operation context. */
400 	struct work_struct cleanup_op_ctx_work;
401 
402 	/** @vm: VM targeted by the VM operation. */
403 	struct panthor_vm *vm;
404 
405 	/** @ctx: Operation context. */
406 	struct panthor_vm_op_ctx ctx;
407 };
408 
409 /*
410  * @pt_cache: Cache used to allocate MMU page tables.
411  *
412  * The pre-allocation pattern forces us to over-allocate to plan for
413  * the worst case scenario, and return the pages we didn't use.
414  *
415  * Having a kmem_cache allows us to speed up allocations.
416  */
417 static struct kmem_cache *pt_cache;
418 
419 /**
420  * alloc_pt() - Custom page table allocator
421  * @cookie: Cookie passed at page table allocation time.
422  * @size: Size of the page table. This size should be fixed,
423  * and determined at creation time based on the granule size.
424  * @gfp: GFP flags.
425  *
426  * We want a custom allocator so we can use a cache for page table
427  * allocations and amortize the cost of the over-reservation that's
428  * done to allow asynchronous VM operations.
429  *
430  * Return: non-NULL on success, NULL if the allocation failed for any
431  * reason.
432  */
433 static void *alloc_pt(void *cookie, size_t size, gfp_t gfp)
434 {
435 	struct panthor_vm *vm = cookie;
436 	void *page;
437 
438 	/* Allocation of the root page table happens during init. */
439 	if (unlikely(!vm->root_page_table)) {
440 		struct page *p;
441 
442 		drm_WARN_ON(&vm->ptdev->base, vm->op_ctx);
443 		p = alloc_pages_node(dev_to_node(vm->ptdev->base.dev),
444 				     gfp | __GFP_ZERO, get_order(size));
445 		page = p ? page_address(p) : NULL;
446 		vm->root_page_table = page;
447 		return page;
448 	}
449 
450 	/* We're not supposed to have anything bigger than 4k here, because we picked a
451 	 * 4k granule size at init time.
452 	 */
453 	if (drm_WARN_ON(&vm->ptdev->base, size != SZ_4K))
454 		return NULL;
455 
456 	/* We must have some op_ctx attached to the VM and it must have at least one
457 	 * free page.
458 	 */
459 	if (drm_WARN_ON(&vm->ptdev->base, !vm->op_ctx) ||
460 	    drm_WARN_ON(&vm->ptdev->base,
461 			vm->op_ctx->rsvd_page_tables.ptr >= vm->op_ctx->rsvd_page_tables.count))
462 		return NULL;
463 
464 	page = vm->op_ctx->rsvd_page_tables.pages[vm->op_ctx->rsvd_page_tables.ptr++];
465 	memset(page, 0, SZ_4K);
466 
467 	/* Page table entries don't use virtual addresses, which trips up
468 	 * kmemleak. kmemleak_alloc_phys() might work, but physical addresses
469 	 * are mixed with other fields, and I fear kmemleak won't detect that
470 	 * either.
471 	 *
472 	 * Let's just ignore memory passed to the page-table driver for now.
473 	 */
474 	kmemleak_ignore(page);
475 	return page;
476 }
477 
478 /**
479  * free_pt() - Custom page table free function
480  * @cookie: Cookie passed at page table allocation time.
481  * @data: Page table to free.
482  * @size: Size of the page table. This size should be fixed,
483  * and determined at creation time based on the granule size.
484  */
485 static void free_pt(void *cookie, void *data, size_t size)
486 {
487 	struct panthor_vm *vm = cookie;
488 
489 	if (unlikely(vm->root_page_table == data)) {
490 		free_pages((unsigned long)data, get_order(size));
491 		vm->root_page_table = NULL;
492 		return;
493 	}
494 
495 	if (drm_WARN_ON(&vm->ptdev->base, size != SZ_4K))
496 		return;
497 
498 	/* Return the page to the pt_cache. */
499 	kmem_cache_free(pt_cache, data);
500 }
501 
502 static int wait_ready(struct panthor_device *ptdev, u32 as_nr)
503 {
504 	int ret;
505 	u32 val;
506 
507 	/* Wait for the MMU status to indicate there is no active command, in
508 	 * case one is pending.
509 	 */
510 	ret = gpu_read_relaxed_poll_timeout_atomic(ptdev, AS_STATUS(as_nr), val,
511 						   !(val & AS_STATUS_AS_ACTIVE),
512 						   10, 100000);
513 
514 	if (ret) {
515 		panthor_device_schedule_reset(ptdev);
516 		drm_err(&ptdev->base, "AS_ACTIVE bit stuck\n");
517 	}
518 
519 	return ret;
520 }
521 
522 static int as_send_cmd_and_wait(struct panthor_device *ptdev, u32 as_nr, u32 cmd)
523 {
524 	int status;
525 
526 	/* write AS_COMMAND when MMU is ready to accept another command */
527 	status = wait_ready(ptdev, as_nr);
528 	if (!status) {
529 		gpu_write(ptdev, AS_COMMAND(as_nr), cmd);
530 		status = wait_ready(ptdev, as_nr);
531 	}
532 
533 	return status;
534 }
535 
536 static u64 pack_region_range(struct panthor_device *ptdev, u64 *region_start, u64 *size)
537 {
538 	u8 region_width;
539 	u64 region_end = *region_start + *size;
540 
541 	if (drm_WARN_ON_ONCE(&ptdev->base, !*size))
542 		return 0;
543 
544 	/*
545 	 * The locked region is a naturally aligned power of 2 block encoded as
546 	 * its log2 size minus 1.
547 	 * Calculate the desired start/end and look for the highest bit which
548 	 * differs. The smallest naturally aligned block must include this bit
549 	 * change, the desired region starts with this bit (and subsequent bits)
550 	 * zeroed and ends with the bit (and subsequent bits) set to one.
551 	 */
552 	region_width = max(fls64(*region_start ^ (region_end - 1)),
553 			   const_ilog2(AS_LOCK_REGION_MIN_SIZE)) - 1;
554 
555 	/*
556 	 * Mask off the low bits of region_start (which would be ignored by
557 	 * the hardware anyway)
558 	 */
559 	*region_start &= GENMASK_ULL(63, region_width);
560 	*size = 1ull << (region_width + 1);
561 
562 	return region_width | *region_start;
563 }
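/*
 * Worked example for pack_region_range(): locking start=0x200000,
 * size=0x300000 gives region_end=0x500000 and
 * fls64(0x200000 ^ 0x4fffff) = fls64(0x6fffff) = 23, so region_width = 22
 * (assuming AS_LOCK_REGION_MIN_SIZE is no bigger than 8M). The requested
 * range is widened to the naturally aligned 8M block [0x0, 0x800000), and
 * the value returned for AS_LOCKADDR encodes start 0x0 with width 22 in
 * its low bits.
 */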
564 
565 static u32 panthor_mmu_as_fault_mask(struct panthor_device *ptdev, u32 as)
566 {
567 	return BIT(as);
568 }
569 
570 /* Forward declaration to call helpers within as_enable/disable */
571 static void panthor_mmu_irq_handler(struct panthor_device *ptdev, u32 status);
572 PANTHOR_IRQ_HANDLER(mmu, MMU, panthor_mmu_irq_handler);
573 
574 static int panthor_mmu_as_enable(struct panthor_device *ptdev, u32 as_nr,
575 				 u64 transtab, u64 transcfg, u64 memattr)
576 {
577 	panthor_mmu_irq_enable_events(&ptdev->mmu->irq,
578 				      panthor_mmu_as_fault_mask(ptdev, as_nr));
579 
580 	gpu_write64(ptdev, AS_TRANSTAB(as_nr), transtab);
581 	gpu_write64(ptdev, AS_MEMATTR(as_nr), memattr);
582 	gpu_write64(ptdev, AS_TRANSCFG(as_nr), transcfg);
583 
584 	return as_send_cmd_and_wait(ptdev, as_nr, AS_COMMAND_UPDATE);
585 }
586 
587 static int panthor_mmu_as_disable(struct panthor_device *ptdev, u32 as_nr,
588 				  bool recycle_slot)
589 {
590 	struct panthor_vm *vm = ptdev->mmu->as.slots[as_nr].vm;
591 	int ret;
592 
593 	lockdep_assert_held(&ptdev->mmu->as.slots_lock);
594 
595 	panthor_mmu_irq_disable_events(&ptdev->mmu->irq,
596 				       panthor_mmu_as_fault_mask(ptdev, as_nr));
597 
598 	/* Flush+invalidate RW caches, invalidate RO ones. */
599 	ret = panthor_gpu_flush_caches(ptdev, CACHE_CLEAN | CACHE_INV,
600 				       CACHE_CLEAN | CACHE_INV, CACHE_INV);
601 	if (ret)
602 		return ret;
603 
604 	if (vm && vm->locked_region.size) {
605 		/* Unlock the region if there's a lock pending. */
606 		ret = as_send_cmd_and_wait(ptdev, vm->as.id, AS_COMMAND_UNLOCK);
607 		if (ret)
608 			return ret;
609 	}
610 
611 	/* If the slot is going to be used immediately, don't bother changing
612 	 * the config.
613 	 */
614 	if (recycle_slot)
615 		return 0;
616 
617 	gpu_write64(ptdev, AS_TRANSTAB(as_nr), 0);
618 	gpu_write64(ptdev, AS_MEMATTR(as_nr), 0);
619 	gpu_write64(ptdev, AS_TRANSCFG(as_nr), AS_TRANSCFG_ADRMODE_UNMAPPED);
620 
621 	return as_send_cmd_and_wait(ptdev, as_nr, AS_COMMAND_UPDATE);
622 }
623 
624 static u32 panthor_mmu_fault_mask(struct panthor_device *ptdev, u32 value)
625 {
626 	/* Bits 16 to 31 mean REQ_COMPLETE. */
627 	return value & GENMASK(15, 0);
628 }
629 
630 /**
631  * panthor_vm_has_unhandled_faults() - Check if a VM has unhandled faults
632  * @vm: VM to check.
633  *
634  * Return: true if the VM has unhandled faults, false otherwise.
635  */
636 bool panthor_vm_has_unhandled_faults(struct panthor_vm *vm)
637 {
638 	return vm->unhandled_fault;
639 }
640 
641 /**
642  * panthor_vm_is_unusable() - Check if the VM is still usable
643  * @vm: VM to check.
644  *
645  * Return: true if the VM is unusable, false otherwise.
646  */
647 bool panthor_vm_is_unusable(struct panthor_vm *vm)
648 {
649 	return vm->unusable;
650 }
651 
652 static void panthor_vm_release_as_locked(struct panthor_vm *vm)
653 {
654 	struct panthor_device *ptdev = vm->ptdev;
655 
656 	lockdep_assert_held(&ptdev->mmu->as.slots_lock);
657 
658 	if (drm_WARN_ON(&ptdev->base, vm->as.id < 0))
659 		return;
660 
661 	ptdev->mmu->as.slots[vm->as.id].vm = NULL;
662 	clear_bit(vm->as.id, &ptdev->mmu->as.alloc_mask);
663 	refcount_set(&vm->as.active_cnt, 0);
664 	list_del_init(&vm->as.lru_node);
665 	vm->as.id = -1;
666 }
667 
668 /**
669  * panthor_vm_active() - Flag a VM as active
670  * @vm: VM to flag as active.
671  *
672  * Assigns an address space to a VM so it can be used by the GPU/MCU.
673  *
674  * Return: 0 on success, a negative error code otherwise.
675  */
676 int panthor_vm_active(struct panthor_vm *vm)
677 {
678 	struct panthor_device *ptdev = vm->ptdev;
679 	u32 va_bits = GPU_MMU_FEATURES_VA_BITS(ptdev->gpu_info.mmu_features);
680 	struct io_pgtable_cfg *cfg = &io_pgtable_ops_to_pgtable(vm->pgtbl_ops)->cfg;
681 	int ret = 0, as, cookie;
682 	u64 transtab, transcfg;
683 	u32 fault_mask;
684 
685 	if (!drm_dev_enter(&ptdev->base, &cookie))
686 		return -ENODEV;
687 
688 	if (refcount_inc_not_zero(&vm->as.active_cnt))
689 		goto out_dev_exit;
690 
691 	/* Make sure we don't race with lock/unlock_region() calls
692 	 * happening around VM bind operations.
693 	 */
694 	mutex_lock(&vm->op_lock);
695 	mutex_lock(&ptdev->mmu->as.slots_lock);
696 
697 	if (refcount_inc_not_zero(&vm->as.active_cnt))
698 		goto out_unlock;
699 
700 	as = vm->as.id;
701 	if (as >= 0) {
702 		/* Unhandled pagefault on this AS, the MMU was disabled. We need to
703 		 * re-enable the MMU after clearing+unmasking the AS interrupts.
704 		 */
705 		if (ptdev->mmu->as.faulty_mask & panthor_mmu_as_fault_mask(ptdev, as))
706 			goto out_enable_as;
707 
708 		goto out_make_active;
709 	}
710 
711 	/* Check for a free AS */
712 	if (vm->for_mcu) {
713 		drm_WARN_ON(&ptdev->base, ptdev->mmu->as.alloc_mask & BIT(0));
714 		as = 0;
715 	} else {
716 		as = ffz(ptdev->mmu->as.alloc_mask | BIT(0));
717 	}
718 
719 	if (!(BIT(as) & ptdev->gpu_info.as_present)) {
720 		struct panthor_vm *lru_vm;
721 
722 		lru_vm = list_first_entry_or_null(&ptdev->mmu->as.lru_list,
723 						  struct panthor_vm,
724 						  as.lru_node);
725 		if (drm_WARN_ON(&ptdev->base, !lru_vm)) {
726 			ret = -EBUSY;
727 			goto out_unlock;
728 		}
729 
730 		drm_WARN_ON(&ptdev->base, refcount_read(&lru_vm->as.active_cnt));
731 		as = lru_vm->as.id;
732 
733 		ret = panthor_mmu_as_disable(ptdev, as, true);
734 		if (ret)
735 			goto out_unlock;
736 
737 		panthor_vm_release_as_locked(lru_vm);
738 	}
739 
740 	/* Assign the free or reclaimed AS to the FD */
741 	vm->as.id = as;
742 	set_bit(as, &ptdev->mmu->as.alloc_mask);
743 	ptdev->mmu->as.slots[as].vm = vm;
744 
745 out_enable_as:
746 	transtab = cfg->arm_lpae_s1_cfg.ttbr;
747 	transcfg = AS_TRANSCFG_PTW_MEMATTR_WB |
748 		   AS_TRANSCFG_PTW_RA |
749 		   AS_TRANSCFG_ADRMODE_AARCH64_4K |
750 		   AS_TRANSCFG_INA_BITS(55 - va_bits);
751 	if (ptdev->coherent)
752 		transcfg |= AS_TRANSCFG_PTW_SH_OS;
753 
754 	/* If the VM is re-activated, we clear the fault. */
755 	vm->unhandled_fault = false;
756 
757 	/* Unhandled pagefault on this AS, clear the fault and enable the AS,
758 	 * which re-enables interrupts.
759 	 */
760 	fault_mask = panthor_mmu_as_fault_mask(ptdev, as);
761 	if (ptdev->mmu->as.faulty_mask & fault_mask) {
762 		gpu_write(ptdev, MMU_INT_CLEAR, fault_mask);
763 		ptdev->mmu->as.faulty_mask &= ~fault_mask;
764 	}
765 
766 	/* The VM update is guarded by ::op_lock, which we take at the beginning
767 	 * of this function, so we don't expect any locked region here.
768 	 */
769 	drm_WARN_ON(&vm->ptdev->base, vm->locked_region.size > 0);
770 	ret = panthor_mmu_as_enable(vm->ptdev, vm->as.id, transtab, transcfg, vm->memattr);
771 
772 out_make_active:
773 	if (!ret) {
774 		refcount_set(&vm->as.active_cnt, 1);
775 		list_del_init(&vm->as.lru_node);
776 	}
777 
778 out_unlock:
779 	mutex_unlock(&ptdev->mmu->as.slots_lock);
780 	mutex_unlock(&vm->op_lock);
781 
782 out_dev_exit:
783 	drm_dev_exit(cookie);
784 	return ret;
785 }
786 
787 /**
788  * panthor_vm_idle() - Flag a VM idle
789  * @vm: VM to flag as idle.
790  *
791  * When we know the GPU is done with the VM (no more jobs to process),
792  * we can relinquish the AS slot attached to this VM, if any.
793  *
794  * We don't release the slot immediately, but instead place the VM in
795  * the LRU list, so it can be evicted if another VM needs an AS slot.
796  * This way, VMs stay attached to the AS they were given until we run
797  * out of free slots, limiting the number of MMU operations (TLB flush
798  * and other AS updates).
799  */
800 void panthor_vm_idle(struct panthor_vm *vm)
801 {
802 	struct panthor_device *ptdev = vm->ptdev;
803 
804 	if (!refcount_dec_and_mutex_lock(&vm->as.active_cnt, &ptdev->mmu->as.slots_lock))
805 		return;
806 
807 	if (!drm_WARN_ON(&ptdev->base, vm->as.id == -1 || !list_empty(&vm->as.lru_node)))
808 		list_add_tail(&vm->as.lru_node, &ptdev->mmu->as.lru_list);
809 
810 	refcount_set(&vm->as.active_cnt, 0);
811 	mutex_unlock(&ptdev->mmu->as.slots_lock);
812 }
813 
814 u32 panthor_vm_page_size(struct panthor_vm *vm)
815 {
816 	const struct io_pgtable *pgt = io_pgtable_ops_to_pgtable(vm->pgtbl_ops);
817 	u32 pg_shift = ffs(pgt->cfg.pgsize_bitmap) - 1;
818 
819 	return 1u << pg_shift;
820 }
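/*
 * Note: with the 4k granule picked at VM creation time (see the comment in
 * alloc_pt()), the lowest bit set in pgsize_bitmap is bit 12, so
 * panthor_vm_page_size() effectively returns SZ_4K.
 */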
821 
822 static void panthor_vm_stop(struct panthor_vm *vm)
823 {
824 	drm_sched_stop(&vm->sched, NULL);
825 }
826 
827 static void panthor_vm_start(struct panthor_vm *vm)
828 {
829 	drm_sched_start(&vm->sched, 0);
830 }
831 
832 /**
833  * panthor_vm_as() - Get the AS slot attached to a VM
834  * @vm: VM to get the AS slot of.
835  *
836  * Return: -1 if the VM is not assigned an AS slot yet, >= 0 otherwise.
837  */
838 int panthor_vm_as(struct panthor_vm *vm)
839 {
840 	return vm->as.id;
841 }
842 
843 static size_t get_pgsize(u64 addr, size_t size, size_t *count)
844 {
845 	/*
846 	 * io-pgtable only operates on multiple pages within a single table
847 	 * entry, so we need to split at boundaries of the table size, i.e.
848 	 * the next block size up. The distance from address A to the next
849 	 * boundary of block size B is logically B - A % B, but in unsigned
850 	 * two's complement where B is a power of two we get the equivalence
851 	 * B - A % B == (B - A) % B == (n * B - A) % B, and choose n = 0 :)
852 	 */
853 	size_t blk_offset = -addr % SZ_2M;
854 
855 	if (blk_offset || size < SZ_2M) {
856 		*count = min_not_zero(blk_offset, size) / SZ_4K;
857 		return SZ_4K;
858 	}
859 	blk_offset = -addr % SZ_1G ?: SZ_1G;
860 	*count = min(blk_offset, size) / SZ_2M;
861 	return SZ_2M;
862 }
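/*
 * Worked example for get_pgsize(): with addr=0x1ff000 and size=0x402000,
 * the first call returns SZ_4K with *count=1 (one 4K page up to the 2M
 * boundary at 0x200000); the next call, at addr=0x200000 with 0x401000
 * left, returns SZ_2M with *count=2; the remaining 0x1000 is then handled
 * as a single 4K page.
 */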
863 
864 static void panthor_vm_declare_unusable(struct panthor_vm *vm)
865 {
866 	struct panthor_device *ptdev = vm->ptdev;
867 	int cookie;
868 
869 	if (vm->unusable)
870 		return;
871 
872 	vm->unusable = true;
873 	mutex_lock(&ptdev->mmu->as.slots_lock);
874 	if (vm->as.id >= 0 && drm_dev_enter(&ptdev->base, &cookie)) {
875 		panthor_mmu_as_disable(ptdev, vm->as.id, false);
876 		drm_dev_exit(cookie);
877 	}
878 	mutex_unlock(&ptdev->mmu->as.slots_lock);
879 }
880 
881 static void panthor_vm_unmap_pages(struct panthor_vm *vm, u64 iova, u64 size)
882 {
883 	struct panthor_device *ptdev = vm->ptdev;
884 	struct io_pgtable_ops *ops = vm->pgtbl_ops;
885 	u64 start_iova = iova;
886 	u64 offset = 0;
887 
888 	if (!size)
889 		return;
890 
891 	drm_WARN_ON(&ptdev->base,
892 		    (iova < vm->locked_region.start) ||
893 		    (iova + size > vm->locked_region.start + vm->locked_region.size));
894 
895 	while (offset < size) {
896 		size_t unmapped_sz = 0, pgcount;
897 		size_t pgsize = get_pgsize(iova + offset, size - offset, &pgcount);
898 
899 		unmapped_sz = ops->unmap_pages(ops, iova + offset, pgsize, pgcount, NULL);
900 		if (drm_WARN_ON_ONCE(&ptdev->base, unmapped_sz != pgsize * pgcount)) {
901 			/* Gracefully handle sparsely unmapped regions to avoid leaving
902 			 * page table pages behind when the drm_gpuvm and VM page table
903 			 * are out-of-sync. This is not supposed to happen, hence the
904 			 * above WARN_ON().
905 			 */
906 			while (!ops->iova_to_phys(ops, iova + unmapped_sz) &&
907 			       unmapped_sz < pgsize * pgcount)
908 				unmapped_sz += SZ_4K;
909 
910 			/* We're past the point where we can try to fix things,
911 			 * so flag the VM unusable to make sure it's not going
912 			 * to be used anymore.
913 			 */
914 			panthor_vm_declare_unusable(vm);
915 
916 			/* If we don't make progress, we're screwed. That also means
917 			 * something else prevents us from unmapping the region, but
918 			 * there's not much we can do here: time for debugging.
919 			 */
920 			if (drm_WARN_ON_ONCE(&ptdev->base, !unmapped_sz))
921 				return;
922 		}
923 
924 		drm_dbg(&ptdev->base,
925 			"unmap: as=%d, iova=0x%llx, sz=%llu, va=0x%llx, pgcnt=%zu, pgsz=%zu",
926 			vm->as.id, start_iova, size, iova + offset,
927 			unmapped_sz / pgsize, pgsize);
928 
929 		offset += unmapped_sz;
930 	}
931 }
932 
933 static int
934 panthor_vm_map_pages(struct panthor_vm *vm, u64 iova, int prot,
935 		     struct sg_table *sgt, u64 offset, u64 size)
936 {
937 	struct panthor_device *ptdev = vm->ptdev;
938 	unsigned int count;
939 	struct scatterlist *sgl;
940 	struct io_pgtable_ops *ops = vm->pgtbl_ops;
941 	u64 start_iova = iova;
942 	u64 start_size = size;
943 	int ret;
944 
945 	if (!size)
946 		return 0;
947 
948 	drm_WARN_ON(&ptdev->base,
949 		    (iova < vm->locked_region.start) ||
950 		    (iova + size > vm->locked_region.start + vm->locked_region.size));
951 
952 	for_each_sgtable_dma_sg(sgt, sgl, count) {
953 		dma_addr_t paddr = sg_dma_address(sgl);
954 		size_t len = sg_dma_len(sgl);
955 
956 		if (len <= offset) {
957 			offset -= len;
958 			continue;
959 		}
960 
961 		paddr += offset;
962 		len -= offset;
963 		len = min_t(size_t, len, size);
964 		size -= len;
965 
966 		while (len) {
967 			size_t pgcount, mapped = 0;
968 			size_t pgsize = get_pgsize(iova | paddr, len, &pgcount);
969 
970 			ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,
971 					     GFP_KERNEL, &mapped);
972 
973 			drm_dbg(&ptdev->base,
974 				"map: as=%d, iova=0x%llx, sz=%llu, va=0x%llx, pa=%pad, pgcnt=%zu, pgsz=%zu",
975 				vm->as.id, start_iova, start_size, iova, &paddr,
976 				mapped / pgsize, pgsize);
977 
978 			iova += mapped;
979 			paddr += mapped;
980 			len -= mapped;
981 
982 			/* If nothing was mapped, consider it an ENOMEM. */
983 			if (!ret && !mapped)
984 				ret = -ENOMEM;
985 
986 			/* If something fails, we stop there, and flag the VM unusable. */
987 			if (drm_WARN_ON_ONCE(&ptdev->base, ret)) {
988 				/* Unmap what we've already mapped to avoid leaving page
989 				 * table pages behind.
990 				 */
991 				panthor_vm_unmap_pages(vm, start_iova, iova - start_iova);
992 				panthor_vm_declare_unusable(vm);
993 				return ret;
994 			}
995 		}
996 
997 		if (!size)
998 			break;
999 
1000 		offset = 0;
1001 	}
1002 
1003 	return 0;
1004 }
1005 
1006 static int flags_to_prot(u32 flags)
1007 {
1008 	int prot = 0;
1009 
1010 	if (flags & DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC)
1011 		prot |= IOMMU_NOEXEC;
1012 
1013 	if (!(flags & DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED))
1014 		prot |= IOMMU_CACHE;
1015 
1016 	if (flags & DRM_PANTHOR_VM_BIND_OP_MAP_READONLY)
1017 		prot |= IOMMU_READ;
1018 	else
1019 		prot |= IOMMU_READ | IOMMU_WRITE;
1020 
1021 	return prot;
1022 }
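/*
 * For instance, DRM_PANTHOR_VM_BIND_OP_MAP_READONLY |
 * DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC translates to
 * IOMMU_READ | IOMMU_NOEXEC | IOMMU_CACHE, while flags=0 gives the
 * default IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE.
 */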
1023 
1024 /**
1025  * panthor_vm_alloc_va() - Allocate a region in the auto-va space
1026  * @vm: VM to allocate a region on.
1027  * @va: start of the VA range. Can be PANTHOR_VM_KERNEL_AUTO_VA if the user
1028  * wants the VA to be automatically allocated from the auto-VA range.
1029  * @size: size of the VA range.
1030  * @va_node: drm_mm_node to initialize. Must be zero-initialized.
1031  *
1032  * Some GPU objects, like heap chunks, are fully managed by the kernel and
1033  * need to be mapped to the userspace VM, in the region reserved for kernel
1034  * objects.
1035  *
1036  * This function takes care of allocating a region in the kernel auto-VA space.
1037  *
1038  * Return: 0 on success, an error code otherwise.
1039  */
1040 int
1041 panthor_vm_alloc_va(struct panthor_vm *vm, u64 va, u64 size,
1042 		    struct drm_mm_node *va_node)
1043 {
1044 	ssize_t vm_pgsz = panthor_vm_page_size(vm);
1045 	int ret;
1046 
1047 	if (!size || !IS_ALIGNED(size, vm_pgsz))
1048 		return -EINVAL;
1049 
1050 	if (va != PANTHOR_VM_KERNEL_AUTO_VA && !IS_ALIGNED(va, vm_pgsz))
1051 		return -EINVAL;
1052 
1053 	mutex_lock(&vm->mm_lock);
1054 	if (va != PANTHOR_VM_KERNEL_AUTO_VA) {
1055 		va_node->start = va;
1056 		va_node->size = size;
1057 		ret = drm_mm_reserve_node(&vm->mm, va_node);
1058 	} else {
1059 		ret = drm_mm_insert_node_in_range(&vm->mm, va_node, size,
1060 						  size >= SZ_2M ? SZ_2M : SZ_4K,
1061 						  0, vm->kernel_auto_va.start,
1062 						  vm->kernel_auto_va.end,
1063 						  DRM_MM_INSERT_BEST);
1064 	}
1065 	mutex_unlock(&vm->mm_lock);
1066 
1067 	return ret;
1068 }
1069 
1070 /**
1071  * panthor_vm_free_va() - Free a region allocated with panthor_vm_alloc_va()
1072  * @vm: VM to free the region on.
1073  * @va_node: Memory node representing the region to free.
1074  */
1075 void panthor_vm_free_va(struct panthor_vm *vm, struct drm_mm_node *va_node)
1076 {
1077 	mutex_lock(&vm->mm_lock);
1078 	drm_mm_remove_node(va_node);
1079 	mutex_unlock(&vm->mm_lock);
1080 }
1081 
1082 static void panthor_vm_bo_free(struct drm_gpuvm_bo *vm_bo)
1083 {
1084 	struct panthor_gem_object *bo = to_panthor_bo(vm_bo->obj);
1085 
1086 	if (!drm_gem_is_imported(&bo->base.base))
1087 		drm_gem_shmem_unpin(&bo->base);
1088 	kfree(vm_bo);
1089 }
1090 
1091 static void panthor_vm_cleanup_op_ctx(struct panthor_vm_op_ctx *op_ctx,
1092 				      struct panthor_vm *vm)
1093 {
1094 	u32 remaining_pt_count = op_ctx->rsvd_page_tables.count -
1095 				 op_ctx->rsvd_page_tables.ptr;
1096 
1097 	if (remaining_pt_count) {
1098 		kmem_cache_free_bulk(pt_cache, remaining_pt_count,
1099 				     op_ctx->rsvd_page_tables.pages +
1100 				     op_ctx->rsvd_page_tables.ptr);
1101 	}
1102 
1103 	kfree(op_ctx->rsvd_page_tables.pages);
1104 
1105 	if (op_ctx->map.vm_bo)
1106 		drm_gpuvm_bo_put_deferred(op_ctx->map.vm_bo);
1107 
1108 	for (u32 i = 0; i < ARRAY_SIZE(op_ctx->preallocated_vmas); i++)
1109 		kfree(op_ctx->preallocated_vmas[i]);
1110 
1111 	drm_gpuvm_bo_deferred_cleanup(&vm->base);
1112 }
1113 
1114 static void
1115 panthor_vm_op_ctx_return_vma(struct panthor_vm_op_ctx *op_ctx,
1116 			     struct panthor_vma *vma)
1117 {
1118 	for (u32 i = 0; i < ARRAY_SIZE(op_ctx->preallocated_vmas); i++) {
1119 		if (!op_ctx->preallocated_vmas[i]) {
1120 			op_ctx->preallocated_vmas[i] = vma;
1121 			return;
1122 		}
1123 	}
1124 
1125 	WARN_ON_ONCE(1);
1126 }
1127 
1128 static struct panthor_vma *
1129 panthor_vm_op_ctx_get_vma(struct panthor_vm_op_ctx *op_ctx)
1130 {
1131 	for (u32 i = 0; i < ARRAY_SIZE(op_ctx->preallocated_vmas); i++) {
1132 		struct panthor_vma *vma = op_ctx->preallocated_vmas[i];
1133 
1134 		if (vma) {
1135 			op_ctx->preallocated_vmas[i] = NULL;
1136 			return vma;
1137 		}
1138 	}
1139 
1140 	return NULL;
1141 }
1142 
1143 static int
1144 panthor_vm_op_ctx_prealloc_vmas(struct panthor_vm_op_ctx *op_ctx)
1145 {
1146 	u32 vma_count;
1147 
1148 	switch (op_ctx->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) {
1149 	case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP:
1150 		/* One VMA for the new mapping, and two more VMAs for the remap case
1151 		 * which might contain both a prev and next VA.
1152 		 */
1153 		vma_count = 3;
1154 		break;
1155 
1156 	case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
1157 		/* Two VMAs can be needed for an unmap, as an unmap can happen
1158 		 * in the middle of a drm_gpuva, requiring a remap with both
1159 		 * prev & next VA. Or an unmap can span more than one drm_gpuva
1160 		 * where the first and last ones are covered partially, requring
1161 		 * where the first and last ones are covered partially, requiring
1162 		 * with a next VA.
1163 		 */
1164 		vma_count = 2;
1165 		break;
1166 
1167 	default:
1168 		return 0;
1169 	}
1170 
1171 	for (u32 i = 0; i < vma_count; i++) {
1172 		struct panthor_vma *vma = kzalloc_obj(*vma);
1173 
1174 		if (!vma)
1175 			return -ENOMEM;
1176 
1177 		op_ctx->preallocated_vmas[i] = vma;
1178 	}
1179 
1180 	return 0;
1181 }
1182 
1183 #define PANTHOR_VM_BIND_OP_MAP_FLAGS \
1184 	(DRM_PANTHOR_VM_BIND_OP_MAP_READONLY | \
1185 	 DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC | \
1186 	 DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED | \
1187 	 DRM_PANTHOR_VM_BIND_OP_TYPE_MASK)
1188 
1189 static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
1190 					 struct panthor_vm *vm,
1191 					 struct panthor_gem_object *bo,
1192 					 u64 offset,
1193 					 u64 size, u64 va,
1194 					 u32 flags)
1195 {
1196 	struct drm_gpuvm_bo *preallocated_vm_bo;
1197 	struct sg_table *sgt = NULL;
1198 	u64 pt_count;
1199 	int ret;
1200 
1201 	if (!bo)
1202 		return -EINVAL;
1203 
1204 	if ((flags & ~PANTHOR_VM_BIND_OP_MAP_FLAGS) ||
1205 	    (flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) != DRM_PANTHOR_VM_BIND_OP_TYPE_MAP)
1206 		return -EINVAL;
1207 
1208 	/* Make sure the VA and size are in-bounds. */
1209 	if (size > bo->base.base.size || offset > bo->base.base.size - size)
1210 		return -EINVAL;
1211 
1212 	/* If the BO has an exclusive VM attached, it can't be mapped to other VMs. */
1213 	if (bo->exclusive_vm_root_gem &&
1214 	    bo->exclusive_vm_root_gem != panthor_vm_root_gem(vm))
1215 		return -EINVAL;
1216 
1217 	memset(op_ctx, 0, sizeof(*op_ctx));
1218 	op_ctx->flags = flags;
1219 	op_ctx->va.range = size;
1220 	op_ctx->va.addr = va;
1221 
1222 	ret = panthor_vm_op_ctx_prealloc_vmas(op_ctx);
1223 	if (ret)
1224 		goto err_cleanup;
1225 
1226 	if (!drm_gem_is_imported(&bo->base.base)) {
1227 		/* Pre-reserve the BO pages, so the map operation doesn't have to
1228 		 * allocate. This pin is dropped in panthor_vm_bo_free(), so
1229 		 * once we have successfully called drm_gpuvm_bo_create(),
1230 		 * GPUVM will take care of dropping the pin for us.
1231 		 */
1232 		ret = drm_gem_shmem_pin(&bo->base);
1233 		if (ret)
1234 			goto err_cleanup;
1235 	}
1236 
1237 	sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
1238 	if (IS_ERR(sgt)) {
1239 		if (!drm_gem_is_imported(&bo->base.base))
1240 			drm_gem_shmem_unpin(&bo->base);
1241 
1242 		ret = PTR_ERR(sgt);
1243 		goto err_cleanup;
1244 	}
1245 
1246 	op_ctx->map.sgt = sgt;
1247 
1248 	preallocated_vm_bo = drm_gpuvm_bo_create(&vm->base, &bo->base.base);
1249 	if (!preallocated_vm_bo) {
1250 		if (!drm_gem_is_imported(&bo->base.base))
1251 			drm_gem_shmem_unpin(&bo->base);
1252 
1253 		ret = -ENOMEM;
1254 		goto err_cleanup;
1255 	}
1256 
1257 	op_ctx->map.vm_bo = drm_gpuvm_bo_obtain_prealloc(preallocated_vm_bo);
1258 
1259 	op_ctx->map.bo_offset = offset;
1260 
1261 	/* L1, L2 and L3 page tables.
1262 	 * We could optimize L3 allocation by iterating over the sgt and merging
1263 	 * 2M contiguous blocks, but it's simpler to over-provision and return
1264 	 * the pages if they're not used.
1265 	 */
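	/* Worked example: mapping 4M at va=64M touches one 512G region, one
	 * 1G region and two 2M regions, so pt_count = 1 + 1 + 2 = 4 pages
	 * are reserved, even if some of those tables already exist; unused
	 * pages go back to pt_cache when the op_ctx is cleaned up.
	 */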
1266 	pt_count = ((ALIGN(va + size, 1ull << 39) - ALIGN_DOWN(va, 1ull << 39)) >> 39) +
1267 		   ((ALIGN(va + size, 1ull << 30) - ALIGN_DOWN(va, 1ull << 30)) >> 30) +
1268 		   ((ALIGN(va + size, 1ull << 21) - ALIGN_DOWN(va, 1ull << 21)) >> 21);
1269 
1270 	op_ctx->rsvd_page_tables.pages = kzalloc_objs(*op_ctx->rsvd_page_tables.pages,
1271 						      pt_count);
1272 	if (!op_ctx->rsvd_page_tables.pages) {
1273 		ret = -ENOMEM;
1274 		goto err_cleanup;
1275 	}
1276 
1277 	ret = kmem_cache_alloc_bulk(pt_cache, GFP_KERNEL, pt_count,
1278 				    op_ctx->rsvd_page_tables.pages);
1279 	op_ctx->rsvd_page_tables.count = ret;
1280 	if (ret != pt_count) {
1281 		ret = -ENOMEM;
1282 		goto err_cleanup;
1283 	}
1284 
1285 	/* Insert BO into the extobj list last, when we know nothing can fail. */
1286 	dma_resv_lock(panthor_vm_resv(vm), NULL);
1287 	drm_gpuvm_bo_extobj_add(op_ctx->map.vm_bo);
1288 	dma_resv_unlock(panthor_vm_resv(vm));
1289 
1290 	return 0;
1291 
1292 err_cleanup:
1293 	panthor_vm_cleanup_op_ctx(op_ctx, vm);
1294 	return ret;
1295 }
1296 
1297 static int panthor_vm_prepare_unmap_op_ctx(struct panthor_vm_op_ctx *op_ctx,
1298 					   struct panthor_vm *vm,
1299 					   u64 va, u64 size)
1300 {
1301 	u32 pt_count = 0;
1302 	int ret;
1303 
1304 	memset(op_ctx, 0, sizeof(*op_ctx));
1305 	op_ctx->va.range = size;
1306 	op_ctx->va.addr = va;
1307 	op_ctx->flags = DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP;
1308 
1309 	/* Pre-allocate L3 page tables to account for the split-2M-block
1310 	 * situation on unmap.
1311 	 */
1312 	if (va != ALIGN(va, SZ_2M))
1313 		pt_count++;
1314 
1315 	if (va + size != ALIGN(va + size, SZ_2M) &&
1316 	    ALIGN(va + size, SZ_2M) != ALIGN(va, SZ_2M))
1317 		pt_count++;
1318 
1319 	ret = panthor_vm_op_ctx_prealloc_vmas(op_ctx);
1320 	if (ret)
1321 		goto err_cleanup;
1322 
1323 	if (pt_count) {
1324 		op_ctx->rsvd_page_tables.pages = kzalloc_objs(*op_ctx->rsvd_page_tables.pages,
1325 							      pt_count);
1326 		if (!op_ctx->rsvd_page_tables.pages) {
1327 			ret = -ENOMEM;
1328 			goto err_cleanup;
1329 		}
1330 
1331 		ret = kmem_cache_alloc_bulk(pt_cache, GFP_KERNEL, pt_count,
1332 					    op_ctx->rsvd_page_tables.pages);
1333 		if (ret != pt_count) {
1334 			ret = -ENOMEM;
1335 			goto err_cleanup;
1336 		}
1337 		op_ctx->rsvd_page_tables.count = pt_count;
1338 	}
1339 
1340 	return 0;
1341 
1342 err_cleanup:
1343 	panthor_vm_cleanup_op_ctx(op_ctx, vm);
1344 	return ret;
1345 }
1346 
1347 static void panthor_vm_prepare_sync_only_op_ctx(struct panthor_vm_op_ctx *op_ctx,
1348 						struct panthor_vm *vm)
1349 {
1350 	memset(op_ctx, 0, sizeof(*op_ctx));
1351 	op_ctx->flags = DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY;
1352 }
1353 
1354 /**
1355  * panthor_vm_get_bo_for_va() - Get the GEM object mapped at a virtual address
1356  * @vm: VM to look into.
1357  * @va: Virtual address to search for.
1358  * @bo_offset: Offset of the GEM object mapped at this virtual address.
1359  * Only valid on success.
1360  *
1361  * The object returned by this function might no longer be mapped when the
1362  * function returns. It's the caller's responsibility to ensure there are no
1363  * concurrent map/unmap operations making the returned value invalid, or
1364  * make sure it doesn't matter if the object is no longer mapped.
1365  *
1366  * Return: A valid pointer on success, an ERR_PTR() otherwise.
1367  */
1368 struct panthor_gem_object *
1369 panthor_vm_get_bo_for_va(struct panthor_vm *vm, u64 va, u64 *bo_offset)
1370 {
1371 	struct panthor_gem_object *bo = ERR_PTR(-ENOENT);
1372 	struct drm_gpuva *gpuva;
1373 	struct panthor_vma *vma;
1374 
1375 	/* Take the VM lock to prevent concurrent map/unmap operations. */
1376 	mutex_lock(&vm->op_lock);
1377 	gpuva = drm_gpuva_find_first(&vm->base, va, 1);
1378 	vma = gpuva ? container_of(gpuva, struct panthor_vma, base) : NULL;
1379 	if (vma && vma->base.gem.obj) {
1380 		drm_gem_object_get(vma->base.gem.obj);
1381 		bo = to_panthor_bo(vma->base.gem.obj);
1382 		*bo_offset = vma->base.gem.offset + (va - vma->base.va.addr);
1383 	}
1384 	mutex_unlock(&vm->op_lock);
1385 
1386 	return bo;
1387 }
1388 
1389 #define PANTHOR_VM_MIN_KERNEL_VA_SIZE	SZ_256M
1390 
1391 static u64
1392 panthor_vm_create_get_user_va_range(const struct drm_panthor_vm_create *args,
1393 				    u64 full_va_range)
1394 {
1395 	u64 user_va_range;
1396 
1397 	/* Make sure we have a minimum amount of VA space for kernel objects. */
1398 	if (full_va_range < PANTHOR_VM_MIN_KERNEL_VA_SIZE)
1399 		return 0;
1400 
1401 	if (args->user_va_range) {
1402 		/* Use the user provided value if != 0. */
1403 		user_va_range = args->user_va_range;
1404 	} else if (TASK_SIZE_OF(current) < full_va_range) {
1405 		/* If the task VM size is smaller than the GPU VA range, pick this
1406 		 * as our default user VA range, so userspace can CPU/GPU map buffers
1407 		 * at the same address.
1408 		 */
1409 		user_va_range = TASK_SIZE_OF(current);
1410 	} else {
1411 		/* If the GPU VA range is smaller than the task VM size, we
1412 		 * just have to live with the fact we won't be able to map
1413 		 * all buffers at the same GPU/CPU address.
1414 		 *
1415 		 * If the GPU VA range is bigger than 4G (more than 32 bits of
1416 		 * VA), we split the range in two, and assign half of it to
1417 		 * the user and the other half to the kernel, if it's not, we
1418 		 * keep the kernel VA space as small as possible.
1419 		 */
1420 		user_va_range = full_va_range > SZ_4G ?
1421 				full_va_range / 2 :
1422 				full_va_range - PANTHOR_VM_MIN_KERNEL_VA_SIZE;
1423 	}
1424 
1425 	if (full_va_range - PANTHOR_VM_MIN_KERNEL_VA_SIZE < user_va_range)
1426 		user_va_range = full_va_range - PANTHOR_VM_MIN_KERNEL_VA_SIZE;
1427 
1428 	return user_va_range;
1429 }
1430 
1431 #define PANTHOR_VM_CREATE_FLAGS		0
1432 
1433 static int
1434 panthor_vm_create_check_args(const struct panthor_device *ptdev,
1435 			     const struct drm_panthor_vm_create *args,
1436 			     u64 *kernel_va_start, u64 *kernel_va_range)
1437 {
1438 	u32 va_bits = GPU_MMU_FEATURES_VA_BITS(ptdev->gpu_info.mmu_features);
1439 	u64 full_va_range = 1ull << va_bits;
1440 	u64 user_va_range;
1441 
1442 	if (args->flags & ~PANTHOR_VM_CREATE_FLAGS)
1443 		return -EINVAL;
1444 
1445 	user_va_range = panthor_vm_create_get_user_va_range(args, full_va_range);
1446 	if (!user_va_range || (args->user_va_range && args->user_va_range > user_va_range))
1447 		return -EINVAL;
1448 
1449 	/* Pick a kernel VA range that's a power of two, to have a clear split. */
1450 	*kernel_va_range = rounddown_pow_of_two(full_va_range - user_va_range);
1451 	*kernel_va_start = full_va_range - *kernel_va_range;
1452 	return 0;
1453 }
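/*
 * Worked example: with va_bits=48 (256T of VA), no user-provided
 * user_va_range, and a task size of 512G (an assumed, typical 64-bit
 * configuration), the user range defaults to 512G, so
 * *kernel_va_range = rounddown_pow_of_two(256T - 512G) = 128T and
 * *kernel_va_start = 128T.
 */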
1454 
1455 /*
1456  * Only 32 VMs per open file. If that becomes a limiting factor, we can
1457  * increase this number.
1458  */
1459 #define PANTHOR_MAX_VMS_PER_FILE	32
1460 
1461 /**
1462  * panthor_vm_pool_create_vm() - Create a VM
1463  * @ptdev: The panthor device
1464  * @pool: The VM pool to create this VM in.
1465  * @args: VM creation args.
1466  *
1467  * Return: a positive VM ID on success, a negative error code otherwise.
1468  */
1469 int panthor_vm_pool_create_vm(struct panthor_device *ptdev,
1470 			      struct panthor_vm_pool *pool,
1471 			      struct drm_panthor_vm_create *args)
1472 {
1473 	u64 kernel_va_start, kernel_va_range;
1474 	struct panthor_vm *vm;
1475 	int ret;
1476 	u32 id;
1477 
1478 	ret = panthor_vm_create_check_args(ptdev, args, &kernel_va_start, &kernel_va_range);
1479 	if (ret)
1480 		return ret;
1481 
1482 	vm = panthor_vm_create(ptdev, false, kernel_va_start, kernel_va_range,
1483 			       kernel_va_start, kernel_va_range);
1484 	if (IS_ERR(vm))
1485 		return PTR_ERR(vm);
1486 
1487 	ret = xa_alloc(&pool->xa, &id, vm,
1488 		       XA_LIMIT(1, PANTHOR_MAX_VMS_PER_FILE), GFP_KERNEL);
1489 
1490 	if (ret) {
1491 		panthor_vm_put(vm);
1492 		return ret;
1493 	}
1494 
1495 	args->user_va_range = kernel_va_start;
1496 	return id;
1497 }
1498 
1499 static void panthor_vm_destroy(struct panthor_vm *vm)
1500 {
1501 	if (!vm)
1502 		return;
1503 
1504 	vm->destroyed = true;
1505 
1506 	/* Tell scheduler to stop all GPU work related to this VM */
1507 	if (refcount_read(&vm->as.active_cnt) > 0)
1508 		panthor_sched_prepare_for_vm_destruction(vm->ptdev);
1509 
1510 	mutex_lock(&vm->heaps.lock);
1511 	panthor_heap_pool_destroy(vm->heaps.pool);
1512 	vm->heaps.pool = NULL;
1513 	mutex_unlock(&vm->heaps.lock);
1514 
1515 	drm_WARN_ON(&vm->ptdev->base,
1516 		    panthor_vm_unmap_range(vm, vm->base.mm_start, vm->base.mm_range));
1517 	panthor_vm_put(vm);
1518 }
1519 
1520 /**
1521  * panthor_vm_pool_destroy_vm() - Destroy a VM.
1522  * @pool: VM pool.
1523  * @handle: VM handle.
1524  *
1525  * This function doesn't free the VM object or its resources; it just kills
1526  * all mappings, and makes sure nothing can be mapped after that point.
1527  *
1528  * If there were any active jobs at the time this function is called, these
1529  * jobs should experience page faults and be killed as a result.
1530  *
1531  * The VM resources are freed when the last reference on the VM object is
1532  * dropped.
1533  *
1534  * Return: %0 for success, negative errno value for failure
1535  */
1536 int panthor_vm_pool_destroy_vm(struct panthor_vm_pool *pool, u32 handle)
1537 {
1538 	struct panthor_vm *vm;
1539 
1540 	vm = xa_erase(&pool->xa, handle);
1541 
1542 	panthor_vm_destroy(vm);
1543 
1544 	return vm ? 0 : -EINVAL;
1545 }
1546 
1547 /**
1548  * panthor_vm_pool_get_vm() - Retrieve VM object bound to a VM handle
1549  * @pool: VM pool to check.
1550  * @handle: Handle of the VM to retrieve.
1551  *
1552  * Return: A valid pointer if the VM exists, NULL otherwise.
1553  */
1554 struct panthor_vm *
1555 panthor_vm_pool_get_vm(struct panthor_vm_pool *pool, u32 handle)
1556 {
1557 	struct panthor_vm *vm;
1558 
1559 	xa_lock(&pool->xa);
1560 	vm = panthor_vm_get(xa_load(&pool->xa, handle));
1561 	xa_unlock(&pool->xa);
1562 
1563 	return vm;
1564 }
1565 
1566 /**
1567  * panthor_vm_pool_destroy() - Destroy a VM pool.
1568  * @pfile: File.
1569  *
1570  * Destroy all VMs in the pool, and release the pool resources.
1571  *
1572  * Note that VMs can outlive the pool they were created from if other
1573  * objects hold a reference to these VMs.
1574  */
1575 void panthor_vm_pool_destroy(struct panthor_file *pfile)
1576 {
1577 	struct panthor_vm *vm;
1578 	unsigned long i;
1579 
1580 	if (!pfile->vms)
1581 		return;
1582 
1583 	xa_for_each(&pfile->vms->xa, i, vm)
1584 		panthor_vm_destroy(vm);
1585 
1586 	xa_destroy(&pfile->vms->xa);
1587 	kfree(pfile->vms);
1588 }
1589 
1590 /**
1591  * panthor_vm_pool_create() - Create a VM pool
1592  * @pfile: File.
1593  *
1594  * Return: 0 on success, a negative error code otherwise.
1595  */
1596 int panthor_vm_pool_create(struct panthor_file *pfile)
1597 {
1598 	pfile->vms = kzalloc_obj(*pfile->vms);
1599 	if (!pfile->vms)
1600 		return -ENOMEM;
1601 
1602 	xa_init_flags(&pfile->vms->xa, XA_FLAGS_ALLOC1);
1603 	return 0;
1604 }
1605 
1606 /* dummy TLB ops, the real TLB flush happens in panthor_vm_flush_range() */
1607 static void mmu_tlb_flush_all(void *cookie)
1608 {
1609 }
1610 
1611 static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule, void *cookie)
1612 {
1613 }
1614 
1615 static const struct iommu_flush_ops mmu_tlb_ops = {
1616 	.tlb_flush_all = mmu_tlb_flush_all,
1617 	.tlb_flush_walk = mmu_tlb_flush_walk,
1618 };
1619 
1620 static const char *access_type_name(struct panthor_device *ptdev,
1621 				    u32 fault_status)
1622 {
1623 	switch (fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) {
1624 	case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC:
1625 		return "ATOMIC";
1626 	case AS_FAULTSTATUS_ACCESS_TYPE_READ:
1627 		return "READ";
1628 	case AS_FAULTSTATUS_ACCESS_TYPE_WRITE:
1629 		return "WRITE";
1630 	case AS_FAULTSTATUS_ACCESS_TYPE_EX:
1631 		return "EXECUTE";
1632 	default:
1633 		drm_WARN_ON(&ptdev->base, 1);
1634 		return NULL;
1635 	}
1636 }
1637 
1638 static int panthor_vm_lock_region(struct panthor_vm *vm, u64 start, u64 size)
1639 {
1640 	struct panthor_device *ptdev = vm->ptdev;
1641 	int ret = 0;
1642 
1643 	/* sm_step_remap() can call panthor_vm_lock_region() to account for
1644 	 * the wider unmap needed when doing a partial huge page unmap. We
1645 	 * need to ignore the lock if it's already part of the locked region.
1646 	 */
1647 	if (start >= vm->locked_region.start &&
1648 	    start + size <= vm->locked_region.start + vm->locked_region.size)
1649 		return 0;
1650 
1651 	/* sm_step_remap() may need a locked region that isn't a strict superset
1652 	 * of the original one because of having to extend unmap boundaries beyond
1653 	 * it to deal with partial unmaps of transparent huge pages. What we want
1654 	 * in those cases is to lock the union of both regions. The new region must
1655 	 * always overlap with the original one, because the upper and lower unmap
1656 	 * boundaries in a remap operation can only shift up or down respectively,
1657 	 * but never otherwise.
1658 	 */
1659 	if (vm->locked_region.size) {
1660 		u64 end = max(vm->locked_region.start + vm->locked_region.size,
1661 			      start + size);
1662 
1663 		drm_WARN_ON_ONCE(&vm->ptdev->base, (start + size <= vm->locked_region.start) ||
1664 				 (start >= vm->locked_region.start + vm->locked_region.size));
1665 
1666 		start = min(start, vm->locked_region.start);
1667 		size = end - start;
1668 	}
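	/* Example: if the locked region is currently [0x200000, 0x400000) and
	 * a remap asks for [0x1ff000, 0x201000), the ranges overlap and the
	 * union [0x1ff000, 0x400000) is used, before pack_region_range()
	 * widens it to a naturally aligned power-of-two block.
	 */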
1669 
1670 	mutex_lock(&ptdev->mmu->as.slots_lock);
1671 	if (vm->as.id >= 0 && size) {
1672 		/* Lock the region that needs to be updated */
1673 		gpu_write64(ptdev, AS_LOCKADDR(vm->as.id),
1674 			    pack_region_range(ptdev, &start, &size));
1675 
1676 		/* If the lock succeeded, update the locked_region info. */
1677 		ret = as_send_cmd_and_wait(ptdev, vm->as.id, AS_COMMAND_LOCK);
1678 	}
1679 
1680 	if (!ret) {
1681 		vm->locked_region.start = start;
1682 		vm->locked_region.size = size;
1683 	}
1684 	mutex_unlock(&ptdev->mmu->as.slots_lock);
1685 
1686 	return ret;
1687 }
1688 
1689 static void panthor_vm_unlock_region(struct panthor_vm *vm)
1690 {
1691 	struct panthor_device *ptdev = vm->ptdev;
1692 
1693 	mutex_lock(&ptdev->mmu->as.slots_lock);
1694 	if (vm->as.id >= 0) {
1695 		int ret;
1696 
1697 		/* flush+invalidate RW caches and invalidate RO ones.
1698 		 * TODO: See if we can use FLUSH_PA_RANGE when the physical
1699 		 * range is narrow enough and the HW supports it.
1700 		 */
1701 		ret = panthor_gpu_flush_caches(ptdev, CACHE_CLEAN | CACHE_INV,
1702 					       CACHE_CLEAN | CACHE_INV,
1703 					       CACHE_INV);
1704 
1705 		/* Unlock the region if the flush is effective. */
1706 		if (!ret)
1707 			ret = as_send_cmd_and_wait(ptdev, vm->as.id, AS_COMMAND_UNLOCK);
1708 
1709 		/* If we fail to flush or unlock the region, schedule a GPU reset
1710 		 * to unblock the situation.
1711 		 */
1712 		if (ret)
1713 			panthor_device_schedule_reset(ptdev);
1714 	}
1715 	vm->locked_region.start = 0;
1716 	vm->locked_region.size = 0;
1717 	mutex_unlock(&ptdev->mmu->as.slots_lock);
1718 }
1719 
1720 static void panthor_mmu_irq_handler(struct panthor_device *ptdev, u32 status)
1721 {
1722 	bool has_unhandled_faults = false;
1723 
1724 	status = panthor_mmu_fault_mask(ptdev, status);
1725 	while (status) {
1726 		u32 as = ffs(status | (status >> 16)) - 1;
1727 		u32 mask = panthor_mmu_as_fault_mask(ptdev, as);
1728 		u64 addr;
1729 		u32 fault_status;
1730 		u32 exception_type;
1731 		u32 access_type;
1732 		u32 source_id;
1733 
1734 		fault_status = gpu_read(ptdev, AS_FAULTSTATUS(as));
1735 		addr = gpu_read64(ptdev, AS_FAULTADDRESS(as));
1736 
1737 		/* decode the fault status */
1738 		exception_type = fault_status & 0xFF;
1739 		access_type = (fault_status >> 8) & 0x3;
1740 		source_id = (fault_status >> 16);
1741 
1742 		mutex_lock(&ptdev->mmu->as.slots_lock);
1743 
1744 		ptdev->mmu->as.faulty_mask |= mask;
1745 
1746 		/* terminal fault, print info about the fault */
1747 		drm_err(&ptdev->base,
1748 			"Unhandled Page fault in AS%d at VA 0x%016llX\n"
1749 			"raw fault status: 0x%X\n"
1750 			"decoded fault status: %s\n"
1751 			"exception type 0x%X: %s\n"
1752 			"access type 0x%X: %s\n"
1753 			"source id 0x%X\n",
1754 			as, addr,
1755 			fault_status,
1756 			(fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
1757 			exception_type, panthor_exception_name(ptdev, exception_type),
1758 			access_type, access_type_name(ptdev, fault_status),
1759 			source_id);
1760 
1761 		/* We don't handle VM faults at the moment, so let's just clear the
1762 		 * interrupt and let the writer/reader crash.
1763 		 * Note that COMPLETED irqs are never cleared, but this is fine
1764 		 * because they are always masked.
1765 		 */
1766 		gpu_write(ptdev, MMU_INT_CLEAR, mask);
1767 
1768 		if (ptdev->mmu->as.slots[as].vm)
1769 			ptdev->mmu->as.slots[as].vm->unhandled_fault = true;
1770 
1771 		/* Disable the MMU to kill jobs on this AS. */
1772 		panthor_mmu_as_disable(ptdev, as, false);
1773 		mutex_unlock(&ptdev->mmu->as.slots_lock);
1774 
1775 		status &= ~mask;
1776 		has_unhandled_faults = true;
1777 	}
1778 
1779 	if (has_unhandled_faults)
1780 		panthor_sched_report_mmu_fault(ptdev);
1781 }
1782 
1783 /**
1784  * panthor_mmu_suspend() - Suspend the MMU logic
1785  * @ptdev: Device.
1786  *
1787  * All we do here is de-assign the AS slots on all active VMs, so things
1788  * get flushed to main memory, and no further access to these VMs is
1789  * possible.
1790  *
1791  * We also suspend the MMU IRQ.
1792  */
1793 void panthor_mmu_suspend(struct panthor_device *ptdev)
1794 {
1795 	mutex_lock(&ptdev->mmu->as.slots_lock);
1796 	for (u32 i = 0; i < ARRAY_SIZE(ptdev->mmu->as.slots); i++) {
1797 		struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm;
1798 
1799 		if (vm) {
1800 			drm_WARN_ON(&ptdev->base,
1801 				    panthor_mmu_as_disable(ptdev, i, false));
1802 			panthor_vm_release_as_locked(vm);
1803 		}
1804 	}
1805 	mutex_unlock(&ptdev->mmu->as.slots_lock);
1806 
1807 	panthor_mmu_irq_suspend(&ptdev->mmu->irq);
1808 }
1809 
1810 /**
1811  * panthor_mmu_resume() - Resume the MMU logic
1812  * @ptdev: Device.
1813  *
1814  * Resume the IRQ.
1815  *
1816  * We don't re-enable previously active VMs. We assume other parts of the
1817  * driver will call panthor_vm_active() on the VMs they intend to use.
1818  */
1819 void panthor_mmu_resume(struct panthor_device *ptdev)
1820 {
1821 	mutex_lock(&ptdev->mmu->as.slots_lock);
1822 	ptdev->mmu->as.alloc_mask = 0;
1823 	ptdev->mmu->as.faulty_mask = 0;
1824 	mutex_unlock(&ptdev->mmu->as.slots_lock);
1825 
1826 	panthor_mmu_irq_resume(&ptdev->mmu->irq);
1827 }
1828 
1829 /**
1830  * panthor_mmu_pre_reset() - Prepare for a reset
1831  * @ptdev: Device.
1832  *
1833  * Suspend the IRQ, and make sure all VM_BIND queues are stopped, so we
1834  * don't get asked to do a VM operation while the GPU is down.
1835  *
1836  * We don't cleanly shutdown the AS slots here, because the reset might
1837  * come from an AS_ACTIVE_BIT stuck situation.
1838  */
1839 void panthor_mmu_pre_reset(struct panthor_device *ptdev)
1840 {
1841 	struct panthor_vm *vm;
1842 
1843 	panthor_mmu_irq_suspend(&ptdev->mmu->irq);
1844 
1845 	mutex_lock(&ptdev->mmu->vm.lock);
1846 	ptdev->mmu->vm.reset_in_progress = true;
1847 	list_for_each_entry(vm, &ptdev->mmu->vm.list, node)
1848 		panthor_vm_stop(vm);
1849 	mutex_unlock(&ptdev->mmu->vm.lock);
1850 }
1851 
1852 /**
1853  * panthor_mmu_post_reset() - Restore things after a reset
1854  * @ptdev: Device.
1855  *
1856  * Put the MMU logic back in action after a reset. That implies resuming the
1857  * IRQ and re-enabling the VM_BIND queues.
1858  */
1859 void panthor_mmu_post_reset(struct panthor_device *ptdev)
1860 {
1861 	struct panthor_vm *vm;
1862 
1863 	mutex_lock(&ptdev->mmu->as.slots_lock);
1864 
1865 	/* Now that the reset is effective, we can assume that none of the
1866 	 * AS slots are set up, and clear the faulty flags too.
1867 	 */
1868 	ptdev->mmu->as.alloc_mask = 0;
1869 	ptdev->mmu->as.faulty_mask = 0;
1870 
1871 	for (u32 i = 0; i < ARRAY_SIZE(ptdev->mmu->as.slots); i++) {
1872 		struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm;
1873 
1874 		if (vm)
1875 			panthor_vm_release_as_locked(vm);
1876 	}
1877 
1878 	mutex_unlock(&ptdev->mmu->as.slots_lock);
1879 
1880 	panthor_mmu_irq_resume(&ptdev->mmu->irq);
1881 
1882 	/* Restart the VM_BIND queues. */
1883 	mutex_lock(&ptdev->mmu->vm.lock);
1884 	list_for_each_entry(vm, &ptdev->mmu->vm.list, node) {
1885 		panthor_vm_start(vm);
1886 	}
1887 	ptdev->mmu->vm.reset_in_progress = false;
1888 	mutex_unlock(&ptdev->mmu->vm.lock);
1889 }
1890 
1891 static void panthor_vm_free(struct drm_gpuvm *gpuvm)
1892 {
1893 	struct panthor_vm *vm = container_of(gpuvm, struct panthor_vm, base);
1894 	struct panthor_device *ptdev = vm->ptdev;
1895 
1896 	mutex_lock(&vm->heaps.lock);
1897 	if (drm_WARN_ON(&ptdev->base, vm->heaps.pool))
1898 		panthor_heap_pool_destroy(vm->heaps.pool);
1899 	mutex_unlock(&vm->heaps.lock);
1900 	mutex_destroy(&vm->heaps.lock);
1901 
1902 	mutex_lock(&ptdev->mmu->vm.lock);
1903 	list_del(&vm->node);
1904 	/* Restore the scheduler state so we can call drm_sched_entity_destroy()
1905 	 * and drm_sched_fini(). If we get here, that means we have no jobs left
1906 	 * and no new jobs can be queued, so we can start the scheduler without
1907 	 * risking interfering with the reset.
1908 	 */
1909 	if (ptdev->mmu->vm.reset_in_progress)
1910 		panthor_vm_start(vm);
1911 	mutex_unlock(&ptdev->mmu->vm.lock);
1912 
1913 	drm_sched_entity_destroy(&vm->entity);
1914 	drm_sched_fini(&vm->sched);
1915 
1916 	mutex_lock(&vm->op_lock);
1917 	mutex_lock(&ptdev->mmu->as.slots_lock);
1918 	if (vm->as.id >= 0) {
1919 		int cookie;
1920 
1921 		if (drm_dev_enter(&ptdev->base, &cookie)) {
1922 			panthor_mmu_as_disable(ptdev, vm->as.id, false);
1923 			drm_dev_exit(cookie);
1924 		}
1925 
1926 		ptdev->mmu->as.slots[vm->as.id].vm = NULL;
1927 		clear_bit(vm->as.id, &ptdev->mmu->as.alloc_mask);
1928 		list_del(&vm->as.lru_node);
1929 	}
1930 	mutex_unlock(&ptdev->mmu->as.slots_lock);
1931 	mutex_unlock(&vm->op_lock);
1932 
1933 	free_io_pgtable_ops(vm->pgtbl_ops);
1934 
1935 	drm_mm_takedown(&vm->mm);
1936 	kfree(vm);
1937 }
1938 
1939 /**
1940  * panthor_vm_put() - Release a reference on a VM
1941  * @vm: VM to release the reference on. Can be NULL.
1942  */
1943 void panthor_vm_put(struct panthor_vm *vm)
1944 {
1945 	drm_gpuvm_put(vm ? &vm->base : NULL);
1946 }
1947 
1948 /**
1949  * panthor_vm_get() - Get a VM reference
1950  * @vm: VM to get the reference on. Can be NULL.
1951  *
1952  * Return: @vm value.
1953  */
1954 struct panthor_vm *panthor_vm_get(struct panthor_vm *vm)
1955 {
1956 	if (vm)
1957 		drm_gpuvm_get(&vm->base);
1958 
1959 	return vm;
1960 }
1961 
1962 /**
1963  * panthor_vm_get_heap_pool() - Get the heap pool attached to a VM
1964  * @vm: VM to query the heap pool on.
1965  * @create: True if the heap pool should be created when it doesn't exist.
1966  *
1967  * Heap pools are per-VM. This function allows one to retrieve the heap pool
1968  * attached to a VM.
1969  *
1970  * If no heap pool exists yet, and @create is true, we create one.
1971  *
1972  * The returned panthor_heap_pool should be released with panthor_heap_pool_put().
1973  *
1974  * Return: A valid pointer on success, an ERR_PTR() otherwise.
1975  */
1976 struct panthor_heap_pool *panthor_vm_get_heap_pool(struct panthor_vm *vm, bool create)
1977 {
1978 	struct panthor_heap_pool *pool;
1979 
1980 	mutex_lock(&vm->heaps.lock);
1981 	if (!vm->heaps.pool && create) {
1982 		if (vm->destroyed)
1983 			pool = ERR_PTR(-EINVAL);
1984 		else
1985 			pool = panthor_heap_pool_create(vm->ptdev, vm);
1986 
1987 		if (!IS_ERR(pool))
1988 			vm->heaps.pool = panthor_heap_pool_get(pool);
1989 	} else {
1990 		pool = panthor_heap_pool_get(vm->heaps.pool);
1991 		if (!pool)
1992 			pool = ERR_PTR(-ENOENT);
1993 	}
1994 	mutex_unlock(&vm->heaps.lock);
1995 
1996 	return pool;
1997 }
1998 
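/*
 * Minimal usage sketch (illustrative only): callers that just want to look
 * up an existing pool pass create = false and balance the reference with
 * panthor_heap_pool_put(), as documented above:
 *
 *	struct panthor_heap_pool *pool;
 *
 *	pool = panthor_vm_get_heap_pool(vm, false);
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 *
 *	... use the pool ...
 *
 *	panthor_heap_pool_put(pool);
 */
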
1999 /**
2000  * panthor_vm_heaps_sizes() - Calculate size of all heap chunks across all
2001  * heaps over all the heap pools in a VM
2002  * @pfile: File.
2003  * @stats: Memory stats to be updated.
2004  *
2005  * Calculate all heap chunk sizes in all heap pools bound to a VM. If the VM
2006  * is active, record the size as active as well.
2007  */
2008 void panthor_vm_heaps_sizes(struct panthor_file *pfile, struct drm_memory_stats *stats)
2009 {
2010 	struct panthor_vm *vm;
2011 	unsigned long i;
2012 
2013 	if (!pfile->vms)
2014 		return;
2015 
2016 	xa_lock(&pfile->vms->xa);
2017 	xa_for_each(&pfile->vms->xa, i, vm) {
2018 		size_t size = panthor_heap_pool_size(vm->heaps.pool);
2019 		stats->resident += size;
2020 		if (vm->as.id >= 0)
2021 			stats->active += size;
2022 	}
2023 	xa_unlock(&pfile->vms->xa);
2024 }
2025 
2026 static u64 mair_to_memattr(u64 mair, bool coherent)
2027 {
2028 	u64 memattr = 0;
2029 	u32 i;
2030 
2031 	for (i = 0; i < 8; i++) {
2032 		u8 in_attr = mair >> (8 * i), out_attr;
2033 		u8 outer = in_attr >> 4, inner = in_attr & 0xf;
2034 
2035 		/* For caching to be enabled, inner and outer caching policy
2036 	 * have to both be write-back; if one of them is write-through
2037 		 * or non-cacheable, we just choose non-cacheable. Device
2038 		 * memory is also translated to non-cacheable.
2039 		 */
2040 		if (!(outer & 3) || !(outer & 4) || !(inner & 4)) {
2041 			out_attr = AS_MEMATTR_AARCH64_INNER_OUTER_NC |
2042 				   AS_MEMATTR_AARCH64_SH_MIDGARD_INNER |
2043 				   AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(false, false);
2044 		} else {
2045 			out_attr = AS_MEMATTR_AARCH64_INNER_OUTER_WB |
2046 				   AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(inner & 1, inner & 2);
2047 			/* Use SH_MIDGARD_INNER mode when device isn't coherent,
2048 			 * so SH_IS, which is used when IOMMU_CACHE is set, maps
2049 			 * to Mali's internal-shareable mode. As per the Mali
2050 			 * Spec, inner and outer-shareable modes aren't allowed
2051 			 * for WB memory when coherency is disabled.
2052 			 * Use SH_CPU_INNER mode when coherency is enabled, so
2053 			 * that SH_IS actually maps to the standard definition of
2054 			 * inner-shareable.
2055 			 */
2056 			if (!coherent)
2057 				out_attr |= AS_MEMATTR_AARCH64_SH_MIDGARD_INNER;
2058 			else
2059 				out_attr |= AS_MEMATTR_AARCH64_SH_CPU_INNER;
2060 		}
2061 
2062 		memattr |= (u64)out_attr << (8 * i);
2063 	}
2064 
2065 	return memattr;
2066 }
2067 
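/*
 * Worked example for the translation above (illustrative): a MAIR attribute
 * of 0xff (Normal memory, inner/outer write-back, read/write-allocate) has
 * outer = 0xf and inner = 0xf, so it takes the write-back path and becomes
 * AS_MEMATTR_AARCH64_INNER_OUTER_WB with both allocation hints set, plus the
 * shareability mode picked from the coherent argument. A MAIR attribute of
 * 0x44 (Normal non-cacheable) has outer = 0x4, so (outer & 3) == 0 and it is
 * translated to AS_MEMATTR_AARCH64_INNER_OUTER_NC, just like Device memory.
 */
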
2068 static void panthor_vma_link(struct panthor_vm *vm,
2069 			     struct panthor_vma *vma,
2070 			     struct drm_gpuvm_bo *vm_bo)
2071 {
2072 	struct panthor_gem_object *bo = to_panthor_bo(vma->base.gem.obj);
2073 
2074 	mutex_lock(&bo->base.base.gpuva.lock);
2075 	drm_gpuva_link(&vma->base, vm_bo);
2076 	mutex_unlock(&bo->base.base.gpuva.lock);
2077 }
2078 
2079 static void panthor_vma_unlink(struct panthor_vma *vma)
2080 {
2081 	drm_gpuva_unlink_defer(&vma->base);
2082 	kfree(vma);
2083 }
2084 
2085 static void panthor_vma_init(struct panthor_vma *vma, u32 flags)
2086 {
2087 	INIT_LIST_HEAD(&vma->node);
2088 	vma->flags = flags;
2089 }
2090 
2091 #define PANTHOR_VM_MAP_FLAGS \
2092 	(DRM_PANTHOR_VM_BIND_OP_MAP_READONLY | \
2093 	 DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC | \
2094 	 DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED)
2095 
2096 static int panthor_gpuva_sm_step_map(struct drm_gpuva_op *op, void *priv)
2097 {
2098 	struct panthor_vm *vm = priv;
2099 	struct panthor_vm_op_ctx *op_ctx = vm->op_ctx;
2100 	struct panthor_vma *vma = panthor_vm_op_ctx_get_vma(op_ctx);
2101 	int ret;
2102 
2103 	if (!vma)
2104 		return -EINVAL;
2105 
2106 	panthor_vma_init(vma, op_ctx->flags & PANTHOR_VM_MAP_FLAGS);
2107 
2108 	ret = panthor_vm_map_pages(vm, op->map.va.addr, flags_to_prot(vma->flags),
2109 				   op_ctx->map.sgt, op->map.gem.offset,
2110 				   op->map.va.range);
2111 	if (ret) {
2112 		panthor_vm_op_ctx_return_vma(op_ctx, vma);
2113 		return ret;
2114 	}
2115 
2116 	drm_gpuva_map(&vm->base, &vma->base, &op->map);
2117 	panthor_vma_link(vm, vma, op_ctx->map.vm_bo);
2118 
2119 	drm_gpuvm_bo_put_deferred(op_ctx->map.vm_bo);
2120 	op_ctx->map.vm_bo = NULL;
2121 
2122 	return 0;
2123 }
2124 
2125 static bool
2126 iova_mapped_as_huge_page(struct drm_gpuva_op_map *op, u64 addr)
2127 {
2128 	const struct page *pg;
2129 	pgoff_t bo_offset;
2130 
2131 	bo_offset = addr - op->va.addr + op->gem.offset;
2132 	pg = to_panthor_bo(op->gem.obj)->base.pages[bo_offset >> PAGE_SHIFT];
2133 
2134 	return folio_size(page_folio(pg)) >= SZ_2M;
2135 }
2136 
2137 static void
2138 unmap_hugepage_align(const struct drm_gpuva_op_remap *op,
2139 		     u64 *unmap_start, u64 *unmap_range)
2140 {
2141 	u64 aligned_unmap_start, aligned_unmap_end, unmap_end;
2142 
2143 	unmap_end = *unmap_start + *unmap_range;
2144 	aligned_unmap_start = ALIGN_DOWN(*unmap_start, SZ_2M);
2145 	aligned_unmap_end = ALIGN(unmap_end, SZ_2M);
2146 
2147 	/* If we're dealing with a huge page, make sure the unmap region is
2148 	 * aligned on the start of the page.
2149 	 */
2150 	if (op->prev && aligned_unmap_start < *unmap_start &&
2151 	    op->prev->va.addr <= aligned_unmap_start &&
2152 	    iova_mapped_as_huge_page(op->prev, *unmap_start)) {
2153 		*unmap_range += *unmap_start - aligned_unmap_start;
2154 		*unmap_start = aligned_unmap_start;
2155 	}
2156 
2157 	/* If we're dealing with a huge page, make sure the unmap region is
2158 	 * aligned on the end of the page.
2159 	 */
2160 	if (op->next && aligned_unmap_end > unmap_end &&
2161 	    op->next->va.addr + op->next->va.range >= aligned_unmap_end &&
2162 	    iova_mapped_as_huge_page(op->next, unmap_end - 1)) {
2163 		*unmap_range += aligned_unmap_end - unmap_end;
2164 	}
2165 }
2166 
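/*
 * Worked example for the alignment above (illustrative): a BO mapped with a
 * 2MiB huge page at VA 0x200000, from which [0x300000, 0x310000) gets
 * unmapped. op->prev then covers [0x200000, 0x300000) and op->next covers
 * [0x310000, 0x400000), both backed by the same huge page, so the unmap
 * region is grown to the whole [0x200000, 0x400000) page, and the prev/next
 * parts are re-mapped with smaller granules by the remap step.
 */
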
2167 static int panthor_gpuva_sm_step_remap(struct drm_gpuva_op *op,
2168 				       void *priv)
2169 {
2170 	struct panthor_vma *unmap_vma = container_of(op->remap.unmap->va, struct panthor_vma, base);
2171 	struct panthor_vm *vm = priv;
2172 	struct panthor_vm_op_ctx *op_ctx = vm->op_ctx;
2173 	struct panthor_vma *prev_vma = NULL, *next_vma = NULL;
2174 	u64 unmap_start, unmap_range;
2175 	int ret;
2176 
2177 	drm_gpuva_op_remap_to_unmap_range(&op->remap, &unmap_start, &unmap_range);
2178 
2179 	/*
2180 	 * ARM IOMMU page table management code disallows partial unmaps of huge pages,
2181 	 * so when a partial unmap is requested, we must first unmap the entire huge
2182 	 * page and then remap the portion of the huge page that falls outside the requested
2183 	 * unmap region. Calculating the right start address and range for the expanded
2184 	 * unmap operation is the responsibility of the following function.
2185 	 */
2186 	unmap_hugepage_align(&op->remap, &unmap_start, &unmap_range);
2187 
2188 	/* If the range changed, we might have to lock a wider region to guarantee
2189 	 * atomicity. panthor_vm_lock_region() bails out early if the new region
2190 	 * is already part of the locked region, so no need to do this check here.
2191 	 */
2192 	panthor_vm_lock_region(vm, unmap_start, unmap_range);
2193 	panthor_vm_unmap_pages(vm, unmap_start, unmap_range);
2194 
2195 	if (op->remap.prev) {
2196 		struct panthor_gem_object *bo = to_panthor_bo(op->remap.prev->gem.obj);
2197 		u64 offset = op->remap.prev->gem.offset + unmap_start - op->remap.prev->va.addr;
2198 		u64 size = op->remap.prev->va.addr + op->remap.prev->va.range - unmap_start;
2199 
2200 		ret = panthor_vm_map_pages(vm, unmap_start, flags_to_prot(unmap_vma->flags),
2201 					   bo->base.sgt, offset, size);
2202 		if (ret)
2203 			return ret;
2204 
2205 		prev_vma = panthor_vm_op_ctx_get_vma(op_ctx);
2206 		panthor_vma_init(prev_vma, unmap_vma->flags);
2207 	}
2208 
2209 	if (op->remap.next) {
2210 		struct panthor_gem_object *bo = to_panthor_bo(op->remap.next->gem.obj);
2211 		u64 addr = op->remap.next->va.addr;
2212 		u64 size = unmap_start + unmap_range - op->remap.next->va.addr;
2213 
2214 		ret = panthor_vm_map_pages(vm, addr, flags_to_prot(unmap_vma->flags),
2215 					   bo->base.sgt, op->remap.next->gem.offset, size);
2216 		if (ret)
2217 			return ret;
2218 
2219 		next_vma = panthor_vm_op_ctx_get_vma(op_ctx);
2220 		panthor_vma_init(next_vma, unmap_vma->flags);
2221 	}
2222 
2223 	drm_gpuva_remap(prev_vma ? &prev_vma->base : NULL,
2224 			next_vma ? &next_vma->base : NULL,
2225 			&op->remap);
2226 
2227 	if (prev_vma) {
2228 		/* panthor_vma_link() transfers the vm_bo ownership to
2229 		 * the VMA object. Since the vm_bo we're passing is still
2230 		 * owned by the old mapping which will be released when this
2231 		 * mapping is destroyed, we need to grab a ref here.
2232 		 */
2233 		panthor_vma_link(vm, prev_vma, op->remap.unmap->va->vm_bo);
2234 	}
2235 
2236 	if (next_vma) {
2237 		panthor_vma_link(vm, next_vma, op->remap.unmap->va->vm_bo);
2238 	}
2239 
2240 	panthor_vma_unlink(unmap_vma);
2241 	return 0;
2242 }
2243 
2244 static int panthor_gpuva_sm_step_unmap(struct drm_gpuva_op *op,
2245 				       void *priv)
2246 {
2247 	struct panthor_vma *unmap_vma = container_of(op->unmap.va, struct panthor_vma, base);
2248 	struct panthor_vm *vm = priv;
2249 
2250 	panthor_vm_unmap_pages(vm, unmap_vma->base.va.addr,
2251 			       unmap_vma->base.va.range);
2252 	drm_gpuva_unmap(&op->unmap);
2253 	panthor_vma_unlink(unmap_vma);
2254 	return 0;
2255 }
2256 
2257 static const struct drm_gpuvm_ops panthor_gpuvm_ops = {
2258 	.vm_free = panthor_vm_free,
2259 	.vm_bo_free = panthor_vm_bo_free,
2260 	.sm_step_map = panthor_gpuva_sm_step_map,
2261 	.sm_step_remap = panthor_gpuva_sm_step_remap,
2262 	.sm_step_unmap = panthor_gpuva_sm_step_unmap,
2263 };
2264 
2265 /**
2266  * panthor_vm_resv() - Get the dma_resv object attached to a VM.
2267  * @vm: VM to get the dma_resv of.
2268  *
2269  * Return: A dma_resv object.
2270  */
2271 struct dma_resv *panthor_vm_resv(struct panthor_vm *vm)
2272 {
2273 	return drm_gpuvm_resv(&vm->base);
2274 }
2275 
2276 struct drm_gem_object *panthor_vm_root_gem(struct panthor_vm *vm)
2277 {
2278 	if (!vm)
2279 		return NULL;
2280 
2281 	return vm->base.r_obj;
2282 }
2283 
2284 static int
2285 panthor_vm_exec_op(struct panthor_vm *vm, struct panthor_vm_op_ctx *op,
2286 		   bool flag_vm_unusable_on_failure)
2287 {
2288 	u32 op_type = op->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK;
2289 	int ret;
2290 
2291 	if (op_type == DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY)
2292 		return 0;
2293 
2294 	mutex_lock(&vm->op_lock);
2295 	vm->op_ctx = op;
2296 
2297 	ret = panthor_vm_lock_region(vm, op->va.addr, op->va.range);
2298 	if (ret)
2299 		goto out;
2300 
2301 	switch (op_type) {
2302 	case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP: {
2303 		const struct drm_gpuvm_map_req map_req = {
2304 			.map.va.addr = op->va.addr,
2305 			.map.va.range = op->va.range,
2306 			.map.gem.obj = op->map.vm_bo->obj,
2307 			.map.gem.offset = op->map.bo_offset,
2308 		};
2309 
2310 		if (vm->unusable) {
2311 			ret = -EINVAL;
2312 			break;
2313 		}
2314 
2315 		ret = drm_gpuvm_sm_map(&vm->base, vm, &map_req);
2316 		break;
2317 	}
2318 
2319 	case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
2320 		ret = drm_gpuvm_sm_unmap(&vm->base, vm, op->va.addr, op->va.range);
2321 		break;
2322 
2323 	default:
2324 		ret = -EINVAL;
2325 		break;
2326 	}
2327 
2328 	panthor_vm_unlock_region(vm);
2329 
2330 out:
2331 	if (ret && flag_vm_unusable_on_failure)
2332 		panthor_vm_declare_unusable(vm);
2333 
2334 	vm->op_ctx = NULL;
2335 	mutex_unlock(&vm->op_lock);
2336 
2337 	return ret;
2338 }
2339 
2340 static struct dma_fence *
2341 panthor_vm_bind_run_job(struct drm_sched_job *sched_job)
2342 {
2343 	struct panthor_vm_bind_job *job = container_of(sched_job, struct panthor_vm_bind_job, base);
2344 	bool cookie;
2345 	int ret;
2346 
2347 	/* Not only do we report an error that is propagated to the
2348 	 * drm_sched finished fence, but we also flag the VM as unusable, because
2349 	 * a failure in an async VM_BIND results in an inconsistent state: the VM needs
2350 	 * to be destroyed and recreated.
2351 	 */
2352 	cookie = dma_fence_begin_signalling();
2353 	ret = panthor_vm_exec_op(job->vm, &job->ctx, true);
2354 	dma_fence_end_signalling(cookie);
2355 
2356 	return ret ? ERR_PTR(ret) : NULL;
2357 }
2358 
2359 static void panthor_vm_bind_job_release(struct kref *kref)
2360 {
2361 	struct panthor_vm_bind_job *job = container_of(kref, struct panthor_vm_bind_job, refcount);
2362 
2363 	if (job->base.s_fence)
2364 		drm_sched_job_cleanup(&job->base);
2365 
2366 	panthor_vm_cleanup_op_ctx(&job->ctx, job->vm);
2367 	panthor_vm_put(job->vm);
2368 	kfree(job);
2369 }
2370 
2371 /**
2372  * panthor_vm_bind_job_put() - Release a VM_BIND job reference
2373  * @sched_job: Job to release the reference on.
2374  */
2375 void panthor_vm_bind_job_put(struct drm_sched_job *sched_job)
2376 {
2377 	struct panthor_vm_bind_job *job =
2378 		container_of(sched_job, struct panthor_vm_bind_job, base);
2379 
2380 	if (sched_job)
2381 		kref_put(&job->refcount, panthor_vm_bind_job_release);
2382 }
2383 
2384 static void
2385 panthor_vm_bind_free_job(struct drm_sched_job *sched_job)
2386 {
2387 	struct panthor_vm_bind_job *job =
2388 		container_of(sched_job, struct panthor_vm_bind_job, base);
2389 
2390 	drm_sched_job_cleanup(sched_job);
2391 
2392 	/* Do the heavy cleanups asynchronously, so we're out of the
2393 	 * dma-signaling path and can acquire dma-resv locks safely.
2394 	 */
2395 	queue_work(panthor_cleanup_wq, &job->cleanup_op_ctx_work);
2396 }
2397 
2398 static enum drm_gpu_sched_stat
2399 panthor_vm_bind_timedout_job(struct drm_sched_job *sched_job)
2400 {
2401 	WARN(1, "VM_BIND ops are synchronous for now, there should be no timeout!");
2402 	return DRM_GPU_SCHED_STAT_RESET;
2403 }
2404 
2405 static const struct drm_sched_backend_ops panthor_vm_bind_ops = {
2406 	.run_job = panthor_vm_bind_run_job,
2407 	.free_job = panthor_vm_bind_free_job,
2408 	.timedout_job = panthor_vm_bind_timedout_job,
2409 };
2410 
2411 /**
2412  * panthor_vm_create() - Create a VM
2413  * @ptdev: Device.
2414  * @for_mcu: True if this is the FW MCU VM.
2415  * @kernel_va_start: Start of the range reserved for kernel BO mapping.
2416  * @kernel_va_size: Size of the range reserved for kernel BO mapping.
2417  * @auto_kernel_va_start: Start of the auto-VA kernel range.
2418  * @auto_kernel_va_size: Size of the auto-VA kernel range.
2419  *
2420  * Return: A valid pointer on success, an ERR_PTR() otherwise.
2421  */
2422 struct panthor_vm *
2423 panthor_vm_create(struct panthor_device *ptdev, bool for_mcu,
2424 		  u64 kernel_va_start, u64 kernel_va_size,
2425 		  u64 auto_kernel_va_start, u64 auto_kernel_va_size)
2426 {
2427 	u32 va_bits = GPU_MMU_FEATURES_VA_BITS(ptdev->gpu_info.mmu_features);
2428 	u32 pa_bits = GPU_MMU_FEATURES_PA_BITS(ptdev->gpu_info.mmu_features);
2429 	u64 full_va_range = 1ull << va_bits;
2430 	struct drm_gem_object *dummy_gem;
2431 	struct drm_gpu_scheduler *sched;
2432 	const struct drm_sched_init_args sched_args = {
2433 		.ops = &panthor_vm_bind_ops,
2434 		.submit_wq = ptdev->mmu->vm.wq,
2435 		.num_rqs = 1,
2436 		.credit_limit = 1,
2437 		/* Bind operations are synchronous for now, no timeout needed. */
2438 		.timeout = MAX_SCHEDULE_TIMEOUT,
2439 		.name = "panthor-vm-bind",
2440 		.dev = ptdev->base.dev,
2441 	};
2442 	struct io_pgtable_cfg pgtbl_cfg;
2443 	u64 mair, min_va, va_range;
2444 	struct panthor_vm *vm;
2445 	int ret;
2446 
2447 	vm = kzalloc_obj(*vm);
2448 	if (!vm)
2449 		return ERR_PTR(-ENOMEM);
2450 
2451 	/* We allocate a dummy GEM for the VM. */
2452 	dummy_gem = drm_gpuvm_resv_object_alloc(&ptdev->base);
2453 	if (!dummy_gem) {
2454 		ret = -ENOMEM;
2455 		goto err_free_vm;
2456 	}
2457 
2458 	mutex_init(&vm->heaps.lock);
2459 	vm->for_mcu = for_mcu;
2460 	vm->ptdev = ptdev;
2461 	mutex_init(&vm->op_lock);
2462 
2463 	if (for_mcu) {
2464 		/* The CSF MCU is a Cortex-M7 and can only address 4G */
2465 		min_va = 0;
2466 		va_range = SZ_4G;
2467 	} else {
2468 		min_va = 0;
2469 		va_range = full_va_range;
2470 	}
2471 
2472 	mutex_init(&vm->mm_lock);
2473 	drm_mm_init(&vm->mm, kernel_va_start, kernel_va_size);
2474 	vm->kernel_auto_va.start = auto_kernel_va_start;
2475 	vm->kernel_auto_va.end = vm->kernel_auto_va.start + auto_kernel_va_size - 1;
2476 
2477 	INIT_LIST_HEAD(&vm->node);
2478 	INIT_LIST_HEAD(&vm->as.lru_node);
2479 	vm->as.id = -1;
2480 	refcount_set(&vm->as.active_cnt, 0);
2481 
2482 	pgtbl_cfg = (struct io_pgtable_cfg) {
2483 		.pgsize_bitmap	= SZ_4K | SZ_2M,
2484 		.ias		= va_bits,
2485 		.oas		= pa_bits,
2486 		.coherent_walk	= ptdev->coherent,
2487 		.tlb		= &mmu_tlb_ops,
2488 		.iommu_dev	= ptdev->base.dev,
2489 		.alloc		= alloc_pt,
2490 		.free		= free_pt,
2491 	};
2492 
2493 	vm->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &pgtbl_cfg, vm);
2494 	if (!vm->pgtbl_ops) {
2495 		ret = -EINVAL;
2496 		goto err_mm_takedown;
2497 	}
2498 
2499 	ret = drm_sched_init(&vm->sched, &sched_args);
2500 	if (ret)
2501 		goto err_free_io_pgtable;
2502 
2503 	sched = &vm->sched;
2504 	ret = drm_sched_entity_init(&vm->entity, 0, &sched, 1, NULL);
2505 	if (ret)
2506 		goto err_sched_fini;
2507 
2508 	mair = io_pgtable_ops_to_pgtable(vm->pgtbl_ops)->cfg.arm_lpae_s1_cfg.mair;
2509 	vm->memattr = mair_to_memattr(mair, ptdev->coherent);
2510 
2511 	mutex_lock(&ptdev->mmu->vm.lock);
2512 	list_add_tail(&vm->node, &ptdev->mmu->vm.list);
2513 
2514 	/* If a reset is in progress, stop the scheduler. */
2515 	if (ptdev->mmu->vm.reset_in_progress)
2516 		panthor_vm_stop(vm);
2517 	mutex_unlock(&ptdev->mmu->vm.lock);
2518 
2519 	/* We intentionally leave the reserved range at zero, because we want kernel VMAs
2520 	 * to be handled the same way user VMAs are.
2521 	 */
2522 	drm_gpuvm_init(&vm->base, for_mcu ? "panthor-MCU-VM" : "panthor-GPU-VM",
2523 		       DRM_GPUVM_RESV_PROTECTED | DRM_GPUVM_IMMEDIATE_MODE,
2524 		       &ptdev->base, dummy_gem, min_va, va_range, 0, 0,
2525 		       &panthor_gpuvm_ops);
2526 	drm_gem_object_put(dummy_gem);
2527 	return vm;
2528 
2529 err_sched_fini:
2530 	drm_sched_fini(&vm->sched);
2531 
2532 err_free_io_pgtable:
2533 	free_io_pgtable_ops(vm->pgtbl_ops);
2534 
2535 err_mm_takedown:
2536 	drm_mm_takedown(&vm->mm);
2537 	drm_gem_object_put(dummy_gem);
2538 
2539 err_free_vm:
2540 	kfree(vm);
2541 	return ERR_PTR(ret);
2542 }
2543 
2544 static int
2545 panthor_vm_bind_prepare_op_ctx(struct drm_file *file,
2546 			       struct panthor_vm *vm,
2547 			       const struct drm_panthor_vm_bind_op *op,
2548 			       struct panthor_vm_op_ctx *op_ctx)
2549 {
2550 	ssize_t vm_pgsz = panthor_vm_page_size(vm);
2551 	struct drm_gem_object *gem;
2552 	int ret;
2553 
2554 	/* Aligned on page size. */
2555 	if (!IS_ALIGNED(op->va | op->size | op->bo_offset, vm_pgsz))
2556 		return -EINVAL;
2557 
2558 	switch (op->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) {
2559 	case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP:
2560 		gem = drm_gem_object_lookup(file, op->bo_handle);
2561 		ret = panthor_vm_prepare_map_op_ctx(op_ctx, vm,
2562 						    gem ? to_panthor_bo(gem) : NULL,
2563 						    op->bo_offset,
2564 						    op->size,
2565 						    op->va,
2566 						    op->flags);
2567 		drm_gem_object_put(gem);
2568 		return ret;
2569 
2570 	case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
2571 		if (op->flags & ~DRM_PANTHOR_VM_BIND_OP_TYPE_MASK)
2572 			return -EINVAL;
2573 
2574 		if (op->bo_handle || op->bo_offset)
2575 			return -EINVAL;
2576 
2577 		return panthor_vm_prepare_unmap_op_ctx(op_ctx, vm, op->va, op->size);
2578 
2579 	case DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY:
2580 		if (op->flags & ~DRM_PANTHOR_VM_BIND_OP_TYPE_MASK)
2581 			return -EINVAL;
2582 
2583 		if (op->bo_handle || op->bo_offset)
2584 			return -EINVAL;
2585 
2586 		if (op->va || op->size)
2587 			return -EINVAL;
2588 
2589 		if (!op->syncs.count)
2590 			return -EINVAL;
2591 
2592 		panthor_vm_prepare_sync_only_op_ctx(op_ctx, vm);
2593 		return 0;
2594 
2595 	default:
2596 		return -EINVAL;
2597 	}
2598 }
2599 
2600 static void panthor_vm_bind_job_cleanup_op_ctx_work(struct work_struct *work)
2601 {
2602 	struct panthor_vm_bind_job *job =
2603 		container_of(work, struct panthor_vm_bind_job, cleanup_op_ctx_work);
2604 
2605 	panthor_vm_bind_job_put(&job->base);
2606 }
2607 
2608 /**
2609  * panthor_vm_bind_job_create() - Create a VM_BIND job
2610  * @file: File.
2611  * @vm: VM targeted by the VM_BIND job.
2612  * @op: VM operation data.
2613  *
2614  * Return: A valid pointer on success, an ERR_PTR() otherwise.
2615  */
2616 struct drm_sched_job *
2617 panthor_vm_bind_job_create(struct drm_file *file,
2618 			   struct panthor_vm *vm,
2619 			   const struct drm_panthor_vm_bind_op *op)
2620 {
2621 	struct panthor_vm_bind_job *job;
2622 	int ret;
2623 
2624 	if (!vm)
2625 		return ERR_PTR(-EINVAL);
2626 
2627 	if (vm->destroyed || vm->unusable)
2628 		return ERR_PTR(-EINVAL);
2629 
2630 	job = kzalloc_obj(*job);
2631 	if (!job)
2632 		return ERR_PTR(-ENOMEM);
2633 
2634 	ret = panthor_vm_bind_prepare_op_ctx(file, vm, op, &job->ctx);
2635 	if (ret) {
2636 		kfree(job);
2637 		return ERR_PTR(ret);
2638 	}
2639 
2640 	INIT_WORK(&job->cleanup_op_ctx_work, panthor_vm_bind_job_cleanup_op_ctx_work);
2641 	kref_init(&job->refcount);
2642 	job->vm = panthor_vm_get(vm);
2643 
2644 	ret = drm_sched_job_init(&job->base, &vm->entity, 1, vm, file->client_id);
2645 	if (ret)
2646 		goto err_put_job;
2647 
2648 	return &job->base;
2649 
2650 err_put_job:
2651 	panthor_vm_bind_job_put(&job->base);
2652 	return ERR_PTR(ret);
2653 }
2654 
2655 /**
2656  * panthor_vm_bind_job_prepare_resvs() - Prepare VM_BIND job dma_resvs
2657  * @exec: The locking/preparation context.
2658  * @sched_job: The job to prepare resvs on.
2659  *
2660  * Locks and prepare the VM resv.
2661  *
2662  * If this is a map operation, locks and prepares the GEM resv.
2663  *
2664  * Return: 0 on success, a negative error code otherwise.
2665  */
2666 int panthor_vm_bind_job_prepare_resvs(struct drm_exec *exec,
2667 				      struct drm_sched_job *sched_job)
2668 {
2669 	struct panthor_vm_bind_job *job = container_of(sched_job, struct panthor_vm_bind_job, base);
2670 	int ret;
2671 
2672 	/* Acquire the VM lock and reserve a slot for this VM bind job. */
2673 	ret = drm_gpuvm_prepare_vm(&job->vm->base, exec, 1);
2674 	if (ret)
2675 		return ret;
2676 
2677 	if (job->ctx.map.vm_bo) {
2678 		/* Lock/prepare the GEM being mapped. */
2679 		ret = drm_exec_prepare_obj(exec, job->ctx.map.vm_bo->obj, 1);
2680 		if (ret)
2681 			return ret;
2682 	}
2683 
2684 	return 0;
2685 }
2686 
2687 /**
2688  * panthor_vm_bind_job_update_resvs() - Update the resv objects touched by a job
2689  * @exec: drm_exec context.
2690  * @sched_job: Job to update the resvs on.
2691  */
2692 void panthor_vm_bind_job_update_resvs(struct drm_exec *exec,
2693 				      struct drm_sched_job *sched_job)
2694 {
2695 	struct panthor_vm_bind_job *job = container_of(sched_job, struct panthor_vm_bind_job, base);
2696 
2697 	/* Explicit sync => we just register our job finished fence as bookkeep. */
2698 	drm_gpuvm_resv_add_fence(&job->vm->base, exec,
2699 				 &sched_job->s_fence->finished,
2700 				 DMA_RESV_USAGE_BOOKKEEP,
2701 				 DMA_RESV_USAGE_BOOKKEEP);
2702 }
2703 
2704 void panthor_vm_update_resvs(struct panthor_vm *vm, struct drm_exec *exec,
2705 			     struct dma_fence *fence,
2706 			     enum dma_resv_usage private_usage,
2707 			     enum dma_resv_usage extobj_usage)
2708 {
2709 	drm_gpuvm_resv_add_fence(&vm->base, exec, fence, private_usage, extobj_usage);
2710 }
2711 
2712 /**
2713  * panthor_vm_bind_exec_sync_op() - Execute a VM_BIND operation synchronously.
2714  * @file: File.
2715  * @vm: VM targeted by the VM operation.
2716  * @op: Data describing the VM operation.
2717  *
2718  * Return: 0 on success, a negative error code otherwise.
2719  */
2720 int panthor_vm_bind_exec_sync_op(struct drm_file *file,
2721 				 struct panthor_vm *vm,
2722 				 struct drm_panthor_vm_bind_op *op)
2723 {
2724 	struct panthor_vm_op_ctx op_ctx;
2725 	int ret;
2726 
2727 	/* No sync objects allowed on synchronous operations. */
2728 	if (op->syncs.count)
2729 		return -EINVAL;
2730 
2731 	if (!op->size)
2732 		return 0;
2733 
2734 	ret = panthor_vm_bind_prepare_op_ctx(file, vm, op, &op_ctx);
2735 	if (ret)
2736 		return ret;
2737 
2738 	ret = panthor_vm_exec_op(vm, &op_ctx, false);
2739 	panthor_vm_cleanup_op_ctx(&op_ctx, vm);
2740 
2741 	return ret;
2742 }
2743 
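/*
 * Minimal usage sketch (illustrative, assuming the drm_panthor_vm_bind_op
 * uAPI layout): a synchronous unmap of a 64k range would be submitted as
 *
 *	struct drm_panthor_vm_bind_op op = {
 *		.flags = DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP,
 *		.va = 0x400000,
 *		.size = 0x10000,
 *	};
 *
 *	ret = panthor_vm_bind_exec_sync_op(file, vm, &op);
 *
 * with op.syncs left empty, since sync objects are rejected on the
 * synchronous path.
 */
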
2744 /**
2745  * panthor_vm_map_bo_range() - Map a GEM object range to a VM
2746  * @vm: VM to map the GEM to.
2747  * @bo: GEM object to map.
2748  * @offset: Offset in the GEM object.
2749  * @size: Size to map.
2750  * @va: Virtual address to map the object to.
2751  * @flags: Combination of drm_panthor_vm_bind_op_flags flags.
2752  * Only map-related flags are valid.
2753  *
2754  * Internal use only. For userspace requests, use
2755  * panthor_vm_bind_exec_sync_op() instead.
2756  *
2757  * Return: 0 on success, a negative error code otherwise.
2758  */
2759 int panthor_vm_map_bo_range(struct panthor_vm *vm, struct panthor_gem_object *bo,
2760 			    u64 offset, u64 size, u64 va, u32 flags)
2761 {
2762 	struct panthor_vm_op_ctx op_ctx;
2763 	int ret;
2764 
2765 	ret = panthor_vm_prepare_map_op_ctx(&op_ctx, vm, bo, offset, size, va, flags);
2766 	if (ret)
2767 		return ret;
2768 
2769 	ret = panthor_vm_exec_op(vm, &op_ctx, false);
2770 	panthor_vm_cleanup_op_ctx(&op_ctx, vm);
2771 
2772 	return ret;
2773 }
2774 
2775 /**
2776  * panthor_vm_unmap_range() - Unmap a portion of the VA space
2777  * @vm: VM to unmap the region from.
2778  * @va: Virtual address to unmap. Must be 4k aligned.
2779  * @size: Size of the region to unmap. Must be 4k aligned.
2780  *
2781  * Internal use only. For userspace requests, use
2782  * panthor_vm_bind_exec_sync_op() instead.
2783  *
2784  * Return: 0 on success, a negative error code otherwise.
2785  */
2786 int panthor_vm_unmap_range(struct panthor_vm *vm, u64 va, u64 size)
2787 {
2788 	struct panthor_vm_op_ctx op_ctx;
2789 	int ret;
2790 
2791 	ret = panthor_vm_prepare_unmap_op_ctx(&op_ctx, vm, va, size);
2792 	if (ret)
2793 		return ret;
2794 
2795 	ret = panthor_vm_exec_op(vm, &op_ctx, false);
2796 	panthor_vm_cleanup_op_ctx(&op_ctx, vm);
2797 
2798 	return ret;
2799 }
2800 
2801 /**
2802  * panthor_vm_prepare_mapped_bos_resvs() - Prepare resvs on VM BOs.
2803  * @exec: Locking/preparation context.
2804  * @vm: VM targeted by the GPU job.
2805  * @slot_count: Number of slots to reserve.
2806  *
2807  * GPU jobs assume all BOs bound to the VM at the time the job is submitted
2808  * are available when the job is executed. In order to guarantee that, we
2809  * need to reserve a slot on all BOs mapped to a VM and update this slot with
2810  * the job fence after its submission.
2811  *
2812  * Return: 0 on success, a negative error code otherwise.
2813  */
2814 int panthor_vm_prepare_mapped_bos_resvs(struct drm_exec *exec, struct panthor_vm *vm,
2815 					u32 slot_count)
2816 {
2817 	int ret;
2818 
2819 	/* Acquire the VM lock and reserve a slot for this GPU job. */
2820 	ret = drm_gpuvm_prepare_vm(&vm->base, exec, slot_count);
2821 	if (ret)
2822 		return ret;
2823 
2824 	return drm_gpuvm_prepare_objects(&vm->base, exec, slot_count);
2825 }
2826 
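/*
 * Minimal sketch of the expected call pattern (illustrative, based on the
 * generic drm_exec retry loop):
 *
 *	struct drm_exec exec;
 *	int ret;
 *
 *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 *	drm_exec_until_all_locked(&exec) {
 *		ret = panthor_vm_prepare_mapped_bos_resvs(&exec, vm, 1);
 *		drm_exec_retry_on_contention(&exec);
 *		if (ret)
 *			break;
 *	}
 *
 * then the job is submitted, the resvs are updated with its fence, and the
 * context is released with drm_exec_fini(&exec).
 */
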
2827 /**
2828  * panthor_mmu_unplug() - Unplug the MMU logic
2829  * @ptdev: Device.
2830  *
2831  * No access to the MMU regs should be done after this function is called.
2832  * We suspend the IRQ and disable all VMs to guarantee that.
2833  */
2834 void panthor_mmu_unplug(struct panthor_device *ptdev)
2835 {
2836 	if (!IS_ENABLED(CONFIG_PM) || pm_runtime_active(ptdev->base.dev))
2837 		panthor_mmu_irq_suspend(&ptdev->mmu->irq);
2838 
2839 	mutex_lock(&ptdev->mmu->as.slots_lock);
2840 	for (u32 i = 0; i < ARRAY_SIZE(ptdev->mmu->as.slots); i++) {
2841 		struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm;
2842 
2843 		if (vm) {
2844 			drm_WARN_ON(&ptdev->base,
2845 				    panthor_mmu_as_disable(ptdev, i, false));
2846 			panthor_vm_release_as_locked(vm);
2847 		}
2848 	}
2849 	mutex_unlock(&ptdev->mmu->as.slots_lock);
2850 }
2851 
2852 static void panthor_mmu_release_wq(struct drm_device *ddev, void *res)
2853 {
2854 	destroy_workqueue(res);
2855 }
2856 
2857 /**
2858  * panthor_mmu_init() - Initialize the MMU logic.
2859  * @ptdev: Device.
2860  *
2861  * Return: 0 on success, a negative error code otherwise.
2862  */
2863 int panthor_mmu_init(struct panthor_device *ptdev)
2864 {
2865 	u32 va_bits = GPU_MMU_FEATURES_VA_BITS(ptdev->gpu_info.mmu_features);
2866 	struct panthor_mmu *mmu;
2867 	int ret, irq;
2868 
2869 	mmu = drmm_kzalloc(&ptdev->base, sizeof(*mmu), GFP_KERNEL);
2870 	if (!mmu)
2871 		return -ENOMEM;
2872 
2873 	INIT_LIST_HEAD(&mmu->as.lru_list);
2874 
2875 	ret = drmm_mutex_init(&ptdev->base, &mmu->as.slots_lock);
2876 	if (ret)
2877 		return ret;
2878 
2879 	INIT_LIST_HEAD(&mmu->vm.list);
2880 	ret = drmm_mutex_init(&ptdev->base, &mmu->vm.lock);
2881 	if (ret)
2882 		return ret;
2883 
2884 	ptdev->mmu = mmu;
2885 
2886 	irq = platform_get_irq_byname(to_platform_device(ptdev->base.dev), "mmu");
2887 	if (irq <= 0)
2888 		return -ENODEV;
2889 
2890 	ret = panthor_request_mmu_irq(ptdev, &mmu->irq, irq,
2891 				      panthor_mmu_fault_mask(ptdev, ~0));
2892 	if (ret)
2893 		return ret;
2894 
2895 	mmu->vm.wq = alloc_workqueue("panthor-vm-bind", WQ_UNBOUND, 0);
2896 	if (!mmu->vm.wq)
2897 		return -ENOMEM;
2898 
2899 	/* On 32-bit kernels, the VA space is limited by the io_pgtable_ops abstraction,
2900 	 * which passes iova as an unsigned long. Patch the mmu_features to reflect this
2901 	 * limitation.
2902 	 */
2903 	if (va_bits > BITS_PER_LONG) {
2904 		ptdev->gpu_info.mmu_features &= ~GENMASK(7, 0);
2905 		ptdev->gpu_info.mmu_features |= BITS_PER_LONG;
2906 	}
2907 
2908 	return drmm_add_action_or_reset(&ptdev->base, panthor_mmu_release_wq, mmu->vm.wq);
2909 }
2910 
2911 #ifdef CONFIG_DEBUG_FS
2912 static int show_vm_gpuvas(struct panthor_vm *vm, struct seq_file *m)
2913 {
2914 	int ret;
2915 
2916 	mutex_lock(&vm->op_lock);
2917 	ret = drm_debugfs_gpuva_info(m, &vm->base);
2918 	mutex_unlock(&vm->op_lock);
2919 
2920 	return ret;
2921 }
2922 
2923 static int show_each_vm(struct seq_file *m, void *arg)
2924 {
2925 	struct drm_info_node *node = (struct drm_info_node *)m->private;
2926 	struct drm_device *ddev = node->minor->dev;
2927 	struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);
2928 	int (*show)(struct panthor_vm *, struct seq_file *) = node->info_ent->data;
2929 	struct panthor_vm *vm;
2930 	int ret = 0;
2931 
2932 	mutex_lock(&ptdev->mmu->vm.lock);
2933 	list_for_each_entry(vm, &ptdev->mmu->vm.list, node) {
2934 		ret = show(vm, m);
2935 		if (ret < 0)
2936 			break;
2937 
2938 		seq_puts(m, "\n");
2939 	}
2940 	mutex_unlock(&ptdev->mmu->vm.lock);
2941 
2942 	return ret;
2943 }
2944 
2945 static struct drm_info_list panthor_mmu_debugfs_list[] = {
2946 	DRM_DEBUGFS_GPUVA_INFO(show_each_vm, show_vm_gpuvas),
2947 };
2948 
2949 /**
2950  * panthor_mmu_debugfs_init() - Initialize MMU debugfs entries
2951  * @minor: Minor.
2952  */
2953 void panthor_mmu_debugfs_init(struct drm_minor *minor)
2954 {
2955 	drm_debugfs_create_files(panthor_mmu_debugfs_list,
2956 				 ARRAY_SIZE(panthor_mmu_debugfs_list),
2957 				 minor->debugfs_root, minor);
2958 }
2959 #endif /* CONFIG_DEBUG_FS */
2960 
2961 /**
2962  * panthor_mmu_pt_cache_init() - Initialize the page table cache.
2963  *
2964  * Return: 0 on success, a negative error code otherwise.
2965  */
2966 int panthor_mmu_pt_cache_init(void)
2967 {
2968 	pt_cache = kmem_cache_create("panthor-mmu-pt", SZ_4K, SZ_4K, 0, NULL);
2969 	if (!pt_cache)
2970 		return -ENOMEM;
2971 
2972 	return 0;
2973 }
2974 
2975 /**
2976  * panthor_mmu_pt_cache_fini() - Destroy the page table cache.
2977  */
2978 void panthor_mmu_pt_cache_fini(void)
2979 {
2980 	kmem_cache_destroy(pt_cache);
2981 }
2982