xref: /freebsd/sys/arm64/iommu/iommu_pmap.c (revision 7ef62cebc2f965b0f640263e179276928885e33d)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2020-2021 Ruslan Bukin <br@bsdpad.com>
5  * Copyright (c) 2014-2021 Andrew Turner
6  * Copyright (c) 2014-2016 The FreeBSD Foundation
7  * All rights reserved.
8  *
9  * This work was supported by Innovate UK project 105694, "Digital Security
10  * by Design (DSbD) Technology Platform Prototype".
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  */
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 /*
38  *	Manages physical address maps for ARM SMMUv3 and ARM Mali GPU.
39  */
40 
41 #include "opt_vm.h"
42 
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/ktr.h>
46 #include <sys/lock.h>
47 #include <sys/mutex.h>
48 #include <sys/rwlock.h>
49 
50 #include <vm/vm.h>
51 #include <vm/vm_param.h>
52 #include <vm/vm_page.h>
53 #include <vm/vm_map.h>
54 #include <vm/vm_object.h>
55 #include <vm/vm_pageout.h>
56 #include <vm/vm_radix.h>
57 
58 #include <machine/machdep.h>
59 
60 #include <arm64/iommu/iommu_pmap.h>
61 #include <arm64/iommu/iommu_pte.h>
62 
63 #define	IOMMU_PAGE_SIZE		4096
64 
65 #define	SMMU_PMAP_LOCK(pmap)	mtx_lock(&(pmap)->sp_mtx)
66 #define	SMMU_PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->sp_mtx)
67 #define	SMMU_PMAP_LOCK_ASSERT(pmap, type) \
68     mtx_assert(&(pmap)->sp_mtx, (type))
69 
70 #define	NL0PG		(IOMMU_PAGE_SIZE/(sizeof (pd_entry_t)))
71 #define	NL1PG		(IOMMU_PAGE_SIZE/(sizeof (pd_entry_t)))
72 #define	NL2PG		(IOMMU_PAGE_SIZE/(sizeof (pd_entry_t)))
73 #define	NL3PG		(IOMMU_PAGE_SIZE/(sizeof (pt_entry_t)))
74 
75 #define	NUL0E		IOMMU_L0_ENTRIES
76 #define	NUL1E		(NUL0E * NL1PG)
77 #define	NUL2E		(NUL1E * NL2PG)
78 
79 #define	smmu_l0_pindex(v)	(NUL2E + NUL1E + ((v) >> IOMMU_L0_SHIFT))
80 #define	smmu_l1_pindex(v)	(NUL2E + ((v) >> IOMMU_L1_SHIFT))
81 #define	smmu_l2_pindex(v)	((v) >> IOMMU_L2_SHIFT)
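
/*
 * The pindex macros above partition the page table page index space by
 * level, which is how _smmu_pmap_unwire_l3() and _pmap_alloc_l3() tell
 * the page table pages apart:
 *
 *	[0, NUL2E)				L3 page table pages
 *	[NUL2E, NUL2E + NUL1E)			L2 page table pages
 *	[NUL2E + NUL1E, NUL2E + NUL1E + NUL0E)	L1 page table pages
 */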
82 
83 #define	smmu_l0_index(va)	(((va) >> IOMMU_L0_SHIFT) & IOMMU_L0_ADDR_MASK)
84 #define	smmu_l1_index(va)	(((va) >> IOMMU_L1_SHIFT) & IOMMU_Ln_ADDR_MASK)
85 #define	smmu_l2_index(va)	(((va) >> IOMMU_L2_SHIFT) & IOMMU_Ln_ADDR_MASK)
86 #define	smmu_l3_index(va)	(((va) >> IOMMU_L3_SHIFT) & IOMMU_Ln_ADDR_MASK)
87 
88 static vm_page_t _pmap_alloc_l3(struct smmu_pmap *pmap, vm_pindex_t ptepindex);
89 static void _smmu_pmap_unwire_l3(struct smmu_pmap *pmap, vm_offset_t va,
90     vm_page_t m, struct spglist *free);
91 
92 /*
93  * These load the old table data and store the new value.
94  * They need to be atomic as the System MMU may write to the table at
95  * the same time as the CPU.
96  */
97 #define	smmu_pmap_load(table)		(*table)
98 #define	smmu_pmap_clear(table)		atomic_store_64(table, 0)
99 #define	smmu_pmap_store(table, entry)	atomic_store_64(table, entry)
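
/*
 * Minimal usage sketch (illustrative only): a caller reads the current
 * value with smmu_pmap_load() and publishes a new one with a single
 * 64-bit store, so the SMMU never observes a torn entry:
 *
 *	pd_entry_t *l0 = smmu_pmap_l0(pmap, va);
 *
 *	if (smmu_pmap_load(l0) == 0)
 *		smmu_pmap_store(l0, VM_PAGE_TO_PHYS(m) | IOMMU_L0_TABLE);
 */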
100 
101 /********************/
102 /* Inline functions */
103 /********************/
104 
105 static __inline pd_entry_t *
106 smmu_pmap_l0(struct smmu_pmap *pmap, vm_offset_t va)
107 {
108 
109 	return (&pmap->sp_l0[smmu_l0_index(va)]);
110 }
111 
112 static __inline pd_entry_t *
113 smmu_pmap_l0_to_l1(pd_entry_t *l0, vm_offset_t va)
114 {
115 	pd_entry_t *l1;
116 
117 	l1 = (pd_entry_t *)PHYS_TO_DMAP(smmu_pmap_load(l0) & ~ATTR_MASK);
118 	return (&l1[smmu_l1_index(va)]);
119 }
120 
121 static __inline pd_entry_t *
122 smmu_pmap_l1(struct smmu_pmap *pmap, vm_offset_t va)
123 {
124 	pd_entry_t *l0;
125 
126 	l0 = smmu_pmap_l0(pmap, va);
127 	if ((smmu_pmap_load(l0) & ATTR_DESCR_MASK) != IOMMU_L0_TABLE)
128 		return (NULL);
129 
130 	return (smmu_pmap_l0_to_l1(l0, va));
131 }
132 
133 static __inline pd_entry_t *
134 smmu_pmap_l1_to_l2(pd_entry_t *l1p, vm_offset_t va)
135 {
136 	pd_entry_t l1, *l2p;
137 
138 	l1 = smmu_pmap_load(l1p);
139 
140 	/*
141 	 * The valid bit may be clear if pmap_update_entry() is concurrently
142 	 * modifying the entry, so for KVA only the entry type may be checked.
143 	 */
144 	KASSERT(va >= VM_MAX_USER_ADDRESS || (l1 & ATTR_DESCR_VALID) != 0,
145 	    ("%s: L1 entry %#lx for %#lx is invalid", __func__, l1, va));
146 	KASSERT((l1 & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_TABLE,
147 	    ("%s: L1 entry %#lx for %#lx is a leaf", __func__, l1, va));
148 	l2p = (pd_entry_t *)PHYS_TO_DMAP(l1 & ~ATTR_MASK);
149 	return (&l2p[smmu_l2_index(va)]);
150 }
151 
152 static __inline pd_entry_t *
153 smmu_pmap_l2(struct smmu_pmap *pmap, vm_offset_t va)
154 {
155 	pd_entry_t *l1;
156 
157 	l1 = smmu_pmap_l1(pmap, va);
158 	if ((smmu_pmap_load(l1) & ATTR_DESCR_MASK) != IOMMU_L1_TABLE)
159 		return (NULL);
160 
161 	return (smmu_pmap_l1_to_l2(l1, va));
162 }
163 
164 static __inline pt_entry_t *
165 smmu_pmap_l2_to_l3(pd_entry_t *l2p, vm_offset_t va)
166 {
167 	pd_entry_t l2;
168 	pt_entry_t *l3p;
169 
170 	l2 = smmu_pmap_load(l2p);
171 
172 	/*
173 	 * The valid bit may be clear if pmap_update_entry() is concurrently
174 	 * modifying the entry, so for KVA only the entry type may be checked.
175 	 */
176 	KASSERT(va >= VM_MAX_USER_ADDRESS || (l2 & ATTR_DESCR_VALID) != 0,
177 	    ("%s: L2 entry %#lx for %#lx is invalid", __func__, l2, va));
178 	KASSERT((l2 & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_TABLE,
179 	    ("%s: L2 entry %#lx for %#lx is a leaf", __func__, l2, va));
180 	l3p = (pt_entry_t *)PHYS_TO_DMAP(l2 & ~ATTR_MASK);
181 	return (&l3p[smmu_l3_index(va)]);
182 }
183 
184 /*
185  * Returns the lowest valid pde for a given virtual address.
186  * The next level may or may not point to a valid page or block.
187  */
188 static __inline pd_entry_t *
189 smmu_pmap_pde(struct smmu_pmap *pmap, vm_offset_t va, int *level)
190 {
191 	pd_entry_t *l0, *l1, *l2, desc;
192 
193 	l0 = smmu_pmap_l0(pmap, va);
194 	desc = smmu_pmap_load(l0) & ATTR_DESCR_MASK;
195 	if (desc != IOMMU_L0_TABLE) {
196 		*level = -1;
197 		return (NULL);
198 	}
199 
200 	l1 = smmu_pmap_l0_to_l1(l0, va);
201 	desc = smmu_pmap_load(l1) & ATTR_DESCR_MASK;
202 	if (desc != IOMMU_L1_TABLE) {
203 		*level = 0;
204 		return (l0);
205 	}
206 
207 	l2 = smmu_pmap_l1_to_l2(l1, va);
208 	desc = smmu_pmap_load(l2) & ATTR_DESCR_MASK;
209 	if (desc != IOMMU_L2_TABLE) {
210 		*level = 1;
211 		return (l1);
212 	}
213 
214 	*level = 2;
215 	return (l2);
216 }
217 
218 /*
219  * Returns the lowest valid pte block or table entry for a given virtual
220  * address. If there are no valid entries return NULL and set the level to
221  * the first invalid level.
222  */
223 static __inline pt_entry_t *
224 smmu_pmap_pte(struct smmu_pmap *pmap, vm_offset_t va, int *level)
225 {
226 	pd_entry_t *l1, *l2, desc;
227 	pt_entry_t *l3;
228 
229 	l1 = smmu_pmap_l1(pmap, va);
230 	if (l1 == NULL) {
231 		*level = 0;
232 		return (NULL);
233 	}
234 	desc = smmu_pmap_load(l1) & ATTR_DESCR_MASK;
235 	if (desc == IOMMU_L1_BLOCK) {
236 		*level = 1;
237 		return (l1);
238 	}
239 
240 	if (desc != IOMMU_L1_TABLE) {
241 		*level = 1;
242 		return (NULL);
243 	}
244 
245 	l2 = smmu_pmap_l1_to_l2(l1, va);
246 	desc = smmu_pmap_load(l2) & ATTR_DESCR_MASK;
247 	if (desc == IOMMU_L2_BLOCK) {
248 		*level = 2;
249 		return (l2);
250 	}
251 
252 	if (desc != IOMMU_L2_TABLE) {
253 		*level = 2;
254 		return (NULL);
255 	}
256 
257 	*level = 3;
258 	l3 = smmu_pmap_l2_to_l3(l2, va);
259 	if ((smmu_pmap_load(l3) & ATTR_DESCR_MASK) != IOMMU_L3_PAGE)
260 		return (NULL);
261 
262 	return (l3);
263 }
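
/*
 * Illustration of the return convention: a fully mapped 4 KiB page
 * yields the L3 PTE with *level == 3, while a walk that stops at an
 * invalid L1 entry returns NULL with *level == 1, telling the caller
 * which level is missing.
 */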
264 
265 static __inline int
266 smmu_pmap_l3_valid(pt_entry_t l3)
267 {
268 
269 	return ((l3 & ATTR_DESCR_MASK) == IOMMU_L3_PAGE);
270 }
271 
272 CTASSERT(IOMMU_L1_BLOCK == IOMMU_L2_BLOCK);
273 
274 #ifdef INVARIANTS
275 static __inline void
276 smmu_pmap_resident_count_inc(struct smmu_pmap *pmap, int count)
277 {
278 
279 	SMMU_PMAP_LOCK_ASSERT(pmap, MA_OWNED);
280 	pmap->sp_resident_count += count;
281 }
282 
283 static __inline void
284 smmu_pmap_resident_count_dec(struct smmu_pmap *pmap, int count)
285 {
286 
287 	SMMU_PMAP_LOCK_ASSERT(pmap, MA_OWNED);
288 	KASSERT(pmap->sp_resident_count >= count,
289 	    ("pmap %p resident count underflow %ld %d", pmap,
290 	    pmap->sp_resident_count, count));
291 	pmap->sp_resident_count -= count;
292 }
293 #else
294 static __inline void
295 smmu_pmap_resident_count_inc(struct smmu_pmap *pmap, int count)
296 {
297 }
298 
299 static __inline void
300 smmu_pmap_resident_count_dec(struct smmu_pmap *pmap, int count)
301 {
302 }
303 #endif
304 
305 /***************************************************
306  * Page table page management routines.....
307  ***************************************************/
308 /*
309  * Schedule the specified unused page table page to be freed.  Specifically,
310  * add the page to the specified list of pages that will be released to the
311  * physical memory manager after the TLB has been updated.
312  */
313 static __inline void
314 smmu_pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
315     boolean_t set_PG_ZERO)
316 {
317 
318 	if (set_PG_ZERO)
319 		m->flags |= PG_ZERO;
320 	else
321 		m->flags &= ~PG_ZERO;
322 	SLIST_INSERT_HEAD(free, m, plinks.s.ss);
323 }
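
/*
 * Sketch of the expected caller pattern (not taken from this file):
 * the caller accumulates pages on a local spglist and hands them back
 * to the VM system once any required invalidation has completed, e.g.
 *
 *	struct spglist free;
 *
 *	SLIST_INIT(&free);
 *	...
 *	vm_page_free_pages_toq(&free, true);
 */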
324 
325 /***************************************************
326  * Low level mapping routines.....
327  ***************************************************/
328 
329 /*
330  * Decrements a page table page's reference count, which is used to record the
331  * number of valid page table entries within the page.  If the reference count
332  * drops to zero, then the page table page is unmapped.  Returns TRUE if the
333  * page table page was unmapped and FALSE otherwise.
334  */
335 static inline boolean_t
336 smmu_pmap_unwire_l3(struct smmu_pmap *pmap, vm_offset_t va, vm_page_t m,
337     struct spglist *free)
338 {
339 
340 	--m->ref_count;
341 	if (m->ref_count == 0) {
342 		_smmu_pmap_unwire_l3(pmap, va, m, free);
343 		return (TRUE);
344 	} else
345 		return (FALSE);
346 }
347 
348 static void
349 _smmu_pmap_unwire_l3(struct smmu_pmap *pmap, vm_offset_t va, vm_page_t m,
350     struct spglist *free)
351 {
352 
353 	SMMU_PMAP_LOCK_ASSERT(pmap, MA_OWNED);
354 	/*
355 	 * unmap the page table page
356 	 */
357 	if (m->pindex >= (NUL2E + NUL1E)) {
358 		/* l1 page */
359 		pd_entry_t *l0;
360 
361 		l0 = smmu_pmap_l0(pmap, va);
362 		smmu_pmap_clear(l0);
363 	} else if (m->pindex >= NUL2E) {
364 		/* l2 page */
365 		pd_entry_t *l1;
366 
367 		l1 = smmu_pmap_l1(pmap, va);
368 		smmu_pmap_clear(l1);
369 	} else {
370 		/* l3 page */
371 		pd_entry_t *l2;
372 
373 		l2 = smmu_pmap_l2(pmap, va);
374 		smmu_pmap_clear(l2);
375 	}
376 	smmu_pmap_resident_count_dec(pmap, 1);
377 	if (m->pindex < NUL2E) {
378 		/* We just released an l3, unhold the matching l2 */
379 		pd_entry_t *l1, tl1;
380 		vm_page_t l2pg;
381 
382 		l1 = smmu_pmap_l1(pmap, va);
383 		tl1 = smmu_pmap_load(l1);
384 		l2pg = PHYS_TO_VM_PAGE(tl1 & ~ATTR_MASK);
385 		smmu_pmap_unwire_l3(pmap, va, l2pg, free);
386 	} else if (m->pindex < (NUL2E + NUL1E)) {
387 		/* We just released an l2, unhold the matching l1 */
388 		pd_entry_t *l0, tl0;
389 		vm_page_t l1pg;
390 
391 		l0 = smmu_pmap_l0(pmap, va);
392 		tl0 = smmu_pmap_load(l0);
393 		l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK);
394 		smmu_pmap_unwire_l3(pmap, va, l1pg, free);
395 	}
396 
397 	/*
398 	 * Put page on a list so that it is released after
399 	 * *ALL* TLB shootdown is done
400 	 */
401 	smmu_pmap_add_delayed_free_list(m, free, TRUE);
402 }
403 
404 int
405 smmu_pmap_pinit(struct smmu_pmap *pmap)
406 {
407 	vm_page_t m;
408 
409 	/*
410 	 * allocate the l0 page
411 	 */
412 	m = vm_page_alloc_noobj(VM_ALLOC_WAITOK | VM_ALLOC_WIRED |
413 	    VM_ALLOC_ZERO);
414 	pmap->sp_l0_paddr = VM_PAGE_TO_PHYS(m);
415 	pmap->sp_l0 = (pd_entry_t *)PHYS_TO_DMAP(pmap->sp_l0_paddr);
416 
417 #ifdef INVARIANTS
418 	pmap->sp_resident_count = 0;
419 #endif
420 	mtx_init(&pmap->sp_mtx, "smmu pmap", NULL, MTX_DEF);
421 
422 	return (1);
423 }
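
/*
 * Typical lifecycle, pieced together from the functions in this file
 * (an illustrative sketch; error handling omitted):
 *
 *	smmu_pmap_pinit(p);			allocate the L0 table
 *	smmu_pmap_enter(p, va, pa, prot, 0);	map a single page
 *	...
 *	smmu_pmap_remove(p, va);		unmap it again
 *	smmu_pmap_remove_pages(p);		free the page table pages
 *	smmu_pmap_release(p);			free the L0 table
 */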
424 
425 /*
426  * This routine is called if the desired page table page does not exist.
427  *
428  * If page table page allocation fails, this routine returns NULL;
429  * unlike the CPU pmap version it never sleeps, so callers must handle
430  * the failure themselves (e.g. by returning KERN_RESOURCE_SHORTAGE).
431  *
432  * Note: if an allocation fails while recursing for an upper-level page
433  * table page, the page just allocated for the requested level is freed
434  * again before returning NULL, so no partially built state is left.
435  */
436 static vm_page_t
437 _pmap_alloc_l3(struct smmu_pmap *pmap, vm_pindex_t ptepindex)
438 {
439 	vm_page_t m, l1pg, l2pg;
440 
441 	SMMU_PMAP_LOCK_ASSERT(pmap, MA_OWNED);
442 
443 	/*
444 	 * Allocate a page table page.
445 	 */
446 	if ((m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
447 		/*
448 		 * Indicate the need to retry.  While waiting, the page table
449 		 * page may have been allocated.
450 		 */
451 		return (NULL);
452 	}
453 	m->pindex = ptepindex;
454 
455 	/*
456 	 * Because of AArch64's weak memory consistency model, we must have a
457 	 * barrier here to ensure that the stores for zeroing "m", whether by
458 	 * pmap_zero_page() or an earlier function, are visible before adding
459 	 * "m" to the page table.  Otherwise, a page table walk by another
460 	 * processor's MMU could see the mapping to "m" and a stale, non-zero
461 	 * PTE within "m".
462 	 */
463 	dmb(ishst);
464 
465 	/*
466 	 * Link the page table page into the pmap's table hierarchy, if
467 	 * it isn't already there.
468 	 */
469 
470 	if (ptepindex >= (NUL2E + NUL1E)) {
471 		pd_entry_t *l0;
472 		vm_pindex_t l0index;
473 
474 		l0index = ptepindex - (NUL2E + NUL1E);
475 		l0 = &pmap->sp_l0[l0index];
476 		smmu_pmap_store(l0, VM_PAGE_TO_PHYS(m) | IOMMU_L0_TABLE);
477 	} else if (ptepindex >= NUL2E) {
478 		vm_pindex_t l0index, l1index;
479 		pd_entry_t *l0, *l1;
480 		pd_entry_t tl0;
481 
482 		l1index = ptepindex - NUL2E;
483 		l0index = l1index >> IOMMU_L0_ENTRIES_SHIFT;
484 
485 		l0 = &pmap->sp_l0[l0index];
486 		tl0 = smmu_pmap_load(l0);
487 		if (tl0 == 0) {
488 			/* recurse for allocating page dir */
489 			if (_pmap_alloc_l3(pmap, NUL2E + NUL1E + l0index)
490 			    == NULL) {
491 				vm_page_unwire_noq(m);
492 				vm_page_free_zero(m);
493 				return (NULL);
494 			}
495 		} else {
496 			l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK);
497 			l1pg->ref_count++;
498 		}
499 
500 		l1 = (pd_entry_t *)PHYS_TO_DMAP(smmu_pmap_load(l0) &~ATTR_MASK);
501 		l1 = &l1[ptepindex & Ln_ADDR_MASK];
502 		smmu_pmap_store(l1, VM_PAGE_TO_PHYS(m) | IOMMU_L1_TABLE);
503 	} else {
504 		vm_pindex_t l0index, l1index;
505 		pd_entry_t *l0, *l1, *l2;
506 		pd_entry_t tl0, tl1;
507 
508 		l1index = ptepindex >> IOMMU_Ln_ENTRIES_SHIFT;
509 		l0index = l1index >> IOMMU_L0_ENTRIES_SHIFT;
510 
511 		l0 = &pmap->sp_l0[l0index];
512 		tl0 = smmu_pmap_load(l0);
513 		if (tl0 == 0) {
514 			/* recurse for allocating page dir */
515 			if (_pmap_alloc_l3(pmap, NUL2E + l1index) == NULL) {
516 				vm_page_unwire_noq(m);
517 				vm_page_free_zero(m);
518 				return (NULL);
519 			}
520 			tl0 = smmu_pmap_load(l0);
521 			l1 = (pd_entry_t *)PHYS_TO_DMAP(tl0 & ~ATTR_MASK);
522 			l1 = &l1[l1index & Ln_ADDR_MASK];
523 		} else {
524 			l1 = (pd_entry_t *)PHYS_TO_DMAP(tl0 & ~ATTR_MASK);
525 			l1 = &l1[l1index & Ln_ADDR_MASK];
526 			tl1 = smmu_pmap_load(l1);
527 			if (tl1 == 0) {
528 				/* recurse for allocating page dir */
529 				if (_pmap_alloc_l3(pmap, NUL2E + l1index)
530 				    == NULL) {
531 					vm_page_unwire_noq(m);
532 					vm_page_free_zero(m);
533 					return (NULL);
534 				}
535 			} else {
536 				l2pg = PHYS_TO_VM_PAGE(tl1 & ~ATTR_MASK);
537 				l2pg->ref_count++;
538 			}
539 		}
540 
541 		l2 = (pd_entry_t *)PHYS_TO_DMAP(smmu_pmap_load(l1) &~ATTR_MASK);
542 		l2 = &l2[ptepindex & Ln_ADDR_MASK];
543 		smmu_pmap_store(l2, VM_PAGE_TO_PHYS(m) | IOMMU_L2_TABLE);
544 	}
545 
546 	smmu_pmap_resident_count_inc(pmap, 1);
547 
548 	return (m);
549 }
550 
551 /***************************************************
552  * Pmap allocation/deallocation routines.
553  ***************************************************/
554 
555 /*
556  * Release any resources held by the given physical map.
557  * Called when a pmap initialized by smmu_pmap_pinit() is being released.
558  * Should only be called if the map contains no valid mappings.
559  */
560 void
561 smmu_pmap_release(struct smmu_pmap *pmap)
562 {
563 	vm_page_t m;
564 
565 	KASSERT(pmap->sp_resident_count == 0,
566 	    ("pmap_release: pmap resident count %ld != 0",
567 	    pmap->sp_resident_count));
568 
569 	m = PHYS_TO_VM_PAGE(pmap->sp_l0_paddr);
570 	vm_page_unwire_noq(m);
571 	vm_page_free_zero(m);
572 	mtx_destroy(&pmap->sp_mtx);
573 }
574 
575 /***************************************************
576  * page management routines.
577  ***************************************************/
578 
579 /*
580  * Add a single Mali GPU entry. This function does not sleep.
581  */
582 int
583 pmap_gpu_enter(struct smmu_pmap *pmap, vm_offset_t va, vm_paddr_t pa,
584     vm_prot_t prot, u_int flags)
585 {
586 	pd_entry_t *pde;
587 	pt_entry_t new_l3;
588 	pt_entry_t orig_l3 __diagused;
589 	pt_entry_t *l3;
590 	vm_page_t mpte;
591 	pd_entry_t *l1p;
592 	pd_entry_t *l2p;
593 	int lvl;
594 	int rv;
595 
596 	KASSERT(va < VM_MAXUSER_ADDRESS, ("wrong address space"));
597 	KASSERT((va & PAGE_MASK) == 0, ("va is misaligned"));
598 	KASSERT((pa & PAGE_MASK) == 0, ("pa is misaligned"));
599 
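	/*
	 * Build the L3 descriptor using the stage 2 attribute encodings
	 * (ATTR_S2_*): read/write access permissions and execute-never.
	 */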
600 	new_l3 = (pt_entry_t)(pa | ATTR_SH(ATTR_SH_IS) | IOMMU_L3_BLOCK);
601 
602 	if ((prot & VM_PROT_WRITE) != 0)
603 		new_l3 |= ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE);
604 	if ((prot & VM_PROT_READ) != 0)
605 		new_l3 |= ATTR_S2_S2AP(ATTR_S2_S2AP_READ);
606 	if ((prot & VM_PROT_EXECUTE) == 0)
607 		new_l3 |= ATTR_S2_XN(ATTR_S2_XN_ALL);
608 
609 	CTR2(KTR_PMAP, "pmap_gpu_enter: %.16lx -> %.16lx", va, pa);
610 
611 	SMMU_PMAP_LOCK(pmap);
612 
613 	/*
614 	 * In the case that a page table page is not
615 	 * resident, we are creating it here.
616 	 */
617 retry:
618 	pde = smmu_pmap_pde(pmap, va, &lvl);
619 	if (pde != NULL && lvl == 2) {
620 		l3 = smmu_pmap_l2_to_l3(pde, va);
621 	} else {
622 		mpte = _pmap_alloc_l3(pmap, smmu_l2_pindex(va));
623 		if (mpte == NULL) {
624 			CTR0(KTR_PMAP, "pmap_gpu_enter: mpte == NULL");
625 			rv = KERN_RESOURCE_SHORTAGE;
626 			goto out;
627 		}
628 
629 		/*
630 		 * Ensure the newly created l1 and l2 entries are visible to the
631 		 * GPU.  l0 has already been made visible by a similar call in
632 		 * the panfrost driver.  The cache entry for l3 is handled below.
633 		 */
634 
635 		l1p = smmu_pmap_l1(pmap, va);
636 		l2p = smmu_pmap_l2(pmap, va);
637 		cpu_dcache_wb_range((vm_offset_t)l1p, sizeof(pd_entry_t));
638 		cpu_dcache_wb_range((vm_offset_t)l2p, sizeof(pd_entry_t));
639 
640 		goto retry;
641 	}
642 
643 	orig_l3 = smmu_pmap_load(l3);
644 	KASSERT(!smmu_pmap_l3_valid(orig_l3), ("l3 is valid"));
645 
646 	/* New mapping */
647 	smmu_pmap_store(l3, new_l3);
648 
649 	cpu_dcache_wb_range((vm_offset_t)l3, sizeof(pt_entry_t));
650 
651 	smmu_pmap_resident_count_inc(pmap, 1);
652 	dsb(ishst);
653 
654 	rv = KERN_SUCCESS;
655 out:
656 	SMMU_PMAP_UNLOCK(pmap);
657 
658 	return (rv);
659 }
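
/*
 * Illustrative call (not from this file): a GPU driver maps one page at
 * a time, e.g.
 *
 *	error = pmap_gpu_enter(pmap, va, VM_PAGE_TO_PHYS(m),
 *	    VM_PROT_READ | VM_PROT_WRITE, 0);
 *	if (error != KERN_SUCCESS)
 *		return (ENOMEM);
 */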
660 
661 /*
662  * Remove a single Mali GPU entry.
663  */
664 int
665 pmap_gpu_remove(struct smmu_pmap *pmap, vm_offset_t va)
666 {
667 	pd_entry_t *pde;
668 	pt_entry_t *pte;
669 	int lvl;
670 	int rc;
671 
672 	KASSERT((va & PAGE_MASK) == 0, ("va is misaligned"));
673 
674 	SMMU_PMAP_LOCK(pmap);
675 
676 	pde = smmu_pmap_pde(pmap, va, &lvl);
677 	if (pde == NULL || lvl != 2) {
678 		rc = KERN_FAILURE;
679 		goto out;
680 	}
681 
682 	pte = smmu_pmap_l2_to_l3(pde, va);
683 
684 	smmu_pmap_resident_count_dec(pmap, 1);
685 	smmu_pmap_clear(pte);
686 	cpu_dcache_wb_range((vm_offset_t)pte, sizeof(pt_entry_t));
687 	rc = KERN_SUCCESS;
688 
689 out:
690 	SMMU_PMAP_UNLOCK(pmap);
691 
692 	return (rc);
693 }
694 
695 /*
696  * Add a single SMMU entry. This function does not sleep.
697  */
698 int
699 smmu_pmap_enter(struct smmu_pmap *pmap, vm_offset_t va, vm_paddr_t pa,
700     vm_prot_t prot, u_int flags)
701 {
702 	pd_entry_t *pde;
703 	pt_entry_t new_l3;
704 	pt_entry_t orig_l3 __diagused;
705 	pt_entry_t *l3;
706 	vm_page_t mpte;
707 	int lvl;
708 	int rv;
709 
710 	KASSERT(va < VM_MAXUSER_ADDRESS, ("wrong address space"));
711 
712 	va = trunc_page(va);
713 	new_l3 = (pt_entry_t)(pa | ATTR_DEFAULT |
714 	    ATTR_S1_IDX(VM_MEMATTR_DEVICE) | IOMMU_L3_PAGE);
715 	if ((prot & VM_PROT_WRITE) == 0)
716 		new_l3 |= ATTR_S1_AP(ATTR_S1_AP_RO);
717 	new_l3 |= ATTR_S1_XN; /* Execute never. */
718 	new_l3 |= ATTR_S1_AP(ATTR_S1_AP_USER);
719 	new_l3 |= ATTR_S1_nG; /* Non global. */
720 
721 	CTR2(KTR_PMAP, "smmu_pmap_enter: %.16lx -> %.16lx", va, pa);
722 
723 	SMMU_PMAP_LOCK(pmap);
724 
725 	/*
726 	 * In the case that a page table page is not
727 	 * resident, we are creating it here.
728 	 */
729 retry:
730 	pde = smmu_pmap_pde(pmap, va, &lvl);
731 	if (pde != NULL && lvl == 2) {
732 		l3 = smmu_pmap_l2_to_l3(pde, va);
733 	} else {
734 		mpte = _pmap_alloc_l3(pmap, smmu_l2_pindex(va));
735 		if (mpte == NULL) {
736 			CTR0(KTR_PMAP, "smmu_pmap_enter: mpte == NULL");
737 			rv = KERN_RESOURCE_SHORTAGE;
738 			goto out;
739 		}
740 		goto retry;
741 	}
742 
743 	orig_l3 = smmu_pmap_load(l3);
744 	KASSERT(!smmu_pmap_l3_valid(orig_l3), ("l3 is valid"));
745 
746 	/* New mapping */
747 	smmu_pmap_store(l3, new_l3);
748 	smmu_pmap_resident_count_inc(pmap, 1);
749 	dsb(ishst);
750 
751 	rv = KERN_SUCCESS;
752 out:
753 	SMMU_PMAP_UNLOCK(pmap);
754 
755 	return (rv);
756 }
757 
758 /*
759  * Remove a single SMMU entry.
760  */
761 int
762 smmu_pmap_remove(struct smmu_pmap *pmap, vm_offset_t va)
763 {
764 	pt_entry_t *pte;
765 	int lvl;
766 	int rc;
767 
768 	SMMU_PMAP_LOCK(pmap);
769 
770 	pte = smmu_pmap_pte(pmap, va, &lvl);
771 	KASSERT(lvl == 3,
772 	    ("Invalid SMMU pagetable level: %d != 3", lvl));
773 
774 	if (pte != NULL) {
775 		smmu_pmap_resident_count_dec(pmap, 1);
776 		smmu_pmap_clear(pte);
777 		rc = KERN_SUCCESS;
778 	} else
779 		rc = KERN_FAILURE;
780 
781 	SMMU_PMAP_UNLOCK(pmap);
782 
783 	return (rc);
784 }
785 
786 /*
787  * Remove all the allocated L1, L2 pages from SMMU pmap.
788  * All the L3 entries must be cleared in advance; otherwise
789  * this function panics.
790  */
791 void
792 smmu_pmap_remove_pages(struct smmu_pmap *pmap)
793 {
794 	pd_entry_t l0e, *l1, l1e, *l2, l2e;
795 	pt_entry_t *l3, l3e;
796 	vm_page_t m, m0, m1;
797 	vm_paddr_t pa;
798 	vm_paddr_t pa0;
799 	vm_paddr_t pa1;
800 	int i, j, k, l;
801 
802 	SMMU_PMAP_LOCK(pmap);
803 
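	/*
	 * Walk the entire page table tree, freeing the L1, L2 and L3 page
	 * table pages as their parent entries are cleared.  Any remaining
	 * valid L3 mapping is a caller error and triggers the panic below.
	 */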
804 	for (i = 0; i < IOMMU_L0_ENTRIES; i++) {
805 		l0e = pmap->sp_l0[i];
806 		if ((l0e & ATTR_DESCR_VALID) == 0) {
807 			continue;
808 		}
809 		pa0 = l0e & ~ATTR_MASK;
810 		m0 = PHYS_TO_VM_PAGE(pa0);
811 		l1 = (pd_entry_t *)PHYS_TO_DMAP(pa0);
812 
813 		for (j = 0; j < IOMMU_Ln_ENTRIES; j++) {
814 			l1e = l1[j];
815 			if ((l1e & ATTR_DESCR_VALID) == 0) {
816 				continue;
817 			}
818 			if ((l1e & ATTR_DESCR_MASK) == IOMMU_L1_BLOCK) {
819 				continue;
820 			}
821 			pa1 = l1e & ~ATTR_MASK;
822 			m1 = PHYS_TO_VM_PAGE(pa1);
823 			l2 = (pd_entry_t *)PHYS_TO_DMAP(pa1);
824 
825 			for (k = 0; k < IOMMU_Ln_ENTRIES; k++) {
826 				l2e = l2[k];
827 				if ((l2e & ATTR_DESCR_VALID) == 0) {
828 					continue;
829 				}
830 				pa = l2e & ~ATTR_MASK;
831 				m = PHYS_TO_VM_PAGE(pa);
832 				l3 = (pt_entry_t *)PHYS_TO_DMAP(pa);
833 
834 				for (l = 0; l < IOMMU_Ln_ENTRIES; l++) {
835 					l3e = l3[l];
836 					if ((l3e & ATTR_DESCR_VALID) == 0)
837 						continue;
838 					panic(
839 					  "%s: l3e found (indexes %d %d %d %d)",
840 					    __func__, i, j, k, l);
841 				}
842 
843 				vm_page_unwire_noq(m1);
844 				vm_page_unwire_noq(m);
845 				smmu_pmap_resident_count_dec(pmap, 1);
846 				vm_page_free(m);
847 				smmu_pmap_clear(&l2[k]);
848 			}
849 
850 			vm_page_unwire_noq(m0);
851 			smmu_pmap_resident_count_dec(pmap, 1);
852 			vm_page_free(m1);
853 			smmu_pmap_clear(&l1[j]);
854 		}
855 
856 		smmu_pmap_resident_count_dec(pmap, 1);
857 		vm_page_free(m0);
858 		smmu_pmap_clear(&pmap->sp_l0[i]);
859 	}
860 
861 	KASSERT(pmap->sp_resident_count == 0,
862 	    ("Invalid resident count %jd", pmap->sp_resident_count));
863 
864 	SMMU_PMAP_UNLOCK(pmap);
865 }
866