xref: /freebsd/sys/powerpc/booke/pmap_64.c (revision 580744621f33383027108364dcadad718df46ffe)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (C) 2020 Justin Hibbits
5  * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
6  * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
21  * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
23  * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
24  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
25  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
26  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
27  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  *
29  * Some hardware-specific parts of this pmap were derived from or influenced
30  * by NetBSD's ibm4xx pmap module. More generic code is shared with
31  * a few other pmap modules from the FreeBSD tree.
32  */
33 
34  /*
35   * VM layout notes:
36   *
37   * Kernel and user threads run within one common virtual address space
38   * defined by AS=0.
39   *
40   * 64-bit pmap:
41   * Virtual address space layout:
42   * -----------------------------
43   * 0x0000_0000_0000_0000 - 0x3fff_ffff_ffff_ffff      : user process
44   * 0x4000_0000_0000_0000 - 0x7fff_ffff_ffff_ffff      : unused
45   * 0x8000_0000_0000_0000 - 0xbfff_ffff_ffff_ffff      : mmio region
46   * 0xc000_0000_0000_0000 - 0xdfff_ffff_ffff_ffff      : direct map
47   * 0xe000_0000_0000_0000 - 0xffff_ffff_ffff_ffff      : KVA
48   */
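 /*
  * Illustrative sketch (not part of the original source): the direct map above
  * places physical memory at a fixed offset, so converting a physical address
  * to its direct-map VA is a single addition.  The real conversion used
  * throughout this file is the PHYS_TO_DMAP() macro; the helper name and the
  * hard-coded base below are only for illustration.
  */
#if 0	/* example only */
static inline vm_offset_t
booke64_dmap_va_example(vm_paddr_t pa)
{
	/* The DMAP base is 0xc000_0000_0000_0000 per the layout notes above. */
	return (0xc000000000000000UL + pa);
}
#endif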
49 
50 #include <sys/cdefs.h>
51 __FBSDID("$FreeBSD$");
52 
53 #include "opt_ddb.h"
54 #include "opt_kstack_pages.h"
55 
56 #include <sys/param.h>
57 #include <sys/conf.h>
58 #include <sys/malloc.h>
59 #include <sys/ktr.h>
60 #include <sys/proc.h>
61 #include <sys/user.h>
62 #include <sys/queue.h>
63 #include <sys/systm.h>
64 #include <sys/kernel.h>
65 #include <sys/kerneldump.h>
66 #include <sys/linker.h>
67 #include <sys/msgbuf.h>
68 #include <sys/lock.h>
69 #include <sys/mutex.h>
70 #include <sys/rwlock.h>
71 #include <sys/sched.h>
72 #include <sys/smp.h>
73 #include <sys/vmmeter.h>
74 
75 #include <vm/vm.h>
76 #include <vm/vm_page.h>
77 #include <vm/vm_kern.h>
78 #include <vm/vm_pageout.h>
79 #include <vm/vm_extern.h>
80 #include <vm/vm_object.h>
81 #include <vm/vm_param.h>
82 #include <vm/vm_map.h>
83 #include <vm/vm_pager.h>
84 #include <vm/vm_phys.h>
85 #include <vm/vm_pagequeue.h>
86 #include <vm/uma.h>
87 
88 #include <machine/_inttypes.h>
89 #include <machine/cpu.h>
90 #include <machine/pcb.h>
91 #include <machine/platform.h>
92 
93 #include <machine/tlb.h>
94 #include <machine/spr.h>
95 #include <machine/md_var.h>
96 #include <machine/mmuvar.h>
97 #include <machine/pmap.h>
98 #include <machine/pte.h>
99 
100 #include <ddb/ddb.h>
101 
102 #include "mmu_if.h"
103 
104 #ifdef  DEBUG
105 #define debugf(fmt, args...) printf(fmt, ##args)
106 #else
107 #define debugf(fmt, args...)
108 #endif
109 
110 #define	PRI0ptrX	"016lx"
111 
112 /**************************************************************************/
113 /* PMAP */
114 /**************************************************************************/
115 
116 unsigned int kernel_pdirs;
117 static uma_zone_t ptbl_root_zone;
118 static pte_t ****kernel_ptbl_root;
119 
120 /*
121  * Base of the pmap_mapdev() region.  On 32-bit it immediately follows the
122  * userspace address range.  On 64-bit it's far above, at (1 << 63), and
123  * ranges up to the DMAP, giving 62 bits of addressable PA.  This is far
124  * larger than the widest Book-E physical address bus (the e6500 has a
125  * 40-bit PA space), which lets us map devices much like the DMAP: the
126  * virtual address is simply the physical address offset by the base.
127  */
128 #define	VM_MAPDEV_BASE		0x8000000000000000
129 #define	VM_MAPDEV_PA_MAX	0x4000000000000000 /* Don't encroach on DMAP */
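/*
 * For illustration (following the relationship described above): a device
 * register at physical address 0xffe00000 would end up mapped at
 * VM_MAPDEV_BASE + 0xffe00000 == 0x8000_0000_ffe0_0000.
 */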
130 
131 static void tid_flush(tlbtid_t tid);
132 static unsigned long ilog2(unsigned long);
133 
134 /**************************************************************************/
135 /* Page table management */
136 /**************************************************************************/
137 
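/*
 * The 64-bit pmap uses a four-level tree rooted at pm_root (pte_t ****): a
 * virtual address is decoded as PG_ROOT_IDX(va), then PDIR_L1_IDX(va),
 * PDIR_IDX(va), and finally PTBL_IDX(va) (see pte_find() below).  The root
 * array is allocated from the ptbl_root_zone UMA zone (kernel_ptbl_root for
 * the kernel pmap); the lower levels are single pages obtained from
 * mmu_booke_alloc_page() and accessed through the direct map.
 */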
138 #define PMAP_ROOT_SIZE	(sizeof(pte_t****) * PG_ROOT_NENTRIES)
139 static pte_t *ptbl_alloc(mmu_t mmu, pmap_t pmap, vm_offset_t va,
140     bool nosleep, bool *is_new);
141 static void ptbl_hold(mmu_t, pmap_t, pte_t *);
142 static int ptbl_unhold(mmu_t, pmap_t, vm_offset_t);
143 
144 static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
145 static int pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
146 static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);
147 static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
148 static pte_t *pte_find_next(mmu_t, pmap_t, vm_offset_t *);
149 static void kernel_pte_alloc(vm_offset_t, vm_offset_t);
150 
151 /**************************************************************************/
152 /* Page table related */
153 /**************************************************************************/
154 
155 /* Allocate a page, to be used in a page table. */
156 static vm_offset_t
157 mmu_booke_alloc_page(mmu_t mmu, pmap_t pmap, unsigned int idx, bool nosleep)
158 {
159 	vm_page_t	m;
160 	int		req;
161 
162 	req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO;
163 	while ((m = vm_page_alloc(NULL, idx, req)) == NULL) {
164 		if (nosleep)
165 			return (0);
166 
167 		PMAP_UNLOCK(pmap);
168 		rw_wunlock(&pvh_global_lock);
169 		vm_wait(NULL);
170 		rw_wlock(&pvh_global_lock);
171 		PMAP_LOCK(pmap);
172 	}
173 
174 	if (!(m->flags & PG_ZERO))
175 		/* Zero whole ptbl. */
176 		mmu_booke_zero_page(mmu, m);
177 
178 	return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
179 }
180 
181 /* Initialize the pool of kva ptbl buffers; nothing to do on 64-bit, ptbl pages are reached via the direct map. */
182 static void
183 ptbl_init(void)
184 {
185 }
186 
187 /* Get a pointer to a PTE in a page table. */
188 static __inline pte_t *
189 pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
190 {
191 	pte_t        ***pdir_l1;
192 	pte_t         **pdir;
193 	pte_t          *ptbl;
194 
195 	KASSERT((pmap != NULL), ("pte_find: invalid pmap"));
196 
197 	pdir_l1 = pmap->pm_root[PG_ROOT_IDX(va)];
198 	if (pdir_l1 == NULL)
199 		return (NULL);
200 	pdir = pdir_l1[PDIR_L1_IDX(va)];
201 	if (pdir == NULL)
202 		return (NULL);
203 	ptbl = pdir[PDIR_IDX(va)];
204 
205 	return ((ptbl != NULL) ? &ptbl[PTBL_IDX(va)] : NULL);
206 }
207 
208 /* Get a pointer to a PTE in a page table, or the next closest (greater) one. */
209 static __inline pte_t *
210 pte_find_next(mmu_t mmu, pmap_t pmap, vm_offset_t *pva)
211 {
212 	vm_offset_t	va;
213 	pte_t	    ****pm_root;
214 	pte_t	       *pte;
215 	unsigned long	i, j, k, l;
216 
217 	KASSERT((pmap != NULL), ("pte_find_next: invalid pmap"));
218 
219 	va = *pva;
220 	i = PG_ROOT_IDX(va);
221 	j = PDIR_L1_IDX(va);
222 	k = PDIR_IDX(va);
223 	l = PTBL_IDX(va);
224 	pm_root = pmap->pm_root;
225 	/* Keep only the VA bits above the page table indices; the rest is rebuilt from i/j/k/l below. */
226 	va &= ~((1UL << (PG_ROOT_H + 1)) - 1);
227 	for (; i < PG_ROOT_NENTRIES; i++, j = 0) {
228 		if (pm_root[i] == 0)
229 			continue;
230 		for (; j < PDIR_L1_NENTRIES; j++, k = 0) {
231 			if (pm_root[i][j] == 0)
232 				continue;
233 			for (; k < PDIR_NENTRIES; k++, l = 0) {
234 				if (pm_root[i][j][k] == NULL)
235 					continue;
236 				for (; l < PTBL_NENTRIES; l++) {
237 					pte = &pm_root[i][j][k][l];
238 					if (!PTE_ISVALID(pte))
239 						continue;
240 					*pva = va + PG_ROOT_SIZE * i +
241 					    PDIR_L1_SIZE * j +
242 					    PDIR_SIZE * k +
243 					    PAGE_SIZE * l;
244 					return (pte);
245 				}
246 			}
247 		}
248 	}
249 	return (NULL);
250 }
251 
252 static bool
253 unhold_free_page(mmu_t mmu, pmap_t pmap, vm_page_t m)
254 {
255 
256 	m->ref_count--;
257 	if (m->ref_count == 0) {
258 		vm_wire_sub(1);
259 		vm_page_free_zero(m);
260 		return (true);
261 	}
262 
263 	return (false);
264 }
265 
266 static vm_offset_t
267 alloc_or_hold_page(mmu_t mmu, pmap_t pmap, vm_offset_t *ptr_tbl, uint32_t index,
268     bool nosleep, bool hold, bool *isnew)
269 {
270 	vm_offset_t	page;
271 	vm_page_t	m;
272 
273 	page = ptr_tbl[index];
274 	KASSERT(page != 0 || pmap != kernel_pmap,
275 	    ("NULL page table page found in kernel pmap!"));
276 	if (page == 0) {
277 		page = mmu_booke_alloc_page(mmu, pmap, index, nosleep);
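		/*
		 * mmu_booke_alloc_page() may have dropped the pmap and pv
		 * locks to sleep; re-check the slot, and if it is still empty
		 * install the new page.  Otherwise another thread raced us
		 * here, so free the page we just allocated and use the
		 * existing one.
		 */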
278 		if (ptr_tbl[index] == 0) {
279 			*isnew = true;
280 			ptr_tbl[index] = page;
281 			return (page);
282 		}
283 		m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(page));
284 		page = ptr_tbl[index];
285 		vm_wire_sub(1);
286 		vm_page_free_zero(m);
287 	}
288 
289 	if (hold) {
290 		m = PHYS_TO_VM_PAGE(pmap_kextract(page));
291 		m->ref_count++;
292 	}
293 	*isnew = false;
294 
295 	return (page);
296 }
297 
298 /* Allocate page table. */
299 static pte_t*
300 ptbl_alloc(mmu_t mmu, pmap_t pmap, vm_offset_t va, bool nosleep, bool *is_new)
301 {
302 	unsigned int	pg_root_idx = PG_ROOT_IDX(va);
303 	unsigned int	pdir_l1_idx = PDIR_L1_IDX(va);
304 	unsigned int	pdir_idx = PDIR_IDX(va);
305 	vm_offset_t	pdir_l1, pdir, ptbl;
306 	bool		hold_page;
307 
308 	hold_page = (pmap != kernel_pmap);
309 	pdir_l1 = alloc_or_hold_page(mmu, pmap, (vm_offset_t *)pmap->pm_root,
310 	    pg_root_idx, nosleep, hold_page, is_new);
311 	if (pdir_l1 == 0)
312 		return (NULL);
313 	pdir = alloc_or_hold_page(mmu, pmap, (vm_offset_t *)pdir_l1, pdir_l1_idx,
314 	    nosleep, hold_page, is_new);
315 	if (pdir == 0)
316 		return (NULL);
317 	ptbl = alloc_or_hold_page(mmu, pmap, (vm_offset_t *)pdir, pdir_idx,
318 	    nosleep, false, is_new);
319 
320 	return ((pte_t *)ptbl);
321 }
322 
323 /*
324  * Decrement the hold count of the ptbl pages and attempt to free them.
325  * Called when removing a pte entry from a ptbl.
326  *
327  * Return 1 if any ptbl pages were freed.
328  */
329 static int
330 ptbl_unhold(mmu_t mmu, pmap_t pmap, vm_offset_t va)
331 {
332 	pte_t          *ptbl;
333 	vm_page_t	m;
334 	u_int		pg_root_idx;
335 	pte_t        ***pdir_l1;
336 	u_int		pdir_l1_idx;
337 	pte_t         **pdir;
338 	u_int		pdir_idx;
339 
340 	pg_root_idx = PG_ROOT_IDX(va);
341 	pdir_l1_idx = PDIR_L1_IDX(va);
342 	pdir_idx = PDIR_IDX(va);
343 
344 	KASSERT((pmap != kernel_pmap),
345 		("ptbl_unhold: unholding kernel ptbl!"));
346 
347 	pdir_l1 = pmap->pm_root[pg_root_idx];
348 	pdir = pdir_l1[pdir_l1_idx];
349 	ptbl = pdir[pdir_idx];
350 
351 	/* decrement hold count */
352 	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) ptbl));
353 
354 	if (!unhold_free_page(mmu, pmap, m))
355 		return (0);
356 
357 	pdir[pdir_idx] = NULL;
358 	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) pdir));
359 
360 	if (!unhold_free_page(mmu, pmap, m))
361 		return (1);
362 
363 	pdir_l1[pdir_l1_idx] = NULL;
364 	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) pdir_l1));
365 
366 	if (!unhold_free_page(mmu, pmap, m))
367 		return (1);
368 	pmap->pm_root[pg_root_idx] = NULL;
369 
370 	return (1);
371 }
372 
373 /*
374  * Increment the hold count for the ptbl pages. This routine is used when a
375  * new pte entry is being inserted into the ptbl.
376  */
377 static void
378 ptbl_hold(mmu_t mmu, pmap_t pmap, pte_t *ptbl)
379 {
380 	vm_page_t	m;
381 
382 	KASSERT((pmap != kernel_pmap),
383 		("ptbl_hold: holding kernel ptbl!"));
384 
385 	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) ptbl));
386 	m->ref_count++;
387 }
388 
389 /*
390  * Clear the pte entry and try to free the page table pages if requested.
391  *
392  * Return 1 if ptbl pages were freed, otherwise return 0.
393  */
394 static int
395 pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, u_int8_t flags)
396 {
397 	vm_page_t	m;
398 	pte_t          *pte;
399 
400 	pte = pte_find(mmu, pmap, va);
401 	KASSERT(pte != NULL, ("%s: NULL pte for va %#jx, pmap %p",
402 	    __func__, (uintmax_t)va, pmap));
403 
404 	if (!PTE_ISVALID(pte))
405 		return (0);
406 
407 	/* Get vm_page_t for mapped pte. */
408 	m = PHYS_TO_VM_PAGE(PTE_PA(pte));
409 
410 	if (PTE_ISWIRED(pte))
411 		pmap->pm_stats.wired_count--;
412 
413 	/* Handle managed entry. */
414 	if (PTE_ISMANAGED(pte)) {
415 
416 		/* Handle modified pages. */
417 		if (PTE_ISMODIFIED(pte))
418 			vm_page_dirty(m);
419 
420 		/* Referenced pages. */
421 		if (PTE_ISREFERENCED(pte))
422 			vm_page_aflag_set(m, PGA_REFERENCED);
423 
424 		/* Remove pv_entry from pv_list. */
425 		pv_remove(pmap, va, m);
426 	} else if (pmap == kernel_pmap && m && m->md.pv_tracked) {
427 		pv_remove(pmap, va, m);
428 		if (TAILQ_EMPTY(&m->md.pv_list))
429 			m->md.pv_tracked = false;
430 	}
431 	mtx_lock_spin(&tlbivax_mutex);
432 	tlb_miss_lock();
433 
434 	tlb0_flush_entry(va);
435 	*pte = 0;
436 
437 	tlb_miss_unlock();
438 	mtx_unlock_spin(&tlbivax_mutex);
439 
440 	pmap->pm_stats.resident_count--;
441 
442 	if (flags & PTBL_UNHOLD) {
443 		return (ptbl_unhold(mmu, pmap, va));
444 	}
445 	return (0);
446 }
447 
448 /*
449  * Insert PTE for a given page and virtual address.
450  */
451 static int
452 pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
453     boolean_t nosleep)
454 {
455 	unsigned int	ptbl_idx = PTBL_IDX(va);
456 	pte_t          *ptbl, *pte, pte_tmp;
457 	bool		is_new;
458 
459 	/* Get the page directory pointer. */
460 	ptbl = ptbl_alloc(mmu, pmap, va, nosleep, &is_new);
461 	if (ptbl == NULL) {
462 		KASSERT(nosleep, ("nosleep and NULL ptbl"));
463 		return (ENOMEM);
464 	}
465 	if (is_new) {
466 		pte = &ptbl[ptbl_idx];
467 	} else {
468 		/*
469 		 * Check if there is a valid mapping for the requested va;
470 		 * if there is, remove it.
471 		 */
472 		pte = &ptbl[ptbl_idx];
473 		if (PTE_ISVALID(pte)) {
474 			pte_remove(mmu, pmap, va, PTBL_HOLD);
475 		} else {
476 			/*
477 			 * The pte is not in use; increment the hold count
478 			 * for the ptbl pages.
479 			 */
480 			if (pmap != kernel_pmap)
481 				ptbl_hold(mmu, pmap, ptbl);
482 		}
483 	}
484 
485 	/*
486 	 * Insert pv_entry into pv_list for mapped page if part of managed
487 	 * memory.
488 	 */
489 	if ((m->oflags & VPO_UNMANAGED) == 0) {
490 		flags |= PTE_MANAGED;
491 
492 		/* Create and insert pv entry. */
493 		pv_insert(pmap, va, m);
494 	}
495 
496 	pmap->pm_stats.resident_count++;
497 
498 	pte_tmp = PTE_RPN_FROM_PA(VM_PAGE_TO_PHYS(m));
499 	pte_tmp |= (PTE_VALID | flags);
500 
501 	mtx_lock_spin(&tlbivax_mutex);
502 	tlb_miss_lock();
503 
504 	tlb0_flush_entry(va);
505 	*pte = pte_tmp;
506 
507 	tlb_miss_unlock();
508 	mtx_unlock_spin(&tlbivax_mutex);
509 
510 	return (0);
511 }
512 
513 /* Return the pa for the given pmap/va. */
514 static	vm_paddr_t
515 pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
516 {
517 	vm_paddr_t	pa = 0;
518 	pte_t          *pte;
519 
520 	pte = pte_find(mmu, pmap, va);
521 	if ((pte != NULL) && PTE_ISVALID(pte))
522 		pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
523 	return (pa);
524 }
525 
526 
527 /* Set up the kernel page tables and enter PTEs covering the kernel image (addr to data_end). */
528 static void
529 kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr)
530 {
531 	pte_t		*pte;
532 	vm_size_t	kva_size;
533 	int		kernel_pdirs, kernel_pgtbls, pdir_l1s;
534 	vm_offset_t	va, l1_va, pdir_va, ptbl_va;
535 	int		i, j, k;
536 
537 	kva_size = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
538 	kernel_pmap->pm_root = kernel_ptbl_root;
539 	pdir_l1s = howmany(kva_size, PG_ROOT_SIZE);
540 	kernel_pdirs = howmany(kva_size, PDIR_L1_SIZE);
541 	kernel_pgtbls = howmany(kva_size, PDIR_SIZE);
542 
543 	/* Initialize kernel pdir */
544 	l1_va = (vm_offset_t)kernel_ptbl_root +
545 	    round_page(PG_ROOT_NENTRIES * sizeof(pte_t ***));
546 	pdir_va = l1_va + pdir_l1s * PAGE_SIZE;
547 	ptbl_va = pdir_va + kernel_pdirs * PAGE_SIZE;
548 	if (bootverbose) {
549 		printf("ptbl_root_va: %#lx\n", (vm_offset_t)kernel_ptbl_root);
550 		printf("l1_va: %#lx (%d entries)\n", l1_va, pdir_l1s);
551 		printf("pdir_va: %#lx (%d entries)\n", pdir_va, kernel_pdirs);
552 		printf("ptbl_va: %#lx (%d entries)\n", ptbl_va, kernel_pgtbls);
553 	}
554 
555 	va = VM_MIN_KERNEL_ADDRESS;
556 	for (i = 0; i < pdir_l1s; i++, l1_va += PAGE_SIZE) {
557 		kernel_pmap->pm_root[i] = (pte_t ***)l1_va;
558 		for (j = 0;
559 		    j < PDIR_L1_NENTRIES && va < VM_MAX_KERNEL_ADDRESS;
560 		    j++, pdir_va += PAGE_SIZE) {
561 			kernel_pmap->pm_root[i][j] = (pte_t **)pdir_va;
562 			for (k = 0;
563 			    k < PDIR_NENTRIES && va < VM_MAX_KERNEL_ADDRESS;
564 			    k++, va += PDIR_SIZE, ptbl_va += PAGE_SIZE)
565 				kernel_pmap->pm_root[i][j][k] = (pte_t *)ptbl_va;
566 		}
567 	}
568 	/*
569 	 * Fill in PTEs covering kernel code and data. They are not required
570 	 * for address translation, as this area is covered by static TLB1
571 	 * entries, but they are needed for pte_vatopa() to work correctly
572 	 * with kernel area addresses.
573 	 */
574 	for (va = addr; va < data_end; va += PAGE_SIZE) {
575 		pte = &(kernel_pmap->pm_root[PG_ROOT_IDX(va)][PDIR_L1_IDX(va)][PDIR_IDX(va)][PTBL_IDX(va)]);
576 		*pte = PTE_RPN_FROM_PA(kernload + (va - kernstart));
577 		*pte |= PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
578 		    PTE_VALID | PTE_PS_4KB;
579 	}
580 }
581 
582 static vm_offset_t
583 mmu_booke_alloc_kernel_pgtables(vm_offset_t data_end)
584 {
585 	vm_size_t kva_size = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
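	/*
	 * Carve the bootstrap page table memory out of data_end in the same
	 * layout kernel_pte_alloc() later expects: the root pointer array,
	 * followed by one page per L1 directory, per pdir, and per ptbl.
	 */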
586 	kernel_ptbl_root = (pte_t ****)data_end;
587 
588 	data_end += round_page(PG_ROOT_NENTRIES * sizeof(pte_t ***));
589 	data_end += howmany(kva_size, PG_ROOT_SIZE) * PAGE_SIZE;
590 	data_end += howmany(kva_size, PDIR_L1_SIZE) * PAGE_SIZE;
591 	data_end += howmany(kva_size, PDIR_SIZE) * PAGE_SIZE;
592 
593 	return (data_end);
594 }
595 
596 
597 /*
598  * Initialize a preallocated and zeroed pmap structure,
599  * such as one in a vmspace structure.
600  */
601 static void
602 mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
603 {
604 	int i;
605 
606 	CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap,
607 	    curthread->td_proc->p_pid, curthread->td_proc->p_comm);
608 
609 	KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap"));
610 
611 	for (i = 0; i < MAXCPU; i++)
612 		pmap->pm_tid[i] = TID_NONE;
613 	CPU_ZERO(&pmap->pm_active);
614 	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
615 	pmap->pm_root = uma_zalloc(ptbl_root_zone, M_WAITOK);
616 	bzero(pmap->pm_root, sizeof(pte_t **) * PG_ROOT_NENTRIES);
617 }
618 
619 /*
620  * Release any resources held by the given physical map.
621  * Called when a pmap initialized by mmu_booke_pinit is being released.
622  * Should only be called if the map contains no valid mappings.
623  */
624 static void
625 mmu_booke_release(mmu_t mmu, pmap_t pmap)
626 {
627 
628 	KASSERT(pmap->pm_stats.resident_count == 0,
629 	    ("pmap_release: pmap resident count %ld != 0",
630 	    pmap->pm_stats.resident_count));
631 	uma_zfree(ptbl_root_zone, pmap->pm_root);
632 }
633 
634 static void
635 mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
636 {
637 	pte_t *pte;
638 	vm_paddr_t pa = 0;
639 	int sync_sz, valid;
640 
641 	while (sz > 0) {
642 		PMAP_LOCK(pm);
643 		pte = pte_find(mmu, pm, va);
644 		valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0;
645 		if (valid)
646 			pa = PTE_PA(pte);
647 		PMAP_UNLOCK(pm);
648 		sync_sz = PAGE_SIZE - (va & PAGE_MASK);
649 		sync_sz = min(sync_sz, sz);
650 		if (valid) {
651 			pa += (va & PAGE_MASK);
652 			__syncicache((void *)PHYS_TO_DMAP(pa), sync_sz);
653 		}
654 		va += sync_sz;
655 		sz -= sync_sz;
656 	}
657 }
658 
659 /*
660  * mmu_booke_zero_page_area zeros the specified hardware page by
661  * mapping it into virtual memory and using bzero to clear
662  * its contents.
663  *
664  * off and size must reside within a single page.
665  */
666 static void
667 mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
668 {
669 	vm_offset_t va;
670 
671 	/* XXX KASSERT off and size are within a single page? */
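	/*
	 * A sketch of such an assertion (not enabled here):
	 * KASSERT(off + size <= PAGE_SIZE,
	 *     ("mmu_booke_zero_page_area: off/size span multiple pages"));
	 */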
672 
673 	va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
674 	bzero((caddr_t)va + off, size);
675 }
676 
677 /*
678  * mmu_booke_zero_page zeros the specified hardware page.
679  */
680 static void
681 mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
682 {
683 	vm_offset_t off, va;
684 
685 	va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
686 
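	/* dcbz zeroes an entire data cache block at a time; step by cacheline_size. */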
687 	for (off = 0; off < PAGE_SIZE; off += cacheline_size)
688 		__asm __volatile("dcbz 0,%0" :: "r"(va + off));
689 }
690 
691 /*
692  * mmu_booke_copy_page copies the specified (machine independent) page by
693  * mapping the page into virtual memory and using memcpy to copy the page,
694  * one machine dependent page at a time.
695  */
696 static void
697 mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
698 {
699 	vm_offset_t sva, dva;
700 
701 	sva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(sm));
702 	dva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dm));
703 	memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
704 }
705 
706 static inline void
707 mmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
708     vm_page_t *mb, vm_offset_t b_offset, int xfersize)
709 {
710 	void *a_cp, *b_cp;
711 	vm_offset_t a_pg_offset, b_pg_offset;
712 	int cnt;
713 
714 	vm_page_t pa, pb;
715 
716 	while (xfersize > 0) {
717 		a_pg_offset = a_offset & PAGE_MASK;
718 		pa = ma[a_offset >> PAGE_SHIFT];
719 		b_pg_offset = b_offset & PAGE_MASK;
720 		pb = mb[b_offset >> PAGE_SHIFT];
721 		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
722 		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
723 		a_cp = (caddr_t)((uintptr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pa)) +
724 		    a_pg_offset);
725 		b_cp = (caddr_t)((uintptr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pb)) +
726 		    b_pg_offset);
727 		bcopy(a_cp, b_cp, cnt);
728 		a_offset += cnt;
729 		b_offset += cnt;
730 		xfersize -= cnt;
731 	}
732 }
733 
734 static vm_offset_t
735 mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m)
736 {
737 	return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
738 }
739 
740 static void
741 mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr)
742 {
743 }
744 
745 /**************************************************************************/
746 /* TID handling */
747 /**************************************************************************/
748 
749 /*
750  * Return the largest value log such that 2^log <= num, i.e. floor(log2(num)).
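 * For example (num must be nonzero): ilog2(1) == 0, and both ilog2(4096) and
 * ilog2(4097) return 12.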
751  */
752 static unsigned long
753 ilog2(unsigned long num)
754 {
755 	long lz;
756 
757 	__asm ("cntlzd %0, %1" : "=r" (lz) : "r" (num));
758 	return (63 - lz);
759 }
760 
761 /*
762  * Invalidate all TLB0 entries which match the given TID. Note this is
763  * intended for cases where invalidations should NOT be propagated to
764  * other CPUs.
765  */
766 static void
767 tid_flush(tlbtid_t tid)
768 {
769 	register_t msr;
770 
771 	/* Don't evict kernel translations */
772 	if (tid == TID_KERNEL)
773 		return;
774 
775 	msr = mfmsr();
776 	__asm __volatile("wrteei 0");
777 
778 	/*
779 	 * Newer cores (e500mc and later) have tlbilx, which doesn't broadcast,
780 	 * so use it for PID invalidation.
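	 * The raw opcode 0x7c200024 below appears to encode tlbilx with T=1
	 * (tlbilxpid), which invalidates all TLB entries whose PID matches
	 * MAS6[SPID]; it is emitted as a .long, presumably so that assemblers
	 * lacking the mnemonic can still build this file.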
781 	 */
782 	mtspr(SPR_MAS6, tid << MAS6_SPID0_SHIFT);
783 	__asm __volatile("isync; .long 0x7c200024; isync; msync");
784 
785 	__asm __volatile("wrtee %0" :: "r"(msr));
786 }
787