xref: /freebsd/sys/powerpc/booke/pmap_64.c (revision ce6a89e27cd190313be39bb479880aeda4778436)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Some hw specific parts of this pmap were derived or influenced
 * by NetBSD's ibm4xx pmap module. More generic code is shared with
 * a few other pmap modules from the FreeBSD tree.
 */

 /*
  * VM layout notes:
  *
  * Kernel and user threads run within one common virtual address space
  * defined by AS=0.
  *
  * 64-bit pmap:
  * Virtual address space layout:
  * -----------------------------
  * 0x0000_0000_0000_0000 - 0x3fff_ffff_ffff_ffff      : user process
  * 0x4000_0000_0000_0000 - 0x7fff_ffff_ffff_ffff      : unused
  * 0x8000_0000_0000_0000 - 0xbfff_ffff_ffff_ffff      : mmio region
  * 0xc000_0000_0000_0000 - 0xdfff_ffff_ffff_ffff      : direct map
  * 0xe000_0000_0000_0000 - 0xffff_ffff_ffff_ffff      : KVA
  */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kerneldump.h>
#include <sys/linker.h>
#include <sys/msgbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/uma.h>

#include <machine/_inttypes.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/platform.h>

#include <machine/tlb.h>
#include <machine/spr.h>
#include <machine/md_var.h>
#include <machine/mmuvar.h>
#include <machine/pmap.h>
#include <machine/pte.h>

#include <ddb/ddb.h>

#include "mmu_if.h"

#ifdef  DEBUG
#define debugf(fmt, args...) printf(fmt, ##args)
#else
#define debugf(fmt, args...)
#endif

#define	PRI0ptrX	"016lx"

/**************************************************************************/
/* PMAP */
/**************************************************************************/

unsigned int kernel_pdirs;
static uma_zone_t ptbl_root_zone;

/*
 * Base of the pmap_mapdev() region.  On 32-bit it immediately follows the
 * userspace address range.  On 64-bit it is far above, at (1 << 63), and
 * ranges up to the DMAP, allowing 62 bits of PA.  This is far wider than the
 * widest Book-E address bus; the e6500 has a 40-bit PA space.  This allows
 * us to map akin to the DMAP, with addresses identical to the PA, offset by
 * the base.
 */
#define	VM_MAPDEV_BASE		0x8000000000000000
#define	VM_MAPDEV_PA_MAX	0x4000000000000000 /* Don't encroach on DMAP */
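/*
 * For example, a device register block at PA 0xf_fe00_0000 would be mapped
 * at VA 0x8000_000f_fe00_0000 within this region.
 */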

static void tid_flush(tlbtid_t tid);
static unsigned long ilog2(unsigned long);

/**************************************************************************/
/* Page table management */
/**************************************************************************/
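/*
 * User page tables are three levels deep: the root page (pm_pp2d) holds
 * pointers to page directories, each page directory (pdir) holds pointers
 * to page tables (ptbl), and the page tables hold the PTEs themselves.
 * A virtual address is split with PP2D_IDX(), PDIR_IDX() and PTBL_IDX()
 * to index each level in turn.
 */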

static struct rwlock_padalign pvh_global_lock;

#define PMAP_ROOT_SIZE	(sizeof(pte_t***) * PP2D_NENTRIES)
static pte_t *ptbl_alloc(mmu_t, pmap_t, pte_t **,
			 unsigned int, boolean_t);
static void ptbl_free(mmu_t, pmap_t, pte_t **, unsigned int, vm_page_t);
static void ptbl_hold(mmu_t, pmap_t, pte_t **, unsigned int);
static int ptbl_unhold(mmu_t, pmap_t, vm_offset_t);

static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
static int pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);
static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
static void kernel_pte_alloc(vm_offset_t, vm_offset_t, vm_offset_t);

/**************************************************************************/
/* Page table related */
/**************************************************************************/

/* Initialize pool of kva ptbl buffers. */
static void
ptbl_init(void)
{
}

/* Get a pointer to a PTE in a page table. */
static __inline pte_t *
pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	pte_t         **pdir;
	pte_t          *ptbl;

	KASSERT((pmap != NULL), ("pte_find: invalid pmap"));

	pdir = pmap->pm_pp2d[PP2D_IDX(va)];
	if (!pdir)
		return NULL;
	ptbl = pdir[PDIR_IDX(va)];
	return ((ptbl != NULL) ? &ptbl[PTBL_IDX(va)] : NULL);
}

/*
 * Allocate a page of pointers to page directories; do not preallocate the
 * page tables.
 */
static pte_t  **
pdir_alloc(mmu_t mmu, pmap_t pmap, unsigned int pp2d_idx, bool nosleep)
{
	vm_page_t	m;
	pte_t          **pdir;
	int		req;

	req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
	while ((m = vm_page_alloc(NULL, pp2d_idx, req)) == NULL) {
		PMAP_UNLOCK(pmap);
		if (nosleep) {
			return (NULL);
		}
		vm_wait(NULL);
		PMAP_LOCK(pmap);
	}

	/* Zero the whole pdir. */
	pdir = (pte_t **)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	mmu_booke_zero_page(mmu, m);

	return (pdir);
}

/* Free the pdir page and invalidate the pp2d entry. */
static void
pdir_free(mmu_t mmu, pmap_t pmap, unsigned int pp2d_idx, vm_page_t m)
{
	pte_t         **pdir;

	pdir = pmap->pm_pp2d[pp2d_idx];

	KASSERT((pdir != NULL), ("pdir_free: null pdir"));

	pmap->pm_pp2d[pp2d_idx] = NULL;

	vm_wire_sub(1);
	vm_page_free_zero(m);
}

/*
 * Decrement the pdir page hold count and attempt to free the pdir page.
 * Called when removing a directory entry from the pdir.
 *
 * Return 1 if the pdir page was freed.
 */
static int
pdir_unhold(mmu_t mmu, pmap_t pmap, u_int pp2d_idx)
{
	pte_t         **pdir;
	vm_paddr_t	pa;
	vm_page_t	m;

	KASSERT((pmap != kernel_pmap),
		("pdir_unhold: unholding kernel pdir!"));

	pdir = pmap->pm_pp2d[pp2d_idx];

	/* decrement hold count */
	pa = DMAP_TO_PHYS((vm_offset_t) pdir);
	m = PHYS_TO_VM_PAGE(pa);

	/*
	 * Free pdir page if there are no dir entries in this pdir.
	 */
	m->ref_count--;
	if (m->ref_count == 0) {
		pdir_free(mmu, pmap, pp2d_idx, m);
		return (1);
	}
	return (0);
}

/*
 * Increment the hold count for a pdir page. This routine is used when a new
 * ptbl entry is being inserted into the pdir.
 */
static void
pdir_hold(mmu_t mmu, pmap_t pmap, pte_t ** pdir)
{
	vm_page_t	m;

	KASSERT((pmap != kernel_pmap),
		("pdir_hold: holding kernel pdir!"));

	KASSERT((pdir != NULL), ("pdir_hold: null pdir"));

	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pdir));
	m->ref_count++;
}

/* Allocate page table. */
static pte_t   *
ptbl_alloc(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx,
    boolean_t nosleep)
{
	vm_page_t	m;
	pte_t          *ptbl;
	int		req;

	KASSERT((pdir[pdir_idx] == NULL),
		("%s: valid ptbl entry exists!", __func__));

	req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
	while ((m = vm_page_alloc(NULL, pdir_idx, req)) == NULL) {
		if (nosleep)
			return (NULL);
		PMAP_UNLOCK(pmap);
		rw_wunlock(&pvh_global_lock);
		vm_wait(NULL);
		rw_wlock(&pvh_global_lock);
		PMAP_LOCK(pmap);
	}

	/* Zero whole ptbl. */
	ptbl = (pte_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	mmu_booke_zero_page(mmu, m);

	return (ptbl);
}

/* Free the ptbl page and invalidate the pdir entry. */
static void
ptbl_free(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx, vm_page_t m)
{
	pte_t          *ptbl;

	ptbl = pdir[pdir_idx];

	KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));

	pdir[pdir_idx] = NULL;

	vm_wire_sub(1);
	vm_page_free_zero(m);
}

/*
 * Decrement the ptbl page hold count and attempt to free the ptbl page.
 * Called when removing a pte entry from the ptbl.
 *
 * Return 1 if the ptbl page was freed.
 */
static int
ptbl_unhold(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	pte_t          *ptbl;
	vm_page_t	m;
	u_int		pp2d_idx;
	pte_t         **pdir;
	u_int		pdir_idx;

	pp2d_idx = PP2D_IDX(va);
	pdir_idx = PDIR_IDX(va);

	KASSERT((pmap != kernel_pmap),
		("ptbl_unhold: unholding kernel ptbl!"));

	pdir = pmap->pm_pp2d[pp2d_idx];
	ptbl = pdir[pdir_idx];

	/* decrement hold count */
	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) ptbl));

	/*
	 * Free the ptbl page if there are no pte entries left in this ptbl.
	 */
	m->ref_count--;
	if (m->ref_count == 0) {
		ptbl_free(mmu, pmap, pdir, pdir_idx, m);
		pdir_unhold(mmu, pmap, pp2d_idx);
		return (1);
	}
	return (0);
}

/*
 * Increment the hold count for a ptbl page. This routine is used when a new
 * pte entry is being inserted into the ptbl.
 */
static void
ptbl_hold(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx)
{
	pte_t          *ptbl;
	vm_page_t	m;

	KASSERT((pmap != kernel_pmap),
		("ptbl_hold: holding kernel ptbl!"));

	ptbl = pdir[pdir_idx];

	KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));

	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) ptbl));
	m->ref_count++;
}

/*
 * Clean a pte entry and try to free the page table page, if requested.
 *
 * Return 1 if ptbl pages were freed, otherwise return 0.
 */
static int
pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, u_int8_t flags)
{
	vm_page_t	m;
	pte_t          *pte;

	pte = pte_find(mmu, pmap, va);
	KASSERT(pte != NULL, ("%s: NULL pte", __func__));

	if (!PTE_ISVALID(pte))
		return (0);

	/* Get vm_page_t for mapped pte. */
	m = PHYS_TO_VM_PAGE(PTE_PA(pte));

	if (PTE_ISWIRED(pte))
		pmap->pm_stats.wired_count--;

	/* Handle managed entry. */
	if (PTE_ISMANAGED(pte)) {

		/* Handle modified pages. */
		if (PTE_ISMODIFIED(pte))
			vm_page_dirty(m);

		/* Referenced pages. */
		if (PTE_ISREFERENCED(pte))
			vm_page_aflag_set(m, PGA_REFERENCED);

		/* Remove pv_entry from pv_list. */
		pv_remove(pmap, va, m);
	} else if (pmap == kernel_pmap && m && m->md.pv_tracked) {
		pv_remove(pmap, va, m);
		if (TAILQ_EMPTY(&m->md.pv_list))
			m->md.pv_tracked = false;
	}
	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	tlb0_flush_entry(va);
	*pte = 0;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);

	pmap->pm_stats.resident_count--;

	if (flags & PTBL_UNHOLD) {
		return (ptbl_unhold(mmu, pmap, va));
	}
	return (0);
}

/*
 * Insert PTE for a given page and virtual address.
 */
static int
pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
    boolean_t nosleep)
{
	unsigned int	pp2d_idx = PP2D_IDX(va);
	unsigned int	pdir_idx = PDIR_IDX(va);
	unsigned int	ptbl_idx = PTBL_IDX(va);
	pte_t          *ptbl, *pte, pte_tmp;
	pte_t         **pdir;

	/* Get the page directory pointer. */
	pdir = pmap->pm_pp2d[pp2d_idx];
	if (pdir == NULL)
		pdir = pdir_alloc(mmu, pmap, pp2d_idx, nosleep);

	/* Get the page table pointer. */
	ptbl = pdir[pdir_idx];

	if (ptbl == NULL) {
		/* Allocate page table pages. */
		ptbl = ptbl_alloc(mmu, pmap, pdir, pdir_idx, nosleep);
		if (ptbl == NULL) {
			KASSERT(nosleep, ("nosleep and NULL ptbl"));
			return (ENOMEM);
		}
		pte = &ptbl[ptbl_idx];
	} else {
		/*
		 * Check if there is a valid mapping for the requested va;
		 * if there is, remove it.
		 */
		pte = &ptbl[ptbl_idx];
		if (PTE_ISVALID(pte)) {
			pte_remove(mmu, pmap, va, PTBL_HOLD);
		} else {
			/*
			 * The pte is not used; increment the hold count for
			 * the ptbl page.
			 */
			if (pmap != kernel_pmap)
				ptbl_hold(mmu, pmap, pdir, pdir_idx);
		}
	}

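	/*
	 * Link a newly allocated ptbl into its pdir and, if the pdir itself
	 * is new, link the pdir into the root page.  Holds are only tracked
	 * for user pmaps; a hold is taken on the pdir unless it was
	 * allocated just above.
	 */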
	if (pdir[pdir_idx] == NULL) {
		if (pmap != kernel_pmap && pmap->pm_pp2d[pp2d_idx] != NULL)
			pdir_hold(mmu, pmap, pdir);
		pdir[pdir_idx] = ptbl;
	}
	if (pmap->pm_pp2d[pp2d_idx] == NULL)
		pmap->pm_pp2d[pp2d_idx] = pdir;

	/*
	 * Insert pv_entry into pv_list for mapped page if part of managed
	 * memory.
	 */
	if ((m->oflags & VPO_UNMANAGED) == 0) {
		flags |= PTE_MANAGED;

		/* Create and insert pv entry. */
		pv_insert(pmap, va, m);
	}

	pmap->pm_stats.resident_count++;

	pte_tmp = PTE_RPN_FROM_PA(VM_PAGE_TO_PHYS(m));
	pte_tmp |= (PTE_VALID | flags);

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	tlb0_flush_entry(va);
	*pte = pte_tmp;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);

	return (0);
}

/* Return the pa for the given pmap/va. */
static	vm_paddr_t
pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t	pa = 0;
	pte_t          *pte;

	pte = pte_find(mmu, pmap, va);
	if ((pte != NULL) && PTE_ISVALID(pte))
		pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
	return (pa);
}


/*
 * Set up the kernel page tables: wire the preallocated pdir and ptbl pages
 * (starting at 'pdir') into the kernel pmap and fill in PTEs covering the
 * kernel image from 'addr' to 'data_end'.
 */
static void
kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr, vm_offset_t pdir)
{
	int		i, j;
	vm_offset_t	va;
	pte_t		*pte;

	va = addr;
	/* Initialize kernel pdir */
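	/*
	 * The preallocated block at 'pdir' holds the kernel pdir pages
	 * first, followed by the kernel ptbl pages; wire both into
	 * pm_pp2d here.
	 */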
	for (i = 0; i < kernel_pdirs; i++) {
		kernel_pmap->pm_pp2d[i + PP2D_IDX(va)] =
		    (pte_t **)(pdir + (i * PAGE_SIZE * PDIR_PAGES));
		for (j = PDIR_IDX(va + (i * PAGE_SIZE * PDIR_NENTRIES * PTBL_NENTRIES));
		    j < PDIR_NENTRIES; j++) {
			kernel_pmap->pm_pp2d[i + PP2D_IDX(va)][j] =
			    (pte_t *)(pdir + (kernel_pdirs * PAGE_SIZE) +
			     (((i * PDIR_NENTRIES) + j) * PAGE_SIZE));
		}
	}

	/*
	 * Fill in PTEs covering kernel code and data. They are not required
	 * for address translation, as this area is covered by static TLB1
	 * entries, but they are needed for pte_vatopa() to work correctly
	 * with kernel area addresses.
	 */
	for (va = addr; va < data_end; va += PAGE_SIZE) {
		pte = &(kernel_pmap->pm_pp2d[PP2D_IDX(va)][PDIR_IDX(va)][PTBL_IDX(va)]);
		*pte = PTE_RPN_FROM_PA(kernload + (va - kernstart));
		*pte |= PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
		    PTE_VALID | PTE_PS_4KB;
	}
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
static void
mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
{
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap,
	    curthread->td_proc->p_pid, curthread->td_proc->p_comm);

	KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap"));

	for (i = 0; i < MAXCPU; i++)
		pmap->pm_tid[i] = TID_NONE;
	CPU_ZERO(&pmap->pm_active);
	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
	pmap->pm_pp2d = uma_zalloc(ptbl_root_zone, M_WAITOK);
	bzero(pmap->pm_pp2d, sizeof(pte_t **) * PP2D_NENTRIES);
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by mmu_booke_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
static void
mmu_booke_release(mmu_t mmu, pmap_t pmap)
{

	KASSERT(pmap->pm_stats.resident_count == 0,
	    ("pmap_release: pmap resident count %ld != 0",
	    pmap->pm_stats.resident_count));
	uma_zfree(ptbl_root_zone, pmap->pm_pp2d);
}

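/*
 * Sync the instruction cache for a range of addresses by looking up the
 * physical address of each page and flushing it through the direct map.
 */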
static void
mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
{
	pte_t *pte;
	vm_paddr_t pa = 0;
	int sync_sz, valid;

	while (sz > 0) {
		PMAP_LOCK(pm);
		pte = pte_find(mmu, pm, va);
		valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0;
		if (valid)
			pa = PTE_PA(pte);
		PMAP_UNLOCK(pm);
		sync_sz = PAGE_SIZE - (va & PAGE_MASK);
		sync_sz = min(sync_sz, sz);
		if (valid) {
			pa += (va & PAGE_MASK);
			__syncicache((void *)PHYS_TO_DMAP(pa), sync_sz);
		}
		va += sync_sz;
		sz -= sync_sz;
	}
}

/*
 * mmu_booke_zero_page_area zeros the specified hardware page by
 * mapping it into virtual memory and using bzero to clear
 * its contents.
 *
 * off and size must reside within a single page.
 */
static void
mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
{
	vm_offset_t va;

	/* XXX KASSERT off and size are within a single page? */

	va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	bzero((caddr_t)va + off, size);
}

/*
 * mmu_booke_zero_page zeros the specified hardware page.
 */
static void
mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
{
	vm_offset_t off, va;

	va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));

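	/*
	 * The page is accessible through the direct map, so zero it one
	 * cache line at a time with dcbz rather than setting up a temporary
	 * mapping.
	 */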
	for (off = 0; off < PAGE_SIZE; off += cacheline_size)
		__asm __volatile("dcbz 0,%0" :: "r"(va + off));
}

/*
 * mmu_booke_copy_page copies the specified (machine independent) page by
 * mapping the page into virtual memory and using memcpy to copy the page,
 * one machine dependent page at a time.
 */
static void
mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
{
	vm_offset_t sva, dva;

	sva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(sm));
	dva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dm));
	memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
}

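/*
 * Copy 'xfersize' bytes between two page arrays at arbitrary offsets, one
 * page-crossing chunk at a time, through the direct map.
 */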
static inline void
mmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{
	void *a_cp, *b_cp;
	vm_offset_t a_pg_offset, b_pg_offset;
	int cnt;

	vm_page_t pa, pb;

	while (xfersize > 0) {
		a_pg_offset = a_offset & PAGE_MASK;
		pa = ma[a_offset >> PAGE_SHIFT];
		b_pg_offset = b_offset & PAGE_MASK;
		pb = mb[b_offset >> PAGE_SHIFT];
		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
		a_cp = (caddr_t)((uintptr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pa)) +
		    a_pg_offset);
		b_cp = (caddr_t)((uintptr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pb)) +
		    b_pg_offset);
		bcopy(a_cp, b_cp, cnt);
		a_offset += cnt;
		b_offset += cnt;
		xfersize -= cnt;
	}
}

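/*
 * With a full direct map there is no need for temporary per-CPU mappings:
 * quick_enter simply returns the page's DMAP address and quick_remove is a
 * no-op.
 */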
static vm_offset_t
mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m)
{
	return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
}

static void
mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr)
{
}

/**************************************************************************/
/* TID handling */
/**************************************************************************/

/*
 * Return the largest uint value log such that 2^log <= num.
 */
static unsigned long
ilog2(unsigned long num)
{
	long lz;

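	/* cntlzd counts leading zero bits, so 63 - lz is the index of the MSB. */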
	__asm ("cntlzd %0, %1" : "=r" (lz) : "r" (num));
	return (63 - lz);
}

/*
 * Invalidate all TLB0 entries which match the given TID. Note this is
 * dedicated for cases when invalidations should NOT be propagated to other
 * CPUs.
 */
static void
tid_flush(tlbtid_t tid)
{
	register_t msr;

	/* Don't evict kernel translations */
	if (tid == TID_KERNEL)
		return;

	msr = mfmsr();
	__asm __volatile("wrteei 0");

	/*
	 * Newer cores (e500mc and later) have tlbilx, which doesn't
	 * broadcast, so use it for PID invalidation.
	 */
	mtspr(SPR_MAS6, tid << MAS6_SPID0_SHIFT);
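	/*
	 * 0x7c200024 encodes tlbilx 1, 0, 0 (tlbilxpid): invalidate all TLB
	 * entries matching the PID programmed into MAS6[SPID0] above.
	 */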
	__asm __volatile("isync; .long 0x7c200024; isync; msync");

	__asm __volatile("wrtee %0" :: "r"(msr));

760