xref: /freebsd/sys/powerpc/booke/pmap.c (revision e9b1dc32c9bd2ebae5f9e140bfa0e0321bc366b5)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
5  * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
20  * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
22  * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
23  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
24  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
25  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
26  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  *
28  * Some hardware-specific parts of this pmap were derived from or
29  * influenced by NetBSD's ibm4xx pmap module. More generic code is
30  * shared with a few other pmap modules from the FreeBSD tree.
31  */
32 
33  /*
34   * VM layout notes:
35   *
36   * Kernel and user threads run within one common virtual address space
37   * defined by AS=0.
38   *
39   * 32-bit pmap:
40   * Virtual address space layout:
41   * -----------------------------
42   * 0x0000_0000 - 0x7fff_ffff	: user process
43   * 0x8000_0000 - 0xbfff_ffff	: pmap_mapdev()-ed area (PCI/PCIE etc.)
44   * 0xc000_0000 - 0xc0ff_ffff	: kernel reserved
45   *   0xc000_0000 - data_end	: kernel code+data, env, metadata etc.
46   * 0xc100_0000 - 0xffff_ffff	: KVA
47   *   0xc100_0000 - 0xc100_3fff : reserved for page zero/copy
48   *   0xc100_4000 - 0xc200_3fff : reserved for ptbl bufs
49   *   0xc200_4000 - 0xc200_8fff : guard page + kstack0
50   *   0xc200_9000 - 0xfeef_ffff	: actual free KVA space
51   *
52   * 64-bit pmap:
53   * Virtual address space layout:
54   * -----------------------------
55   * 0x0000_0000_0000_0000 - 0xbfff_ffff_ffff_ffff      : user process
56   *   0x0000_0000_0000_0000 - 0x8fff_ffff_ffff_ffff    : text, data, heap, maps, libraries
57   *   0x9000_0000_0000_0000 - 0xafff_ffff_ffff_ffff    : mmio region
58   *   0xb000_0000_0000_0000 - 0xbfff_ffff_ffff_ffff    : stack
59   * 0xc000_0000_0000_0000 - 0xcfff_ffff_ffff_ffff      : kernel reserved
60   *   0xc000_0000_0000_0000 - endkernel-1              : kernel code & data
61   *               endkernel - msgbufp-1                : flat device tree
62   *                 msgbufp - ptbl_bufs-1              : message buffer
63   *               ptbl_bufs - kernel_pdir-1            : kernel page tables
64   *             kernel_pdir - kernel_pp2d-1            : kernel page directory
65   *             kernel_pp2d - .                        : kernel pointers to page directory
66   *      pmap_zero_copy_min - crashdumpmap-1           : reserved for page zero/copy
67   *            crashdumpmap - ptbl_buf_pool_vabase-1   : reserved for ptbl bufs
68   *    ptbl_buf_pool_vabase - virtual_avail-1          : user page directories and page tables
69   *           virtual_avail - 0xcfff_ffff_ffff_ffff    : actual free KVA space
70   * 0xd000_0000_0000_0000 - 0xdfff_ffff_ffff_ffff      : coprocessor region
71   * 0xe000_0000_0000_0000 - 0xefff_ffff_ffff_ffff      : mmio region
72   * 0xf000_0000_0000_0000 - 0xffff_ffff_ffff_ffff      : direct map
73   *   0xf000_0000_0000_0000 - +Maxmem                  : physmem map
74   *                         - 0xffff_ffff_ffff_ffff    : device direct map
75   */
76 
77 #include <sys/cdefs.h>
78 __FBSDID("$FreeBSD$");
79 
80 #include "opt_ddb.h"
81 #include "opt_kstack_pages.h"
82 
83 #include <sys/param.h>
84 #include <sys/conf.h>
85 #include <sys/malloc.h>
86 #include <sys/ktr.h>
87 #include <sys/proc.h>
88 #include <sys/user.h>
89 #include <sys/queue.h>
90 #include <sys/systm.h>
91 #include <sys/kernel.h>
92 #include <sys/kerneldump.h>
93 #include <sys/linker.h>
94 #include <sys/msgbuf.h>
95 #include <sys/lock.h>
96 #include <sys/mutex.h>
97 #include <sys/rwlock.h>
98 #include <sys/sched.h>
99 #include <sys/smp.h>
100 #include <sys/vmmeter.h>
101 
102 #include <vm/vm.h>
103 #include <vm/vm_page.h>
104 #include <vm/vm_kern.h>
105 #include <vm/vm_pageout.h>
106 #include <vm/vm_extern.h>
107 #include <vm/vm_object.h>
108 #include <vm/vm_param.h>
109 #include <vm/vm_map.h>
110 #include <vm/vm_pager.h>
111 #include <vm/vm_phys.h>
112 #include <vm/vm_pagequeue.h>
113 #include <vm/uma.h>
114 
115 #include <machine/_inttypes.h>
116 #include <machine/cpu.h>
117 #include <machine/pcb.h>
118 #include <machine/platform.h>
119 
120 #include <machine/tlb.h>
121 #include <machine/spr.h>
122 #include <machine/md_var.h>
123 #include <machine/mmuvar.h>
124 #include <machine/pmap.h>
125 #include <machine/pte.h>
126 
127 #include <ddb/ddb.h>
128 
129 #include "mmu_if.h"
130 
131 #define	SPARSE_MAPDEV
132 #ifdef  DEBUG
133 #define debugf(fmt, args...) printf(fmt, ##args)
134 #else
135 #define debugf(fmt, args...)
136 #endif
137 
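/*
 * Printf width/format suffix used with debugf() to print a vm_offset_t or
 * pointer as zero-padded hex; the per-ABI value is selected below.
 */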
138 #ifdef __powerpc64__
139 #define	PRI0ptrX	"016lx"
140 #else
141 #define	PRI0ptrX	"08x"
142 #endif
143 
144 #define TODO			panic("%s: not implemented", __func__);
145 
146 extern unsigned char _etext[];
147 extern unsigned char _end[];
148 
149 extern uint32_t *bootinfo;
150 
151 vm_paddr_t kernload;
152 vm_offset_t kernstart;
153 vm_size_t kernsize;
154 
155 /* Message buffer and tables. */
156 static vm_offset_t data_start;
157 static vm_size_t data_end;
158 
159 /* Phys/avail memory regions. */
160 static struct mem_region *availmem_regions;
161 static int availmem_regions_sz;
162 static struct mem_region *physmem_regions;
163 static int physmem_regions_sz;
164 
165 /* Reserved KVA space and mutex for mmu_booke_zero_page. */
166 static vm_offset_t zero_page_va;
167 static struct mtx zero_page_mutex;
168 
169 static struct mtx tlbivax_mutex;
170 
171 /* Reserved KVA space and mutex for mmu_booke_copy_page. */
172 static vm_offset_t copy_page_src_va;
173 static vm_offset_t copy_page_dst_va;
174 static struct mtx copy_page_mutex;
175 
176 /**************************************************************************/
177 /* PMAP */
178 /**************************************************************************/
179 
180 static int mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
181     vm_prot_t, u_int flags, int8_t psind);
182 
183 unsigned int kptbl_min;		/* Index of the first kernel ptbl. */
184 unsigned int kernel_ptbls;	/* Number of KVA ptbls. */
185 #ifdef __powerpc64__
186 unsigned int kernel_pdirs;
187 #endif
188 
189 /*
190  * If a user pmap is processed with mmu_booke_remove and its resident count
191  * drops to 0, there are no more pages to remove, so we need not continue.
192  */
193 #define PMAP_REMOVE_DONE(pmap) \
194 	((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0)
195 
196 #if defined(COMPAT_FREEBSD32) || !defined(__powerpc64__)
197 extern int elf32_nxstack;
198 #endif
199 
200 /**************************************************************************/
201 /* TLB and TID handling */
202 /**************************************************************************/
203 
204 /* Translation ID busy table */
205 static volatile pmap_t tidbusy[MAXCPU][TID_MAX + 1];
206 
207 /*
208  * TLB0 capabilities (number of entries, ways, etc.). These can vary between
209  * e500 core revisions and should be read from h/w registers during early config.
210  */
211 uint32_t tlb0_entries;
212 uint32_t tlb0_ways;
213 uint32_t tlb0_entries_per_way;
214 uint32_t tlb1_entries;
215 
216 #define TLB0_ENTRIES		(tlb0_entries)
217 #define TLB0_WAYS		(tlb0_ways)
218 #define TLB0_ENTRIES_PER_WAY	(tlb0_entries_per_way)
219 
220 #define TLB1_ENTRIES (tlb1_entries)
221 
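/*
 * Next KVA handed out for TLB1-backed (sparse) device mappings; starts just
 * above the user address space.
 */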
222 static vm_offset_t tlb1_map_base = VM_MAXUSER_ADDRESS + PAGE_SIZE;
223 
224 static tlbtid_t tid_alloc(struct pmap *);
225 static void tid_flush(tlbtid_t tid);
226 
227 #ifdef DDB
228 #ifdef __powerpc64__
229 static void tlb_print_entry(int, uint32_t, uint64_t, uint32_t, uint32_t);
230 #else
231 static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t);
232 #endif
233 #endif
234 
235 static void tlb1_read_entry(tlb_entry_t *, unsigned int);
236 static void tlb1_write_entry(tlb_entry_t *, unsigned int);
237 static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *);
238 static vm_size_t tlb1_mapin_region(vm_offset_t, vm_paddr_t, vm_size_t);
239 
240 static vm_size_t tsize2size(unsigned int);
241 static unsigned int size2tsize(vm_size_t);
242 static unsigned int ilog2(unsigned long);
243 
244 static void set_mas4_defaults(void);
245 
246 static inline void tlb0_flush_entry(vm_offset_t);
247 static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int);
248 
249 /**************************************************************************/
250 /* Page table management */
251 /**************************************************************************/
252 
253 static struct rwlock_padalign pvh_global_lock;
254 
255 /* Data for the pv entry allocation mechanism */
256 static uma_zone_t pvzone;
257 static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
258 
259 #define PV_ENTRY_ZONE_MIN	2048	/* min pv entries in uma zone */
260 
261 #ifndef PMAP_SHPGPERPROC
262 #define PMAP_SHPGPERPROC	200
263 #endif
264 
265 static void ptbl_init(void);
266 static struct ptbl_buf *ptbl_buf_alloc(void);
267 static void ptbl_buf_free(struct ptbl_buf *);
268 static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);
269 
270 #ifdef __powerpc64__
271 static pte_t *ptbl_alloc(mmu_t, pmap_t, pte_t **,
272 			 unsigned int, boolean_t);
273 static void ptbl_free(mmu_t, pmap_t, pte_t **, unsigned int);
274 static void ptbl_hold(mmu_t, pmap_t, pte_t **, unsigned int);
275 static int ptbl_unhold(mmu_t, pmap_t, vm_offset_t);
276 #else
277 static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int, boolean_t);
278 static void ptbl_free(mmu_t, pmap_t, unsigned int);
279 static void ptbl_hold(mmu_t, pmap_t, unsigned int);
280 static int ptbl_unhold(mmu_t, pmap_t, unsigned int);
281 #endif
282 
283 static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
284 static int pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
285 static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);
286 static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
287 static void kernel_pte_alloc(vm_offset_t, vm_offset_t, vm_offset_t);
288 
289 static pv_entry_t pv_alloc(void);
290 static void pv_free(pv_entry_t);
291 static void pv_insert(pmap_t, vm_offset_t, vm_page_t);
292 static void pv_remove(pmap_t, vm_offset_t, vm_page_t);
293 
294 static void booke_pmap_init_qpages(void);
295 
296 /* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */
297 #ifdef __powerpc64__
298 #define PTBL_BUFS               (16UL * 16 * 16)
299 #else
300 #define PTBL_BUFS		(128 * 16)
301 #endif
302 
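/*
 * Tracks one preallocated KVA slot used to map a ptbl (or, on powerpc64,
 * a pdir as well).
 */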
303 struct ptbl_buf {
304 	TAILQ_ENTRY(ptbl_buf) link;	/* list link */
305 	vm_offset_t kva;		/* va of mapping */
306 };
307 
308 /* ptbl free list and a lock used for access synchronization. */
309 static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist;
310 static struct mtx ptbl_buf_freelist_lock;
311 
312 /* Base address of kva space allocated for ptbl bufs. */
313 static vm_offset_t ptbl_buf_pool_vabase;
314 
315 /* Pointer to ptbl_buf structures. */
316 static struct ptbl_buf *ptbl_bufs;
317 
318 #ifdef SMP
319 extern tlb_entry_t __boot_tlb1[];
320 void pmap_bootstrap_ap(volatile uint32_t *);
321 #endif
322 
323 /*
324  * Kernel MMU interface
325  */
326 static void		mmu_booke_clear_modify(mmu_t, vm_page_t);
327 static void		mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t,
328     vm_size_t, vm_offset_t);
329 static void		mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
330 static void		mmu_booke_copy_pages(mmu_t, vm_page_t *,
331     vm_offset_t, vm_page_t *, vm_offset_t, int);
332 static int		mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
333     vm_prot_t, u_int flags, int8_t psind);
334 static void		mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
335     vm_page_t, vm_prot_t);
336 static void		mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t,
337     vm_prot_t);
338 static vm_paddr_t	mmu_booke_extract(mmu_t, pmap_t, vm_offset_t);
339 static vm_page_t	mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t,
340     vm_prot_t);
341 static void		mmu_booke_init(mmu_t);
342 static boolean_t	mmu_booke_is_modified(mmu_t, vm_page_t);
343 static boolean_t	mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
344 static boolean_t	mmu_booke_is_referenced(mmu_t, vm_page_t);
345 static int		mmu_booke_ts_referenced(mmu_t, vm_page_t);
346 static vm_offset_t	mmu_booke_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t,
347     int);
348 static int		mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t,
349     vm_paddr_t *);
350 static void		mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t,
351     vm_object_t, vm_pindex_t, vm_size_t);
352 static boolean_t	mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t);
353 static void		mmu_booke_page_init(mmu_t, vm_page_t);
354 static int		mmu_booke_page_wired_mappings(mmu_t, vm_page_t);
355 static void		mmu_booke_pinit(mmu_t, pmap_t);
356 static void		mmu_booke_pinit0(mmu_t, pmap_t);
357 static void		mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
358     vm_prot_t);
359 static void		mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
360 static void		mmu_booke_qremove(mmu_t, vm_offset_t, int);
361 static void		mmu_booke_release(mmu_t, pmap_t);
362 static void		mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
363 static void		mmu_booke_remove_all(mmu_t, vm_page_t);
364 static void		mmu_booke_remove_write(mmu_t, vm_page_t);
365 static void		mmu_booke_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
366 static void		mmu_booke_zero_page(mmu_t, vm_page_t);
367 static void		mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
368 static void		mmu_booke_activate(mmu_t, struct thread *);
369 static void		mmu_booke_deactivate(mmu_t, struct thread *);
370 static void		mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
371 static void		*mmu_booke_mapdev(mmu_t, vm_paddr_t, vm_size_t);
372 static void		*mmu_booke_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t);
373 static void		mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t);
374 static vm_paddr_t	mmu_booke_kextract(mmu_t, vm_offset_t);
375 static void		mmu_booke_kenter(mmu_t, vm_offset_t, vm_paddr_t);
376 static void		mmu_booke_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t);
377 static void		mmu_booke_kremove(mmu_t, vm_offset_t);
378 static boolean_t	mmu_booke_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
379 static void		mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t,
380     vm_size_t);
381 static void		mmu_booke_dumpsys_map(mmu_t, vm_paddr_t pa, size_t,
382     void **);
383 static void		mmu_booke_dumpsys_unmap(mmu_t, vm_paddr_t pa, size_t,
384     void *);
385 static void		mmu_booke_scan_init(mmu_t);
386 static vm_offset_t	mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m);
387 static void		mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr);
388 static int		mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr,
389     vm_size_t sz, vm_memattr_t mode);
390 static int		mmu_booke_map_user_ptr(mmu_t mmu, pmap_t pm,
391     volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
392 static int		mmu_booke_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr,
393     int *is_user, vm_offset_t *decoded_addr);
394 
395 
396 static mmu_method_t mmu_booke_methods[] = {
397 	/* pmap dispatcher interface */
398 	MMUMETHOD(mmu_clear_modify,	mmu_booke_clear_modify),
399 	MMUMETHOD(mmu_copy,		mmu_booke_copy),
400 	MMUMETHOD(mmu_copy_page,	mmu_booke_copy_page),
401 	MMUMETHOD(mmu_copy_pages,	mmu_booke_copy_pages),
402 	MMUMETHOD(mmu_enter,		mmu_booke_enter),
403 	MMUMETHOD(mmu_enter_object,	mmu_booke_enter_object),
404 	MMUMETHOD(mmu_enter_quick,	mmu_booke_enter_quick),
405 	MMUMETHOD(mmu_extract,		mmu_booke_extract),
406 	MMUMETHOD(mmu_extract_and_hold,	mmu_booke_extract_and_hold),
407 	MMUMETHOD(mmu_init,		mmu_booke_init),
408 	MMUMETHOD(mmu_is_modified,	mmu_booke_is_modified),
409 	MMUMETHOD(mmu_is_prefaultable,	mmu_booke_is_prefaultable),
410 	MMUMETHOD(mmu_is_referenced,	mmu_booke_is_referenced),
411 	MMUMETHOD(mmu_ts_referenced,	mmu_booke_ts_referenced),
412 	MMUMETHOD(mmu_map,		mmu_booke_map),
413 	MMUMETHOD(mmu_mincore,		mmu_booke_mincore),
414 	MMUMETHOD(mmu_object_init_pt,	mmu_booke_object_init_pt),
415 	MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick),
416 	MMUMETHOD(mmu_page_init,	mmu_booke_page_init),
417 	MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings),
418 	MMUMETHOD(mmu_pinit,		mmu_booke_pinit),
419 	MMUMETHOD(mmu_pinit0,		mmu_booke_pinit0),
420 	MMUMETHOD(mmu_protect,		mmu_booke_protect),
421 	MMUMETHOD(mmu_qenter,		mmu_booke_qenter),
422 	MMUMETHOD(mmu_qremove,		mmu_booke_qremove),
423 	MMUMETHOD(mmu_release,		mmu_booke_release),
424 	MMUMETHOD(mmu_remove,		mmu_booke_remove),
425 	MMUMETHOD(mmu_remove_all,	mmu_booke_remove_all),
426 	MMUMETHOD(mmu_remove_write,	mmu_booke_remove_write),
427 	MMUMETHOD(mmu_sync_icache,	mmu_booke_sync_icache),
428 	MMUMETHOD(mmu_unwire,		mmu_booke_unwire),
429 	MMUMETHOD(mmu_zero_page,	mmu_booke_zero_page),
430 	MMUMETHOD(mmu_zero_page_area,	mmu_booke_zero_page_area),
431 	MMUMETHOD(mmu_activate,		mmu_booke_activate),
432 	MMUMETHOD(mmu_deactivate,	mmu_booke_deactivate),
433 	MMUMETHOD(mmu_quick_enter_page, mmu_booke_quick_enter_page),
434 	MMUMETHOD(mmu_quick_remove_page, mmu_booke_quick_remove_page),
435 
436 	/* Internal interfaces */
437 	MMUMETHOD(mmu_bootstrap,	mmu_booke_bootstrap),
438 	MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped),
439 	MMUMETHOD(mmu_mapdev,		mmu_booke_mapdev),
440 	MMUMETHOD(mmu_mapdev_attr,	mmu_booke_mapdev_attr),
441 	MMUMETHOD(mmu_kenter,		mmu_booke_kenter),
442 	MMUMETHOD(mmu_kenter_attr,	mmu_booke_kenter_attr),
443 	MMUMETHOD(mmu_kextract,		mmu_booke_kextract),
444 	MMUMETHOD(mmu_kremove,		mmu_booke_kremove),
445 	MMUMETHOD(mmu_unmapdev,		mmu_booke_unmapdev),
446 	MMUMETHOD(mmu_change_attr,	mmu_booke_change_attr),
447 	MMUMETHOD(mmu_map_user_ptr,	mmu_booke_map_user_ptr),
448 	MMUMETHOD(mmu_decode_kernel_ptr, mmu_booke_decode_kernel_ptr),
449 
450 	/* dumpsys() support */
451 	MMUMETHOD(mmu_dumpsys_map,	mmu_booke_dumpsys_map),
452 	MMUMETHOD(mmu_dumpsys_unmap,	mmu_booke_dumpsys_unmap),
453 	MMUMETHOD(mmu_scan_init,	mmu_booke_scan_init),
454 
455 	{ 0, 0 }
456 };
457 
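/* Register this pmap implementation with the MMU dispatch layer. */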
458 MMU_DEF(booke_mmu, MMU_TYPE_BOOKE, mmu_booke_methods, 0);
459 
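/*
 * Compute the MAS2 WIMG attribute bits to use when mapping physical address
 * 'pa' with memory attribute 'ma'.
 */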
460 static __inline uint32_t
461 tlb_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
462 {
463 	uint32_t attrib;
464 	int i;
465 
466 	if (ma != VM_MEMATTR_DEFAULT) {
467 		switch (ma) {
468 		case VM_MEMATTR_UNCACHEABLE:
469 			return (MAS2_I | MAS2_G);
470 		case VM_MEMATTR_WRITE_COMBINING:
471 		case VM_MEMATTR_WRITE_BACK:
472 		case VM_MEMATTR_PREFETCHABLE:
473 			return (MAS2_I);
474 		case VM_MEMATTR_WRITE_THROUGH:
475 			return (MAS2_W | MAS2_M);
476 		case VM_MEMATTR_CACHEABLE:
477 			return (MAS2_M);
478 		}
479 	}
480 
481 	/*
482 	 * Assume the page is cache inhibited and access is guarded unless
483 	 * it's covered by one of our physical memory regions.
484 	 */
485 	attrib = _TLB_ENTRY_IO;
486 	for (i = 0; i < physmem_regions_sz; i++) {
487 		if ((pa >= physmem_regions[i].mr_start) &&
488 		    (pa < (physmem_regions[i].mr_start +
489 		     physmem_regions[i].mr_size))) {
490 			attrib = _TLB_ENTRY_MEM;
491 			break;
492 		}
493 	}
494 
495 	return (attrib);
496 }
497 
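/*
 * Take the TLB-miss lock of every other CPU so that their TLB miss handlers
 * do not walk page tables while we update them; released by tlb_miss_unlock().
 */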
498 static inline void
499 tlb_miss_lock(void)
500 {
501 #ifdef SMP
502 	struct pcpu *pc;
503 
504 	if (!smp_started)
505 		return;
506 
507 	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
508 		if (pc != pcpup) {
509 
510 			CTR3(KTR_PMAP, "%s: tlb miss LOCK of CPU=%d, "
511 			    "tlb_lock=%p", __func__, pc->pc_cpuid, pc->pc_booke.tlb_lock);
512 
513 			KASSERT((pc->pc_cpuid != PCPU_GET(cpuid)),
514 			    ("tlb_miss_lock: tried to lock self"));
515 
516 			tlb_lock(pc->pc_booke.tlb_lock);
517 
518 			CTR1(KTR_PMAP, "%s: locked", __func__);
519 		}
520 	}
521 #endif
522 }
523 
524 static inline void
525 tlb_miss_unlock(void)
526 {
527 #ifdef SMP
528 	struct pcpu *pc;
529 
530 	if (!smp_started)
531 		return;
532 
533 	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
534 		if (pc != pcpup) {
535 			CTR2(KTR_PMAP, "%s: tlb miss UNLOCK of CPU=%d",
536 			    __func__, pc->pc_cpuid);
537 
538 			tlb_unlock(pc->pc_booke.tlb_lock);
539 
540 			CTR1(KTR_PMAP, "%s: unlocked", __func__);
541 		}
542 	}
543 #endif
544 }
545 
546 /* Read TLB0 size and associativity into globals. */
547 static __inline void
548 tlb0_get_tlbconf(void)
549 {
550 	uint32_t tlb0_cfg;
551 
552 	tlb0_cfg = mfspr(SPR_TLB0CFG);
553 	tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK;
554 	tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT;
555 	tlb0_entries_per_way = tlb0_entries / tlb0_ways;
556 }
557 
558 /* Read the number of TLB1 entries into globals. */
559 static __inline void
560 tlb1_get_tlbconf(void)
561 {
562 	uint32_t tlb1_cfg;
563 
564 	tlb1_cfg = mfspr(SPR_TLB1CFG);
565 	tlb1_entries = tlb1_cfg & TLBCFG_NENTRY_MASK;
566 }
567 
568 /**************************************************************************/
569 /* Page table related */
570 /**************************************************************************/
571 
572 #ifdef __powerpc64__
573 /* Initialize pool of kva ptbl buffers. */
574 static void
575 ptbl_init(void)
576 {
577 	int		i;
578 
579 	mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
580 	TAILQ_INIT(&ptbl_buf_freelist);
581 
582 	for (i = 0; i < PTBL_BUFS; i++) {
583 		ptbl_bufs[i].kva = ptbl_buf_pool_vabase +
584 		    i * MAX(PTBL_PAGES,PDIR_PAGES) * PAGE_SIZE;
585 		TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
586 	}
587 }
588 
589 /* Get a ptbl_buf from the freelist. */
590 static struct ptbl_buf *
591 ptbl_buf_alloc(void)
592 {
593 	struct ptbl_buf *buf;
594 
595 	mtx_lock(&ptbl_buf_freelist_lock);
596 	buf = TAILQ_FIRST(&ptbl_buf_freelist);
597 	if (buf != NULL)
598 		TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
599 	mtx_unlock(&ptbl_buf_freelist_lock);
600 
601 	return (buf);
602 }
603 
604 /* Return ptbl buf to the free pool. */
605 static void
606 ptbl_buf_free(struct ptbl_buf *buf)
607 {
608 	mtx_lock(&ptbl_buf_freelist_lock);
609 	TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
610 	mtx_unlock(&ptbl_buf_freelist_lock);
611 }
612 
613 /*
614  * Find the ptbl buf that maps the given ptbl on the pmap's list and free it.
615  */
616 static void
617 ptbl_free_pmap_ptbl(pmap_t pmap, pte_t * ptbl)
618 {
619 	struct ptbl_buf *pbuf;
620 
621 	TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link) {
622 		if (pbuf->kva == (vm_offset_t) ptbl) {
623 			/* Remove from pmap ptbl buf list. */
624 			TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link);
625 
626 			/* Free corresponding ptbl buf. */
627 			ptbl_buf_free(pbuf);
628 
629 			break;
630 		}
631 	}
632 }
633 
634 /* Get a pointer to a PTE in a page table. */
635 static __inline pte_t *
636 pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
637 {
638 	pte_t         **pdir;
639 	pte_t          *ptbl;
640 
641 	KASSERT((pmap != NULL), ("pte_find: invalid pmap"));
642 
643 	pdir = pmap->pm_pp2d[PP2D_IDX(va)];
644 	if (!pdir)
645 		return NULL;
646 	ptbl = pdir[PDIR_IDX(va)];
647 	return ((ptbl != NULL) ? &ptbl[PTBL_IDX(va)] : NULL);
648 }
649 
650 /*
651  * Find the ptbl buf that maps the given pdir on the pmap's list and free it.
652  */
653 static void
654 ptbl_free_pmap_pdir(mmu_t mmu, pmap_t pmap, pte_t ** pdir)
655 {
656 	struct ptbl_buf *pbuf;
657 
658 	TAILQ_FOREACH(pbuf, &pmap->pm_pdir_list, link) {
659 		if (pbuf->kva == (vm_offset_t) pdir) {
660 			/* Remove from pmap ptbl buf list. */
661 			TAILQ_REMOVE(&pmap->pm_pdir_list, pbuf, link);
662 
663 			/* Free corresponding pdir buf. */
664 			ptbl_buf_free(pbuf);
665 
666 			break;
667 		}
668 	}
669 }
670 /* Free pdir pages and invalidate pdir entry. */
671 static void
672 pdir_free(mmu_t mmu, pmap_t pmap, unsigned int pp2d_idx)
673 {
674 	pte_t         **pdir;
675 	vm_paddr_t	pa;
676 	vm_offset_t	va;
677 	vm_page_t	m;
678 	int		i;
679 
680 	pdir = pmap->pm_pp2d[pp2d_idx];
681 
682 	KASSERT((pdir != NULL), ("pdir_free: null pdir"));
683 
684 	pmap->pm_pp2d[pp2d_idx] = NULL;
685 
686 	for (i = 0; i < PDIR_PAGES; i++) {
687 		va = ((vm_offset_t) pdir + (i * PAGE_SIZE));
688 		pa = pte_vatopa(mmu, kernel_pmap, va);
689 		m = PHYS_TO_VM_PAGE(pa);
690 		vm_page_free_zero(m);
691 		vm_wire_sub(1);
692 		pmap_kremove(va);
693 	}
694 
695 	ptbl_free_pmap_pdir(mmu, pmap, pdir);
696 }
697 
698 /*
699  * Decrement pdir pages hold count and attempt to free pdir pages. Called
700  * when removing directory entry from pdir.
701  *
702  * Return 1 if pdir pages were freed.
703  */
704 static int
705 pdir_unhold(mmu_t mmu, pmap_t pmap, u_int pp2d_idx)
706 {
707 	pte_t         **pdir;
708 	vm_paddr_t	pa;
709 	vm_page_t	m;
710 	int		i;
711 
712 	KASSERT((pmap != kernel_pmap),
713 		("pdir_unhold: unholding kernel pdir!"));
714 
715 	pdir = pmap->pm_pp2d[pp2d_idx];
716 
717 	KASSERT(((vm_offset_t) pdir >= VM_MIN_KERNEL_ADDRESS),
718 	    ("pdir_unhold: non kva pdir"));
719 
720 	/* decrement hold count */
721 	for (i = 0; i < PDIR_PAGES; i++) {
722 		pa = pte_vatopa(mmu, kernel_pmap,
723 		    (vm_offset_t) pdir + (i * PAGE_SIZE));
724 		m = PHYS_TO_VM_PAGE(pa);
725 		m->wire_count--;
726 	}
727 
728 	/*
729 	 * Free pdir pages if there are no dir entries in this pdir.
730 	 * wire_count has the same value for all ptbl pages, so check the
731 	 * last page.
732 	 */
733 	if (m->wire_count == 0) {
734 		pdir_free(mmu, pmap, pp2d_idx);
735 		return (1);
736 	}
737 	return (0);
738 }
739 
740 /*
741  * Increment hold count for pdir pages. This routine is used when a new ptbl
742  * entry is being inserted into the pdir.
743  */
744 static void
745 pdir_hold(mmu_t mmu, pmap_t pmap, pte_t ** pdir)
746 {
747 	vm_paddr_t	pa;
748 	vm_page_t	m;
749 	int		i;
750 
751 	KASSERT((pmap != kernel_pmap),
752 		("pdir_hold: holding kernel pdir!"));
753 
754 	KASSERT((pdir != NULL), ("pdir_hold: null pdir"));
755 
756 	for (i = 0; i < PDIR_PAGES; i++) {
757 		pa = pte_vatopa(mmu, kernel_pmap,
758 				(vm_offset_t) pdir + (i * PAGE_SIZE));
759 		m = PHYS_TO_VM_PAGE(pa);
760 		m->wire_count++;
761 	}
762 }
763 
764 /* Allocate page table. */
765 static pte_t   *
766 ptbl_alloc(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx,
767     boolean_t nosleep)
768 {
769 	vm_page_t	mtbl  [PTBL_PAGES];
770 	vm_page_t	m;
771 	struct ptbl_buf *pbuf;
772 	unsigned int	pidx;
773 	pte_t          *ptbl;
774 	int		i, j;
775 	int		req;
776 
777 	KASSERT((pdir[pdir_idx] == NULL),
778 		("%s: valid ptbl entry exists!", __func__));
779 
780 	pbuf = ptbl_buf_alloc();
781 	if (pbuf == NULL)
782 		panic("%s: couldn't alloc kernel virtual memory", __func__);
783 
784 	ptbl = (pte_t *) pbuf->kva;
785 
786 	for (i = 0; i < PTBL_PAGES; i++) {
787 		pidx = (PTBL_PAGES * pdir_idx) + i;
788 		req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
789 		while ((m = vm_page_alloc(NULL, pidx, req)) == NULL) {
790 			PMAP_UNLOCK(pmap);
791 			rw_wunlock(&pvh_global_lock);
792 			if (nosleep) {
793 				ptbl_free_pmap_ptbl(pmap, ptbl);
794 				for (j = 0; j < i; j++)
795 					vm_page_free(mtbl[j]);
796 				vm_wire_sub(i);
797 				return (NULL);
798 			}
799 			vm_wait(NULL);
800 			rw_wlock(&pvh_global_lock);
801 			PMAP_LOCK(pmap);
802 		}
803 		mtbl[i] = m;
804 	}
805 
806 	/* Map allocated pages into kernel_pmap. */
807 	mmu_booke_qenter(mmu, (vm_offset_t) ptbl, mtbl, PTBL_PAGES);
808 	/* Zero whole ptbl. */
809 	bzero((caddr_t) ptbl, PTBL_PAGES * PAGE_SIZE);
810 
811 	/* Add pbuf to the pmap ptbl bufs list. */
812 	TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link);
813 
814 	return (ptbl);
815 }
816 
817 /* Free ptbl pages and invalidate pdir entry. */
818 static void
819 ptbl_free(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx)
820 {
821 	pte_t          *ptbl;
822 	vm_paddr_t	pa;
823 	vm_offset_t	va;
824 	vm_page_t	m;
825 	int		i;
826 
827 	ptbl = pdir[pdir_idx];
828 
829 	KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));
830 
831 	pdir[pdir_idx] = NULL;
832 
833 	for (i = 0; i < PTBL_PAGES; i++) {
834 		va = ((vm_offset_t) ptbl + (i * PAGE_SIZE));
835 		pa = pte_vatopa(mmu, kernel_pmap, va);
836 		m = PHYS_TO_VM_PAGE(pa);
837 		vm_page_free_zero(m);
838 		vm_wire_sub(1);
839 		pmap_kremove(va);
840 	}
841 
842 	ptbl_free_pmap_ptbl(pmap, ptbl);
843 }
844 
845 /*
846  * Decrement ptbl pages hold count and attempt to free ptbl pages. Called
847  * when removing pte entry from ptbl.
848  *
849  * Return 1 if ptbl pages were freed.
850  */
851 static int
852 ptbl_unhold(mmu_t mmu, pmap_t pmap, vm_offset_t va)
853 {
854 	pte_t          *ptbl;
855 	vm_paddr_t	pa;
856 	vm_page_t	m;
857 	u_int		pp2d_idx;
858 	pte_t         **pdir;
859 	u_int		pdir_idx;
860 	int		i;
861 
862 	pp2d_idx = PP2D_IDX(va);
863 	pdir_idx = PDIR_IDX(va);
864 
865 	KASSERT((pmap != kernel_pmap),
866 		("ptbl_unhold: unholding kernel ptbl!"));
867 
868 	pdir = pmap->pm_pp2d[pp2d_idx];
869 	ptbl = pdir[pdir_idx];
870 
871 	KASSERT(((vm_offset_t) ptbl >= VM_MIN_KERNEL_ADDRESS),
872 	    ("ptbl_unhold: non kva ptbl"));
873 
874 	/* decrement hold count */
875 	for (i = 0; i < PTBL_PAGES; i++) {
876 		pa = pte_vatopa(mmu, kernel_pmap,
877 		    (vm_offset_t) ptbl + (i * PAGE_SIZE));
878 		m = PHYS_TO_VM_PAGE(pa);
879 		m->wire_count--;
880 	}
881 
882 	/*
883 	 * Free ptbl pages if there are no pte entries in this ptbl.
884 	 * wire_count has the same value for all ptbl pages, so check the
885 	 * last page.
886 	 */
887 	if (m->wire_count == 0) {
888 		/* A pair of indirect entries might point to this ptbl page */
889 #if 0
890 		tlb_flush_entry(pmap, va & ~((2UL * PAGE_SIZE_1M) - 1),
891 				TLB_SIZE_1M, MAS6_SIND);
892 		tlb_flush_entry(pmap, (va & ~((2UL * PAGE_SIZE_1M) - 1)) | PAGE_SIZE_1M,
893 				TLB_SIZE_1M, MAS6_SIND);
894 #endif
895 		ptbl_free(mmu, pmap, pdir, pdir_idx);
896 		pdir_unhold(mmu, pmap, pp2d_idx);
897 		return (1);
898 	}
899 	return (0);
900 }
901 
902 /*
903  * Increment hold count for ptbl pages. This routine is used when a new pte
904  * entry is being inserted into the ptbl.
905  */
906 static void
907 ptbl_hold(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx)
908 {
909 	vm_paddr_t	pa;
910 	pte_t          *ptbl;
911 	vm_page_t	m;
912 	int		i;
913 
914 	KASSERT((pmap != kernel_pmap),
915 		("ptbl_hold: holding kernel ptbl!"));
916 
917 	ptbl = pdir[pdir_idx];
918 
919 	KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));
920 
921 	for (i = 0; i < PTBL_PAGES; i++) {
922 		pa = pte_vatopa(mmu, kernel_pmap,
923 				(vm_offset_t) ptbl + (i * PAGE_SIZE));
924 		m = PHYS_TO_VM_PAGE(pa);
925 		m->wire_count++;
926 	}
927 }
928 #else
929 
930 /* Initialize pool of kva ptbl buffers. */
931 static void
932 ptbl_init(void)
933 {
934 	int i;
935 
936 	CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__,
937 	    (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS);
938 	CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)",
939 	    __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE);
940 
941 	mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
942 	TAILQ_INIT(&ptbl_buf_freelist);
943 
944 	for (i = 0; i < PTBL_BUFS; i++) {
945 		ptbl_bufs[i].kva =
946 		    ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE;
947 		TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
948 	}
949 }
950 
951 /* Get a ptbl_buf from the freelist. */
952 static struct ptbl_buf *
953 ptbl_buf_alloc(void)
954 {
955 	struct ptbl_buf *buf;
956 
957 	mtx_lock(&ptbl_buf_freelist_lock);
958 	buf = TAILQ_FIRST(&ptbl_buf_freelist);
959 	if (buf != NULL)
960 		TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
961 	mtx_unlock(&ptbl_buf_freelist_lock);
962 
963 	CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);
964 
965 	return (buf);
966 }
967 
968 /* Return ptbl buf to the free pool. */
969 static void
970 ptbl_buf_free(struct ptbl_buf *buf)
971 {
972 
973 	CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);
974 
975 	mtx_lock(&ptbl_buf_freelist_lock);
976 	TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
977 	mtx_unlock(&ptbl_buf_freelist_lock);
978 }
979 
980 /*
981  * Find the ptbl buf that maps the given ptbl on the pmap's list and free it.
982  */
983 static void
984 ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl)
985 {
986 	struct ptbl_buf *pbuf;
987 
988 	CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);
989 
990 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
991 
992 	TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link)
993 		if (pbuf->kva == (vm_offset_t)ptbl) {
994 			/* Remove from pmap ptbl buf list. */
995 			TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link);
996 
997 			/* Free corresponding ptbl buf. */
998 			ptbl_buf_free(pbuf);
999 			break;
1000 		}
1001 }
1002 
1003 /* Allocate page table. */
1004 static pte_t *
1005 ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx, boolean_t nosleep)
1006 {
1007 	vm_page_t mtbl[PTBL_PAGES];
1008 	vm_page_t m;
1009 	struct ptbl_buf *pbuf;
1010 	unsigned int pidx;
1011 	pte_t *ptbl;
1012 	int i, j;
1013 
1014 	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
1015 	    (pmap == kernel_pmap), pdir_idx);
1016 
1017 	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
1018 	    ("ptbl_alloc: invalid pdir_idx"));
1019 	KASSERT((pmap->pm_pdir[pdir_idx] == NULL),
1020 	    ("pte_alloc: valid ptbl entry exists!"));
1021 
1022 	pbuf = ptbl_buf_alloc();
1023 	if (pbuf == NULL)
1024 		panic("pte_alloc: couldn't alloc kernel virtual memory");
1025 
1026 	ptbl = (pte_t *)pbuf->kva;
1027 
1028 	CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl);
1029 
1030 	for (i = 0; i < PTBL_PAGES; i++) {
1031 		pidx = (PTBL_PAGES * pdir_idx) + i;
1032 		while ((m = vm_page_alloc(NULL, pidx,
1033 		    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
1034 			PMAP_UNLOCK(pmap);
1035 			rw_wunlock(&pvh_global_lock);
1036 			if (nosleep) {
1037 				ptbl_free_pmap_ptbl(pmap, ptbl);
1038 				for (j = 0; j < i; j++)
1039 					vm_page_free(mtbl[j]);
1040 				vm_wire_sub(i);
1041 				return (NULL);
1042 			}
1043 			vm_wait(NULL);
1044 			rw_wlock(&pvh_global_lock);
1045 			PMAP_LOCK(pmap);
1046 		}
1047 		mtbl[i] = m;
1048 	}
1049 
1050 	/* Map allocated pages into kernel_pmap. */
1051 	mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES);
1052 
1053 	/* Zero whole ptbl. */
1054 	bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE);
1055 
1056 	/* Add pbuf to the pmap ptbl bufs list. */
1057 	TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link);
1058 
1059 	return (ptbl);
1060 }
1061 
1062 /* Free ptbl pages and invalidate pdir entry. */
1063 static void
1064 ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
1065 {
1066 	pte_t *ptbl;
1067 	vm_paddr_t pa;
1068 	vm_offset_t va;
1069 	vm_page_t m;
1070 	int i;
1071 
1072 	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
1073 	    (pmap == kernel_pmap), pdir_idx);
1074 
1075 	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
1076 	    ("ptbl_free: invalid pdir_idx"));
1077 
1078 	ptbl = pmap->pm_pdir[pdir_idx];
1079 
1080 	CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);
1081 
1082 	KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));
1083 
1084 	/*
1085 	 * Invalidate the pdir entry as soon as possible, so that other CPUs
1086 	 * don't attempt to look up the page tables we are releasing.
1087 	 */
1088 	mtx_lock_spin(&tlbivax_mutex);
1089 	tlb_miss_lock();
1090 
1091 	pmap->pm_pdir[pdir_idx] = NULL;
1092 
1093 	tlb_miss_unlock();
1094 	mtx_unlock_spin(&tlbivax_mutex);
1095 
1096 	for (i = 0; i < PTBL_PAGES; i++) {
1097 		va = ((vm_offset_t)ptbl + (i * PAGE_SIZE));
1098 		pa = pte_vatopa(mmu, kernel_pmap, va);
1099 		m = PHYS_TO_VM_PAGE(pa);
1100 		vm_page_free_zero(m);
1101 		vm_wire_sub(1);
1102 		mmu_booke_kremove(mmu, va);
1103 	}
1104 
1105 	ptbl_free_pmap_ptbl(pmap, ptbl);
1106 }
1107 
1108 /*
1109  * Decrement ptbl pages hold count and attempt to free ptbl pages.
1110  * Called when removing pte entry from ptbl.
1111  *
1112  * Return 1 if ptbl pages were freed.
1113  */
1114 static int
1115 ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
1116 {
1117 	pte_t *ptbl;
1118 	vm_paddr_t pa;
1119 	vm_page_t m;
1120 	int i;
1121 
1122 	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
1123 	    (pmap == kernel_pmap), pdir_idx);
1124 
1125 	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
1126 	    ("ptbl_unhold: invalid pdir_idx"));
1127 	KASSERT((pmap != kernel_pmap),
1128 	    ("ptbl_unhold: unholding kernel ptbl!"));
1129 
1130 	ptbl = pmap->pm_pdir[pdir_idx];
1131 
1132 	//debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl);
1133 	KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS),
1134 	    ("ptbl_unhold: non kva ptbl"));
1135 
1136 	/* decrement hold count */
1137 	for (i = 0; i < PTBL_PAGES; i++) {
1138 		pa = pte_vatopa(mmu, kernel_pmap,
1139 		    (vm_offset_t)ptbl + (i * PAGE_SIZE));
1140 		m = PHYS_TO_VM_PAGE(pa);
1141 		m->wire_count--;
1142 	}
1143 
1144 	/*
1145 	 * Free ptbl pages if there are no pte entries in this ptbl.
1146 	 * wire_count has the same value for all ptbl pages, so check the last
1147 	 * page.
1148 	 */
1149 	if (m->wire_count == 0) {
1150 		ptbl_free(mmu, pmap, pdir_idx);
1151 
1152 		//debugf("ptbl_unhold: e (freed ptbl)\n");
1153 		return (1);
1154 	}
1155 
1156 	return (0);
1157 }
1158 
1159 /*
1160  * Increment hold count for ptbl pages. This routine is used when a new pte
1161  * entry is being inserted into the ptbl.
1162  */
1163 static void
1164 ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
1165 {
1166 	vm_paddr_t pa;
1167 	pte_t *ptbl;
1168 	vm_page_t m;
1169 	int i;
1170 
1171 	CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap,
1172 	    pdir_idx);
1173 
1174 	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
1175 	    ("ptbl_hold: invalid pdir_idx"));
1176 	KASSERT((pmap != kernel_pmap),
1177 	    ("ptbl_hold: holding kernel ptbl!"));
1178 
1179 	ptbl = pmap->pm_pdir[pdir_idx];
1180 
1181 	KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));
1182 
1183 	for (i = 0; i < PTBL_PAGES; i++) {
1184 		pa = pte_vatopa(mmu, kernel_pmap,
1185 		    (vm_offset_t)ptbl + (i * PAGE_SIZE));
1186 		m = PHYS_TO_VM_PAGE(pa);
1187 		m->wire_count++;
1188 	}
1189 }
1190 #endif
1191 
1192 /* Allocate pv_entry structure. */
1193 pv_entry_t
1194 pv_alloc(void)
1195 {
1196 	pv_entry_t pv;
1197 
1198 	pv_entry_count++;
1199 	if (pv_entry_count > pv_entry_high_water)
1200 		pagedaemon_wakeup(0); /* XXX powerpc NUMA */
1201 	pv = uma_zalloc(pvzone, M_NOWAIT);
1202 
1203 	return (pv);
1204 }
1205 
1206 /* Free pv_entry structure. */
1207 static __inline void
1208 pv_free(pv_entry_t pve)
1209 {
1210 
1211 	pv_entry_count--;
1212 	uma_zfree(pvzone, pve);
1213 }
1214 
1215 
1216 /* Allocate a pv_entry and link it onto the page's pv list. */
1217 static void
1218 pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m)
1219 {
1220 	pv_entry_t pve;
1221 
1222 	//int su = (pmap == kernel_pmap);
1223 	//debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su,
1224 	//	(u_int32_t)pmap, va, (u_int32_t)m);
1225 
1226 	pve = pv_alloc();
1227 	if (pve == NULL)
1228 		panic("pv_insert: no pv entries!");
1229 
1230 	pve->pv_pmap = pmap;
1231 	pve->pv_va = va;
1232 
1233 	/* add to pv_list */
1234 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1235 	rw_assert(&pvh_global_lock, RA_WLOCKED);
1236 
1237 	TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link);
1238 
1239 	//debugf("pv_insert: e\n");
1240 }
1241 
1242 /* Destroy pv entry. */
1243 static void
1244 pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m)
1245 {
1246 	pv_entry_t pve;
1247 
1248 	//int su = (pmap == kernel_pmap);
1249 	//debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va);
1250 
1251 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1252 	rw_assert(&pvh_global_lock, RA_WLOCKED);
1253 
1254 	/* find pv entry */
1255 	TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) {
1256 		if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
1257 			/* remove from pv_list */
1258 			TAILQ_REMOVE(&m->md.pv_list, pve, pv_link);
1259 			if (TAILQ_EMPTY(&m->md.pv_list))
1260 				vm_page_aflag_clear(m, PGA_WRITEABLE);
1261 
1262 			/* free pv entry struct */
1263 			pv_free(pve);
1264 			break;
1265 		}
1266 	}
1267 
1268 	//debugf("pv_remove: e\n");
1269 }
1270 
1271 #ifdef __powerpc64__
1272 /*
1273  * Clean pte entry, try to free page table page if requested.
1274  *
1275  * Return 1 if ptbl pages were freed, otherwise return 0.
1276  */
1277 static int
1278 pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, u_int8_t flags)
1279 {
1280 	vm_page_t	m;
1281 	pte_t          *pte;
1282 
1283 	pte = pte_find(mmu, pmap, va);
1284 	KASSERT(pte != NULL, ("%s: NULL pte", __func__));
1285 
1286 	if (!PTE_ISVALID(pte))
1287 		return (0);
1288 
1289 	/* Get vm_page_t for mapped pte. */
1290 	m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1291 
1292 	if (PTE_ISWIRED(pte))
1293 		pmap->pm_stats.wired_count--;
1294 
1295 	/* Handle managed entry. */
1296 	if (PTE_ISMANAGED(pte)) {
1297 
1298 		/* Handle modified pages. */
1299 		if (PTE_ISMODIFIED(pte))
1300 			vm_page_dirty(m);
1301 
1302 		/* Referenced pages. */
1303 		if (PTE_ISREFERENCED(pte))
1304 			vm_page_aflag_set(m, PGA_REFERENCED);
1305 
1306 		/* Remove pv_entry from pv_list. */
1307 		pv_remove(pmap, va, m);
1308 	} else if (m->md.pv_tracked) {
1309 		pv_remove(pmap, va, m);
1310 		if (TAILQ_EMPTY(&m->md.pv_list))
1311 			m->md.pv_tracked = false;
1312 	}
1313 	mtx_lock_spin(&tlbivax_mutex);
1314 	tlb_miss_lock();
1315 
1316 	tlb0_flush_entry(va);
1317 	*pte = 0;
1318 
1319 	tlb_miss_unlock();
1320 	mtx_unlock_spin(&tlbivax_mutex);
1321 
1322 	pmap->pm_stats.resident_count--;
1323 
1324 	if (flags & PTBL_UNHOLD) {
1325 		return (ptbl_unhold(mmu, pmap, va));
1326 	}
1327 	return (0);
1328 }
1329 
1330 /*
1331  * Allocate a page of pointers to page directories; do not preallocate the
1332  * page tables.
1333  */
1334 static pte_t  **
1335 pdir_alloc(mmu_t mmu, pmap_t pmap, unsigned int pp2d_idx, bool nosleep)
1336 {
1337 	vm_page_t	mtbl  [PDIR_PAGES];
1338 	vm_page_t	m;
1339 	struct ptbl_buf *pbuf;
1340 	pte_t         **pdir;
1341 	unsigned int	pidx;
1342 	int		i;
1343 	int		req;
1344 
1345 	pbuf = ptbl_buf_alloc();
1346 
1347 	if (pbuf == NULL)
1348 		panic("%s: couldn't alloc kernel virtual memory", __func__);
1349 
1350 	/* Allocate pdir pages, this will sleep! */
1351 	for (i = 0; i < PDIR_PAGES; i++) {
1352 		pidx = (PDIR_PAGES * pp2d_idx) + i;
1353 		req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
1354 		while ((m = vm_page_alloc(NULL, pidx, req)) == NULL) {
1355 			PMAP_UNLOCK(pmap);
1356 			vm_wait(NULL);
1357 			PMAP_LOCK(pmap);
1358 		}
1359 		mtbl[i] = m;
1360 	}
1361 
1362 	/* Map allocated pages into kernel_pmap. */
1363 	pdir = (pte_t **) pbuf->kva;
1364 	pmap_qenter((vm_offset_t) pdir, mtbl, PDIR_PAGES);
1365 
1366 	/* Zero whole pdir. */
1367 	bzero((caddr_t) pdir, PDIR_PAGES * PAGE_SIZE);
1368 
1369 	/* Add pdir to the pmap pdir bufs list. */
1370 	TAILQ_INSERT_TAIL(&pmap->pm_pdir_list, pbuf, link);
1371 
1372 	return pdir;
1373 }
1374 
1375 /*
1376  * Insert PTE for a given page and virtual address.
1377  */
1378 static int
1379 pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
1380     boolean_t nosleep)
1381 {
1382 	unsigned int	pp2d_idx = PP2D_IDX(va);
1383 	unsigned int	pdir_idx = PDIR_IDX(va);
1384 	unsigned int	ptbl_idx = PTBL_IDX(va);
1385 	pte_t          *ptbl, *pte;
1386 	pte_t         **pdir;
1387 
1388 	/* Get the page directory pointer. */
1389 	pdir = pmap->pm_pp2d[pp2d_idx];
1390 	if (pdir == NULL)
1391 		pdir = pdir_alloc(mmu, pmap, pp2d_idx, nosleep);
1392 
1393 	/* Get the page table pointer. */
1394 	ptbl = pdir[pdir_idx];
1395 
1396 	if (ptbl == NULL) {
1397 		/* Allocate page table pages. */
1398 		ptbl = ptbl_alloc(mmu, pmap, pdir, pdir_idx, nosleep);
1399 		if (ptbl == NULL) {
1400 			KASSERT(nosleep, ("nosleep and NULL ptbl"));
1401 			return (ENOMEM);
1402 		}
1403 	} else {
1404 		/*
1405 		 * Check if there is a valid mapping for the requested va; if
1406 		 * there is, remove it.
1407 		 */
1408 		pte = &pdir[pdir_idx][ptbl_idx];
1409 		if (PTE_ISVALID(pte)) {
1410 			pte_remove(mmu, pmap, va, PTBL_HOLD);
1411 		} else {
1412 			/*
1413 			 * pte is not used, increment hold count for ptbl
1414 			 * pages.
1415 			 */
1416 			if (pmap != kernel_pmap)
1417 				ptbl_hold(mmu, pmap, pdir, pdir_idx);
1418 		}
1419 	}
1420 
1421 	if (pdir[pdir_idx] == NULL) {
1422 		if (pmap != kernel_pmap && pmap->pm_pp2d[pp2d_idx] != NULL)
1423 			pdir_hold(mmu, pmap, pdir);
1424 		pdir[pdir_idx] = ptbl;
1425 	}
1426 	if (pmap->pm_pp2d[pp2d_idx] == NULL)
1427 		pmap->pm_pp2d[pp2d_idx] = pdir;
1428 
1429 	/*
1430 	 * Insert pv_entry into pv_list for mapped page if part of managed
1431 	 * memory.
1432 	 */
1433 	if ((m->oflags & VPO_UNMANAGED) == 0) {
1434 		flags |= PTE_MANAGED;
1435 
1436 		/* Create and insert pv entry. */
1437 		pv_insert(pmap, va, m);
1438 	}
1439 
1440 	mtx_lock_spin(&tlbivax_mutex);
1441 	tlb_miss_lock();
1442 
1443 	tlb0_flush_entry(va);
1444 	pmap->pm_stats.resident_count++;
1445 	pte = &pdir[pdir_idx][ptbl_idx];
1446 	*pte = PTE_RPN_FROM_PA(VM_PAGE_TO_PHYS(m));
1447 	*pte |= (PTE_VALID | flags);
1448 
1449 	tlb_miss_unlock();
1450 	mtx_unlock_spin(&tlbivax_mutex);
1451 
1452 	return (0);
1453 }
1454 
1455 /* Return the pa for the given pmap/va. */
1456 static	vm_paddr_t
1457 pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1458 {
1459 	vm_paddr_t	pa = 0;
1460 	pte_t          *pte;
1461 
1462 	pte = pte_find(mmu, pmap, va);
1463 	if ((pte != NULL) && PTE_ISVALID(pte))
1464 		pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
1465 	return (pa);
1466 }
1467 
1468 
1469 /* Set up kernel page tables. */
1470 static void
1471 kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr, vm_offset_t pdir)
1472 {
1473 	int		i, j;
1474 	vm_offset_t	va;
1475 	pte_t		*pte;
1476 
1477 	va = addr;
1478 	/* Initialize kernel pdir */
1479 	for (i = 0; i < kernel_pdirs; i++) {
1480 		kernel_pmap->pm_pp2d[i + PP2D_IDX(va)] =
1481 		    (pte_t **)(pdir + (i * PAGE_SIZE * PDIR_PAGES));
1482 		for (j = PDIR_IDX(va + (i * PAGE_SIZE * PDIR_NENTRIES * PTBL_NENTRIES));
1483 		    j < PDIR_NENTRIES; j++) {
1484 			kernel_pmap->pm_pp2d[i + PP2D_IDX(va)][j] =
1485 			    (pte_t *)(pdir + (kernel_pdirs * PAGE_SIZE * PDIR_PAGES) +
1486 			     (((i * PDIR_NENTRIES) + j) * PAGE_SIZE * PTBL_PAGES));
1487 		}
1488 	}
1489 
1490 	/*
1491 	 * Fill in PTEs covering kernel code and data. They are not required
1492 	 * for address translation, as this area is covered by static TLB1
1493 	 * entries, but they are needed for pte_vatopa() to work correctly
1494 	 * with kernel area addresses.
1495 	 */
1496 	for (va = addr; va < data_end; va += PAGE_SIZE) {
1497 		pte = &(kernel_pmap->pm_pp2d[PP2D_IDX(va)][PDIR_IDX(va)][PTBL_IDX(va)]);
1498 		*pte = PTE_RPN_FROM_PA(kernload + (va - kernstart));
1499 		*pte |= PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
1500 		    PTE_VALID | PTE_PS_4KB;
1501 	}
1502 }
1503 #else
1504 /*
1505  * Clean pte entry, try to free page table page if requested.
1506  *
1507  * Return 1 if ptbl pages were freed, otherwise return 0.
1508  */
1509 static int
1510 pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
1511 {
1512 	unsigned int pdir_idx = PDIR_IDX(va);
1513 	unsigned int ptbl_idx = PTBL_IDX(va);
1514 	vm_page_t m;
1515 	pte_t *ptbl;
1516 	pte_t *pte;
1517 
1518 	//int su = (pmap == kernel_pmap);
1519 	//debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n",
1520 	//		su, (u_int32_t)pmap, va, flags);
1521 
1522 	ptbl = pmap->pm_pdir[pdir_idx];
1523 	KASSERT(ptbl, ("pte_remove: null ptbl"));
1524 
1525 	pte = &ptbl[ptbl_idx];
1526 
1527 	if (pte == NULL || !PTE_ISVALID(pte))
1528 		return (0);
1529 
1530 	if (PTE_ISWIRED(pte))
1531 		pmap->pm_stats.wired_count--;
1532 
1533 	/* Get vm_page_t for mapped pte. */
1534 	m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1535 
1536 	/* Handle managed entry. */
1537 	if (PTE_ISMANAGED(pte)) {
1538 
1539 		if (PTE_ISMODIFIED(pte))
1540 			vm_page_dirty(m);
1541 
1542 		if (PTE_ISREFERENCED(pte))
1543 			vm_page_aflag_set(m, PGA_REFERENCED);
1544 
1545 		pv_remove(pmap, va, m);
1546 	} else if (m->md.pv_tracked) {
1547 		/*
1548 		 * Always pv_insert()/pv_remove() on MPC85XX, in case DPAA is
1549 		 * used.  This is needed by the NCSW support code for fast
1550 		 * VA<->PA translation.
1551 		 */
1552 		pv_remove(pmap, va, m);
1553 		if (TAILQ_EMPTY(&m->md.pv_list))
1554 			m->md.pv_tracked = false;
1555 	}
1556 
1557 	mtx_lock_spin(&tlbivax_mutex);
1558 	tlb_miss_lock();
1559 
1560 	tlb0_flush_entry(va);
1561 	*pte = 0;
1562 
1563 	tlb_miss_unlock();
1564 	mtx_unlock_spin(&tlbivax_mutex);
1565 
1566 	pmap->pm_stats.resident_count--;
1567 
1568 	if (flags & PTBL_UNHOLD) {
1569 		//debugf("pte_remove: e (unhold)\n");
1570 		return (ptbl_unhold(mmu, pmap, pdir_idx));
1571 	}
1572 
1573 	//debugf("pte_remove: e\n");
1574 	return (0);
1575 }
1576 
1577 /*
1578  * Insert PTE for a given page and virtual address.
1579  */
1580 static int
1581 pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
1582     boolean_t nosleep)
1583 {
1584 	unsigned int pdir_idx = PDIR_IDX(va);
1585 	unsigned int ptbl_idx = PTBL_IDX(va);
1586 	pte_t *ptbl, *pte;
1587 
1588 	CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__,
1589 	    pmap == kernel_pmap, pmap, va);
1590 
1591 	/* Get the page table pointer. */
1592 	ptbl = pmap->pm_pdir[pdir_idx];
1593 
1594 	if (ptbl == NULL) {
1595 		/* Allocate page table pages. */
1596 		ptbl = ptbl_alloc(mmu, pmap, pdir_idx, nosleep);
1597 		if (ptbl == NULL) {
1598 			KASSERT(nosleep, ("nosleep and NULL ptbl"));
1599 			return (ENOMEM);
1600 		}
1601 	} else {
1602 		/*
1603 		 * Check if there is a valid mapping for the requested
1604 		 * va; if there is, remove it.
1605 		 */
1606 		pte = &pmap->pm_pdir[pdir_idx][ptbl_idx];
1607 		if (PTE_ISVALID(pte)) {
1608 			pte_remove(mmu, pmap, va, PTBL_HOLD);
1609 		} else {
1610 			/*
1611 			 * pte is not used, increment hold count
1612 			 * for ptbl pages.
1613 			 */
1614 			if (pmap != kernel_pmap)
1615 				ptbl_hold(mmu, pmap, pdir_idx);
1616 		}
1617 	}
1618 
1619 	/*
1620 	 * Insert pv_entry into pv_list for mapped page if part of managed
1621 	 * memory.
1622 	 */
1623 	if ((m->oflags & VPO_UNMANAGED) == 0) {
1624 		flags |= PTE_MANAGED;
1625 
1626 		/* Create and insert pv entry. */
1627 		pv_insert(pmap, va, m);
1628 	}
1629 
1630 	pmap->pm_stats.resident_count++;
1631 
1632 	mtx_lock_spin(&tlbivax_mutex);
1633 	tlb_miss_lock();
1634 
1635 	tlb0_flush_entry(va);
1636 	if (pmap->pm_pdir[pdir_idx] == NULL) {
1637 		/*
1638 		 * If we just allocated a new page table, hook it in
1639 		 * the pdir.
1640 		 */
1641 		pmap->pm_pdir[pdir_idx] = ptbl;
1642 	}
1643 	pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]);
1644 	*pte = PTE_RPN_FROM_PA(VM_PAGE_TO_PHYS(m));
1645 	*pte |= (PTE_VALID | flags | PTE_PS_4KB); /* 4KB pages only */
1646 
1647 	tlb_miss_unlock();
1648 	mtx_unlock_spin(&tlbivax_mutex);
1649 	return (0);
1650 }
1651 
1652 /* Return the pa for the given pmap/va. */
1653 static vm_paddr_t
1654 pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1655 {
1656 	vm_paddr_t pa = 0;
1657 	pte_t *pte;
1658 
1659 	pte = pte_find(mmu, pmap, va);
1660 	if ((pte != NULL) && PTE_ISVALID(pte))
1661 		pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
1662 	return (pa);
1663 }
1664 
1665 /* Get a pointer to a PTE in a page table. */
1666 static pte_t *
1667 pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1668 {
1669 	unsigned int pdir_idx = PDIR_IDX(va);
1670 	unsigned int ptbl_idx = PTBL_IDX(va);
1671 
1672 	KASSERT((pmap != NULL), ("pte_find: invalid pmap"));
1673 
1674 	if (pmap->pm_pdir[pdir_idx])
1675 		return (&(pmap->pm_pdir[pdir_idx][ptbl_idx]));
1676 
1677 	return (NULL);
1678 }
1679 
1680 /* Set up kernel page tables. */
1681 static void
1682 kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr, vm_offset_t pdir)
1683 {
1684 	int		i;
1685 	vm_offset_t	va;
1686 	pte_t		*pte;
1687 
1688 	/* Initialize kernel pdir */
1689 	for (i = 0; i < kernel_ptbls; i++)
1690 		kernel_pmap->pm_pdir[kptbl_min + i] =
1691 		    (pte_t *)(pdir + (i * PAGE_SIZE * PTBL_PAGES));
1692 
1693 	/*
1694 	 * Fill in PTEs covering kernel code and data. They are not required
1695 	 * for address translation, as this area is covered by static TLB1
1696 	 * entries, but they are needed for pte_vatopa() to work correctly
1697 	 * with kernel area addresses.
1698 	 */
1699 	for (va = addr; va < data_end; va += PAGE_SIZE) {
1700 		pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]);
1701 		*pte = PTE_RPN_FROM_PA(kernload + (va - kernstart));
1702 		*pte |= PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
1703 		    PTE_VALID | PTE_PS_4KB;
1704 	}
1705 }
1706 #endif
1707 
1708 /**************************************************************************/
1709 /* PMAP related */
1710 /**************************************************************************/
1711 
1712 /*
1713  * This is called during booke_init, before the system is really initialized.
1714  */
1715 static void
1716 mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
1717 {
1718 	vm_paddr_t phys_kernelend;
1719 	struct mem_region *mp, *mp1;
1720 	int cnt, i, j;
1721 	vm_paddr_t s, e, sz;
1722 	vm_paddr_t physsz, hwphyssz;
1723 	u_int phys_avail_count;
1724 	vm_size_t kstack0_sz;
1725 	vm_offset_t kernel_pdir, kstack0;
1726 	vm_paddr_t kstack0_phys;
1727 	void *dpcpu;
1728 
1729 	debugf("mmu_booke_bootstrap: entered\n");
1730 
1731 	/* Set interesting system properties */
1732 #ifdef __powerpc64__
1733 	hw_direct_map = 1;
1734 #else
1735 	hw_direct_map = 0;
1736 #endif
1737 #if defined(COMPAT_FREEBSD32) || !defined(__powerpc64__)
1738 	elf32_nxstack = 1;
1739 #endif
1740 
1741 	/* Initialize invalidation mutex */
1742 	mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN);
1743 
1744 	/* Read TLB0 size and associativity. */
1745 	tlb0_get_tlbconf();
1746 
1747 	/*
1748 	 * Align kernel start and end address (kernel image).
1749 	 * Note that kernel end does not necessarily relate to kernsize.
1750 	 * kernsize is the size of the kernel that is actually mapped.
1751 	 */
1752 	kernstart = trunc_page(start);
1753 	data_start = round_page(kernelend);
1754 	data_end = data_start;
1755 
1756 	/*
1757 	 * Addresses of preloaded modules (like file systems) use
1758 	 * physical addresses. Make sure we relocate those into
1759 	 * virtual addresses.
1760 	 */
1761 	preload_addr_relocate = kernstart - kernload;
1762 
1763 	/* Allocate the dynamic per-cpu area. */
1764 	dpcpu = (void *)data_end;
1765 	data_end += DPCPU_SIZE;
1766 
1767 	/* Allocate space for the message buffer. */
1768 	msgbufp = (struct msgbuf *)data_end;
1769 	data_end += msgbufsize;
1770 	debugf(" msgbufp at 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n",
1771 	    (uintptr_t)msgbufp, data_end);
1772 
1773 	data_end = round_page(data_end);
1774 
1775 	/* Allocate space for ptbl_bufs. */
1776 	ptbl_bufs = (struct ptbl_buf *)data_end;
1777 	data_end += sizeof(struct ptbl_buf) * PTBL_BUFS;
1778 	debugf(" ptbl_bufs at 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n",
1779 	    (uintptr_t)ptbl_bufs, data_end);
1780 
1781 	data_end = round_page(data_end);
1782 
1783 	/* Allocate PTE tables for kernel KVA. */
1784 	kernel_pdir = data_end;
1785 	kernel_ptbls = howmany(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
1786 	    PDIR_SIZE);
1787 #ifdef __powerpc64__
1788 	kernel_pdirs = howmany(kernel_ptbls, PDIR_NENTRIES);
1789 	data_end += kernel_pdirs * PDIR_PAGES * PAGE_SIZE;
1790 #endif
1791 	data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE;
1792 	debugf(" kernel ptbls: %d\n", kernel_ptbls);
1793 	debugf(" kernel pdir at 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n",
1794 	    kernel_pdir, data_end);
1795 
1796 	debugf(" data_end: 0x%"PRI0ptrX"\n", data_end);
1797 	if (data_end - kernstart > kernsize) {
1798 		kernsize += tlb1_mapin_region(kernstart + kernsize,
1799 		    kernload + kernsize, (data_end - kernstart) - kernsize);
1800 	}
1801 	data_end = kernstart + kernsize;
1802 	debugf(" updated data_end: 0x%"PRI0ptrX"\n", data_end);
1803 
1804 	/*
1805 	 * Clear the structures - note we can only do it safely after the
1806 	 * possible additional TLB1 translations are in place (above) so that
1807 	 * the whole range up to the currently calculated 'data_end' is covered.
1808 	 */
1809 	dpcpu_init(dpcpu, 0);
1810 	memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_BUFS);
1811 #ifdef __powerpc64__
1812 	memset((void *)kernel_pdir, 0,
1813 	    kernel_pdirs * PDIR_PAGES * PAGE_SIZE +
1814 	    kernel_ptbls * PTBL_PAGES * PAGE_SIZE);
1815 #else
1816 	memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE);
1817 #endif
1818 
1819 	/*******************************************************/
1820 	/* Set the start and end of kva. */
1821 	/*******************************************************/
1822 	virtual_avail = round_page(data_end);
1823 	virtual_end = VM_MAX_KERNEL_ADDRESS;
1824 
1825 	/* Allocate KVA space for page zero/copy operations. */
1826 	zero_page_va = virtual_avail;
1827 	virtual_avail += PAGE_SIZE;
1828 	copy_page_src_va = virtual_avail;
1829 	virtual_avail += PAGE_SIZE;
1830 	copy_page_dst_va = virtual_avail;
1831 	virtual_avail += PAGE_SIZE;
1832 	debugf("zero_page_va = 0x%"PRI0ptrX"\n", zero_page_va);
1833 	debugf("copy_page_src_va = 0x%"PRI0ptrX"\n", copy_page_src_va);
1834 	debugf("copy_page_dst_va = 0x%"PRI0ptrX"\n", copy_page_dst_va);
1835 
1836 	/* Initialize page zero/copy mutexes. */
1837 	mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF);
1838 	mtx_init(&copy_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF);
1839 
1840 	/* Allocate KVA space for ptbl bufs. */
1841 	ptbl_buf_pool_vabase = virtual_avail;
1842 	virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE;
1843 	debugf("ptbl_buf_pool_vabase = 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n",
1844 	    ptbl_buf_pool_vabase, virtual_avail);
1845 
1846 	/* Calculate corresponding physical addresses for the kernel region. */
1847 	phys_kernelend = kernload + kernsize;
1848 	debugf("kernel image and allocated data:\n");
1849 	debugf(" kernload    = 0x%09llx\n", (uint64_t)kernload);
1850 	debugf(" kernstart   = 0x%"PRI0ptrX"\n", kernstart);
1851 	debugf(" kernsize    = 0x%"PRI0ptrX"\n", kernsize);
1852 
1853 	if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz)
1854 		panic("mmu_booke_bootstrap: phys_avail too small");
1855 
1856 	/*
1857 	 * Remove kernel physical address range from avail regions list. Page
1858 	 * align all regions.  Non-page aligned memory isn't very interesting
1859 	 * to us.  Also, sort the entries for ascending addresses.
1860 	 */
1861 
1862 	/* Retrieve phys/avail mem regions */
1863 	mem_regions(&physmem_regions, &physmem_regions_sz,
1864 	    &availmem_regions, &availmem_regions_sz);
1865 	sz = 0;
1866 	cnt = availmem_regions_sz;
1867 	debugf("processing avail regions:\n");
1868 	for (mp = availmem_regions; mp->mr_size; mp++) {
1869 		s = mp->mr_start;
1870 		e = mp->mr_start + mp->mr_size;
1871 		debugf(" %09jx-%09jx -> ", (uintmax_t)s, (uintmax_t)e);
1872 		/* Check whether this region holds all of the kernel. */
1873 		if (s < kernload && e > phys_kernelend) {
1874 			availmem_regions[cnt].mr_start = phys_kernelend;
1875 			availmem_regions[cnt++].mr_size = e - phys_kernelend;
1876 			e = kernload;
1877 		}
1878 		/* Check whether this region starts within the kernel. */
1879 		if (s >= kernload && s < phys_kernelend) {
1880 			if (e <= phys_kernelend)
1881 				goto empty;
1882 			s = phys_kernelend;
1883 		}
1884 		/* Now look whether this region ends within the kernel. */
1885 		if (e > kernload && e <= phys_kernelend) {
1886 			if (s >= kernload)
1887 				goto empty;
1888 			e = kernload;
1889 		}
1890 		/* Now page align the start and size of the region. */
1891 		s = round_page(s);
1892 		e = trunc_page(e);
1893 		if (e < s)
1894 			e = s;
1895 		sz = e - s;
1896 		debugf("%09jx-%09jx = %jx\n",
1897 		    (uintmax_t)s, (uintmax_t)e, (uintmax_t)sz);
1898 
1899 		/* Check whether some memory is left here. */
1900 		if (sz == 0) {
1901 		empty:
1902 			memmove(mp, mp + 1,
1903 			    (cnt - (mp - availmem_regions)) * sizeof(*mp));
1904 			cnt--;
1905 			mp--;
1906 			continue;
1907 		}
1908 
1909 		/* Do an insertion sort. */
1910 		for (mp1 = availmem_regions; mp1 < mp; mp1++)
1911 			if (s < mp1->mr_start)
1912 				break;
1913 		if (mp1 < mp) {
1914 			memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
1915 			mp1->mr_start = s;
1916 			mp1->mr_size = sz;
1917 		} else {
1918 			mp->mr_start = s;
1919 			mp->mr_size = sz;
1920 		}
1921 	}
1922 	availmem_regions_sz = cnt;
1923 
1924 	/*******************************************************/
1925 	/* Steal physical memory for kernel stack from the end */
1926 	/* of the first avail region                           */
1927 	/*******************************************************/
1928 	kstack0_sz = kstack_pages * PAGE_SIZE;
1929 	kstack0_phys = availmem_regions[0].mr_start +
1930 	    availmem_regions[0].mr_size;
1931 	kstack0_phys -= kstack0_sz;
1932 	availmem_regions[0].mr_size -= kstack0_sz;
1933 
1934 	/*******************************************************/
1935 	/* Fill in phys_avail table, based on availmem_regions */
1936 	/*******************************************************/
1937 	phys_avail_count = 0;
1938 	physsz = 0;
1939 	hwphyssz = 0;
1940 	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
1941 
1942 	debugf("fill in phys_avail:\n");
1943 	for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) {
1944 
1945 		debugf(" region: 0x%jx - 0x%jx (0x%jx)\n",
1946 		    (uintmax_t)availmem_regions[i].mr_start,
1947 		    (uintmax_t)availmem_regions[i].mr_start +
1948 		        availmem_regions[i].mr_size,
1949 		    (uintmax_t)availmem_regions[i].mr_size);
1950 
1951 		if (hwphyssz != 0 &&
1952 		    (physsz + availmem_regions[i].mr_size) >= hwphyssz) {
1953 			debugf(" hw.physmem adjust\n");
1954 			if (physsz < hwphyssz) {
1955 				phys_avail[j] = availmem_regions[i].mr_start;
1956 				phys_avail[j + 1] =
1957 				    availmem_regions[i].mr_start +
1958 				    hwphyssz - physsz;
1959 				physsz = hwphyssz;
1960 				phys_avail_count++;
1961 			}
1962 			break;
1963 		}
1964 
1965 		phys_avail[j] = availmem_regions[i].mr_start;
1966 		phys_avail[j + 1] = availmem_regions[i].mr_start +
1967 		    availmem_regions[i].mr_size;
1968 		phys_avail_count++;
1969 		physsz += availmem_regions[i].mr_size;
1970 	}
1971 	physmem = btoc(physsz);
1972 
1973 	/* Calculate the last available physical address. */
1974 	for (i = 0; phys_avail[i + 2] != 0; i += 2)
1975 		;
1976 	Maxmem = powerpc_btop(phys_avail[i + 1]);
1977 
1978 	debugf("Maxmem = 0x%08lx\n", Maxmem);
1979 	debugf("phys_avail_count = %d\n", phys_avail_count);
1980 	debugf("physsz = 0x%09jx physmem = %jd (0x%09jx)\n",
1981 	    (uintmax_t)physsz, (uintmax_t)physmem, (uintmax_t)physmem);
1982 
1983 #ifdef __powerpc64__
1984 	/*
1985 	 * Map the physical memory contiguously in TLB1.
1986 	 * Round so it fits into a single mapping.
1987 	 */
1988 	tlb1_mapin_region(DMAP_BASE_ADDRESS, 0,
1989 	    phys_avail[i + 1]);
1990 #endif
1991 
1992 	/*******************************************************/
1993 	/* Initialize (statically allocated) kernel pmap. */
1994 	/*******************************************************/
1995 	PMAP_LOCK_INIT(kernel_pmap);
1996 #ifndef __powerpc64__
1997 	kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE;
1998 #endif
1999 
2000 	debugf("kernel_pmap = 0x%"PRI0ptrX"\n", (uintptr_t)kernel_pmap);
2001 	kernel_pte_alloc(virtual_avail, kernstart, kernel_pdir);
2002 	for (i = 0; i < MAXCPU; i++) {
2003 		kernel_pmap->pm_tid[i] = TID_KERNEL;
2004 
2005 		/* Initialize each CPU's tidbusy entry 0 with kernel_pmap */
2006 		tidbusy[i][TID_KERNEL] = kernel_pmap;
2007 	}
2008 
2009 	/* Mark kernel_pmap active on all CPUs */
2010 	CPU_FILL(&kernel_pmap->pm_active);
2011 
2012 	/*
2013 	 * Initialize the global pv list lock.
2014 	 */
2015 	rw_init(&pvh_global_lock, "pmap pv global");
2016 
2017 	/*******************************************************/
2018 	/* Final setup */
2019 	/*******************************************************/
2020 
2021 	/* Enter kstack0 into kernel map, provide guard page */
2022 	kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
2023 	thread0.td_kstack = kstack0;
2024 	thread0.td_kstack_pages = kstack_pages;
2025 
2026 	debugf("kstack_sz = 0x%08x\n", kstack0_sz);
2027 	debugf("kstack0_phys at 0x%09llx - 0x%09llx\n",
2028 	    kstack0_phys, kstack0_phys + kstack0_sz);
2029 	debugf("kstack0 at 0x%"PRI0ptrX" - 0x%"PRI0ptrX"\n",
2030 	    kstack0, kstack0 + kstack0_sz);
2031 
2032 	virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz;
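	/* Enter the kstack0 pages into the kernel page table, one at a time. */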
2033 	for (i = 0; i < kstack_pages; i++) {
2034 		mmu_booke_kenter(mmu, kstack0, kstack0_phys);
2035 		kstack0 += PAGE_SIZE;
2036 		kstack0_phys += PAGE_SIZE;
2037 	}
2038 
2039 	pmap_bootstrapped = 1;
2040 
2041 	debugf("virtual_avail = %"PRI0ptrX"\n", virtual_avail);
2042 	debugf("virtual_end   = %"PRI0ptrX"\n", virtual_end);
2043 
2044 	debugf("mmu_booke_bootstrap: exit\n");
2045 }
2046 
2047 #ifdef SMP
2048 void
2049 tlb1_ap_prep(void)
2050 {
2051 	tlb_entry_t *e, tmp;
2052 	unsigned int i;
2053 
2054 	/* Prepare TLB1 image for AP processors */
2055 	e = __boot_tlb1;
2056 	for (i = 0; i < TLB1_ENTRIES; i++) {
2057 		tlb1_read_entry(&tmp, i);
2058 
2059 		if ((tmp.mas1 & MAS1_VALID) && (tmp.mas2 & _TLB_ENTRY_SHARED))
2060 			memcpy(e++, &tmp, sizeof(tmp));
2061 	}
2062 }
2063 
2064 void
2065 pmap_bootstrap_ap(volatile uint32_t *trcp __unused)
2066 {
2067 	int i;
2068 
2069 	/*
2070 	 * Finish TLB1 configuration: the BSP already set up its TLB1 and we
2071 	 * have the snapshot of its contents in the s/w __boot_tlb1[] table
2072 	 * created by tlb1_ap_prep(), so use these values directly to
2073 	 * (re)program AP's TLB1 hardware.
2074 	 *
2075 	 * Start at index 1 because index 0 has the kernel map.
2076 	 */
2077 	for (i = 1; i < TLB1_ENTRIES; i++) {
2078 		if (__boot_tlb1[i].mas1 & MAS1_VALID)
2079 			tlb1_write_entry(&__boot_tlb1[i], i);
2080 	}
2081 
2082 	set_mas4_defaults();
2083 }
2084 #endif
2085 
2086 static void
2087 booke_pmap_init_qpages(void)
2088 {
2089 	struct pcpu *pc;
2090 	int i;
2091 
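	/*
	 * Reserve one page of KVA per CPU for the quick-map window used by
	 * mmu_booke_quick_enter_page() and mmu_booke_quick_remove_page().
	 */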
2092 	CPU_FOREACH(i) {
2093 		pc = pcpu_find(i);
2094 		pc->pc_qmap_addr = kva_alloc(PAGE_SIZE);
2095 		if (pc->pc_qmap_addr == 0)
2096 			panic("pmap_init_qpages: unable to allocate KVA");
2097 	}
2098 }
2099 
2100 SYSINIT(qpages_init, SI_SUB_CPU, SI_ORDER_ANY, booke_pmap_init_qpages, NULL);
2101 
2102 /*
2103  * Get the physical page address for the given pmap/virtual address.
2104  */
2105 static vm_paddr_t
2106 mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va)
2107 {
2108 	vm_paddr_t pa;
2109 
2110 	PMAP_LOCK(pmap);
2111 	pa = pte_vatopa(mmu, pmap, va);
2112 	PMAP_UNLOCK(pmap);
2113 
2114 	return (pa);
2115 }
2116 
2117 /*
2118  * Extract the physical page address associated with the given
2119  * kernel virtual address.
2120  */
2121 static vm_paddr_t
2122 mmu_booke_kextract(mmu_t mmu, vm_offset_t va)
2123 {
2124 	tlb_entry_t e;
2125 	vm_paddr_t p = 0;
2126 	int i;
2127 
2128 	if (va >= VM_MIN_KERNEL_ADDRESS && va <= VM_MAX_KERNEL_ADDRESS)
2129 		p = pte_vatopa(mmu, kernel_pmap, va);
2130 
2131 	if (p == 0) {
2132 		/* Check TLB1 mappings */
2133 		for (i = 0; i < TLB1_ENTRIES; i++) {
2134 			tlb1_read_entry(&e, i);
2135 			if (!(e.mas1 & MAS1_VALID))
2136 				continue;
2137 			if (va >= e.virt && va < e.virt + e.size)
2138 				return (e.phys + (va - e.virt));
2139 		}
2140 	}
2141 
2142 	return (p);
2143 }
2144 
2145 /*
2146  * Initialize the pmap module.
2147  * Called by vm_init, to initialize any structures that the pmap
2148  * system needs to map virtual memory.
2149  */
2150 static void
2151 mmu_booke_init(mmu_t mmu)
2152 {
2153 	int shpgperproc = PMAP_SHPGPERPROC;
2154 
2155 	/*
2156 	 * Initialize the address space (zone) for the pv entries.  Set a
2157 	 * high water mark so that the system can recover from excessive
2158 	 * numbers of pv entries.
2159 	 */
2160 	pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
2161 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
2162 
2163 	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
2164 	pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count;
2165 
2166 	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
2167 	pv_entry_high_water = 9 * (pv_entry_max / 10);
2168 
2169 	uma_zone_reserve_kva(pvzone, pv_entry_max);
2170 
2171 	/* Pre-fill pvzone with initial number of pv entries. */
2172 	uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN);
2173 
2174 	/* Initialize ptbl allocation. */
2175 	ptbl_init();
2176 }
2177 
2178 /*
2179  * Map a list of wired pages into kernel virtual address space.  This is
2180  * intended for temporary mappings which do not need page modification or
2181  * references recorded.  Existing mappings in the region are overwritten.
2182  */
2183 static void
2184 mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
2185 {
2186 	vm_offset_t va;
2187 
2188 	va = sva;
2189 	while (count-- > 0) {
2190 		mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
2191 		va += PAGE_SIZE;
2192 		m++;
2193 	}
2194 }
2195 
2196 /*
2197  * Remove page mappings from kernel virtual address space.  Intended for
2198  * temporary mappings entered by mmu_booke_qenter.
2199  */
2200 static void
2201 mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count)
2202 {
2203 	vm_offset_t va;
2204 
2205 	va = sva;
2206 	while (count-- > 0) {
2207 		mmu_booke_kremove(mmu, va);
2208 		va += PAGE_SIZE;
2209 	}
2210 }
2211 
2212 /*
2213  * Map a wired page into kernel virtual address space.
2214  */
2215 static void
2216 mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
2217 {
2218 
2219 	mmu_booke_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
2220 }
2221 
2222 static void
2223 mmu_booke_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
2224 {
2225 	uint32_t flags;
2226 	pte_t *pte;
2227 
2228 	KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
2229 	    (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va"));
2230 
2231 	flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;
2232 	flags |= tlb_calc_wimg(pa, ma) << PTE_MAS2_SHIFT;
2233 	flags |= PTE_PS_4KB;
2234 
2235 	pte = pte_find(mmu, kernel_pmap, va);
2236 	KASSERT((pte != NULL), ("mmu_booke_kenter: invalid va.  NULL PTE"));
2237 
2238 	mtx_lock_spin(&tlbivax_mutex);
2239 	tlb_miss_lock();
2240 
2241 	if (PTE_ISVALID(pte)) {
2242 
2243 		CTR1(KTR_PMAP, "%s: replacing entry!", __func__);
2244 
2245 		/* Flush entry from TLB0 */
2246 		tlb0_flush_entry(va);
2247 	}
2248 
2249 	*pte = PTE_RPN_FROM_PA(pa) | flags;
2250 
2251 	//debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x "
2252 	//		"pa=0x%08x rpn=0x%08x flags=0x%08x\n",
2253 	//		pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags);
2254 
2255 	/* Flush the real memory from the instruction cache. */
2256 	if ((flags & (PTE_I | PTE_G)) == 0)
2257 		__syncicache((void *)va, PAGE_SIZE);
2258 
2259 	tlb_miss_unlock();
2260 	mtx_unlock_spin(&tlbivax_mutex);
2261 }
2262 
2263 /*
2264  * Remove a page from kernel page table.
2265  */
2266 static void
2267 mmu_booke_kremove(mmu_t mmu, vm_offset_t va)
2268 {
2269 	pte_t *pte;
2270 
2271 	CTR2(KTR_PMAP, "%s: s (va = 0x%"PRI0ptrX")\n", __func__, va);
2272 
2273 	KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
2274 	    (va <= VM_MAX_KERNEL_ADDRESS)),
2275 	    ("mmu_booke_kremove: invalid va"));
2276 
2277 	pte = pte_find(mmu, kernel_pmap, va);
2278 
2279 	if (!PTE_ISVALID(pte)) {
2280 
2281 		CTR1(KTR_PMAP, "%s: invalid pte", __func__);
2282 
2283 		return;
2284 	}
2285 
2286 	mtx_lock_spin(&tlbivax_mutex);
2287 	tlb_miss_lock();
2288 
2289 	/* Invalidate entry in TLB0, update PTE. */
2290 	tlb0_flush_entry(va);
2291 	*pte = 0;
2292 
2293 	tlb_miss_unlock();
2294 	mtx_unlock_spin(&tlbivax_mutex);
2295 }
2296 
2297 /*
2298  * Provide a kernel pointer corresponding to a given userland pointer.
2299  * The returned pointer is valid until the next time this function is
2300  * called in this thread. This is used internally in copyin/copyout.
2301  */
2302 int
2303 mmu_booke_map_user_ptr(mmu_t mmu, pmap_t pm, volatile const void *uaddr,
2304     void **kaddr, size_t ulen, size_t *klen)
2305 {
2306 
2307 	if ((uintptr_t)uaddr + ulen > VM_MAXUSER_ADDRESS + PAGE_SIZE)
2308 		return (EFAULT);
2309 
2310 	*kaddr = (void *)(uintptr_t)uaddr;
2311 	if (klen)
2312 		*klen = ulen;
2313 
2314 	return (0);
2315 }
2316 
2317 /*
2318  * Figure out where a given kernel pointer (usually in a fault) points
2319  * to from the VM's perspective, potentially remapping into userland's
2320  * address space.
2321  */
2322 static int
2323 mmu_booke_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, int *is_user,
2324     vm_offset_t *decoded_addr)
2325 {
2326 
2327 	if (addr < VM_MAXUSER_ADDRESS)
2328 		*is_user = 1;
2329 	else
2330 		*is_user = 0;
2331 
2332 	*decoded_addr = addr;
2333 	return (0);
2334 }
2335 
2336 /*
2337  * Initialize pmap associated with process 0.
2338  */
2339 static void
2340 mmu_booke_pinit0(mmu_t mmu, pmap_t pmap)
2341 {
2342 
2343 	PMAP_LOCK_INIT(pmap);
2344 	mmu_booke_pinit(mmu, pmap);
2345 	PCPU_SET(curpmap, pmap);
2346 }
2347 
2348 /*
2349  * Initialize a preallocated and zeroed pmap structure,
2350  * such as one in a vmspace structure.
2351  */
2352 static void
2353 mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
2354 {
2355 	int i;
2356 
2357 	CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap,
2358 	    curthread->td_proc->p_pid, curthread->td_proc->p_comm);
2359 
2360 	KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap"));
2361 
2362 	for (i = 0; i < MAXCPU; i++)
2363 		pmap->pm_tid[i] = TID_NONE;
2364 	CPU_ZERO(&pmap->pm_active);
2365 	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
2366 #ifdef __powerpc64__
2367 	bzero(&pmap->pm_pp2d, sizeof(pte_t **) * PP2D_NENTRIES);
2368 	TAILQ_INIT(&pmap->pm_pdir_list);
2369 #else
2370 	bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES);
2371 #endif
2372 	TAILQ_INIT(&pmap->pm_ptbl_list);
2373 }
2374 
2375 /*
2376  * Release any resources held by the given physical map.
2377  * Called when a pmap initialized by mmu_booke_pinit is being released.
2378  * Should only be called if the map contains no valid mappings.
2379  */
2380 static void
2381 mmu_booke_release(mmu_t mmu, pmap_t pmap)
2382 {
2383 
2384 	KASSERT(pmap->pm_stats.resident_count == 0,
2385 	    ("pmap_release: pmap resident count %ld != 0",
2386 	    pmap->pm_stats.resident_count));
2387 }
2388 
2389 /*
2390  * Insert the given physical page at the specified virtual address in the
2391  * target physical map with the protection requested. If specified the page
2392  * will be wired down.
2393  */
2394 static int
2395 mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
2396     vm_prot_t prot, u_int flags, int8_t psind)
2397 {
2398 	int error;
2399 
2400 	rw_wlock(&pvh_global_lock);
2401 	PMAP_LOCK(pmap);
2402 	error = mmu_booke_enter_locked(mmu, pmap, va, m, prot, flags, psind);
2403 	PMAP_UNLOCK(pmap);
2404 	rw_wunlock(&pvh_global_lock);
2405 	return (error);
2406 }
2407 
2408 static int
2409 mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
2410     vm_prot_t prot, u_int pmap_flags, int8_t psind __unused)
2411 {
2412 	pte_t *pte;
2413 	vm_paddr_t pa;
2414 	uint32_t flags;
2415 	int error, su, sync;
2416 
2417 	pa = VM_PAGE_TO_PHYS(m);
2418 	su = (pmap == kernel_pmap);
2419 	sync = 0;
2420 
2421 	//debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x "
2422 	//		"pa=0x%08x prot=0x%08x flags=%#x)\n",
2423 	//		(u_int32_t)pmap, su, pmap->pm_tid,
2424 	//		(u_int32_t)m, va, pa, prot, flags);
2425 
2426 	if (su) {
2427 		KASSERT(((va >= virtual_avail) &&
2428 		    (va <= VM_MAX_KERNEL_ADDRESS)),
2429 		    ("mmu_booke_enter_locked: kernel pmap, non kernel va"));
2430 	} else {
2431 		KASSERT((va <= VM_MAXUSER_ADDRESS),
2432 		    ("mmu_booke_enter_locked: user pmap, non user va"));
2433 	}
2434 	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
2435 		VM_OBJECT_ASSERT_LOCKED(m->object);
2436 
2437 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2438 
2439 	/*
2440 	 * If there is an existing mapping, and the physical address has not
2441 	 * changed, this must be a protection or wiring change.
2442 	 */
2443 	if (((pte = pte_find(mmu, pmap, va)) != NULL) &&
2444 	    (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) {
2445 
2446 		/*
2447 		 * Before actually updating pte->flags we calculate and
2448 		 * prepare its new value in a helper var.
2449 		 */
2450 		flags = *pte;
2451 		flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED);
2452 
2453 		/* Wiring change, just update stats. */
2454 		if ((pmap_flags & PMAP_ENTER_WIRED) != 0) {
2455 			if (!PTE_ISWIRED(pte)) {
2456 				flags |= PTE_WIRED;
2457 				pmap->pm_stats.wired_count++;
2458 			}
2459 		} else {
2460 			if (PTE_ISWIRED(pte)) {
2461 				flags &= ~PTE_WIRED;
2462 				pmap->pm_stats.wired_count--;
2463 			}
2464 		}
2465 
2466 		if (prot & VM_PROT_WRITE) {
2467 			/* Add write permissions. */
2468 			flags |= PTE_SW;
2469 			if (!su)
2470 				flags |= PTE_UW;
2471 
2472 			if ((flags & PTE_MANAGED) != 0)
2473 				vm_page_aflag_set(m, PGA_WRITEABLE);
2474 		} else {
2475 			/* Handle modified pages, sense modify status. */
2476 
2477 			/*
2478 			 * The PTE_MODIFIED flag could have been set by the
2479 			 * TLB miss handler since we last read it (above), and
2480 			 * other CPUs could have updated it, so check the PTE
2481 			 * directly rather than relying on that saved local
2482 			 * copy of the flags.
2483 			 */
2484 			if (PTE_ISMODIFIED(pte))
2485 				vm_page_dirty(m);
2486 		}
2487 
2488 		if (prot & VM_PROT_EXECUTE) {
2489 			flags |= PTE_SX;
2490 			if (!su)
2491 				flags |= PTE_UX;
2492 
2493 			/*
2494 			 * Check existing flags for execute permissions: if we
2495 			 * are turning execute permissions on, icache should
2496 			 * be flushed.
2497 			 */
2498 			if ((*pte & (PTE_UX | PTE_SX)) == 0)
2499 				sync++;
2500 		}
2501 
2502 		flags &= ~PTE_REFERENCED;
2503 
2504 		/*
2505 		 * The new flags value is all calculated -- only now actually
2506 		 * update the PTE.
2507 		 */
2508 		mtx_lock_spin(&tlbivax_mutex);
2509 		tlb_miss_lock();
2510 
2511 		tlb0_flush_entry(va);
2512 		*pte &= ~PTE_FLAGS_MASK;
2513 		*pte |= flags;
2514 
2515 		tlb_miss_unlock();
2516 		mtx_unlock_spin(&tlbivax_mutex);
2517 
2518 	} else {
2519 		/*
2520 		 * If there is an existing mapping, but it's for a different
2521 		 * physical address, pte_enter() will delete the old mapping.
2522 		 */
2523 		//if ((pte != NULL) && PTE_ISVALID(pte))
2524 		//	debugf("mmu_booke_enter_locked: replace\n");
2525 		//else
2526 		//	debugf("mmu_booke_enter_locked: new\n");
2527 
2528 		/* Now set up the flags and install the new mapping. */
2529 		flags = (PTE_SR | PTE_VALID);
2530 		flags |= PTE_M;
2531 
2532 		if (!su)
2533 			flags |= PTE_UR;
2534 
2535 		if (prot & VM_PROT_WRITE) {
2536 			flags |= PTE_SW;
2537 			if (!su)
2538 				flags |= PTE_UW;
2539 
2540 			if ((m->oflags & VPO_UNMANAGED) == 0)
2541 				vm_page_aflag_set(m, PGA_WRITEABLE);
2542 		}
2543 
2544 		if (prot & VM_PROT_EXECUTE) {
2545 			flags |= PTE_SX;
2546 			if (!su)
2547 				flags |= PTE_UX;
2548 		}
2549 
2550 		/* If it's wired, update stats. */
2551 		if ((pmap_flags & PMAP_ENTER_WIRED) != 0)
2552 			flags |= PTE_WIRED;
2553 
2554 		error = pte_enter(mmu, pmap, m, va, flags,
2555 		    (pmap_flags & PMAP_ENTER_NOSLEEP) != 0);
2556 		if (error != 0)
2557 			return (KERN_RESOURCE_SHORTAGE);
2558 
2559 		if ((flags & PTE_WIRED) != 0)
2560 			pmap->pm_stats.wired_count++;
2561 
2562 		/* Flush the real memory from the instruction cache. */
2563 		if (prot & VM_PROT_EXECUTE)
2564 			sync++;
2565 	}
2566 
2567 	if (sync && (su || pmap == PCPU_GET(curpmap))) {
2568 		__syncicache((void *)va, PAGE_SIZE);
2569 		sync = 0;
2570 	}
2571 
2572 	return (KERN_SUCCESS);
2573 }
2574 
2575 /*
2576  * Maps a sequence of resident pages belonging to the same object.
2577  * The sequence begins with the given page m_start.  This page is
2578  * mapped at the given virtual address start.  Each subsequent page is
2579  * mapped at a virtual address that is offset from start by the same
2580  * amount as the page is offset from m_start within the object.  The
2581  * last page in the sequence is the page with the largest offset from
2582  * m_start that can be mapped at a virtual address less than the given
2583  * virtual address end.  Not every virtual page between start and end
2584  * is mapped; only those for which a resident page exists with the
2585  * corresponding offset from m_start are mapped.
2586  */
2587 static void
2588 mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
2589     vm_offset_t end, vm_page_t m_start, vm_prot_t prot)
2590 {
2591 	vm_page_t m;
2592 	vm_pindex_t diff, psize;
2593 
2594 	VM_OBJECT_ASSERT_LOCKED(m_start->object);
2595 
2596 	psize = atop(end - start);
2597 	m = m_start;
2598 	rw_wlock(&pvh_global_lock);
2599 	PMAP_LOCK(pmap);
2600 	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
2601 		mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m,
2602 		    prot & (VM_PROT_READ | VM_PROT_EXECUTE),
2603 		    PMAP_ENTER_NOSLEEP, 0);
2604 		m = TAILQ_NEXT(m, listq);
2605 	}
2606 	rw_wunlock(&pvh_global_lock);
2607 	PMAP_UNLOCK(pmap);
2608 }
2609 
2610 static void
2611 mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
2612     vm_prot_t prot)
2613 {
2614 
2615 	rw_wlock(&pvh_global_lock);
2616 	PMAP_LOCK(pmap);
2617 	mmu_booke_enter_locked(mmu, pmap, va, m,
2618 	    prot & (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP,
2619 	    0);
2620 	rw_wunlock(&pvh_global_lock);
2621 	PMAP_UNLOCK(pmap);
2622 }
2623 
2624 /*
2625  * Remove the given range of addresses from the specified map.
2626  *
2627  * It is assumed that the start and end are properly rounded to the page size.
2628  */
2629 static void
2630 mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva)
2631 {
2632 	pte_t *pte;
2633 	uint8_t hold_flag;
2634 
2635 	int su = (pmap == kernel_pmap);
2636 
2637 	//debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n",
2638 	//		su, (u_int32_t)pmap, pmap->pm_tid, va, endva);
2639 
2640 	if (su) {
2641 		KASSERT(((va >= virtual_avail) &&
2642 		    (va <= VM_MAX_KERNEL_ADDRESS)),
2643 		    ("mmu_booke_remove: kernel pmap, non kernel va"));
2644 	} else {
2645 		KASSERT((va <= VM_MAXUSER_ADDRESS),
2646 		    ("mmu_booke_remove: user pmap, non user va"));
2647 	}
2648 
2649 	if (PMAP_REMOVE_DONE(pmap)) {
2650 		//debugf("mmu_booke_remove: e (empty)\n");
2651 		return;
2652 	}
2653 
2654 	hold_flag = PTBL_HOLD_FLAG(pmap);
2655 	//debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag);
2656 
2657 	rw_wlock(&pvh_global_lock);
2658 	PMAP_LOCK(pmap);
2659 	for (; va < endva; va += PAGE_SIZE) {
2660 		pte = pte_find(mmu, pmap, va);
2661 		if ((pte != NULL) && PTE_ISVALID(pte))
2662 			pte_remove(mmu, pmap, va, hold_flag);
2663 	}
2664 	PMAP_UNLOCK(pmap);
2665 	rw_wunlock(&pvh_global_lock);
2666 
2667 	//debugf("mmu_booke_remove: e\n");
2668 }
2669 
2670 /*
2671  * Remove physical page from all pmaps in which it resides.
2672  */
2673 static void
2674 mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
2675 {
2676 	pv_entry_t pv, pvn;
2677 	uint8_t hold_flag;
2678 
2679 	rw_wlock(&pvh_global_lock);
2680 	for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) {
2681 		pvn = TAILQ_NEXT(pv, pv_link);
2682 
2683 		PMAP_LOCK(pv->pv_pmap);
2684 		hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap);
2685 		pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag);
2686 		PMAP_UNLOCK(pv->pv_pmap);
2687 	}
2688 	vm_page_aflag_clear(m, PGA_WRITEABLE);
2689 	rw_wunlock(&pvh_global_lock);
2690 }
2691 
2692 /*
2693  * Map a range of physical addresses into kernel virtual address space.
2694  */
2695 static vm_offset_t
2696 mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
2697     vm_paddr_t pa_end, int prot)
2698 {
2699 	vm_offset_t sva = *virt;
2700 	vm_offset_t va = sva;
2701 
2702 	//debugf("mmu_booke_map: s (sva = 0x%08x pa_start = 0x%08x pa_end = 0x%08x)\n",
2703 	//		sva, pa_start, pa_end);
2704 
2705 	while (pa_start < pa_end) {
2706 		mmu_booke_kenter(mmu, va, pa_start);
2707 		va += PAGE_SIZE;
2708 		pa_start += PAGE_SIZE;
2709 	}
2710 	*virt = va;
2711 
2712 	//debugf("mmu_booke_map: e (va = 0x%08x)\n", va);
2713 	return (sva);
2714 }
2715 
2716 /*
2717  * The pmap must be activated before its address space can be accessed in any
2718  * way.
2719  */
2720 static void
2721 mmu_booke_activate(mmu_t mmu, struct thread *td)
2722 {
2723 	pmap_t pmap;
2724 	u_int cpuid;
2725 
2726 	pmap = &td->td_proc->p_vmspace->vm_pmap;
2727 
2728 	CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x%"PRI0ptrX")",
2729 	    __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
2730 
2731 	KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!"));
2732 
2733 	sched_pin();
2734 
2735 	cpuid = PCPU_GET(cpuid);
2736 	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
2737 	PCPU_SET(curpmap, pmap);
2738 
2739 	if (pmap->pm_tid[cpuid] == TID_NONE)
2740 		tid_alloc(pmap);
2741 
2742 	/* Load PID0 register with pmap tid value. */
2743 	mtspr(SPR_PID0, pmap->pm_tid[cpuid]);
2744 	__asm __volatile("isync");
2745 
2746 	mtspr(SPR_DBCR0, td->td_pcb->pcb_cpu.booke.dbcr0);
2747 
2748 	sched_unpin();
2749 
2750 	CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__,
2751 	    pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm);
2752 }
2753 
2754 /*
2755  * Deactivate the specified process's address space.
2756  */
2757 static void
2758 mmu_booke_deactivate(mmu_t mmu, struct thread *td)
2759 {
2760 	pmap_t pmap;
2761 
2762 	pmap = &td->td_proc->p_vmspace->vm_pmap;
2763 
2764 	CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%"PRI0ptrX,
2765 	    __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
2766 
2767 	td->td_pcb->pcb_cpu.booke.dbcr0 = mfspr(SPR_DBCR0);
2768 
2769 	CPU_CLR_ATOMIC(PCPU_GET(cpuid), &pmap->pm_active);
2770 	PCPU_SET(curpmap, NULL);
2771 }
2772 
2773 /*
2774  * Copy the range specified by src_addr/len
2775  * from the source map to the range dst_addr/len
2776  * in the destination map.
2777  *
2778  * This routine is only advisory and need not do anything.
2779  */
2780 static void
2781 mmu_booke_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
2782     vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
2783 {
2784 
2785 }
2786 
2787 /*
2788  * Set the physical protection on the specified range of this map as requested.
2789  */
2790 static void
2791 mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
2792     vm_prot_t prot)
2793 {
2794 	vm_offset_t va;
2795 	vm_page_t m;
2796 	pte_t *pte;
2797 
2798 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
2799 		mmu_booke_remove(mmu, pmap, sva, eva);
2800 		return;
2801 	}
2802 
2803 	if (prot & VM_PROT_WRITE)
2804 		return;
2805 
2806 	PMAP_LOCK(pmap);
2807 	for (va = sva; va < eva; va += PAGE_SIZE) {
2808 		if ((pte = pte_find(mmu, pmap, va)) != NULL) {
2809 			if (PTE_ISVALID(pte)) {
2810 				m = PHYS_TO_VM_PAGE(PTE_PA(pte));
2811 
2812 				mtx_lock_spin(&tlbivax_mutex);
2813 				tlb_miss_lock();
2814 
2815 				/* Handle modified pages. */
2816 				if (PTE_ISMODIFIED(pte) && PTE_ISMANAGED(pte))
2817 					vm_page_dirty(m);
2818 
2819 				tlb0_flush_entry(va);
2820 				*pte &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);
2821 
2822 				tlb_miss_unlock();
2823 				mtx_unlock_spin(&tlbivax_mutex);
2824 			}
2825 		}
2826 	}
2827 	PMAP_UNLOCK(pmap);
2828 }
2829 
2830 /*
2831  * Clear the write and modified bits in each of the given page's mappings.
2832  */
2833 static void
2834 mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
2835 {
2836 	pv_entry_t pv;
2837 	pte_t *pte;
2838 
2839 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2840 	    ("mmu_booke_remove_write: page %p is not managed", m));
2841 
2842 	/*
2843 	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
2844 	 * set by another thread while the object is locked.  Thus,
2845 	 * if PGA_WRITEABLE is clear, no page table entries need updating.
2846 	 */
2847 	VM_OBJECT_ASSERT_WLOCKED(m->object);
2848 	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
2849 		return;
2850 	rw_wlock(&pvh_global_lock);
2851 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2852 		PMAP_LOCK(pv->pv_pmap);
2853 		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
2854 			if (PTE_ISVALID(pte)) {
2855 				m = PHYS_TO_VM_PAGE(PTE_PA(pte));
2856 
2857 				mtx_lock_spin(&tlbivax_mutex);
2858 				tlb_miss_lock();
2859 
2860 				/* Handle modified pages. */
2861 				if (PTE_ISMODIFIED(pte))
2862 					vm_page_dirty(m);
2863 
2864 				/* Flush mapping from TLB0. */
2865 				*pte &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);
2866 
2867 				tlb_miss_unlock();
2868 				mtx_unlock_spin(&tlbivax_mutex);
2869 			}
2870 		}
2871 		PMAP_UNLOCK(pv->pv_pmap);
2872 	}
2873 	vm_page_aflag_clear(m, PGA_WRITEABLE);
2874 	rw_wunlock(&pvh_global_lock);
2875 }
2876 
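/*
 * Sync the instruction cache for the given range of a pmap.  If the pmap is
 * not the active one, each page is temporarily entered into the current pmap
 * so that it can be flushed by virtual address.
 */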
2877 static void
2878 mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
2879 {
2880 	pte_t *pte;
2881 	pmap_t pmap;
2882 	vm_page_t m;
2883 	vm_offset_t addr;
2884 	vm_paddr_t pa = 0;
2885 	int active, valid;
2886 
2887 	va = trunc_page(va);
2888 	sz = round_page(sz);
2889 
2890 	rw_wlock(&pvh_global_lock);
2891 	pmap = PCPU_GET(curpmap);
2892 	active = (pm == kernel_pmap || pm == pmap) ? 1 : 0;
2893 	while (sz > 0) {
2894 		PMAP_LOCK(pm);
2895 		pte = pte_find(mmu, pm, va);
2896 		valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0;
2897 		if (valid)
2898 			pa = PTE_PA(pte);
2899 		PMAP_UNLOCK(pm);
2900 		if (valid) {
2901 			if (!active) {
2902 				/* Create a mapping in the active pmap. */
2903 				addr = 0;
2904 				m = PHYS_TO_VM_PAGE(pa);
2905 				PMAP_LOCK(pmap);
2906 				pte_enter(mmu, pmap, m, addr,
2907 				    PTE_SR | PTE_VALID | PTE_UR, FALSE);
2908 				__syncicache((void *)addr, PAGE_SIZE);
2909 				pte_remove(mmu, pmap, addr, PTBL_UNHOLD);
2910 				PMAP_UNLOCK(pmap);
2911 			} else
2912 				__syncicache((void *)va, PAGE_SIZE);
2913 		}
2914 		va += PAGE_SIZE;
2915 		sz -= PAGE_SIZE;
2916 	}
2917 	rw_wunlock(&pvh_global_lock);
2918 }
2919 
2920 /*
2921  * Atomically extract and hold the physical page with the given
2922  * pmap and virtual address pair if that mapping permits the given
2923  * protection.
2924  */
2925 static vm_page_t
2926 mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va,
2927     vm_prot_t prot)
2928 {
2929 	pte_t *pte;
2930 	vm_page_t m;
2931 	uint32_t pte_wbit;
2932 	vm_paddr_t pa;
2933 
2934 	m = NULL;
2935 	pa = 0;
2936 	PMAP_LOCK(pmap);
2937 retry:
2938 	pte = pte_find(mmu, pmap, va);
2939 	if ((pte != NULL) && PTE_ISVALID(pte)) {
2940 		if (pmap == kernel_pmap)
2941 			pte_wbit = PTE_SW;
2942 		else
2943 			pte_wbit = PTE_UW;
2944 
2945 		if ((*pte & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) {
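			/*
			 * vm_page_pa_tryrelock() may drop the pmap lock to
			 * take the page lock; if so, the PTE may be stale,
			 * so look it up again.
			 */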
2946 			if (vm_page_pa_tryrelock(pmap, PTE_PA(pte), &pa))
2947 				goto retry;
2948 			m = PHYS_TO_VM_PAGE(PTE_PA(pte));
2949 			vm_page_hold(m);
2950 		}
2951 	}
2952 
2953 	PA_UNLOCK_COND(pa);
2954 	PMAP_UNLOCK(pmap);
2955 	return (m);
2956 }
2957 
2958 /*
2959  * Initialize a vm_page's machine-dependent fields.
2960  */
2961 static void
2962 mmu_booke_page_init(mmu_t mmu, vm_page_t m)
2963 {
2964 
2965 	m->md.pv_tracked = 0;
2966 	TAILQ_INIT(&m->md.pv_list);
2967 }
2968 
2969 /*
2970  * mmu_booke_zero_page_area zeros the specified hardware page by
2971  * mapping it into virtual memory and using bzero to clear
2972  * its contents.
2973  *
2974  * off and size must reside within a single page.
2975  */
2976 static void
2977 mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
2978 {
2979 	vm_offset_t va;
2980 
2981 	/* XXX KASSERT off and size are within a single page? */
2982 
2983 	mtx_lock(&zero_page_mutex);
2984 	va = zero_page_va;
2985 
2986 	mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
2987 	bzero((caddr_t)va + off, size);
2988 	mmu_booke_kremove(mmu, va);
2989 
2990 	mtx_unlock(&zero_page_mutex);
2991 }
2992 
2993 /*
2994  * mmu_booke_zero_page zeros the specified hardware page.
2995  */
2996 static void
2997 mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
2998 {
2999 	vm_offset_t off, va;
3000 
3001 	mtx_lock(&zero_page_mutex);
3002 	va = zero_page_va;
3003 
3004 	mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
3005 	for (off = 0; off < PAGE_SIZE; off += cacheline_size)
3006 		__asm __volatile("dcbz 0,%0" :: "r"(va + off));
3007 	mmu_booke_kremove(mmu, va);
3008 
3009 	mtx_unlock(&zero_page_mutex);
3010 }
3011 
3012 /*
3013  * mmu_booke_copy_page copies the specified (machine independent) page by
3014  * mapping the page into virtual memory and using memcpy to copy the page,
3015  * one machine dependent page at a time.
3016  */
3017 static void
3018 mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
3019 {
3020 	vm_offset_t sva, dva;
3021 
3022 	sva = copy_page_src_va;
3023 	dva = copy_page_dst_va;
3024 
3025 	mtx_lock(&copy_page_mutex);
3026 	mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm));
3027 	mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm));
3028 	memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
3029 	mmu_booke_kremove(mmu, dva);
3030 	mmu_booke_kremove(mmu, sva);
3031 	mtx_unlock(&copy_page_mutex);
3032 }
3033 
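/*
 * Copy xfersize bytes between the two page arrays at the given byte offsets,
 * using the shared copy-page KVA windows.  Each chunk is limited so that it
 * does not cross a page boundary in either the source or the destination.
 */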
3034 static inline void
3035 mmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
3036     vm_page_t *mb, vm_offset_t b_offset, int xfersize)
3037 {
3038 	void *a_cp, *b_cp;
3039 	vm_offset_t a_pg_offset, b_pg_offset;
3040 	int cnt;
3041 
3042 	mtx_lock(&copy_page_mutex);
3043 	while (xfersize > 0) {
3044 		a_pg_offset = a_offset & PAGE_MASK;
3045 		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
3046 		mmu_booke_kenter(mmu, copy_page_src_va,
3047 		    VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
3048 		a_cp = (char *)copy_page_src_va + a_pg_offset;
3049 		b_pg_offset = b_offset & PAGE_MASK;
3050 		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
3051 		mmu_booke_kenter(mmu, copy_page_dst_va,
3052 		    VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
3053 		b_cp = (char *)copy_page_dst_va + b_pg_offset;
3054 		bcopy(a_cp, b_cp, cnt);
3055 		mmu_booke_kremove(mmu, copy_page_dst_va);
3056 		mmu_booke_kremove(mmu, copy_page_src_va);
3057 		a_offset += cnt;
3058 		b_offset += cnt;
3059 		xfersize -= cnt;
3060 	}
3061 	mtx_unlock(&copy_page_mutex);
3062 }
3063 
3064 static vm_offset_t
3065 mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m)
3066 {
3067 	vm_paddr_t paddr;
3068 	vm_offset_t qaddr;
3069 	uint32_t flags;
3070 	pte_t *pte;
3071 
3072 	paddr = VM_PAGE_TO_PHYS(m);
3073 
3074 	flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;
3075 	flags |= tlb_calc_wimg(paddr, pmap_page_get_memattr(m)) << PTE_MAS2_SHIFT;
3076 	flags |= PTE_PS_4KB;
3077 
3078 	critical_enter();
3079 	qaddr = PCPU_GET(qmap_addr);
3080 
3081 	pte = pte_find(mmu, kernel_pmap, qaddr);
3082 
3083 	KASSERT(*pte == 0, ("mmu_booke_quick_enter_page: PTE busy"));
3084 
3085 	/*
3086 	 * XXX: tlbivax is broadcast to other cores, but qaddr should
3087  	 * not be present in other TLBs.  Is there a better instruction
3088 	 * sequence to use? Or just forget it & use mmu_booke_kenter()...
3089 	 */
3090 	__asm __volatile("tlbivax 0, %0" :: "r"(qaddr & MAS2_EPN_MASK));
3091 	__asm __volatile("isync; msync");
3092 
3093 	*pte = PTE_RPN_FROM_PA(paddr) | flags;
3094 
3095 	/* Flush the real memory from the instruction cache. */
3096 	if ((flags & (PTE_I | PTE_G)) == 0)
3097 		__syncicache((void *)qaddr, PAGE_SIZE);
3098 
3099 	return (qaddr);
3100 }
3101 
3102 static void
3103 mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr)
3104 {
3105 	pte_t *pte;
3106 
3107 	pte = pte_find(mmu, kernel_pmap, addr);
3108 
3109 	KASSERT(PCPU_GET(qmap_addr) == addr,
3110 	    ("mmu_booke_quick_remove_page: invalid address"));
3111 	KASSERT(*pte != 0,
3112 	    ("mmu_booke_quick_remove_page: PTE not in use"));
3113 
3114 	*pte = 0;
3115 	critical_exit();
3116 }
3117 
3118 /*
3119  * Return whether or not the specified physical page was modified
3120  * in any of the physical maps.
3121  */
3122 static boolean_t
3123 mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
3124 {
3125 	pte_t *pte;
3126 	pv_entry_t pv;
3127 	boolean_t rv;
3128 
3129 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3130 	    ("mmu_booke_is_modified: page %p is not managed", m));
3131 	rv = FALSE;
3132 
3133 	/*
3134 	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
3135 	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
3136 	 * is clear, no PTEs can be modified.
3137 	 */
3138 	VM_OBJECT_ASSERT_WLOCKED(m->object);
3139 	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
3140 		return (rv);
3141 	rw_wlock(&pvh_global_lock);
3142 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3143 		PMAP_LOCK(pv->pv_pmap);
3144 		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
3145 		    PTE_ISVALID(pte)) {
3146 			if (PTE_ISMODIFIED(pte))
3147 				rv = TRUE;
3148 		}
3149 		PMAP_UNLOCK(pv->pv_pmap);
3150 		if (rv)
3151 			break;
3152 	}
3153 	rw_wunlock(&pvh_global_lock);
3154 	return (rv);
3155 }
3156 
3157 /*
3158  * Return whether or not the specified virtual address is eligible
3159  * for prefault.
3160  */
3161 static boolean_t
3162 mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
3163 {
3164 
3165 	return (FALSE);
3166 }
3167 
3168 /*
3169  * Return whether or not the specified physical page was referenced
3170  * in any physical maps.
3171  */
3172 static boolean_t
3173 mmu_booke_is_referenced(mmu_t mmu, vm_page_t m)
3174 {
3175 	pte_t *pte;
3176 	pv_entry_t pv;
3177 	boolean_t rv;
3178 
3179 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3180 	    ("mmu_booke_is_referenced: page %p is not managed", m));
3181 	rv = FALSE;
3182 	rw_wlock(&pvh_global_lock);
3183 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3184 		PMAP_LOCK(pv->pv_pmap);
3185 		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
3186 		    PTE_ISVALID(pte)) {
3187 			if (PTE_ISREFERENCED(pte))
3188 				rv = TRUE;
3189 		}
3190 		PMAP_UNLOCK(pv->pv_pmap);
3191 		if (rv)
3192 			break;
3193 	}
3194 	rw_wunlock(&pvh_global_lock);
3195 	return (rv);
3196 }
3197 
3198 /*
3199  * Clear the modify bits on the specified physical page.
3200  */
3201 static void
3202 mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
3203 {
3204 	pte_t *pte;
3205 	pv_entry_t pv;
3206 
3207 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3208 	    ("mmu_booke_clear_modify: page %p is not managed", m));
3209 	VM_OBJECT_ASSERT_WLOCKED(m->object);
3210 	KASSERT(!vm_page_xbusied(m),
3211 	    ("mmu_booke_clear_modify: page %p is exclusive busied", m));
3212 
3213 	/*
3214 	 * If the page is not PGA_WRITEABLE, then no PTEs can be modified.
3215 	 * If the object containing the page is locked and the page is not
3216 	 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
3217 	 */
3218 	if ((m->aflags & PGA_WRITEABLE) == 0)
3219 		return;
3220 	rw_wlock(&pvh_global_lock);
3221 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3222 		PMAP_LOCK(pv->pv_pmap);
3223 		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
3224 		    PTE_ISVALID(pte)) {
3225 			mtx_lock_spin(&tlbivax_mutex);
3226 			tlb_miss_lock();
3227 
3228 			if (*pte & (PTE_SW | PTE_UW | PTE_MODIFIED)) {
3229 				tlb0_flush_entry(pv->pv_va);
3230 				*pte &= ~(PTE_SW | PTE_UW | PTE_MODIFIED |
3231 				    PTE_REFERENCED);
3232 			}
3233 
3234 			tlb_miss_unlock();
3235 			mtx_unlock_spin(&tlbivax_mutex);
3236 		}
3237 		PMAP_UNLOCK(pv->pv_pmap);
3238 	}
3239 	rw_wunlock(&pvh_global_lock);
3240 }
3241 
3242 /*
3243  * Return a count of reference bits for a page, clearing those bits.
3244  * It is not necessary for every reference bit to be cleared, but it
3245  * is necessary that 0 only be returned when there are truly no
3246  * reference bits set.
3247  *
3248  * As an optimization, update the page's dirty field if a modified bit is
3249  * found while counting reference bits.  This opportunistic update can be
3250  * performed at low cost and can eliminate the need for some future calls
3251  * to pmap_is_modified().  However, since this function stops after
3252  * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
3253  * dirty pages.  Those dirty pages will only be detected by a future call
3254  * to pmap_is_modified().
3255  */
3256 static int
3257 mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
3258 {
3259 	pte_t *pte;
3260 	pv_entry_t pv;
3261 	int count;
3262 
3263 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3264 	    ("mmu_booke_ts_referenced: page %p is not managed", m));
3265 	count = 0;
3266 	rw_wlock(&pvh_global_lock);
3267 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3268 		PMAP_LOCK(pv->pv_pmap);
3269 		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
3270 		    PTE_ISVALID(pte)) {
3271 			if (PTE_ISMODIFIED(pte))
3272 				vm_page_dirty(m);
3273 			if (PTE_ISREFERENCED(pte)) {
3274 				mtx_lock_spin(&tlbivax_mutex);
3275 				tlb_miss_lock();
3276 
3277 				tlb0_flush_entry(pv->pv_va);
3278 				*pte &= ~PTE_REFERENCED;
3279 
3280 				tlb_miss_unlock();
3281 				mtx_unlock_spin(&tlbivax_mutex);
3282 
3283 				if (++count >= PMAP_TS_REFERENCED_MAX) {
3284 					PMAP_UNLOCK(pv->pv_pmap);
3285 					break;
3286 				}
3287 			}
3288 		}
3289 		PMAP_UNLOCK(pv->pv_pmap);
3290 	}
3291 	rw_wunlock(&pvh_global_lock);
3292 	return (count);
3293 }
3294 
3295 /*
3296  * Clear the wired attribute from the mappings for the specified range of
3297  * addresses in the given pmap.  Every valid mapping within that range must
3298  * have the wired attribute set.  In contrast, invalid mappings cannot have
3299  * the wired attribute set, so they are ignored.
3300  *
3301  * The wired attribute of the page table entry is not a hardware feature, so
3302  * there is no need to invalidate any TLB entries.
3303  */
3304 static void
3305 mmu_booke_unwire(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
3306 {
3307 	vm_offset_t va;
3308 	pte_t *pte;
3309 
3310 	PMAP_LOCK(pmap);
3311 	for (va = sva; va < eva; va += PAGE_SIZE) {
3312 		if ((pte = pte_find(mmu, pmap, va)) != NULL &&
3313 		    PTE_ISVALID(pte)) {
3314 			if (!PTE_ISWIRED(pte))
3315 				panic("mmu_booke_unwire: pte %p isn't wired",
3316 				    pte);
3317 			*pte &= ~PTE_WIRED;
3318 			pmap->pm_stats.wired_count--;
3319 		}
3320 	}
3321 	PMAP_UNLOCK(pmap);
3322 
3323 }
3324 
3325 /*
3326  * Return true if the pmap's pv is one of the first 16 pvs linked to from this
3327  * page.  This count may be changed upwards or downwards in the future; it is
3328  * only necessary that true be returned for a small subset of pmaps for proper
3329  * page aging.
3330  */
3331 static boolean_t
3332 mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
3333 {
3334 	pv_entry_t pv;
3335 	int loops;
3336 	boolean_t rv;
3337 
3338 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3339 	    ("mmu_booke_page_exists_quick: page %p is not managed", m));
3340 	loops = 0;
3341 	rv = FALSE;
3342 	rw_wlock(&pvh_global_lock);
3343 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3344 		if (pv->pv_pmap == pmap) {
3345 			rv = TRUE;
3346 			break;
3347 		}
3348 		if (++loops >= 16)
3349 			break;
3350 	}
3351 	rw_wunlock(&pvh_global_lock);
3352 	return (rv);
3353 }
3354 
3355 /*
3356  * Return the number of managed mappings to the given physical page that are
3357  * wired.
3358  */
3359 static int
3360 mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m)
3361 {
3362 	pv_entry_t pv;
3363 	pte_t *pte;
3364 	int count = 0;
3365 
3366 	if ((m->oflags & VPO_UNMANAGED) != 0)
3367 		return (count);
3368 	rw_wlock(&pvh_global_lock);
3369 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3370 		PMAP_LOCK(pv->pv_pmap);
3371 		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL)
3372 			if (PTE_ISVALID(pte) && PTE_ISWIRED(pte))
3373 				count++;
3374 		PMAP_UNLOCK(pv->pv_pmap);
3375 	}
3376 	rw_wunlock(&pvh_global_lock);
3377 	return (count);
3378 }
3379 
3380 static int
3381 mmu_booke_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
3382 {
3383 	int i;
3384 	vm_offset_t va;
3385 
3386 	/*
3387 	 * This currently does not work for entries that
3388 	 * overlap TLB1 entries.
3389 	 */
3390 	for (i = 0; i < TLB1_ENTRIES; i ++) {
3391 		if (tlb1_iomapped(i, pa, size, &va) == 0)
3392 			return (0);
3393 	}
3394 
3395 	return (EFAULT);
3396 }
3397 
3398 void
3399 mmu_booke_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
3400 {
3401 	vm_paddr_t ppa;
3402 	vm_offset_t ofs;
3403 	vm_size_t gran;
3404 
3405 	/* Minidumps are based on virtual memory addresses. */
3406 	if (do_minidump) {
3407 		*va = (void *)(vm_offset_t)pa;
3408 		return;
3409 	}
3410 
3411 	/* Raw physical memory dumps don't have a virtual address. */
3412 	/* We always map a 256MB page at 256M. */
3413 	gran = 256 * 1024 * 1024;
3414 	ppa = rounddown2(pa, gran);
3415 	ofs = pa - ppa;
3416 	*va = (void *)gran;
3417 	tlb1_set_entry((vm_offset_t)*va, ppa, gran, _TLB_ENTRY_IO);
3418 
3419 	if (sz > (gran - ofs))
3420 		tlb1_set_entry((vm_offset_t)*va + gran, ppa + gran, gran,
3421 		    _TLB_ENTRY_IO);
3422 }
3423 
3424 void
3425 mmu_booke_dumpsys_unmap(mmu_t mmu, vm_paddr_t pa, size_t sz, void *va)
3426 {
3427 	vm_paddr_t ppa;
3428 	vm_offset_t ofs;
3429 	vm_size_t gran;
3430 	tlb_entry_t e;
3431 	int i;
3432 
3433 	/* Minidumps are based on virtual memory addresses. */
3434 	/* Nothing to do... */
3435 	if (do_minidump)
3436 		return;
3437 
3438 	for (i = 0; i < TLB1_ENTRIES; i++) {
3439 		tlb1_read_entry(&e, i);
3440 		if (!(e.mas1 & MAS1_VALID))
3441 			break;
3442 	}
3443 
3444 	/* Raw physical memory dumps don't have a virtual address. */
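	/*
	 * Invalidate the last valid entry, i.e. the 256MB window entered by
	 * mmu_booke_dumpsys_map().
	 */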
3445 	i--;
3446 	e.mas1 = 0;
3447 	e.mas2 = 0;
3448 	e.mas3 = 0;
3449 	tlb1_write_entry(&e, i);
3450 
3451 	gran = 256 * 1024 * 1024;
3452 	ppa = rounddown2(pa, gran);
3453 	ofs = pa - ppa;
3454 	if (sz > (gran - ofs)) {
3455 		i--;
3456 		e.mas1 = 0;
3457 		e.mas2 = 0;
3458 		e.mas3 = 0;
3459 		tlb1_write_entry(&e, i);
3460 	}
3461 }
3462 
3463 extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1];
3464 
3465 void
3466 mmu_booke_scan_init(mmu_t mmu)
3467 {
3468 	vm_offset_t va;
3469 	pte_t *pte;
3470 	int i;
3471 
3472 	if (!do_minidump) {
3473 		/* Initialize phys. segments for dumpsys(). */
3474 		memset(&dump_map, 0, sizeof(dump_map));
3475 		mem_regions(&physmem_regions, &physmem_regions_sz, &availmem_regions,
3476 		    &availmem_regions_sz);
3477 		for (i = 0; i < physmem_regions_sz; i++) {
3478 			dump_map[i].pa_start = physmem_regions[i].mr_start;
3479 			dump_map[i].pa_size = physmem_regions[i].mr_size;
3480 		}
3481 		return;
3482 	}
3483 
3484 	/* Virtual segments for minidumps: */
3485 	memset(&dump_map, 0, sizeof(dump_map));
3486 
3487 	/* 1st: kernel .data and .bss. */
3488 	dump_map[0].pa_start = trunc_page((uintptr_t)_etext);
3489 	dump_map[0].pa_size =
3490 	    round_page((uintptr_t)_end) - dump_map[0].pa_start;
3491 
3492 	/* 2nd: msgbuf and tables (see pmap_bootstrap()). */
3493 	dump_map[1].pa_start = data_start;
3494 	dump_map[1].pa_size = data_end - data_start;
3495 
3496 	/* 3rd: kernel VM. */
3497 	va = dump_map[1].pa_start + dump_map[1].pa_size;
3498 	/* Find start of next chunk (from va). */
3499 	while (va < virtual_end) {
3500 		/* Don't dump the buffer cache. */
3501 		if (va >= kmi.buffer_sva && va < kmi.buffer_eva) {
3502 			va = kmi.buffer_eva;
3503 			continue;
3504 		}
3505 		pte = pte_find(mmu, kernel_pmap, va);
3506 		if (pte != NULL && PTE_ISVALID(pte))
3507 			break;
3508 		va += PAGE_SIZE;
3509 	}
3510 	if (va < virtual_end) {
3511 		dump_map[2].pa_start = va;
3512 		va += PAGE_SIZE;
3513 		/* Find last page in chunk. */
3514 		while (va < virtual_end) {
3515 			/* Don't run into the buffer cache. */
3516 			if (va == kmi.buffer_sva)
3517 				break;
3518 			pte = pte_find(mmu, kernel_pmap, va);
3519 			if (pte == NULL || !PTE_ISVALID(pte))
3520 				break;
3521 			va += PAGE_SIZE;
3522 		}
3523 		dump_map[2].pa_size = va - dump_map[2].pa_start;
3524 	}
3525 }
3526 
3527 /*
3528  * Map a set of physical memory pages into the kernel virtual address space.
3529  * Return a pointer to where it is mapped. This routine is intended to be used
3530  * for mapping device memory, NOT real memory.
3531  */
3532 static void *
3533 mmu_booke_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
3534 {
3535 
3536 	return (mmu_booke_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
3537 }
3538 
3539 static void *
3540 mmu_booke_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
3541 {
3542 	tlb_entry_t e;
3543 	void *res;
3544 	uintptr_t va, tmpva;
3545 	vm_size_t sz;
3546 	int i;
3547 
3548 	/*
3549 	 * Check if this is premapped in TLB1. Note: this should probably also
3550 	 * check whether a sequence of TLB1 entries exists that matches the
3551 	 * requirement, but for now it only checks the easy case.
3552 	 */
3553 	for (i = 0; i < TLB1_ENTRIES; i++) {
3554 		tlb1_read_entry(&e, i);
3555 		if (!(e.mas1 & MAS1_VALID))
3556 			continue;
3557 		if (pa >= e.phys &&
3558 		    (pa + size) <= (e.phys + e.size) &&
3559 		    (ma == VM_MEMATTR_DEFAULT ||
3560 		     tlb_calc_wimg(pa, ma) ==
3561 		      (e.mas2 & (MAS2_WIMGE_MASK & ~_TLB_ENTRY_SHARED))))
3562 			return (void *)(e.virt +
3563 			    (vm_offset_t)(pa - e.phys));
3564 	}
3565 
3566 	size = roundup(size, PAGE_SIZE);
3567 
3568 	/*
3569 	 * The device mapping area is between VM_MAXUSER_ADDRESS and
3570 	 * VM_MIN_KERNEL_ADDRESS.  This gives 1GB of device addressing.
3571 	 */
3572 #ifdef SPARSE_MAPDEV
3573 	/*
3574 	 * With a sparse mapdev, align to the largest starting region.  This
3575 	 * could feasibly be optimized for a 'best-fit' alignment, but that
3576 	 * calculation could be very costly.
3577 	 * Align to the smaller of:
3578 	 * - first set bit in overlap of (pa & size mask)
3579 	 * - largest size envelope
3580 	 *
3581 	 * It's possible the device mapping may start at a PA that's not larger
3582 	 * than the size mask, so we need to offset in to maximize the TLB entry
3583 	 * range and minimize the number of used TLB entries.
3584 	 */
3585 	do {
3586 	    tmpva = tlb1_map_base;
3587 	    sz = ffsl(((1 << flsl(size-1)) - 1) & pa);
3588 	    sz = sz ? min(roundup(sz + 3, 4), flsl(size) - 1) : flsl(size) - 1;
3589 	    va = roundup(tlb1_map_base, 1 << sz) | (((1 << sz) - 1) & pa);
3590 #ifdef __powerpc64__
3591 	} while (!atomic_cmpset_long(&tlb1_map_base, tmpva, va + size));
3592 #else
3593 	} while (!atomic_cmpset_int(&tlb1_map_base, tmpva, va + size));
3594 #endif
3595 #else
3596 #ifdef __powerpc64__
3597 	va = atomic_fetchadd_long(&tlb1_map_base, size);
3598 #else
3599 	va = atomic_fetchadd_int(&tlb1_map_base, size);
3600 #endif
3601 #endif
3602 	res = (void *)va;
3603 
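	/*
	 * Enter TLB1 mappings for the range, using the largest page size the
	 * PA and VA alignment allow and stepping the size down (by powers of
	 * four) until the entire range is covered.
	 */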
3604 	do {
3605 		sz = 1 << (ilog2(size) & ~1);
3606 		/* Align size to PA */
3607 		if (pa % sz != 0) {
3608 			do {
3609 				sz >>= 2;
3610 			} while (pa % sz != 0);
3611 		}
3612 		/* Now align from there to VA */
3613 		if (va % sz != 0) {
3614 			do {
3615 				sz >>= 2;
3616 			} while (va % sz != 0);
3617 		}
3618 		if (bootverbose)
3619 			printf("Wiring VA=%lx to PA=%jx (size=%lx)\n",
3620 			    va, (uintmax_t)pa, sz);
3621 		if (tlb1_set_entry(va, pa, sz,
3622 		    _TLB_ENTRY_SHARED | tlb_calc_wimg(pa, ma)) < 0)
3623 			return (NULL);
3624 		size -= sz;
3625 		pa += sz;
3626 		va += sz;
3627 	} while (size > 0);
3628 
3629 	return (res);
3630 }
3631 
3632 /*
3633  * 'Unmap' a range mapped by mmu_booke_mapdev().
3634  */
3635 static void
3636 mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
3637 {
3638 #ifdef SUPPORTS_SHRINKING_TLB1
3639 	vm_offset_t base, offset;
3640 
3641 	/*
3642 	 * Unmap only if this is inside kernel virtual space.
3643 	 */
3644 	if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
3645 		base = trunc_page(va);
3646 		offset = va & PAGE_MASK;
3647 		size = roundup(offset + size, PAGE_SIZE);
3648 		kva_free(base, size);
3649 	}
3650 #endif
3651 }
3652 
3653 /*
3654  * mmu_booke_object_init_pt preloads the ptes for a given object into the
3655  * specified pmap. This eliminates the blast of soft faults on process startup
3656  * and immediately after an mmap.
3657  */
3658 static void
3659 mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
3660     vm_object_t object, vm_pindex_t pindex, vm_size_t size)
3661 {
3662 
3663 	VM_OBJECT_ASSERT_WLOCKED(object);
3664 	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
3665 	    ("mmu_booke_object_init_pt: non-device object"));
3666 }
3667 
3668 /*
3669  * Perform the pmap work for mincore.
3670  */
3671 static int
3672 mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
3673     vm_paddr_t *locked_pa)
3674 {
3675 
3676 	/* XXX: this should be implemented at some point */
3677 	return (0);
3678 }
3679 
3680 static int
3681 mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr, vm_size_t sz,
3682     vm_memattr_t mode)
3683 {
3684 	vm_offset_t va;
3685 	pte_t *pte;
3686 	int i, j;
3687 	tlb_entry_t e;
3688 
3689 	/* Check TLB1 mappings */
3690 	for (i = 0; i < TLB1_ENTRIES; i++) {
3691 		tlb1_read_entry(&e, i);
3692 		if (!(e.mas1 & MAS1_VALID))
3693 			continue;
3694 		if (addr >= e.virt && addr < e.virt + e.size)
3695 			break;
3696 	}
3697 	if (i < TLB1_ENTRIES) {
3698 		/* Only allow full mappings to be modified for now. */
3699 		/* Validate the range. */
3700 		for (j = i, va = addr; va < addr + sz; va += e.size, j++) {
3701 			tlb1_read_entry(&e, j);
3702 			if (va != e.virt || (sz - (va - addr) < e.size))
3703 				return (EINVAL);
3704 		}
3705 		for (va = addr; va < addr + sz; va += e.size, i++) {
3706 			tlb1_read_entry(&e, i);
3707 			e.mas2 &= ~MAS2_WIMGE_MASK;
3708 			e.mas2 |= tlb_calc_wimg(e.phys, mode);
3709 
3710 			/*
3711 			 * Write it out to the TLB.  Should really re-sync with other
3712 			 * cores.
3713 			 */
3714 			tlb1_write_entry(&e, i);
3715 		}
3716 		return (0);
3717 	}
3718 
3719 	/* Not in TLB1, try through pmap */
3720 	/* First validate the range. */
3721 	for (va = addr; va < addr + sz; va += PAGE_SIZE) {
3722 		pte = pte_find(mmu, kernel_pmap, va);
3723 		if (pte == NULL || !PTE_ISVALID(pte))
3724 			return (EINVAL);
3725 	}
3726 
3727 	mtx_lock_spin(&tlbivax_mutex);
3728 	tlb_miss_lock();
3729 	for (va = addr; va < addr + sz; va += PAGE_SIZE) {
3730 		pte = pte_find(mmu, kernel_pmap, va);
3731 		*pte &= ~(PTE_MAS2_MASK << PTE_MAS2_SHIFT);
3732 		*pte |= tlb_calc_wimg(PTE_PA(pte), mode) << PTE_MAS2_SHIFT;
3733 		tlb0_flush_entry(va);
3734 	}
3735 	tlb_miss_unlock();
3736 	mtx_unlock_spin(&tlbivax_mutex);
3737 
3738 	return (0);
3739 }
3740 
3741 /**************************************************************************/
3742 /* TID handling */
3743 /**************************************************************************/
3744 
3745 /*
3746  * Allocate a TID. If necessary, steal one from someone else.
3747  * The new TID is flushed from the TLB before returning.
3748  */
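/*
 * TIDs are handed out round-robin per CPU from booke.tid_next, wrapping
 * from TID_MAX back to TID_MIN.  If the chosen TID is still owned by
 * another pmap on this CPU, it is stolen: the owner's pm_tid slot is
 * cleared and all TLB0 entries tagged with that TID are flushed.
 */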
3749 static tlbtid_t
3750 tid_alloc(pmap_t pmap)
3751 {
3752 	tlbtid_t tid;
3753 	int thiscpu;
3754 
3755 	KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap"));
3756 
3757 	CTR2(KTR_PMAP, "%s: s (pmap = %p)", __func__, pmap);
3758 
3759 	thiscpu = PCPU_GET(cpuid);
3760 
3761 	tid = PCPU_GET(booke.tid_next);
3762 	if (tid > TID_MAX)
3763 		tid = TID_MIN;
3764 	PCPU_SET(booke.tid_next, tid + 1);
3765 
3766 	/* If we are stealing TID then clear the relevant pmap's field */
3767 	if (tidbusy[thiscpu][tid] != NULL) {
3768 
3769 		CTR2(KTR_PMAP, "%s: warning: stealing tid %d", __func__, tid);
3770 
3771 		tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE;
3772 
3773 		/* Flush all entries from TLB0 matching this TID. */
3774 		tid_flush(tid);
3775 	}
3776 
3777 	tidbusy[thiscpu][tid] = pmap;
3778 	pmap->pm_tid[thiscpu] = tid;
3779 	__asm __volatile("msync; isync");
3780 
3781 	CTR3(KTR_PMAP, "%s: e (%02d next = %02d)", __func__, tid,
3782 	    PCPU_GET(booke.tid_next));
3783 
3784 	return (tid);
3785 }
3786 
3787 /**************************************************************************/
3788 /* TLB0 handling */
3789 /**************************************************************************/
3790 
3791 /* Convert TLB0 va and way number to tlb0[] table index. */
3792 static inline unsigned int
3793 tlb0_tableidx(vm_offset_t va, unsigned int way)
3794 {
3795 	unsigned int idx;
3796 
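	/*
	 * The set within a way is selected by EPN bits of va.  Illustrative
	 * example with hypothetical values (not taken from any particular
	 * core): if TLB0_ENTRIES_PER_WAY were 128 and the index field
	 * started at PAGE_SHIFT, va = 0xc0003000 in way 1 would select set 3
	 * and yield index 1 * 128 + 3 = 131.
	 */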
3797 	idx = (way * TLB0_ENTRIES_PER_WAY);
3798 	idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT;
3799 	return (idx);
3800 }
3801 
3802 /*
3803  * Invalidate TLB0 entry.
3804  */
3805 static inline void
3806 tlb0_flush_entry(vm_offset_t va)
3807 {
3808 
3809 	CTR2(KTR_PMAP, "%s: s va=0x%08x", __func__, va);
3810 
3811 	mtx_assert(&tlbivax_mutex, MA_OWNED);
3812 
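	/*
	 * tlbivax invalidates any TLB0 entry matching the effective page
	 * number; the isync/msync/tlbsync sequence below orders the
	 * invalidation before returning.  Callers serialize on
	 * tlbivax_mutex (asserted above).
	 */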
3813 	__asm __volatile("tlbivax 0, %0" :: "r"(va & MAS2_EPN_MASK));
3814 	__asm __volatile("isync; msync");
3815 	__asm __volatile("tlbsync; msync");
3816 
3817 	CTR1(KTR_PMAP, "%s: e", __func__);
3818 }
3819 
3820 
3821 /**************************************************************************/
3822 /* TLB1 handling */
3823 /**************************************************************************/
3824 
3825 /*
3826  * TLB1 mapping notes:
3827  *
3828  * TLB1[0]	Kernel text and data.
3829  * TLB1[1-15]	Additional kernel text and data mappings (if required), PCI
3830  *		windows, other devices mappings.
3831  */
3832 
3833 /*
3834  * Read an entry from given TLB1 slot.
3835  */
3836 void
3837 tlb1_read_entry(tlb_entry_t *entry, unsigned int slot)
3838 {
3839 	register_t msr;
3840 	uint32_t mas0;
3841 
3842 	KASSERT((entry != NULL), ("%s(): Entry is NULL!", __func__));
3843 
3844 	msr = mfmsr();
3845 	__asm __volatile("wrteei 0");
3846 
3847 	mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(slot);
3848 	mtspr(SPR_MAS0, mas0);
3849 	__asm __volatile("isync; tlbre");
3850 
3851 	entry->mas1 = mfspr(SPR_MAS1);
3852 	entry->mas2 = mfspr(SPR_MAS2);
3853 	entry->mas3 = mfspr(SPR_MAS3);
3854 
3855 	switch ((mfpvr() >> 16) & 0xFFFF) {
3856 	case FSL_E500v2:
3857 	case FSL_E500mc:
3858 	case FSL_E5500:
3859 	case FSL_E6500:
3860 		entry->mas7 = mfspr(SPR_MAS7);
3861 		break;
3862 	default:
3863 		entry->mas7 = 0;
3864 		break;
3865 	}
3866 	mtmsr(msr);
3867 
3868 	entry->virt = entry->mas2 & MAS2_EPN_MASK;
3869 	entry->phys = ((vm_paddr_t)(entry->mas7 & MAS7_RPN) << 32) |
3870 	    (entry->mas3 & MAS3_RPN);
3871 	entry->size =
3872 	    tsize2size((entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT);
3873 }
3874 
3875 struct tlbwrite_args {
3876 	tlb_entry_t *e;
3877 	unsigned int idx;
3878 };
3879 
3880 static void
3881 tlb1_write_entry_int(void *arg)
3882 {
3883 	struct tlbwrite_args *args = arg;
3884 	uint32_t mas0;
3885 
3886 	/* Select entry */
3887 	mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(args->idx);
3888 
3889 	mtspr(SPR_MAS0, mas0);
3890 	__asm __volatile("isync");
3891 	mtspr(SPR_MAS1, args->e->mas1);
3892 	__asm __volatile("isync");
3893 	mtspr(SPR_MAS2, args->e->mas2);
3894 	__asm __volatile("isync");
3895 	mtspr(SPR_MAS3, args->e->mas3);
3896 	__asm __volatile("isync");
3897 	switch ((mfpvr() >> 16) & 0xFFFF) {
3898 	case FSL_E500mc:
3899 	case FSL_E5500:
3900 	case FSL_E6500:
3901 		mtspr(SPR_MAS8, 0);
3902 		__asm __volatile("isync");
3903 		/* FALLTHROUGH */
3904 	case FSL_E500v2:
3905 		mtspr(SPR_MAS7, args->e->mas7);
3906 		__asm __volatile("isync");
3907 		break;
3908 	default:
3909 		break;
3910 	}
3911 
3912 	__asm __volatile("tlbwe; isync; msync");
3913 
3914 }
3915 
3916 static void
3917 tlb1_write_entry_sync(void *arg)
3918 {
3919 	/* Empty synchronization point for smp_rendezvous(). */
3920 }
3921 
3922 /*
3923  * Write given entry to TLB1 hardware.
3924  */
3925 static void
3926 tlb1_write_entry(tlb_entry_t *e, unsigned int idx)
3927 {
3928 	struct tlbwrite_args args;
3929 
3930 	args.e = e;
3931 	args.idx = idx;
3932 
3933 #ifdef SMP
3934 	if ((e->mas2 & _TLB_ENTRY_SHARED) && smp_started) {
3935 		mb();
3936 		smp_rendezvous(tlb1_write_entry_sync,
3937 		    tlb1_write_entry_int,
3938 		    tlb1_write_entry_sync, &args);
3939 	} else
3940 #endif
3941 	{
3942 		register_t msr;
3943 
3944 		msr = mfmsr();
3945 		__asm __volatile("wrteei 0");
3946 		tlb1_write_entry_int(&args);
3947 		mtmsr(msr);
3948 	}
3949 }
3950 
3951 /*
3952  * Return the largest uint value log such that 2^log <= num.
3953  */
3954 static unsigned int
3955 ilog2(unsigned long num)
3956 {
3957 	long lz;
3958 
3959 #ifdef __powerpc64__
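	/*
	 * For example, ilog2(0x4000) and ilog2(0x5000) both return 14,
	 * since 2^14 <= 0x5000 < 2^15.  The result is not meaningful for
	 * num == 0.
	 */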
3960 	__asm ("cntlzd %0, %1" : "=r" (lz) : "r" (num));
3961 	return (63 - lz);
3962 #else
3963 	__asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num));
3964 	return (31 - lz);
3965 #endif
3966 }
3967 
3968 /*
3969  * Convert TLB TSIZE value to mapped region size.
3970  */
3971 static vm_size_t
3972 tsize2size(unsigned int tsize)
3973 {
3974 
3975 	/*
3976 	 * size = 4^tsize KB
3977 	 * size = 4^tsize * 2^10 = 2^(2 * tsize + 10)
3978 	 */
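	/*
	 * Worked example (illustrative): tsize = 1 gives 4KB, tsize = 5
	 * gives 4^5 KB = 1MB, and tsize = 7 gives 4^7 KB = 16MB.
	 */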
3979 
3980 	return ((1 << (2 * tsize)) * 1024);
3981 }
3982 
3983 /*
3984  * Convert region size (must be power of 4) to TLB TSIZE value.
3985  */
3986 static unsigned int
3987 size2tsize(vm_size_t size)
3988 {
3989 
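	/*
	 * Inverse of tsize2size(): e.g. (illustrative) size2tsize(1 << 24)
	 * (16MB) yields 24 / 2 - 5 = 7, and tsize2size(7) is 16MB.
	 */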
3990 	return (ilog2(size) / 2 - 5);
3991 }
3992 
3993 /*
3994  * Register permanent kernel mapping in TLB1.
3995  *
3996  * Entries are created starting from index 0 (the first free slot is
3997  * found by scanning) and are not supposed to be invalidated.
3998  */
3999 int
4000 tlb1_set_entry(vm_offset_t va, vm_paddr_t pa, vm_size_t size,
4001     uint32_t flags)
4002 {
4003 	tlb_entry_t e;
4004 	uint32_t ts, tid;
4005 	int tsize, index;
4006 
4007 	for (index = 0; index < TLB1_ENTRIES; index++) {
4008 		tlb1_read_entry(&e, index);
4009 		if ((e.mas1 & MAS1_VALID) == 0)
4010 			break;
4011 		/* Check if we're just updating the flags, and update them. */
4012 		if (e.phys == pa && e.virt == va && e.size == size) {
4013 			e.mas2 = (va & MAS2_EPN_MASK) | flags;
4014 			tlb1_write_entry(&e, index);
4015 			return (0);
4016 		}
4017 	}
4018 	if (index >= TLB1_ENTRIES) {
4019 		printf("tlb1_set_entry: TLB1 full!\n");
4020 		return (-1);
4021 	}
4022 
4023 	/* Convert size to TSIZE */
4024 	tsize = size2tsize(size);
4025 
4026 	tid = (TID_KERNEL << MAS1_TID_SHIFT) & MAS1_TID_MASK;
4027 	/* XXX TS is hard-coded to 0 for now as we only use a single address space */
4028 	ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK;
4029 
4030 	e.phys = pa;
4031 	e.virt = va;
4032 	e.size = size;
4033 	e.mas1 = MAS1_VALID | MAS1_IPROT | ts | tid;
4034 	e.mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK);
4035 	e.mas2 = (va & MAS2_EPN_MASK) | flags;
4036 
4037 	/* Set supervisor RWX permission bits */
4038 	e.mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX;
4039 	e.mas7 = (pa >> 32) & MAS7_RPN;
4040 
4041 	tlb1_write_entry(&e, index);
4042 
4043 	/*
4044 	 * XXX in general TLB1 updates should be propagated between CPUs,
4045 	 * since the current design assumes the same TLB1 set-up on all
4046 	 * cores.
4047 	 */
4048 	return (0);
4049 }
4050 
4051 /*
4052  * Map a contiguous RAM region into TLB1 using at most
4053  * KERNEL_REGION_MAX_TLB_ENTRIES entries.
4054  *
4055  * If necessary, round up the last entry size and return the total size
4056  * used by all allocated entries.
4057  */
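/*
 * Worked example (illustrative): with any entry limit of at least three,
 * a 72MB region is carved as 64MB + 4MB + 4MB.  With a limit of four
 * (assumed here), an 86MB region first yields 64MB + 16MB + 4MB + 1MB =
 * 85MB, which under-maps, so the trailing 1MB entry is replaced by a 4MB
 * one, giving 64MB + 16MB + 4MB + 4MB = 88MB with 2MB of wasted space.
 */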
4058 vm_size_t
4059 tlb1_mapin_region(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
4060 {
4061 	vm_size_t pgs[KERNEL_REGION_MAX_TLB_ENTRIES];
4062 	vm_size_t mapped, pgsz, base, mask;
4063 	int idx, nents;
4064 
4065 	/* Round up to the next 1M */
4066 	size = roundup2(size, 1 << 20);
4067 
4068 	mapped = 0;
4069 	idx = 0;
4070 	base = va;
4071 	pgsz = 64*1024*1024;
4072 	while (mapped < size) {
4073 		while (mapped < size && idx < KERNEL_REGION_MAX_TLB_ENTRIES) {
4074 			while (pgsz > (size - mapped))
4075 				pgsz >>= 2;
4076 			pgs[idx++] = pgsz;
4077 			mapped += pgsz;
4078 		}
4079 
4080 		/* We under-map. Correct for this. */
4081 		if (mapped < size) {
4082 			while (pgs[idx - 1] == pgsz) {
4083 				idx--;
4084 				mapped -= pgsz;
4085 			}
4086 			/* XXX We may increase beyond our starting point. */
4087 			pgsz <<= 2;
4088 			pgs[idx++] = pgsz;
4089 			mapped += pgsz;
4090 		}
4091 	}
4092 
4093 	nents = idx;
4094 	mask = pgs[0] - 1;
4095 	/* Align address to the boundary */
4096 	if (va & mask) {
4097 		va = (va + mask) & ~mask;
4098 		pa = (pa + mask) & ~mask;
4099 	}
4100 
4101 	for (idx = 0; idx < nents; idx++) {
4102 		pgsz = pgs[idx];
4103 		debugf("%u: %jx -> %jx, size=%jx\n", idx, (uintmax_t)pa,
4104 		    (uintmax_t)va, (uintmax_t)pgsz);
4105 		tlb1_set_entry(va, pa, pgsz,
4106 		    _TLB_ENTRY_SHARED | _TLB_ENTRY_MEM);
4107 		pa += pgsz;
4108 		va += pgsz;
4109 	}
4110 
4111 	mapped = (va - base);
4112 	if (bootverbose)
4113 		printf("mapped size 0x%"PRIxPTR" (wasted space 0x%"PRIxPTR")\n",
4114 		    mapped, mapped - size);
4115 	return (mapped);
4116 }
4117 
4118 /*
4119  * TLB1 initialization routine, to be called after the very first
4120  * assembler-level setup done in locore.S.
4121  */
4122 void
4123 tlb1_init()
4124 {
4125 	uint32_t mas0, mas1, mas2, mas3, mas7;
4126 	uint32_t tsz;
4127 
4128 	tlb1_get_tlbconf();
4129 
4130 	mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(0);
4131 	mtspr(SPR_MAS0, mas0);
4132 	__asm __volatile("isync; tlbre");
4133 
4134 	mas1 = mfspr(SPR_MAS1);
4135 	mas2 = mfspr(SPR_MAS2);
4136 	mas3 = mfspr(SPR_MAS3);
4137 	mas7 = mfspr(SPR_MAS7);
4138 
4139 	kernload =  ((vm_paddr_t)(mas7 & MAS7_RPN) << 32) |
4140 	    (mas3 & MAS3_RPN);
4141 
4142 	tsz = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
4143 	kernsize += (tsz > 0) ? tsize2size(tsz) : 0;
4144 
4145 	/* Setup TLB miss defaults */
4146 	set_mas4_defaults();
4147 }
4148 
4149 /*
4150  * pmap_early_io_unmap() should be used shortly after a matching
4151  * pmap_early_io_map(), as in the following snippet:
4152  *
4153  * x = pmap_early_io_map(...);
4154  * <do something with x>
4155  * pmap_early_io_unmap(x, size);
4156  *
4157  * with no other early I/O allocations made in between.
4158  */
4159 void
4160 pmap_early_io_unmap(vm_offset_t va, vm_size_t size)
4161 {
4162 	int i;
4163 	tlb_entry_t e;
4164 	vm_size_t isize;
4165 
4166 	size = roundup(size, PAGE_SIZE);
4167 	isize = size;
4168 	for (i = 0; i < TLB1_ENTRIES && size > 0; i++) {
4169 		tlb1_read_entry(&e, i);
4170 		if (!(e.mas1 & MAS1_VALID))
4171 			continue;
4172 		if (va <= e.virt && (va + isize) >= (e.virt + e.size)) {
4173 			size -= e.size;
4174 			e.mas1 &= ~MAS1_VALID;
4175 			tlb1_write_entry(&e, i);
4176 		}
4177 	}
4178 	if (tlb1_map_base == va + isize)
4179 		tlb1_map_base -= isize;
4180 }
4181 
4182 vm_offset_t
4183 pmap_early_io_map(vm_paddr_t pa, vm_size_t size)
4184 {
4185 	vm_paddr_t pa_base;
4186 	vm_offset_t va, sz;
4187 	int i;
4188 	tlb_entry_t e;
4189 
4190 	KASSERT(!pmap_bootstrapped, ("Do not use after PMAP is up!"));
4191 
4192 	for (i = 0; i < TLB1_ENTRIES; i++) {
4193 		tlb1_read_entry(&e, i);
4194 		if (!(e.mas1 & MAS1_VALID))
4195 			continue;
4196 		if (pa >= e.phys && (pa + size) <=
4197 		    (e.phys + e.size))
4198 			return (e.virt + (pa - e.phys));
4199 	}
4200 
4201 	pa_base = rounddown(pa, PAGE_SIZE);
4202 	size = roundup(size + (pa - pa_base), PAGE_SIZE);
4203 	tlb1_map_base = roundup2(tlb1_map_base, 1 << (ilog2(size) & ~1));
4204 	va = tlb1_map_base + (pa - pa_base);
4205 
4206 	do {
4207 		sz = 1 << (ilog2(size) & ~1);
4208 		tlb1_set_entry(tlb1_map_base, pa_base, sz,
4209 		    _TLB_ENTRY_SHARED | _TLB_ENTRY_IO);
4210 		size -= sz;
4211 		pa_base += sz;
4212 		tlb1_map_base += sz;
4213 	} while (size > 0);
4214 
4215 	return (va);
4216 }
4217 
4218 void
4219 pmap_track_page(pmap_t pmap, vm_offset_t va)
4220 {
4221 	vm_paddr_t pa;
4222 	vm_page_t page;
4223 	struct pv_entry *pve;
4224 
4225 	va = trunc_page(va);
4226 	pa = pmap_kextract(va);
4227 	page = PHYS_TO_VM_PAGE(pa);
4228 
4229 	rw_wlock(&pvh_global_lock);
4230 	PMAP_LOCK(pmap);
4231 
4232 	TAILQ_FOREACH(pve, &page->md.pv_list, pv_link) {
4233 		if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
4234 			goto out;
4235 		}
4236 	}
4237 	page->md.pv_tracked = true;
4238 	pv_insert(pmap, va, page);
4239 out:
4240 	PMAP_UNLOCK(pmap);
4241 	rw_wunlock(&pvh_global_lock);
4242 }
4243 
4244 
4245 /*
4246  * Setup MAS4 defaults.
4247  * These values are loaded to MAS0-2 on a TLB miss.
4248  */
4249 static void
4250 set_mas4_defaults(void)
4251 {
4252 	uint32_t mas4;
4253 
4254 	/* Defaults: TLB0, PID0, TSIZED=4K */
4255 	mas4 = MAS4_TLBSELD0;
4256 	mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK;
4257 #ifdef SMP
4258 	mas4 |= MAS4_MD;
4259 #endif
4260 	mtspr(SPR_MAS4, mas4);
4261 	__asm __volatile("isync");
4262 }
4263 
4264 
4265 /*
4266  * Return 0 if the physical I/O range is encompassed by one of the
4267  * TLB1 entries, otherwise return the related error code.
4268  */
4269 static int
4270 tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va)
4271 {
4272 	uint32_t prot;
4273 	vm_paddr_t pa_start;
4274 	vm_paddr_t pa_end;
4275 	unsigned int entry_tsize;
4276 	vm_size_t entry_size;
4277 	tlb_entry_t e;
4278 
4279 	*va = (vm_offset_t)NULL;
4280 
4281 	tlb1_read_entry(&e, i);
4282 	/* Skip invalid entries */
4283 	if (!(e.mas1 & MAS1_VALID))
4284 		return (EINVAL);
4285 
4286 	/*
4287 	 * The entry must be cache-inhibited, guarded, and r/w
4288 	 * so it can function as an i/o page
4289 	 */
4290 	prot = e.mas2 & (MAS2_I | MAS2_G);
4291 	if (prot != (MAS2_I | MAS2_G))
4292 		return (EPERM);
4293 
4294 	prot = e.mas3 & (MAS3_SR | MAS3_SW);
4295 	if (prot != (MAS3_SR | MAS3_SW))
4296 		return (EPERM);
4297 
4298 	/* The address should be within the entry range. */
4299 	entry_tsize = (e.mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
4300 	KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize"));
4301 
4302 	entry_size = tsize2size(entry_tsize);
4303 	pa_start = (((vm_paddr_t)e.mas7 & MAS7_RPN) << 32) |
4304 	    (e.mas3 & MAS3_RPN);
4305 	pa_end = pa_start + entry_size;
4306 
4307 	if ((pa < pa_start) || ((pa + size) > pa_end))
4308 		return (ERANGE);
4309 
4310 	/* Return virtual address of this mapping. */
4311 	*va = (e.mas2 & MAS2_EPN_MASK) + (pa - pa_start);
4312 	return (0);
4313 }
4314 
4315 /*
4316  * Invalidate all TLB0 entries which match the given TID. Note this is
4317  * intended for cases where invalidations should NOT be propagated to other
4318  * CPUs.
4319  */
4320 static void
4321 tid_flush(tlbtid_t tid)
4322 {
4323 	register_t msr;
4324 	uint32_t mas0, mas1, mas2;
4325 	int entry, way;
4326 
4327 
4328 	/* Don't evict kernel translations */
4329 	if (tid == TID_KERNEL)
4330 		return;
4331 
4332 	msr = mfmsr();
4333 	__asm __volatile("wrteei 0");
4334 
4335 	for (way = 0; way < TLB0_WAYS; way++)
4336 		for (entry = 0; entry < TLB0_ENTRIES_PER_WAY; entry++) {
4337 
4338 			mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
4339 			mtspr(SPR_MAS0, mas0);
4340 			__asm __volatile("isync");
4341 
4342 			mas2 = entry << MAS2_TLB0_ENTRY_IDX_SHIFT;
4343 			mtspr(SPR_MAS2, mas2);
4344 
4345 			__asm __volatile("isync; tlbre");
4346 
4347 			mas1 = mfspr(SPR_MAS1);
4348 
4349 			if (!(mas1 & MAS1_VALID))
4350 				continue;
4351 			if (((mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT) != tid)
4352 				continue;
4353 			mas1 &= ~MAS1_VALID;
4354 			mtspr(SPR_MAS1, mas1);
4355 			__asm __volatile("isync; tlbwe; isync; msync");
4356 		}
4357 	mtmsr(msr);
4358 }
4359 
4360 #ifdef DDB
4361 /* Print out contents of the MAS registers for each TLB0 entry */
4362 static void
4363 #ifdef __powerpc64__
4364 tlb_print_entry(int i, uint32_t mas1, uint64_t mas2, uint32_t mas3,
4365 #else
4366 tlb_print_entry(int i, uint32_t mas1, uint32_t mas2, uint32_t mas3,
4367 #endif
4368     uint32_t mas7)
4369 {
4370 	int as;
4371 	char desc[3];
4372 	tlbtid_t tid;
4373 	vm_size_t size;
4374 	unsigned int tsize;
4375 
4376 	desc[2] = '\0';
4377 	if (mas1 & MAS1_VALID)
4378 		desc[0] = 'V';
4379 	else
4380 		desc[0] = ' ';
4381 
4382 	if (mas1 & MAS1_IPROT)
4383 		desc[1] = 'P';
4384 	else
4385 		desc[1] = ' ';
4386 
4387 	as = (mas1 & MAS1_TS_MASK) ? 1 : 0;
4388 	tid = MAS1_GETTID(mas1);
4389 
4390 	tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
4391 	size = 0;
4392 	if (tsize)
4393 		size = tsize2size(tsize);
4394 
4395 	printf("%3d: (%s) [AS=%d] "
4396 	    "sz = 0x%08x tsz = %d tid = %d mas1 = 0x%08x "
4397 	    "mas2(va) = 0x%"PRI0ptrX" mas3(pa) = 0x%08x mas7 = 0x%08x\n",
4398 	    i, desc, as, size, tsize, tid, mas1, mas2, mas3, mas7);
4399 }
4400 
4401 DB_SHOW_COMMAND(tlb0, tlb0_print_tlbentries)
4402 {
4403 	uint32_t mas0, mas1, mas3, mas7;
4404 #ifdef __powerpc64__
4405 	uint64_t mas2;
4406 #else
4407 	uint32_t mas2;
4408 #endif
4409 	int entryidx, way, idx;
4410 
4411 	printf("TLB0 entries:\n");
4412 	for (way = 0; way < TLB0_WAYS; way ++)
4413 		for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) {
4414 
4415 			mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
4416 			mtspr(SPR_MAS0, mas0);
4417 			__asm __volatile("isync");
4418 
4419 			mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT;
4420 			mtspr(SPR_MAS2, mas2);
4421 
4422 			__asm __volatile("isync; tlbre");
4423 
4424 			mas1 = mfspr(SPR_MAS1);
4425 			mas2 = mfspr(SPR_MAS2);
4426 			mas3 = mfspr(SPR_MAS3);
4427 			mas7 = mfspr(SPR_MAS7);
4428 
4429 			idx = tlb0_tableidx(mas2, way);
4430 			tlb_print_entry(idx, mas1, mas2, mas3, mas7);
4431 		}
4432 }
4433 
4434 /*
4435  * Print out contents of the MAS registers for each TLB1 entry
4436  */
4437 DB_SHOW_COMMAND(tlb1, tlb1_print_tlbentries)
4438 {
4439 	uint32_t mas0, mas1, mas3, mas7;
4440 #ifdef __powerpc64__
4441 	uint64_t mas2;
4442 #else
4443 	uint32_t mas2;
4444 #endif
4445 	int i;
4446 
4447 	printf("TLB1 entries:\n");
4448 	for (i = 0; i < TLB1_ENTRIES; i++) {
4449 
4450 		mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i);
4451 		mtspr(SPR_MAS0, mas0);
4452 
4453 		__asm __volatile("isync; tlbre");
4454 
4455 		mas1 = mfspr(SPR_MAS1);
4456 		mas2 = mfspr(SPR_MAS2);
4457 		mas3 = mfspr(SPR_MAS3);
4458 		mas7 = mfspr(SPR_MAS7);
4459 
4460 		tlb_print_entry(i, mas1, mas2, mas3, mas7);
4461 	}
4462 }
4463 #endif
4464