xref: /freebsd/sys/powerpc/booke/pmap.c (revision b2db760808f74bb53c232900091c9da801ebbfcc)
1 /*-
2  * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
3  * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
18  * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
19  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
20  * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
22  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
23  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  *
26  * Some hw specific parts of this pmap were derived or influenced
27  * by NetBSD's ibm4xx pmap module. More generic code is shared with
28  * a few other pmap modules from the FreeBSD tree.
29  */
30 
31  /*
32   * VM layout notes:
33   *
34   * Kernel and user threads run within one common virtual address space
35   * defined by AS=0.
36   *
37   * Virtual address space layout:
38   * -----------------------------
39   * 0x0000_0000 - 0xafff_ffff	: user process
40   * 0xb000_0000 - 0xbfff_ffff	: pmap_mapdev()-ed area (PCI/PCIE etc.)
41   * 0xc000_0000 - 0xc0ff_ffff	: kernel reserved
42   *   0xc000_0000 - data_end	: kernel code+data, env, metadata etc.
43   * 0xc100_0000 - 0xfeef_ffff	: KVA
44   *   0xc100_0000 - 0xc100_3fff : reserved for page zero/copy
45   *   0xc100_4000 - 0xc200_3fff : reserved for ptbl bufs
46   *   0xc200_4000 - 0xc200_8fff : guard page + kstack0
47   *   0xc200_9000 - 0xfeef_ffff	: actual free KVA space
48   * 0xfef0_0000 - 0xffff_ffff	: I/O devices region
49   */
50 
51 #include <sys/cdefs.h>
52 __FBSDID("$FreeBSD$");
53 
54 #include <sys/types.h>
55 #include <sys/param.h>
56 #include <sys/malloc.h>
57 #include <sys/ktr.h>
58 #include <sys/proc.h>
59 #include <sys/user.h>
60 #include <sys/queue.h>
61 #include <sys/systm.h>
62 #include <sys/kernel.h>
63 #include <sys/msgbuf.h>
64 #include <sys/lock.h>
65 #include <sys/mutex.h>
66 #include <sys/smp.h>
67 #include <sys/vmmeter.h>
68 
69 #include <vm/vm.h>
70 #include <vm/vm_page.h>
71 #include <vm/vm_kern.h>
72 #include <vm/vm_pageout.h>
73 #include <vm/vm_extern.h>
74 #include <vm/vm_object.h>
75 #include <vm/vm_param.h>
76 #include <vm/vm_map.h>
77 #include <vm/vm_pager.h>
78 #include <vm/uma.h>
79 
80 #include <machine/cpu.h>
81 #include <machine/pcb.h>
82 #include <machine/platform.h>
83 
84 #include <machine/tlb.h>
85 #include <machine/spr.h>
86 #include <machine/vmparam.h>
87 #include <machine/md_var.h>
88 #include <machine/mmuvar.h>
89 #include <machine/pmap.h>
90 #include <machine/pte.h>
91 
92 #include "mmu_if.h"
93 
94 #define DEBUG
95 #undef DEBUG
96 
97 #ifdef  DEBUG
98 #define debugf(fmt, args...) printf(fmt, ##args)
99 #else
100 #define debugf(fmt, args...)
101 #endif
102 
103 #define TODO			panic("%s: not implemented", __func__);
104 
105 #include "opt_sched.h"
106 #ifndef SCHED_4BSD
107 #error "e500 only works with SCHED_4BSD which uses a global scheduler lock."
108 #endif
109 extern struct mtx sched_lock;
110 
111 extern int dumpsys_minidump;
112 
113 extern unsigned char _etext[];
114 extern unsigned char _end[];
115 
116 /* Kernel physical load address. */
117 extern uint32_t kernload;
118 vm_offset_t kernstart;
119 vm_size_t kernsize;
120 
121 /* Message buffer and tables. */
122 static vm_offset_t data_start;
123 static vm_size_t data_end;
124 
125 /* Phys/avail memory regions. */
126 static struct mem_region *availmem_regions;
127 static int availmem_regions_sz;
128 static struct mem_region *physmem_regions;
129 static int physmem_regions_sz;
130 
131 /* Reserved KVA space and mutex for mmu_booke_zero_page. */
132 static vm_offset_t zero_page_va;
133 static struct mtx zero_page_mutex;
134 
135 static struct mtx tlbivax_mutex;
136 
137 /*
138  * Reserved KVA space for mmu_booke_zero_page_idle. This is used
139  * by the idle thread only, so no lock is required.
140  */
141 static vm_offset_t zero_page_idle_va;
142 
143 /* Reserved KVA space and mutex for mmu_booke_copy_page. */
144 static vm_offset_t copy_page_src_va;
145 static vm_offset_t copy_page_dst_va;
146 static struct mtx copy_page_mutex;
147 
148 /**************************************************************************/
149 /* PMAP */
150 /**************************************************************************/
151 
152 static void mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
153     vm_prot_t, boolean_t);
154 
155 unsigned int kptbl_min;		/* Index of the first kernel ptbl. */
156 unsigned int kernel_ptbls;	/* Number of KVA ptbls. */
157 
158 /*
159  * If a user pmap is processed with mmu_booke_remove and the resident count
160  * drops to 0, there are no more pages to remove, so we need not continue.
161  */
162 #define PMAP_REMOVE_DONE(pmap) \
163 	((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0)
164 
165 extern void tlb_lock(uint32_t *);
166 extern void tlb_unlock(uint32_t *);
167 extern void tid_flush(tlbtid_t);
168 
169 /**************************************************************************/
170 /* TLB and TID handling */
171 /**************************************************************************/
172 
173 /* Translation ID busy table */
174 static volatile pmap_t tidbusy[MAXCPU][TID_MAX + 1];
175 
176 /*
177  * TLB0 capabilities (entry, way numbers etc.). These can vary between e500
178  * core revisions and should be read from h/w registers during early config.
179  */
180 uint32_t tlb0_entries;
181 uint32_t tlb0_ways;
182 uint32_t tlb0_entries_per_way;
183 
184 #define TLB0_ENTRIES		(tlb0_entries)
185 #define TLB0_WAYS		(tlb0_ways)
186 #define TLB0_ENTRIES_PER_WAY	(tlb0_entries_per_way)
187 
188 #define TLB1_ENTRIES 16
189 
190 /* In-ram copy of the TLB1 */
191 static tlb_entry_t tlb1[TLB1_ENTRIES];
192 
193 /* Next free entry in the TLB1 */
194 static unsigned int tlb1_idx;
195 
196 static tlbtid_t tid_alloc(struct pmap *);
197 
198 static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t);
199 
200 static int tlb1_set_entry(vm_offset_t, vm_offset_t, vm_size_t, uint32_t);
201 static void tlb1_write_entry(unsigned int);
202 static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *);
203 static vm_size_t tlb1_mapin_region(vm_offset_t, vm_offset_t, vm_size_t);
204 
205 static vm_size_t tsize2size(unsigned int);
206 static unsigned int size2tsize(vm_size_t);
207 static unsigned int ilog2(unsigned int);
208 
209 static void set_mas4_defaults(void);
210 
211 static inline void tlb0_flush_entry(vm_offset_t);
212 static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int);
213 
214 /**************************************************************************/
215 /* Page table management */
216 /**************************************************************************/
217 
218 /* Data for the pv entry allocation mechanism */
219 static uma_zone_t pvzone;
220 static struct vm_object pvzone_obj;
221 static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
222 
223 #define PV_ENTRY_ZONE_MIN	2048	/* min pv entries in uma zone */
224 
225 #ifndef PMAP_SHPGPERPROC
226 #define PMAP_SHPGPERPROC	200
227 #endif
228 
229 static void ptbl_init(void);
230 static struct ptbl_buf *ptbl_buf_alloc(void);
231 static void ptbl_buf_free(struct ptbl_buf *);
232 static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);
233 
234 static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int);
235 static void ptbl_free(mmu_t, pmap_t, unsigned int);
236 static void ptbl_hold(mmu_t, pmap_t, unsigned int);
237 static int ptbl_unhold(mmu_t, pmap_t, unsigned int);
238 
239 static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
240 static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
241 static void pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t);
242 static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);
243 
244 static pv_entry_t pv_alloc(void);
245 static void pv_free(pv_entry_t);
246 static void pv_insert(pmap_t, vm_offset_t, vm_page_t);
247 static void pv_remove(pmap_t, vm_offset_t, vm_page_t);
248 
249 /* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */
250 #define PTBL_BUFS		(128 * 16)
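/*
 * Sizing sketch (assuming PTBL_PAGES = 2 and 4 KB pages, per pte.h): this
 * gives 2048 bufs of PTBL_PAGES * PAGE_SIZE = 8 KB each, i.e. a 16 MB pool,
 * matching the 0xc100_4000 - 0xc200_3fff ptbl bufs window in the VM layout
 * notes above.
 */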
251 
252 struct ptbl_buf {
253 	TAILQ_ENTRY(ptbl_buf) link;	/* list link */
254 	vm_offset_t kva;		/* va of mapping */
255 };
256 
257 /* ptbl free list and a lock used for access synchronization. */
258 static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist;
259 static struct mtx ptbl_buf_freelist_lock;
260 
261 /* Base address of kva space allocated for ptbl bufs. */
262 static vm_offset_t ptbl_buf_pool_vabase;
263 
264 /* Pointer to ptbl_buf structures. */
265 static struct ptbl_buf *ptbl_bufs;
266 
267 void pmap_bootstrap_ap(volatile uint32_t *);
268 
269 /*
270  * Kernel MMU interface
271  */
272 static void		mmu_booke_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
273 static void		mmu_booke_clear_modify(mmu_t, vm_page_t);
274 static void		mmu_booke_clear_reference(mmu_t, vm_page_t);
275 static void		mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t,
276     vm_size_t, vm_offset_t);
277 static void		mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
278 static void		mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
279     vm_prot_t, boolean_t);
280 static void		mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
281     vm_page_t, vm_prot_t);
282 static void		mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t,
283     vm_prot_t);
284 static vm_paddr_t	mmu_booke_extract(mmu_t, pmap_t, vm_offset_t);
285 static vm_page_t	mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t,
286     vm_prot_t);
287 static void		mmu_booke_init(mmu_t);
288 static boolean_t	mmu_booke_is_modified(mmu_t, vm_page_t);
289 static boolean_t	mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
290 static boolean_t	mmu_booke_is_referenced(mmu_t, vm_page_t);
291 static int		mmu_booke_ts_referenced(mmu_t, vm_page_t);
292 static vm_offset_t	mmu_booke_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t,
293     int);
294 static int		mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t,
295     vm_paddr_t *);
296 static void		mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t,
297     vm_object_t, vm_pindex_t, vm_size_t);
298 static boolean_t	mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t);
299 static void		mmu_booke_page_init(mmu_t, vm_page_t);
300 static int		mmu_booke_page_wired_mappings(mmu_t, vm_page_t);
301 static void		mmu_booke_pinit(mmu_t, pmap_t);
302 static void		mmu_booke_pinit0(mmu_t, pmap_t);
303 static void		mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
304     vm_prot_t);
305 static void		mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
306 static void		mmu_booke_qremove(mmu_t, vm_offset_t, int);
307 static void		mmu_booke_release(mmu_t, pmap_t);
308 static void		mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
309 static void		mmu_booke_remove_all(mmu_t, vm_page_t);
310 static void		mmu_booke_remove_write(mmu_t, vm_page_t);
311 static void		mmu_booke_zero_page(mmu_t, vm_page_t);
312 static void		mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
313 static void		mmu_booke_zero_page_idle(mmu_t, vm_page_t);
314 static void		mmu_booke_activate(mmu_t, struct thread *);
315 static void		mmu_booke_deactivate(mmu_t, struct thread *);
316 static void		mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
317 static void		*mmu_booke_mapdev(mmu_t, vm_offset_t, vm_size_t);
318 static void		mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t);
319 static vm_offset_t	mmu_booke_kextract(mmu_t, vm_offset_t);
320 static void		mmu_booke_kenter(mmu_t, vm_offset_t, vm_offset_t);
321 static void		mmu_booke_kremove(mmu_t, vm_offset_t);
322 static boolean_t	mmu_booke_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
323 static void		mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t,
324     vm_size_t);
325 static vm_offset_t	mmu_booke_dumpsys_map(mmu_t, struct pmap_md *,
326     vm_size_t, vm_size_t *);
327 static void		mmu_booke_dumpsys_unmap(mmu_t, struct pmap_md *,
328     vm_size_t, vm_offset_t);
329 static struct pmap_md	*mmu_booke_scan_md(mmu_t, struct pmap_md *);
330 
331 static mmu_method_t mmu_booke_methods[] = {
332 	/* pmap dispatcher interface */
333 	MMUMETHOD(mmu_change_wiring,	mmu_booke_change_wiring),
334 	MMUMETHOD(mmu_clear_modify,	mmu_booke_clear_modify),
335 	MMUMETHOD(mmu_clear_reference,	mmu_booke_clear_reference),
336 	MMUMETHOD(mmu_copy,		mmu_booke_copy),
337 	MMUMETHOD(mmu_copy_page,	mmu_booke_copy_page),
338 	MMUMETHOD(mmu_enter,		mmu_booke_enter),
339 	MMUMETHOD(mmu_enter_object,	mmu_booke_enter_object),
340 	MMUMETHOD(mmu_enter_quick,	mmu_booke_enter_quick),
341 	MMUMETHOD(mmu_extract,		mmu_booke_extract),
342 	MMUMETHOD(mmu_extract_and_hold,	mmu_booke_extract_and_hold),
343 	MMUMETHOD(mmu_init,		mmu_booke_init),
344 	MMUMETHOD(mmu_is_modified,	mmu_booke_is_modified),
345 	MMUMETHOD(mmu_is_prefaultable,	mmu_booke_is_prefaultable),
346 	MMUMETHOD(mmu_is_referenced,	mmu_booke_is_referenced),
347 	MMUMETHOD(mmu_ts_referenced,	mmu_booke_ts_referenced),
348 	MMUMETHOD(mmu_map,		mmu_booke_map),
349 	MMUMETHOD(mmu_mincore,		mmu_booke_mincore),
350 	MMUMETHOD(mmu_object_init_pt,	mmu_booke_object_init_pt),
351 	MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick),
352 	MMUMETHOD(mmu_page_init,	mmu_booke_page_init),
353 	MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings),
354 	MMUMETHOD(mmu_pinit,		mmu_booke_pinit),
355 	MMUMETHOD(mmu_pinit0,		mmu_booke_pinit0),
356 	MMUMETHOD(mmu_protect,		mmu_booke_protect),
357 	MMUMETHOD(mmu_qenter,		mmu_booke_qenter),
358 	MMUMETHOD(mmu_qremove,		mmu_booke_qremove),
359 	MMUMETHOD(mmu_release,		mmu_booke_release),
360 	MMUMETHOD(mmu_remove,		mmu_booke_remove),
361 	MMUMETHOD(mmu_remove_all,	mmu_booke_remove_all),
362 	MMUMETHOD(mmu_remove_write,	mmu_booke_remove_write),
363 	MMUMETHOD(mmu_sync_icache,	mmu_booke_sync_icache),
364 	MMUMETHOD(mmu_zero_page,	mmu_booke_zero_page),
365 	MMUMETHOD(mmu_zero_page_area,	mmu_booke_zero_page_area),
366 	MMUMETHOD(mmu_zero_page_idle,	mmu_booke_zero_page_idle),
367 	MMUMETHOD(mmu_activate,		mmu_booke_activate),
368 	MMUMETHOD(mmu_deactivate,	mmu_booke_deactivate),
369 
370 	/* Internal interfaces */
371 	MMUMETHOD(mmu_bootstrap,	mmu_booke_bootstrap),
372 	MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped),
373 	MMUMETHOD(mmu_mapdev,		mmu_booke_mapdev),
374 	MMUMETHOD(mmu_kenter,		mmu_booke_kenter),
375 	MMUMETHOD(mmu_kextract,		mmu_booke_kextract),
376 /*	MMUMETHOD(mmu_kremove,		mmu_booke_kremove),	*/
377 	MMUMETHOD(mmu_unmapdev,		mmu_booke_unmapdev),
378 
379 	/* dumpsys() support */
380 	MMUMETHOD(mmu_dumpsys_map,	mmu_booke_dumpsys_map),
381 	MMUMETHOD(mmu_dumpsys_unmap,	mmu_booke_dumpsys_unmap),
382 	MMUMETHOD(mmu_scan_md,		mmu_booke_scan_md),
383 
384 	{ 0, 0 }
385 };
386 
387 static mmu_def_t booke_mmu = {
388 	MMU_TYPE_BOOKE,
389 	mmu_booke_methods,
390 	0
391 };
392 MMU_DEF(booke_mmu);
393 
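/*
 * Take every other CPU's TLB miss lock so that no remote TLB miss handler
 * walks the page tables while they are being modified. The convention
 * assumed here: tlb_lock()/tlb_unlock() spin on the per-CPU
 * pc_booke_tlb_lock word, which the miss handler takes for its own CPU.
 */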
394 static inline void
395 tlb_miss_lock(void)
396 {
397 #ifdef SMP
398 	struct pcpu *pc;
399 
400 	if (!smp_started)
401 		return;
402 
403 	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
404 		if (pc != pcpup) {
405 
406 			CTR3(KTR_PMAP, "%s: tlb miss LOCK of CPU=%d, "
407 			    "tlb_lock=%p", __func__, pc->pc_cpuid, pc->pc_booke_tlb_lock);
408 
409 			KASSERT((pc->pc_cpuid != PCPU_GET(cpuid)),
410 			    ("tlb_miss_lock: tried to lock self"));
411 
412 			tlb_lock(pc->pc_booke_tlb_lock);
413 
414 			CTR1(KTR_PMAP, "%s: locked", __func__);
415 		}
416 	}
417 #endif
418 }
419 
420 static inline void
421 tlb_miss_unlock(void)
422 {
423 #ifdef SMP
424 	struct pcpu *pc;
425 
426 	if (!smp_started)
427 		return;
428 
429 	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
430 		if (pc != pcpup) {
431 			CTR2(KTR_PMAP, "%s: tlb miss UNLOCK of CPU=%d",
432 			    __func__, pc->pc_cpuid);
433 
434 			tlb_unlock(pc->pc_booke_tlb_lock);
435 
436 			CTR1(KTR_PMAP, "%s: unlocked", __func__);
437 		}
438 	}
439 #endif
440 }
441 
442 /* Read TLB0 size and associativity from the TLB0CFG register. */
443 static __inline void
444 tlb0_get_tlbconf(void)
445 {
446 	uint32_t tlb0_cfg;
447 
448 	tlb0_cfg = mfspr(SPR_TLB0CFG);
449 	tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK;
450 	tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT;
451 	tlb0_entries_per_way = tlb0_entries / tlb0_ways;
452 }
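/*
 * Worked example (numbers vary with core revision): a 512-entry, 4-way
 * TLB0, as found on e500v2, yields tlb0_entries_per_way = 512 / 4 = 128.
 */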
453 
454 /* Initialize pool of kva ptbl buffers. */
455 static void
456 ptbl_init(void)
457 {
458 	int i;
459 
460 	CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__,
461 	    (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS);
462 	CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)",
463 	    __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE);
464 
465 	mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
466 	TAILQ_INIT(&ptbl_buf_freelist);
467 
468 	for (i = 0; i < PTBL_BUFS; i++) {
469 		ptbl_bufs[i].kva = ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE;
470 		TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
471 	}
472 }
473 
474 /* Get a ptbl_buf from the freelist. */
475 static struct ptbl_buf *
476 ptbl_buf_alloc(void)
477 {
478 	struct ptbl_buf *buf;
479 
480 	mtx_lock(&ptbl_buf_freelist_lock);
481 	buf = TAILQ_FIRST(&ptbl_buf_freelist);
482 	if (buf != NULL)
483 		TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
484 	mtx_unlock(&ptbl_buf_freelist_lock);
485 
486 	CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);
487 
488 	return (buf);
489 }
490 
491 /* Return ptbl buff to free pool. */
492 static void
493 ptbl_buf_free(struct ptbl_buf *buf)
494 {
495 
496 	CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);
497 
498 	mtx_lock(&ptbl_buf_freelist_lock);
499 	TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
500 	mtx_unlock(&ptbl_buf_freelist_lock);
501 }
502 
503 /*
504  * Find the ptbl buf backing the given ptbl on the pmap's list and free it.
505  */
506 static void
507 ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl)
508 {
509 	struct ptbl_buf *pbuf;
510 
511 	CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);
512 
513 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
514 
515 	TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link)
516 		if (pbuf->kva == (vm_offset_t)ptbl) {
517 			/* Remove from pmap ptbl buf list. */
518 			TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link);
519 
520 			/* Free corresponding ptbl buf. */
521 			ptbl_buf_free(pbuf);
522 			break;
523 		}
524 }
525 
526 /* Allocate page table. */
527 static pte_t *
528 ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
529 {
530 	vm_page_t mtbl[PTBL_PAGES];
531 	vm_page_t m;
532 	struct ptbl_buf *pbuf;
533 	unsigned int pidx;
534 	pte_t *ptbl;
535 	int i;
536 
537 	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
538 	    (pmap == kernel_pmap), pdir_idx);
539 
540 	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
541 	    ("ptbl_alloc: invalid pdir_idx"));
542 	KASSERT((pmap->pm_pdir[pdir_idx] == NULL),
543 	    ("pte_alloc: valid ptbl entry exists!"));
544 
545 	pbuf = ptbl_buf_alloc();
546 	if (pbuf == NULL)
547 		panic("pte_alloc: couldn't alloc kernel virtual memory");
548 
549 	ptbl = (pte_t *)pbuf->kva;
550 
551 	CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl);
552 
553 	/* Allocate ptbl pages, this will sleep! */
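	/*
	 * Drop the locks around VM_WAIT so the pagedaemon can make progress
	 * while we sleep waiting for a free page, then retake them and
	 * retry the allocation.
	 */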
554 	for (i = 0; i < PTBL_PAGES; i++) {
555 		pidx = (PTBL_PAGES * pdir_idx) + i;
556 		while ((m = vm_page_alloc(NULL, pidx,
557 		    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
558 
559 			PMAP_UNLOCK(pmap);
560 			vm_page_unlock_queues();
561 			VM_WAIT;
562 			vm_page_lock_queues();
563 			PMAP_LOCK(pmap);
564 		}
565 		mtbl[i] = m;
566 	}
567 
568 	/* Map allocated pages into kernel_pmap. */
569 	mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES);
570 
571 	/* Zero whole ptbl. */
572 	bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE);
573 
574 	/* Add pbuf to the pmap ptbl bufs list. */
575 	TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link);
576 
577 	return (ptbl);
578 }
579 
580 /* Free ptbl pages and invalidate pdir entry. */
581 static void
582 ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
583 {
584 	pte_t *ptbl;
585 	vm_paddr_t pa;
586 	vm_offset_t va;
587 	vm_page_t m;
588 	int i;
589 
590 	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
591 	    (pmap == kernel_pmap), pdir_idx);
592 
593 	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
594 	    ("ptbl_free: invalid pdir_idx"));
595 
596 	ptbl = pmap->pm_pdir[pdir_idx];
597 
598 	CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);
599 
600 	KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));
601 
602 	/*
603 	 * Invalidate the pdir entry as soon as possible, so that other CPUs
604 	 * don't attempt to look up the page tables we are releasing.
605 	 */
606 	mtx_lock_spin(&tlbivax_mutex);
607 	tlb_miss_lock();
608 
609 	pmap->pm_pdir[pdir_idx] = NULL;
610 
611 	tlb_miss_unlock();
612 	mtx_unlock_spin(&tlbivax_mutex);
613 
614 	for (i = 0; i < PTBL_PAGES; i++) {
615 		va = ((vm_offset_t)ptbl + (i * PAGE_SIZE));
616 		pa = pte_vatopa(mmu, kernel_pmap, va);
617 		m = PHYS_TO_VM_PAGE(pa);
618 		vm_page_free_zero(m);
619 		atomic_subtract_int(&cnt.v_wire_count, 1);
620 		mmu_booke_kremove(mmu, va);
621 	}
622 
623 	ptbl_free_pmap_ptbl(pmap, ptbl);
624 }
625 
626 /*
627  * Decrement ptbl pages hold count and attempt to free ptbl pages.
628  * Called when removing a pte entry from the ptbl.
629  *
630  * Return 1 if ptbl pages were freed.
631  */
632 static int
633 ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
634 {
635 	pte_t *ptbl;
636 	vm_paddr_t pa;
637 	vm_page_t m;
638 	int i;
639 
640 	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
641 	    (pmap == kernel_pmap), pdir_idx);
642 
643 	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
644 	    ("ptbl_unhold: invalid pdir_idx"));
645 	KASSERT((pmap != kernel_pmap),
646 	    ("ptbl_unhold: unholding kernel ptbl!"));
647 
648 	ptbl = pmap->pm_pdir[pdir_idx];
649 
650 	//debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl);
651 	KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS),
652 	    ("ptbl_unhold: non kva ptbl"));
653 
654 	/* decrement hold count */
655 	for (i = 0; i < PTBL_PAGES; i++) {
656 		pa = pte_vatopa(mmu, kernel_pmap,
657 		    (vm_offset_t)ptbl + (i * PAGE_SIZE));
658 		m = PHYS_TO_VM_PAGE(pa);
659 		m->wire_count--;
660 	}
661 
662 	/*
663  * Free ptbl pages if there are no pte entries in this ptbl.
664 	 * wire_count has the same value for all ptbl pages, so check the last
665 	 * page.
666 	 */
667 	if (m->wire_count == 0) {
668 		ptbl_free(mmu, pmap, pdir_idx);
669 
670 		//debugf("ptbl_unhold: e (freed ptbl)\n");
671 		return (1);
672 	}
673 
674 	return (0);
675 }
676 
677 /*
678  * Increment hold count for ptbl pages. This routine is used when a new pte
679  * entry is being inserted into the ptbl.
680  */
681 static void
682 ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
683 {
684 	vm_paddr_t pa;
685 	pte_t *ptbl;
686 	vm_page_t m;
687 	int i;
688 
689 	CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap,
690 	    pdir_idx);
691 
692 	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
693 	    ("ptbl_hold: invalid pdir_idx"));
694 	KASSERT((pmap != kernel_pmap),
695 	    ("ptbl_hold: holding kernel ptbl!"));
696 
697 	ptbl = pmap->pm_pdir[pdir_idx];
698 
699 	KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));
700 
701 	for (i = 0; i < PTBL_PAGES; i++) {
702 		pa = pte_vatopa(mmu, kernel_pmap,
703 		    (vm_offset_t)ptbl + (i * PAGE_SIZE));
704 		m = PHYS_TO_VM_PAGE(pa);
705 		m->wire_count++;
706 	}
707 }
708 
709 /* Allocate pv_entry structure. */
710 static pv_entry_t
711 pv_alloc(void)
712 {
713 	pv_entry_t pv;
714 
715 	pv_entry_count++;
716 	if (pv_entry_count > pv_entry_high_water)
717 		pagedaemon_wakeup();
718 	pv = uma_zalloc(pvzone, M_NOWAIT);
719 
720 	return (pv);
721 }
722 
723 /* Free pv_entry structure. */
724 static __inline void
725 pv_free(pv_entry_t pve)
726 {
727 
728 	pv_entry_count--;
729 	uma_zfree(pvzone, pve);
730 }
731 
732 
733 /* Allocate and initialize pv_entry structure. */
734 static void
735 pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m)
736 {
737 	pv_entry_t pve;
738 
739 	//int su = (pmap == kernel_pmap);
740 	//debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su,
741 	//	(u_int32_t)pmap, va, (u_int32_t)m);
742 
743 	pve = pv_alloc();
744 	if (pve == NULL)
745 		panic("pv_insert: no pv entries!");
746 
747 	pve->pv_pmap = pmap;
748 	pve->pv_va = va;
749 
750 	/* add to pv_list */
751 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
752 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
753 
754 	TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link);
755 
756 	//debugf("pv_insert: e\n");
757 }
758 
759 /* Destroy pv entry. */
760 static void
761 pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m)
762 {
763 	pv_entry_t pve;
764 
765 	//int su = (pmap == kernel_pmap);
766 	//debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va);
767 
768 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
769 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
770 
771 	/* find pv entry */
772 	TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) {
773 		if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
774 			/* remove from pv_list */
775 			TAILQ_REMOVE(&m->md.pv_list, pve, pv_link);
776 			if (TAILQ_EMPTY(&m->md.pv_list))
777 				vm_page_flag_clear(m, PG_WRITEABLE);
778 
779 			/* free pv entry struct */
780 			pv_free(pve);
781 			break;
782 		}
783 	}
784 
785 	//debugf("pv_remove: e\n");
786 }
787 
788 /*
789  * Clear the pte entry and try to free the page table page if requested.
790  *
791  * Return 1 if ptbl pages were freed, otherwise return 0.
792  */
793 static int
794 pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
795 {
796 	unsigned int pdir_idx = PDIR_IDX(va);
797 	unsigned int ptbl_idx = PTBL_IDX(va);
798 	vm_page_t m;
799 	pte_t *ptbl;
800 	pte_t *pte;
801 
802 	//int su = (pmap == kernel_pmap);
803 	//debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n",
804 	//		su, (u_int32_t)pmap, va, flags);
805 
806 	ptbl = pmap->pm_pdir[pdir_idx];
807 	KASSERT(ptbl, ("pte_remove: null ptbl"));
808 
809 	pte = &ptbl[ptbl_idx];
810 
811 	if (pte == NULL || !PTE_ISVALID(pte))
812 		return (0);
813 
814 	if (PTE_ISWIRED(pte))
815 		pmap->pm_stats.wired_count--;
816 
817 	/* Handle managed entry. */
818 	if (PTE_ISMANAGED(pte)) {
819 		/* Get vm_page_t for mapped pte. */
820 		m = PHYS_TO_VM_PAGE(PTE_PA(pte));
821 
822 		if (PTE_ISMODIFIED(pte))
823 			vm_page_dirty(m);
824 
825 		if (PTE_ISREFERENCED(pte))
826 			vm_page_flag_set(m, PG_REFERENCED);
827 
828 		pv_remove(pmap, va, m);
829 	}
830 
831 	mtx_lock_spin(&tlbivax_mutex);
832 	tlb_miss_lock();
833 
834 	tlb0_flush_entry(va);
835 	pte->flags = 0;
836 	pte->rpn = 0;
837 
838 	tlb_miss_unlock();
839 	mtx_unlock_spin(&tlbivax_mutex);
840 
841 	pmap->pm_stats.resident_count--;
842 
843 	if (flags & PTBL_UNHOLD) {
844 		//debugf("pte_remove: e (unhold)\n");
845 		return (ptbl_unhold(mmu, pmap, pdir_idx));
846 	}
847 
848 	//debugf("pte_remove: e\n");
849 	return (0);
850 }
851 
852 /*
853  * Insert PTE for a given page and virtual address.
854  */
855 static void
856 pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags)
857 {
858 	unsigned int pdir_idx = PDIR_IDX(va);
859 	unsigned int ptbl_idx = PTBL_IDX(va);
860 	pte_t *ptbl, *pte;
861 
862 	CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__,
863 	    pmap == kernel_pmap, pmap, va);
864 
865 	/* Get the page table pointer. */
866 	ptbl = pmap->pm_pdir[pdir_idx];
867 
868 	if (ptbl == NULL) {
869 		/* Allocate page table pages. */
870 		ptbl = ptbl_alloc(mmu, pmap, pdir_idx);
871 	} else {
872 		/*
873 		 * Check if there is a valid mapping for the requested
874 		 * va; if there is, remove it.
875 		 */
876 		pte = &pmap->pm_pdir[pdir_idx][ptbl_idx];
877 		if (PTE_ISVALID(pte)) {
878 			pte_remove(mmu, pmap, va, PTBL_HOLD);
879 		} else {
880 			/*
881 			 * pte is not used, increment hold count
882 			 * for ptbl pages.
883 			 */
884 			if (pmap != kernel_pmap)
885 				ptbl_hold(mmu, pmap, pdir_idx);
886 		}
887 	}
888 
889 	/*
890 	 * Insert pv_entry into pv_list for mapped page if part of managed
891 	 * memory.
892 	 */
893 	if ((m->flags & PG_FICTITIOUS) == 0) {
894 		if ((m->flags & PG_UNMANAGED) == 0) {
895 			flags |= PTE_MANAGED;
896 
897 			/* Create and insert pv entry. */
898 			pv_insert(pmap, va, m);
899 		}
900 	}
901 
902 	pmap->pm_stats.resident_count++;
903 
904 	mtx_lock_spin(&tlbivax_mutex);
905 	tlb_miss_lock();
906 
907 	tlb0_flush_entry(va);
908 	if (pmap->pm_pdir[pdir_idx] == NULL) {
909 		/*
910 		 * If we just allocated a new page table, hook it in
911 		 * the pdir.
912 		 */
913 		pmap->pm_pdir[pdir_idx] = ptbl;
914 	}
915 	pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]);
916 	pte->rpn = VM_PAGE_TO_PHYS(m) & ~PTE_PA_MASK;
917 	pte->flags |= (PTE_VALID | flags);
918 
919 	tlb_miss_unlock();
920 	mtx_unlock_spin(&tlbivax_mutex);
921 }
922 
923 /* Return the pa for the given pmap/va. */
924 static vm_paddr_t
925 pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
926 {
927 	vm_paddr_t pa = 0;
928 	pte_t *pte;
929 
930 	pte = pte_find(mmu, pmap, va);
931 	if ((pte != NULL) && PTE_ISVALID(pte))
932 		pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
933 	return (pa);
934 }
935 
936 /* Get a pointer to a PTE in a page table. */
937 static pte_t *
938 pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
939 {
940 	unsigned int pdir_idx = PDIR_IDX(va);
941 	unsigned int ptbl_idx = PTBL_IDX(va);
942 
943 	KASSERT((pmap != NULL), ("pte_find: invalid pmap"));
944 
945 	if (pmap->pm_pdir[pdir_idx])
946 		return (&(pmap->pm_pdir[pdir_idx][ptbl_idx]));
947 
948 	return (NULL);
949 }
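/*
 * Lookup sketch, assuming the usual two-level split (4 MB PDIR_SIZE,
 * 4 KB pages; the authoritative constants live in pte.h):
 *
 *	pdir_idx = va >> 22;		   top 10 bits
 *	ptbl_idx = (va >> 12) & 0x3ff;	   middle 10 bits
 *
 * e.g. va = 0xc100_4000 gives pdir_idx = 0x304 and ptbl_idx = 0x004.
 */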
950 
951 /**************************************************************************/
952 /* PMAP related */
953 /**************************************************************************/
954 
955 /*
956  * This is called during e500_init, before the system is really initialized.
957  */
958 static void
959 mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
960 {
961 	vm_offset_t phys_kernelend;
962 	struct mem_region *mp, *mp1;
963 	int cnt, i, j;
964 	u_int s, e, sz;
965 	u_int phys_avail_count;
966 	vm_size_t physsz, hwphyssz, kstack0_sz;
967 	vm_offset_t kernel_pdir, kstack0, va;
968 	vm_paddr_t kstack0_phys;
969 	void *dpcpu;
970 	pte_t *pte;
971 
972 	debugf("mmu_booke_bootstrap: entered\n");
973 
974 	/* Initialize invalidation mutex */
975 	mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN);
976 
977 	/* Read TLB0 size and associativity. */
978 	tlb0_get_tlbconf();
979 
980 	/* Align kernel start and end address (kernel image). */
981 	kernstart = trunc_page(start);
982 	data_start = round_page(kernelend);
983 	kernsize = data_start - kernstart;
984 
985 	data_end = data_start;
986 
987 	/* Allocate space for the message buffer. */
988 	msgbufp = (struct msgbuf *)data_end;
989 	data_end += MSGBUF_SIZE;
990 	debugf(" msgbufp at 0x%08x end = 0x%08x\n", (uint32_t)msgbufp,
991 	    data_end);
992 
993 	data_end = round_page(data_end);
994 
995 	/* Allocate the dynamic per-cpu area. */
996 	dpcpu = (void *)data_end;
997 	data_end += DPCPU_SIZE;
998 	dpcpu_init(dpcpu, 0);
999 
1000 	/* Allocate space for ptbl_bufs. */
1001 	ptbl_bufs = (struct ptbl_buf *)data_end;
1002 	data_end += sizeof(struct ptbl_buf) * PTBL_BUFS;
1003 	debugf(" ptbl_bufs at 0x%08x end = 0x%08x\n", (uint32_t)ptbl_bufs,
1004 	    data_end);
1005 
1006 	data_end = round_page(data_end);
1007 
1008 	/* Allocate PTE tables for kernel KVA. */
1009 	kernel_pdir = data_end;
1010 	kernel_ptbls = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS +
1011 	    PDIR_SIZE - 1) / PDIR_SIZE;
1012 	data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE;
1013 	debugf(" kernel ptbls: %d\n", kernel_ptbls);
1014 	debugf(" kernel pdir at 0x%08x end = 0x%08x\n", kernel_pdir, data_end);
1015 
1016 	debugf(" data_end: 0x%08x\n", data_end);
1017 	if (data_end - kernstart > 0x1000000) {
1018 		data_end = (data_end + 0x3fffff) & ~0x3fffff;
1019 		tlb1_mapin_region(kernstart + 0x1000000,
1020 		    kernload + 0x1000000, data_end - kernstart - 0x1000000);
1021 	} else
1022 		data_end = (data_end + 0xffffff) & ~0xffffff;
1023 
1024 	debugf(" updated data_end: 0x%08x\n", data_end);
1025 
1026 	kernsize += data_end - data_start;
1027 
1028 	/*
1029 	 * Clear the structures - note we can only do it safely after the
1030 	 * possible additional TLB1 translations are in place (above) so that
1031  * the whole range up to the currently calculated 'data_end' is covered.
1032 	 */
1033 	memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_BUFS);
1034 	memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE);
1035 
1036 	/*******************************************************/
1037 	/* Set the start and end of kva. */
1038 	/*******************************************************/
1039 	virtual_avail = round_page(data_end);
1040 	virtual_end = VM_MAX_KERNEL_ADDRESS;
1041 
1042 	/* Allocate KVA space for page zero/copy operations. */
1043 	zero_page_va = virtual_avail;
1044 	virtual_avail += PAGE_SIZE;
1045 	zero_page_idle_va = virtual_avail;
1046 	virtual_avail += PAGE_SIZE;
1047 	copy_page_src_va = virtual_avail;
1048 	virtual_avail += PAGE_SIZE;
1049 	copy_page_dst_va = virtual_avail;
1050 	virtual_avail += PAGE_SIZE;
1051 	debugf("zero_page_va = 0x%08x\n", zero_page_va);
1052 	debugf("zero_page_idle_va = 0x%08x\n", zero_page_idle_va);
1053 	debugf("copy_page_src_va = 0x%08x\n", copy_page_src_va);
1054 	debugf("copy_page_dst_va = 0x%08x\n", copy_page_dst_va);
1055 
1056 	/* Initialize page zero/copy mutexes. */
1057 	mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF);
1058 	mtx_init(&copy_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF);
1059 
1060 	/* Allocate KVA space for ptbl bufs. */
1061 	ptbl_buf_pool_vabase = virtual_avail;
1062 	virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE;
1063 	debugf("ptbl_buf_pool_vabase = 0x%08x end = 0x%08x\n",
1064 	    ptbl_buf_pool_vabase, virtual_avail);
1065 
1066 	/* Calculate corresponding physical addresses for the kernel region. */
1067 	phys_kernelend = kernload + kernsize;
1068 	debugf("kernel image and allocated data:\n");
1069 	debugf(" kernload    = 0x%08x\n", kernload);
1070 	debugf(" kernstart   = 0x%08x\n", kernstart);
1071 	debugf(" kernsize    = 0x%08x\n", kernsize);
1072 
1073 	if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz)
1074 		panic("mmu_booke_bootstrap: phys_avail too small");
1075 
1076 	/*
1077 	 * Remove kernel physical address range from avail regions list. Page
1078 	 * align all regions.  Non-page aligned memory isn't very interesting
1079 	 * to us.  Also, sort the entries for ascending addresses.
1080 	 */
1081 
1082 	/* Retrieve phys/avail mem regions */
1083 	mem_regions(&physmem_regions, &physmem_regions_sz,
1084 	    &availmem_regions, &availmem_regions_sz);
1085 	sz = 0;
1086 	cnt = availmem_regions_sz;
1087 	debugf("processing avail regions:\n");
1088 	for (mp = availmem_regions; mp->mr_size; mp++) {
1089 		s = mp->mr_start;
1090 		e = mp->mr_start + mp->mr_size;
1091 		debugf(" %08x-%08x -> ", s, e);
1092 		/* Check whether this region holds all of the kernel. */
1093 		if (s < kernload && e > phys_kernelend) {
1094 			availmem_regions[cnt].mr_start = phys_kernelend;
1095 			availmem_regions[cnt++].mr_size = e - phys_kernelend;
1096 			e = kernload;
1097 		}
1098 		/* Look whether this region starts within the kernel. */
1099 		if (s >= kernload && s < phys_kernelend) {
1100 			if (e <= phys_kernelend)
1101 				goto empty;
1102 			s = phys_kernelend;
1103 		}
1104 		/* Now look whether this region ends within the kernel. */
1105 		if (e > kernload && e <= phys_kernelend) {
1106 			if (s >= kernload)
1107 				goto empty;
1108 			e = kernload;
1109 		}
1110 		/* Now page align the start and size of the region. */
1111 		s = round_page(s);
1112 		e = trunc_page(e);
1113 		if (e < s)
1114 			e = s;
1115 		sz = e - s;
1116 		debugf("%08x-%08x = %x\n", s, e, sz);
1117 
1118 		/* Check whether some memory is left here. */
1119 		if (sz == 0) {
1120 		empty:
1121 			memmove(mp, mp + 1,
1122 			    (cnt - (mp - availmem_regions)) * sizeof(*mp));
1123 			cnt--;
1124 			mp--;
1125 			continue;
1126 		}
1127 
1128 		/* Do an insertion sort. */
1129 		for (mp1 = availmem_regions; mp1 < mp; mp1++)
1130 			if (s < mp1->mr_start)
1131 				break;
1132 		if (mp1 < mp) {
1133 			memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
1134 			mp1->mr_start = s;
1135 			mp1->mr_size = sz;
1136 		} else {
1137 			mp->mr_start = s;
1138 			mp->mr_size = sz;
1139 		}
1140 	}
1141 	availmem_regions_sz = cnt;
1142 
1143 	/*******************************************************/
1144 	/* Steal physical memory for kernel stack from the end */
1145 	/* of the first avail region                           */
1146 	/*******************************************************/
1147 	kstack0_sz = KSTACK_PAGES * PAGE_SIZE;
1148 	kstack0_phys = availmem_regions[0].mr_start +
1149 	    availmem_regions[0].mr_size;
1150 	kstack0_phys -= kstack0_sz;
1151 	availmem_regions[0].mr_size -= kstack0_sz;
1152 
1153 	/*******************************************************/
1154 	/* Fill in phys_avail table, based on availmem_regions */
1155 	/*******************************************************/
1156 	phys_avail_count = 0;
1157 	physsz = 0;
1158 	hwphyssz = 0;
1159 	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
1160 
1161 	debugf("fill in phys_avail:\n");
1162 	for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) {
1163 
1164 		debugf(" region: 0x%08x - 0x%08x (0x%08x)\n",
1165 		    availmem_regions[i].mr_start,
1166 		    availmem_regions[i].mr_start +
1167 		        availmem_regions[i].mr_size,
1168 		    availmem_regions[i].mr_size);
1169 
1170 		if (hwphyssz != 0 &&
1171 		    (physsz + availmem_regions[i].mr_size) >= hwphyssz) {
1172 			debugf(" hw.physmem adjust\n");
1173 			if (physsz < hwphyssz) {
1174 				phys_avail[j] = availmem_regions[i].mr_start;
1175 				phys_avail[j + 1] =
1176 				    availmem_regions[i].mr_start +
1177 				    hwphyssz - physsz;
1178 				physsz = hwphyssz;
1179 				phys_avail_count++;
1180 			}
1181 			break;
1182 		}
1183 
1184 		phys_avail[j] = availmem_regions[i].mr_start;
1185 		phys_avail[j + 1] = availmem_regions[i].mr_start +
1186 		    availmem_regions[i].mr_size;
1187 		phys_avail_count++;
1188 		physsz += availmem_regions[i].mr_size;
1189 	}
1190 	physmem = btoc(physsz);
1191 
1192 	/* Calculate the last available physical address. */
1193 	for (i = 0; phys_avail[i + 2] != 0; i += 2)
1194 		;
1195 	Maxmem = powerpc_btop(phys_avail[i + 1]);
1196 
1197 	debugf("Maxmem = 0x%08lx\n", Maxmem);
1198 	debugf("phys_avail_count = %d\n", phys_avail_count);
1199 	debugf("physsz = 0x%08x physmem = %ld (0x%08lx)\n", physsz, physmem,
1200 	    physmem);
1201 
1202 	/*******************************************************/
1203 	/* Initialize (statically allocated) kernel pmap. */
1204 	/*******************************************************/
1205 	PMAP_LOCK_INIT(kernel_pmap);
1206 	kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE;
1207 
1208 	debugf("kernel_pmap = 0x%08x\n", (uint32_t)kernel_pmap);
1209 	debugf("kptbl_min = %d, kernel_ptbls = %d\n", kptbl_min, kernel_ptbls);
1210 	debugf("kernel pdir range: 0x%08x - 0x%08x\n",
1211 	    kptbl_min * PDIR_SIZE, (kptbl_min + kernel_ptbls) * PDIR_SIZE - 1);
1212 
1213 	/* Initialize kernel pdir */
1214 	for (i = 0; i < kernel_ptbls; i++)
1215 		kernel_pmap->pm_pdir[kptbl_min + i] =
1216 		    (pte_t *)(kernel_pdir + (i * PAGE_SIZE * PTBL_PAGES));
1217 
1218 	for (i = 0; i < MAXCPU; i++) {
1219 		kernel_pmap->pm_tid[i] = TID_KERNEL;
1220 
1221 		/* Initialize each CPU's tidbusy entry 0 with kernel_pmap */
1222 		tidbusy[i][0] = kernel_pmap;
1223 	}
1224 
1225 	/*
1226 	 * Fill in PTEs covering kernel code and data. They are not required
1227 	 * for address translation, as this area is covered by static TLB1
1228 	 * entries, but for pte_vatopa() to work correctly with kernel area
1229 	 * addresses.
1230 	 */
1231 	for (va = KERNBASE; va < data_end; va += PAGE_SIZE) {
1232 		pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]);
1233 		pte->rpn = kernload + (va - KERNBASE);
1234 		pte->flags = PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
1235 		    PTE_VALID;
1236 	}
1237 	/* Mark kernel_pmap active on all CPUs */
1238 	kernel_pmap->pm_active = ~0;
1239 
1240 	/*******************************************************/
1241 	/* Final setup */
1242 	/*******************************************************/
1243 
1244 	/* Enter kstack0 into kernel map, provide guard page */
1245 	kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
1246 	thread0.td_kstack = kstack0;
1247 	thread0.td_kstack_pages = KSTACK_PAGES;
1248 
1249 	debugf("kstack_sz = 0x%08x\n", kstack0_sz);
1250 	debugf("kstack0_phys at 0x%08x - 0x%08x\n",
1251 	    kstack0_phys, kstack0_phys + kstack0_sz);
1252 	debugf("kstack0 at 0x%08x - 0x%08x\n", kstack0, kstack0 + kstack0_sz);
1253 
1254 	virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz;
1255 	for (i = 0; i < KSTACK_PAGES; i++) {
1256 		mmu_booke_kenter(mmu, kstack0, kstack0_phys);
1257 		kstack0 += PAGE_SIZE;
1258 		kstack0_phys += PAGE_SIZE;
1259 	}
1260 
1261 	debugf("virtual_avail = %08x\n", virtual_avail);
1262 	debugf("virtual_end   = %08x\n", virtual_end);
1263 
1264 	debugf("mmu_booke_bootstrap: exit\n");
1265 }
1266 
1267 void
1268 pmap_bootstrap_ap(volatile uint32_t *trcp __unused)
1269 {
1270 	int i;
1271 
1272 	/*
1273 	 * Finish TLB1 configuration: the BSP already set up its TLB1 and we
1274 	 * have the snapshot of its contents in the s/w tlb1[] table, so use
1275  * these values directly to (re)program the AP's TLB1 hardware.
1276 	 */
1277 	for (i = 0; i < tlb1_idx; i ++) {
1278 		/* Skip invalid entries */
1279 		if (!(tlb1[i].mas1 & MAS1_VALID))
1280 			continue;
1281 
1282 		tlb1_write_entry(i);
1283 	}
1284 
1285 	set_mas4_defaults();
1286 }
1287 
1288 /*
1289  * Get the physical page address for the given pmap/virtual address.
1290  */
1291 static vm_paddr_t
1292 mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1293 {
1294 	vm_paddr_t pa;
1295 
1296 	PMAP_LOCK(pmap);
1297 	pa = pte_vatopa(mmu, pmap, va);
1298 	PMAP_UNLOCK(pmap);
1299 
1300 	return (pa);
1301 }
1302 
1303 /*
1304  * Extract the physical page address associated with the given
1305  * kernel virtual address.
1306  */
1307 static vm_paddr_t
1308 mmu_booke_kextract(mmu_t mmu, vm_offset_t va)
1309 {
1310 
1311 	return (pte_vatopa(mmu, kernel_pmap, va));
1312 }
1313 
1314 /*
1315  * Initialize the pmap module.
1316  * Called by vm_init, to initialize any structures that the pmap
1317  * system needs to map virtual memory.
1318  */
1319 static void
1320 mmu_booke_init(mmu_t mmu)
1321 {
1322 	int shpgperproc = PMAP_SHPGPERPROC;
1323 
1324 	/*
1325 	 * Initialize the address space (zone) for the pv entries.  Set a
1326 	 * high water mark so that the system can recover from excessive
1327 	 * numbers of pv entries.
1328 	 */
1329 	pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
1330 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
1331 
1332 	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
1333 	pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
1334 
1335 	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
1336 	pv_entry_high_water = 9 * (pv_entry_max / 10);
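	/*
	 * Illustrative sizing with the defaults above (no tunables set):
	 * pv_entry_max = 200 * maxproc + v_page_count, and the high-water
	 * mark at 90% lets pv_alloc() wake the pagedaemon before the zone
	 * is exhausted.
	 */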
1337 
1338 	uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
1339 
1340 	/* Pre-fill pvzone with initial number of pv entries. */
1341 	uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN);
1342 
1343 	/* Initialize ptbl allocation. */
1344 	ptbl_init();
1345 }
1346 
1347 /*
1348  * Map a list of wired pages into kernel virtual address space.  This is
1349  * intended for temporary mappings which do not need page modification or
1350  * references recorded.  Existing mappings in the region are overwritten.
1351  */
1352 static void
1353 mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
1354 {
1355 	vm_offset_t va;
1356 
1357 	va = sva;
1358 	while (count-- > 0) {
1359 		mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
1360 		va += PAGE_SIZE;
1361 		m++;
1362 	}
1363 }
1364 
1365 /*
1366  * Remove page mappings from kernel virtual address space.  Intended for
1367  * temporary mappings entered by mmu_booke_qenter.
1368  */
1369 static void
1370 mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count)
1371 {
1372 	vm_offset_t va;
1373 
1374 	va = sva;
1375 	while (count-- > 0) {
1376 		mmu_booke_kremove(mmu, va);
1377 		va += PAGE_SIZE;
1378 	}
1379 }
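/*
 * Usage sketch for the qenter/qremove pair (hypothetical caller):
 *
 *	mmu_booke_qenter(mmu, va, pages, npages);
 *	... access the pages through va ...
 *	mmu_booke_qremove(mmu, va, npages);
 */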
1380 
1381 /*
1382  * Map a wired page into kernel virtual address space.
1383  */
1384 static void
1385 mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
1386 {
1387 	unsigned int pdir_idx = PDIR_IDX(va);
1388 	unsigned int ptbl_idx = PTBL_IDX(va);
1389 	uint32_t flags;
1390 	pte_t *pte;
1391 
1392 	KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
1393 	    (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va"));
1394 
1395 	flags = 0;
1396 	flags |= (PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID);
1397 	flags |= PTE_M;
1398 
1399 	pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);
1400 
1401 	mtx_lock_spin(&tlbivax_mutex);
1402 	tlb_miss_lock();
1403 
1404 	if (PTE_ISVALID(pte)) {
1405 
1406 		CTR1(KTR_PMAP, "%s: replacing entry!", __func__);
1407 
1408 		/* Flush entry from TLB0 */
1409 		tlb0_flush_entry(va);
1410 	}
1411 
1412 	pte->rpn = pa & ~PTE_PA_MASK;
1413 	pte->flags = flags;
1414 
1415 	//debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x "
1416 	//		"pa=0x%08x rpn=0x%08x flags=0x%08x\n",
1417 	//		pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags);
1418 
1419 	/* Flush the real memory from the instruction cache. */
1420 	if ((flags & (PTE_I | PTE_G)) == 0) {
1421 		__syncicache((void *)va, PAGE_SIZE);
1422 	}
1423 
1424 	tlb_miss_unlock();
1425 	mtx_unlock_spin(&tlbivax_mutex);
1426 }
1427 
1428 /*
1429  * Remove a page from kernel page table.
1430  */
1431 static void
1432 mmu_booke_kremove(mmu_t mmu, vm_offset_t va)
1433 {
1434 	unsigned int pdir_idx = PDIR_IDX(va);
1435 	unsigned int ptbl_idx = PTBL_IDX(va);
1436 	pte_t *pte;
1437 
1438 //	CTR2(KTR_PMAP,("%s: s (va = 0x%08x)\n", __func__, va));
1439 
1440 	KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
1441 	    (va <= VM_MAX_KERNEL_ADDRESS)),
1442 	    ("mmu_booke_kremove: invalid va"));
1443 
1444 	pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);
1445 
1446 	if (!PTE_ISVALID(pte)) {
1447 
1448 		CTR1(KTR_PMAP, "%s: invalid pte", __func__);
1449 
1450 		return;
1451 	}
1452 
1453 	mtx_lock_spin(&tlbivax_mutex);
1454 	tlb_miss_lock();
1455 
1456 	/* Invalidate entry in TLB0, update PTE. */
1457 	tlb0_flush_entry(va);
1458 	pte->flags = 0;
1459 	pte->rpn = 0;
1460 
1461 	tlb_miss_unlock();
1462 	mtx_unlock_spin(&tlbivax_mutex);
1463 }
1464 
1465 /*
1466  * Initialize pmap associated with process 0.
1467  */
1468 static void
1469 mmu_booke_pinit0(mmu_t mmu, pmap_t pmap)
1470 {
1471 
1472 	mmu_booke_pinit(mmu, pmap);
1473 	PCPU_SET(curpmap, pmap);
1474 }
1475 
1476 /*
1477  * Initialize a preallocated and zeroed pmap structure,
1478  * such as one in a vmspace structure.
1479  */
1480 static void
1481 mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
1482 {
1483 	int i;
1484 
1485 	CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap,
1486 	    curthread->td_proc->p_pid, curthread->td_proc->p_comm);
1487 
1488 	KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap"));
1489 
1490 	PMAP_LOCK_INIT(pmap);
1491 	for (i = 0; i < MAXCPU; i++)
1492 		pmap->pm_tid[i] = TID_NONE;
1493 	pmap->pm_active = 0;
1494 	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
1495 	bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES);
1496 	TAILQ_INIT(&pmap->pm_ptbl_list);
1497 }
1498 
1499 /*
1500  * Release any resources held by the given physical map.
1501  * Called when a pmap initialized by mmu_booke_pinit is being released.
1502  * Should only be called if the map contains no valid mappings.
1503  */
1504 static void
1505 mmu_booke_release(mmu_t mmu, pmap_t pmap)
1506 {
1507 
1508 	KASSERT(pmap->pm_stats.resident_count == 0,
1509 	    ("pmap_release: pmap resident count %ld != 0",
1510 	    pmap->pm_stats.resident_count));
1511 
1512 	PMAP_LOCK_DESTROY(pmap);
1513 }
1514 
1515 /*
1516  * Insert the given physical page at the specified virtual address in the
1517  * target physical map with the protection requested. If specified the page
1518  * will be wired down.
1519  */
1520 static void
1521 mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1522     vm_prot_t prot, boolean_t wired)
1523 {
1524 
1525 	vm_page_lock_queues();
1526 	PMAP_LOCK(pmap);
1527 	mmu_booke_enter_locked(mmu, pmap, va, m, prot, wired);
1528 	vm_page_unlock_queues();
1529 	PMAP_UNLOCK(pmap);
1530 }
1531 
1532 static void
1533 mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1534     vm_prot_t prot, boolean_t wired)
1535 {
1536 	pte_t *pte;
1537 	vm_paddr_t pa;
1538 	uint32_t flags;
1539 	int su, sync;
1540 
1541 	pa = VM_PAGE_TO_PHYS(m);
1542 	su = (pmap == kernel_pmap);
1543 	sync = 0;
1544 
1545 	//debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x "
1546 	//		"pa=0x%08x prot=0x%08x wired=%d)\n",
1547 	//		(u_int32_t)pmap, su, pmap->pm_tid,
1548 	//		(u_int32_t)m, va, pa, prot, wired);
1549 
1550 	if (su) {
1551 		KASSERT(((va >= virtual_avail) &&
1552 		    (va <= VM_MAX_KERNEL_ADDRESS)),
1553 		    ("mmu_booke_enter_locked: kernel pmap, non kernel va"));
1554 	} else {
1555 		KASSERT((va <= VM_MAXUSER_ADDRESS),
1556 		    ("mmu_booke_enter_locked: user pmap, non user va"));
1557 	}
1558 	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
1559 	    (m->oflags & VPO_BUSY) != 0 || VM_OBJECT_LOCKED(m->object),
1560 	    ("mmu_booke_enter_locked: page %p is not busy", m));
1561 
1562 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1563 
1564 	/*
1565 	 * If there is an existing mapping, and the physical address has not
1566 	 * changed, it must be a protection or wiring change.
1567 	 */
1568 	if (((pte = pte_find(mmu, pmap, va)) != NULL) &&
1569 	    (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) {
1570 
1571 		/*
1572 		 * Before actually updating pte->flags we calculate and
1573 		 * prepare its new value in a helper var.
1574 		 */
1575 		flags = pte->flags;
1576 		flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED);
1577 
1578 		/* Wiring change, just update stats. */
1579 		if (wired) {
1580 			if (!PTE_ISWIRED(pte)) {
1581 				flags |= PTE_WIRED;
1582 				pmap->pm_stats.wired_count++;
1583 			}
1584 		} else {
1585 			if (PTE_ISWIRED(pte)) {
1586 				flags &= ~PTE_WIRED;
1587 				pmap->pm_stats.wired_count--;
1588 			}
1589 		}
1590 
1591 		if (prot & VM_PROT_WRITE) {
1592 			/* Add write permissions. */
1593 			flags |= PTE_SW;
1594 			if (!su)
1595 				flags |= PTE_UW;
1596 
1597 			if ((flags & PTE_MANAGED) != 0)
1598 				vm_page_flag_set(m, PG_WRITEABLE);
1599 		} else {
1600 			/* Handle modified pages, sense modify status. */
1601 
1602 			/*
1603 			 * The PTE_MODIFIED flag could be set by underlying
1604 			 * TLB misses since we last read it (above), possibly
1605 			 * other CPUs could update it so we check in the PTE
1606 			 * directly rather than rely on that saved local flags
1607 			 * copy.
1608 			 */
1609 			if (PTE_ISMODIFIED(pte))
1610 				vm_page_dirty(m);
1611 		}
1612 
1613 		if (prot & VM_PROT_EXECUTE) {
1614 			flags |= PTE_SX;
1615 			if (!su)
1616 				flags |= PTE_UX;
1617 
1618 			/*
1619 			 * Check existing flags for execute permissions: if we
1620 			 * are turning execute permissions on, icache should
1621 			 * be flushed.
1622 			 */
1623 			if ((pte->flags & (PTE_UX | PTE_SX)) == 0)
1624 				sync++;
1625 		}
1626 
1627 		flags &= ~PTE_REFERENCED;
1628 
1629 		/*
1630 		 * The new flags value is all calculated -- only now actually
1631 		 * update the PTE.
1632 		 */
1633 		mtx_lock_spin(&tlbivax_mutex);
1634 		tlb_miss_lock();
1635 
1636 		tlb0_flush_entry(va);
1637 		pte->flags = flags;
1638 
1639 		tlb_miss_unlock();
1640 		mtx_unlock_spin(&tlbivax_mutex);
1641 
1642 	} else {
1643 		/*
1644 		 * If there is an existing mapping, but it's for a different
1645 		 * physical address, pte_enter() will delete the old mapping.
1646 		 */
1647 		//if ((pte != NULL) && PTE_ISVALID(pte))
1648 		//	debugf("mmu_booke_enter_locked: replace\n");
1649 		//else
1650 		//	debugf("mmu_booke_enter_locked: new\n");
1651 
1652 		/* Now set up the flags and install the new mapping. */
1653 		flags = (PTE_SR | PTE_VALID);
1654 		flags |= PTE_M;
1655 
1656 		if (!su)
1657 			flags |= PTE_UR;
1658 
1659 		if (prot & VM_PROT_WRITE) {
1660 			flags |= PTE_SW;
1661 			if (!su)
1662 				flags |= PTE_UW;
1663 
1664 			if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0)
1665 				vm_page_flag_set(m, PG_WRITEABLE);
1666 		}
1667 
1668 		if (prot & VM_PROT_EXECUTE) {
1669 			flags |= PTE_SX;
1670 			if (!su)
1671 				flags |= PTE_UX;
1672 		}
1673 
1674 		/* If it's wired, update stats. */
1675 		if (wired) {
1676 			pmap->pm_stats.wired_count++;
1677 			flags |= PTE_WIRED;
1678 		}
1679 
1680 		pte_enter(mmu, pmap, m, va, flags);
1681 
1682 		/* Flush the real memory from the instruction cache. */
1683 		if (prot & VM_PROT_EXECUTE)
1684 			sync++;
1685 	}
1686 
1687 	if (sync && (su || pmap == PCPU_GET(curpmap))) {
1688 		__syncicache((void *)va, PAGE_SIZE);
1689 		sync = 0;
1690 	}
1691 }
1692 
1693 /*
1694  * Maps a sequence of resident pages belonging to the same object.
1695  * The sequence begins with the given page m_start.  This page is
1696  * mapped at the given virtual address start.  Each subsequent page is
1697  * mapped at a virtual address that is offset from start by the same
1698  * amount as the page is offset from m_start within the object.  The
1699  * last page in the sequence is the page with the largest offset from
1700  * m_start that can be mapped at a virtual address less than the given
1701  * virtual address end.  Not every virtual page between start and end
1702  * is mapped; only those for which a resident page exists with the
1703  * corresponding offset from m_start are mapped.
1704  */
1705 static void
1706 mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
1707     vm_offset_t end, vm_page_t m_start, vm_prot_t prot)
1708 {
1709 	vm_page_t m;
1710 	vm_pindex_t diff, psize;
1711 
1712 	psize = atop(end - start);
1713 	m = m_start;
1714 	vm_page_lock_queues();
1715 	PMAP_LOCK(pmap);
1716 	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
1717 		mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m,
1718 		    prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
1719 		m = TAILQ_NEXT(m, listq);
1720 	}
1721 	vm_page_unlock_queues();
1722 	PMAP_UNLOCK(pmap);
1723 }
1724 
1725 static void
1726 mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1727     vm_prot_t prot)
1728 {
1729 
1730 	vm_page_lock_queues();
1731 	PMAP_LOCK(pmap);
1732 	mmu_booke_enter_locked(mmu, pmap, va, m,
1733 	    prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
1734 	vm_page_unlock_queues();
1735 	PMAP_UNLOCK(pmap);
1736 }
1737 
1738 /*
1739  * Remove the given range of addresses from the specified map.
1740  *
1741  * It is assumed that the start and end are properly rounded to the page size.
1742  */
1743 static void
1744 mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva)
1745 {
1746 	pte_t *pte;
1747 	uint8_t hold_flag;
1748 
1749 	int su = (pmap == kernel_pmap);
1750 
1751 	//debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n",
1752 	//		su, (u_int32_t)pmap, pmap->pm_tid, va, endva);
1753 
1754 	if (su) {
1755 		KASSERT(((va >= virtual_avail) &&
1756 		    (va <= VM_MAX_KERNEL_ADDRESS)),
1757 		    ("mmu_booke_remove: kernel pmap, non kernel va"));
1758 	} else {
1759 		KASSERT((va <= VM_MAXUSER_ADDRESS),
1760 		    ("mmu_booke_remove: user pmap, non user va"));
1761 	}
1762 
1763 	if (PMAP_REMOVE_DONE(pmap)) {
1764 		//debugf("mmu_booke_remove: e (empty)\n");
1765 		return;
1766 	}
1767 
1768 	hold_flag = PTBL_HOLD_FLAG(pmap);
1769 	//debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag);
1770 
1771 	vm_page_lock_queues();
1772 	PMAP_LOCK(pmap);
1773 	for (; va < endva; va += PAGE_SIZE) {
1774 		pte = pte_find(mmu, pmap, va);
1775 		if ((pte != NULL) && PTE_ISVALID(pte))
1776 			pte_remove(mmu, pmap, va, hold_flag);
1777 	}
1778 	PMAP_UNLOCK(pmap);
1779 	vm_page_unlock_queues();
1780 
1781 	//debugf("mmu_booke_remove: e\n");
1782 }
1783 
1784 /*
1785  * Remove physical page from all pmaps in which it resides.
1786  */
1787 static void
1788 mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
1789 {
1790 	pv_entry_t pv, pvn;
1791 	uint8_t hold_flag;
1792 
1793 	vm_page_lock_queues();
1794 	for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) {
1795 		pvn = TAILQ_NEXT(pv, pv_link);
1796 
1797 		PMAP_LOCK(pv->pv_pmap);
1798 		hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap);
1799 		pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag);
1800 		PMAP_UNLOCK(pv->pv_pmap);
1801 	}
1802 	vm_page_flag_clear(m, PG_WRITEABLE);
1803 	vm_page_unlock_queues();
1804 }
1805 
1806 /*
1807  * Map a range of physical addresses into kernel virtual address space.
1808  */
1809 static vm_offset_t
1810 mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start,
1811     vm_offset_t pa_end, int prot)
1812 {
1813 	vm_offset_t sva = *virt;
1814 	vm_offset_t va = sva;
1815 
1816 	//debugf("mmu_booke_map: s (sva = 0x%08x pa_start = 0x%08x pa_end = 0x%08x)\n",
1817 	//		sva, pa_start, pa_end);
1818 
1819 	while (pa_start < pa_end) {
1820 		mmu_booke_kenter(mmu, va, pa_start);
1821 		va += PAGE_SIZE;
1822 		pa_start += PAGE_SIZE;
1823 	}
1824 	*virt = va;
1825 
1826 	//debugf("mmu_booke_map: e (va = 0x%08x)\n", va);
1827 	return (sva);
1828 }
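
/*
 * Caller sketch (not compiled; 'pa' is a hypothetical physical start):
 * map four contiguous physical pages and advance the KVA cursor.
 */
#if 0
	vm_offset_t cursor, sva;
	vm_offset_t pa = 0x10000000;

	cursor = virtual_avail;
	sva = mmu_booke_map(mmu, &cursor, pa, pa + 4 * PAGE_SIZE,
	    VM_PROT_READ | VM_PROT_WRITE);
	/* sva is the start of the mapping; cursor has advanced four pages. */
#endif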
1829 
1830 /*
1831  * The pmap must be activated before its address space can be accessed in any
1832  * way.
1833  */
1834 static void
1835 mmu_booke_activate(mmu_t mmu, struct thread *td)
1836 {
1837 	pmap_t pmap;
1838 
1839 	pmap = &td->td_proc->p_vmspace->vm_pmap;
1840 
1841 	CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x%08x)",
1842 	    __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
1843 
1844 	KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!"));
1845 
1846 	mtx_lock_spin(&sched_lock);
1847 
1848 	atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask));
1849 	PCPU_SET(curpmap, pmap);
1850 
1851 	if (pmap->pm_tid[PCPU_GET(cpuid)] == TID_NONE)
1852 		tid_alloc(pmap);
1853 
1854 	/* Load PID0 register with pmap tid value. */
1855 	mtspr(SPR_PID0, pmap->pm_tid[PCPU_GET(cpuid)]);
1856 	__asm __volatile("isync");
1857 
1858 	mtx_unlock_spin(&sched_lock);
1859 
1860 	CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__,
1861 	    pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm);
1862 }
1863 
1864 /*
1865  * Deactivate the specified process's address space.
1866  */
1867 static void
1868 mmu_booke_deactivate(mmu_t mmu, struct thread *td)
1869 {
1870 	pmap_t pmap;
1871 
1872 	pmap = &td->td_proc->p_vmspace->vm_pmap;
1873 
1874 	CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%08x",
1875 	    __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
1876 
1877 	atomic_clear_int(&pmap->pm_active, PCPU_GET(cpumask));
1878 	PCPU_SET(curpmap, NULL);
1879 }
1880 
1881 /*
1882  * Copy the range specified by src_addr/len
1883  * from the source map to the range dst_addr/len
1884  * in the destination map.
1885  *
1886  * This routine is only advisory and need not do anything.
1887  */
1888 static void
1889 mmu_booke_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
1890     vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
1891 {
1892 
1893 }
1894 
1895 /*
1896  * Set the physical protection on the specified range of this map as requested.
1897  */
1898 static void
1899 mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
1900     vm_prot_t prot)
1901 {
1902 	vm_offset_t va;
1903 	vm_page_t m;
1904 	pte_t *pte;
1905 
1906 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1907 		mmu_booke_remove(mmu, pmap, sva, eva);
1908 		return;
1909 	}
1910 
1911 	if (prot & VM_PROT_WRITE)
1912 		return;
1913 
1914 	vm_page_lock_queues();
1915 	PMAP_LOCK(pmap);
1916 	for (va = sva; va < eva; va += PAGE_SIZE) {
1917 		if ((pte = pte_find(mmu, pmap, va)) != NULL) {
1918 			if (PTE_ISVALID(pte)) {
1919 				m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1920 
1921 				mtx_lock_spin(&tlbivax_mutex);
1922 				tlb_miss_lock();
1923 
1924 				/* Handle modified pages. */
1925 				if (PTE_ISMODIFIED(pte) && PTE_ISMANAGED(pte))
1926 					vm_page_dirty(m);
1927 
1928 				tlb0_flush_entry(va);
1929 				pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);
1930 
1931 				tlb_miss_unlock();
1932 				mtx_unlock_spin(&tlbivax_mutex);
1933 			}
1934 		}
1935 	}
1936 	PMAP_UNLOCK(pmap);
1937 	vm_page_unlock_queues();
1938 }
1939 
1940 /*
1941  * Clear the write and modified bits in each of the given page's mappings.
1942  */
1943 static void
1944 mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
1945 {
1946 	pv_entry_t pv;
1947 	pte_t *pte;
1948 
1949 	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
1950 	    ("mmu_booke_remove_write: page %p is not managed", m));
1951 
1952 	/*
1953 	 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
1954 	 * another thread while the object is locked.  Thus, if PG_WRITEABLE
1955 	 * is clear, no page table entries need updating.
1956 	 */
1957 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
1958 	if ((m->oflags & VPO_BUSY) == 0 &&
1959 	    (m->flags & PG_WRITEABLE) == 0)
1960 		return;
1961 	vm_page_lock_queues();
1962 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
1963 		PMAP_LOCK(pv->pv_pmap);
1964 		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
1965 			if (PTE_ISVALID(pte)) {
1966 				m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1967 
1968 				mtx_lock_spin(&tlbivax_mutex);
1969 				tlb_miss_lock();
1970 
1971 				/* Handle modified pages. */
1972 				if (PTE_ISMODIFIED(pte))
1973 					vm_page_dirty(m);
1974 
1975 				/* Flush mapping from TLB0. */
1976 				pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);
1977 
1978 				tlb_miss_unlock();
1979 				mtx_unlock_spin(&tlbivax_mutex);
1980 			}
1981 		}
1982 		PMAP_UNLOCK(pv->pv_pmap);
1983 	}
1984 	vm_page_flag_clear(m, PG_WRITEABLE);
1985 	vm_page_unlock_queues();
1986 }
1987 
1988 static void
1989 mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
1990 {
1991 	pte_t *pte;
1992 	pmap_t pmap;
1993 	vm_page_t m;
1994 	vm_offset_t addr;
1995 	vm_paddr_t pa;
1996 	int active, valid;
1997 
1998 	va = trunc_page(va);
1999 	sz = round_page(sz);
2000 
2001 	vm_page_lock_queues();
2002 	pmap = PCPU_GET(curpmap);
2003 	active = (pm == kernel_pmap || pm == pmap) ? 1 : 0;
2004 	while (sz > 0) {
2005 		PMAP_LOCK(pm);
2006 		pte = pte_find(mmu, pm, va);
2007 		valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0;
2008 		if (valid)
2009 			pa = PTE_PA(pte);
2010 		PMAP_UNLOCK(pm);
2011 		if (valid) {
2012 			if (!active) {
2013 				/* Create a mapping in the active pmap. */
2014 				addr = 0;
2015 				m = PHYS_TO_VM_PAGE(pa);
2016 				PMAP_LOCK(pmap);
2017 				pte_enter(mmu, pmap, m, addr,
2018 				    PTE_SR | PTE_VALID | PTE_UR);
2019 				__syncicache((void *)addr, PAGE_SIZE);
2020 				pte_remove(mmu, pmap, addr, PTBL_UNHOLD);
2021 				PMAP_UNLOCK(pmap);
2022 			} else
2023 				__syncicache((void *)va, PAGE_SIZE);
2024 		}
2025 		va += PAGE_SIZE;
2026 		sz -= PAGE_SIZE;
2027 	}
2028 	vm_page_unlock_queues();
2029 }
2030 
2031 /*
2032  * Atomically extract and hold the physical page with the given
2033  * pmap and virtual address pair if that mapping permits the given
2034  * protection.
2035  */
2036 static vm_page_t
2037 mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va,
2038     vm_prot_t prot)
2039 {
2040 	pte_t *pte;
2041 	vm_page_t m;
2042 	uint32_t pte_wbit;
2043 	vm_paddr_t pa;
2044 
2045 	m = NULL;
2046 	pa = 0;
2047 	PMAP_LOCK(pmap);
2048 retry:
2049 	pte = pte_find(mmu, pmap, va);
2050 	if ((pte != NULL) && PTE_ISVALID(pte)) {
2051 		if (pmap == kernel_pmap)
2052 			pte_wbit = PTE_SW;
2053 		else
2054 			pte_wbit = PTE_UW;
2055 
2056 		if ((pte->flags & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) {
2057 			if (vm_page_pa_tryrelock(pmap, PTE_PA(pte), &pa))
2058 				goto retry;
2059 			m = PHYS_TO_VM_PAGE(PTE_PA(pte));
2060 			vm_page_hold(m);
2061 		}
2062 	}
2063 
2064 	PA_UNLOCK_COND(pa);
2065 	PMAP_UNLOCK(pmap);
2066 	return (m);
2067 }
2068 
2069 /*
2070  * Initialize a vm_page's machine-dependent fields.
2071  */
2072 static void
2073 mmu_booke_page_init(mmu_t mmu, vm_page_t m)
2074 {
2075 
2076 	TAILQ_INIT(&m->md.pv_list);
2077 }
2078 
2079 /*
2080  * mmu_booke_zero_page_area zeros the specified hardware page by
2081  * mapping it into virtual memory and using bzero to clear
2082  * its contents.
2083  *
2084  * off and size must reside within a single page.
2085  */
2086 static void
2087 mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
2088 {
2089 	vm_offset_t va;
2090 
2091 	/* XXX KASSERT off and size are within a single page? */
2092 
2093 	mtx_lock(&zero_page_mutex);
2094 	va = zero_page_va;
2095 
2096 	mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
2097 	bzero((caddr_t)va + off, size);
2098 	mmu_booke_kremove(mmu, va);
2099 
2100 	mtx_unlock(&zero_page_mutex);
2101 }
2102 
2103 /*
2104  * mmu_booke_zero_page zeros the specified hardware page.
2105  */
2106 static void
2107 mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
2108 {
2109 
2110 	mmu_booke_zero_page_area(mmu, m, 0, PAGE_SIZE);
2111 }
2112 
2113 /*
2114  * mmu_booke_copy_page copies the specified (machine-independent) page by
2115  * mapping the page into virtual memory and using memcpy to copy the page,
2116  * one machine-dependent page at a time.
2117  */
2118 static void
2119 mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
2120 {
2121 	vm_offset_t sva, dva;
2122 
2123 	sva = copy_page_src_va;
2124 	dva = copy_page_dst_va;
2125 
2126 	mtx_lock(&copy_page_mutex);
2127 	mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm));
2128 	mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm));
2129 	memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
2130 	mmu_booke_kremove(mmu, dva);
2131 	mmu_booke_kremove(mmu, sva);
2132 	mtx_unlock(&copy_page_mutex);
2133 }
2134 
2135 /*
2136  * mmu_booke_zero_page_idle zeros the specified hardware page by mapping it
2137  * into virtual memory and using bzero to clear its contents. This is intended
2138  * to be called from the vm_pagezero process only and outside of Giant. No
2139  * lock is required.
2140  */
2141 static void
2142 mmu_booke_zero_page_idle(mmu_t mmu, vm_page_t m)
2143 {
2144 	vm_offset_t va;
2145 
2146 	va = zero_page_idle_va;
2147 	mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
2148 	bzero((caddr_t)va, PAGE_SIZE);
2149 	mmu_booke_kremove(mmu, va);
2150 }
2151 
2152 /*
2153  * Return whether or not the specified physical page was modified
2154  * in any of the physical maps.
2155  */
2156 static boolean_t
2157 mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
2158 {
2159 	pte_t *pte;
2160 	pv_entry_t pv;
2161 	boolean_t rv;
2162 
2163 	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
2164 	    ("mmu_booke_is_modified: page %p is not managed", m));
2165 	rv = FALSE;
2166 
2167 	/*
2168 	 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
2169 	 * concurrently set while the object is locked.  Thus, if PG_WRITEABLE
2170 	 * is clear, no PTEs can be modified.
2171 	 */
2172 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2173 	if ((m->oflags & VPO_BUSY) == 0 &&
2174 	    (m->flags & PG_WRITEABLE) == 0)
2175 		return (rv);
2176 	vm_page_lock_queues();
2177 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2178 		PMAP_LOCK(pv->pv_pmap);
2179 		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
2180 		    PTE_ISVALID(pte)) {
2181 			if (PTE_ISMODIFIED(pte))
2182 				rv = TRUE;
2183 		}
2184 		PMAP_UNLOCK(pv->pv_pmap);
2185 		if (rv)
2186 			break;
2187 	}
2188 	vm_page_unlock_queues();
2189 	return (rv);
2190 }
2191 
2192 /*
2193  * Return whether or not the specified virtual address is eligible
2194  * for prefault.
2195  */
2196 static boolean_t
2197 mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
2198 {
2199 
2200 	return (FALSE);
2201 }
2202 
2203 /*
2204  * Return whether or not the specified physical page was referenced
2205  * in any physical map.
2206  */
2207 static boolean_t
2208 mmu_booke_is_referenced(mmu_t mmu, vm_page_t m)
2209 {
2210 	pte_t *pte;
2211 	pv_entry_t pv;
2212 	boolean_t rv;
2213 
2214 	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
2215 	    ("mmu_booke_is_referenced: page %p is not managed", m));
2216 	rv = FALSE;
2217 	vm_page_lock_queues();
2218 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2219 		PMAP_LOCK(pv->pv_pmap);
2220 		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
2221 		    PTE_ISVALID(pte)) {
2222 			if (PTE_ISREFERENCED(pte))
2223 				rv = TRUE;
2224 		}
2225 		PMAP_UNLOCK(pv->pv_pmap);
2226 		if (rv)
2227 			break;
2228 	}
2229 	vm_page_unlock_queues();
2230 	return (rv);
2231 }
2232 
2233 /*
2234  * Clear the modify bits on the specified physical page.
2235  */
2236 static void
2237 mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
2238 {
2239 	pte_t *pte;
2240 	pv_entry_t pv;
2241 
2242 	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
2243 	    ("mmu_booke_clear_modify: page %p is not managed", m));
2244 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2245 	KASSERT((m->oflags & VPO_BUSY) == 0,
2246 	    ("mmu_booke_clear_modify: page %p is busy", m));
2247 
2248 	/*
2249 	 * If the page is not PG_WRITEABLE, then no PTEs can be modified.
2250 	 * If the object containing the page is locked and the page is not
2251 	 * VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
2252 	 */
2253 	if ((m->flags & PG_WRITEABLE) == 0)
2254 		return;
2255 	vm_page_lock_queues();
2256 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2257 		PMAP_LOCK(pv->pv_pmap);
2258 		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
2259 		    PTE_ISVALID(pte)) {
2260 			mtx_lock_spin(&tlbivax_mutex);
2261 			tlb_miss_lock();
2262 
2263 			if (pte->flags & (PTE_SW | PTE_UW | PTE_MODIFIED)) {
2264 				tlb0_flush_entry(pv->pv_va);
2265 				pte->flags &= ~(PTE_SW | PTE_UW | PTE_MODIFIED |
2266 				    PTE_REFERENCED);
2267 			}
2268 
2269 			tlb_miss_unlock();
2270 			mtx_unlock_spin(&tlbivax_mutex);
2271 		}
2272 		PMAP_UNLOCK(pv->pv_pmap);
2273 	}
2274 	vm_page_unlock_queues();
2275 }
2276 
2277 /*
2278  * Return a count of reference bits for a page, clearing those bits.
2279  * It is not necessary for every reference bit to be cleared, but it
2280  * is necessary that 0 only be returned when there are truly no
2281  * reference bits set.
2282  *
2283  * XXX: The exact number of bits to check and clear is a matter that
2284  * should be tested and standardized at some point in the future for
2285  * optimal aging of shared pages.
2286  */
2287 static int
2288 mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
2289 {
2290 	pte_t *pte;
2291 	pv_entry_t pv;
2292 	int count;
2293 
2294 	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
2295 	    ("mmu_booke_ts_referenced: page %p is not managed", m));
2296 	count = 0;
2297 	vm_page_lock_queues();
2298 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2299 		PMAP_LOCK(pv->pv_pmap);
2300 		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
2301 		    PTE_ISVALID(pte)) {
2302 			if (PTE_ISREFERENCED(pte)) {
2303 				mtx_lock_spin(&tlbivax_mutex);
2304 				tlb_miss_lock();
2305 
2306 				tlb0_flush_entry(pv->pv_va);
2307 				pte->flags &= ~PTE_REFERENCED;
2308 
2309 				tlb_miss_unlock();
2310 				mtx_unlock_spin(&tlbivax_mutex);
2311 
2312 				if (++count > 4) {
2313 					PMAP_UNLOCK(pv->pv_pmap);
2314 					break;
2315 				}
2316 			}
2317 		}
2318 		PMAP_UNLOCK(pv->pv_pmap);
2319 	}
2320 	vm_page_unlock_queues();
2321 	return (count);
2322 }
2323 
2324 /*
2325  * Clear the reference bit on the specified physical page.
2326  */
2327 static void
2328 mmu_booke_clear_reference(mmu_t mmu, vm_page_t m)
2329 {
2330 	pte_t *pte;
2331 	pv_entry_t pv;
2332 
2333 	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
2334 	    ("mmu_booke_clear_reference: page %p is not managed", m));
2335 	vm_page_lock_queues();
2336 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2337 		PMAP_LOCK(pv->pv_pmap);
2338 		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
2339 		    PTE_ISVALID(pte)) {
2340 			if (PTE_ISREFERENCED(pte)) {
2341 				mtx_lock_spin(&tlbivax_mutex);
2342 				tlb_miss_lock();
2343 
2344 				tlb0_flush_entry(pv->pv_va);
2345 				pte->flags &= ~PTE_REFERENCED;
2346 
2347 				tlb_miss_unlock();
2348 				mtx_unlock_spin(&tlbivax_mutex);
2349 			}
2350 		}
2351 		PMAP_UNLOCK(pv->pv_pmap);
2352 	}
2353 	vm_page_unlock_queues();
2354 }
2355 
2356 /*
2357  * Change wiring attribute for a map/virtual-address pair.
2358  */
2359 static void
2360 mmu_booke_change_wiring(mmu_t mmu, pmap_t pmap, vm_offset_t va, boolean_t wired)
2361 {
2362 	pte_t *pte;
2363 
2364 	PMAP_LOCK(pmap);
2365 	if ((pte = pte_find(mmu, pmap, va)) != NULL) {
2366 		if (wired) {
2367 			if (!PTE_ISWIRED(pte)) {
2368 				pte->flags |= PTE_WIRED;
2369 				pmap->pm_stats.wired_count++;
2370 			}
2371 		} else {
2372 			if (PTE_ISWIRED(pte)) {
2373 				pte->flags &= ~PTE_WIRED;
2374 				pmap->pm_stats.wired_count--;
2375 			}
2376 		}
2377 	}
2378 	PMAP_UNLOCK(pmap);
2379 }
2380 
2381 /*
2382  * Return true if the pmap's pv is one of the first 16 pvs linked to this
2383  * page.  This count may be changed upwards or downwards in the future; it is
2384  * only necessary that true be returned for a small subset of pmaps for proper
2385  * page aging.
2386  */
2387 static boolean_t
2388 mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
2389 {
2390 	pv_entry_t pv;
2391 	int loops;
2392 	boolean_t rv;
2393 
2394 	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
2395 	    ("mmu_booke_page_exists_quick: page %p is not managed", m));
2396 	loops = 0;
2397 	rv = FALSE;
2398 	vm_page_lock_queues();
2399 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2400 		if (pv->pv_pmap == pmap) {
2401 			rv = TRUE;
2402 			break;
2403 		}
2404 		if (++loops >= 16)
2405 			break;
2406 	}
2407 	vm_page_unlock_queues();
2408 	return (rv);
2409 }
2410 
2411 /*
2412  * Return the number of managed mappings to the given physical page that are
2413  * wired.
2414  */
2415 static int
2416 mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m)
2417 {
2418 	pv_entry_t pv;
2419 	pte_t *pte;
2420 	int count = 0;
2421 
2422 	if ((m->flags & PG_FICTITIOUS) != 0)
2423 		return (count);
2424 	vm_page_lock_queues();
2425 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2426 		PMAP_LOCK(pv->pv_pmap);
2427 		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL)
2428 			if (PTE_ISVALID(pte) && PTE_ISWIRED(pte))
2429 				count++;
2430 		PMAP_UNLOCK(pv->pv_pmap);
2431 	}
2432 	vm_page_unlock_queues();
2433 	return (count);
2434 }
2435 
2436 static int
2437 mmu_booke_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
2438 {
2439 	int i;
2440 	vm_offset_t va;
2441 
2442 	/*
2443 	 * This currently does not work for ranges that span
2444 	 * multiple TLB1 entries.
2445 	 */
2446 	for (i = 0; i < tlb1_idx; i++) {
2447 		if (tlb1_iomapped(i, pa, size, &va) == 0)
2448 			return (0);
2449 	}
2450 
2451 	return (EFAULT);
2452 }
2453 
2454 vm_offset_t
2455 mmu_booke_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
2456     vm_size_t *sz)
2457 {
2458 	vm_paddr_t pa, ppa;
2459 	vm_offset_t va;
2460 	vm_size_t gran;
2461 
2462 	/* Raw physical memory dumps don't have a virtual address. */
2463 	if (md->md_vaddr == ~0UL) {
2464 		/* We always map a 256MB page at 256M. */
2465 		gran = 256 * 1024 * 1024;
2466 		pa = md->md_paddr + ofs;
2467 		ppa = pa & ~(gran - 1);
2468 		ofs = pa - ppa;
2469 		va = gran;
2470 		tlb1_set_entry(va, ppa, gran, _TLB_ENTRY_IO);
2471 		if (*sz > (gran - ofs))
2472 			*sz = gran - ofs;
2473 		return (va + ofs);
2474 	}
2475 
2476 	/* Minidumps are based on virtual memory addresses. */
2477 	va = md->md_vaddr + ofs;
2478 	if (va >= kernstart + kernsize) {
2479 		gran = PAGE_SIZE - (va & PAGE_MASK);
2480 		if (*sz > gran)
2481 			*sz = gran;
2482 	}
2483 	return (va);
2484 }
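
/*
 * Worked example for the raw-dump branch above (hypothetical address):
 * pa = 0x22345678 with gran = 256MB gives ppa = pa & ~(gran - 1) =
 * 0x20000000 and ofs = 0x02345678; the 256MB window is wired at
 * va = gran = 0x10000000, so the routine returns 0x10000000 + ofs =
 * 0x12345678.
 */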
2485 
2486 void
2487 mmu_booke_dumpsys_unmap(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
2488     vm_offset_t va)
2489 {
2490 
2491 	/* Raw physical memory dumps don't have a virtual address. */
2492 	if (md->md_vaddr == ~0UL) {
2493 		tlb1_idx--;
2494 		tlb1[tlb1_idx].mas1 = 0;
2495 		tlb1[tlb1_idx].mas2 = 0;
2496 		tlb1[tlb1_idx].mas3 = 0;
2497 		tlb1_write_entry(tlb1_idx);
2498 		return;
2499 	}
2500 
2501 	/* Minidumps are based on virtual memory addresses. */
2502 	/* Nothing to do... */
2503 }
2504 
2505 struct pmap_md *
2506 mmu_booke_scan_md(mmu_t mmu, struct pmap_md *prev)
2507 {
2508 	static struct pmap_md md;
2509 	pte_t *pte;
2510 	vm_offset_t va;
2511 
2512 	if (dumpsys_minidump) {
2513 		md.md_paddr = ~0UL;	/* Minidumps use virtual addresses. */
2514 		if (prev == NULL) {
2515 			/* 1st: kernel .data and .bss. */
2516 			md.md_index = 1;
2517 			md.md_vaddr = trunc_page((uintptr_t)_etext);
2518 			md.md_size = round_page((uintptr_t)_end) - md.md_vaddr;
2519 			return (&md);
2520 		}
2521 		switch (prev->md_index) {
2522 		case 1:
2523 			/* 2nd: msgbuf and tables (see pmap_bootstrap()). */
2524 			md.md_index = 2;
2525 			md.md_vaddr = data_start;
2526 			md.md_size = data_end - data_start;
2527 			break;
2528 		case 2:
2529 			/* 3rd: kernel VM. */
2530 			va = prev->md_vaddr + prev->md_size;
2531 			/* Find start of next chunk (from va). */
2532 			while (va < virtual_end) {
2533 				/* Don't dump the buffer cache. */
2534 				if (va >= kmi.buffer_sva &&
2535 				    va < kmi.buffer_eva) {
2536 					va = kmi.buffer_eva;
2537 					continue;
2538 				}
2539 				pte = pte_find(mmu, kernel_pmap, va);
2540 				if (pte != NULL && PTE_ISVALID(pte))
2541 					break;
2542 				va += PAGE_SIZE;
2543 			}
2544 			if (va < virtual_end) {
2545 				md.md_vaddr = va;
2546 				va += PAGE_SIZE;
2547 				/* Find last page in chunk. */
2548 				while (va < virtual_end) {
2549 					/* Don't run into the buffer cache. */
2550 					if (va == kmi.buffer_sva)
2551 						break;
2552 					pte = pte_find(mmu, kernel_pmap, va);
2553 					if (pte == NULL || !PTE_ISVALID(pte))
2554 						break;
2555 					va += PAGE_SIZE;
2556 				}
2557 				md.md_size = va - md.md_vaddr;
2558 				break;
2559 			}
2560 			md.md_index = 3;
2561 			/* FALLTHROUGH */
2562 		default:
2563 			return (NULL);
2564 		}
2565 	} else { /* Full dump: walk the physical memory regions. */
2566 		mem_regions(&physmem_regions, &physmem_regions_sz,
2567 		    &availmem_regions, &availmem_regions_sz);
2568 
2569 		if (prev == NULL) {
2570 			/* first physical chunk. */
2571 			md.md_paddr = physmem_regions[0].mr_start;
2572 			md.md_size = physmem_regions[0].mr_size;
2573 			md.md_vaddr = ~0UL;
2574 			md.md_index = 1;
2575 		} else if (md.md_index < physmem_regions_sz) {
2576 			md.md_paddr = physmem_regions[md.md_index].mr_start;
2577 			md.md_size = physmem_regions[md.md_index].mr_size;
2578 			md.md_vaddr = ~0UL;
2579 			md.md_index++;
2580 		} else {
2581 			/* There's no next physical chunk. */
2582 			return (NULL);
2583 		}
2584 	}
2585 
2586 	return (&md);
2587 }
2588 
2589 /*
2590  * Map a set of physical memory pages into the kernel virtual address space.
2591  * Return a pointer to where it is mapped. This routine is intended to be used
2592  * for mapping device memory, NOT real memory.
2593  */
2594 static void *
2595 mmu_booke_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
2596 {
2597 	void *res;
2598 	uintptr_t va;
2599 	vm_size_t sz;
2600 
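	/*
	 * Heuristic VA choice: map high physical addresses (>= 2GB) 1:1,
	 * and relocate lower ones into the device region at 0xe2000000.
	 */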
2601 	va = (pa >= 0x80000000) ? pa : (0xe2000000 + pa);
2602 	res = (void *)va;
2603 
2604 	do {
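		/* Largest power-of-4 chunk (a valid TLB1 page size) that fits. */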
2605 		sz = 1 << (ilog2(size) & ~1);
2606 		if (bootverbose)
2607 			printf("Wiring VA=%x to PA=%x (size=%x), "
2608 			    "using TLB1[%d]\n", va, pa, sz, tlb1_idx);
2609 		tlb1_set_entry(va, pa, sz, _TLB_ENTRY_IO);
2610 		size -= sz;
2611 		pa += sz;
2612 		va += sz;
2613 	} while (size > 0);
2614 
2615 	return (res);
2616 }
2617 
2618 /*
2619  * 'Unmap' a range mapped by mmu_booke_mapdev().
2620  */
2621 static void
2622 mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
2623 {
2624 	vm_offset_t base, offset;
2625 
2626 	/*
2627 	 * Unmap only if this is inside kernel virtual space.
2628 	 */
2629 	if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
2630 		base = trunc_page(va);
2631 		offset = va & PAGE_MASK;
2632 		size = roundup(offset + size, PAGE_SIZE);
2633 		kmem_free(kernel_map, base, size);
2634 	}
2635 }
2636 
2637 /*
2638  * mmu_booke_object_init_pt preloads the ptes for a given object into the
2639  * specified pmap. This eliminates the blast of soft faults on process startup
2640  * and immediately after an mmap.
2641  */
2642 static void
2643 mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
2644     vm_object_t object, vm_pindex_t pindex, vm_size_t size)
2645 {
2646 
2647 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
2648 	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
2649 	    ("mmu_booke_object_init_pt: non-device object"));
2650 }
2651 
2652 /*
2653  * Perform the pmap work for mincore.
2654  */
2655 static int
2656 mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
2657     vm_paddr_t *locked_pa)
2658 {
2659 
2660 	TODO;
2661 	return (0);
2662 }
2663 
2664 /**************************************************************************/
2665 /* TID handling */
2666 /**************************************************************************/
2667 
2668 /*
2669  * Allocate a TID. If necessary, steal one from someone else.
2670  * The new TID is flushed from the TLB before returning.
2671  */
2672 static tlbtid_t
2673 tid_alloc(pmap_t pmap)
2674 {
2675 	tlbtid_t tid;
2676 	int thiscpu;
2677 
2678 	KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap"));
2679 
2680 	CTR2(KTR_PMAP, "%s: s (pmap = %p)", __func__, pmap);
2681 
2682 	thiscpu = PCPU_GET(cpuid);
2683 
2684 	tid = PCPU_GET(tid_next);
2685 	if (tid > TID_MAX)
2686 		tid = TID_MIN;
2687 	PCPU_SET(tid_next, tid + 1);
2688 
2689 	/* If we are stealing a TID, clear the victim pmap's pm_tid entry. */
2690 	if (tidbusy[thiscpu][tid] != NULL) {
2691 
2692 		CTR2(KTR_PMAP, "%s: warning: stealing tid %d", __func__, tid);
2693 
2694 		tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE;
2695 
2696 		/* Flush all entries from TLB0 matching this TID. */
2697 		tid_flush(tid);
2698 	}
2699 
2700 	tidbusy[thiscpu][tid] = pmap;
2701 	pmap->pm_tid[thiscpu] = tid;
2702 	__asm __volatile("msync; isync");
2703 
2704 	CTR3(KTR_PMAP, "%s: e (%02d next = %02d)", __func__, tid,
2705 	    PCPU_GET(tid_next));
2706 
2707 	return (tid);
2708 }
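
/*
 * Illustrative allocation trace (not compiled; hypothetical pmaps,
 * assuming TID_MIN = 2 and TID_MAX = 255):
 */
#if 0
	tid_alloc(pmap_a);	/* returns 2, tid_next becomes 3 */
	tid_alloc(pmap_b);	/* returns 3, tid_next becomes 4 */
	/*
	 * Once the counter passes TID_MAX it wraps to TID_MIN, so TID 2
	 * is stolen from pmap_a and its TLB0 entries are flushed first.
	 */
#endif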
2709 
2710 /**************************************************************************/
2711 /* TLB0 handling */
2712 /**************************************************************************/
2713 
2714 static void
2715 tlb_print_entry(int i, uint32_t mas1, uint32_t mas2, uint32_t mas3,
2716     uint32_t mas7)
2717 {
2718 	int as;
2719 	char desc[3];
2720 	tlbtid_t tid;
2721 	vm_size_t size;
2722 	unsigned int tsize;
2723 
2724 	desc[2] = '\0';
2725 	if (mas1 & MAS1_VALID)
2726 		desc[0] = 'V';
2727 	else
2728 		desc[0] = ' ';
2729 
2730 	if (mas1 & MAS1_IPROT)
2731 		desc[1] = 'P';
2732 	else
2733 		desc[1] = ' ';
2734 
2735 	as = (mas1 & MAS1_TS_MASK) ? 1 : 0;
2736 	tid = MAS1_GETTID(mas1);
2737 
2738 	tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
2739 	size = 0;
2740 	if (tsize)
2741 		size = tsize2size(tsize);
2742 
2743 	debugf("%3d: (%s) [AS=%d] "
2744 	    "sz = 0x%08x tsz = %d tid = %d mas1 = 0x%08x "
2745 	    "mas2(va) = 0x%08x mas3(pa) = 0x%08x mas7 = 0x%08x\n",
2746 	    i, desc, as, size, tsize, tid, mas1, mas2, mas3, mas7);
2747 }
2748 
2749 /* Convert TLB0 va and way number to tlb0[] table index. */
2750 static inline unsigned int
2751 tlb0_tableidx(vm_offset_t va, unsigned int way)
2752 {
2753 	unsigned int idx;
2754 
2755 	idx = (way * TLB0_ENTRIES_PER_WAY);
2756 	idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT;
2757 	return (idx);
2758 }
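
/*
 * Worked example (hypothetical geometry: 16 sets per way, 4KB pages):
 * va 0x00305000 selects set (0x305 & 0xf) == 5, so way 1 refers to
 * tlb0[1 * TLB0_ENTRIES_PER_WAY + 5].
 */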
2759 
2760 /*
2761  * Invalidate TLB0 entry.
2762  */
2763 static inline void
2764 tlb0_flush_entry(vm_offset_t va)
2765 {
2766 
2767 	CTR2(KTR_PMAP, "%s: s va=0x%08x", __func__, va);
2768 
2769 	mtx_assert(&tlbivax_mutex, MA_OWNED);
2770 
2771 	__asm __volatile("tlbivax 0, %0" :: "r"(va & MAS2_EPN_MASK));
2772 	__asm __volatile("isync; msync");
2773 	__asm __volatile("tlbsync; msync");
2774 
2775 	CTR1(KTR_PMAP, "%s: e", __func__);
2776 }
2777 
2778 /* Print out contents of the MAS registers for each TLB0 entry */
2779 void
2780 tlb0_print_tlbentries(void)
2781 {
2782 	uint32_t mas0, mas1, mas2, mas3, mas7;
2783 	int entryidx, way, idx;
2784 
2785 	debugf("TLB0 entries:\n");
2786 	for (way = 0; way < TLB0_WAYS; way++)
2787 		for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) {
2788 
2789 			mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
2790 			mtspr(SPR_MAS0, mas0);
2791 			__asm __volatile("isync");
2792 
2793 			mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT;
2794 			mtspr(SPR_MAS2, mas2);
2795 
2796 			__asm __volatile("isync; tlbre");
2797 
2798 			mas1 = mfspr(SPR_MAS1);
2799 			mas2 = mfspr(SPR_MAS2);
2800 			mas3 = mfspr(SPR_MAS3);
2801 			mas7 = mfspr(SPR_MAS7);
2802 
2803 			idx = tlb0_tableidx(mas2, way);
2804 			tlb_print_entry(idx, mas1, mas2, mas3, mas7);
2805 		}
2806 }
2807 
2808 /**************************************************************************/
2809 /* TLB1 handling */
2810 /**************************************************************************/
2811 
2812 /*
2813  * TLB1 mapping notes:
2814  *
2815  * TLB1[0]	CCSRBAR
2816  * TLB1[1]	Kernel text and data.
2817  * TLB1[2-15]	Additional kernel text and data mappings (if required), PCI
2818  *		windows, other devices mappings.
2819  */
2820 
2821 /*
2822  * Write given entry to TLB1 hardware.
2823  * Use a 32-bit pa; clear the 4 high-order bits of the RPN (MAS7).
2824  */
2825 static void
2826 tlb1_write_entry(unsigned int idx)
2827 {
2828 	uint32_t mas0, mas7;
2829 
2830 	//debugf("tlb1_write_entry: s\n");
2831 
2832 	/* Clear high order RPN bits */
2833 	mas7 = 0;
2834 
2835 	/* Select entry */
2836 	mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(idx);
2837 	//debugf("tlb1_write_entry: mas0 = 0x%08x\n", mas0);
2838 
2839 	mtspr(SPR_MAS0, mas0);
2840 	__asm __volatile("isync");
2841 	mtspr(SPR_MAS1, tlb1[idx].mas1);
2842 	__asm __volatile("isync");
2843 	mtspr(SPR_MAS2, tlb1[idx].mas2);
2844 	__asm __volatile("isync");
2845 	mtspr(SPR_MAS3, tlb1[idx].mas3);
2846 	__asm __volatile("isync");
2847 	mtspr(SPR_MAS7, mas7);
2848 	__asm __volatile("isync; tlbwe; isync; msync");
2849 
2850 	//debugf("tlb1_write_entry: e\n");
2851 }
2852 
2853 /*
2854  * Return the largest uint value log such that 2^log <= num.
2855  */
2856 static unsigned int
2857 ilog2(unsigned int num)
2858 {
2859 	int lz;
2860 
2861 	__asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num));
2862 	return (31 - lz);
2863 }
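
/*
 * Equivalent portable sketch (assumes a compiler that provides
 * __builtin_clz); like the asm version, the result is undefined for
 * num == 0 (cntlzw returns 32, giving -1).
 */
#if 0
static unsigned int
ilog2_portable(unsigned int num)
{

	return (31 - __builtin_clz(num));
}
#endif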
2864 
2865 /*
2866  * Convert TLB TSIZE value to mapped region size.
2867  */
2868 static vm_size_t
2869 tsize2size(unsigned int tsize)
2870 {
2871 
2872 	/*
2873 	 * size = 4^tsize KB
2874 	 * size = 4^tsize * 2^10 = 2^(2 * tsize + 10)
2875 	 */
2876 
2877 	return ((1 << (2 * tsize)) * 1024);
2878 }
2879 
2880 /*
2881  * Convert region size (must be power of 4) to TLB TSIZE value.
2882  */
2883 static unsigned int
2884 size2tsize(vm_size_t size)
2885 {
2886 
2887 	return (ilog2(size) / 2 - 5);
2888 }
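
/*
 * Sanity-check sketch (not compiled): the two conversions above are
 * inverses for valid TSIZE values, e.g. tsize 7 -> 4^7 KB = 16MB ->
 * tsize 7.
 */
#if 0
	unsigned int t;

	for (t = 1; t <= 9; t++)
		KASSERT(size2tsize(tsize2size(t)) == t,
		    ("tsize/size round-trip failed"));
#endif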
2889 
2890 /*
2891  * Register permanent kernel mapping in TLB1.
2892  *
2893  * Entries are created starting from index 0 (current free entry is
2894  * kept in tlb1_idx) and are not supposed to be invalidated.
2895  */
2896 static int
2897 tlb1_set_entry(vm_offset_t va, vm_offset_t pa, vm_size_t size,
2898     uint32_t flags)
2899 {
2900 	uint32_t ts, tid;
2901 	int tsize;
2902 
2903 	if (tlb1_idx >= TLB1_ENTRIES) {
2904 		printf("tlb1_set_entry: TLB1 full!\n");
2905 		return (-1);
2906 	}
2907 
2908 	/* Convert size to TSIZE */
2909 	tsize = size2tsize(size);
2910 
2911 	tid = (TID_KERNEL << MAS1_TID_SHIFT) & MAS1_TID_MASK;
2912 	/* XXX TS is hard-coded to 0 for now as we only use a single address space. */
2913 	ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK;
2914 
2915 	/* XXX LOCK tlb1[] */
2916 
2917 	tlb1[tlb1_idx].mas1 = MAS1_VALID | MAS1_IPROT | ts | tid;
2918 	tlb1[tlb1_idx].mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK);
2919 	tlb1[tlb1_idx].mas2 = (va & MAS2_EPN_MASK) | flags;
2920 
2921 	/* Set supervisor RWX permission bits */
2922 	tlb1[tlb1_idx].mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX;
2923 
2924 	tlb1_write_entry(tlb1_idx++);
2925 
2926 	/* XXX UNLOCK tlb1[] */
2927 
2928 	/*
2929 	 * XXX in general TLB1 updates should be propagated between CPUs,
2930 	 * since the current design assumes the same TLB1 set-up on all
2931 	 * cores.
2932 	 */
2933 	return (0);
2934 }
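
/*
 * Usage sketch (hypothetical addresses): wire a 16MB I/O window at
 * va 0xf0000000 onto pa 0xe0000000.  The size must be a power of 4,
 * as TLB1 page sizes require.
 */
#if 0
	if (tlb1_set_entry(0xf0000000, 0xe0000000, 0x01000000,
	    _TLB_ENTRY_IO) != 0)
		panic("out of TLB1 entries");
#endif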
2935 
2936 static int
2937 tlb1_entry_size_cmp(const void *a, const void *b)
2938 {
2939 	const vm_size_t *sza;
2940 	const vm_size_t *szb;
2941 
2942 	sza = a;
2943 	szb = b;
2944 	if (*sza > *szb)
2945 		return (-1);
2946 	else if (*sza < *szb)
2947 		return (1);
2948 	else
2949 		return (0);
2950 }
2951 
2952 /*
2953  * Map in a contiguous RAM region into TLB1 using a maximum of
2954  * KERNEL_REGION_MAX_TLB_ENTRIES entries.
2955  *
2956  * If necessary, round up the last entry size and return the total size
2957  * used by all allocated entries.
2958  */
2959 vm_size_t
2960 tlb1_mapin_region(vm_offset_t va, vm_offset_t pa, vm_size_t size)
2961 {
2962 	vm_size_t entry_size[KERNEL_REGION_MAX_TLB_ENTRIES];
2963 	vm_size_t mapped_size, sz, esz;
2964 	unsigned int log;
2965 	int i;
2966 
2967 	CTR4(KTR_PMAP, "%s: region size = 0x%08x va = 0x%08x pa = 0x%08x",
2968 	    __func__, size, va, pa);
2969 
2970 	mapped_size = 0;
2971 	sz = size;
2972 	memset(entry_size, 0, sizeof(entry_size));
2973 
2974 	/* Calculate entry sizes. */
2975 	for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES && sz > 0; i++) {
2976 
2977 		/* Largest region that is a power of 4 and fits within size. */
2978 		log = ilog2(sz) / 2;
2979 		esz = 1 << (2 * log);
2980 
2981 		/* If this is the last entry, cover the remaining size. */
2982 		if (i == KERNEL_REGION_MAX_TLB_ENTRIES - 1) {
2983 			while (esz < sz)
2984 				esz = esz << 2;
2985 		}
2986 
2987 		entry_size[i] = esz;
2988 		mapped_size += esz;
2989 		if (esz < sz)
2990 			sz -= esz;
2991 		else
2992 			sz = 0;
2993 	}
2994 
2995 	/* Sort entry sizes, required to get proper entry address alignment. */
2996 	qsort(entry_size, KERNEL_REGION_MAX_TLB_ENTRIES,
2997 	    sizeof(vm_size_t), tlb1_entry_size_cmp);
2998 
2999 	/* Load TLB1 entries. */
3000 	for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES; i++) {
3001 		esz = entry_size[i];
3002 		if (!esz)
3003 			break;
3004 
3005 		CTR5(KTR_PMAP, "%s: entry %d: sz  = 0x%08x (va = 0x%08x "
3006 		    "pa = 0x%08x)", __func__, tlb1_idx, esz, va, pa);
3007 
3008 		tlb1_set_entry(va, pa, esz, _TLB_ENTRY_MEM);
3009 
3010 		va += esz;
3011 		pa += esz;
3012 	}
3013 
3014 	CTR3(KTR_PMAP, "%s: mapped size 0x%08x (wasted space 0x%08x)",
3015 	    __func__, mapped_size, mapped_size - size);
3016 
3017 	return (mapped_size);
3018 }
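
/*
 * Worked example for the splitting loop above (hypothetical size):
 * a 24MB region becomes power-of-4 entries 16MB + 4MB + 4MB
 * (ilog2(24MB)/2 = 12 -> 16MB, leaving 8MB -> 4MB + 4MB), so
 * mapped_size == size with no waste.  Sorting the sizes in descending
 * order keeps each entry naturally aligned at its own size, assuming
 * the region start is aligned to the largest entry.
 */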
3019 
3020 /*
3021  * TLB1 initialization routine, to be called after the very first
3022  * assembler level setup done in locore.S.
3023  */
3024 void
3025 tlb1_init(vm_offset_t ccsrbar)
3026 {
3027 	uint32_t mas0;
3028 
3029 	/* TLB1[1] is used to map the kernel. Save that entry. */
3030 	mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(1);
3031 	mtspr(SPR_MAS0, mas0);
3032 	__asm __volatile("isync; tlbre");
3033 
3034 	tlb1[1].mas1 = mfspr(SPR_MAS1);
3035 	tlb1[1].mas2 = mfspr(SPR_MAS2);
3036 	tlb1[1].mas3 = mfspr(SPR_MAS3);
3037 
3038 	/* Map in CCSRBAR in TLB1[0] */
3039 	tlb1_idx = 0;
3040 	tlb1_set_entry(CCSRBAR_VA, ccsrbar, CCSRBAR_SIZE, _TLB_ENTRY_IO);
3041 	/*
3042 	 * Set the next available TLB1 entry index. Note TLB1[1] is reserved
3043 	 * for the initial mapping of kernel text+data, which was set up early
3044 	 * in locore; we need to skip this [busy] entry.
3045 	 */
3046 	tlb1_idx = 2;
3047 
3048 	/* Set up TLB miss defaults. */
3049 	set_mas4_defaults();
3050 }
3051 
3052 /*
3053  * Set up MAS4 defaults.
3054  * These values are loaded to MAS0-2 on a TLB miss.
3055  */
3056 static void
3057 set_mas4_defaults(void)
3058 {
3059 	uint32_t mas4;
3060 
3061 	/* Defaults: TLB0, PID0, TSIZED=4K */
3062 	mas4 = MAS4_TLBSELD0;
3063 	mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK;
3064 #ifdef SMP
3065 	mas4 |= MAS4_MD;
3066 #endif
3067 	mtspr(SPR_MAS4, mas4);
3068 	__asm __volatile("isync");
3069 }
3070 
3071 /*
3072  * Print out contents of the MAS registers for each TLB1 entry
3073  */
3074 void
3075 tlb1_print_tlbentries(void)
3076 {
3077 	uint32_t mas0, mas1, mas2, mas3, mas7;
3078 	int i;
3079 
3080 	debugf("TLB1 entries:\n");
3081 	for (i = 0; i < TLB1_ENTRIES; i++) {
3082 
3083 		mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i);
3084 		mtspr(SPR_MAS0, mas0);
3085 
3086 		__asm __volatile("isync; tlbre");
3087 
3088 		mas1 = mfspr(SPR_MAS1);
3089 		mas2 = mfspr(SPR_MAS2);
3090 		mas3 = mfspr(SPR_MAS3);
3091 		mas7 = mfspr(SPR_MAS7);
3092 
3093 		tlb_print_entry(i, mas1, mas2, mas3, mas7);
3094 	}
3095 }
3096 
3097 /*
3098  * Print out contents of the in-ram tlb1 table.
3099  */
3100 void
3101 tlb1_print_entries(void)
3102 {
3103 	int i;
3104 
3105 	debugf("tlb1[] table entries:\n");
3106 	for (i = 0; i < TLB1_ENTRIES; i++)
3107 		tlb_print_entry(i, tlb1[i].mas1, tlb1[i].mas2, tlb1[i].mas3, 0);
3108 }
3109 
3110 /*
3111  * Return 0 if the physical IO range is encompassed by one of the
3112  * TLB1 entries, otherwise return the related error code.
3113  */
3114 static int
3115 tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va)
3116 {
3117 	uint32_t prot;
3118 	vm_paddr_t pa_start;
3119 	vm_paddr_t pa_end;
3120 	unsigned int entry_tsize;
3121 	vm_size_t entry_size;
3122 
3123 	*va = (vm_offset_t)NULL;
3124 
3125 	/* Skip invalid entries */
3126 	if (!(tlb1[i].mas1 & MAS1_VALID))
3127 		return (EINVAL);
3128 
3129 	/*
3130 	 * The entry must be cache-inhibited, guarded, and r/w
3131 	 * so it can function as an I/O page.
3132 	 */
3133 	prot = tlb1[i].mas2 & (MAS2_I | MAS2_G);
3134 	if (prot != (MAS2_I | MAS2_G))
3135 		return (EPERM);
3136 
3137 	prot = tlb1[i].mas3 & (MAS3_SR | MAS3_SW);
3138 	if (prot != (MAS3_SR | MAS3_SW))
3139 		return (EPERM);
3140 
3141 	/* The address should be within the entry range. */
3142 	entry_tsize = (tlb1[i].mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
3143 	KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize"));
3144 
3145 	entry_size = tsize2size(entry_tsize);
3146 	pa_start = tlb1[i].mas3 & MAS3_RPN;
3147 	pa_end = pa_start + entry_size - 1;
3148 
3149 	if ((pa < pa_start) || ((pa + size - 1) > pa_end))
3150 		return (ERANGE);
3151 
3152 	/* Return virtual address of this mapping. */
3153 	*va = (tlb1[i].mas2 & MAS2_EPN_MASK) + (pa - pa_start);
3154 	return (0);
3155 }
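
/*
 * Worked example (hypothetical entry): a valid I/O entry with tsize 7
 * (16MB) and pa_start = 0xe0000000 spans 0xe0000000 - 0xe0ffffff; a
 * request for pa = 0xe0100000, size = 0x1000 lies inside it, and with
 * the entry's EPN at 0xf0000000 the function returns *va = 0xf0100000.
 */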
3156