xref: /freebsd/sys/powerpc/booke/pmap.c (revision db612abe8df3355d1eb23bb3b50fdd97bc21e979)
1 /*-
2  * Copyright (C) 2007 Semihalf, Rafal Jaworowski <raj@semihalf.com>
3  * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. The name of the author may not be used to endorse or promote products
15  *    derived from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
20  * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
22  * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
23  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
24  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
25  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
26  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  *
28  * Some hw specific parts of this pmap were derived or influenced
29  * by NetBSD's ibm4xx pmap module. More generic code is shared with
30  * a few other pmap modules from the FreeBSD tree.
31  */
32 
33  /*
34   * VM layout notes:
35   *
36   * Kernel and user threads run within one common virtual address space
37   * defined by AS=0.
38   *
39   * Virtual address space layout:
40   * -----------------------------
41   * 0x0000_0000 - 0xbfff_efff	: user process
42  * 0xc000_0000 - 0xc1ff_ffff	: kernel reserved
43  *   0xc000_0000 - kernelend	: kernel code & data
44   *   0xc1ff_c000 - 0xc200_0000	: kstack0
45   * 0xc200_0000 - 0xffef_ffff	: KVA
46   *   0xc200_0000 - 0xc200_3fff : reserved for page zero/copy
47   *   0xc200_4000 - ptbl buf end: reserved for ptbl bufs
48   *   ptbl buf end- 0xffef_ffff	: actual free KVA space
49   * 0xfff0_0000 - 0xffff_ffff	: I/O devices region
50   */
51 
52 #include <sys/cdefs.h>
53 __FBSDID("$FreeBSD$");
54 
55 #include <sys/types.h>
56 #include <sys/param.h>
57 #include <sys/malloc.h>
58 #include <sys/proc.h>
59 #include <sys/user.h>
60 #include <sys/queue.h>
61 #include <sys/systm.h>
62 #include <sys/kernel.h>
63 #include <sys/msgbuf.h>
64 #include <sys/lock.h>
65 #include <sys/mutex.h>
66 #include <sys/vmmeter.h>
67 
68 #include <vm/vm.h>
69 #include <vm/vm_page.h>
70 #include <vm/vm_kern.h>
71 #include <vm/vm_pageout.h>
72 #include <vm/vm_extern.h>
73 #include <vm/vm_object.h>
74 #include <vm/vm_param.h>
75 #include <vm/vm_map.h>
76 #include <vm/vm_pager.h>
77 #include <vm/uma.h>
78 
79 #include <machine/cpu.h>
80 #include <machine/pcb.h>
81 #include <machine/powerpc.h>
82 
83 #include <machine/tlb.h>
84 #include <machine/spr.h>
85 #include <machine/vmparam.h>
86 #include <machine/md_var.h>
87 #include <machine/mmuvar.h>
88 #include <machine/pmap.h>
89 #include <machine/pte.h>
90 
91 #include "mmu_if.h"
92 
93 #define DEBUG
94 #undef DEBUG
95 
96 #ifdef  DEBUG
97 #define debugf(fmt, args...) printf(fmt, ##args)
98 #else
99 #define debugf(fmt, args...)
100 #endif
101 
102 #define TODO			panic("%s: not implemented", __func__);
103 #define memmove(d, s, l)	bcopy(s, d, l)
104 
105 #include "opt_sched.h"
106 #ifndef SCHED_4BSD
107 #error "e500 only works with SCHED_4BSD which uses a global scheduler lock."
108 #endif
109 extern struct mtx sched_lock;
110 
111 /* Kernel physical load address. */
112 extern uint32_t kernload;
113 
114 struct mem_region availmem_regions[MEM_REGIONS];
115 int availmem_regions_sz;
116 
117 /* Reserved KVA space and mutex for mmu_booke_zero_page. */
118 static vm_offset_t zero_page_va;
119 static struct mtx zero_page_mutex;
120 
121 /*
122  * Reserved KVA space for mmu_booke_zero_page_idle. This is used
123  * by the idle thread only, no lock required.
124  */
125 static vm_offset_t zero_page_idle_va;
126 
127 /* Reserved KVA space and mutex for mmu_booke_copy_page. */
128 static vm_offset_t copy_page_src_va;
129 static vm_offset_t copy_page_dst_va;
130 static struct mtx copy_page_mutex;
131 
132 /**************************************************************************/
133 /* PMAP */
134 /**************************************************************************/
135 
136 static void mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
137     vm_prot_t, boolean_t);
138 
139 unsigned int kptbl_min;		/* Index of the first kernel ptbl. */
140 unsigned int kernel_ptbls;	/* Number of KVA ptbls. */
141 
142 static int pagedaemon_waken;
143 
144 /*
145  * If a user pmap is processed with mmu_booke_remove and the resident count
146  * drops to 0, there are no more pages to remove, so we need not continue.
147  */
148 #define PMAP_REMOVE_DONE(pmap) \
149 	((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0)
150 
151 extern void load_pid0(tlbtid_t);
152 
153 /**************************************************************************/
154 /* TLB and TID handling */
155 /**************************************************************************/
156 
157 /* Translation ID busy table */
158 static volatile pmap_t tidbusy[TID_MAX + 1];
159 
160 /*
161  * Actual maximum number of TLB0 entries.
162  * This number differs between e500 core revisions.
163  */
164 u_int32_t tlb0_size;
165 u_int32_t tlb0_nways;
166 u_int32_t tlb0_nentries_per_way;
167 
168 #define TLB0_SIZE		(tlb0_size)
169 #define TLB0_NWAYS		(tlb0_nways)
170 #define TLB0_ENTRIES_PER_WAY	(tlb0_nentries_per_way)
171 
172 /* Pointer to kernel tlb0 table, allocated in mmu_booke_bootstrap() */
173 tlb_entry_t *tlb0;
174 
175 /*
176  * Spinlock to ensure proper synchronization between threads and
177  * between the TLB miss handler and the kernel.
178  */
179 static struct mtx tlb0_mutex;
180 
181 #define TLB1_SIZE 16
182 
183 /* In-ram copy of the TLB1 */
184 static tlb_entry_t tlb1[TLB1_SIZE];
185 
186 /* Next free entry in the TLB1 */
187 static unsigned int tlb1_idx;
188 
189 static tlbtid_t tid_alloc(struct pmap *);
190 static void tid_flush(tlbtid_t);
191 
192 extern void tlb1_inval_va(vm_offset_t);
193 extern void tlb0_inval_va(vm_offset_t);
194 
195 static void tlb_print_entry(int, u_int32_t, u_int32_t, u_int32_t, u_int32_t);
196 
197 static int tlb1_set_entry(vm_offset_t, vm_offset_t, vm_size_t, u_int32_t);
198 static void __tlb1_set_entry(unsigned int, vm_offset_t, vm_offset_t,
199     vm_size_t, u_int32_t, unsigned int, unsigned int);
200 static void tlb1_write_entry(unsigned int);
201 static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *);
202 static vm_size_t tlb1_mapin_region(vm_offset_t, vm_offset_t, vm_size_t);
203 
204 static vm_size_t tsize2size(unsigned int);
205 static unsigned int size2tsize(vm_size_t);
206 static unsigned int ilog2(unsigned int);
207 
208 static void set_mas4_defaults(void);
209 
210 static void tlb0_inval_entry(vm_offset_t, unsigned int);
211 static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int);
212 static void tlb0_write_entry(unsigned int, unsigned int);
213 static void tlb0_flush_entry(pmap_t, vm_offset_t);
214 static void tlb0_init(void);
215 
216 /**************************************************************************/
217 /* Page table management */
218 /**************************************************************************/
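
/*
 * A sketch of the two-level translation scheme used below: each pmap has a
 * page directory (pm_pdir[]) holding pointers to page tables (ptbls), and
 * each ptbl spans PTBL_PAGES pages of PTEs.  A virtual address is resolved
 * as in pte_find():
 *
 *	pte = &pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)];
 *
 * PDIR_IDX() and PTBL_IDX() are provided by the machine pte/pmap headers.
 */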
219 
220 /* Data for the pv entry allocation mechanism */
221 static uma_zone_t pvzone;
222 static struct vm_object pvzone_obj;
223 static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
224 
225 #define PV_ENTRY_ZONE_MIN	2048	/* min pv entries in uma zone */
226 
227 #ifndef PMAP_SHPGPERPROC
228 #define PMAP_SHPGPERPROC	200
229 #endif
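
/*
 * Note: mmu_booke_init() sizes pv_entry_max as shpgperproc * maxproc +
 * cnt.v_page_count (tunable via vm.pmap.shpgperproc and vm.pmap.pv_entries)
 * and sets pv_entry_high_water to 90% of that.
 */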
230 
231 static void ptbl_init(void);
232 static struct ptbl_buf *ptbl_buf_alloc(void);
233 static void ptbl_buf_free(struct ptbl_buf *);
234 static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);
235 
236 static void ptbl_alloc(mmu_t, pmap_t, unsigned int);
237 static void ptbl_free(mmu_t, pmap_t, unsigned int);
238 static void ptbl_hold(mmu_t, pmap_t, unsigned int);
239 static int ptbl_unhold(mmu_t, pmap_t, unsigned int);
240 
241 static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
242 static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
243 void pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, u_int32_t);
244 static int pte_remove(mmu_t, pmap_t, vm_offset_t, u_int8_t);
245 
246 pv_entry_t pv_alloc(void);
247 static void pv_free(pv_entry_t);
248 static void pv_insert(pmap_t, vm_offset_t, vm_page_t);
249 static void pv_remove(pmap_t, vm_offset_t, vm_page_t);
250 
251 /* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */
252 #define PTBL_BUFS		(128 * 16)
253 
254 struct ptbl_buf {
255 	TAILQ_ENTRY(ptbl_buf) link;	/* list link */
256 	vm_offset_t kva;		/* va of mapping */
257 };
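
/*
 * Each ptbl_buf describes one PTBL_PAGES-page slot of KVA reserved for a
 * ptbl; the slots are carved out of ptbl_buf_pool_vabase in ptbl_init().
 */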
258 
259 /* ptbl free list and a lock used for access synchronization. */
260 static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist;
261 static struct mtx ptbl_buf_freelist_lock;
262 
263 /* Base address of kva space allocated for ptbl bufs. */
264 static vm_offset_t ptbl_buf_pool_vabase;
265 
266 /* Pointer to ptbl_buf structures. */
267 static struct ptbl_buf *ptbl_bufs;
268 
269 /*
270  * Kernel MMU interface
271  */
272 static vm_offset_t	mmu_booke_addr_hint(mmu_t, vm_object_t, vm_offset_t, vm_size_t);
273 static void		mmu_booke_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
274 static void		mmu_booke_clear_modify(mmu_t, vm_page_t);
275 static void		mmu_booke_clear_reference(mmu_t, vm_page_t);
276 static void		mmu_booke_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t,
277     vm_offset_t);
278 static void		mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
279 static void		mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
280     vm_prot_t, boolean_t);
281 static void		mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
282     vm_page_t, vm_prot_t);
283 static void		mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t,
284     vm_prot_t);
285 static vm_paddr_t	mmu_booke_extract(mmu_t, pmap_t, vm_offset_t);
286 static vm_page_t	mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t,
287     vm_prot_t);
288 static void		mmu_booke_init(mmu_t);
289 static boolean_t	mmu_booke_is_modified(mmu_t, vm_page_t);
290 static boolean_t	mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
291 static boolean_t	mmu_booke_ts_referenced(mmu_t, vm_page_t);
292 static vm_offset_t	mmu_booke_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t,
293     int);
294 static int		mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t);
295 static void		mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t,
296     vm_object_t, vm_pindex_t, vm_size_t);
297 static boolean_t	mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t);
298 static void		mmu_booke_page_init(mmu_t, vm_page_t);
299 static int		mmu_booke_page_wired_mappings(mmu_t, vm_page_t);
300 static void		mmu_booke_pinit(mmu_t, pmap_t);
301 static void		mmu_booke_pinit0(mmu_t, pmap_t);
302 static void		mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
303     vm_prot_t);
304 static void		mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
305 static void		mmu_booke_qremove(mmu_t, vm_offset_t, int);
306 static void		mmu_booke_release(mmu_t, pmap_t);
307 static void		mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
308 static void		mmu_booke_remove_all(mmu_t, vm_page_t);
309 static void		mmu_booke_remove_write(mmu_t, vm_page_t);
310 static void		mmu_booke_zero_page(mmu_t, vm_page_t);
311 static void		mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
312 static void		mmu_booke_zero_page_idle(mmu_t, vm_page_t);
313 static void		mmu_booke_activate(mmu_t, struct thread *);
314 static void		mmu_booke_deactivate(mmu_t, struct thread *);
315 static void		mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
316 static void		*mmu_booke_mapdev(mmu_t, vm_offset_t, vm_size_t);
317 static void		mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t);
318 static vm_offset_t	mmu_booke_kextract(mmu_t, vm_offset_t);
319 static void		mmu_booke_kenter(mmu_t, vm_offset_t, vm_offset_t);
320 static void		mmu_booke_kremove(mmu_t, vm_offset_t);
321 static boolean_t	mmu_booke_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
322 static boolean_t	mmu_booke_page_executable(mmu_t, vm_page_t);
323 
324 static mmu_method_t mmu_booke_methods[] = {
325 	/* pmap dispatcher interface */
326 	MMUMETHOD(mmu_addr_hint,	mmu_booke_addr_hint),
327 	MMUMETHOD(mmu_change_wiring,	mmu_booke_change_wiring),
328 	MMUMETHOD(mmu_clear_modify,	mmu_booke_clear_modify),
329 	MMUMETHOD(mmu_clear_reference,	mmu_booke_clear_reference),
330 	MMUMETHOD(mmu_copy,		mmu_booke_copy),
331 	MMUMETHOD(mmu_copy_page,	mmu_booke_copy_page),
332 	MMUMETHOD(mmu_enter,		mmu_booke_enter),
333 	MMUMETHOD(mmu_enter_object,	mmu_booke_enter_object),
334 	MMUMETHOD(mmu_enter_quick,	mmu_booke_enter_quick),
335 	MMUMETHOD(mmu_extract,		mmu_booke_extract),
336 	MMUMETHOD(mmu_extract_and_hold,	mmu_booke_extract_and_hold),
337 	MMUMETHOD(mmu_init,		mmu_booke_init),
338 	MMUMETHOD(mmu_is_modified,	mmu_booke_is_modified),
339 	MMUMETHOD(mmu_is_prefaultable,	mmu_booke_is_prefaultable),
340 	MMUMETHOD(mmu_ts_referenced,	mmu_booke_ts_referenced),
341 	MMUMETHOD(mmu_map,		mmu_booke_map),
342 	MMUMETHOD(mmu_mincore,		mmu_booke_mincore),
343 	MMUMETHOD(mmu_object_init_pt,	mmu_booke_object_init_pt),
344 	MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick),
345 	MMUMETHOD(mmu_page_init,	mmu_booke_page_init),
346 	MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings),
347 	MMUMETHOD(mmu_pinit,		mmu_booke_pinit),
348 	MMUMETHOD(mmu_pinit0,		mmu_booke_pinit0),
349 	MMUMETHOD(mmu_protect,		mmu_booke_protect),
350 	MMUMETHOD(mmu_qenter,		mmu_booke_qenter),
351 	MMUMETHOD(mmu_qremove,		mmu_booke_qremove),
352 	MMUMETHOD(mmu_release,		mmu_booke_release),
353 	MMUMETHOD(mmu_remove,		mmu_booke_remove),
354 	MMUMETHOD(mmu_remove_all,	mmu_booke_remove_all),
355 	MMUMETHOD(mmu_remove_write,	mmu_booke_remove_write),
356 	MMUMETHOD(mmu_zero_page,	mmu_booke_zero_page),
357 	MMUMETHOD(mmu_zero_page_area,	mmu_booke_zero_page_area),
358 	MMUMETHOD(mmu_zero_page_idle,	mmu_booke_zero_page_idle),
359 	MMUMETHOD(mmu_activate,		mmu_booke_activate),
360 	MMUMETHOD(mmu_deactivate,	mmu_booke_deactivate),
361 
362 	/* Internal interfaces */
363 	MMUMETHOD(mmu_bootstrap,	mmu_booke_bootstrap),
364 	MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped),
365 	MMUMETHOD(mmu_mapdev,		mmu_booke_mapdev),
366 	MMUMETHOD(mmu_kenter,		mmu_booke_kenter),
367 	MMUMETHOD(mmu_kextract,		mmu_booke_kextract),
368 /*	MMUMETHOD(mmu_kremove,		mmu_booke_kremove),	*/
369 	MMUMETHOD(mmu_page_executable,	mmu_booke_page_executable),
370 	MMUMETHOD(mmu_unmapdev,		mmu_booke_unmapdev),
371 
372 	{ 0, 0 }
373 };
374 
375 static mmu_def_t booke_mmu = {
376 	MMU_TYPE_BOOKE,
377 	mmu_booke_methods,
378 	0
379 };
380 MMU_DEF(booke_mmu);
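
/*
 * Note: MMU_DEF() registers the method table above with the machine-
 * independent MMU dispatch layer (see mmu_if.h); generic pmap_*() entry
 * points are routed to the corresponding mmu_booke_*() implementations
 * through this kobj-style table.
 */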
381 
382 /*
383  * Return non-zero if the modified bit should be tracked for the given
384  * mapping; kernel mappings within the buffer cache clean region are excluded.
385  */
386 static __inline int
387 track_modified_needed(pmap_t pmap, vm_offset_t va)
388 {
389 
390 	if (pmap == kernel_pmap)
391 		return ((va < kmi.clean_sva) || (va >= kmi.clean_eva));
392 	else
393 		return (1);
394 }
395 
396 /* Read TLB0 size and associativity from the TLB0CFG register. */
397 static __inline void
398 tlb0_get_tlbconf(void)
399 {
400 	uint32_t tlb0_cfg;
401 
402 	tlb0_cfg = mfspr(SPR_TLB0CFG);
403 	tlb0_size = tlb0_cfg & TLBCFG_NENTRY_MASK;
404 	tlb0_nways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT;
405 	tlb0_nentries_per_way = tlb0_size/tlb0_nways;
406 }
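
/*
 * Typical geometry (assumed, not guaranteed for every part): e500v1 cores
 * report a 256-entry, 2-way set associative TLB0, while e500v2 cores report
 * 512 entries and 4 ways, i.e. 128 entries per way.
 */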
407 
408 /* Initialize pool of kva ptbl buffers. */
409 static void
410 ptbl_init(void)
411 {
412 	int i;
413 
414 	//debugf("ptbl_init: s (ptbl_bufs = 0x%08x size 0x%08x)\n",
415 	//		(u_int32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS);
416 	//debugf("ptbl_init: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)\n",
417 	//		ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE);
418 
419 	mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
420 	TAILQ_INIT(&ptbl_buf_freelist);
421 
422 	for (i = 0; i < PTBL_BUFS; i++) {
423 		ptbl_bufs[i].kva = ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE;
424 		TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
425 	}
426 
427 	//debugf("ptbl_init: e\n");
428 }
429 
430 /* Get a ptbl_buf from the free list. */
431 static struct ptbl_buf *
432 ptbl_buf_alloc(void)
433 {
434 	struct ptbl_buf *buf;
435 
436 	//debugf("ptbl_buf_alloc: s\n");
437 
438 	mtx_lock(&ptbl_buf_freelist_lock);
439 	buf = TAILQ_FIRST(&ptbl_buf_freelist);
440 	if (buf != NULL)
441 		TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
442 	mtx_unlock(&ptbl_buf_freelist_lock);
443 
444 	//debugf("ptbl_buf_alloc: e (buf = 0x%08x)\n", (u_int32_t)buf);
445 	return (buf);
446 }
447 
448 /* Return a ptbl buf to the free pool. */
449 static void
450 ptbl_buf_free(struct ptbl_buf *buf)
451 {
452 
453 	//debugf("ptbl_buf_free: s (buf = 0x%08x)\n", (u_int32_t)buf);
454 
455 	mtx_lock(&ptbl_buf_freelist_lock);
456 	TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
457 	mtx_unlock(&ptbl_buf_freelist_lock);
458 
459 	//debugf("ptbl_buf_free: e\n");
460 }
461 
462 /*
463  * Search the pmap's list of allocated ptbl bufs for the buf that maps
464  * the given ptbl and return it to the free pool.
465  */
466 static void
467 ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl)
468 {
469 	struct ptbl_buf *pbuf;
470 
471 	//debugf("ptbl_free_pmap_ptbl: s (pmap = 0x%08x ptbl = 0x%08x)\n",
472 	//		(u_int32_t)pmap, (u_int32_t)ptbl);
473 
474 	TAILQ_FOREACH(pbuf, &pmap->ptbl_list, link) {
475 		if (pbuf->kva == (vm_offset_t)ptbl) {
476 			/* Remove from pmap ptbl buf list. */
477 			TAILQ_REMOVE(&pmap->ptbl_list, pbuf, link);
478 
479 			/* Free the corresponding ptbl buf. */
480 			ptbl_buf_free(pbuf);
481 
482 			break;
483 		}
484 	}
485 
486 	//debugf("ptbl_free_pmap_ptbl: e\n");
487 }
488 
489 /* Allocate page table. */
490 static void
491 ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
492 {
493 	vm_page_t mtbl[PTBL_PAGES];
494 	vm_page_t m;
495 	struct ptbl_buf *pbuf;
496 	unsigned int pidx;
497 	int i;
498 
499 	//int su = (pmap == kernel_pmap);
500 	//debugf("ptbl_alloc: s (pmap = 0x%08x su = %d pdir_idx = %d)\n", (u_int32_t)pmap, su, pdir_idx);
501 
502 	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
503 	    ("ptbl_alloc: invalid pdir_idx"));
504 	KASSERT((pmap->pm_pdir[pdir_idx] == NULL),
505 	    ("ptbl_alloc: valid ptbl entry exists!"));
506 
507 	pbuf = ptbl_buf_alloc();
508 	if (pbuf == NULL)
509 		panic("ptbl_alloc: couldn't alloc kernel virtual memory");
510 	pmap->pm_pdir[pdir_idx] = (pte_t *)pbuf->kva;
511 	//debugf("ptbl_alloc: kva = 0x%08x\n", (u_int32_t)pmap->pm_pdir[pdir_idx]);
512 
513 	/* Allocate ptbl pages, this will sleep! */
514 	for (i = 0; i < PTBL_PAGES; i++) {
515 		pidx = (PTBL_PAGES * pdir_idx) + i;
516 		while ((m = vm_page_alloc(NULL, pidx, VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
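			/*
			 * No free pages: drop the pmap and page queue locks
			 * so the pagedaemon can make progress, sleep until
			 * memory is available, then relock and retry.
			 */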
517 			PMAP_UNLOCK(pmap);
518 			vm_page_unlock_queues();
519 			VM_WAIT;
520 			vm_page_lock_queues();
521 			PMAP_LOCK(pmap);
522 		}
523 		mtbl[i] = m;
524 	}
525 
526 	/* Map in allocated pages into kernel_pmap. */
527 	mmu_booke_qenter(mmu, (vm_offset_t)pmap->pm_pdir[pdir_idx], mtbl, PTBL_PAGES);
528 
529 	/* Zero whole ptbl. */
530 	bzero((caddr_t)pmap->pm_pdir[pdir_idx], PTBL_PAGES * PAGE_SIZE);
531 
532 	/* Add pbuf to the pmap ptbl bufs list. */
533 	TAILQ_INSERT_TAIL(&pmap->ptbl_list, pbuf, link);
534 
535 	//debugf("ptbl_alloc: e\n");
536 }
537 
538 /* Free ptbl pages and invalidate pdir entry. */
539 static void
540 ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
541 {
542 	pte_t *ptbl;
543 	vm_paddr_t pa;
544 	vm_offset_t va;
545 	vm_page_t m;
546 	int i;
547 
548 	//int su = (pmap == kernel_pmap);
549 	//debugf("ptbl_free: s (pmap = 0x%08x su = %d pdir_idx = %d)\n", (u_int32_t)pmap, su, pdir_idx);
550 
551 	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
552 	    ("ptbl_free: invalid pdir_idx"));
553 
554 	ptbl = pmap->pm_pdir[pdir_idx];
555 
556 	//debugf("ptbl_free: ptbl = 0x%08x\n", (u_int32_t)ptbl);
557 	KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));
558 
559 	for (i = 0; i < PTBL_PAGES; i++) {
560 		va = ((vm_offset_t)ptbl + (i * PAGE_SIZE));
561 		pa = pte_vatopa(mmu, kernel_pmap, va);
562 		m = PHYS_TO_VM_PAGE(pa);
563 		vm_page_free_zero(m);
564 		atomic_subtract_int(&cnt.v_wire_count, 1);
565 		mmu_booke_kremove(mmu, va);
566 	}
567 
568 	ptbl_free_pmap_ptbl(pmap, ptbl);
569 	pmap->pm_pdir[pdir_idx] = NULL;
570 
571 	//debugf("ptbl_free: e\n");
572 }
573 
574 /*
575  * Decrement ptbl pages hold count and attempt to free ptbl pages.
576  * Called when removing a pte entry from the ptbl.
577  *
578  * Return 1 if ptbl pages were freed.
579  */
580 static int
581 ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
582 {
583 	pte_t *ptbl;
584 	vm_paddr_t pa;
585 	vm_page_t m;
586 	int i;
587 
588 	//int su = (pmap == kernel_pmap);
589 	//debugf("ptbl_unhold: s (pmap = %08x su = %d pdir_idx = %d)\n",
590 	//		(u_int32_t)pmap, su, pdir_idx);
591 
592 	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
593 	    ("ptbl_unhold: invalid pdir_idx"));
594 	KASSERT((pmap != kernel_pmap),
595 	    ("ptbl_unhold: unholding kernel ptbl!"));
596 
597 	ptbl = pmap->pm_pdir[pdir_idx];
598 
599 	//debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl);
600 	KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS),
601 	    ("ptbl_unhold: non kva ptbl"));
602 
603 	/* decrement hold count */
604 	for (i = 0; i < PTBL_PAGES; i++) {
605 		pa = pte_vatopa(mmu, kernel_pmap, (vm_offset_t)ptbl + (i * PAGE_SIZE));
606 		m = PHYS_TO_VM_PAGE(pa);
607 		m->wire_count--;
608 	}
609 
610 	/*
611 	 * Free ptbl pages if there are no pte entries in this ptbl.
612 	 * wire_count has the same value for all ptbl pages, so check
613 	 * the last page.
614 	 */
615 	if (m->wire_count == 0) {
616 		ptbl_free(mmu, pmap, pdir_idx);
617 
618 		//debugf("ptbl_unhold: e (freed ptbl)\n");
619 		return (1);
620 	}
621 
622 	//debugf("ptbl_unhold: e\n");
623 	return (0);
624 }
625 
626 /*
627  * Increment hold count for ptbl pages. This routine is used when a
628  * new pte entry is being inserted into the ptbl.
629  */
630 static void
631 ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
632 {
633 	vm_paddr_t pa;
634 	pte_t *ptbl;
635 	vm_page_t m;
636 	int i;
637 
638 	//debugf("ptbl_hold: s (pmap = 0x%08x pdir_idx = %d)\n", (u_int32_t)pmap, pdir_idx);
639 
640 	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
641 	    ("ptbl_hold: invalid pdir_idx"));
642 	KASSERT((pmap != kernel_pmap),
643 	    ("ptbl_hold: holding kernel ptbl!"));
644 
645 	ptbl = pmap->pm_pdir[pdir_idx];
646 
647 	KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));
648 
649 	for (i = 0; i < PTBL_PAGES; i++) {
650 		pa = pte_vatopa(mmu, kernel_pmap, (vm_offset_t)ptbl + (i * PAGE_SIZE));
651 		m = PHYS_TO_VM_PAGE(pa);
652 		m->wire_count++;
653 	}
654 
655 	//debugf("ptbl_hold: e\n");
656 }
657 
658 /* Allocate pv_entry structure. */
659 pv_entry_t
660 pv_alloc(void)
661 {
662 	pv_entry_t pv;
663 
664 	debugf("pv_alloc: s\n");
665 
666 	pv_entry_count++;
667 	if ((pv_entry_count > pv_entry_high_water) && (pagedaemon_waken == 0)) {
668 		pagedaemon_waken = 1;
669 		wakeup (&vm_pages_needed);
670 	}
671 	pv = uma_zalloc(pvzone, M_NOWAIT);
672 
673 	debugf("pv_alloc: e\n");
674 	return (pv);
675 }
676 
677 /* Free pv_entry structure. */
678 static __inline void
679 pv_free(pv_entry_t pve)
680 {
681 	//debugf("pv_free: s\n");
682 
683 	pv_entry_count--;
684 	uma_zfree(pvzone, pve);
685 
686 	//debugf("pv_free: e\n");
687 }
688 
689 
690 /* Allocate and initialize pv_entry structure. */
691 static void
692 pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m)
693 {
694 	pv_entry_t pve;
695 
696 	//int su = (pmap == kernel_pmap);
697 	//debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su,
698 	//	(u_int32_t)pmap, va, (u_int32_t)m);
699 
700 	pve = pv_alloc();
701 	if (pve == NULL)
702 		panic("pv_insert: no pv entries!");
703 
704 	pve->pv_pmap = pmap;
705 	pve->pv_va = va;
706 
707 	/* add to pv_list */
708 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
709 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
710 
711 	TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link);
712 
713 	//debugf("pv_insert: e\n");
714 }
715 
716 /* Destroy pv entry. */
717 static void
718 pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m)
719 {
720 	pv_entry_t pve;
721 
722 	//int su = (pmap == kernel_pmap);
723 	//debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va);
724 
725 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
726 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
727 
728 	/* find pv entry */
729 	TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) {
730 		if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
731 			/* remove from pv_list */
732 			TAILQ_REMOVE(&m->md.pv_list, pve, pv_link);
733 			if (TAILQ_EMPTY(&m->md.pv_list))
734 				vm_page_flag_clear(m, PG_WRITEABLE);
735 
736 			/* free pv entry struct */
737 			pv_free(pve);
738 
739 			break;
740 		}
741 	}
742 
743 	//debugf("pv_remove: e\n");
744 }
745 
746 /*
747  * Clear the pte entry and, if requested, try to free the page table page.
748  *
749  * Return 1 if ptbl pages were freed, otherwise return 0.
750  */
751 static int
752 pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, u_int8_t flags)
753 {
754 	unsigned int pdir_idx = PDIR_IDX(va);
755 	unsigned int ptbl_idx = PTBL_IDX(va);
756 	vm_page_t m;
757 	pte_t *ptbl;
758 	pte_t *pte;
759 
760 	//int su = (pmap == kernel_pmap);
761 	//debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n",
762 	//		su, (u_int32_t)pmap, va, flags);
763 
764 	ptbl = pmap->pm_pdir[pdir_idx];
765 	KASSERT(ptbl, ("pte_remove: null ptbl"));
766 
767 	pte = &ptbl[ptbl_idx];
768 
769 	if (pte == NULL || !PTE_ISVALID(pte))
770 		return (0);
771 
772 	/* Get vm_page_t for mapped pte. */
773 	m = PHYS_TO_VM_PAGE(PTE_PA(pte));
774 
775 	if (PTE_ISWIRED(pte))
776 		pmap->pm_stats.wired_count--;
777 
778 	if (!PTE_ISFAKE(pte)) {
779 		/* Handle managed entry. */
780 		if (PTE_ISMANAGED(pte)) {
781 
782 			/* Handle modified pages. */
783 			if (PTE_ISMODIFIED(pte)) {
784 				if (track_modified_needed(pmap, va))
785 					vm_page_dirty(m);
786 			}
787 
788 			/* Referenced pages. */
789 			if (PTE_ISREFERENCED(pte))
790 				vm_page_flag_set(m, PG_REFERENCED);
791 
792 			/* Remove pv_entry from pv_list. */
793 			pv_remove(pmap, va, m);
794 		}
795 	}
796 
797 	pte->flags = 0;
798 	pte->rpn = 0;
799 	pmap->pm_stats.resident_count--;
800 
801 	if (flags & PTBL_UNHOLD) {
802 		//debugf("pte_remove: e (unhold)\n");
803 		return (ptbl_unhold(mmu, pmap, pdir_idx));
804 	}
805 
806 	//debugf("pte_remove: e\n");
807 	return (0);
808 }
809 
810 /*
811  * Insert PTE for a given page and virtual address.
812  */
813 void
814 pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, u_int32_t flags)
815 {
816 	unsigned int pdir_idx = PDIR_IDX(va);
817 	unsigned int ptbl_idx = PTBL_IDX(va);
818 	pte_t *ptbl;
819 	pte_t *pte;
820 
821 	//int su = (pmap == kernel_pmap);
822 	//debugf("pte_enter: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va);
823 
824 	/* Get the page table pointer. */
825 	ptbl = pmap->pm_pdir[pdir_idx];
826 
827 	if (ptbl) {
828 		/*
829 		 * Check if there is a valid mapping for the requested
830 		 * va; if there is, remove it.
831 		 */
832 		pte = &pmap->pm_pdir[pdir_idx][ptbl_idx];
833 		if (PTE_ISVALID(pte)) {
834 			pte_remove(mmu, pmap, va, PTBL_HOLD);
835 		} else {
836 			/*
837 			 * pte is not used, increment hold count
838 			 * for ptbl pages.
839 			 */
840 			if (pmap != kernel_pmap)
841 				ptbl_hold(mmu, pmap, pdir_idx);
842 		}
843 	} else {
844 		/* Allocate page table pages. */
845 		ptbl_alloc(mmu, pmap, pdir_idx);
846 	}
847 
848 	/* Flush entry from TLB. */
849 	tlb0_flush_entry(pmap, va);
850 
851 	pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]);
852 
853 	/*
854 	 * Insert pv_entry into pv_list for mapped page
855 	 * if part of managed memory.
856 	 */
857         if ((m->flags & PG_FICTITIOUS) == 0) {
858 		if ((m->flags & PG_UNMANAGED) == 0) {
859 			pte->flags |= PTE_MANAGED;
860 
861 			/* Create and insert pv entry. */
862 			pv_insert(pmap, va, m);
863 		}
864         } else {
865 		pte->flags |= PTE_FAKE;
866 	}
867 
868 	pmap->pm_stats.resident_count++;
869 	pte->rpn = VM_PAGE_TO_PHYS(m) & ~PTE_PA_MASK;
870 	pte->flags |= (PTE_VALID | flags);
871 
872 	//debugf("pte_enter: e\n");
873 }
874 
875 /* Return the pa for the given pmap/va. */
876 static vm_paddr_t
877 pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
878 {
879 	vm_paddr_t pa = 0;
880 	pte_t *pte;
881 
882 	pte = pte_find(mmu, pmap, va);
883 	if ((pte != NULL) && PTE_ISVALID(pte))
884 		pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
885 	return (pa);
886 }
887 
888 /* Get a pointer to a PTE in a page table. */
889 static pte_t *
890 pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
891 {
892 	unsigned int pdir_idx = PDIR_IDX(va);
893 	unsigned int ptbl_idx = PTBL_IDX(va);
894 
895 	KASSERT((pmap != NULL), ("pte_find: invalid pmap"));
896 
897 	if (pmap->pm_pdir[pdir_idx])
898 		return (&(pmap->pm_pdir[pdir_idx][ptbl_idx]));
899 
900 	return (NULL);
901 }
902 
903 /**************************************************************************/
904 /* PMAP related */
905 /**************************************************************************/
906 
907 /*
908  * This is called during e500_init, before the system is really initialized.
909  */
910 static void
911 mmu_booke_bootstrap(mmu_t mmu, vm_offset_t kernelstart, vm_offset_t kernelend)
912 {
913 	vm_offset_t phys_kernelend;
914 	struct mem_region *mp, *mp1;
915 	int cnt, i, j;
916 	u_int s, e, sz;
917 	u_int phys_avail_count;
918 	vm_size_t physsz, hwphyssz;
919 	vm_offset_t kernel_pdir;
920 
921 	debugf("mmu_booke_bootstrap: entered\n");
922 
923 	/* Align kernel start and end address (kernel image). */
924 	kernelstart = trunc_page(kernelstart);
925 	kernelend = round_page(kernelend);
926 
927 	/* Allocate space for the message buffer. */
928 	msgbufp = (struct msgbuf *)kernelend;
929 	kernelend += MSGBUF_SIZE;
930 	debugf(" msgbufp at 0x%08x end = 0x%08x\n", (u_int32_t)msgbufp,
931 	    kernelend);
932 
933 	kernelend = round_page(kernelend);
934 
935 	/* Allocate space for tlb0 table. */
936 	tlb0_get_tlbconf(); /* Read TLB0 size and associativity. */
937 	tlb0 = (tlb_entry_t *)kernelend;
938 	kernelend += sizeof(tlb_entry_t) * tlb0_size;
939 	memset((void *)tlb0, 0, sizeof(tlb_entry_t) * tlb0_size);
940 	debugf(" tlb0 at 0x%08x end = 0x%08x\n", (u_int32_t)tlb0, kernelend);
941 
942 	kernelend = round_page(kernelend);
943 
944 	/* Allocate space for ptbl_bufs. */
945 	ptbl_bufs = (struct ptbl_buf *)kernelend;
946 	kernelend += sizeof(struct ptbl_buf) * PTBL_BUFS;
947 	memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_BUFS);
948 	debugf(" ptbl_bufs at 0x%08x end = 0x%08x\n", (u_int32_t)ptbl_bufs,
949 	    kernelend);
950 
951 	kernelend = round_page(kernelend);
952 
953 	/* Allocate PTE tables for kernel KVA. */
954 	kernel_pdir = kernelend;
955 	kernel_ptbls = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS +
956 	    PDIR_SIZE - 1) / PDIR_SIZE;
957 	kernelend += kernel_ptbls * PTBL_PAGES * PAGE_SIZE;
958 	memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE);
959 	debugf(" kernel ptbls: %d\n", kernel_ptbls);
960 	debugf(" kernel pdir at 0x%08x\n", kernel_pdir);
961 
962 	if (kernelend - kernelstart > 0x1000000) {
963 		kernelend = (kernelend + 0x3fffff) & ~0x3fffff;
964 		tlb1_mapin_region(kernelstart + 0x1000000,
965 		    kernload + 0x1000000, kernelend - kernelstart - 0x1000000);
966 	} else
967 		kernelend = (kernelend + 0xffffff) & ~0xffffff;
968 
969 	/*******************************************************/
970 	/* Set the start and end of kva. */
971 	/*******************************************************/
972 	virtual_avail = kernelend;
973 	virtual_end = VM_MAX_KERNEL_ADDRESS;
974 
975 	/* Allocate KVA space for page zero/copy operations. */
976 	zero_page_va = virtual_avail;
977 	virtual_avail += PAGE_SIZE;
978 	zero_page_idle_va = virtual_avail;
979 	virtual_avail += PAGE_SIZE;
980 	copy_page_src_va = virtual_avail;
981 	virtual_avail += PAGE_SIZE;
982 	copy_page_dst_va = virtual_avail;
983 	virtual_avail += PAGE_SIZE;
984 
985 	/* Initialize page zero/copy mutexes. */
986 	mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF);
987 	mtx_init(&copy_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF);
988 
989 	/* Initialize tlb0 table mutex. */
990 	mtx_init(&tlb0_mutex, "tlb0", NULL, MTX_SPIN | MTX_RECURSE);
991 
992 	/* Allocate KVA space for ptbl bufs. */
993 	ptbl_buf_pool_vabase = virtual_avail;
994 	virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE;
995 
996 	debugf("ptbl_buf_pool_vabase = 0x%08x\n", ptbl_buf_pool_vabase);
997 	debugf("virtual_avail = %08x\n", virtual_avail);
998 	debugf("virtual_end   = %08x\n", virtual_end);
999 
1000 	/* Calculate corresponding physical addresses for the kernel region. */
1001 	phys_kernelend = kernload + (kernelend - kernelstart);
1002 
1003 	debugf("kernel image and allocated data:\n");
1004 	debugf(" kernload    = 0x%08x\n", kernload);
1005 	debugf(" kernelstart = 0x%08x\n", kernelstart);
1006 	debugf(" kernelend   = 0x%08x\n", kernelend);
1007 	debugf(" kernel size = 0x%08x\n", kernelend - kernelstart);
1008 
1009 	if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz)
1010 		panic("mmu_booke_bootstrap: phys_avail too small");
1011 
1012 	/*
1013 	 * Remove the kernel physical address range from the avail
1014 	 * regions list. Page align all regions.
1015 	 * Non-page aligned memory isn't very interesting to us.
1016 	 * Also, sort the entries for ascending addresses.
1017 	 */
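	/*
	 * Illustrative example (hypothetical numbers): given a single region
	 * 0x0000_0000 - 0x4000_0000 with the kernel resident at
	 * kernload ... phys_kernelend, the first check below splits it into
	 * 0x0000_0000 - kernload and phys_kernelend - 0x4000_0000.
	 */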
1018 	sz = 0;
1019 	cnt = availmem_regions_sz;
1020 	debugf("processing avail regions:\n");
1021 	for (mp = availmem_regions; mp->mr_size; mp++) {
1022 		s = mp->mr_start;
1023 		e = mp->mr_start + mp->mr_size;
1024 		debugf(" %08x-%08x -> ", s, e);
1025 		/* Check whether this region holds all of the kernel. */
1026 		if (s < kernload && e > phys_kernelend) {
1027 			availmem_regions[cnt].mr_start = phys_kernelend;
1028 			availmem_regions[cnt++].mr_size = e - phys_kernelend;
1029 			e = kernload;
1030 		}
1031 		/* Check whether this region starts within the kernel. */
1032 		if (s >= kernload && s < phys_kernelend) {
1033 			if (e <= phys_kernelend)
1034 				goto empty;
1035 			s = phys_kernelend;
1036 		}
1037 		/* Now look whether this region ends within the kernel. */
1038 		if (e > kernload && e <= phys_kernelend) {
1039 			if (s >= kernload)
1040 				goto empty;
1041 			e = kernload;
1042 		}
1043 		/* Now page align the start and size of the region. */
1044 		s = round_page(s);
1045 		e = trunc_page(e);
1046 		if (e < s)
1047 			e = s;
1048 		sz = e - s;
1049 		debugf("%08x-%08x = %x\n", s, e, sz);
1050 
1051 		/* Check whether some memory is left here. */
1052 		if (sz == 0) {
1053 		empty:
1054 			memmove(mp, mp + 1,
1055 			    (cnt - (mp - availmem_regions)) * sizeof(*mp));
1056 			cnt--;
1057 			mp--;
1058 			continue;
1059 		}
1060 
1061 		/* Do an insertion sort. */
1062 		for (mp1 = availmem_regions; mp1 < mp; mp1++)
1063 			if (s < mp1->mr_start)
1064 				break;
1065 		if (mp1 < mp) {
1066 			memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
1067 			mp1->mr_start = s;
1068 			mp1->mr_size = sz;
1069 		} else {
1070 			mp->mr_start = s;
1071 			mp->mr_size = sz;
1072 		}
1073 	}
1074 	availmem_regions_sz = cnt;
1075 
1076 	/*******************************************************/
1077 	/* Fill in phys_avail table, based on availmem_regions */
1078 	/*******************************************************/
1079 	phys_avail_count = 0;
1080 	physsz = 0;
1081 	hwphyssz = 0;
1082 	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
1083 
1084 	debugf("fill in phys_avail:\n");
1085 	for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) {
1086 
1087 		debugf(" region: 0x%08x - 0x%08x (0x%08x)\n",
1088 		    availmem_regions[i].mr_start,
1089 		    availmem_regions[i].mr_start + availmem_regions[i].mr_size,
1090 		    availmem_regions[i].mr_size);
1091 
1092 		if (hwphyssz != 0 && (physsz + availmem_regions[i].mr_size) >= hwphyssz) {
1093 			debugf(" hw.physmem adjust\n");
1094 			if (physsz < hwphyssz) {
1095 				phys_avail[j] = availmem_regions[i].mr_start;
1096 				phys_avail[j + 1] = availmem_regions[i].mr_start +
1097 				    hwphyssz - physsz;
1098 				physsz = hwphyssz;
1099 				phys_avail_count++;
1100 			}
1101 			break;
1102 		}
1103 
1104 		phys_avail[j] = availmem_regions[i].mr_start;
1105 		phys_avail[j + 1] = availmem_regions[i].mr_start +
1106 		    availmem_regions[i].mr_size;
1107 		phys_avail_count++;
1108 		physsz += availmem_regions[i].mr_size;
1109 	}
1110 	physmem = btoc(physsz);
1111 
1112 	/* Calculate the last available physical address. */
1113 	for (i = 0; phys_avail[i + 2] != 0; i += 2)
1114 		;
1115 	Maxmem = powerpc_btop(phys_avail[i + 1]);
1116 
1117 	debugf("Maxmem = 0x%08lx\n", Maxmem);
1118 	debugf("phys_avail_count = %d\n", phys_avail_count);
1119 	debugf("physsz = 0x%08x physmem = %ld (0x%08lx)\n", physsz, physmem, physmem);
1120 
1121 	/*******************************************************/
1122 	/* Initialize (statically allocated) kernel pmap. */
1123 	/*******************************************************/
1124 	PMAP_LOCK_INIT(kernel_pmap);
1125 	kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE;
1126 
1127 	debugf("kernel_pmap = 0x%08x\n", (u_int32_t)kernel_pmap);
1128 	debugf("kptbl_min = %d, kernel_kptbls = %d\n", kptbl_min, kernel_ptbls);
1129 	debugf("kernel pdir range: 0x%08x - 0x%08x\n",
1130 	    kptbl_min * PDIR_SIZE, (kptbl_min + kernel_ptbls) * PDIR_SIZE - 1);
1131 
1132 	/* Initialize kernel pdir */
1133 	for (i = 0; i < kernel_ptbls; i++)
1134 		kernel_pmap->pm_pdir[kptbl_min + i] =
1135 		    (pte_t *)(kernel_pdir + (i * PAGE_SIZE * PTBL_PAGES));
1136 
1137 	kernel_pmap->pm_tid = KERNEL_TID;
1138 	kernel_pmap->pm_active = ~0;
1139 
1140 	/* Initialize tidbusy with the kernel_pmap entry. */
1141 	tidbusy[0] = kernel_pmap;
1142 
1143 	/*******************************************************/
1144 	/* Final setup */
1145 	/*******************************************************/
1146 	/* Initialize TLB0 handling. */
1147 	tlb0_init();
1148 
1149 	debugf("mmu_booke_bootstrap: exit\n");
1150 }
1151 
1152 /*
1153  * Get the physical page address for the given pmap/virtual address.
1154  */
1155 static vm_paddr_t
1156 mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1157 {
1158 	vm_paddr_t pa;
1159 
1160 	PMAP_LOCK(pmap);
1161 	pa = pte_vatopa(mmu, pmap, va);
1162 	PMAP_UNLOCK(pmap);
1163 
1164 	return (pa);
1165 }
1166 
1167 /*
1168  * Extract the physical page address associated with the given
1169  * kernel virtual address.
1170  */
1171 static vm_paddr_t
1172 mmu_booke_kextract(mmu_t mmu, vm_offset_t va)
1173 {
1174 
1175 	return (pte_vatopa(mmu, kernel_pmap, va));
1176 }
1177 
1178 /*
1179  * Initialize the pmap module.
1180  * Called by vm_init, to initialize any structures that the pmap
1181  * system needs to map virtual memory.
1182  */
1183 static void
1184 mmu_booke_init(mmu_t mmu)
1185 {
1186 	int shpgperproc = PMAP_SHPGPERPROC;
1187 
1188 	//debugf("mmu_booke_init: s\n");
1189 
1190 	/*
1191 	 * Initialize the address space (zone) for the pv entries.  Set a
1192 	 * high water mark so that the system can recover from excessive
1193 	 * numbers of pv entries.
1194 	 */
1195 	pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
1196 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
1197 
1198 	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
1199 	pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
1200 
1201 	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
1202 	pv_entry_high_water = 9 * (pv_entry_max / 10);
1203 
1204 	uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
1205 
1206 	/* Pre-fill pvzone with initial number of pv entries. */
1207 	uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN);
1208 
1209 	/* Initialize ptbl allocation. */
1210 	ptbl_init();
1211 
1212 	//debugf("mmu_booke_init: e\n");
1213 }
1214 
1215 /*
1216  * Map a list of wired pages into kernel virtual address space.  This is
1217  * intended for temporary mappings which do not need page modification or
1218  * references recorded.  Existing mappings in the region are overwritten.
1219  */
1220 static void
1221 mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
1222 {
1223 	vm_offset_t va;
1224 
1225 	//debugf("mmu_booke_qenter: s (sva = 0x%08x count = %d)\n", sva, count);
1226 
1227 	va = sva;
1228 	while (count-- > 0) {
1229 		mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
1230 		va += PAGE_SIZE;
1231 		m++;
1232 	}
1233 
1234 	//debugf("mmu_booke_qenter: e\n");
1235 }
1236 
1237 /*
1238  * Remove page mappings from kernel virtual address space.  Intended for
1239  * temporary mappings entered by mmu_booke_qenter.
1240  */
1241 static void
1242 mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count)
1243 {
1244 	vm_offset_t va;
1245 
1246 	//debugf("mmu_booke_qremove: s (sva = 0x%08x count = %d)\n", sva, count);
1247 
1248 	va = sva;
1249 	while (count-- > 0) {
1250 		mmu_booke_kremove(mmu, va);
1251 		va += PAGE_SIZE;
1252 	}
1253 
1254 	//debugf("mmu_booke_qremove: e\n");
1255 }
1256 
1257 /*
1258  * Map a wired page into kernel virtual address space.
1259  */
1260 static void
1261 mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
1262 {
1263 	unsigned int pdir_idx = PDIR_IDX(va);
1264 	unsigned int ptbl_idx = PTBL_IDX(va);
1265 	u_int32_t flags;
1266 	pte_t *pte;
1267 
1268 	//debugf("mmu_booke_kenter: s (pdir_idx = %d ptbl_idx = %d va=0x%08x pa=0x%08x)\n",
1269 	//		pdir_idx, ptbl_idx, va, pa);
1270 
1271 	KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)),
1272 			("mmu_booke_kenter: invalid va"));
1273 
1274 #if 0
1275 	/* assume IO mapping, set I, G bits */
1276 	flags = (PTE_G | PTE_I | PTE_FAKE);
1277 
1278 	/* if mapping is within system memory, do not set I, G bits */
1279 	for (i = 0; i < totalmem_regions_sz; i++) {
1280 		if ((pa >= totalmem_regions[i].mr_start) &&
1281 				(pa < (totalmem_regions[i].mr_start +
1282 				       totalmem_regions[i].mr_size))) {
1283 			flags &= ~(PTE_I | PTE_G | PTE_FAKE);
1284 			break;
1285 		}
1286 	}
1287 #else
1288 	flags = 0;
1289 #endif
1290 
1291 	flags |= (PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID);
1292 
1293 	pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);
1294 
1295 	if (PTE_ISVALID(pte)) {
1296 		//debugf("mmu_booke_kenter: replacing entry!\n");
1297 
1298 		/* Flush entry from TLB0 */
1299 		tlb0_flush_entry(kernel_pmap, va);
1300 	}
1301 
1302 	pte->rpn = pa & ~PTE_PA_MASK;
1303 	pte->flags = flags;
1304 
1305 	//debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x "
1306 	//		"pa=0x%08x rpn=0x%08x flags=0x%08x\n",
1307 	//		pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags);
1308 
1309 	/* Flush the real memory from the instruction cache. */
1310 	if ((flags & (PTE_I | PTE_G)) == 0) {
1311 		__syncicache((void *)va, PAGE_SIZE);
1312 	}
1313 
1314 	//debugf("mmu_booke_kenter: e\n");
1315 }
1316 
1317 /*
1318  * Remove a page from kernel page table.
1319  */
1320 static void
1321 mmu_booke_kremove(mmu_t mmu, vm_offset_t va)
1322 {
1323 	unsigned int pdir_idx = PDIR_IDX(va);
1324 	unsigned int ptbl_idx = PTBL_IDX(va);
1325 	pte_t *pte;
1326 
1327 	//debugf("mmu_booke_kremove: s (va = 0x%08x)\n", va);
1328 
1329 	KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)),
1330 	    ("mmu_booke_kremove: invalid va"));
1331 
1332 	pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);
1333 
1334 	if (!PTE_ISVALID(pte)) {
1335 		//debugf("mmu_booke_kremove: e (invalid pte)\n");
1336 		return;
1337 	}
1338 
1339 	/* Invalidate entry in TLB0. */
1340 	tlb0_flush_entry(kernel_pmap, va);
1341 
1342 	pte->flags = 0;
1343 	pte->rpn = 0;
1344 
1345 	//debugf("mmu_booke_kremove: e\n");
1346 }
1347 
1348 /*
1349  * Initialize pmap associated with process 0.
1350  */
1351 static void
1352 mmu_booke_pinit0(mmu_t mmu, pmap_t pmap)
1353 {
1354 	//debugf("mmu_booke_pinit0: s (pmap = 0x%08x)\n", (u_int32_t)pmap);
1355 	mmu_booke_pinit(mmu, pmap);
1356 	PCPU_SET(curpmap, pmap);
1357 	//debugf("mmu_booke_pinit0: e\n");
1358 }
1359 
1360 /*
1361  * Initialize a preallocated and zeroed pmap structure,
1362  * such as one in a vmspace structure.
1363  */
1364 static void
1365 mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
1366 {
1367 
1368 	//struct thread *td;
1369 	//struct proc *p;
1370 
1371 	//td = PCPU_GET(curthread);
1372 	//p = td->td_proc;
1373 	//debugf("mmu_booke_pinit: s (pmap = 0x%08x)\n", (u_int32_t)pmap);
1374 	//printf("mmu_booke_pinit: proc %d '%s'\n", p->p_pid, p->p_comm);
1375 
1376 	KASSERT((pmap != kernel_pmap), ("mmu_booke_pinit: initializing kernel_pmap"));
1377 
1378 	PMAP_LOCK_INIT(pmap);
1379 	pmap->pm_tid = 0;
1380 	pmap->pm_active = 0;
1381 	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
1382 	bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES);
1383 
1384 	TAILQ_INIT(&pmap->ptbl_list);
1385 
1386 	//debugf("mmu_booke_pinit: e\n");
1387 }
1388 
1389 /*
1390  * Release any resources held by the given physical map.
1391  * Called when a pmap initialized by mmu_booke_pinit is being released.
1392  * Should only be called if the map contains no valid mappings.
1393  */
1394 static void
1395 mmu_booke_release(mmu_t mmu, pmap_t pmap)
1396 {
1397 
1398 	//debugf("mmu_booke_release: s\n");
1399 
1400 	PMAP_LOCK_DESTROY(pmap);
1401 
1402 	//debugf("mmu_booke_release: e\n");
1403 }
1404 
1405 #if 0
1406 /* Not needed, kernel page tables are statically allocated. */
1407 void
1408 mmu_booke_growkernel(vm_offset_t maxkvaddr)
1409 {
1410 }
1411 #endif
1412 
1413 /*
1414  * Insert the given physical page at the specified virtual address in the
1415  * target physical map with the protection requested. If specified, the page
1416  * will be wired down.
1417  */
1418 static void
1419 mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1420     vm_prot_t prot, boolean_t wired)
1421 {
1422 	vm_page_lock_queues();
1423 	PMAP_LOCK(pmap);
1424 	mmu_booke_enter_locked(mmu, pmap, va, m, prot, wired);
1425 	vm_page_unlock_queues();
1426 	PMAP_UNLOCK(pmap);
1427 }
1428 
1429 static void
1430 mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1431     vm_prot_t prot, boolean_t wired)
1432 {
1433 	pte_t *pte;
1434 	vm_paddr_t pa;
1435 	u_int32_t flags;
1436 	int su, sync;
1437 
1438 	pa = VM_PAGE_TO_PHYS(m);
1439 	su = (pmap == kernel_pmap);
1440 	sync = 0;
1441 
1442 	//debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x "
1443 	//		"pa=0x%08x prot=0x%08x wired=%d)\n",
1444 	//		(u_int32_t)pmap, su, pmap->pm_tid,
1445 	//		(u_int32_t)m, va, pa, prot, wired);
1446 
1447 	if (su) {
1448 		KASSERT(((va >= virtual_avail) && (va <= VM_MAX_KERNEL_ADDRESS)),
1449 				("mmu_booke_enter_locked: kernel pmap, non kernel va"));
1450 	} else {
1451 		KASSERT((va <= VM_MAXUSER_ADDRESS),
1452 				("mmu_booke_enter_locked: user pmap, non user va"));
1453 	}
1454 
1455 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1456 
1457 	/*
1458 	 * If there is an existing mapping, and the physical address has not
1459 	 * changed, this must be a protection or wiring change.
1460 	 */
1461 	if (((pte = pte_find(mmu, pmap, va)) != NULL) &&
1462 	    (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) {
1463 
1464 		//debugf("mmu_booke_enter_locked: update\n");
1465 
1466 		/* Wiring change, just update stats. */
1467 		if (wired) {
1468 			if (!PTE_ISWIRED(pte)) {
1469 				pte->flags |= PTE_WIRED;
1470 				pmap->pm_stats.wired_count++;
1471 			}
1472 		} else {
1473 			if (PTE_ISWIRED(pte)) {
1474 				pte->flags &= ~PTE_WIRED;
1475 				pmap->pm_stats.wired_count--;
1476 			}
1477 		}
1478 
1479 		/* Save the old bits and clear the ones we're interested in. */
1480 		flags = pte->flags;
1481 		pte->flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED);
1482 
1483 		if (prot & VM_PROT_WRITE) {
1484 			/* Add write permissions. */
1485 			pte->flags |= PTE_SW;
1486 			if (!su)
1487 				pte->flags |= PTE_UW;
1488 		} else {
1489 			/* Handle modified pages, sense modify status. */
1490 			if (PTE_ISMODIFIED(pte)) {
1491 				if (track_modified_needed(pmap, va))
1492 					vm_page_dirty(m);
1493 			}
1494 		}
1495 
1496 		/* If we're turning on execute permissions, flush the icache. */
1497 		if (prot & VM_PROT_EXECUTE) {
1498 			pte->flags |= PTE_SX;
1499 			if (!su)
1500 				pte->flags |= PTE_UX;
1501 
1502 			if ((flags & (PTE_UX | PTE_SX)) == 0)
1503 				sync++;
1504 		}
1505 
1506 		/* Flush the old mapping from TLB0. */
1507 		pte->flags &= ~PTE_REFERENCED;
1508 		tlb0_flush_entry(pmap, va);
1509 	} else {
1510 		/*
1511 		 * If there is an existing mapping, but it's for a different
1512 		 * physical address, pte_enter() will delete the old mapping.
1513 		 */
1514 		//if ((pte != NULL) && PTE_ISVALID(pte))
1515 		//	debugf("mmu_booke_enter_locked: replace\n");
1516 		//else
1517 		//	debugf("mmu_booke_enter_locked: new\n");
1518 
1519 		/* Now set up the flags and install the new mapping. */
1520 		flags = (PTE_SR | PTE_VALID);
1521 
1522 		if (!su)
1523 			flags |= PTE_UR;
1524 
1525 		if (prot & VM_PROT_WRITE) {
1526 			flags |= PTE_SW;
1527 			if (!su)
1528 				flags |= PTE_UW;
1529 		}
1530 
1531 		if (prot & VM_PROT_EXECUTE) {
1532 			flags |= PTE_SX;
1533 			if (!su)
1534 				flags |= PTE_UX;
1535 		}
1536 
1537 		/* If it's wired, update stats. */
1538 		if (wired) {
1539 			pmap->pm_stats.wired_count++;
1540 			flags |= PTE_WIRED;
1541 		}
1542 
1543 		pte_enter(mmu, pmap, m, va, flags);
1544 
1545 		/* Flush the real memory from the instruction cache. */
1546 		if (prot & VM_PROT_EXECUTE)
1547 			sync++;
1548 	}
1549 
1550 	if (sync && (su || pmap == PCPU_GET(curpmap))) {
1551 		__syncicache((void *)va, PAGE_SIZE);
1552 		sync = 0;
1553 	}
1554 
1555 	if (sync) {
1556 		/* Create a temporary mapping. */
1557 		pmap = PCPU_GET(curpmap);
1558 
1559 		va = 0;
1560 		pte = pte_find(mmu, pmap, va);
1561 		KASSERT(pte == NULL, ("%s:%d", __func__, __LINE__));
1562 
1563 		flags = PTE_SR | PTE_VALID | PTE_UR;
1564 		pte_enter(mmu, pmap, m, va, flags);
1565 		__syncicache((void *)va, PAGE_SIZE);
1566 		pte_remove(mmu, pmap, va, PTBL_UNHOLD);
1567 	}
1568 
1569 	//debugf("mmu_booke_enter_locked: e\n");
1570 }
1571 
1572 /*
1573  * Maps a sequence of resident pages belonging to the same object.
1574  * The sequence begins with the given page m_start.  This page is
1575  * mapped at the given virtual address start.  Each subsequent page is
1576  * mapped at a virtual address that is offset from start by the same
1577  * amount as the page is offset from m_start within the object.  The
1578  * last page in the sequence is the page with the largest offset from
1579  * m_start that can be mapped at a virtual address less than the given
1580  * virtual address end.  Not every virtual page between start and end
1581  * is mapped; only those for which a resident page exists with the
1582  * corresponding offset from m_start are mapped.
1583  */
1584 static void
1585 mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
1586     vm_offset_t end, vm_page_t m_start, vm_prot_t prot)
1587 {
1588 	vm_page_t m;
1589 	vm_pindex_t diff, psize;
1590 
1591 	psize = atop(end - start);
1592 	m = m_start;
1593 	PMAP_LOCK(pmap);
1594 	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
1595 		mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m, prot &
1596 		    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
1597 		m = TAILQ_NEXT(m, listq);
1598 	}
1599 	PMAP_UNLOCK(pmap);
1600 }
1601 
1602 static void
1603 mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1604     vm_prot_t prot)
1605 {
1606 
1607 	//debugf("mmu_booke_enter_quick: s\n");
1608 
1609 	PMAP_LOCK(pmap);
1610 	mmu_booke_enter_locked(mmu, pmap, va, m,
1611 	    prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
1612 	PMAP_UNLOCK(pmap);
1613 
1614 	//debugf("mmu_booke_enter_quick e\n");
1615 }
1616 
1617 /*
1618  * Remove the given range of addresses from the specified map.
1619  *
1620  * It is assumed that the start and end are properly rounded to the page size.
1621  */
1622 static void
1623 mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva)
1624 {
1625 	pte_t *pte;
1626 	u_int8_t hold_flag;
1627 
1628 	int su = (pmap == kernel_pmap);
1629 
1630 	//debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n",
1631 	//		su, (u_int32_t)pmap, pmap->pm_tid, va, endva);
1632 
1633 	if (su) {
1634 		KASSERT(((va >= virtual_avail) && (va <= VM_MAX_KERNEL_ADDRESS)),
1635 		    ("mmu_booke_remove: kernel pmap, non kernel va"));
1636 	} else {
1637 		KASSERT((va <= VM_MAXUSER_ADDRESS),
1638 		    ("mmu_booke_remove: user pmap, non user va"));
1639 	}
1640 
1641 	if (PMAP_REMOVE_DONE(pmap)) {
1642 		//debugf("mmu_booke_remove: e (empty)\n");
1643 		return;
1644 	}
1645 
1646 	hold_flag = PTBL_HOLD_FLAG(pmap);
1647 	//debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag);
1648 
1649 	vm_page_lock_queues();
1650 	PMAP_LOCK(pmap);
1651 	for (; va < endva; va += PAGE_SIZE) {
1652 		pte = pte_find(mmu, pmap, va);
1653 		if ((pte != NULL) && PTE_ISVALID(pte)) {
1654 			pte_remove(mmu, pmap, va, hold_flag);
1655 
1656 			/* Flush mapping from TLB0. */
1657 			tlb0_flush_entry(pmap, va);
1658 		}
1659 	}
1660 	PMAP_UNLOCK(pmap);
1661 	vm_page_unlock_queues();
1662 
1663 	//debugf("mmu_booke_remove: e\n");
1664 }
1665 
1666 /*
1667  * Remove physical page from all pmaps in which it resides.
1668  */
1669 static void
1670 mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
1671 {
1672 	pv_entry_t pv, pvn;
1673 	u_int8_t hold_flag;
1674 
1675 	//debugf("mmu_booke_remove_all: s\n");
1676 
1677 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1678 
1679 	for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) {
1680 		pvn = TAILQ_NEXT(pv, pv_link);
1681 
1682 		PMAP_LOCK(pv->pv_pmap);
1683 		hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap);
1684 		pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag);
1685 
1686 		/* Flush mapping from TLB0. */
1687 		tlb0_flush_entry(pv->pv_pmap, pv->pv_va);
1688 		PMAP_UNLOCK(pv->pv_pmap);
1689 	}
1690 	vm_page_flag_clear(m, PG_WRITEABLE);
1691 
1692 	//debugf("mmu_booke_remove_all: e\n");
1693 }
1694 
1695 /*
1696  * Map a range of physical addresses into kernel virtual address space.
1697  *
1698  * The value passed in *virt is a suggested virtual address for the mapping.
1699  * Architectures which can support a direct-mapped physical to virtual region
1700  * can return the appropriate address within that region, leaving '*virt'
1701  * unchanged.  We cannot and therefore do not; *virt is updated with the
1702  * first usable address after the mapped region.
1703  */
1704 static vm_offset_t
1705 mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start,
1706     vm_offset_t pa_end, int prot)
1707 {
1708 	vm_offset_t sva = *virt;
1709 	vm_offset_t va = sva;
1710 
1711 	//debugf("mmu_booke_map: s (sva = 0x%08x pa_start = 0x%08x pa_end = 0x%08x)\n",
1712 	//		sva, pa_start, pa_end);
1713 
1714 	while (pa_start < pa_end) {
1715 		mmu_booke_kenter(mmu, va, pa_start);
1716 		va += PAGE_SIZE;
1717 		pa_start += PAGE_SIZE;
1718 	}
1719 	*virt = va;
1720 
1721 	//debugf("mmu_booke_map: e (va = 0x%08x)\n", va);
1722 	return (sva);
1723 }
1724 
1725 /*
1726  * The pmap must be activated before its address space can be accessed in any
1727  * way.
1728  */
1729 static void
1730 mmu_booke_activate(mmu_t mmu, struct thread *td)
1731 {
1732 	pmap_t pmap;
1733 
1734 	pmap = &td->td_proc->p_vmspace->vm_pmap;
1735 
1736 	//debugf("mmu_booke_activate: s (proc = '%s', id = %d, pmap = 0x%08x)\n",
1737 	//		td->td_proc->p_comm, td->td_proc->p_pid, pmap);
1738 
1739 	KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!"));
1740 
1741 	mtx_lock_spin(&sched_lock);
1742 
1743 	pmap->pm_active |= PCPU_GET(cpumask);
1744 	PCPU_SET(curpmap, pmap);
1745 
1746 	if (!pmap->pm_tid)
1747 		tid_alloc(pmap);
1748 
1749 	/* Load PID0 register with pmap tid value. */
1750 	load_pid0(pmap->pm_tid);
1751 
1752 	mtx_unlock_spin(&sched_lock);
1753 
1754 	//debugf("mmu_booke_activate: e (tid = %d for '%s')\n", pmap->pm_tid,
1755 	//		td->td_proc->p_comm);
1756 }
1757 
1758 /*
1759  * Deactivate the specified process's address space.
1760  */
1761 static void
1762 mmu_booke_deactivate(mmu_t mmu, struct thread *td)
1763 {
1764 	pmap_t pmap;
1765 
1766 	pmap = &td->td_proc->p_vmspace->vm_pmap;
1767 	pmap->pm_active &= ~(PCPU_GET(cpumask));
1768 	PCPU_SET(curpmap, NULL);
1769 }
1770 
1771 /*
1772  * Copy the range specified by src_addr/len
1773  * from the source map to the range dst_addr/len
1774  * in the destination map.
1775  *
1776  * This routine is only advisory and need not do anything.
1777  */
1778 static void
1779 mmu_booke_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
1780     vm_size_t len, vm_offset_t src_addr)
1781 {
1782 
1783 }
1784 
1785 /*
1786  * Set the physical protection on the specified range of this map as requested.
1787  */
1788 static void
1789 mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
1790     vm_prot_t prot)
1791 {
1792 	vm_offset_t va;
1793 	vm_page_t m;
1794 	pte_t *pte;
1795 
1796 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1797 		mmu_booke_remove(mmu, pmap, sva, eva);
1798 		return;
1799 	}
1800 
1801 	if (prot & VM_PROT_WRITE)
1802 		return;
1803 
1804 	vm_page_lock_queues();
1805 	PMAP_LOCK(pmap);
1806 	for (va = sva; va < eva; va += PAGE_SIZE) {
1807 		if ((pte = pte_find(mmu, pmap, va)) != NULL) {
1808 			if (PTE_ISVALID(pte)) {
1809 				m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1810 
1811 				/* Handle modified pages. */
1812 				if (PTE_ISMODIFIED(pte)) {
1813 					if (track_modified_needed(pmap, va))
1814 						vm_page_dirty(m);
1815 				}
1816 
1817 				/* Referenced pages. */
1818 				if (PTE_ISREFERENCED(pte))
1819 					vm_page_flag_set(m, PG_REFERENCED);
1820 
1821 				/* Clear write and status bits, then flush the mapping from TLB0. */
1822 				pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED |
1823 				    PTE_REFERENCED);
1824 				tlb0_flush_entry(pmap, va);
1825 			}
1826 		}
1827 	}
1828 	PMAP_UNLOCK(pmap);
1829 	vm_page_unlock_queues();
1830 }
1831 
1832 /*
1833  * Clear the write and modified bits in each of the given page's mappings.
1834  */
1835 static void
1836 mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
1837 {
1838 	pv_entry_t pv;
1839 	pte_t *pte;
1840 
1841 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1842 	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
1843 	    (m->flags & PG_WRITEABLE) == 0)
1844 		return;
1845 
1846 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
1847 		PMAP_LOCK(pv->pv_pmap);
1848 		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
1849 			if (PTE_ISVALID(pte)) {
1850 				m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1851 
1852 				/* Handle modified pages. */
1853 				if (PTE_ISMODIFIED(pte)) {
1854 					if (track_modified_needed(pv->pv_pmap,
1855 					    pv->pv_va))
1856 						vm_page_dirty(m);
1857 				}
1858 
1859 				/* Referenced pages. */
1860 				if (PTE_ISREFERENCED(pte))
1861 					vm_page_flag_set(m, PG_REFERENCED);
1862 
1863 				/* Clear write and status bits, then flush the mapping from TLB0. */
1864 				pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED |
1865 				    PTE_REFERENCED);
1866 				tlb0_flush_entry(pv->pv_pmap, pv->pv_va);
1867 			}
1868 		}
1869 		PMAP_UNLOCK(pv->pv_pmap);
1870 	}
1871 	vm_page_flag_clear(m, PG_WRITEABLE);
1872 }
1873 
1874 static boolean_t
1875 mmu_booke_page_executable(mmu_t mmu, vm_page_t m)
1876 {
1877 	pv_entry_t pv;
1878 	pte_t *pte;
1879 	boolean_t executable;
1880 
1881 	executable = FALSE;
1882 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
1883 		PMAP_LOCK(pv->pv_pmap);
1884 		pte = pte_find(mmu, pv->pv_pmap, pv->pv_va);
1885 		if (pte != NULL && PTE_ISVALID(pte) && (pte->flags & PTE_UX))
1886 			executable = TRUE;
1887 		PMAP_UNLOCK(pv->pv_pmap);
1888 		if (executable)
1889 			break;
1890 	}
1891 
1892 	return (executable);
1893 }
1894 
1895 /*
1896  * Atomically extract and hold the physical page with the given
1897  * pmap and virtual address pair if that mapping permits the given
1898  * protection.
1899  */
1900 static vm_page_t
1901 mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va,
1902     vm_prot_t prot)
1903 {
1904 	pte_t *pte;
1905 	vm_page_t m;
1906 	u_int32_t pte_wbit;
1907 
1908 	m = NULL;
1909 	vm_page_lock_queues();
1910 	PMAP_LOCK(pmap);
1911 	pte = pte_find(mmu, pmap, va);
1912 
1913 	if ((pte != NULL) && PTE_ISVALID(pte)) {
1914 		if (pmap == kernel_pmap)
1915 			pte_wbit = PTE_SW;
1916 		else
1917 			pte_wbit = PTE_UW;
1918 
1919 		if ((pte->flags & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) {
1920 			m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1921 			vm_page_hold(m);
1922 		}
1923 	}
1924 
1925 	vm_page_unlock_queues();
1926 	PMAP_UNLOCK(pmap);
1927 	return (m);
1928 }
1929 
1930 /*
1931  * Initialize a vm_page's machine-dependent fields.
1932  */
1933 static void
1934 mmu_booke_page_init(mmu_t mmu, vm_page_t m)
1935 {
1936 
1937 	TAILQ_INIT(&m->md.pv_list);
1938 }
1939 
1940 /*
1941  * mmu_booke_zero_page_area zeros the specified hardware page by
1942  * mapping it into virtual memory and using bzero to clear
1943  * its contents.
1944  *
1945  * off and size must reside within a single page.
1946  */
1947 static void
1948 mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
1949 {
1950 	vm_offset_t va;
1951 
1952 	//debugf("mmu_booke_zero_page_area: s\n");
1953 
1954 	mtx_lock(&zero_page_mutex);
1955 	va = zero_page_va;
1956 
1957 	mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
1958 	bzero((caddr_t)va + off, size);
1959 	mmu_booke_kremove(mmu, va);
1960 
1961 	mtx_unlock(&zero_page_mutex);
1962 
1963 	//debugf("mmu_booke_zero_page_area: e\n");
1964 }
1965 
1966 /*
1967  * mmu_booke_zero_page zeros the specified hardware page.
1968  */
1969 static void
1970 mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
1971 {
1972 
1973 	//debugf("mmu_booke_zero_page: s\n");
1974 	mmu_booke_zero_page_area(mmu, m, 0, PAGE_SIZE);
1975 	//debugf("mmu_booke_zero_page: e\n");
1976 }
1977 
1978 /*
1979  * mmu_booke_copy_page copies the specified (machine independent) page by
1980  * mapping the page into virtual memory and using memcopy to copy the page,
1981  * mapping the page into virtual memory and using memcpy to copy the page,
1982  */
1983 static void
1984 mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
1985 {
1986 	vm_offset_t sva, dva;
1987 
1988 	//debugf("mmu_booke_copy_page: s\n");
1989 
1990 	mtx_lock(&copy_page_mutex);
1991 	sva = copy_page_src_va;
1992 	dva = copy_page_dst_va;
1993 
1994 	mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm));
1995 	mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm));
1996 	memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
1997 	mmu_booke_kremove(mmu, dva);
1998 	mmu_booke_kremove(mmu, sva);
1999 
2000 	mtx_unlock(&copy_page_mutex);
2001 
2002 	//debugf("mmu_booke_copy_page: e\n");
2003 }
2004 
2005 #if 0
2006 /*
2007  * Remove all pages from specified address space, this aids process exit
2008  * speeds. This is much faster than mmu_booke_remove in the case of running
2009  * down an entire address space. Only works for the current pmap.
2010  */
2011 void
2012 mmu_booke_remove_pages(pmap_t pmap)
2013 {
2014 }
2015 #endif
2016 
2017 /*
2018  * mmu_booke_zero_page_idle zeros the specified hardware page by mapping it
2019  * into virtual memory and using bzero to clear its contents. This is intended
2020  * to be called from the vm_pagezero process only and outside of Giant. No
2021  * lock is required.
2022  */
2023 static void
2024 mmu_booke_zero_page_idle(mmu_t mmu, vm_page_t m)
2025 {
2026 	vm_offset_t va;
2027 
2028 	//debugf("mmu_booke_zero_page_idle: s\n");
2029 
2030 	va = zero_page_idle_va;
2031 	mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
2032 	bzero((caddr_t)va, PAGE_SIZE);
2033 	mmu_booke_kremove(mmu, va);
2034 
2035 	//debugf("mmu_booke_zero_page_idle: e\n");
2036 }
2037 
2038 /*
2039  * Return whether or not the specified physical page was modified
2040  * in any of physical maps.
2041  * in any of the physical maps.
2042 static boolean_t
2043 mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
2044 {
2045 	pte_t *pte;
2046 	pv_entry_t pv;
2047 
2048 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2049 	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
2050 		return (FALSE);
2051 
2052 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2053 		PMAP_LOCK(pv->pv_pmap);
2054 		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
2055 			if (!PTE_ISVALID(pte))
2056 				goto make_sure_to_unlock;
2057 
2058 			if (!track_modified_needed(pv->pv_pmap, pv->pv_va))
2059 				goto make_sure_to_unlock;
2060 
2061 			if (PTE_ISMODIFIED(pte)) {
2062 				PMAP_UNLOCK(pv->pv_pmap);
2063 				return (TRUE);
2064 			}
2065 		}
2066 make_sure_to_unlock:
2067 		PMAP_UNLOCK(pv->pv_pmap);
2068 	}
2069 	return (FALSE);
2070 }
2071 
2072 /*
2073  * Return whether or not the specified virtual address is eligible
2074  * for prefault.
2075  */
2076 static boolean_t
2077 mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
2078 {
2079 
2080 	return (FALSE);
2081 }
2082 
2083 /*
2084  * Clear the modify bits on the specified physical page.
2085  */
2086 static void
2087 mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
2088 {
2089 	pte_t *pte;
2090 	pv_entry_t pv;
2091 
2092 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2093 	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
2094 		return;
2095 
2096 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2097 		PMAP_LOCK(pv->pv_pmap);
2098 		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
2099 			if (!PTE_ISVALID(pte))
2100 				goto make_sure_to_unlock;
2101 
2102 			if (pte->flags & (PTE_SW | PTE_UW | PTE_MODIFIED)) {
2103 				pte->flags &= ~(PTE_SW | PTE_UW | PTE_MODIFIED |
2104 				    PTE_REFERENCED);
2105 				tlb0_flush_entry(pv->pv_pmap, pv->pv_va);
2106 			}
2107 		}
2108 make_sure_to_unlock:
2109 		PMAP_UNLOCK(pv->pv_pmap);
2110 	}
2111 }
2112 
2113 /*
2114  * Return a count of reference bits for a page, clearing those bits.
2115  * It is not necessary for every reference bit to be cleared, but it
2116  * is necessary that 0 only be returned when there are truly no
2117  * reference bits set.
2118  *
2119  * XXX: The exact number of bits to check and clear is a matter that
2120  * should be tested and standardized at some point in the future for
2121  * optimal aging of shared pages.
2122  */
2123 static int
2124 mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
2125 {
2126 	pte_t *pte;
2127 	pv_entry_t pv;
2128 	int count;
2129 
2130 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2131 	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
2132 		return (0);
2133 
2134 	count = 0;
2135 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2136 		PMAP_LOCK(pv->pv_pmap);
2137 		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
2138 			if (!PTE_ISVALID(pte))
2139 				goto make_sure_to_unlock;
2140 
2141 			if (!track_modified_needed(pv->pv_pmap, pv->pv_va))
2142 				goto make_sure_to_unlock;
2143 
2144 			if (PTE_ISREFERENCED(pte)) {
2145 				pte->flags &= ~PTE_REFERENCED;
2146 				tlb0_flush_entry(pv->pv_pmap, pv->pv_va);
2147 
2148 				if (++count > 4) {
2149 					PMAP_UNLOCK(pv->pv_pmap);
2150 					break;
2151 				}
2152 			}
2153 		}
2154 make_sure_to_unlock:
2155 		PMAP_UNLOCK(pv->pv_pmap);
2156 	}
2157 	return (count);
2158 }
2159 
2160 /*
2161  * Clear the reference bit on the specified physical page.
2162  */
2163 static void
2164 mmu_booke_clear_reference(mmu_t mmu, vm_page_t m)
2165 {
2166 	pte_t *pte;
2167 	pv_entry_t pv;
2168 
2169 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2170 	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
2171 		return;
2172 
2173 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2174 		PMAP_LOCK(pv->pv_pmap);
2175 		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
2176 			if (!PTE_ISVALID(pte))
2177 				goto make_sure_to_unlock;
2178 
2179 			if (PTE_ISREFERENCED(pte)) {
2180 				pte->flags &= ~PTE_REFERENCED;
2181 				tlb0_flush_entry(pv->pv_pmap, pv->pv_va);
2182 			}
2183 		}
2184 make_sure_to_unlock:
2185 		PMAP_UNLOCK(pv->pv_pmap);
2186 	}
2187 }
2188 
2189 /*
2190  * Change wiring attribute for a map/virtual-address pair.
2191  */
2192 static void
2193 mmu_booke_change_wiring(mmu_t mmu, pmap_t pmap, vm_offset_t va, boolean_t wired)
2194 {
2195 	pte_t *pte;
2196 
2197 	PMAP_LOCK(pmap);
2198 	if ((pte = pte_find(mmu, pmap, va)) != NULL) {
2199 		if (wired) {
2200 			if (!PTE_ISWIRED(pte)) {
2201 				pte->flags |= PTE_WIRED;
2202 				pmap->pm_stats.wired_count++;
2203 			}
2204 		} else {
2205 			if (PTE_ISWIRED(pte)) {
2206 				pte->flags &= ~PTE_WIRED;
2207 				pmap->pm_stats.wired_count--;
2208 			}
2209 		}
2210 	}
2211 	PMAP_UNLOCK(pmap);
2212 }
2213 
2214 /*
2215  * Return true if the pmap's pv is one of the first 16 pvs linked to from this
2216  * page.  This count may be changed upwards or downwards in the future; it is
2217  * only necessary that true be returned for a small subset of pmaps for proper
2218  * page aging.
2219  */
2220 static boolean_t
2221 mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
2222 {
2223 	pv_entry_t pv;
2224 	int loops;
2225 
2226 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2227 	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
2228 		return (FALSE);
2229 
2230 	loops = 0;
2231 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2232 
2233 		if (pv->pv_pmap == pmap)
2234 			return (TRUE);
2235 
2236 		if (++loops >= 16)
2237 			break;
2238 	}
2239 	return (FALSE);
2240 }
2241 
2242 /*
2243  * Return the number of managed mappings to the given physical page that are
2244  * wired.
2245  */
2246 static int
2247 mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m)
2248 {
2249 	pv_entry_t pv;
2250 	pte_t *pte;
2251 	int count = 0;
2252 
2253 	if ((m->flags & PG_FICTITIOUS) != 0)
2254 		return (count);
2255 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2256 
2257 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2258 		PMAP_LOCK(pv->pv_pmap);
2259 		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL)
2260 			if (PTE_ISVALID(pte) && PTE_ISWIRED(pte))
2261 				count++;
2262 		PMAP_UNLOCK(pv->pv_pmap);
2263 	}
2264 
2265 	return (count);
2266 }
2267 
2268 static int
2269 mmu_booke_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
2270 {
2271 	int i;
2272 	vm_offset_t va;
2273 
2274 	/*
2275 	 * This currently does not work for entries that
2276 	 * overlap TLB1 entries.
2277 	 */
2278 	for (i = 0; i < tlb1_idx; i ++) {
2279 		if (tlb1_iomapped(i, pa, size, &va) == 0)
2280 			return (0);
2281 	}
2282 
2283 	return (EFAULT);
2284 }
2285 
2286 /*
2287  * Map a set of physical memory pages into the kernel virtual address space.
2288  * Return a pointer to where it is mapped. This routine is intended to be used
2289  * for mapping device memory, NOT real memory.
2290  */
2291 static void *
2292 mmu_booke_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
2293 {
2294 	uintptr_t va;
2295 
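	/*
	 * Physical addresses at or above 0x80000000 are wired 1:1 into KVA;
	 * lower addresses are placed at 0xe2000000 + pa instead.
	 */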
2296 	va = (pa >= 0x80000000) ? pa : (0xe2000000 + pa);
2297 	if (bootverbose)
2298 		printf("Wiring VA=%x to PA=%x (size=%x), using TLB1[%d]\n",
2299 		    va, pa, size, tlb1_idx);
2300 	tlb1_set_entry(va, pa, size, _TLB_ENTRY_IO);
2301 	return ((void *)va);
2302 }
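/*
 * Usage sketch (illustration only, addresses made up): callers normally go
 * through the MI pmap_mapdev()/pmap_unmapdev() wrappers, which dispatch to
 * the routines above and below, e.g.:
 *
 *	void *regs = pmap_mapdev(0xf1000000, PAGE_SIZE);
 *	...
 *	pmap_unmapdev((vm_offset_t)regs, PAGE_SIZE);
 */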
2303 
2304 /*
2305  * 'Unmap' a range mapped by mmu_booke_mapdev().
2306  */
2307 static void
2308 mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
2309 {
2310 	vm_offset_t base, offset;
2311 
2312 	//debugf("mmu_booke_unmapdev: s (va = 0x%08x)\n", va);
2313 
2314 	/*
2315 	 * Unmap only if this is inside kernel virtual space.
2316 	 */
2317 	if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
2318 		base = trunc_page(va);
2319 		offset = va & PAGE_MASK;
2320 		size = roundup(offset + size, PAGE_SIZE);
2321 		kmem_free(kernel_map, base, size);
2322 	}
2323 
2324 	//debugf("mmu_booke_unmapdev: e\n");
2325 }
2326 
2327 /*
2328  * mmu_booke_object_init_pt preloads the ptes for a given object
2329  * into the specified pmap. This eliminates the blast of soft
2330  * faults on process startup and immediately after an mmap.
2331  */
2332 static void
2333 mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
2334     vm_object_t object, vm_pindex_t pindex, vm_size_t size)
2335 {
2336 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
2337 	KASSERT(object->type == OBJT_DEVICE,
2338 	    ("mmu_booke_object_init_pt: non-device object"));
2339 }
2340 
2341 /*
2342  * Perform the pmap work for mincore.
2343  */
2344 static int
2345 mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
2346 {
2347 
2348 	TODO;
2349 	return (0);
2350 }
2351 
2352 static vm_offset_t
2353 mmu_booke_addr_hint(mmu_t mmu, vm_object_t object, vm_offset_t va,
2354     vm_size_t size)
2355 {
2356 
2357 	return (va);
2358 }
2359 
2360 /**************************************************************************/
2361 /* TID handling */
2362 /**************************************************************************/
2363 /*
2364  * Flush all entries from TLB0 matching given tid.
2365  */
2366 static void
2367 tid_flush(tlbtid_t tid)
2368 {
2369 	int i, entryidx, way;
2370 
2371 	//debugf("tid_flush: s (tid = %d)\n", tid);
2372 
2373 	mtx_lock_spin(&tlb0_mutex);
2374 
2375 	for (i = 0; i < TLB0_SIZE; i++) {
2376 		if (MAS1_GETTID(tlb0[i].mas1) == tid) {
2377 			way = i / TLB0_ENTRIES_PER_WAY;
2378 			entryidx = i - (way * TLB0_ENTRIES_PER_WAY);
2379 
2380 			//debugf("tid_flush: inval tlb0 entry %d\n", i);
2381 			tlb0_inval_entry(entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT, way);
2382 		}
2383 	}
2384 
2385 	mtx_unlock_spin(&tlb0_mutex);
2386 
2387 	//debugf("tid_flush: e\n");
2388 }
2389 
2390 /*
2391  * Allocate a TID. If necessary, steal one from someone else.
2392  * The new TID is flushed from the TLB before returning.
2393  */
2394 static tlbtid_t
2395 tid_alloc(pmap_t pmap)
2396 {
2397 	tlbtid_t tid;
2398 	static tlbtid_t next_tid = TID_MIN;
2399 
2400 	//struct thread *td;
2401 	//struct proc *p;
2402 
2403 	//td = PCPU_GET(curthread);
2404 	//p = td->td_proc;
2405 	//debugf("tid_alloc: s (pmap = 0x%08x)\n", (u_int32_t)pmap);
2406 	//printf("tid_alloc: proc %d '%s'\n", p->p_pid, p->p_comm);
2407 
2408 	KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap"));
2409 
2410 	/*
2411 	 * Find a likely TID, allocate unused if possible,
2412 	 * skip reserved entries.
2413 	 */
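	/*
	 * Scan forward from next_tid, wrapping around at TID_MAX; if every
	 * TID turns out to be busy, fall through and steal the candidate.
	 */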
2414 	tid = next_tid;
2415 	while (tidbusy[tid] != NULL) {
2416 		if (tid == TID_MAX)
2417 			tid = TID_MIN;
2418 		else
2419 			tid++;
2420 
2421 		/* Wrapped around with every TID busy, steal this one. */
2422 		if (tid == next_tid)
2423 			break;
2424 	}
2425 
2426 	/* Now clean it out */
2427 	tid_flush(tid);
2428 
2429 	/* If we are stealing pmap then clear its tid */
2430 	/* If we are stealing a TID from another pmap, clear its tid field. */
2431 		//debugf("warning: stealing tid %d\n", tid);
2432 		tidbusy[tid]->pm_tid = 0;
2433 	}
2434 
2435 	/* Calculate next tid */
2436 	if (tid == TID_MAX)
2437 		next_tid = TID_MIN;
2438 	else
2439 		next_tid = tid + 1;
2440 
2441 	tidbusy[tid] = pmap;
2442 	pmap->pm_tid = tid;
2443 
2444 	//debugf("tid_alloc: e (%02d next = %02d)\n", tid, next_tid);
2445 	return (tid);
2446 }
2447 
2448 #if 0
2449 /*
2450  * Free this pmap's TID.
2451  */
2452 static void
2453 tid_free(pmap_t pmap)
2454 {
2455 	tlbtid_t oldtid;
2456 
2457 	oldtid = pmap->pm_tid;
2458 
2459 	if (oldtid == 0) {
2460 		panic("tid_free: freeing kernel tid");
2461 	}
2462 
2463 #ifdef DEBUG
2464 	if (tidbusy[oldtid] == 0)
2465 		debugf("tid_free: freeing free tid %d\n", oldtid);
2466 	if (tidbusy[oldtid] != pmap) {
2467 		debugf("tid_free: freeing someone else's tid\n "
2468 		       "tidbusy[%d] = 0x%08x pmap = 0x%08x\n",
2469 		       oldtid, (u_int32_t)tidbusy[oldtid], (u_int32_t)pmap);
2470 	}
2471 #endif
2472 
2473 	tidbusy[oldtid] = NULL;
2474 	tid_flush(oldtid);
2475 }
2476 #endif
2477 
2478 #if 0
2479 #if DEBUG
2480 static void
2481 tid_print_busy(void)
2482 {
2483 	int i;
2484 
2485 	for (i = 0; i < TID_MAX; i++) {
2486 		debugf("tid %d = pmap 0x%08x", i, (u_int32_t)tidbusy[i]);
2487 		if (tidbusy[i])
2488 			debugf(" pmap->tid = %d", tidbusy[i]->pm_tid);
2489 		debugf("\n");
2490 	}
2491 
2492 }
2493 #endif /* DEBUG */
2494 #endif
2495 
2496 /**************************************************************************/
2497 /* TLB0 handling */
2498 /**************************************************************************/
2499 
2500 static void
2501 tlb_print_entry(int i, u_int32_t mas1, u_int32_t mas2, u_int32_t mas3, u_int32_t mas7)
2502 {
2503 	int as;
2504 	char desc[3];
2505 	tlbtid_t tid;
2506 	vm_size_t size;
2507 	unsigned int tsize;
2508 
2509 	desc[2] = '\0';
2510 	if (mas1 & MAS1_VALID)
2511 		desc[0] = 'V';
2512 	else
2513 		desc[0] = ' ';
2514 
2515 	if (mas1 & MAS1_IPROT)
2516 		desc[1] = 'P';
2517 	else
2518 		desc[1] = ' ';
2519 
2520 	as = (mas1 & MAS1_TS) ? 1 : 0;
2521 	tid = MAS1_GETTID(mas1);
2522 
2523 	tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
2524 	size = 0;
2525 	if (tsize)
2526 		size = tsize2size(tsize);
2527 
2528 	debugf("%3d: (%s) [AS=%d] "
2529 	    "sz = 0x%08x tsz = %d tid = %d mas1 = 0x%08x "
2530 	    "mas2(va) = 0x%08x mas3(pa) = 0x%08x mas7 = 0x%08x\n",
2531 	    i, desc, as, size, tsize, tid, mas1, mas2, mas3, mas7);
2532 }
2533 
2534 /* Convert TLB0 va and way number to tlb0[] table index. */
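/*
 * For example (assuming 4K pages and a 2-way, 256-entry TLB0, i.e.
 * TLB0_ENTRIES_PER_WAY == 128): va 0x00035000 selects set
 * (0x35000 >> 12) & 0x7f = 53, so way 1 maps to index 1 * 128 + 53 = 181.
 */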
2535 static inline unsigned int
2536 tlb0_tableidx(vm_offset_t va, unsigned int way)
2537 {
2538 	unsigned int idx;
2539 
2540 	idx = (way * TLB0_ENTRIES_PER_WAY);
2541 	idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT;
2542 	return (idx);
2543 }
2544 
2545 /*
2546  * Write given entry to TLB0 hardware.
2547  * Use 32 bit pa, clear 4 high-order bits of RPN (mas7).
2548  */
2549 static void
2550 tlb0_write_entry(unsigned int idx, unsigned int way)
2551 {
2552 	u_int32_t mas0, mas7, nv;
2553 
2554 	/* Clear high order RPN bits. */
2555 	mas7 = 0;
2556 
2557 	/* Preserve NV. */
2558 	mas0 = mfspr(SPR_MAS0);
2559 	nv = mas0 & (TLB0_NWAYS - 1);
2560 
2561 	/* Select entry. */
2562 	mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way) | nv;
2563 
2564 	//debugf("tlb0_write_entry: s (idx=%d way=%d mas0=0x%08x "
2565 	//		"mas1=0x%08x mas2=0x%08x mas3=0x%08x)\n",
2566 	//		idx, way, mas0, tlb0[idx].mas1,
2567 	//		tlb0[idx].mas2, tlb0[idx].mas3);
2568 
2569 	mtspr(SPR_MAS0, mas0);
2570 	__asm volatile("isync");
2571 	mtspr(SPR_MAS1, tlb0[idx].mas1);
2572 	__asm volatile("isync");
2573 	mtspr(SPR_MAS2, tlb0[idx].mas2);
2574 	__asm volatile("isync");
2575 	mtspr(SPR_MAS3, tlb0[idx].mas3);
2576 	__asm volatile("isync");
2577 	mtspr(SPR_MAS7, mas7);
2578 	__asm volatile("isync; tlbwe; isync; msync");
2579 
2580 	//debugf("tlb0_write_entry: e\n");
2581 }
2582 
2583 /*
2584  * Invalidate TLB0 entry, clear corresponding tlb0 table element.
2585  */
2586 static void
2587 tlb0_inval_entry(vm_offset_t va, unsigned int way)
2588 {
2589 	int idx = tlb0_tableidx(va, way);
2590 
2591 	//debugf("tlb0_inval_entry: s (va=0x%08x way=%d idx=%d)\n",
2592 	//		va, way, idx);
2593 
2594 	tlb0[idx].mas1 = 1 << MAS1_TSIZE_SHIFT;	/* !MAS1_VALID */
2595 	tlb0[idx].mas2 = va & MAS2_EPN;
2596 	tlb0[idx].mas3 = 0;
2597 
2598 	tlb0_write_entry(idx, way);
2599 
2600 	//debugf("tlb0_inval_entry: e\n");
2601 }
2602 
2603 /*
2604  * Invalidate TLB0 entry that corresponds to pmap/va.
2605  */
2606 static void
2607 tlb0_flush_entry(pmap_t pmap, vm_offset_t va)
2608 {
2609 	int idx, way;
2610 
2611 	//debugf("tlb0_flush_entry: s (pmap=0x%08x va=0x%08x)\n",
2612 	//		(u_int32_t)pmap, va);
2613 
2614 	mtx_lock_spin(&tlb0_mutex);
2615 
2616 	/* Check all TLB0 ways. */
2617 	for (way = 0; way < TLB0_NWAYS; way ++) {
2618 		idx = tlb0_tableidx(va, way);
2619 
2620 		/* Invalidate only if entry matches va and pmap tid. */
2621 		if (((MAS1_GETTID(tlb0[idx].mas1) == pmap->pm_tid) &&
2622 				((tlb0[idx].mas2 & MAS2_EPN) == va))) {
2623 			tlb0_inval_entry(va, way);
2624 		}
2625 	}
2626 
2627 	mtx_unlock_spin(&tlb0_mutex);
2628 
2629 	//debugf("tlb0_flush_entry: e\n");
2630 }
2631 
2632 /* Clean TLB0 hardware and tlb0[] table. */
2633 static void
2634 tlb0_init(void)
2635 {
2636 	int entryidx, way;
2637 
2638 	debugf("tlb0_init: TLB0_SIZE = %d TLB0_NWAYS = %d\n",
2639 	    TLB0_SIZE, TLB0_NWAYS);
2640 
2641 	mtx_lock_spin(&tlb0_mutex);
2642 
2643 	for (way = 0; way < TLB0_NWAYS; way ++) {
2644 		for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) {
2645 			tlb0_inval_entry(entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT, way);
2646 		}
2647 	}
2648 
2649 	mtx_unlock_spin(&tlb0_mutex);
2650 }
2651 
2652 #if 0
2653 #if DEBUG
2654 /* Print out tlb0 entries for given va. */
2655 static void
2656 tlb0_print_tlbentries_va(vm_offset_t va)
2657 {
2658 	u_int32_t mas0, mas1, mas2, mas3, mas7;
2659 	int way, idx;
2660 
2661 	debugf("TLB0 entries for va = 0x%08x:\n", va);
2662 	for (way = 0; way < TLB0_NWAYS; way ++) {
2663 		mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
2664 		mtspr(SPR_MAS0, mas0);
2665 		__asm volatile("isync");
2666 
2667 		mas2 = va & MAS2_EPN;
2668 		mtspr(SPR_MAS2, mas2);
2669 		__asm volatile("isync; tlbre");
2670 
2671 		mas1 = mfspr(SPR_MAS1);
2672 		mas2 = mfspr(SPR_MAS2);
2673 		mas3 = mfspr(SPR_MAS3);
2674 		mas7 = mfspr(SPR_MAS7);
2675 
2676 		idx = tlb0_tableidx(va, way);
2677 		tlb_print_entry(idx, mas1, mas2, mas3, mas7);
2678 	}
2679 }
2680 
2681 /* Print out contents of the MAS registers for each TLB0 entry */
2682 static void
2683 tlb0_print_tlbentries(void)
2684 {
2685 	u_int32_t mas0, mas1, mas2, mas3, mas7;
2686 	int entryidx, way, idx;
2687 
2688 	debugf("TLB0 entries:\n");
2689 	for (way = 0; way < TLB0_NWAYS; way ++) {
2690 		for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) {
2691 
2692 			mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
2693 			mtspr(SPR_MAS0, mas0);
2694 			__asm volatile("isync");
2695 
2696 			mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT;
2697 			mtspr(SPR_MAS2, mas2);
2698 
2699 			__asm volatile("isync; tlbre");
2700 
2701 			mas1 = mfspr(SPR_MAS1);
2702 			mas2 = mfspr(SPR_MAS2);
2703 			mas3 = mfspr(SPR_MAS3);
2704 			mas7 = mfspr(SPR_MAS7);
2705 
2706 			idx = tlb0_tableidx(mas2, way);
2707 			tlb_print_entry(idx, mas1, mas2, mas3, mas7);
2708 		}
2709 	}
2710 }
2711 
2712 /* Print out kernel tlb0[] table. */
2713 static void
2714 tlb0_print_entries(void)
2715 {
2716 	int i;
2717 
2718 	debugf("tlb0[] table entries:\n");
2719 	for (i = 0; i < TLB0_SIZE; i++) {
2720 		tlb_print_entry(i, tlb0[i].mas1,
2721 				tlb0[i].mas2, tlb0[i].mas3, 0);
2722 	}
2723 }
2724 #endif /* DEBUG */
2725 #endif
2726 
2727 /**************************************************************************/
2728 /* TLB1 handling */
2729 /**************************************************************************/
2730 /*
2731  * Write given entry to TLB1 hardware.
2732  * Use 32 bit pa, clear 4 high-order bits of RPN (mas7).
2733  */
2734 static void
2735 tlb1_write_entry(unsigned int idx)
2736 {
2737 	u_int32_t mas0, mas7;
2738 
2739 	//debugf("tlb1_write_entry: s\n");
2740 
2741 	/* Clear high order RPN bits */
2742 	mas7 = 0;
2743 
2744 	/* Select entry */
2745 	mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(idx);
2746 	//debugf("tlb1_write_entry: mas0 = 0x%08x\n", mas0);
2747 
2748 	mtspr(SPR_MAS0, mas0);
2749 	__asm volatile("isync");
2750 	mtspr(SPR_MAS1, tlb1[idx].mas1);
2751 	__asm volatile("isync");
2752 	mtspr(SPR_MAS2, tlb1[idx].mas2);
2753 	__asm volatile("isync");
2754 	mtspr(SPR_MAS3, tlb1[idx].mas3);
2755 	__asm volatile("isync");
2756 	mtspr(SPR_MAS7, mas7);
2757 	__asm volatile("isync; tlbwe; isync; msync");
2758 
2759 	//debugf("tlb1_write_entry: e\n");
2760 }
2761 
2762 /*
2763  * Return the largest uint value log such that 2^log <= num.
2764  */
2765 static unsigned int
2766 ilog2(unsigned int num)
2767 {
2768 	int lz;
2769 
2770 	__asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num));
2771 	return (31 - lz);
2772 }
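/* For example: ilog2(1) = 0, ilog2(4096) = 12, ilog2(3 * 1024 * 1024) = 21. */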
2773 
2774 /*
2775  * Convert TLB TSIZE value to mapped region size.
2776  */
2777 static vm_size_t
2778 tsize2size(unsigned int tsize)
2779 {
2780 
2781 	/*
2782 	 * size = 4^tsize KB
2783 	 * size = 4^tsize * 2^10 = 2^(2 * tsize + 10)
2784 	 */
2785 
2786 	return ((1 << (2 * tsize)) * 1024);
2787 }
2788 
2789 /*
2790  * Convert region size (must be power of 4) to TLB TSIZE value.
2791  */
2792 static unsigned int
2793 size2tsize(vm_size_t size)
2794 {
2795 
2796 	/*
2797 	 * tsize = log2(size) / 2 - 5
2798 	 */
2799 
2800 	return (ilog2(size) / 2 - 5);
2801 }
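/*
 * A few reference points on this scale: tsize 1 = 4KB, 3 = 64KB, 5 = 1MB,
 * 7 = 16MB, 9 = 256MB (size = 4^tsize KB in each case).
 */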
2802 
2803 /*
2804  * Setup entry in a sw tlb1 table, write entry to TLB1 hardware.
2805  * This routine is used for low level operations on the TLB1,
2806  * for creating temporary as well as permanent mappings (tlb1_set_entry).
2807  *
2808  * We assume kernel mappings only, thus all entries created have supervisor
2809  * permission bits set and user permission bits cleared.
2810  *
2811  * Provided mapping size must be a power of 4.
2812  * Mapping flags must be a combination of MAS2_[WIMG].
2813  * Entry TID is set to _tid which must not exceed 8 bit value.
2814  * Entry TS is set to either 0 or MAS1_TS based on provided _ts.
2815  */
2816 static void
2817 __tlb1_set_entry(unsigned int idx, vm_offset_t va, vm_offset_t pa,
2818     vm_size_t size, u_int32_t flags, unsigned int _tid, unsigned int _ts)
2819 {
2820 	int tsize;
2821 	u_int32_t ts, tid;
2822 
2823 	//debugf("__tlb1_set_entry: s (idx = %d va = 0x%08x pa = 0x%08x "
2824 	//		"size = 0x%08x flags = 0x%08x _tid = %d _ts = %d\n",
2825 	//		idx, va, pa, size, flags, _tid, _ts);
2826 
2827 	/* Convert size to TSIZE */
2828 	tsize = size2tsize(size);
2829 	//debugf("__tlb1_set_entry: tsize = %d\n", tsize);
2830 
2831 	tid = (_tid <<  MAS1_TID_SHIFT) & MAS1_TID_MASK;
2832 	ts = (_ts) ? MAS1_TS : 0;
2833 	tlb1[idx].mas1 = MAS1_VALID | MAS1_IPROT | ts | tid;
2834 	tlb1[idx].mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK);
2835 
2836 	tlb1[idx].mas2 = (va & MAS2_EPN) | flags;
2837 
2838 	/* Set supervisor rwx permission bits */
2839 	tlb1[idx].mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX;
2840 
2841 	//debugf("__tlb1_set_entry: mas1 = %08x mas2 = %08x mas3 = 0x%08x\n",
2842 	//		tlb1[idx].mas1, tlb1[idx].mas2, tlb1[idx].mas3);
2843 
2844 	tlb1_write_entry(idx);
2845 	//debugf("__tlb1_set_entry: e\n");
2846 }
2847 
2848 /*
2849  * Register permanent kernel mapping in TLB1.
2850  *
2851  * Entries are created starting from index 0 (current free entry is
2852  * kept in tlb1_idx) and are not supposed to be invalidated.
2853  */
2854 static int
2855 tlb1_set_entry(vm_offset_t va, vm_offset_t pa, vm_size_t size, u_int32_t flags)
2856 {
2857 	//debugf("tlb1_set_entry: s (tlb1_idx = %d va = 0x%08x pa = 0x%08x "
2858 	//		"size = 0x%08x flags = 0x%08x\n",
2859 	//		tlb1_idx, va, pa, size, flags);
2860 
2861 	if (tlb1_idx >= TLB1_SIZE) {
2862 		//debugf("tlb1_set_entry: e (tlb1 full!)\n");
2863 		return (-1);
2864 	}
2865 
2866 	/* TS = 0, TID = 0 */
2867 	__tlb1_set_entry(tlb1_idx++, va, pa, size, flags, KERNEL_TID, 0);
2868 	//debugf("tlb1_set_entry: e\n");
2869 	return (0);
2870 }
2871 
2872 /*
2873  * Invalidate TLB1 entry, clear corresponding tlb1 table element.
2874  * This routine is used to clear temporary entries created
2875  * early in locore.S or through the use of __tlb1_set_entry().
2876  */
2877 void
2878 tlb1_inval_entry(unsigned int idx)
2879 {
2880 	vm_offset_t va;
2881 
2882 	va = tlb1[idx].mas2 & MAS2_EPN;
2883 
2884 	tlb1[idx].mas1 = 0; /* !MAS1_VALID */
2885 	tlb1[idx].mas2 = 0;
2886 	tlb1[idx].mas3 = 0;
2887 
2888 	tlb1_write_entry(idx);
2889 }
2890 
2891 static int
2892 tlb1_entry_size_cmp(const void *a, const void *b)
2893 {
2894 	const vm_size_t *sza;
2895 	const vm_size_t *szb;
2896 
2897 	sza = a;
2898 	szb = b;
2899 	if (*sza > *szb)
2900 		return (-1);
2901 	else if (*sza < *szb)
2902 		return (1);
2903 	else
2904 		return (0);
2905 }
2906 
2907 /*
2908  * Map in a contiguous RAM region into TLB1 using a maximum of
2909  * KERNEL_REGION_MAX_TLB_ENTRIES entries.
2910  *
2911  * If necessary, round up the last entry size and return the total size
2912  * used by all allocated entries.
2913  */
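/*
 * For example (illustration only): a 24MB region is decomposed into
 * 16MB + 4MB + 4MB entries; after the descending sort the 16MB entry is
 * loaded first, keeping the larger mappings at the lower, better aligned
 * addresses.
 */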
2914 vm_size_t
2915 tlb1_mapin_region(vm_offset_t va, vm_offset_t pa, vm_size_t size)
2916 {
2917 	vm_size_t entry_size[KERNEL_REGION_MAX_TLB_ENTRIES];
2918 	vm_size_t mapped_size, sz, esz;
2919 	unsigned int log;
2920 	int i;
2921 
2922 	debugf("tlb1_mapin_region:\n");
2923 	debugf(" region size = 0x%08x va = 0x%08x pa = 0x%08x\n", size, va, pa);
2924 
2925 	mapped_size = 0;
2926 	sz = size;
2927 	memset(entry_size, 0, sizeof(entry_size));
2928 
2929 	/* Calculate entry sizes. */
2930 	for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES && sz > 0; i++) {
2931 
2932 		/* Largest region that is a power of 4 and fits within size. */
2933 		log = ilog2(sz)/2;
2934 		esz = 1 << (2 * log);
2935 
2936 		/* Minimum region size is 4KB */
2937 		if (esz < (1 << 12))
2938 			esz = 1 << 12;
2939 
2940 		/* If this is last entry cover remaining size. */
2941 		if (i ==  KERNEL_REGION_MAX_TLB_ENTRIES - 1) {
2942 			while (esz < sz)
2943 				esz = esz << 2;
2944 		}
2945 
2946 		entry_size[i] = esz;
2947 		mapped_size += esz;
2948 		if (esz < sz)
2949 			sz -= esz;
2950 		else
2951 			sz = 0;
2952 	}
2953 
2954 	/* Sort entry sizes, required to get proper entry address alignment. */
2955 	qsort(entry_size, KERNEL_REGION_MAX_TLB_ENTRIES,
2956 	    sizeof(vm_size_t), tlb1_entry_size_cmp);
2957 
2958 	/* Load TLB1 entries. */
2959 	for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES; i++) {
2960 		esz = entry_size[i];
2961 		if (!esz)
2962 			break;
2963 		debugf("  entry %d: sz  = 0x%08x (va = 0x%08x pa = 0x%08x)\n",
2964 		    tlb1_idx, esz, va, pa);
2965 		tlb1_set_entry(va, pa, esz, _TLB_ENTRY_MEM);
2966 
2967 		va += esz;
2968 		pa += esz;
2969 	}
2970 
2971 	debugf(" mapped size 0x%08x (wasted space 0x%08x)\n",
2972 	    mapped_size, mapped_size - size);
2973 
2974 	return (mapped_size);
2975 }
2976 
2977 /*
2978  * TLB1 initialization routine, to be called after the very first
2979  * assembler level setup done in locore.S.
2980  */
2981 void
2982 tlb1_init(vm_offset_t ccsrbar)
2983 {
2984 	uint32_t mas0;
2985 
2986 	/* TLB1[1] is used to map the kernel. Save that entry. */
2987 	mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(1);
2988 	mtspr(SPR_MAS0, mas0);
2989 	__asm __volatile("isync; tlbre");
2990 
2991 	tlb1[1].mas1 = mfspr(SPR_MAS1);
2992 	tlb1[1].mas2 = mfspr(SPR_MAS2);
2993 	tlb1[1].mas3 = mfspr(SPR_MAS3);
2994 
2995 	/* Map in CCSRBAR at TLB1[0]. */
2996 	__tlb1_set_entry(0, CCSRBAR_VA, ccsrbar, CCSRBAR_SIZE,
2997 	    _TLB_ENTRY_IO, KERNEL_TID, 0);
2998 
2999 	/* Setup TLB miss defaults */
3000 	set_mas4_defaults();
3001 
3002 	/* Set the next available TLB1 entry index (0 and 1 are now taken). */
3003 	tlb1_idx = 2;
3004 }
3005 
3006 /*
3007  * Setup MAS4 defaults.
3008  * These values are loaded to MAS0-2 on a TLB miss.
3009  */
3010 static void
3011 set_mas4_defaults(void)
3012 {
3013 	u_int32_t mas4;
3014 
3015 	/* Defaults: TLB0, PID0, TSIZED=4K */
3016 	mas4 = MAS4_TLBSELD0;
3017 	mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK;
3018 
3019 	mtspr(SPR_MAS4, mas4);
3020 	__asm volatile("isync");
3021 }
3022 
3023 /*
3024  * Print out contents of the MAS registers for each TLB1 entry
3025  */
3026 void
3027 tlb1_print_tlbentries(void)
3028 {
3029 	u_int32_t mas0, mas1, mas2, mas3, mas7;
3030 	int i;
3031 
3032 	debugf("TLB1 entries:\n");
3033 	for (i = 0; i < TLB1_SIZE; i++) {
3034 
3035 		mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i);
3036 		mtspr(SPR_MAS0, mas0);
3037 
3038 		__asm volatile("isync; tlbre");
3039 
3040 		mas1 = mfspr(SPR_MAS1);
3041 		mas2 = mfspr(SPR_MAS2);
3042 		mas3 = mfspr(SPR_MAS3);
3043 		mas7 = mfspr(SPR_MAS7);
3044 
3045 		tlb_print_entry(i, mas1, mas2, mas3, mas7);
3046 	}
3047 }
3048 
3049 /*
3050  * Print out contents of the in-ram tlb1 table.
3051  */
3052 void
3053 tlb1_print_entries(void)
3054 {
3055 	int i;
3056 
3057 	debugf("tlb1[] table entries:\n");
3058 	for (i = 0; i < TLB1_SIZE; i++)
3059 		tlb_print_entry(i, tlb1[i].mas1, tlb1[i].mas2, tlb1[i].mas3, 0);
3060 }
3061 
3062 /*
3063  * Return 0 if the physical IO range is encompassed by one of the
3064  * TLB1 entries, otherwise return the related error code.
3065  */
3066 static int
3067 tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va)
3068 {
3069 	u_int32_t prot;
3070 	vm_paddr_t pa_start;
3071 	vm_paddr_t pa_end;
3072 	unsigned int entry_tsize;
3073 	vm_size_t entry_size;
3074 
3075 	*va = (vm_offset_t)NULL;
3076 
3077 	/* Skip invalid entries */
3078 	if (!(tlb1[i].mas1 & MAS1_VALID))
3079 		return (EINVAL);
3080 
3081 	/*
3082 	 * The entry must be cache-inhibited, guarded, and r/w
3083 	 * so it can function as an i/o page
3084 	 */
3085 	prot = tlb1[i].mas2 & (MAS2_I | MAS2_G);
3086 	if (prot != (MAS2_I | MAS2_G))
3087 		return (EPERM);
3088 
3089 	prot = tlb1[i].mas3 & (MAS3_SR | MAS3_SW);
3090 	if (prot != (MAS3_SR | MAS3_SW))
3091 		return (EPERM);
3092 
3093 	/* The address should be within the entry range. */
3094 	entry_tsize = (tlb1[i].mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
3095 	KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize"));
3096 
3097 	entry_size = tsize2size(entry_tsize);
3098 	pa_start = tlb1[i].mas3 & MAS3_RPN;
3099 	pa_end = pa_start + entry_size - 1;
3100 
3101 	if ((pa < pa_start) || ((pa + size) > pa_end))
3102 		return (ERANGE);
3103 
3104 	/* Return virtual address of this mapping. */
3105 	*va = (tlb1[i].mas2 & MAS2_EPN) + (pa - pa_start);
3106 	return (0);
3107 }
3108