xref: /freebsd/sys/powerpc/booke/pmap.c (revision fe3e92e6868dce2ed94c98428b8df1f27ed3ef63)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
5  * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
20  * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
22  * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
23  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
24  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
25  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
26  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  *
28  * Some hw specific parts of this pmap were derived or influenced
29  * by NetBSD's ibm4xx pmap module. More generic code is shared with
30  * a few other pmap modules from the FreeBSD tree.
31  */
32 
33  /*
34   * VM layout notes:
35   *
36   * Kernel and user threads run within one common virtual address space
37   * defined by AS=0.
38   *
39   * 32-bit pmap:
40   * Virtual address space layout:
41   * -----------------------------
42   * 0x0000_0000 - 0x7fff_ffff	: user process
43   * 0x8000_0000 - 0xbfff_ffff	: pmap_mapdev()-ed area (PCI/PCIE etc.)
44   * 0xc000_0000 - 0xc0ff_ffff	: kernel reserved
45   *   0xc000_0000 - data_end	: kernel code+data, env, metadata etc.
46   * 0xc100_0000 - 0xffff_ffff	: KVA
47   *   0xc100_0000 - 0xc100_3fff : reserved for page zero/copy
48   *   0xc100_4000 - 0xc200_3fff : reserved for ptbl bufs
49   *   0xc200_4000 - 0xc200_8fff : guard page + kstack0
50   *   0xc200_9000 - 0xfeef_ffff	: actual free KVA space
51   *
52   * 64-bit pmap:
53   * Virtual address space layout:
54   * -----------------------------
55   * 0x0000_0000_0000_0000 - 0xbfff_ffff_ffff_ffff      : user process
56   *   0x0000_0000_0000_0000 - 0x8fff_ffff_ffff_ffff    : text, data, heap, maps, libraries
57   *   0x9000_0000_0000_0000 - 0xafff_ffff_ffff_ffff    : mmio region
58   *   0xb000_0000_0000_0000 - 0xbfff_ffff_ffff_ffff    : stack
59   * 0xc000_0000_0000_0000 - 0xcfff_ffff_ffff_ffff      : kernel reserved
60   *   0xc000_0000_0000_0000 - endkernel-1              : kernel code & data
61   *               endkernel - msgbufp-1                : flat device tree
62   *                 msgbufp - kernel_pdir-1            : message buffer
63   *             kernel_pdir - kernel_pp2d-1            : kernel page directory
64   *             kernel_pp2d - .                        : kernel pointers to page directory
65   *      pmap_zero_copy_min - crashdumpmap-1           : reserved for page zero/copy
66   *            crashdumpmap - ptbl_buf_pool_vabase-1   : reserved for ptbl bufs
67   *    ptbl_buf_pool_vabase - virtual_avail-1          : user page directories and page tables
68   *           virtual_avail - 0xcfff_ffff_ffff_ffff    : actual free KVA space
69   * 0xd000_0000_0000_0000 - 0xdfff_ffff_ffff_ffff      : coprocessor region
70   * 0xe000_0000_0000_0000 - 0xefff_ffff_ffff_ffff      : mmio region
71   * 0xf000_0000_0000_0000 - 0xffff_ffff_ffff_ffff      : direct map
72   *   0xf000_0000_0000_0000 - +Maxmem                  : physmem map
73   *                         - 0xffff_ffff_ffff_ffff    : device direct map
74   */
75 
76 #include <sys/cdefs.h>
77 #include "opt_ddb.h"
78 #include "opt_kstack_pages.h"
79 
80 #include <sys/param.h>
81 #include <sys/conf.h>
82 #include <sys/malloc.h>
83 #include <sys/ktr.h>
84 #include <sys/proc.h>
85 #include <sys/user.h>
86 #include <sys/queue.h>
87 #include <sys/systm.h>
88 #include <sys/kernel.h>
89 #include <sys/kerneldump.h>
90 #include <sys/linker.h>
91 #include <sys/msgbuf.h>
92 #include <sys/lock.h>
93 #include <sys/mutex.h>
94 #include <sys/rwlock.h>
95 #include <sys/sched.h>
96 #include <sys/smp.h>
97 #include <sys/vmmeter.h>
98 
99 #include <vm/vm.h>
100 #include <vm/vm_param.h>
101 #include <vm/vm_page.h>
102 #include <vm/vm_kern.h>
103 #include <vm/vm_pageout.h>
104 #include <vm/vm_extern.h>
105 #include <vm/vm_object.h>
106 #include <vm/vm_map.h>
107 #include <vm/vm_pager.h>
108 #include <vm/vm_phys.h>
109 #include <vm/vm_pagequeue.h>
110 #include <vm/vm_radix.h>
111 #include <vm/vm_dumpset.h>
112 #include <vm/uma.h>
113 
114 #include <machine/_inttypes.h>
115 #include <machine/cpu.h>
116 #include <machine/pcb.h>
117 #include <machine/platform.h>
118 
119 #include <machine/tlb.h>
120 #include <machine/spr.h>
121 #include <machine/md_var.h>
122 #include <machine/mmuvar.h>
123 #include <machine/pmap.h>
124 #include <machine/pte.h>
125 
126 #include <ddb/ddb.h>
127 
128 #define	SPARSE_MAPDEV
129 
130 /* Use power-of-two mappings in mmu_booke_mapdev(), to save entries. */
131 #define	POW2_MAPPINGS
132 
133 #ifdef  DEBUG
134 #define debugf(fmt, args...) printf(fmt, ##args)
135 #define	__debug_used
136 #else
137 #define debugf(fmt, args...)
138 #define	__debug_used	__unused
139 #endif
140 
141 #ifdef __powerpc64__
142 #define	PRI0ptrX	"016lx"
143 #else
144 #define	PRI0ptrX	"08x"
145 #endif
146 
147 #define TODO			panic("%s: not implemented", __func__);
148 
149 extern unsigned char _etext[];
150 extern unsigned char _end[];
151 
152 extern uint32_t *bootinfo;
153 
154 vm_paddr_t kernload;
155 vm_offset_t kernstart;
156 vm_size_t kernsize;
157 
158 /* Message buffer and tables. */
159 static vm_offset_t data_start;
160 static vm_size_t data_end;
161 
162 /* Phys/avail memory regions. */
163 static struct mem_region *availmem_regions;
164 static int availmem_regions_sz;
165 static struct mem_region *physmem_regions;
166 static int physmem_regions_sz;
167 
168 #ifndef __powerpc64__
169 /* Reserved KVA space and mutex for mmu_booke_zero_page. */
170 static vm_offset_t zero_page_va;
171 static struct mtx zero_page_mutex;
172 
173 /* Reserved KVA space and mutex for mmu_booke_copy_page. */
174 static vm_offset_t copy_page_src_va;
175 static vm_offset_t copy_page_dst_va;
176 static struct mtx copy_page_mutex;
177 #endif
178 
179 static struct mtx tlbivax_mutex;
180 static bool mmuv2;
181 
182 /**************************************************************************/
183 /* PMAP */
184 /**************************************************************************/
185 
186 static int mmu_booke_enter_locked(pmap_t, vm_offset_t, vm_page_t,
187     vm_prot_t, u_int flags, int8_t psind);
188 
189 unsigned int kptbl_min;		/* Index of the first kernel ptbl. */
190 static uma_zone_t ptbl_root_zone;
191 
192 /*
193  * If user pmap is processed with mmu_booke_remove and the resident count
194  * drops to 0, there are no more pages to remove, so we need not continue.
195  */
196 #define PMAP_REMOVE_DONE(pmap) \
197 	((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0)
198 
199 #if defined(COMPAT_FREEBSD32) || !defined(__powerpc64__)
200 extern int elf32_nxstack;
201 #endif
202 
203 /**************************************************************************/
204 /* TLB and TID handling */
205 /**************************************************************************/
206 
207 /* Translation ID busy table */
208 static volatile pmap_t tidbusy[MAXCPU][TID_MAX + 1];
209 
210 /*
211  * TLB0 capabilities (entry, way numbers etc.). These can vary between e500
212  * core revisions and should be read from h/w registers during early config.
213  */
214 uint32_t tlb0_entries;
215 uint32_t tlb0_ways;
216 uint32_t tlb0_entries_per_way;
217 uint32_t tlb1_entries;
218 
219 #define TLB0_ENTRIES		(tlb0_entries)
220 #define TLB0_WAYS		(tlb0_ways)
221 #define TLB0_ENTRIES_PER_WAY	(tlb0_entries_per_way)
222 
223 #define TLB1_ENTRIES (tlb1_entries)
224 
225 static tlbtid_t tid_alloc(struct pmap *);
226 
227 #ifdef DDB
228 #ifdef __powerpc64__
229 static void tlb_print_entry(int, uint32_t, uint64_t, uint32_t, uint32_t);
230 #else
231 static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t);
232 #endif
233 #endif
234 
235 static void tlb1_read_entry(tlb_entry_t *, unsigned int);
236 static void tlb1_write_entry(tlb_entry_t *, unsigned int);
237 static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *);
238 static vm_size_t tlb1_mapin_region(vm_offset_t, vm_paddr_t, vm_size_t, int);
239 
240 static __inline uint32_t tlb_calc_wimg(vm_paddr_t pa, vm_memattr_t ma);
241 
242 static vm_size_t tsize2size(unsigned int);
243 static unsigned int size2tsize(vm_size_t);
244 
245 static void set_mas4_defaults(void);
246 
247 static inline void tlb0_flush_entry(vm_offset_t);
248 static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int);
249 
250 /**************************************************************************/
251 /* Page table management */
252 /**************************************************************************/
253 
254 static struct rwlock_padalign pvh_global_lock;
255 
256 /* Data for the pv entry allocation mechanism */
257 static uma_zone_t pvzone;
258 static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
259 
260 #define PV_ENTRY_ZONE_MIN	2048	/* min pv entries in uma zone */
261 
262 #ifndef PMAP_SHPGPERPROC
263 #define PMAP_SHPGPERPROC	200
264 #endif
265 
266 static vm_paddr_t pte_vatopa(pmap_t, vm_offset_t);
267 static int pte_enter(pmap_t, vm_page_t, vm_offset_t, uint32_t, bool);
268 static int pte_remove(pmap_t, vm_offset_t, uint8_t);
269 static pte_t *pte_find(pmap_t, vm_offset_t);
270 static void kernel_pte_alloc(vm_offset_t, vm_offset_t);
271 
272 static pv_entry_t pv_alloc(void);
273 static void pv_free(pv_entry_t);
274 static void pv_insert(pmap_t, vm_offset_t, vm_page_t);
275 static void pv_remove(pmap_t, vm_offset_t, vm_page_t);
276 
277 static void booke_pmap_init_qpages(void);
278 
279 static inline void tlb_miss_lock(void);
280 static inline void tlb_miss_unlock(void);
281 
282 #ifdef SMP
283 extern tlb_entry_t __boot_tlb1[];
284 void pmap_bootstrap_ap(volatile uint32_t *);
285 #endif
286 
287 /*
288  * Kernel MMU interface
289  */
290 static void		mmu_booke_clear_modify(vm_page_t);
291 static void		mmu_booke_copy(pmap_t, pmap_t, vm_offset_t,
292     vm_size_t, vm_offset_t);
293 static void		mmu_booke_copy_page(vm_page_t, vm_page_t);
294 static void		mmu_booke_copy_pages(vm_page_t *,
295     vm_offset_t, vm_page_t *, vm_offset_t, int);
296 static int		mmu_booke_enter(pmap_t, vm_offset_t, vm_page_t,
297     vm_prot_t, u_int flags, int8_t psind);
298 static void		mmu_booke_enter_object(pmap_t, vm_offset_t, vm_offset_t,
299     vm_page_t, vm_prot_t);
300 static void		mmu_booke_enter_quick(pmap_t, vm_offset_t, vm_page_t,
301     vm_prot_t);
302 static vm_paddr_t	mmu_booke_extract(pmap_t, vm_offset_t);
303 static vm_page_t	mmu_booke_extract_and_hold(pmap_t, vm_offset_t,
304     vm_prot_t);
305 static void		mmu_booke_init(void);
306 static bool		mmu_booke_is_modified(vm_page_t);
307 static bool		mmu_booke_is_prefaultable(pmap_t, vm_offset_t);
308 static bool		mmu_booke_is_referenced(vm_page_t);
309 static int		mmu_booke_ts_referenced(vm_page_t);
310 static void		*mmu_booke_map(vm_offset_t *, vm_paddr_t, vm_paddr_t,
311     int);
312 static int		mmu_booke_mincore(pmap_t, vm_offset_t,
313     vm_paddr_t *);
314 static void		mmu_booke_object_init_pt(pmap_t, vm_offset_t,
315     vm_object_t, vm_pindex_t, vm_size_t);
316 static bool		mmu_booke_page_exists_quick(pmap_t, vm_page_t);
317 static void		mmu_booke_page_init(vm_page_t);
318 static int		mmu_booke_page_wired_mappings(vm_page_t);
319 static int		mmu_booke_pinit(pmap_t);
320 static void		mmu_booke_pinit0(pmap_t);
321 static void		mmu_booke_protect(pmap_t, vm_offset_t, vm_offset_t,
322     vm_prot_t);
323 static void		mmu_booke_qenter(void *, vm_page_t *, int);
324 static void		mmu_booke_qremove(void *, int);
325 static void		mmu_booke_release(pmap_t);
326 static void		mmu_booke_remove(pmap_t, vm_offset_t, vm_offset_t);
327 static void		mmu_booke_remove_all(vm_page_t);
328 static void		mmu_booke_remove_write(vm_page_t);
329 static void		mmu_booke_unwire(pmap_t, vm_offset_t, vm_offset_t);
330 static void		mmu_booke_zero_page(vm_page_t);
331 static void		mmu_booke_zero_page_area(vm_page_t, int, int);
332 static void		mmu_booke_activate(struct thread *);
333 static void		mmu_booke_deactivate(struct thread *);
334 static void		mmu_booke_bootstrap(vm_offset_t, vm_offset_t);
335 static void		*mmu_booke_mapdev(vm_paddr_t, vm_size_t);
336 static void		*mmu_booke_mapdev_attr(vm_paddr_t, vm_size_t, vm_memattr_t);
337 static void		mmu_booke_unmapdev(void *, vm_size_t);
338 static vm_paddr_t	mmu_booke_kextract(vm_offset_t);
339 static void		mmu_booke_kenter(vm_offset_t, vm_paddr_t);
340 static void		mmu_booke_kenter_attr(vm_offset_t, vm_paddr_t, vm_memattr_t);
341 static void		mmu_booke_kremove(vm_offset_t);
342 static int		mmu_booke_dev_direct_mapped(vm_paddr_t, vm_size_t);
343 static void		mmu_booke_sync_icache(pmap_t, vm_offset_t,
344     vm_size_t);
345 static void		mmu_booke_dumpsys_map(vm_paddr_t pa, size_t,
346     void **);
347 static void		mmu_booke_dumpsys_unmap(vm_paddr_t pa, size_t,
348     void *);
349 static void		mmu_booke_scan_init(void);
350 static void		*mmu_booke_quick_enter_page(vm_page_t m);
351 static void		mmu_booke_quick_remove_page(void *addr);
352 static int		mmu_booke_change_attr(void *addr,
353     vm_size_t sz, vm_memattr_t mode);
354 static int		mmu_booke_decode_kernel_ptr(vm_offset_t addr,
355     int *is_user, vm_offset_t *decoded_addr);
356 static void		mmu_booke_page_array_startup(long);
357 static bool mmu_booke_page_is_mapped(vm_page_t m);
358 static bool mmu_booke_ps_enabled(pmap_t pmap);
359 
360 static struct pmap_funcs mmu_booke_methods = {
361 	/* pmap dispatcher interface */
362 	.clear_modify = mmu_booke_clear_modify,
363 	.copy = mmu_booke_copy,
364 	.copy_page = mmu_booke_copy_page,
365 	.copy_pages = mmu_booke_copy_pages,
366 	.enter = mmu_booke_enter,
367 	.enter_object = mmu_booke_enter_object,
368 	.enter_quick = mmu_booke_enter_quick,
369 	.extract = mmu_booke_extract,
370 	.extract_and_hold = mmu_booke_extract_and_hold,
371 	.init = mmu_booke_init,
372 	.is_modified = mmu_booke_is_modified,
373 	.is_prefaultable = mmu_booke_is_prefaultable,
374 	.is_referenced = mmu_booke_is_referenced,
375 	.ts_referenced = mmu_booke_ts_referenced,
376 	.map = mmu_booke_map,
377 	.mincore = mmu_booke_mincore,
378 	.object_init_pt = mmu_booke_object_init_pt,
379 	.page_exists_quick = mmu_booke_page_exists_quick,
380 	.page_init = mmu_booke_page_init,
381 	.page_wired_mappings =  mmu_booke_page_wired_mappings,
382 	.pinit = mmu_booke_pinit,
383 	.pinit0 = mmu_booke_pinit0,
384 	.protect = mmu_booke_protect,
385 	.qenter = mmu_booke_qenter,
386 	.qremove = mmu_booke_qremove,
387 	.release = mmu_booke_release,
388 	.remove = mmu_booke_remove,
389 	.remove_all = mmu_booke_remove_all,
390 	.remove_write = mmu_booke_remove_write,
391 	.sync_icache = mmu_booke_sync_icache,
392 	.unwire = mmu_booke_unwire,
393 	.zero_page = mmu_booke_zero_page,
394 	.zero_page_area = mmu_booke_zero_page_area,
395 	.activate = mmu_booke_activate,
396 	.deactivate = mmu_booke_deactivate,
397 	.quick_enter_page =  mmu_booke_quick_enter_page,
398 	.quick_remove_page =  mmu_booke_quick_remove_page,
399 	.page_array_startup = mmu_booke_page_array_startup,
400 	.page_is_mapped = mmu_booke_page_is_mapped,
401 	.ps_enabled = mmu_booke_ps_enabled,
402 
403 	/* Internal interfaces */
404 	.bootstrap = mmu_booke_bootstrap,
405 	.dev_direct_mapped = mmu_booke_dev_direct_mapped,
406 	.mapdev = mmu_booke_mapdev,
407 	.mapdev_attr = mmu_booke_mapdev_attr,
408 	.kenter = mmu_booke_kenter,
409 	.kenter_attr = mmu_booke_kenter_attr,
410 	.kextract = mmu_booke_kextract,
411 	.kremove = mmu_booke_kremove,
412 	.unmapdev = mmu_booke_unmapdev,
413 	.change_attr = mmu_booke_change_attr,
414 	.decode_kernel_ptr =  mmu_booke_decode_kernel_ptr,
415 
416 	/* dumpsys() support */
417 	.dumpsys_map_chunk = mmu_booke_dumpsys_map,
418 	.dumpsys_unmap_chunk = mmu_booke_dumpsys_unmap,
419 	.dumpsys_pa_init = mmu_booke_scan_init,
420 };
421 
422 MMU_DEF(booke_mmu, MMU_TYPE_BOOKE, mmu_booke_methods);
423 
424 #ifdef __powerpc64__
425 #include "pmap_64.c"
426 #else
427 #include "pmap_32.c"
428 #endif
429 
430 static vm_offset_t tlb1_map_base = VM_MAPDEV_BASE;
431 
432 static __inline uint32_t
433 tlb_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
434 {
435 	uint32_t attrib;
436 	int i;
437 
438 	if (ma != VM_MEMATTR_DEFAULT) {
439 		switch (ma) {
440 		case VM_MEMATTR_UNCACHEABLE:
441 			return (MAS2_I | MAS2_G);
442 		case VM_MEMATTR_WRITE_COMBINING:
443 		case VM_MEMATTR_WRITE_BACK:
444 		case VM_MEMATTR_PREFETCHABLE:
445 			return (MAS2_I);
446 		case VM_MEMATTR_WRITE_THROUGH:
447 			return (MAS2_W | MAS2_M);
448 		case VM_MEMATTR_CACHEABLE:
449 			return (MAS2_M);
450 		}
451 	}
452 
453 	/*
454 	 * Assume the page is cache inhibited and access is guarded unless
455 	 * it's in our available memory array.
456 	 */
457 	attrib = _TLB_ENTRY_IO;
458 	for (i = 0; i < physmem_regions_sz; i++) {
459 		if ((pa >= physmem_regions[i].mr_start) &&
460 		    (pa < (physmem_regions[i].mr_start +
461 		     physmem_regions[i].mr_size))) {
462 			attrib = _TLB_ENTRY_MEM;
463 			break;
464 		}
465 	}
466 
467 	return (attrib);
468 }
469 
470 static inline void
471 tlb_miss_lock(void)
472 {
473 #ifdef SMP
474 	struct pcpu *pc;
475 
476 	if (!smp_started)
477 		return;
478 
479 	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
480 		if (pc != pcpup) {
481 			CTR3(KTR_PMAP, "%s: tlb miss LOCK of CPU=%d, "
482 			    "tlb_lock=%p", __func__, pc->pc_cpuid, pc->pc_booke.tlb_lock);
483 
484 			KASSERT((pc->pc_cpuid != PCPU_GET(cpuid)),
485 			    ("tlb_miss_lock: tried to lock self"));
486 
487 			tlb_lock(pc->pc_booke.tlb_lock);
488 
489 			CTR1(KTR_PMAP, "%s: locked", __func__);
490 		}
491 	}
492 #endif
493 }
494 
495 static inline void
496 tlb_miss_unlock(void)
497 {
498 #ifdef SMP
499 	struct pcpu *pc;
500 
501 	if (!smp_started)
502 		return;
503 
504 	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
505 		if (pc != pcpup) {
506 			CTR2(KTR_PMAP, "%s: tlb miss UNLOCK of CPU=%d",
507 			    __func__, pc->pc_cpuid);
508 
509 			tlb_unlock(pc->pc_booke.tlb_lock);
510 
511 			CTR1(KTR_PMAP, "%s: unlocked", __func__);
512 		}
513 	}
514 #endif
515 }
516 
517 /* Read TLB0 size and associativity into the tlb0_* globals. */
518 static __inline void
519 tlb0_get_tlbconf(void)
520 {
521 	uint32_t tlb0_cfg;
522 
523 	tlb0_cfg = mfspr(SPR_TLB0CFG);
524 	tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK;
525 	tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT;
526 	tlb0_entries_per_way = tlb0_entries / tlb0_ways;
527 }
528 
529 /* Return number of entries in TLB1. */
530 static __inline void
531 tlb1_get_tlbconf(void)
532 {
533 	uint32_t tlb1_cfg;
534 
535 	tlb1_cfg = mfspr(SPR_TLB1CFG);
536 	tlb1_entries = tlb1_cfg & TLBCFG_NENTRY_MASK;
537 }
538 
539 /**************************************************************************/
540 /* Page table related */
541 /**************************************************************************/
542 
543 /* Allocate pv_entry structure. */
544 pv_entry_t
545 pv_alloc(void)
546 {
547 	pv_entry_t pv;
548 
549 	pv_entry_count++;
550 	if (pv_entry_count > pv_entry_high_water)
551 		pagedaemon_wakeup(0); /* XXX powerpc NUMA */
552 	pv = uma_zalloc(pvzone, M_NOWAIT);
553 
554 	return (pv);
555 }
556 
557 /* Free pv_entry structure. */
558 static __inline void
559 pv_free(pv_entry_t pve)
560 {
561 
562 	pv_entry_count--;
563 	uma_zfree(pvzone, pve);
564 }
565 
566 /* Allocate and initialize pv_entry structure. */
567 static void
568 pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m)
569 {
570 	pv_entry_t pve;
571 
572 	//int su = (pmap == kernel_pmap);
573 	//debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su,
574 	//	(u_int32_t)pmap, va, (u_int32_t)m);
575 
576 	pve = pv_alloc();
577 	if (pve == NULL)
578 		panic("pv_insert: no pv entries!");
579 
580 	pve->pv_pmap = pmap;
581 	pve->pv_va = va;
582 
583 	/* add to pv_list */
584 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
585 	rw_assert(&pvh_global_lock, RA_WLOCKED);
586 
587 	TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link);
588 
589 	//debugf("pv_insert: e\n");
590 }
591 
592 /* Destroy pv entry. */
593 static void
594 pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m)
595 {
596 	pv_entry_t pve;
597 
598 	//int su = (pmap == kernel_pmap);
599 	//debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va);
600 
601 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
602 	rw_assert(&pvh_global_lock, RA_WLOCKED);
603 
604 	/* find pv entry */
605 	TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) {
606 		if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
607 			/* remove from pv_list */
608 			TAILQ_REMOVE(&m->md.pv_list, pve, pv_link);
609 			if (TAILQ_EMPTY(&m->md.pv_list))
610 				vm_page_aflag_clear(m, PGA_WRITEABLE);
611 
612 			/* free pv entry struct */
613 			pv_free(pve);
614 			break;
615 		}
616 	}
617 
618 	//debugf("pv_remove: e\n");
619 }
620 
621 /**************************************************************************/
622 /* PMAP related */
623 /**************************************************************************/
624 
625 /*
626  * This is called during booke_init, before the system is really initialized.
627  */
628 static void
629 mmu_booke_bootstrap(vm_offset_t start, vm_offset_t kernelend)
630 {
631 	vm_paddr_t phys_kernelend;
632 	struct mem_region *mp, *mp1;
633 	int cnt, i, j;
634 	vm_paddr_t s, e, sz;
635 	vm_paddr_t physsz, hwphyssz;
636 	u_int phys_avail_count __debug_used;
637 	vm_size_t kstack0_sz;
638 	vm_paddr_t kstack0_phys;
639 	vm_offset_t kstack0;
640 	void *dpcpu;
641 
642 	debugf("mmu_booke_bootstrap: entered\n");
643 
644 	if ((mfspr(SPR_MMUCFG) & MMUCFG_MAVN_M) > 0)
645 		mmuv2 = true;
646 
647 	/* Set interesting system properties */
648 #ifdef __powerpc64__
649 	hw_direct_map = 1;
650 #else
651 	hw_direct_map = 0;
652 #endif
653 #if defined(COMPAT_FREEBSD32) || !defined(__powerpc64__)
654 	elf32_nxstack = 1;
655 #endif
656 
657 	/* Initialize invalidation mutex */
658 	mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN);
659 
660 	/* Read TLB0 size and associativity. */
661 	tlb0_get_tlbconf();
662 
663 	/*
664 	 * Align kernel start and end address (kernel image).
665 	 * Note that kernel end does not necessarily relate to kernsize.
666 	 * kernsize is the size of the kernel that is actually mapped.
667 	 */
668 	data_start = round_page(kernelend);
669 	data_end = data_start;
670 
671 	/* Allocate the dynamic per-cpu area. */
672 	dpcpu = (void *)data_end;
673 	data_end += DPCPU_SIZE;
674 
675 	/* Allocate space for the message buffer. */
676 	msgbufp = (struct msgbuf *)data_end;
677 	data_end += msgbufsize;
678 	debugf(" msgbufp at 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n",
679 	    (uintptr_t)msgbufp, data_end);
680 
681 	data_end = round_page(data_end);
682 	data_end = round_page(mmu_booke_alloc_kernel_pgtables(data_end));
683 
684 	/* Retrieve phys/avail mem regions */
685 	mem_regions(&physmem_regions, &physmem_regions_sz,
686 	    &availmem_regions, &availmem_regions_sz);
687 
688 	if (PHYS_AVAIL_ENTRIES < availmem_regions_sz)
689 		panic("mmu_booke_bootstrap: phys_avail too small");
690 
691 	data_end = round_page(data_end);
692 	vm_page_array = (vm_page_t)data_end;
693 	/*
694 	 * Get a rough idea (upper bound) on the size of the page array.  The
695 	 * vm_page_array will not handle any more pages than we have in the
696 	 * avail_regions array, and most likely much less.
697 	 */
698 	sz = 0;
699 	for (mp = availmem_regions; mp->mr_size; mp++) {
700 		sz += mp->mr_size;
701 	}
702 	sz = (round_page(sz) / (PAGE_SIZE + sizeof(struct vm_page)));
703 	data_end += round_page(sz * sizeof(struct vm_page));
704 
705 	/* Pre-round up to 1MB.  This wastes some space, but saves TLB entries */
706 	data_end = roundup2(data_end, 1 << 20);
707 
708 	debugf(" data_end: 0x%"PRI0ptrX"\n", data_end);
709 	debugf(" kernstart: %#zx\n", kernstart);
710 	debugf(" kernsize: %#zx\n", kernsize);
711 
712 	if (data_end - kernstart > kernsize) {
713 		kernsize += tlb1_mapin_region(kernstart + kernsize,
714 		    kernload + kernsize, (data_end - kernstart) - kernsize,
715 		    _TLB_ENTRY_MEM);
716 	}
717 	data_end = kernstart + kernsize;
718 	debugf(" updated data_end: 0x%"PRI0ptrX"\n", data_end);
719 
720 	/*
721 	 * Clear the structures - note we can only do it safely after the
722 	 * possible additional TLB1 translations are in place (above) so that
723 	 * all range up to the currently calculated 'data_end' is covered.
724 	 */
725 	bzero((void *)data_start, data_end - data_start);
726 	dpcpu_init(dpcpu, 0);
727 
728 	/*******************************************************/
729 	/* Set the start and end of kva. */
730 	/*******************************************************/
731 	virtual_avail = round_page(data_end);
732 	virtual_end = VM_MAX_KERNEL_ADDRESS;
733 
734 #ifndef __powerpc64__
735 	/* Allocate KVA space for page zero/copy operations. */
736 	zero_page_va = virtual_avail;
737 	virtual_avail += PAGE_SIZE;
738 	copy_page_src_va = virtual_avail;
739 	virtual_avail += PAGE_SIZE;
740 	copy_page_dst_va = virtual_avail;
741 	virtual_avail += PAGE_SIZE;
742 	debugf("zero_page_va = 0x%"PRI0ptrX"\n", zero_page_va);
743 	debugf("copy_page_src_va = 0x%"PRI0ptrX"\n", copy_page_src_va);
744 	debugf("copy_page_dst_va = 0x%"PRI0ptrX"\n", copy_page_dst_va);
745 
746 	/* Initialize page zero/copy mutexes. */
747 	mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF);
748 	mtx_init(&copy_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF);
749 
750 	/* Allocate KVA space for ptbl bufs. */
751 	ptbl_buf_pool_vabase = virtual_avail;
752 	virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE;
753 	debugf("ptbl_buf_pool_vabase = 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n",
754 	    ptbl_buf_pool_vabase, virtual_avail);
755 #endif
756 #ifdef	__powerpc64__
757 	/* Allocate KVA space for crashdumpmap. */
758 	crashdumpmap = (caddr_t)virtual_avail;
759 	virtual_avail += MAXDUMPPGS * PAGE_SIZE;
760 #endif
761 
762 	/* Calculate corresponding physical addresses for the kernel region. */
763 	phys_kernelend = kernload + kernsize;
764 	debugf("kernel image and allocated data:\n");
765 	debugf(" kernload    = 0x%09jx\n", (uintmax_t)kernload);
766 	debugf(" kernstart   = 0x%"PRI0ptrX"\n", kernstart);
767 	debugf(" kernsize    = 0x%"PRI0ptrX"\n", kernsize);
768 
769 	/*
770 	 * Remove kernel physical address range from avail regions list. Page
771 	 * align all regions.  Non-page aligned memory isn't very interesting
772 	 * to us.  Also, sort the entries for ascending addresses.
773 	 */
774 
775 	sz = 0;
776 	cnt = availmem_regions_sz;
777 	debugf("processing avail regions:\n");
778 	for (mp = availmem_regions; mp->mr_size; mp++) {
779 		s = mp->mr_start;
780 		e = mp->mr_start + mp->mr_size;
781 		debugf(" %09jx-%09jx -> ", (uintmax_t)s, (uintmax_t)e);
782 		/* Check whether this region holds all of the kernel. */
783 		if (s < kernload && e > phys_kernelend) {
784 			availmem_regions[cnt].mr_start = phys_kernelend;
785 			availmem_regions[cnt++].mr_size = e - phys_kernelend;
786 			e = kernload;
787 		}
788 		/* Check whether this region starts within the kernel. */
789 		if (s >= kernload && s < phys_kernelend) {
790 			if (e <= phys_kernelend)
791 				goto empty;
792 			s = phys_kernelend;
793 		}
794 		/* Now look whether this region ends within the kernel. */
795 		if (e > kernload && e <= phys_kernelend) {
796 			if (s >= kernload)
797 				goto empty;
798 			e = kernload;
799 		}
800 		/* Now page align the start and size of the region. */
801 		s = round_page(s);
802 		e = trunc_page(e);
803 		if (e < s)
804 			e = s;
805 		sz = e - s;
806 		debugf("%09jx-%09jx = %jx\n",
807 		    (uintmax_t)s, (uintmax_t)e, (uintmax_t)sz);
808 
809 		/* Check whether some memory is left here. */
810 		if (sz == 0) {
811 		empty:
812 			memmove(mp, mp + 1,
813 			    (cnt - (mp - availmem_regions)) * sizeof(*mp));
814 			cnt--;
815 			mp--;
816 			continue;
817 		}
818 
819 		/* Do an insertion sort. */
820 		for (mp1 = availmem_regions; mp1 < mp; mp1++)
821 			if (s < mp1->mr_start)
822 				break;
823 		if (mp1 < mp) {
824 			memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
825 			mp1->mr_start = s;
826 			mp1->mr_size = sz;
827 		} else {
828 			mp->mr_start = s;
829 			mp->mr_size = sz;
830 		}
831 	}
832 	availmem_regions_sz = cnt;
833 
834 	/*******************************************************/
835 	/* Steal physical memory for kernel stack from the end */
836 	/* of the first avail region                           */
837 	/*******************************************************/
838 	kstack0_sz = kstack_pages * PAGE_SIZE;
839 	kstack0_phys = availmem_regions[0].mr_start +
840 	    availmem_regions[0].mr_size;
841 	kstack0_phys -= kstack0_sz;
842 	availmem_regions[0].mr_size -= kstack0_sz;
843 
844 	/*******************************************************/
845 	/* Fill in phys_avail table, based on availmem_regions */
846 	/*******************************************************/
847 	phys_avail_count = 0;
848 	physsz = 0;
849 	hwphyssz = 0;
850 	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
851 
852 	debugf("fill in phys_avail:\n");
853 	for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) {
854 		debugf(" region: 0x%jx - 0x%jx (0x%jx)\n",
855 		    (uintmax_t)availmem_regions[i].mr_start,
856 		    (uintmax_t)availmem_regions[i].mr_start +
857 		        availmem_regions[i].mr_size,
858 		    (uintmax_t)availmem_regions[i].mr_size);
859 
860 		if (hwphyssz != 0 &&
861 		    (physsz + availmem_regions[i].mr_size) >= hwphyssz) {
862 			debugf(" hw.physmem adjust\n");
863 			if (physsz < hwphyssz) {
864 				phys_avail[j] = availmem_regions[i].mr_start;
865 				phys_avail[j + 1] =
866 				    availmem_regions[i].mr_start +
867 				    hwphyssz - physsz;
868 				physsz = hwphyssz;
869 				phys_avail_count++;
870 				dump_avail[j] = phys_avail[j];
871 				dump_avail[j + 1] = phys_avail[j + 1];
872 			}
873 			break;
874 		}
875 
876 		phys_avail[j] = availmem_regions[i].mr_start;
877 		phys_avail[j + 1] = availmem_regions[i].mr_start +
878 		    availmem_regions[i].mr_size;
879 		phys_avail_count++;
880 		physsz += availmem_regions[i].mr_size;
881 		dump_avail[j] = phys_avail[j];
882 		dump_avail[j + 1] = phys_avail[j + 1];
883 	}
884 	physmem = btoc(physsz);
885 
886 	/* Calculate the last available physical address. */
887 	for (i = 0; phys_avail[i + 2] != 0; i += 2)
888 		;
889 	Maxmem = powerpc_btop(phys_avail[i + 1]);
890 
891 	debugf("Maxmem = 0x%08lx\n", Maxmem);
892 	debugf("phys_avail_count = %d\n", phys_avail_count);
893 	debugf("physsz = 0x%09jx physmem = %jd (0x%09jx)\n",
894 	    (uintmax_t)physsz, (uintmax_t)physmem, (uintmax_t)physmem);
895 
896 #ifdef __powerpc64__
897 	/*
898 	 * Map the physical memory contiguously in TLB1.
899 	 * Round so it fits into a single mapping.
900 	 */
901 	tlb1_mapin_region(DMAP_BASE_ADDRESS, 0,
902 	    phys_avail[i + 1], _TLB_ENTRY_MEM);
903 #endif
904 
905 	/*******************************************************/
906 	/* Initialize (statically allocated) kernel pmap. */
907 	/*******************************************************/
908 	mtx_init(&kernel_pmap->pm_mtx, "kernel pmap", NULL, MTX_DEF);
909 
910 	debugf("kernel_pmap = 0x%"PRI0ptrX"\n", (uintptr_t)kernel_pmap);
911 	kernel_pte_alloc(virtual_avail, kernstart);
912 	for (i = 0; i < MAXCPU; i++) {
913 		kernel_pmap->pm_tid[i] = TID_KERNEL;
914 
915 		/* Initialize each CPU's tidbusy entry 0 with kernel_pmap */
916 		tidbusy[i][TID_KERNEL] = kernel_pmap;
917 	}
918 
919 	/* Mark kernel_pmap active on all CPUs */
920 	CPU_FILL(&kernel_pmap->pm_active);
921 
922  	/*
923 	 * Initialize the global pv list lock.
924 	 */
925 	rw_init(&pvh_global_lock, "pmap pv global");
926 
927 	/*******************************************************/
928 	/* Final setup */
929 	/*******************************************************/
930 
931 	/* Enter kstack0 into kernel map, provide guard page */
932 	kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
933 	thread0.td_kstack = (char *)kstack0;
934 	thread0.td_kstack_pages = kstack_pages;
935 
936 	debugf("kstack_sz = 0x%08jx\n", (uintmax_t)kstack0_sz);
937 	debugf("kstack0_phys at 0x%09jx - 0x%09jx\n",
938 	    (uintmax_t)kstack0_phys, (uintmax_t)kstack0_phys + kstack0_sz);
939 	debugf("kstack0 at 0x%"PRI0ptrX" - 0x%"PRI0ptrX"\n",
940 	    kstack0, kstack0 + kstack0_sz);
941 
942 	virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz;
943 	for (i = 0; i < kstack_pages; i++) {
944 		mmu_booke_kenter(kstack0, kstack0_phys);
945 		kstack0 += PAGE_SIZE;
946 		kstack0_phys += PAGE_SIZE;
947 	}
948 
949 	pmap_bootstrapped = 1;
950 
951 	debugf("virtual_avail = %"PRI0ptrX"\n", virtual_avail);
952 	debugf("virtual_end   = %"PRI0ptrX"\n", virtual_end);
953 
954 	debugf("mmu_booke_bootstrap: exit\n");
955 }
956 
957 #ifdef SMP
958 void
959 tlb1_ap_prep(void)
960 {
961 	tlb_entry_t *e, tmp;
962 	unsigned int i;
963 
964 	/* Prepare TLB1 image for AP processors */
965 	e = __boot_tlb1;
966 	for (i = 0; i < TLB1_ENTRIES; i++) {
967 		tlb1_read_entry(&tmp, i);
968 
969 		if ((tmp.mas1 & MAS1_VALID) && (tmp.mas2 & _TLB_ENTRY_SHARED))
970 			memcpy(e++, &tmp, sizeof(tmp));
971 	}
972 }
973 
974 void
975 pmap_bootstrap_ap(volatile uint32_t *trcp __unused)
976 {
977 	int i;
978 
979 	/*
980 	 * Finish TLB1 configuration: the BSP already set up its TLB1 and we
981 	 * have the snapshot of its contents in the s/w __boot_tlb1[] table
982 	 * created by tlb1_ap_prep(), so use these values directly to
983 	 * (re)program AP's TLB1 hardware.
984 	 *
985 	 * Start at index 1 because index 0 has the kernel map.
986 	 */
987 	for (i = 1; i < TLB1_ENTRIES; i++) {
988 		if (__boot_tlb1[i].mas1 & MAS1_VALID)
989 			tlb1_write_entry(&__boot_tlb1[i], i);
990 	}
991 
992 	set_mas4_defaults();
993 }
994 #endif
995 
996 static void
997 booke_pmap_init_qpages(void)
998 {
999 	struct pcpu *pc;
1000 	int i;
1001 
1002 	CPU_FOREACH(i) {
1003 		pc = pcpu_find(i);
1004 		pc->pc_qmap_addr = kva_alloc(PAGE_SIZE);
1005 		if (pc->pc_qmap_addr == NULL)
1006 			panic("pmap_init_qpages: unable to allocate KVA");
1007 	}
1008 }
1009 
1010 SYSINIT(qpages_init, SI_SUB_CPU, SI_ORDER_ANY, booke_pmap_init_qpages, NULL);
1011 
1012 /*
1013  * Get the physical page address for the given pmap/virtual address.
1014  */
1015 static vm_paddr_t
1016 mmu_booke_extract(pmap_t pmap, vm_offset_t va)
1017 {
1018 	vm_paddr_t pa;
1019 
1020 	PMAP_LOCK(pmap);
1021 	pa = pte_vatopa(pmap, va);
1022 	PMAP_UNLOCK(pmap);
1023 
1024 	return (pa);
1025 }
1026 
1027 /*
1028  * Extract the physical page address associated with the given
1029  * kernel virtual address.
1030  */
1031 static vm_paddr_t
1032 mmu_booke_kextract(vm_offset_t va)
1033 {
1034 	tlb_entry_t e;
1035 	vm_paddr_t p = 0;
1036 	int i;
1037 
1038 #ifdef __powerpc64__
1039 	if (va >= DMAP_BASE_ADDRESS && va <= DMAP_MAX_ADDRESS)
1040 		return (DMAP_TO_PHYS(va));
1041 #endif
1042 
1043 	if (va >= VM_MIN_KERNEL_ADDRESS && va <= VM_MAX_KERNEL_ADDRESS)
1044 		p = pte_vatopa(kernel_pmap, va);
1045 
1046 	if (p == 0) {
1047 		/* Check TLB1 mappings */
1048 		for (i = 0; i < TLB1_ENTRIES; i++) {
1049 			tlb1_read_entry(&e, i);
1050 			if (!(e.mas1 & MAS1_VALID))
1051 				continue;
1052 			if (va >= e.virt && va < e.virt + e.size)
1053 				return (e.phys + (va - e.virt));
1054 		}
1055 	}
1056 
1057 	return (p);
1058 }
1059 
1060 /*
1061  * Initialize the pmap module.
1062  *
1063  * Called by vm_mem_init(), to initialize any structures that the pmap system
1064  * needs to map virtual memory.
1065  */
1066 static void
1067 mmu_booke_init(void)
1068 {
1069 	int shpgperproc = PMAP_SHPGPERPROC;
1070 
1071 	/*
1072 	 * Initialize the address space (zone) for the pv entries.  Set a
1073 	 * high water mark so that the system can recover from excessive
1074 	 * numbers of pv entries.
1075 	 */
1076 	pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
1077 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
1078 
1079 	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
1080 	pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count;
1081 
1082 	TUNABLE_INT_FETCH("vm.pmap.pv_entry_max", &pv_entry_max);
1083 	pv_entry_high_water = 9 * (pv_entry_max / 10);
1084 
1085 	uma_zone_reserve_kva(pvzone, pv_entry_max);
1086 
1087 	/* Pre-fill pvzone with initial number of pv entries. */
1088 	uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN);
1089 
1090 	/* Create a UMA zone for page table roots. */
1091 	ptbl_root_zone = uma_zcreate("pmap root", PMAP_ROOT_SIZE,
1092 	    NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, UMA_ZONE_VM);
1093 
1094 	/* Initialize ptbl allocation. */
1095 	ptbl_init();
1096 }
1097 
1098 /*
1099  * Map a list of wired pages into kernel virtual address space.  This is
1100  * intended for temporary mappings which do not need page modification or
1101  * references recorded.  Existing mappings in the region are overwritten.
1102  */
1103 static void
1104 mmu_booke_qenter(void *sva, vm_page_t *m, int count)
1105 {
1106 	vm_offset_t va;
1107 
1108 	va = (vm_offset_t)sva;
1109 	while (count-- > 0) {
1110 		mmu_booke_kenter(va, VM_PAGE_TO_PHYS(*m));
1111 		va += PAGE_SIZE;
1112 		m++;
1113 	}
1114 }
1115 
1116 /*
1117  * Remove page mappings from kernel virtual address space.  Intended for
1118  * temporary mappings entered by mmu_booke_qenter.
1119  */
1120 static void
1121 mmu_booke_qremove(void *sva, int count)
1122 {
1123 	vm_offset_t va;
1124 
1125 	va = (vm_offset_t)sva;
1126 	while (count-- > 0) {
1127 		mmu_booke_kremove(va);
1128 		va += PAGE_SIZE;
1129 	}
1130 }
1131 
1132 /*
1133  * Map a wired page into kernel virtual address space.
1134  */
1135 static void
1136 mmu_booke_kenter(vm_offset_t va, vm_paddr_t pa)
1137 {
1138 
1139 	mmu_booke_kenter_attr(va, pa, VM_MEMATTR_DEFAULT);
1140 }
1141 
1142 static void
1143 mmu_booke_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
1144 {
1145 	uint32_t flags;
1146 	pte_t *pte;
1147 
1148 	KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
1149 	    (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va"));
1150 
1151 	flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;
1152 	flags |= tlb_calc_wimg(pa, ma) << PTE_MAS2_SHIFT;
1153 	flags |= PTE_PS_4KB;
1154 
1155 	pte = pte_find(kernel_pmap, va);
1156 	KASSERT((pte != NULL), ("mmu_booke_kenter: invalid va.  NULL PTE"));
1157 
1158 	mtx_lock_spin(&tlbivax_mutex);
1159 	tlb_miss_lock();
1160 
1161 	if (PTE_ISVALID(pte)) {
1162 		CTR1(KTR_PMAP, "%s: replacing entry!", __func__);
1163 
1164 		/* Flush entry from TLB0 */
1165 		tlb0_flush_entry(va);
1166 	}
1167 
1168 	*pte = PTE_RPN_FROM_PA(pa) | flags;
1169 
1170 	//debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x "
1171 	//		"pa=0x%08x rpn=0x%08x flags=0x%08x\n",
1172 	//		pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags);
1173 
1174 	/* Flush the real memory from the instruction cache. */
1175 	if ((flags & (PTE_I | PTE_G)) == 0)
1176 		__syncicache((void *)va, PAGE_SIZE);
1177 
1178 	tlb_miss_unlock();
1179 	mtx_unlock_spin(&tlbivax_mutex);
1180 }
1181 
1182 /*
1183  * Remove a page from kernel page table.
1184  */
1185 static void
1186 mmu_booke_kremove(vm_offset_t va)
1187 {
1188 	pte_t *pte;
1189 
1190 	CTR2(KTR_PMAP,"%s: s (va = 0x%"PRI0ptrX")\n", __func__, va);
1191 
1192 	KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
1193 	    (va <= VM_MAX_KERNEL_ADDRESS)),
1194 	    ("mmu_booke_kremove: invalid va"));
1195 
1196 	pte = pte_find(kernel_pmap, va);
1197 
1198 	if (!PTE_ISVALID(pte)) {
1199 		CTR1(KTR_PMAP, "%s: invalid pte", __func__);
1200 
1201 		return;
1202 	}
1203 
1204 	mtx_lock_spin(&tlbivax_mutex);
1205 	tlb_miss_lock();
1206 
1207 	/* Invalidate entry in TLB0, update PTE. */
1208 	tlb0_flush_entry(va);
1209 	*pte = 0;
1210 
1211 	tlb_miss_unlock();
1212 	mtx_unlock_spin(&tlbivax_mutex);
1213 }
1214 
1215 /*
1216  * Figure out where a given kernel pointer (usually in a fault) points
1217  * to from the VM's perspective, potentially remapping into userland's
1218  * address space.
1219  */
1220 static int
1221 mmu_booke_decode_kernel_ptr(vm_offset_t addr, int *is_user,
1222     vm_offset_t *decoded_addr)
1223 {
1224 
1225 	if (trunc_page(addr) <= VM_MAXUSER_ADDRESS)
1226 		*is_user = 1;
1227 	else
1228 		*is_user = 0;
1229 
1230 	*decoded_addr = addr;
1231 	return (0);
1232 }
1233 
1234 static bool
1235 mmu_booke_page_is_mapped(vm_page_t m)
1236 {
1237 
1238 	return (!TAILQ_EMPTY(&(m)->md.pv_list));
1239 }
1240 
1241 static bool
1242 mmu_booke_ps_enabled(pmap_t pmap __unused)
1243 {
1244 	return (false);
1245 }
1246 
1247 /*
1248  * Initialize pmap associated with process 0.
1249  */
1250 static void
1251 mmu_booke_pinit0(pmap_t pmap)
1252 {
1253 
1254 	PMAP_LOCK_INIT(pmap);
1255 	mmu_booke_pinit(pmap);
1256 	PCPU_SET(curpmap, pmap);
1257 }
1258 
1259 /*
1260  * Insert the given physical page at the specified virtual address in the
1261  * target physical map with the protection requested. If specified the page
1262  * will be wired down.
1263  */
1264 static int
1265 mmu_booke_enter(pmap_t pmap, vm_offset_t va, vm_page_t m,
1266     vm_prot_t prot, u_int flags, int8_t psind)
1267 {
1268 	int error;
1269 
1270 	rw_wlock(&pvh_global_lock);
1271 	PMAP_LOCK(pmap);
1272 	error = mmu_booke_enter_locked(pmap, va, m, prot, flags, psind);
1273 	PMAP_UNLOCK(pmap);
1274 	rw_wunlock(&pvh_global_lock);
1275 	return (error);
1276 }
1277 
1278 static int
1279 mmu_booke_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
1280     vm_prot_t prot, u_int pmap_flags, int8_t psind __unused)
1281 {
1282 	pte_t *pte;
1283 	vm_paddr_t pa;
1284 	pte_t flags;
1285 	int error, su, sync;
1286 
1287 	pa = VM_PAGE_TO_PHYS(m);
1288 	su = (pmap == kernel_pmap);
1289 	sync = 0;
1290 
1291 	//debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x "
1292 	//		"pa=0x%08x prot=0x%08x flags=%#x)\n",
1293 	//		(u_int32_t)pmap, su, pmap->pm_tid,
1294 	//		(u_int32_t)m, va, pa, prot, flags);
1295 
1296 	if (su) {
1297 		KASSERT(((va >= virtual_avail) &&
1298 		    (va <= VM_MAX_KERNEL_ADDRESS)),
1299 		    ("mmu_booke_enter_locked: kernel pmap, non kernel va"));
1300 	} else {
1301 		KASSERT((va <= VM_MAXUSER_ADDRESS),
1302 		    ("mmu_booke_enter_locked: user pmap, non user va"));
1303 	}
1304 	if ((m->oflags & VPO_UNMANAGED) == 0) {
1305 		if ((pmap_flags & PMAP_ENTER_QUICK_LOCKED) == 0)
1306 			VM_PAGE_OBJECT_BUSY_ASSERT(m);
1307 		else
1308 			VM_OBJECT_ASSERT_LOCKED(m->object);
1309 	}
1310 
1311 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1312 
1313 	/*
1314 	 * If there is an existing mapping, and the physical address has not
1315 	 * changed, it must be a protection or wiring change.
1316 	 */
1317 	if (((pte = pte_find(pmap, va)) != NULL) &&
1318 	    (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) {
1319 
1320 		/*
1321 		 * Before actually updating pte->flags we calculate and
1322 		 * prepare its new value in a helper var.
1323 		 */
1324 		flags = *pte;
1325 		flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED);
1326 
1327 		/* Wiring change, just update stats. */
1328 		if ((pmap_flags & PMAP_ENTER_WIRED) != 0) {
1329 			if (!PTE_ISWIRED(pte)) {
1330 				flags |= PTE_WIRED;
1331 				pmap->pm_stats.wired_count++;
1332 			}
1333 		} else {
1334 			if (PTE_ISWIRED(pte)) {
1335 				flags &= ~PTE_WIRED;
1336 				pmap->pm_stats.wired_count--;
1337 			}
1338 		}
1339 
1340 		if (prot & VM_PROT_WRITE) {
1341 			/* Add write permissions. */
1342 			flags |= PTE_SW;
1343 			if (!su)
1344 				flags |= PTE_UW;
1345 
1346 			if ((flags & PTE_MANAGED) != 0)
1347 				vm_page_aflag_set(m, PGA_WRITEABLE);
1348 		} else {
1349 			/* Handle modified pages, sense modify status. */
1350 
1351 			/*
1352 			 * The PTE_MODIFIED flag could have been set by TLB
1353 			 * misses since we last read it (above), and other CPUs
1354 			 * could have updated it as well, so check the PTE
1355 			 * directly rather than relying on the saved local flags
1356 			 * copy.
1357 			 */
1358 			if (PTE_ISMODIFIED(pte))
1359 				vm_page_dirty(m);
1360 		}
1361 
1362 		if (prot & VM_PROT_EXECUTE) {
1363 			flags |= PTE_SX;
1364 			if (!su)
1365 				flags |= PTE_UX;
1366 
1367 			/*
1368 			 * Check existing flags for execute permissions: if we
1369 			 * are turning execute permissions on, icache should
1370 			 * be flushed.
1371 			 */
1372 			if ((*pte & (PTE_UX | PTE_SX)) == 0)
1373 				sync++;
1374 		}
1375 
1376 		flags &= ~PTE_REFERENCED;
1377 
1378 		/*
1379 		 * The new flags value is all calculated -- only now actually
1380 		 * update the PTE.
1381 		 */
1382 		mtx_lock_spin(&tlbivax_mutex);
1383 		tlb_miss_lock();
1384 
1385 		tlb0_flush_entry(va);
1386 		*pte &= ~PTE_FLAGS_MASK;
1387 		*pte |= flags;
1388 
1389 		tlb_miss_unlock();
1390 		mtx_unlock_spin(&tlbivax_mutex);
1391 
1392 	} else {
1393 		/*
1394 		 * If there is an existing mapping, but it's for a different
1395 		 * physical address, pte_enter() will delete the old mapping.
1396 		 */
1397 		//if ((pte != NULL) && PTE_ISVALID(pte))
1398 		//	debugf("mmu_booke_enter_locked: replace\n");
1399 		//else
1400 		//	debugf("mmu_booke_enter_locked: new\n");
1401 
1402 		/* Now set up the flags and install the new mapping. */
1403 		flags = (PTE_SR | PTE_VALID);
1404 		flags |= PTE_M;
1405 
1406 		if (!su)
1407 			flags |= PTE_UR;
1408 
1409 		if (prot & VM_PROT_WRITE) {
1410 			flags |= PTE_SW;
1411 			if (!su)
1412 				flags |= PTE_UW;
1413 
1414 			if ((m->oflags & VPO_UNMANAGED) == 0)
1415 				vm_page_aflag_set(m, PGA_WRITEABLE);
1416 		}
1417 
1418 		if (prot & VM_PROT_EXECUTE) {
1419 			flags |= PTE_SX;
1420 			if (!su)
1421 				flags |= PTE_UX;
1422 		}
1423 
1424 		/* If it's wired, update stats. */
1425 		if ((pmap_flags & PMAP_ENTER_WIRED) != 0)
1426 			flags |= PTE_WIRED;
1427 
1428 		error = pte_enter(pmap, m, va, flags,
1429 		    (pmap_flags & PMAP_ENTER_NOSLEEP) != 0);
1430 		if (error != 0)
1431 			return (KERN_RESOURCE_SHORTAGE);
1432 
1433 		if ((flags & PMAP_ENTER_WIRED) != 0)
1434 			pmap->pm_stats.wired_count++;
1435 
1436 		/* Flush the real memory from the instruction cache. */
1437 		if (prot & VM_PROT_EXECUTE)
1438 			sync++;
1439 	}
1440 
1441 	if (sync && (su || pmap == PCPU_GET(curpmap))) {
1442 		__syncicache((void *)va, PAGE_SIZE);
1443 		sync = 0;
1444 	}
1445 
1446 	return (KERN_SUCCESS);
1447 }
1448 
1449 /*
1450  * Maps a sequence of resident pages belonging to the same object.
1451  * The sequence begins with the given page m_start.  This page is
1452  * mapped at the given virtual address start.  Each subsequent page is
1453  * mapped at a virtual address that is offset from start by the same
1454  * amount as the page is offset from m_start within the object.  The
1455  * last page in the sequence is the page with the largest offset from
1456  * m_start that can be mapped at a virtual address less than the given
1457  * virtual address end.  Not every virtual page between start and end
1458  * is mapped; only those for which a resident page exists with the
1459  * corresponding offset from m_start are mapped.
1460  */
1461 static void
1462 mmu_booke_enter_object(pmap_t pmap, vm_offset_t start,
1463     vm_offset_t end, vm_page_t m_start, vm_prot_t prot)
1464 {
1465 	struct pctrie_iter pages;
1466 	vm_offset_t va;
1467 	vm_page_t m;
1468 
1469 	VM_OBJECT_ASSERT_LOCKED(m_start->object);
1470 
1471 	vm_page_iter_limit_init(&pages, m_start->object,
1472 	    m_start->pindex + atop(end - start));
1473 	m = vm_radix_iter_lookup(&pages, m_start->pindex);
1474 	rw_wlock(&pvh_global_lock);
1475 	PMAP_LOCK(pmap);
1476 	while (m != NULL) {
1477 		va = start + ptoa(m->pindex - m_start->pindex);
1478 		mmu_booke_enter_locked(pmap, va, m,
1479 		    prot & (VM_PROT_READ | VM_PROT_EXECUTE),
1480 		    PMAP_ENTER_NOSLEEP | PMAP_ENTER_QUICK_LOCKED, 0);
1481 		m = vm_radix_iter_step(&pages);
1482 	}
1483 	PMAP_UNLOCK(pmap);
1484 	rw_wunlock(&pvh_global_lock);
1485 }
1486 
1487 static void
1488 mmu_booke_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m,
1489     vm_prot_t prot)
1490 {
1491 
1492 	rw_wlock(&pvh_global_lock);
1493 	PMAP_LOCK(pmap);
1494 	mmu_booke_enter_locked(pmap, va, m,
1495 	    prot & (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP |
1496 	    PMAP_ENTER_QUICK_LOCKED, 0);
1497 	PMAP_UNLOCK(pmap);
1498 	rw_wunlock(&pvh_global_lock);
1499 }
1500 
1501 /*
1502  * Remove the given range of addresses from the specified map.
1503  *
1504  * It is assumed that the start and end are properly rounded to the page size.
1505  */
1506 static void
1507 mmu_booke_remove(pmap_t pmap, vm_offset_t va, vm_offset_t endva)
1508 {
1509 	pte_t *pte;
1510 	uint8_t hold_flag;
1511 
1512 	int su = (pmap == kernel_pmap);
1513 
1514 	//debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n",
1515 	//		su, (u_int32_t)pmap, pmap->pm_tid, va, endva);
1516 
1517 	if (su) {
1518 		KASSERT(((va >= virtual_avail) &&
1519 		    (va <= VM_MAX_KERNEL_ADDRESS)),
1520 		    ("mmu_booke_remove: kernel pmap, non kernel va"));
1521 	} else {
1522 		KASSERT((va <= VM_MAXUSER_ADDRESS),
1523 		    ("mmu_booke_remove: user pmap, non user va"));
1524 	}
1525 
1526 	if (PMAP_REMOVE_DONE(pmap)) {
1527 		//debugf("mmu_booke_remove: e (empty)\n");
1528 		return;
1529 	}
1530 
1531 	hold_flag = PTBL_HOLD_FLAG(pmap);
1532 	//debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag);
1533 
1534 	rw_wlock(&pvh_global_lock);
1535 	PMAP_LOCK(pmap);
1536 	for (; va < endva; va += PAGE_SIZE) {
1537 		pte = pte_find_next(pmap, &va);
1538 		if ((pte == NULL) || !PTE_ISVALID(pte))
1539 			break;
1540 		if (va >= endva)
1541 			break;
1542 		pte_remove(pmap, va, hold_flag);
1543 	}
1544 	PMAP_UNLOCK(pmap);
1545 	rw_wunlock(&pvh_global_lock);
1546 
1547 	//debugf("mmu_booke_remove: e\n");
1548 }
1549 
1550 /*
1551  * Remove physical page from all pmaps in which it resides.
1552  */
1553 static void
1554 mmu_booke_remove_all(vm_page_t m)
1555 {
1556 	pv_entry_t pv, pvn;
1557 	uint8_t hold_flag;
1558 
1559 	rw_wlock(&pvh_global_lock);
1560 	TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_link, pvn) {
1561 		PMAP_LOCK(pv->pv_pmap);
1562 		hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap);
1563 		pte_remove(pv->pv_pmap, pv->pv_va, hold_flag);
1564 		PMAP_UNLOCK(pv->pv_pmap);
1565 	}
1566 	vm_page_aflag_clear(m, PGA_WRITEABLE);
1567 	rw_wunlock(&pvh_global_lock);
1568 }
1569 
1570 /*
1571  * Map a range of physical addresses into kernel virtual address space.
1572  */
1573 static void *
1574 mmu_booke_map(vm_offset_t *virt, vm_paddr_t pa_start,
1575     vm_paddr_t pa_end, int prot)
1576 {
1577 	vm_offset_t sva = *virt;
1578 	vm_offset_t va = sva;
1579 
1580 #ifdef __powerpc64__
1581 	/* XXX: Handle memory not starting at 0x0. */
1582 	if (pa_end < ctob(Maxmem))
1583 		return (PHYS_TO_DMAP(pa_start));
1584 #endif
1585 
1586 	while (pa_start < pa_end) {
1587 		mmu_booke_kenter(va, pa_start);
1588 		va += PAGE_SIZE;
1589 		pa_start += PAGE_SIZE;
1590 	}
1591 	*virt = va;
1592 
1593 	return ((void *)sva);
1594 }
1595 
1596 /*
1597  * The pmap must be activated before its address space can be accessed in any
1598  * way.
1599  */
1600 static void
1601 mmu_booke_activate(struct thread *td)
1602 {
1603 	pmap_t pmap;
1604 	u_int cpuid;
1605 
1606 	pmap = &td->td_proc->p_vmspace->vm_pmap;
1607 
1608 	CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x%"PRI0ptrX")",
1609 	    __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
1610 
1611 	KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!"));
1612 
1613 	sched_pin();
1614 
1615 	cpuid = PCPU_GET(cpuid);
1616 	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
1617 	PCPU_SET(curpmap, pmap);
1618 
1619 	if (pmap->pm_tid[cpuid] == TID_NONE)
1620 		tid_alloc(pmap);
1621 
1622 	/* Load PID0 register with pmap tid value. */
1623 	mtspr(SPR_PID0, pmap->pm_tid[cpuid]);
1624 	__asm __volatile("isync");
1625 
1626 	mtspr(SPR_DBCR0, td->td_pcb->pcb_cpu.booke.dbcr0);
1627 
1628 	sched_unpin();
1629 
1630 	CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__,
1631 	    pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm);
1632 }
1633 
1634 /*
1635  * Deactivate the specified process's address space.
1636  */
1637 static void
1638 mmu_booke_deactivate(struct thread *td)
1639 {
1640 	pmap_t pmap;
1641 
1642 	pmap = &td->td_proc->p_vmspace->vm_pmap;
1643 
1644 	CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%"PRI0ptrX,
1645 	    __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
1646 
1647 	td->td_pcb->pcb_cpu.booke.dbcr0 = mfspr(SPR_DBCR0);
1648 
1649 	CPU_CLR_ATOMIC(PCPU_GET(cpuid), &pmap->pm_active);
1650 	PCPU_SET(curpmap, NULL);
1651 }
1652 
1653 /*
1654  * Copy the range specified by src_addr/len
1655  * from the source map to the range dst_addr/len
1656  * in the destination map.
1657  *
1658  * This routine is only advisory and need not do anything.
1659  */
1660 static void
1661 mmu_booke_copy(pmap_t dst_pmap, pmap_t src_pmap,
1662     vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
1663 {
1664 
1665 }
1666 
1667 /*
1668  * Set the physical protection on the specified range of this map as requested.
1669  */
1670 static void
1671 mmu_booke_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
1672     vm_prot_t prot)
1673 {
1674 	vm_offset_t va;
1675 	vm_page_t m;
1676 	pte_t *pte;
1677 
1678 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1679 		mmu_booke_remove(pmap, sva, eva);
1680 		return;
1681 	}
1682 
1683 	if (prot & VM_PROT_WRITE)
1684 		return;
1685 
1686 	PMAP_LOCK(pmap);
1687 	for (va = sva; va < eva; va += PAGE_SIZE) {
1688 		if ((pte = pte_find(pmap, va)) != NULL) {
1689 			if (PTE_ISVALID(pte)) {
1690 				m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1691 
1692 				mtx_lock_spin(&tlbivax_mutex);
1693 				tlb_miss_lock();
1694 
1695 				/* Handle modified pages. */
1696 				if (PTE_ISMODIFIED(pte) && PTE_ISMANAGED(pte))
1697 					vm_page_dirty(m);
1698 
1699 				tlb0_flush_entry(va);
1700 				*pte &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);
1701 
1702 				tlb_miss_unlock();
1703 				mtx_unlock_spin(&tlbivax_mutex);
1704 			}
1705 		}
1706 	}
1707 	PMAP_UNLOCK(pmap);
1708 }
1709 
1710 /*
1711  * Clear the write and modified bits in each of the given page's mappings.
1712  */
1713 static void
1714 mmu_booke_remove_write(vm_page_t m)
1715 {
1716 	pv_entry_t pv;
1717 	pte_t *pte;
1718 
1719 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1720 	    ("mmu_booke_remove_write: page %p is not managed", m));
1721 	vm_page_assert_busied(m);
1722 
1723 	if (!pmap_page_is_write_mapped(m))
1724 	        return;
1725 	rw_wlock(&pvh_global_lock);
1726 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
1727 		PMAP_LOCK(pv->pv_pmap);
1728 		if ((pte = pte_find(pv->pv_pmap, pv->pv_va)) != NULL) {
1729 			if (PTE_ISVALID(pte)) {
1730 				m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1731 
1732 				mtx_lock_spin(&tlbivax_mutex);
1733 				tlb_miss_lock();
1734 
1735 				/* Handle modified pages. */
1736 				if (PTE_ISMODIFIED(pte))
1737 					vm_page_dirty(m);
1738 
1739 				/* Flush mapping from TLB0. */
1740 				*pte &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);
1741 
1742 				tlb_miss_unlock();
1743 				mtx_unlock_spin(&tlbivax_mutex);
1744 			}
1745 		}
1746 		PMAP_UNLOCK(pv->pv_pmap);
1747 	}
1748 	vm_page_aflag_clear(m, PGA_WRITEABLE);
1749 	rw_wunlock(&pvh_global_lock);
1750 }
1751 
1752 /*
1753  * Atomically extract and hold the physical page with the given
1754  * pmap and virtual address pair if that mapping permits the given
1755  * protection.
1756  */
1757 static vm_page_t
1758 mmu_booke_extract_and_hold(pmap_t pmap, vm_offset_t va,
1759     vm_prot_t prot)
1760 {
1761 	pte_t *pte;
1762 	vm_page_t m;
1763 	uint32_t pte_wbit;
1764 
1765 	m = NULL;
1766 	PMAP_LOCK(pmap);
1767 	pte = pte_find(pmap, va);
1768 	if ((pte != NULL) && PTE_ISVALID(pte)) {
1769 		if (pmap == kernel_pmap)
1770 			pte_wbit = PTE_SW;
1771 		else
1772 			pte_wbit = PTE_UW;
1773 
1774 		if ((*pte & pte_wbit) != 0 || (prot & VM_PROT_WRITE) == 0) {
1775 			m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1776 			if (!vm_page_wire_mapped(m))
1777 				m = NULL;
1778 		}
1779 	}
1780 	PMAP_UNLOCK(pmap);
1781 	return (m);
1782 }
1783 
1784 /*
1785  * Initialize a vm_page's machine-dependent fields.
1786  */
1787 static void
1788 mmu_booke_page_init(vm_page_t m)
1789 {
1790 
1791 	m->md.pv_tracked = 0;
1792 	TAILQ_INIT(&m->md.pv_list);
1793 }
1794 
1795 /*
1796  * Return whether or not the specified physical page was modified
1797  * in any of physical maps.
1798  */
1799 static bool
1800 mmu_booke_is_modified(vm_page_t m)
1801 {
1802 	pte_t *pte;
1803 	pv_entry_t pv;
1804 	bool rv;
1805 
1806 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1807 	    ("mmu_booke_is_modified: page %p is not managed", m));
1808 	rv = false;
1809 
1810 	/*
1811 	 * If the page is not busied then this check is racy.
1812 	 */
1813 	if (!pmap_page_is_write_mapped(m))
1814 		return (false);
1815 
1816 	rw_wlock(&pvh_global_lock);
1817 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
1818 		PMAP_LOCK(pv->pv_pmap);
1819 		if ((pte = pte_find(pv->pv_pmap, pv->pv_va)) != NULL &&
1820 		    PTE_ISVALID(pte)) {
1821 			if (PTE_ISMODIFIED(pte))
1822 				rv = true;
1823 		}
1824 		PMAP_UNLOCK(pv->pv_pmap);
1825 		if (rv)
1826 			break;
1827 	}
1828 	rw_wunlock(&pvh_global_lock);
1829 	return (rv);
1830 }
1831 
1832 /*
1833  * Return whether or not the specified virtual address is eligible
1834  * for prefault.
1835  */
1836 static bool
1837 mmu_booke_is_prefaultable(pmap_t pmap, vm_offset_t addr)
1838 {
1839 
1840 	return (false);
1841 }
1842 
1843 /*
1844  * Return whether or not the specified physical page was referenced
1845  * in any physical maps.
1846  */
1847 static bool
1848 mmu_booke_is_referenced(vm_page_t m)
1849 {
1850 	pte_t *pte;
1851 	pv_entry_t pv;
1852 	bool rv;
1853 
1854 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1855 	    ("mmu_booke_is_referenced: page %p is not managed", m));
1856 	rv = false;
1857 	rw_wlock(&pvh_global_lock);
1858 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
1859 		PMAP_LOCK(pv->pv_pmap);
1860 		if ((pte = pte_find(pv->pv_pmap, pv->pv_va)) != NULL &&
1861 		    PTE_ISVALID(pte)) {
1862 			if (PTE_ISREFERENCED(pte))
1863 				rv = true;
1864 		}
1865 		PMAP_UNLOCK(pv->pv_pmap);
1866 		if (rv)
1867 			break;
1868 	}
1869 	rw_wunlock(&pvh_global_lock);
1870 	return (rv);
1871 }
1872 
1873 /*
1874  * Clear the modify bits on the specified physical page.
1875  */
1876 static void
1877 mmu_booke_clear_modify(vm_page_t m)
1878 {
1879 	pte_t *pte;
1880 	pv_entry_t pv;
1881 
1882 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1883 	    ("mmu_booke_clear_modify: page %p is not managed", m));
1884 	vm_page_assert_busied(m);
1885 
1886 	if (!pmap_page_is_write_mapped(m))
1887 	        return;
1888 
1889 	rw_wlock(&pvh_global_lock);
1890 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
1891 		PMAP_LOCK(pv->pv_pmap);
1892 		if ((pte = pte_find(pv->pv_pmap, pv->pv_va)) != NULL &&
1893 		    PTE_ISVALID(pte)) {
1894 			mtx_lock_spin(&tlbivax_mutex);
1895 			tlb_miss_lock();
1896 
1897 			if (*pte & (PTE_SW | PTE_UW | PTE_MODIFIED)) {
1898 				tlb0_flush_entry(pv->pv_va);
1899 				*pte &= ~(PTE_SW | PTE_UW | PTE_MODIFIED |
1900 				    PTE_REFERENCED);
1901 			}
1902 
1903 			tlb_miss_unlock();
1904 			mtx_unlock_spin(&tlbivax_mutex);
1905 		}
1906 		PMAP_UNLOCK(pv->pv_pmap);
1907 	}
1908 	rw_wunlock(&pvh_global_lock);
1909 }
1910 
1911 /*
1912  * Return a count of reference bits for a page, clearing those bits.
1913  * It is not necessary for every reference bit to be cleared, but it
1914  * is necessary that 0 only be returned when there are truly no
1915  * reference bits set.
1916  *
1917  * As an optimization, update the page's dirty field if a modified bit is
1918  * found while counting reference bits.  This opportunistic update can be
1919  * performed at low cost and can eliminate the need for some future calls
1920  * to pmap_is_modified().  However, since this function stops after
1921  * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
1922  * dirty pages.  Those dirty pages will only be detected by a future call
1923  * to pmap_is_modified().
1924  */
1925 static int
1926 mmu_booke_ts_referenced(vm_page_t m)
1927 {
1928 	pte_t *pte;
1929 	pv_entry_t pv;
1930 	int count;
1931 
1932 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1933 	    ("mmu_booke_ts_referenced: page %p is not managed", m));
1934 	count = 0;
1935 	rw_wlock(&pvh_global_lock);
1936 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
1937 		PMAP_LOCK(pv->pv_pmap);
1938 		if ((pte = pte_find(pv->pv_pmap, pv->pv_va)) != NULL &&
1939 		    PTE_ISVALID(pte)) {
1940 			if (PTE_ISMODIFIED(pte))
1941 				vm_page_dirty(m);
1942 			if (PTE_ISREFERENCED(pte)) {
1943 				mtx_lock_spin(&tlbivax_mutex);
1944 				tlb_miss_lock();
1945 
1946 				tlb0_flush_entry(pv->pv_va);
1947 				*pte &= ~PTE_REFERENCED;
1948 
1949 				tlb_miss_unlock();
1950 				mtx_unlock_spin(&tlbivax_mutex);
1951 
1952 				if (++count >= PMAP_TS_REFERENCED_MAX) {
1953 					PMAP_UNLOCK(pv->pv_pmap);
1954 					break;
1955 				}
1956 			}
1957 		}
1958 		PMAP_UNLOCK(pv->pv_pmap);
1959 	}
1960 	rw_wunlock(&pvh_global_lock);
1961 	return (count);
1962 }
1963 
1964 /*
1965  * Clear the wired attribute from the mappings for the specified range of
1966  * addresses in the given pmap.  Every valid mapping within that range must
1967  * have the wired attribute set.  In contrast, invalid mappings cannot have
1968  * the wired attribute set, so they are ignored.
1969  *
1970  * The wired attribute of the page table entry is not a hardware feature, so
1971  * there is no need to invalidate any TLB entries.
1972  */
1973 static void
1974 mmu_booke_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
1975 {
1976 	vm_offset_t va;
1977 	pte_t *pte;
1978 
1979 	PMAP_LOCK(pmap);
1980 	for (va = sva; va < eva; va += PAGE_SIZE) {
1981 		if ((pte = pte_find(pmap, va)) != NULL &&
1982 		    PTE_ISVALID(pte)) {
1983 			if (!PTE_ISWIRED(pte))
1984 				panic("mmu_booke_unwire: pte %p isn't wired",
1985 				    pte);
1986 			*pte &= ~PTE_WIRED;
1987 			pmap->pm_stats.wired_count--;
1988 		}
1989 	}
1990 	PMAP_UNLOCK(pmap);
1991 
1992 }
1993 
1994 /*
1995  * Return true if the pmap's pv is one of the first 16 pvs linked to from this
1996  * page.  This count may be changed upwards or downwards in the future; it is
1997  * only necessary that true be returned for a small subset of pmaps for proper
1998  * page aging.
1999  */
2000 static bool
2001 mmu_booke_page_exists_quick(pmap_t pmap, vm_page_t m)
2002 {
2003 	pv_entry_t pv;
2004 	int loops;
2005 	bool rv;
2006 
2007 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2008 	    ("mmu_booke_page_exists_quick: page %p is not managed", m));
2009 	loops = 0;
2010 	rv = false;
2011 	rw_wlock(&pvh_global_lock);
2012 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2013 		if (pv->pv_pmap == pmap) {
2014 			rv = true;
2015 			break;
2016 		}
2017 		if (++loops >= 16)
2018 			break;
2019 	}
2020 	rw_wunlock(&pvh_global_lock);
2021 	return (rv);
2022 }
2023 
2024 /*
2025  * Return the number of managed mappings to the given physical page that are
2026  * wired.
2027  */
2028 static int
2029 mmu_booke_page_wired_mappings(vm_page_t m)
2030 {
2031 	pv_entry_t pv;
2032 	pte_t *pte;
2033 	int count = 0;
2034 
2035 	if ((m->oflags & VPO_UNMANAGED) != 0)
2036 		return (count);
2037 	rw_wlock(&pvh_global_lock);
2038 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2039 		PMAP_LOCK(pv->pv_pmap);
2040 		if ((pte = pte_find(pv->pv_pmap, pv->pv_va)) != NULL)
2041 			if (PTE_ISVALID(pte) && PTE_ISWIRED(pte))
2042 				count++;
2043 		PMAP_UNLOCK(pv->pv_pmap);
2044 	}
2045 	rw_wunlock(&pvh_global_lock);
2046 	return (count);
2047 }
2048 
2049 static int
2050 mmu_booke_dev_direct_mapped(vm_paddr_t pa, vm_size_t size)
2051 {
2052 	int i;
2053 	vm_offset_t va;
2054 
2055 	/*
2056 	 * This currently does not work for entries that
2057 	 * overlap TLB1 entries.
2058 	 */
2059 	for (i = 0; i < TLB1_ENTRIES; i ++) {
2060 		if (tlb1_iomapped(i, pa, size, &va) == 0)
2061 			return (0);
2062 	}
2063 
2064 	return (EFAULT);
2065 }
2066 
2067 void
2068 mmu_booke_dumpsys_map(vm_paddr_t pa, size_t sz, void **va)
2069 {
2070 	vm_paddr_t ppa;
2071 	vm_offset_t ofs;
2072 	vm_size_t gran;
2073 
2074 	/* Minidumps are based on virtual memory addresses. */
2075 	if (do_minidump) {
2076 		*va = (void *)(vm_offset_t)pa;
2077 		return;
2078 	}
2079 
2080 	/* Raw physical memory dumps don't have a virtual address. */
2081 	/* We always map a 256MB page at 256M. */
2082 	gran = 256 * 1024 * 1024;
2083 	ppa = rounddown2(pa, gran);
2084 	ofs = pa - ppa;
2085 	*va = (void *)gran;
2086 	tlb1_set_entry((vm_offset_t)*va, ppa, gran, _TLB_ENTRY_IO);
2087 
2088 	if (sz > (gran - ofs))
2089 		tlb1_set_entry((vm_offset_t)*va + gran, ppa + gran, gran,
2090 		    _TLB_ENTRY_IO);
2091 }
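/*
 * Illustration of the window arithmetic above: with gran = 256MB, a request
 * for pa = 0x1234_5000 gives ppa = 0x1000_0000 and ofs = 0x0234_5000, so the
 * caller reaches pa at VA gran + ofs.  When the requested range crosses the
 * 256MB boundary (sz > gran - ofs), a second window covering the next 256MB
 * of physical space is mapped directly after the first one.
 */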
2092 
2093 void
2094 mmu_booke_dumpsys_unmap(vm_paddr_t pa, size_t sz, void *va)
2095 {
2096 	vm_paddr_t ppa;
2097 	vm_offset_t ofs;
2098 	vm_size_t gran;
2099 	tlb_entry_t e;
2100 	int i;
2101 
2102 	/* Minidumps are based on virtual memory addresses. */
2103 	/* Nothing to do... */
2104 	if (do_minidump)
2105 		return;
2106 
2107 	for (i = 0; i < TLB1_ENTRIES; i++) {
2108 		tlb1_read_entry(&e, i);
2109 		if (!(e.mas1 & MAS1_VALID))
2110 			break;
2111 	}
2112 
2113 	/* Raw physical memory dumps don't have a virtual address. */
2114 	i--;
2115 	e.mas1 = 0;
2116 	e.mas2 = 0;
2117 	e.mas3 = 0;
2118 	tlb1_write_entry(&e, i);
2119 
2120 	gran = 256 * 1024 * 1024;
2121 	ppa = rounddown2(pa, gran);
2122 	ofs = pa - ppa;
2123 	if (sz > (gran - ofs)) {
2124 		i--;
2125 		e.mas1 = 0;
2126 		e.mas2 = 0;
2127 		e.mas3 = 0;
2128 		tlb1_write_entry(&e, i);
2129 	}
2130 }
2131 
2132 extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1];
2133 
2134 void
2135 mmu_booke_scan_init(void)
2136 {
2137 	vm_offset_t va;
2138 	pte_t *pte;
2139 	int i;
2140 
2141 	if (!do_minidump) {
2142 		/* Initialize phys. segments for dumpsys(). */
2143 		memset(&dump_map, 0, sizeof(dump_map));
2144 		mem_regions(&physmem_regions, &physmem_regions_sz, &availmem_regions,
2145 		    &availmem_regions_sz);
2146 		for (i = 0; i < physmem_regions_sz; i++) {
2147 			dump_map[i].pa_start = physmem_regions[i].mr_start;
2148 			dump_map[i].pa_size = physmem_regions[i].mr_size;
2149 		}
2150 		return;
2151 	}
2152 
2153 	/* Virtual segments for minidumps: */
2154 	memset(&dump_map, 0, sizeof(dump_map));
2155 
2156 	/* 1st: kernel .data and .bss. */
2157 	dump_map[0].pa_start = trunc_page((uintptr_t)_etext);
2158 	dump_map[0].pa_size =
2159 	    round_page((uintptr_t)_end) - dump_map[0].pa_start;
2160 
2161 	/* 2nd: msgbuf and tables (see pmap_bootstrap()). */
2162 	dump_map[1].pa_start = data_start;
2163 	dump_map[1].pa_size = data_end - data_start;
2164 
2165 	/* 3rd: kernel VM. */
2166 	va = dump_map[1].pa_start + dump_map[1].pa_size;
2167 	/* Find start of next chunk (from va). */
2168 	while (va < virtual_end) {
2169 		/* Don't dump the buffer cache. */
2170 		if (va >= kmi.buffer_sva && va < kmi.buffer_eva) {
2171 			va = kmi.buffer_eva;
2172 			continue;
2173 		}
2174 		pte = pte_find(kernel_pmap, va);
2175 		if (pte != NULL && PTE_ISVALID(pte))
2176 			break;
2177 		va += PAGE_SIZE;
2178 	}
2179 	if (va < virtual_end) {
2180 		dump_map[2].pa_start = va;
2181 		va += PAGE_SIZE;
2182 		/* Find last page in chunk. */
2183 		while (va < virtual_end) {
2184 			/* Don't run into the buffer cache. */
2185 			if (va == kmi.buffer_sva)
2186 				break;
2187 			pte = pte_find(kernel_pmap, va);
2188 			if (pte == NULL || !PTE_ISVALID(pte))
2189 				break;
2190 			va += PAGE_SIZE;
2191 		}
2192 		dump_map[2].pa_size = va - dump_map[2].pa_start;
2193 	}
2194 }
2195 
2196 /*
2197  * Map a set of physical memory pages into the kernel virtual address space.
2198  * Return a pointer to where it is mapped. This routine is intended to be used
2199  * for mapping device memory, NOT real memory.
2200  */
2201 static void *
2202 mmu_booke_mapdev(vm_paddr_t pa, vm_size_t size)
2203 {
2204 
2205 	return (mmu_booke_mapdev_attr(pa, size, VM_MEMATTR_DEFAULT));
2206 }
2207 
2208 static int
2209 tlb1_find_pa(vm_paddr_t pa, tlb_entry_t *e)
2210 {
2211 	int i;
2212 
2213 	for (i = 0; i < TLB1_ENTRIES; i++) {
2214 		tlb1_read_entry(e, i);
2215 		if ((e->mas1 & MAS1_VALID) == 0)
2216 			continue;
2217 		if (e->phys == pa)
2218 			return (i);
2219 	}
2220 	return (-1);
2221 }
2222 
2223 static void *
2224 mmu_booke_mapdev_attr(vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
2225 {
2226 	tlb_entry_t e;
2227 	vm_paddr_t tmppa;
2228 #ifndef __powerpc64__
2229 	uintptr_t tmpva;
2230 #endif
2231 	uintptr_t va, retva;
2232 	vm_size_t sz;
2233 	int i;
2234 	int wimge;
2235 
2236 	/*
2237 	 * Check if this is premapped in TLB1.
2238 	 */
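	/*
	 * The scan below looks for an existing TLB1 entry whose WIMG attributes
	 * match the request and which covers the start of [pa, pa + size).  If
	 * one is found, physically contiguous entries are chained via
	 * tlb1_find_pa() until either the whole range is covered, in which case
	 * the existing virtual address is simply reused, or a gap or attribute
	 * mismatch forces a fresh mapping to be created further below.
	 */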
2239 	sz = size;
2240 	tmppa = pa;
2241 	va = ~0;
2242 	wimge = tlb_calc_wimg(pa, ma);
2243 	for (i = 0; i < TLB1_ENTRIES; i++) {
2244 		tlb1_read_entry(&e, i);
2245 		if (!(e.mas1 & MAS1_VALID))
2246 			continue;
2247 		if (wimge != (e.mas2 & (MAS2_WIMGE_MASK & ~_TLB_ENTRY_SHARED)))
2248 			continue;
2249 		if (tmppa >= e.phys && tmppa < e.phys + e.size) {
2250 			va = e.virt + (pa - e.phys);
2251 			tmppa = e.phys + e.size;
2252 			sz -= MIN(sz, e.size - (pa - e.phys));
2253 			while (sz > 0 && (i = tlb1_find_pa(tmppa, &e)) != -1) {
2254 				if (wimge != (e.mas2 & (MAS2_WIMGE_MASK & ~_TLB_ENTRY_SHARED)))
2255 					break;
2256 				sz -= MIN(sz, e.size);
2257 				tmppa = e.phys + e.size;
2258 			}
2259 			if (sz != 0)
2260 				break;
2261 			return ((void *)va);
2262 		}
2263 	}
2264 
2265 	size = roundup(size, PAGE_SIZE);
2266 
2267 #ifdef __powerpc64__
2268 	KASSERT(pa < VM_MAPDEV_PA_MAX,
2269 	    ("Unsupported physical address! %lx", pa));
2270 	va = VM_MAPDEV_BASE + pa;
2271 	retva = va;
2272 #ifdef POW2_MAPPINGS
2273 	/*
2274 	 * Align the mapping to a power of 2 size, taking into account that we
2275 	 * may need to increase the size multiple times to satisfy the size and
2276 	 * alignment requirements.
2277 	 *
2278 	 * This works in the general case because it's very rare (near never?)
2279 	 * to have different access properties (WIMG) within a single
2280 	 * power-of-two region.  If a design does call for that, POW2_MAPPINGS
2281 	 * can be undefined, and exact mappings will be used instead.
2282 	 */
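	/*
	 * Worked example of the alignment loop below (values are illustrative
	 * only): a 12KB (0x3000) request whose VA ends in ...7000 is first
	 * rounded up to a 16KB entry, but neither a 16KB nor a 32KB window
	 * aligned at or below the VA can cover [va, va + 0x3000), so the size
	 * grows to 64KB and both va and pa are rounded down to a 64KB boundary.
	 */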
2283 	sz = size;
2284 	size = roundup2(size, 1 << ilog2(size));
2285 	while (rounddown2(va, size) + size < va + sz)
2286 		size <<= 1;
2287 	va = rounddown2(va, size);
2288 	pa = rounddown2(pa, size);
2289 #endif
2290 #else
2291 	/*
2292 	 * The device mapping area is between VM_MAXUSER_ADDRESS and
2293 	 * VM_MIN_KERNEL_ADDRESS.  This gives 1GB of device addressing.
2294 	 */
2295 #ifdef SPARSE_MAPDEV
2296 	/*
2297 	 * With a sparse mapdev, align to the largest starting region.  This
2298 	 * could feasibly be optimized for a 'best-fit' alignment, but that
2299 	 * calculation could be very costly.
2300 	 * Align to the smaller of:
2301 	 * - first set bit in overlap of (pa & size mask)
2302 	 * - largest size envelope
2303 	 *
2304 	 * It's possible the device mapping may start at a PA that's not larger
2305 	 * than the size mask, so we need to offset in to maximize the TLB entry
2306 	 * range and minimize the number of used TLB entries.
2307 	 */
2308 	do {
2309 	    tmpva = tlb1_map_base;
2310 	    sz = ffsl((~((1 << flsl(size-1)) - 1)) & pa);
2311 	    sz = sz ? min(roundup(sz + 3, 4), flsl(size) - 1) : flsl(size) - 1;
2312 	    va = roundup(tlb1_map_base, 1 << sz) | (((1 << sz) - 1) & pa);
2313 	} while (!atomic_cmpset_int(&tlb1_map_base, tmpva, va + size));
2314 #endif
2315 	va = atomic_fetchadd_int(&tlb1_map_base, size);
2316 	retva = va;
2317 #endif
2318 
2319 	if (tlb1_mapin_region(va, pa, size, tlb_calc_wimg(pa, ma)) != size)
2320 		return (NULL);
2321 
2322 	return ((void *)retva);
2323 }
2324 
2325 /*
2326  * 'Unmap' a range mapped by mmu_booke_mapdev().
2327  */
2328 static void
2329 mmu_booke_unmapdev(void *p, vm_size_t size)
2330 {
2331 #ifdef SUPPORTS_SHRINKING_TLB1
2332 	void *base;
2333 	vm_offset_t offset, va;
2334 
2335 	/*
2336 	 * Unmap only if this is inside kernel virtual space.
2337 	 */
2338 	va = (vm_offset_t)p;
2339 	if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
2340 		base = trunc_page(va);
2341 		offset = va & PAGE_MASK;
2342 		size = roundup(offset + size, PAGE_SIZE);
2343 		mmu_booke_qremove(base, atop(size));
2344 		kva_free((vm_offset_t)base, size);
2345 	}
2346 #endif
2347 }
2348 
2349 /*
2350  * mmu_booke_object_init_pt preloads the ptes for a given object into the
2351  * specified pmap. This eliminates the blast of soft faults on process startup
2352  * and immediately after an mmap.
2353  */
2354 static void
2355 mmu_booke_object_init_pt(pmap_t pmap, vm_offset_t addr,
2356     vm_object_t object, vm_pindex_t pindex, vm_size_t size)
2357 {
2358 
2359 	VM_OBJECT_ASSERT_WLOCKED(object);
2360 	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
2361 	    ("mmu_booke_object_init_pt: non-device object"));
2362 }
2363 
2364 /*
2365  * Perform the pmap work for mincore.
2366  */
2367 static int
2368 mmu_booke_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
2369 {
2370 
2371 	/* XXX: this should be implemented at some point */
2372 	return (0);
2373 }
2374 
2375 static int
2376 mmu_booke_change_attr(void *sva, vm_size_t sz, vm_memattr_t mode)
2377 {
2378 	vm_offset_t addr, va;
2379 	pte_t *pte;
2380 	int i, j;
2381 	tlb_entry_t e;
2382 
2383 	addr = (vm_offset_t)sva;
2384 	addr = trunc_page(addr);
2385 
2386 	/* Only allow changes to mapped kernel addresses.  This includes:
2387 	 * - KVA
2388 	 * - DMAP (powerpc64)
2389 	 * - Device mappings
2390 	 */
2391 	if (addr <= VM_MAXUSER_ADDRESS ||
2392 #ifdef __powerpc64__
2393 	    (addr >= tlb1_map_base && addr < DMAP_BASE_ADDRESS) ||
2394 	    (addr > DMAP_MAX_ADDRESS && addr < VM_MIN_KERNEL_ADDRESS) ||
2395 #else
2396 	    (addr >= tlb1_map_base && addr < VM_MIN_KERNEL_ADDRESS) ||
2397 #endif
2398 	    (addr > VM_MAX_KERNEL_ADDRESS))
2399 		return (EINVAL);
2400 
2401 	/* Check TLB1 mappings */
2402 	for (i = 0; i < TLB1_ENTRIES; i++) {
2403 		tlb1_read_entry(&e, i);
2404 		if (!(e.mas1 & MAS1_VALID))
2405 			continue;
2406 		if (addr >= e.virt && addr < e.virt + e.size)
2407 			break;
2408 	}
2409 	if (i < TLB1_ENTRIES) {
2410 		/* Only allow full mappings to be modified for now. */
2411 		/* Validate the range. */
2412 		for (j = i, va = addr; va < addr + sz; va += e.size, j++) {
2413 			tlb1_read_entry(&e, j);
2414 			if (va != e.virt || (sz - (va - addr) < e.size))
2415 				return (EINVAL);
2416 		}
2417 		for (va = addr; va < addr + sz; va += e.size, i++) {
2418 			tlb1_read_entry(&e, i);
2419 			e.mas2 &= ~MAS2_WIMGE_MASK;
2420 			e.mas2 |= tlb_calc_wimg(e.phys, mode);
2421 
2422 			/*
2423 			 * Write it out to the TLB.  Should really re-sync with other
2424 			 * cores.
2425 			 */
2426 			tlb1_write_entry(&e, i);
2427 		}
2428 		return (0);
2429 	}
2430 
2431 	/* Not in TLB1, try through pmap */
2432 	/* First validate the range. */
2433 	for (va = addr; va < addr + sz; va += PAGE_SIZE) {
2434 		pte = pte_find(kernel_pmap, va);
2435 		if (pte == NULL || !PTE_ISVALID(pte))
2436 			return (EINVAL);
2437 	}
2438 
2439 	mtx_lock_spin(&tlbivax_mutex);
2440 	tlb_miss_lock();
2441 	for (va = addr; va < addr + sz; va += PAGE_SIZE) {
2442 		pte = pte_find(kernel_pmap, va);
2443 		*pte &= ~(PTE_MAS2_MASK << PTE_MAS2_SHIFT);
2444 		*pte |= tlb_calc_wimg(PTE_PA(pte), mode) << PTE_MAS2_SHIFT;
2445 		tlb0_flush_entry(va);
2446 	}
2447 	tlb_miss_unlock();
2448 	mtx_unlock_spin(&tlbivax_mutex);
2449 
2450 	return (0);
2451 }
2452 
2453 static void
2454 mmu_booke_page_array_startup(long pages)
2455 {
2456 	vm_page_array_size = pages;
2457 }
2458 
2459 /**************************************************************************/
2460 /* TID handling */
2461 /**************************************************************************/
2462 
2463 /*
2464  * Allocate a TID. If necessary, steal one from someone else.
2465  * The new TID is flushed from the TLB before returning.
2466  */
2467 static tlbtid_t
2468 tid_alloc(pmap_t pmap)
2469 {
2470 	tlbtid_t tid;
2471 	int thiscpu;
2472 
2473 	KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap"));
2474 
2475 	CTR2(KTR_PMAP, "%s: s (pmap = %p)", __func__, pmap);
2476 
2477 	thiscpu = PCPU_GET(cpuid);
2478 
2479 	tid = PCPU_GET(booke.tid_next);
2480 	if (tid > TID_MAX)
2481 		tid = TID_MIN;
2482 	PCPU_SET(booke.tid_next, tid + 1);
2483 
2484 	/* If we are stealing TID then clear the relevant pmap's field */
2485 	if (tidbusy[thiscpu][tid] != NULL) {
2486 		CTR2(KTR_PMAP, "%s: warning: stealing tid %d", __func__, tid);
2487 
2488 		tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE;
2489 
2490 		/* Flush all entries from TLB0 matching this TID. */
2491 		tid_flush(tid);
2492 	}
2493 
2494 	tidbusy[thiscpu][tid] = pmap;
2495 	pmap->pm_tid[thiscpu] = tid;
2496 	__asm __volatile("msync; isync");
2497 
2498 	CTR3(KTR_PMAP, "%s: e (%02d next = %02d)", __func__, tid,
2499 	    PCPU_GET(booke.tid_next));
2500 
2501 	return (tid);
2502 }
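/*
 * TIDs are handed out round-robin per CPU: booke.tid_next wraps from TID_MAX
 * back to TID_MIN, and if the chosen TID is still owned by another pmap on
 * this CPU, that pmap loses it (its pm_tid slot is reset to TID_NONE) and
 * every TLB0 entry tagged with the TID is flushed before it is reused.
 */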
2503 
2504 /**************************************************************************/
2505 /* TLB0 handling */
2506 /**************************************************************************/
2507 
2508 /* Convert TLB0 va and way number to tlb0[] table index. */
2509 static inline unsigned int
2510 tlb0_tableidx(vm_offset_t va, unsigned int way)
2511 {
2512 	unsigned int idx;
2513 
2514 	idx = (way * TLB0_ENTRIES_PER_WAY);
2515 	idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT;
2516 	return (idx);
2517 }
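/*
 * For example, assuming a 4-way set-associative TLB0 with 128 entries per
 * way (TLB0_ENTRIES_PER_WAY == 128), way 3 and a VA whose set-index bits
 * evaluate to 0x21 map to idx = 3 * 128 + 0x21 = 417.
 */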
2518 
2519 /*
2520  * Invalidate TLB0 entry.
2521  */
2522 static inline void
2523 tlb0_flush_entry(vm_offset_t va)
2524 {
2525 
2526 	CTR2(KTR_PMAP, "%s: s va=0x%08x", __func__, va);
2527 
2528 	mtx_assert(&tlbivax_mutex, MA_OWNED);
2529 
2530 	__asm __volatile("tlbivax 0, %0" :: "r"(va & MAS2_EPN_MASK));
2531 	__asm __volatile("isync; msync");
2532 	__asm __volatile("tlbsync; msync");
2533 
2534 	CTR1(KTR_PMAP, "%s: e", __func__);
2535 }
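/*
 * tlbivax invalidates matching TLB0 entries and, on multiprocessor parts, is
 * broadcast to the other cores; the trailing tlbsync/msync pair waits for the
 * invalidation to complete everywhere.  The whole sequence is serialized by
 * tlbivax_mutex, which the assertion above requires the caller to hold.
 */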
2536 
2537 /**************************************************************************/
2538 /* TLB1 handling */
2539 /**************************************************************************/
2540 
2541 /*
2542  * TLB1 mapping notes:
2543  *
2544  * TLB1[0]	Kernel text and data.
2545  * TLB1[1-15]	Additional kernel text and data mappings (if required), PCI
2546  *		windows, other devices mappings.
2547  */
2548 
2549 /*
2550  * Read an entry from the given TLB1 slot.
2551  */
2552 void
2553 tlb1_read_entry(tlb_entry_t *entry, unsigned int slot)
2554 {
2555 	register_t msr;
2556 	uint32_t mas0;
2557 
2558 	KASSERT((entry != NULL), ("%s(): Entry is NULL!", __func__));
2559 
2560 	msr = mfmsr();
2561 	__asm __volatile("wrteei 0");
2562 
2563 	mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(slot);
2564 	mtspr(SPR_MAS0, mas0);
2565 	__asm __volatile("isync; tlbre");
2566 
2567 	entry->mas1 = mfspr(SPR_MAS1);
2568 	entry->mas2 = mfspr(SPR_MAS2);
2569 	entry->mas3 = mfspr(SPR_MAS3);
2570 
2571 	switch ((mfpvr() >> 16) & 0xFFFF) {
2572 	case FSL_E500v2:
2573 	case FSL_E500mc:
2574 	case FSL_E5500:
2575 	case FSL_E6500:
2576 		entry->mas7 = mfspr(SPR_MAS7);
2577 		break;
2578 	default:
2579 		entry->mas7 = 0;
2580 		break;
2581 	}
2582 	__asm __volatile("wrtee %0" :: "r"(msr));
2583 
2584 	entry->virt = entry->mas2 & MAS2_EPN_MASK;
2585 	entry->phys = ((vm_paddr_t)(entry->mas7 & MAS7_RPN) << 32) |
2586 	    (entry->mas3 & MAS3_RPN);
2587 	entry->size =
2588 	    tsize2size((entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT);
2589 }
2590 
2591 struct tlbwrite_args {
2592 	tlb_entry_t *e;
2593 	unsigned int idx;
2594 };
2595 
2596 static uint32_t
2597 tlb1_find_free(void)
2598 {
2599 	tlb_entry_t e;
2600 	int i;
2601 
2602 	for (i = 0; i < TLB1_ENTRIES; i++) {
2603 		tlb1_read_entry(&e, i);
2604 		if ((e.mas1 & MAS1_VALID) == 0)
2605 			return (i);
2606 	}
2607 	return (-1);
2608 }
2609 
2610 static void
2611 tlb1_purge_va_range(vm_offset_t va, vm_size_t size)
2612 {
2613 	tlb_entry_t e;
2614 	int i;
2615 
2616 	for (i = 0; i < TLB1_ENTRIES; i++) {
2617 		tlb1_read_entry(&e, i);
2618 		if ((e.mas1 & MAS1_VALID) == 0)
2619 			continue;
2620 		if ((e.mas2 & MAS2_EPN_MASK) >= va &&
2621 		    (e.mas2 & MAS2_EPN_MASK) < va + size) {
2622 			mtspr(SPR_MAS1, e.mas1 & ~MAS1_VALID);
2623 			__asm __volatile("isync; tlbwe; isync; msync");
2624 		}
2625 	}
2626 }
2627 
2628 static void
2629 tlb1_write_entry_int(void *arg)
2630 {
2631 	struct tlbwrite_args *args = arg;
2632 	uint32_t idx, mas0;
2633 
2634 	idx = args->idx;
2635 	if (idx == -1) {
2636 		tlb1_purge_va_range(args->e->virt, args->e->size);
2637 		idx = tlb1_find_free();
2638 		if (idx == -1)
2639 			panic("No free TLB1 entries!\n");
2640 	}
2641 	/* Select entry */
2642 	mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(idx);
2643 
2644 	mtspr(SPR_MAS0, mas0);
2645 	mtspr(SPR_MAS1, args->e->mas1);
2646 	mtspr(SPR_MAS2, args->e->mas2);
2647 	mtspr(SPR_MAS3, args->e->mas3);
2648 	switch ((mfpvr() >> 16) & 0xFFFF) {
2649 	case FSL_E500mc:
2650 	case FSL_E5500:
2651 	case FSL_E6500:
2652 		mtspr(SPR_MAS8, 0);
2653 		/* FALLTHROUGH */
2654 	case FSL_E500v2:
2655 		mtspr(SPR_MAS7, args->e->mas7);
2656 		break;
2657 	default:
2658 		break;
2659 	}
2660 
2661 	__asm __volatile("isync; tlbwe; isync; msync");
2662 
2663 }
2664 
2665 static void
2666 tlb1_write_entry_sync(void *arg)
2667 {
2668 	/* Empty synchronization point for smp_rendezvous(). */
2669 }
2670 
2671 /*
2672  * Write given entry to TLB1 hardware.
2673  */
2674 static void
2675 tlb1_write_entry(tlb_entry_t *e, unsigned int idx)
2676 {
2677 	struct tlbwrite_args args;
2678 
2679 	args.e = e;
2680 	args.idx = idx;
2681 
2682 #ifdef SMP
2683 	if ((e->mas2 & _TLB_ENTRY_SHARED) && smp_started) {
2684 		mb();
2685 		smp_rendezvous(tlb1_write_entry_sync,
2686 		    tlb1_write_entry_int,
2687 		    tlb1_write_entry_sync, &args);
2688 	} else
2689 #endif
2690 	{
2691 		register_t msr;
2692 
2693 		msr = mfmsr();
2694 		__asm __volatile("wrteei 0");
2695 		tlb1_write_entry_int(&args);
2696 		__asm __volatile("wrtee %0" :: "r"(msr));
2697 	}
2698 }
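/*
 * Entries marked _TLB_ENTRY_SHARED describe mappings that must look the same
 * in every core's TLB1 (e.g. kernel text/data and shared device windows), so
 * once the other CPUs are running the write is replayed on all of them via
 * smp_rendezvous(); private entries are written locally with external
 * interrupts disabled around the tlbwe sequence.
 */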
2699 
2700 /*
2701  * Convert TLB TSIZE value to mapped region size.
2702  */
2703 static vm_size_t
2704 tsize2size(unsigned int tsize)
2705 {
2706 
2707 	/*
2708 	 * size = 2^tsize KB
2709 	 * size = 2^tsize * 2^10 bytes = 2^(tsize + 10) bytes
2710 	 */
2711 
2712 	return ((1UL << tsize) * 1024);
2713 }
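/*
 * For example, tsize 2 gives 2^2 KB = 4KB and tsize 18 gives 2^18 KB = 256MB;
 * size2tsize() below is the inverse for power-of-two sizes, e.g.
 * ilog2(256MB) - 10 = 28 - 10 = 18.
 */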
2714 
2715 /*
2716  * Convert region size (must be a power of 2) to TLB TSIZE value.
2717  */
2718 static unsigned int
2719 size2tsize(vm_size_t size)
2720 {
2721 
2722 	return (ilog2(size) - 10);
2723 }
2724 
2725 /*
2726  * Register permanent kernel mapping in TLB1.
2727  *
2728  * Entries are installed with MAS1_IPROT set and are not supposed to be
2729  * invalidated; a free slot is chosen at write time (see tlb1_write_entry()).
2730  */
2731 int
2732 tlb1_set_entry(vm_offset_t va, vm_paddr_t pa, vm_size_t size,
2733     uint32_t flags)
2734 {
2735 	tlb_entry_t e;
2736 	uint32_t ts, tid;
2737 	int tsize, index;
2738 
2739 	/* First try to update an existing entry. */
2740 	for (index = 0; index < TLB1_ENTRIES; index++) {
2741 		tlb1_read_entry(&e, index);
2742 		/* Check if we're just updating the flags, and update them. */
2743 		if (e.phys == pa && e.virt == va && e.size == size) {
2744 			e.mas2 = (va & MAS2_EPN_MASK) | flags;
2745 			tlb1_write_entry(&e, index);
2746 			return (0);
2747 		}
2748 	}
2749 
2750 	/* Convert size to TSIZE */
2751 	tsize = size2tsize(size);
2752 
2753 	tid = (TID_KERNEL << MAS1_TID_SHIFT) & MAS1_TID_MASK;
2754 	/* XXX TS is hardcoded to 0 for now as we only use a single address space */
2755 	ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK;
2756 
2757 	e.phys = pa;
2758 	e.virt = va;
2759 	e.size = size;
2760 	e.mas1 = MAS1_VALID | MAS1_IPROT | ts | tid;
2761 	e.mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK);
2762 	e.mas2 = (va & MAS2_EPN_MASK) | flags;
2763 
2764 	/* Set supervisor RWX permission bits */
2765 	e.mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX;
2766 	e.mas7 = (pa >> 32) & MAS7_RPN;
2767 
2768 	tlb1_write_entry(&e, -1);
2769 
2770 	return (0);
2771 }
2772 
2773 /*
2774  * Map in contiguous RAM region into the TLB1.
2775  */
2776 static vm_size_t
2777 tlb1_mapin_region(vm_offset_t va, vm_paddr_t pa, vm_size_t size, int wimge)
2778 {
2779 	vm_offset_t base;
2780 	vm_size_t mapped, sz, ssize;
2781 	int shift;
2782 
2783 	mapped = 0;
2784 	base = va;
2785 	ssize = size;
2786 
2787 	if (mmuv2)
2788 		shift = 1;
2789 	else
2790 		shift = 2;
2791 
2792 	while (size > 0) {
2793 		sz = 1UL << (ilog2(size) & ~(shift - 1));
2794 		/* Align size to PA */
2795 		if (pa % sz != 0) {
2796 			do {
2797 				sz >>= shift;
2798 			} while (pa % sz != 0);
2799 		}
2800 		/* Now align from there to VA */
2801 		if (va % sz != 0) {
2802 			do {
2803 				sz >>= shift;
2804 			} while (va % sz != 0);
2805 		}
2806 #ifdef __powerpc64__
2807 		/*
2808 		 * Clamp TLB1 entries to 4G.
2809 		 *
2810 		 * While the e6500 supports up to 1TB mappings, the e5500
2811 		 * only supports up to 4G mappings. (0b1011)
2812 		 *
2813 		 * If any e6500 machines capable of supporting a very
2814 		 * large amount of memory appear in the future, we can
2815 		 * revisit this.
2816 		 *
2817 		 * For now, though, since we have plenty of space in TLB1,
2818 		 * always avoid creating entries larger than 4GB.
2819 		 */
2820 		if (!mmuv2)
2821 			sz = MIN(sz, 1UL << 32);
2822 #endif
2823 		if (bootverbose)
2824 			printf("Wiring VA=%p to PA=%jx (size=%lx)\n",
2825 			    (void *)va, (uintmax_t)pa, (long)sz);
2826 		if (tlb1_set_entry(va, pa, sz,
2827 		    _TLB_ENTRY_SHARED | wimge) < 0)
2828 			return (mapped);
2829 		size -= sz;
2830 		pa += sz;
2831 		va += sz;
2832 	}
2833 
2834 	mapped = (va - base);
2835 	if (bootverbose)
2836 		printf("mapped size 0x%"PRIxPTR" (wasted space 0x%"PRIxPTR")\n",
2837 		    mapped, mapped - ssize);
2838 
2839 	return (mapped);
2840 }
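/*
 * Sizing example (illustrative only): mapping 768MB of RAM at PA/VA 0 on an
 * MMU that allows power-of-two entry sizes (mmuv2) uses a 512MB entry
 * followed by a 256MB entry, while on cores limited to power-of-four sizes
 * the same region is covered by three 256MB entries.
 */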
2841 
2842 /*
2843  * TLB1 initialization routine, to be called after the very first
2844  * assembler level setup done in locore.S.
2845  */
2846 void
2847 tlb1_init(void)
2848 {
2849 	vm_offset_t mas2;
2850 	uint32_t mas0, mas1, mas3, mas7;
2851 	uint32_t tsz;
2852 
2853 	tlb1_get_tlbconf();
2854 
2855 	mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(0);
2856 	mtspr(SPR_MAS0, mas0);
2857 	__asm __volatile("isync; tlbre");
2858 
2859 	mas1 = mfspr(SPR_MAS1);
2860 	mas2 = mfspr(SPR_MAS2);
2861 	mas3 = mfspr(SPR_MAS3);
2862 	mas7 = mfspr(SPR_MAS7);
2863 
2864 	kernload =  ((vm_paddr_t)(mas7 & MAS7_RPN) << 32) |
2865 	    (mas3 & MAS3_RPN);
2866 
2867 	tsz = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
2868 	kernsize += (tsz > 0) ? tsize2size(tsz) : 0;
2869 	kernstart = trunc_page(mas2);
2870 
2871 	/* Setup TLB miss defaults */
2872 	set_mas4_defaults();
2873 }
2874 
2875 /*
2876  * pmap_early_io_unmap() should be used shortly after a matching
2877  * pmap_early_io_map(), as in the following snippet:
2878  *
2879  * x = pmap_early_io_map(...);
2880  * <do something with x>
2881  * pmap_early_io_unmap(x, size);
2882  *
2883  * No other early I/O mappings should be made in between.
2884  */
2885 void
2886 pmap_early_io_unmap(vm_offset_t va, vm_size_t size)
2887 {
2888 	int i;
2889 	tlb_entry_t e;
2890 	vm_size_t isize;
2891 
2892 	size = roundup(size, PAGE_SIZE);
2893 	isize = size;
2894 	for (i = 0; i < TLB1_ENTRIES && size > 0; i++) {
2895 		tlb1_read_entry(&e, i);
2896 		if (!(e.mas1 & MAS1_VALID))
2897 			continue;
2898 		if (va <= e.virt && (va + isize) >= (e.virt + e.size)) {
2899 			size -= e.size;
2900 			e.mas1 &= ~MAS1_VALID;
2901 			tlb1_write_entry(&e, i);
2902 		}
2903 	}
2904 	if (tlb1_map_base == va + isize)
2905 		tlb1_map_base -= isize;
2906 }
2907 
2908 vm_offset_t
2909 pmap_early_io_map(vm_paddr_t pa, vm_size_t size)
2910 {
2911 	vm_paddr_t pa_base;
2912 	vm_offset_t va, sz;
2913 	int i;
2914 	tlb_entry_t e;
2915 
2916 	KASSERT(!pmap_bootstrapped, ("Do not use after PMAP is up!"));
2917 
2918 	for (i = 0; i < TLB1_ENTRIES; i++) {
2919 		tlb1_read_entry(&e, i);
2920 		if (!(e.mas1 & MAS1_VALID))
2921 			continue;
2922 		if (pa >= e.phys && (pa + size) <=
2923 		    (e.phys + e.size))
2924 			return (e.virt + (pa - e.phys));
2925 	}
2926 
2927 	pa_base = rounddown(pa, PAGE_SIZE);
2928 	size = roundup(size + (pa - pa_base), PAGE_SIZE);
2929 	tlb1_map_base = roundup2(tlb1_map_base, 1 << (ilog2(size) & ~1));
2930 	va = tlb1_map_base + (pa - pa_base);
2931 
2932 	do {
2933 		sz = 1 << (ilog2(size) & ~1);
2934 		tlb1_set_entry(tlb1_map_base, pa_base, sz,
2935 		    _TLB_ENTRY_SHARED | _TLB_ENTRY_IO);
2936 		size -= sz;
2937 		pa_base += sz;
2938 		tlb1_map_base += sz;
2939 	} while (size > 0);
2940 
2941 	return (va);
2942 }
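/*
 * Early I/O mappings are carved into power-of-four sized TLB1 entries, which
 * both old and new MMU revisions can encode: for example, a 12KB request
 * becomes three 4KB entries and a 5MB request becomes a 4MB entry followed by
 * a 1MB entry, with tlb1_map_base advancing past each chunk.
 */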
2943 
2944 void
2945 pmap_track_page(pmap_t pmap, vm_offset_t va)
2946 {
2947 	vm_paddr_t pa;
2948 	vm_page_t page;
2949 	struct pv_entry *pve;
2950 
2951 	va = trunc_page(va);
2952 	pa = pmap_kextract(va);
2953 	page = PHYS_TO_VM_PAGE(pa);
2954 
2955 	rw_wlock(&pvh_global_lock);
2956 	PMAP_LOCK(pmap);
2957 
2958 	TAILQ_FOREACH(pve, &page->md.pv_list, pv_link) {
2959 		if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
2960 			goto out;
2961 		}
2962 	}
2963 	page->md.pv_tracked = true;
2964 	pv_insert(pmap, va, page);
2965 out:
2966 	PMAP_UNLOCK(pmap);
2967 	rw_wunlock(&pvh_global_lock);
2968 }
2969 
2970 /*
2971  * Setup MAS4 defaults.
2972  * These values are loaded to MAS0-2 on a TLB miss.
2973  */
2974 static void
2975 set_mas4_defaults(void)
2976 {
2977 	uint32_t mas4;
2978 
2979 	/* Defaults: TLB0, PID0, TSIZED=4K */
2980 	mas4 = MAS4_TLBSELD0;
2981 	mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK;
2982 #ifdef SMP
2983 	mas4 |= MAS4_MD;
2984 #endif
2985 	mtspr(SPR_MAS4, mas4);
2986 	__asm __volatile("isync");
2987 }
2988 
2989 /*
2990  * Return 0 if the physical IO range is encompassed by one of the TLB1
2991  * entries, otherwise return the related error code.
2992  */
2993 static int
2994 tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va)
2995 {
2996 	uint32_t prot;
2997 	vm_paddr_t pa_start;
2998 	vm_paddr_t pa_end;
2999 	unsigned int entry_tsize;
3000 	vm_size_t entry_size;
3001 	tlb_entry_t e;
3002 
3003 	*va = (vm_offset_t)NULL;
3004 
3005 	tlb1_read_entry(&e, i);
3006 	/* Skip invalid entries */
3007 	if (!(e.mas1 & MAS1_VALID))
3008 		return (EINVAL);
3009 
3010 	/*
3011 	 * The entry must be cache-inhibited, guarded, and r/w
3012 	 * so it can function as an i/o page
3013 	 */
3014 	prot = e.mas2 & (MAS2_I | MAS2_G);
3015 	if (prot != (MAS2_I | MAS2_G))
3016 		return (EPERM);
3017 
3018 	prot = e.mas3 & (MAS3_SR | MAS3_SW);
3019 	if (prot != (MAS3_SR | MAS3_SW))
3020 		return (EPERM);
3021 
3022 	/* The address should be within the entry range. */
3023 	entry_tsize = (e.mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
3024 	KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize"));
3025 
3026 	entry_size = tsize2size(entry_tsize);
3027 	pa_start = (((vm_paddr_t)e.mas7 & MAS7_RPN) << 32) |
3028 	    (e.mas3 & MAS3_RPN);
3029 	pa_end = pa_start + entry_size;
3030 
3031 	if ((pa < pa_start) || ((pa + size) > pa_end))
3032 		return (ERANGE);
3033 
3034 	/* Return virtual address of this mapping. */
3035 	*va = (e.mas2 & MAS2_EPN_MASK) + (pa - pa_start);
3036 	return (0);
3037 }
3038 
3039 #ifdef DDB
3040 /* Print out contents of the MAS registers for each TLB0 entry */
3041 static void
3042 #ifdef __powerpc64__
3043 tlb_print_entry(int i, uint32_t mas1, uint64_t mas2, uint32_t mas3,
3044 #else
3045 tlb_print_entry(int i, uint32_t mas1, uint32_t mas2, uint32_t mas3,
3046 #endif
3047     uint32_t mas7)
3048 {
3049 	int as;
3050 	char desc[3];
3051 	tlbtid_t tid;
3052 	vm_size_t size;
3053 	unsigned int tsize;
3054 
3055 	desc[2] = '\0';
3056 	if (mas1 & MAS1_VALID)
3057 		desc[0] = 'V';
3058 	else
3059 		desc[0] = ' ';
3060 
3061 	if (mas1 & MAS1_IPROT)
3062 		desc[1] = 'P';
3063 	else
3064 		desc[1] = ' ';
3065 
3066 	as = (mas1 & MAS1_TS_MASK) ? 1 : 0;
3067 	tid = MAS1_GETTID(mas1);
3068 
3069 	tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
3070 	size = 0;
3071 	if (tsize)
3072 		size = tsize2size(tsize);
3073 
3074 	printf("%3d: (%s) [AS=%d] "
3075 	    "sz = 0x%jx tsz = %d tid = %d mas1 = 0x%08x "
3076 	    "mas2(va) = 0x%"PRI0ptrX" mas3(pa) = 0x%08x mas7 = 0x%08x\n",
3077 	    i, desc, as, (uintmax_t)size, tsize, tid, mas1, mas2, mas3, mas7);
3078 }
3079 
3080 DB_SHOW_COMMAND(tlb0, tlb0_print_tlbentries)
3081 {
3082 	uint32_t mas0, mas1, mas3, mas7;
3083 #ifdef __powerpc64__
3084 	uint64_t mas2;
3085 #else
3086 	uint32_t mas2;
3087 #endif
3088 	int entryidx, way, idx;
3089 
3090 	printf("TLB0 entries:\n");
3091 	for (way = 0; way < TLB0_WAYS; way ++)
3092 		for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) {
3093 			mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
3094 			mtspr(SPR_MAS0, mas0);
3095 
3096 			mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT;
3097 			mtspr(SPR_MAS2, mas2);
3098 
3099 			__asm __volatile("isync; tlbre");
3100 
3101 			mas1 = mfspr(SPR_MAS1);
3102 			mas2 = mfspr(SPR_MAS2);
3103 			mas3 = mfspr(SPR_MAS3);
3104 			mas7 = mfspr(SPR_MAS7);
3105 
3106 			idx = tlb0_tableidx(mas2, way);
3107 			tlb_print_entry(idx, mas1, mas2, mas3, mas7);
3108 		}
3109 }
3110 
3111 /*
3112  * Print out contents of the MAS registers for each TLB1 entry
3113  */
3114 DB_SHOW_COMMAND(tlb1, tlb1_print_tlbentries)
3115 {
3116 	uint32_t mas0, mas1, mas3, mas7;
3117 #ifdef __powerpc64__
3118 	uint64_t mas2;
3119 #else
3120 	uint32_t mas2;
3121 #endif
3122 	int i;
3123 
3124 	printf("TLB1 entries:\n");
3125 	for (i = 0; i < TLB1_ENTRIES; i++) {
3126 		mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i);
3127 		mtspr(SPR_MAS0, mas0);
3128 
3129 		__asm __volatile("isync; tlbre");
3130 
3131 		mas1 = mfspr(SPR_MAS1);
3132 		mas2 = mfspr(SPR_MAS2);
3133 		mas3 = mfspr(SPR_MAS3);
3134 		mas7 = mfspr(SPR_MAS7);
3135 
3136 		tlb_print_entry(i, mas1, mas2, mas3, mas7);
3137 	}
3138 }
3139 #endif
3140