1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com> 5 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com> 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN 20 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 22 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 23 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 24 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 25 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 26 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 * 28 * Some hw specific parts of this pmap were derived or influenced 29 * by NetBSD's ibm4xx pmap module. More generic code is shared with 30 * a few other pmap modules from the FreeBSD tree. 31 */ 32 33 /* 34 * VM layout notes: 35 * 36 * Kernel and user threads run within one common virtual address space 37 * defined by AS=0. 38 * 39 * 32-bit pmap: 40 * Virtual address space layout: 41 * ----------------------------- 42 * 0x0000_0000 - 0x7fff_ffff : user process 43 * 0x8000_0000 - 0xbfff_ffff : pmap_mapdev()-ed area (PCI/PCIE etc.) 44 * 0xc000_0000 - 0xc0ff_ffff : kernel reserved 45 * 0xc000_0000 - data_end : kernel code+data, env, metadata etc. 46 * 0xc100_0000 - 0xffff_ffff : KVA 47 * 0xc100_0000 - 0xc100_3fff : reserved for page zero/copy 48 * 0xc100_4000 - 0xc200_3fff : reserved for ptbl bufs 49 * 0xc200_4000 - 0xc200_8fff : guard page + kstack0 50 * 0xc200_9000 - 0xfeef_ffff : actual free KVA space 51 * 52 * 64-bit pmap: 53 * Virtual address space layout: 54 * ----------------------------- 55 * 0x0000_0000_0000_0000 - 0xbfff_ffff_ffff_ffff : user process 56 * 0x0000_0000_0000_0000 - 0x8fff_ffff_ffff_ffff : text, data, heap, maps, libraries 57 * 0x9000_0000_0000_0000 - 0xafff_ffff_ffff_ffff : mmio region 58 * 0xb000_0000_0000_0000 - 0xbfff_ffff_ffff_ffff : stack 59 * 0xc000_0000_0000_0000 - 0xcfff_ffff_ffff_ffff : kernel reserved 60 * 0xc000_0000_0000_0000 - endkernel-1 : kernel code & data 61 * endkernel - msgbufp-1 : flat device tree 62 * msgbufp - kernel_pdir-1 : message buffer 63 * kernel_pdir - kernel_pp2d-1 : kernel page directory 64 * kernel_pp2d - . 
: kernel pointers to page directory 65 * pmap_zero_copy_min - crashdumpmap-1 : reserved for page zero/copy 66 * crashdumpmap - ptbl_buf_pool_vabase-1 : reserved for ptbl bufs 67 * ptbl_buf_pool_vabase - virtual_avail-1 : user page directories and page tables 68 * virtual_avail - 0xcfff_ffff_ffff_ffff : actual free KVA space 69 * 0xd000_0000_0000_0000 - 0xdfff_ffff_ffff_ffff : coprocessor region 70 * 0xe000_0000_0000_0000 - 0xefff_ffff_ffff_ffff : mmio region 71 * 0xf000_0000_0000_0000 - 0xffff_ffff_ffff_ffff : direct map 72 * 0xf000_0000_0000_0000 - +Maxmem : physmem map 73 * - 0xffff_ffff_ffff_ffff : device direct map 74 */ 75 76 #include <sys/cdefs.h> 77 __FBSDID("$FreeBSD$"); 78 79 #include "opt_ddb.h" 80 #include "opt_kstack_pages.h" 81 82 #include <sys/param.h> 83 #include <sys/conf.h> 84 #include <sys/malloc.h> 85 #include <sys/ktr.h> 86 #include <sys/proc.h> 87 #include <sys/user.h> 88 #include <sys/queue.h> 89 #include <sys/systm.h> 90 #include <sys/kernel.h> 91 #include <sys/kerneldump.h> 92 #include <sys/linker.h> 93 #include <sys/msgbuf.h> 94 #include <sys/lock.h> 95 #include <sys/mutex.h> 96 #include <sys/rwlock.h> 97 #include <sys/sched.h> 98 #include <sys/smp.h> 99 #include <sys/vmmeter.h> 100 101 #include <vm/vm.h> 102 #include <vm/vm_page.h> 103 #include <vm/vm_kern.h> 104 #include <vm/vm_pageout.h> 105 #include <vm/vm_extern.h> 106 #include <vm/vm_object.h> 107 #include <vm/vm_param.h> 108 #include <vm/vm_map.h> 109 #include <vm/vm_pager.h> 110 #include <vm/vm_phys.h> 111 #include <vm/vm_pagequeue.h> 112 #include <vm/uma.h> 113 114 #include <machine/_inttypes.h> 115 #include <machine/cpu.h> 116 #include <machine/pcb.h> 117 #include <machine/platform.h> 118 119 #include <machine/tlb.h> 120 #include <machine/spr.h> 121 #include <machine/md_var.h> 122 #include <machine/mmuvar.h> 123 #include <machine/pmap.h> 124 #include <machine/pte.h> 125 126 #include <ddb/ddb.h> 127 128 #include "mmu_if.h" 129 130 #define SPARSE_MAPDEV 131 132 /* Use power-of-two mappings in mmu_booke_mapdev(), to save entries. */ 133 #define POW2_MAPPINGS 134 135 #ifdef DEBUG 136 #define debugf(fmt, args...) printf(fmt, ##args) 137 #else 138 #define debugf(fmt, args...) 139 #endif 140 141 #ifdef __powerpc64__ 142 #define PRI0ptrX "016lx" 143 #else 144 #define PRI0ptrX "08x" 145 #endif 146 147 #define TODO panic("%s: not implemented", __func__); 148 149 extern unsigned char _etext[]; 150 extern unsigned char _end[]; 151 152 extern uint32_t *bootinfo; 153 154 vm_paddr_t kernload; 155 vm_offset_t kernstart; 156 vm_size_t kernsize; 157 158 /* Message buffer and tables. */ 159 static vm_offset_t data_start; 160 static vm_size_t data_end; 161 162 /* Phys/avail memory regions. */ 163 static struct mem_region *availmem_regions; 164 static int availmem_regions_sz; 165 static struct mem_region *physmem_regions; 166 static int physmem_regions_sz; 167 168 #ifndef __powerpc64__ 169 /* Reserved KVA space and mutex for mmu_booke_zero_page. */ 170 static vm_offset_t zero_page_va; 171 static struct mtx zero_page_mutex; 172 173 /* Reserved KVA space and mutex for mmu_booke_copy_page. 
*/ 174 static vm_offset_t copy_page_src_va; 175 static vm_offset_t copy_page_dst_va; 176 static struct mtx copy_page_mutex; 177 #endif 178 179 static struct mtx tlbivax_mutex; 180 181 /**************************************************************************/ 182 /* PMAP */ 183 /**************************************************************************/ 184 185 static int mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t, 186 vm_prot_t, u_int flags, int8_t psind); 187 188 unsigned int kptbl_min; /* Index of the first kernel ptbl. */ 189 static uma_zone_t ptbl_root_zone; 190 191 /* 192 * If user pmap is processed with mmu_booke_remove and the resident count 193 * drops to 0, there are no more pages to remove, so we need not continue. 194 */ 195 #define PMAP_REMOVE_DONE(pmap) \ 196 ((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0) 197 198 #if defined(COMPAT_FREEBSD32) || !defined(__powerpc64__) 199 extern int elf32_nxstack; 200 #endif 201 202 /**************************************************************************/ 203 /* TLB and TID handling */ 204 /**************************************************************************/ 205 206 /* Translation ID busy table */ 207 static volatile pmap_t tidbusy[MAXCPU][TID_MAX + 1]; 208 209 /* 210 * TLB0 capabilities (entry, way numbers etc.). These can vary between e500 211 * core revisions and should be read from h/w registers during early config. 212 */ 213 uint32_t tlb0_entries; 214 uint32_t tlb0_ways; 215 uint32_t tlb0_entries_per_way; 216 uint32_t tlb1_entries; 217 218 #define TLB0_ENTRIES (tlb0_entries) 219 #define TLB0_WAYS (tlb0_ways) 220 #define TLB0_ENTRIES_PER_WAY (tlb0_entries_per_way) 221 222 #define TLB1_ENTRIES (tlb1_entries) 223 224 static tlbtid_t tid_alloc(struct pmap *); 225 226 #ifdef DDB 227 #ifdef __powerpc64__ 228 static void tlb_print_entry(int, uint32_t, uint64_t, uint32_t, uint32_t); 229 #else 230 static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t); 231 #endif 232 #endif 233 234 static void tlb1_read_entry(tlb_entry_t *, unsigned int); 235 static void tlb1_write_entry(tlb_entry_t *, unsigned int); 236 static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *); 237 static vm_size_t tlb1_mapin_region(vm_offset_t, vm_paddr_t, vm_size_t, int); 238 239 static __inline uint32_t tlb_calc_wimg(vm_paddr_t pa, vm_memattr_t ma); 240 241 static vm_size_t tsize2size(unsigned int); 242 static unsigned int size2tsize(vm_size_t); 243 static unsigned long ilog2(unsigned long); 244 245 static void set_mas4_defaults(void); 246 247 static inline void tlb0_flush_entry(vm_offset_t); 248 static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int); 249 250 /**************************************************************************/ 251 /* Page table management */ 252 /**************************************************************************/ 253 254 static struct rwlock_padalign pvh_global_lock; 255 256 /* Data for the pv entry allocation mechanism */ 257 static uma_zone_t pvzone; 258 static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0; 259 260 #define PV_ENTRY_ZONE_MIN 2048 /* min pv entries in uma zone */ 261 262 #ifndef PMAP_SHPGPERPROC 263 #define PMAP_SHPGPERPROC 200 264 #endif 265 266 static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t); 267 static int pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t); 268 static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t); 269 static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t); 270 static 
void kernel_pte_alloc(vm_offset_t, vm_offset_t); 271 272 static pv_entry_t pv_alloc(void); 273 static void pv_free(pv_entry_t); 274 static void pv_insert(pmap_t, vm_offset_t, vm_page_t); 275 static void pv_remove(pmap_t, vm_offset_t, vm_page_t); 276 277 static void booke_pmap_init_qpages(void); 278 279 static inline void tlb_miss_lock(void); 280 static inline void tlb_miss_unlock(void); 281 282 #ifdef SMP 283 extern tlb_entry_t __boot_tlb1[]; 284 void pmap_bootstrap_ap(volatile uint32_t *); 285 #endif 286 287 /* 288 * Kernel MMU interface 289 */ 290 static void mmu_booke_clear_modify(mmu_t, vm_page_t); 291 static void mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t, 292 vm_size_t, vm_offset_t); 293 static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t); 294 static void mmu_booke_copy_pages(mmu_t, vm_page_t *, 295 vm_offset_t, vm_page_t *, vm_offset_t, int); 296 static int mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, 297 vm_prot_t, u_int flags, int8_t psind); 298 static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, 299 vm_page_t, vm_prot_t); 300 static void mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, 301 vm_prot_t); 302 static vm_paddr_t mmu_booke_extract(mmu_t, pmap_t, vm_offset_t); 303 static vm_page_t mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t, 304 vm_prot_t); 305 static void mmu_booke_init(mmu_t); 306 static boolean_t mmu_booke_is_modified(mmu_t, vm_page_t); 307 static boolean_t mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t); 308 static boolean_t mmu_booke_is_referenced(mmu_t, vm_page_t); 309 static int mmu_booke_ts_referenced(mmu_t, vm_page_t); 310 static vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, 311 int); 312 static int mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t, 313 vm_paddr_t *); 314 static void mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t, 315 vm_object_t, vm_pindex_t, vm_size_t); 316 static boolean_t mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t); 317 static void mmu_booke_page_init(mmu_t, vm_page_t); 318 static int mmu_booke_page_wired_mappings(mmu_t, vm_page_t); 319 static void mmu_booke_pinit(mmu_t, pmap_t); 320 static void mmu_booke_pinit0(mmu_t, pmap_t); 321 static void mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, 322 vm_prot_t); 323 static void mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int); 324 static void mmu_booke_qremove(mmu_t, vm_offset_t, int); 325 static void mmu_booke_release(mmu_t, pmap_t); 326 static void mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t); 327 static void mmu_booke_remove_all(mmu_t, vm_page_t); 328 static void mmu_booke_remove_write(mmu_t, vm_page_t); 329 static void mmu_booke_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t); 330 static void mmu_booke_zero_page(mmu_t, vm_page_t); 331 static void mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int); 332 static void mmu_booke_activate(mmu_t, struct thread *); 333 static void mmu_booke_deactivate(mmu_t, struct thread *); 334 static void mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t); 335 static void *mmu_booke_mapdev(mmu_t, vm_paddr_t, vm_size_t); 336 static void *mmu_booke_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t); 337 static void mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t); 338 static vm_paddr_t mmu_booke_kextract(mmu_t, vm_offset_t); 339 static void mmu_booke_kenter(mmu_t, vm_offset_t, vm_paddr_t); 340 static void mmu_booke_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t); 341 static void mmu_booke_kremove(mmu_t, 
vm_offset_t); 342 static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t); 343 static void mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t, 344 vm_size_t); 345 static void mmu_booke_dumpsys_map(mmu_t, vm_paddr_t pa, size_t, 346 void **); 347 static void mmu_booke_dumpsys_unmap(mmu_t, vm_paddr_t pa, size_t, 348 void *); 349 static void mmu_booke_scan_init(mmu_t); 350 static vm_offset_t mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m); 351 static void mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr); 352 static int mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr, 353 vm_size_t sz, vm_memattr_t mode); 354 static int mmu_booke_map_user_ptr(mmu_t mmu, pmap_t pm, 355 volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen); 356 static int mmu_booke_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, 357 int *is_user, vm_offset_t *decoded_addr); 358 static void mmu_booke_page_array_startup(mmu_t , long); 359 static boolean_t mmu_booke_page_is_mapped(mmu_t mmu, vm_page_t m); 360 361 362 static mmu_method_t mmu_booke_methods[] = { 363 /* pmap dispatcher interface */ 364 MMUMETHOD(mmu_clear_modify, mmu_booke_clear_modify), 365 MMUMETHOD(mmu_copy, mmu_booke_copy), 366 MMUMETHOD(mmu_copy_page, mmu_booke_copy_page), 367 MMUMETHOD(mmu_copy_pages, mmu_booke_copy_pages), 368 MMUMETHOD(mmu_enter, mmu_booke_enter), 369 MMUMETHOD(mmu_enter_object, mmu_booke_enter_object), 370 MMUMETHOD(mmu_enter_quick, mmu_booke_enter_quick), 371 MMUMETHOD(mmu_extract, mmu_booke_extract), 372 MMUMETHOD(mmu_extract_and_hold, mmu_booke_extract_and_hold), 373 MMUMETHOD(mmu_init, mmu_booke_init), 374 MMUMETHOD(mmu_is_modified, mmu_booke_is_modified), 375 MMUMETHOD(mmu_is_prefaultable, mmu_booke_is_prefaultable), 376 MMUMETHOD(mmu_is_referenced, mmu_booke_is_referenced), 377 MMUMETHOD(mmu_ts_referenced, mmu_booke_ts_referenced), 378 MMUMETHOD(mmu_map, mmu_booke_map), 379 MMUMETHOD(mmu_mincore, mmu_booke_mincore), 380 MMUMETHOD(mmu_object_init_pt, mmu_booke_object_init_pt), 381 MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick), 382 MMUMETHOD(mmu_page_init, mmu_booke_page_init), 383 MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings), 384 MMUMETHOD(mmu_pinit, mmu_booke_pinit), 385 MMUMETHOD(mmu_pinit0, mmu_booke_pinit0), 386 MMUMETHOD(mmu_protect, mmu_booke_protect), 387 MMUMETHOD(mmu_qenter, mmu_booke_qenter), 388 MMUMETHOD(mmu_qremove, mmu_booke_qremove), 389 MMUMETHOD(mmu_release, mmu_booke_release), 390 MMUMETHOD(mmu_remove, mmu_booke_remove), 391 MMUMETHOD(mmu_remove_all, mmu_booke_remove_all), 392 MMUMETHOD(mmu_remove_write, mmu_booke_remove_write), 393 MMUMETHOD(mmu_sync_icache, mmu_booke_sync_icache), 394 MMUMETHOD(mmu_unwire, mmu_booke_unwire), 395 MMUMETHOD(mmu_zero_page, mmu_booke_zero_page), 396 MMUMETHOD(mmu_zero_page_area, mmu_booke_zero_page_area), 397 MMUMETHOD(mmu_activate, mmu_booke_activate), 398 MMUMETHOD(mmu_deactivate, mmu_booke_deactivate), 399 MMUMETHOD(mmu_quick_enter_page, mmu_booke_quick_enter_page), 400 MMUMETHOD(mmu_quick_remove_page, mmu_booke_quick_remove_page), 401 MMUMETHOD(mmu_page_array_startup, mmu_booke_page_array_startup), 402 MMUMETHOD(mmu_page_is_mapped, mmu_booke_page_is_mapped), 403 404 /* Internal interfaces */ 405 MMUMETHOD(mmu_bootstrap, mmu_booke_bootstrap), 406 MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped), 407 MMUMETHOD(mmu_mapdev, mmu_booke_mapdev), 408 MMUMETHOD(mmu_mapdev_attr, mmu_booke_mapdev_attr), 409 MMUMETHOD(mmu_kenter, mmu_booke_kenter), 410 MMUMETHOD(mmu_kenter_attr, mmu_booke_kenter_attr), 411 
MMUMETHOD(mmu_kextract, mmu_booke_kextract), 412 MMUMETHOD(mmu_kremove, mmu_booke_kremove), 413 MMUMETHOD(mmu_unmapdev, mmu_booke_unmapdev), 414 MMUMETHOD(mmu_change_attr, mmu_booke_change_attr), 415 MMUMETHOD(mmu_map_user_ptr, mmu_booke_map_user_ptr), 416 MMUMETHOD(mmu_decode_kernel_ptr, mmu_booke_decode_kernel_ptr), 417 418 /* dumpsys() support */ 419 MMUMETHOD(mmu_dumpsys_map, mmu_booke_dumpsys_map), 420 MMUMETHOD(mmu_dumpsys_unmap, mmu_booke_dumpsys_unmap), 421 MMUMETHOD(mmu_scan_init, mmu_booke_scan_init), 422 423 { 0, 0 } 424 }; 425 426 MMU_DEF(booke_mmu, MMU_TYPE_BOOKE, mmu_booke_methods, 0); 427 428 #ifdef __powerpc64__ 429 #include "pmap_64.c" 430 #else 431 #include "pmap_32.c" 432 #endif 433 434 static vm_offset_t tlb1_map_base = VM_MAPDEV_BASE; 435 436 static __inline uint32_t 437 tlb_calc_wimg(vm_paddr_t pa, vm_memattr_t ma) 438 { 439 uint32_t attrib; 440 int i; 441 442 if (ma != VM_MEMATTR_DEFAULT) { 443 switch (ma) { 444 case VM_MEMATTR_UNCACHEABLE: 445 return (MAS2_I | MAS2_G); 446 case VM_MEMATTR_WRITE_COMBINING: 447 case VM_MEMATTR_WRITE_BACK: 448 case VM_MEMATTR_PREFETCHABLE: 449 return (MAS2_I); 450 case VM_MEMATTR_WRITE_THROUGH: 451 return (MAS2_W | MAS2_M); 452 case VM_MEMATTR_CACHEABLE: 453 return (MAS2_M); 454 } 455 } 456 457 /* 458 * Assume the page is cache inhibited and access is guarded unless 459 * it's in our available memory array. 460 */ 461 attrib = _TLB_ENTRY_IO; 462 for (i = 0; i < physmem_regions_sz; i++) { 463 if ((pa >= physmem_regions[i].mr_start) && 464 (pa < (physmem_regions[i].mr_start + 465 physmem_regions[i].mr_size))) { 466 attrib = _TLB_ENTRY_MEM; 467 break; 468 } 469 } 470 471 return (attrib); 472 } 473 474 static inline void 475 tlb_miss_lock(void) 476 { 477 #ifdef SMP 478 struct pcpu *pc; 479 480 if (!smp_started) 481 return; 482 483 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) { 484 if (pc != pcpup) { 485 486 CTR3(KTR_PMAP, "%s: tlb miss LOCK of CPU=%d, " 487 "tlb_lock=%p", __func__, pc->pc_cpuid, pc->pc_booke.tlb_lock); 488 489 KASSERT((pc->pc_cpuid != PCPU_GET(cpuid)), 490 ("tlb_miss_lock: tried to lock self")); 491 492 tlb_lock(pc->pc_booke.tlb_lock); 493 494 CTR1(KTR_PMAP, "%s: locked", __func__); 495 } 496 } 497 #endif 498 } 499 500 static inline void 501 tlb_miss_unlock(void) 502 { 503 #ifdef SMP 504 struct pcpu *pc; 505 506 if (!smp_started) 507 return; 508 509 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) { 510 if (pc != pcpup) { 511 CTR2(KTR_PMAP, "%s: tlb miss UNLOCK of CPU=%d", 512 __func__, pc->pc_cpuid); 513 514 tlb_unlock(pc->pc_booke.tlb_lock); 515 516 CTR1(KTR_PMAP, "%s: unlocked", __func__); 517 } 518 } 519 #endif 520 } 521 522 /* Return number of entries in TLB0. */ 523 static __inline void 524 tlb0_get_tlbconf(void) 525 { 526 uint32_t tlb0_cfg; 527 528 tlb0_cfg = mfspr(SPR_TLB0CFG); 529 tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK; 530 tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT; 531 tlb0_entries_per_way = tlb0_entries / tlb0_ways; 532 } 533 534 /* Return number of entries in TLB1. */ 535 static __inline void 536 tlb1_get_tlbconf(void) 537 { 538 uint32_t tlb1_cfg; 539 540 tlb1_cfg = mfspr(SPR_TLB1CFG); 541 tlb1_entries = tlb1_cfg & TLBCFG_NENTRY_MASK; 542 } 543 544 /**************************************************************************/ 545 /* Page table related */ 546 /**************************************************************************/ 547 548 /* Allocate pv_entry structure. 
*/ 549 pv_entry_t 550 pv_alloc(void) 551 { 552 pv_entry_t pv; 553 554 pv_entry_count++; 555 if (pv_entry_count > pv_entry_high_water) 556 pagedaemon_wakeup(0); /* XXX powerpc NUMA */ 557 pv = uma_zalloc(pvzone, M_NOWAIT); 558 559 return (pv); 560 } 561 562 /* Free pv_entry structure. */ 563 static __inline void 564 pv_free(pv_entry_t pve) 565 { 566 567 pv_entry_count--; 568 uma_zfree(pvzone, pve); 569 } 570 571 572 /* Allocate and initialize pv_entry structure. */ 573 static void 574 pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m) 575 { 576 pv_entry_t pve; 577 578 //int su = (pmap == kernel_pmap); 579 //debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su, 580 // (u_int32_t)pmap, va, (u_int32_t)m); 581 582 pve = pv_alloc(); 583 if (pve == NULL) 584 panic("pv_insert: no pv entries!"); 585 586 pve->pv_pmap = pmap; 587 pve->pv_va = va; 588 589 /* add to pv_list */ 590 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 591 rw_assert(&pvh_global_lock, RA_WLOCKED); 592 593 TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link); 594 595 //debugf("pv_insert: e\n"); 596 } 597 598 /* Destroy pv entry. */ 599 static void 600 pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m) 601 { 602 pv_entry_t pve; 603 604 //int su = (pmap == kernel_pmap); 605 //debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va); 606 607 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 608 rw_assert(&pvh_global_lock, RA_WLOCKED); 609 610 /* find pv entry */ 611 TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) { 612 if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) { 613 /* remove from pv_list */ 614 TAILQ_REMOVE(&m->md.pv_list, pve, pv_link); 615 if (TAILQ_EMPTY(&m->md.pv_list)) 616 vm_page_aflag_clear(m, PGA_WRITEABLE); 617 618 /* free pv entry struct */ 619 pv_free(pve); 620 break; 621 } 622 } 623 624 //debugf("pv_remove: e\n"); 625 } 626 627 /**************************************************************************/ 628 /* PMAP related */ 629 /**************************************************************************/ 630 631 /* 632 * This is called during booke_init, before the system is really initialized. 633 */ 634 static void 635 mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend) 636 { 637 vm_paddr_t phys_kernelend; 638 struct mem_region *mp, *mp1; 639 int cnt, i, j; 640 vm_paddr_t s, e, sz; 641 vm_paddr_t physsz, hwphyssz; 642 u_int phys_avail_count; 643 vm_size_t kstack0_sz; 644 vm_paddr_t kstack0_phys; 645 vm_offset_t kstack0; 646 void *dpcpu; 647 648 debugf("mmu_booke_bootstrap: entered\n"); 649 650 /* Set interesting system properties */ 651 #ifdef __powerpc64__ 652 hw_direct_map = 1; 653 #else 654 hw_direct_map = 0; 655 #endif 656 #if defined(COMPAT_FREEBSD32) || !defined(__powerpc64__) 657 elf32_nxstack = 1; 658 #endif 659 660 /* Initialize invalidation mutex */ 661 mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN); 662 663 /* Read TLB0 size and associativity. */ 664 tlb0_get_tlbconf(); 665 666 /* 667 * Align kernel start and end address (kernel image). 668 * Note that kernel end does not necessarily relate to kernsize. 669 * kernsize is the size of the kernel that is actually mapped. 670 */ 671 data_start = round_page(kernelend); 672 data_end = data_start; 673 674 /* Allocate the dynamic per-cpu area. */ 675 dpcpu = (void *)data_end; 676 data_end += DPCPU_SIZE; 677 678 /* Allocate space for the message buffer. 
*/ 679 msgbufp = (struct msgbuf *)data_end; 680 data_end += msgbufsize; 681 debugf(" msgbufp at 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n", 682 (uintptr_t)msgbufp, data_end); 683 684 data_end = round_page(data_end); 685 data_end = round_page(mmu_booke_alloc_kernel_pgtables(data_end)); 686 687 /* Retrieve phys/avail mem regions */ 688 mem_regions(&physmem_regions, &physmem_regions_sz, 689 &availmem_regions, &availmem_regions_sz); 690 691 if (PHYS_AVAIL_ENTRIES < availmem_regions_sz) 692 panic("mmu_booke_bootstrap: phys_avail too small"); 693 694 data_end = round_page(data_end); 695 vm_page_array = (vm_page_t)data_end; 696 /* 697 * Get a rough idea (upper bound) on the size of the page array. The 698 * vm_page_array will not handle any more pages than we have in the 699 * avail_regions array, and most likely much less. 700 */ 701 sz = 0; 702 for (mp = availmem_regions; mp->mr_size; mp++) { 703 sz += mp->mr_size; 704 } 705 sz = (round_page(sz) / (PAGE_SIZE + sizeof(struct vm_page))); 706 data_end += round_page(sz * sizeof(struct vm_page)); 707 708 /* Pre-round up to 1MB. This wastes some space, but saves TLB entries */ 709 data_end = roundup2(data_end, 1 << 20); 710 711 debugf(" data_end: 0x%"PRI0ptrX"\n", data_end); 712 debugf(" kernstart: %#zx\n", kernstart); 713 debugf(" kernsize: %#zx\n", kernsize); 714 715 if (data_end - kernstart > kernsize) { 716 kernsize += tlb1_mapin_region(kernstart + kernsize, 717 kernload + kernsize, (data_end - kernstart) - kernsize, 718 _TLB_ENTRY_MEM); 719 } 720 data_end = kernstart + kernsize; 721 debugf(" updated data_end: 0x%"PRI0ptrX"\n", data_end); 722 723 /* 724 * Clear the structures - note we can only do it safely after the 725 * possible additional TLB1 translations are in place (above) so that 726 * all range up to the currently calculated 'data_end' is covered. 727 */ 728 bzero((void *)data_start, data_end - data_start); 729 dpcpu_init(dpcpu, 0); 730 731 /*******************************************************/ 732 /* Set the start and end of kva. */ 733 /*******************************************************/ 734 virtual_avail = round_page(data_end); 735 virtual_end = VM_MAX_KERNEL_ADDRESS; 736 737 #ifndef __powerpc64__ 738 /* Allocate KVA space for page zero/copy operations. */ 739 zero_page_va = virtual_avail; 740 virtual_avail += PAGE_SIZE; 741 copy_page_src_va = virtual_avail; 742 virtual_avail += PAGE_SIZE; 743 copy_page_dst_va = virtual_avail; 744 virtual_avail += PAGE_SIZE; 745 debugf("zero_page_va = 0x%"PRI0ptrX"\n", zero_page_va); 746 debugf("copy_page_src_va = 0x%"PRI0ptrX"\n", copy_page_src_va); 747 debugf("copy_page_dst_va = 0x%"PRI0ptrX"\n", copy_page_dst_va); 748 749 /* Initialize page zero/copy mutexes. */ 750 mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF); 751 mtx_init(©_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF); 752 753 /* Allocate KVA space for ptbl bufs. */ 754 ptbl_buf_pool_vabase = virtual_avail; 755 virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE; 756 debugf("ptbl_buf_pool_vabase = 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n", 757 ptbl_buf_pool_vabase, virtual_avail); 758 #endif 759 760 /* Calculate corresponding physical addresses for the kernel region. */ 761 phys_kernelend = kernload + kernsize; 762 debugf("kernel image and allocated data:\n"); 763 debugf(" kernload = 0x%09jx\n", (uintmax_t)kernload); 764 debugf(" kernstart = 0x%"PRI0ptrX"\n", kernstart); 765 debugf(" kernsize = 0x%"PRI0ptrX"\n", kernsize); 766 767 /* 768 * Remove kernel physical address range from avail regions list. 
Page 769 * align all regions. Non-page aligned memory isn't very interesting 770 * to us. Also, sort the entries for ascending addresses. 771 */ 772 773 sz = 0; 774 cnt = availmem_regions_sz; 775 debugf("processing avail regions:\n"); 776 for (mp = availmem_regions; mp->mr_size; mp++) { 777 s = mp->mr_start; 778 e = mp->mr_start + mp->mr_size; 779 debugf(" %09jx-%09jx -> ", (uintmax_t)s, (uintmax_t)e); 780 /* Check whether this region holds all of the kernel. */ 781 if (s < kernload && e > phys_kernelend) { 782 availmem_regions[cnt].mr_start = phys_kernelend; 783 availmem_regions[cnt++].mr_size = e - phys_kernelend; 784 e = kernload; 785 } 786 /* Look whether this regions starts within the kernel. */ 787 if (s >= kernload && s < phys_kernelend) { 788 if (e <= phys_kernelend) 789 goto empty; 790 s = phys_kernelend; 791 } 792 /* Now look whether this region ends within the kernel. */ 793 if (e > kernload && e <= phys_kernelend) { 794 if (s >= kernload) 795 goto empty; 796 e = kernload; 797 } 798 /* Now page align the start and size of the region. */ 799 s = round_page(s); 800 e = trunc_page(e); 801 if (e < s) 802 e = s; 803 sz = e - s; 804 debugf("%09jx-%09jx = %jx\n", 805 (uintmax_t)s, (uintmax_t)e, (uintmax_t)sz); 806 807 /* Check whether some memory is left here. */ 808 if (sz == 0) { 809 empty: 810 memmove(mp, mp + 1, 811 (cnt - (mp - availmem_regions)) * sizeof(*mp)); 812 cnt--; 813 mp--; 814 continue; 815 } 816 817 /* Do an insertion sort. */ 818 for (mp1 = availmem_regions; mp1 < mp; mp1++) 819 if (s < mp1->mr_start) 820 break; 821 if (mp1 < mp) { 822 memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1); 823 mp1->mr_start = s; 824 mp1->mr_size = sz; 825 } else { 826 mp->mr_start = s; 827 mp->mr_size = sz; 828 } 829 } 830 availmem_regions_sz = cnt; 831 832 /*******************************************************/ 833 /* Steal physical memory for kernel stack from the end */ 834 /* of the first avail region */ 835 /*******************************************************/ 836 kstack0_sz = kstack_pages * PAGE_SIZE; 837 kstack0_phys = availmem_regions[0].mr_start + 838 availmem_regions[0].mr_size; 839 kstack0_phys -= kstack0_sz; 840 availmem_regions[0].mr_size -= kstack0_sz; 841 842 /*******************************************************/ 843 /* Fill in phys_avail table, based on availmem_regions */ 844 /*******************************************************/ 845 phys_avail_count = 0; 846 physsz = 0; 847 hwphyssz = 0; 848 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz); 849 850 debugf("fill in phys_avail:\n"); 851 for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) { 852 853 debugf(" region: 0x%jx - 0x%jx (0x%jx)\n", 854 (uintmax_t)availmem_regions[i].mr_start, 855 (uintmax_t)availmem_regions[i].mr_start + 856 availmem_regions[i].mr_size, 857 (uintmax_t)availmem_regions[i].mr_size); 858 859 if (hwphyssz != 0 && 860 (physsz + availmem_regions[i].mr_size) >= hwphyssz) { 861 debugf(" hw.physmem adjust\n"); 862 if (physsz < hwphyssz) { 863 phys_avail[j] = availmem_regions[i].mr_start; 864 phys_avail[j + 1] = 865 availmem_regions[i].mr_start + 866 hwphyssz - physsz; 867 physsz = hwphyssz; 868 phys_avail_count++; 869 dump_avail[j] = phys_avail[j]; 870 dump_avail[j + 1] = phys_avail[j + 1]; 871 } 872 break; 873 } 874 875 phys_avail[j] = availmem_regions[i].mr_start; 876 phys_avail[j + 1] = availmem_regions[i].mr_start + 877 availmem_regions[i].mr_size; 878 phys_avail_count++; 879 physsz += availmem_regions[i].mr_size; 880 dump_avail[j] = phys_avail[j]; 881 dump_avail[j + 1] = 
phys_avail[j + 1]; 882 } 883 physmem = btoc(physsz); 884 885 /* Calculate the last available physical address. */ 886 for (i = 0; phys_avail[i + 2] != 0; i += 2) 887 ; 888 Maxmem = powerpc_btop(phys_avail[i + 1]); 889 890 debugf("Maxmem = 0x%08lx\n", Maxmem); 891 debugf("phys_avail_count = %d\n", phys_avail_count); 892 debugf("physsz = 0x%09jx physmem = %jd (0x%09jx)\n", 893 (uintmax_t)physsz, (uintmax_t)physmem, (uintmax_t)physmem); 894 895 #ifdef __powerpc64__ 896 /* 897 * Map the physical memory contiguously in TLB1. 898 * Round so it fits into a single mapping. 899 */ 900 tlb1_mapin_region(DMAP_BASE_ADDRESS, 0, 901 phys_avail[i + 1], _TLB_ENTRY_MEM); 902 #endif 903 904 /*******************************************************/ 905 /* Initialize (statically allocated) kernel pmap. */ 906 /*******************************************************/ 907 PMAP_LOCK_INIT(kernel_pmap); 908 909 debugf("kernel_pmap = 0x%"PRI0ptrX"\n", (uintptr_t)kernel_pmap); 910 kernel_pte_alloc(virtual_avail, kernstart); 911 for (i = 0; i < MAXCPU; i++) { 912 kernel_pmap->pm_tid[i] = TID_KERNEL; 913 914 /* Initialize each CPU's tidbusy entry 0 with kernel_pmap */ 915 tidbusy[i][TID_KERNEL] = kernel_pmap; 916 } 917 918 /* Mark kernel_pmap active on all CPUs */ 919 CPU_FILL(&kernel_pmap->pm_active); 920 921 /* 922 * Initialize the global pv list lock. 923 */ 924 rw_init(&pvh_global_lock, "pmap pv global"); 925 926 /*******************************************************/ 927 /* Final setup */ 928 /*******************************************************/ 929 930 /* Enter kstack0 into kernel map, provide guard page */ 931 kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE; 932 thread0.td_kstack = kstack0; 933 thread0.td_kstack_pages = kstack_pages; 934 935 debugf("kstack_sz = 0x%08jx\n", (uintmax_t)kstack0_sz); 936 debugf("kstack0_phys at 0x%09jx - 0x%09jx\n", 937 (uintmax_t)kstack0_phys, (uintmax_t)kstack0_phys + kstack0_sz); 938 debugf("kstack0 at 0x%"PRI0ptrX" - 0x%"PRI0ptrX"\n", 939 kstack0, kstack0 + kstack0_sz); 940 941 virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz; 942 for (i = 0; i < kstack_pages; i++) { 943 mmu_booke_kenter(mmu, kstack0, kstack0_phys); 944 kstack0 += PAGE_SIZE; 945 kstack0_phys += PAGE_SIZE; 946 } 947 948 pmap_bootstrapped = 1; 949 950 debugf("virtual_avail = %"PRI0ptrX"\n", virtual_avail); 951 debugf("virtual_end = %"PRI0ptrX"\n", virtual_end); 952 953 debugf("mmu_booke_bootstrap: exit\n"); 954 } 955 956 #ifdef SMP 957 void 958 tlb1_ap_prep(void) 959 { 960 tlb_entry_t *e, tmp; 961 unsigned int i; 962 963 /* Prepare TLB1 image for AP processors */ 964 e = __boot_tlb1; 965 for (i = 0; i < TLB1_ENTRIES; i++) { 966 tlb1_read_entry(&tmp, i); 967 968 if ((tmp.mas1 & MAS1_VALID) && (tmp.mas2 & _TLB_ENTRY_SHARED)) 969 memcpy(e++, &tmp, sizeof(tmp)); 970 } 971 } 972 973 void 974 pmap_bootstrap_ap(volatile uint32_t *trcp __unused) 975 { 976 int i; 977 978 /* 979 * Finish TLB1 configuration: the BSP already set up its TLB1 and we 980 * have the snapshot of its contents in the s/w __boot_tlb1[] table 981 * created by tlb1_ap_prep(), so use these values directly to 982 * (re)program AP's TLB1 hardware. 983 * 984 * Start at index 1 because index 0 has the kernel map. 
985 */ 986 for (i = 1; i < TLB1_ENTRIES; i++) { 987 if (__boot_tlb1[i].mas1 & MAS1_VALID) 988 tlb1_write_entry(&__boot_tlb1[i], i); 989 } 990 991 set_mas4_defaults(); 992 } 993 #endif 994 995 static void 996 booke_pmap_init_qpages(void) 997 { 998 struct pcpu *pc; 999 int i; 1000 1001 CPU_FOREACH(i) { 1002 pc = pcpu_find(i); 1003 pc->pc_qmap_addr = kva_alloc(PAGE_SIZE); 1004 if (pc->pc_qmap_addr == 0) 1005 panic("pmap_init_qpages: unable to allocate KVA"); 1006 } 1007 } 1008 1009 SYSINIT(qpages_init, SI_SUB_CPU, SI_ORDER_ANY, booke_pmap_init_qpages, NULL); 1010 1011 /* 1012 * Get the physical page address for the given pmap/virtual address. 1013 */ 1014 static vm_paddr_t 1015 mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va) 1016 { 1017 vm_paddr_t pa; 1018 1019 PMAP_LOCK(pmap); 1020 pa = pte_vatopa(mmu, pmap, va); 1021 PMAP_UNLOCK(pmap); 1022 1023 return (pa); 1024 } 1025 1026 /* 1027 * Extract the physical page address associated with the given 1028 * kernel virtual address. 1029 */ 1030 static vm_paddr_t 1031 mmu_booke_kextract(mmu_t mmu, vm_offset_t va) 1032 { 1033 tlb_entry_t e; 1034 vm_paddr_t p = 0; 1035 int i; 1036 1037 #ifdef __powerpc64__ 1038 if (va >= DMAP_BASE_ADDRESS && va <= DMAP_MAX_ADDRESS) 1039 return (DMAP_TO_PHYS(va)); 1040 #endif 1041 1042 if (va >= VM_MIN_KERNEL_ADDRESS && va <= VM_MAX_KERNEL_ADDRESS) 1043 p = pte_vatopa(mmu, kernel_pmap, va); 1044 1045 if (p == 0) { 1046 /* Check TLB1 mappings */ 1047 for (i = 0; i < TLB1_ENTRIES; i++) { 1048 tlb1_read_entry(&e, i); 1049 if (!(e.mas1 & MAS1_VALID)) 1050 continue; 1051 if (va >= e.virt && va < e.virt + e.size) 1052 return (e.phys + (va - e.virt)); 1053 } 1054 } 1055 1056 return (p); 1057 } 1058 1059 /* 1060 * Initialize the pmap module. 1061 * Called by vm_init, to initialize any structures that the pmap 1062 * system needs to map virtual memory. 1063 */ 1064 static void 1065 mmu_booke_init(mmu_t mmu) 1066 { 1067 int shpgperproc = PMAP_SHPGPERPROC; 1068 1069 /* 1070 * Initialize the address space (zone) for the pv entries. Set a 1071 * high water mark so that the system can recover from excessive 1072 * numbers of pv entries. 1073 */ 1074 pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL, 1075 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); 1076 1077 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); 1078 pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count; 1079 1080 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max); 1081 pv_entry_high_water = 9 * (pv_entry_max / 10); 1082 1083 uma_zone_reserve_kva(pvzone, pv_entry_max); 1084 1085 /* Pre-fill pvzone with initial number of pv entries. */ 1086 uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN); 1087 1088 /* Create a UMA zone for page table roots. */ 1089 ptbl_root_zone = uma_zcreate("pmap root", PMAP_ROOT_SIZE, 1090 NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, UMA_ZONE_VM); 1091 1092 /* Initialize ptbl allocation. */ 1093 ptbl_init(); 1094 } 1095 1096 /* 1097 * Map a list of wired pages into kernel virtual address space. This is 1098 * intended for temporary mappings which do not need page modification or 1099 * references recorded. Existing mappings in the region are overwritten. 1100 */ 1101 static void 1102 mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count) 1103 { 1104 vm_offset_t va; 1105 1106 va = sva; 1107 while (count-- > 0) { 1108 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m)); 1109 va += PAGE_SIZE; 1110 m++; 1111 } 1112 } 1113 1114 /* 1115 * Remove page mappings from kernel virtual address space. 
Intended for 1116 * temporary mappings entered by mmu_booke_qenter. 1117 */ 1118 static void 1119 mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count) 1120 { 1121 vm_offset_t va; 1122 1123 va = sva; 1124 while (count-- > 0) { 1125 mmu_booke_kremove(mmu, va); 1126 va += PAGE_SIZE; 1127 } 1128 } 1129 1130 /* 1131 * Map a wired page into kernel virtual address space. 1132 */ 1133 static void 1134 mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa) 1135 { 1136 1137 mmu_booke_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT); 1138 } 1139 1140 static void 1141 mmu_booke_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma) 1142 { 1143 uint32_t flags; 1144 pte_t *pte; 1145 1146 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && 1147 (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va")); 1148 1149 flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID; 1150 flags |= tlb_calc_wimg(pa, ma) << PTE_MAS2_SHIFT; 1151 flags |= PTE_PS_4KB; 1152 1153 pte = pte_find(mmu, kernel_pmap, va); 1154 KASSERT((pte != NULL), ("mmu_booke_kenter: invalid va. NULL PTE")); 1155 1156 mtx_lock_spin(&tlbivax_mutex); 1157 tlb_miss_lock(); 1158 1159 if (PTE_ISVALID(pte)) { 1160 1161 CTR1(KTR_PMAP, "%s: replacing entry!", __func__); 1162 1163 /* Flush entry from TLB0 */ 1164 tlb0_flush_entry(va); 1165 } 1166 1167 *pte = PTE_RPN_FROM_PA(pa) | flags; 1168 1169 //debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x " 1170 // "pa=0x%08x rpn=0x%08x flags=0x%08x\n", 1171 // pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags); 1172 1173 /* Flush the real memory from the instruction cache. */ 1174 if ((flags & (PTE_I | PTE_G)) == 0) 1175 __syncicache((void *)va, PAGE_SIZE); 1176 1177 tlb_miss_unlock(); 1178 mtx_unlock_spin(&tlbivax_mutex); 1179 } 1180 1181 /* 1182 * Remove a page from kernel page table. 1183 */ 1184 static void 1185 mmu_booke_kremove(mmu_t mmu, vm_offset_t va) 1186 { 1187 pte_t *pte; 1188 1189 CTR2(KTR_PMAP,"%s: s (va = 0x%"PRI0ptrX")\n", __func__, va); 1190 1191 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && 1192 (va <= VM_MAX_KERNEL_ADDRESS)), 1193 ("mmu_booke_kremove: invalid va")); 1194 1195 pte = pte_find(mmu, kernel_pmap, va); 1196 1197 if (!PTE_ISVALID(pte)) { 1198 1199 CTR1(KTR_PMAP, "%s: invalid pte", __func__); 1200 1201 return; 1202 } 1203 1204 mtx_lock_spin(&tlbivax_mutex); 1205 tlb_miss_lock(); 1206 1207 /* Invalidate entry in TLB0, update PTE. */ 1208 tlb0_flush_entry(va); 1209 *pte = 0; 1210 1211 tlb_miss_unlock(); 1212 mtx_unlock_spin(&tlbivax_mutex); 1213 } 1214 1215 /* 1216 * Provide a kernel pointer corresponding to a given userland pointer. 1217 * The returned pointer is valid until the next time this function is 1218 * called in this thread. This is used internally in copyin/copyout. 1219 */ 1220 int 1221 mmu_booke_map_user_ptr(mmu_t mmu, pmap_t pm, volatile const void *uaddr, 1222 void **kaddr, size_t ulen, size_t *klen) 1223 { 1224 1225 if (trunc_page((uintptr_t)uaddr + ulen) > VM_MAXUSER_ADDRESS) 1226 return (EFAULT); 1227 1228 *kaddr = (void *)(uintptr_t)uaddr; 1229 if (klen) 1230 *klen = ulen; 1231 1232 return (0); 1233 } 1234 1235 /* 1236 * Figure out where a given kernel pointer (usually in a fault) points 1237 * to from the VM's perspective, potentially remapping into userland's 1238 * address space. 
1239 */ 1240 static int 1241 mmu_booke_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, int *is_user, 1242 vm_offset_t *decoded_addr) 1243 { 1244 1245 if (trunc_page(addr) <= VM_MAXUSER_ADDRESS) 1246 *is_user = 1; 1247 else 1248 *is_user = 0; 1249 1250 *decoded_addr = addr; 1251 return (0); 1252 } 1253 1254 static boolean_t 1255 mmu_booke_page_is_mapped(mmu_t mmu, vm_page_t m) 1256 { 1257 1258 return (!TAILQ_EMPTY(&(m)->md.pv_list)); 1259 } 1260 1261 /* 1262 * Initialize pmap associated with process 0. 1263 */ 1264 static void 1265 mmu_booke_pinit0(mmu_t mmu, pmap_t pmap) 1266 { 1267 1268 PMAP_LOCK_INIT(pmap); 1269 mmu_booke_pinit(mmu, pmap); 1270 PCPU_SET(curpmap, pmap); 1271 } 1272 1273 /* 1274 * Insert the given physical page at the specified virtual address in the 1275 * target physical map with the protection requested. If specified the page 1276 * will be wired down. 1277 */ 1278 static int 1279 mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1280 vm_prot_t prot, u_int flags, int8_t psind) 1281 { 1282 int error; 1283 1284 rw_wlock(&pvh_global_lock); 1285 PMAP_LOCK(pmap); 1286 error = mmu_booke_enter_locked(mmu, pmap, va, m, prot, flags, psind); 1287 PMAP_UNLOCK(pmap); 1288 rw_wunlock(&pvh_global_lock); 1289 return (error); 1290 } 1291 1292 static int 1293 mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1294 vm_prot_t prot, u_int pmap_flags, int8_t psind __unused) 1295 { 1296 pte_t *pte; 1297 vm_paddr_t pa; 1298 pte_t flags; 1299 int error, su, sync; 1300 1301 pa = VM_PAGE_TO_PHYS(m); 1302 su = (pmap == kernel_pmap); 1303 sync = 0; 1304 1305 //debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x " 1306 // "pa=0x%08x prot=0x%08x flags=%#x)\n", 1307 // (u_int32_t)pmap, su, pmap->pm_tid, 1308 // (u_int32_t)m, va, pa, prot, flags); 1309 1310 if (su) { 1311 KASSERT(((va >= virtual_avail) && 1312 (va <= VM_MAX_KERNEL_ADDRESS)), 1313 ("mmu_booke_enter_locked: kernel pmap, non kernel va")); 1314 } else { 1315 KASSERT((va <= VM_MAXUSER_ADDRESS), 1316 ("mmu_booke_enter_locked: user pmap, non user va")); 1317 } 1318 if ((m->oflags & VPO_UNMANAGED) == 0) { 1319 if ((pmap_flags & PMAP_ENTER_QUICK_LOCKED) == 0) 1320 VM_PAGE_OBJECT_BUSY_ASSERT(m); 1321 else 1322 VM_OBJECT_ASSERT_LOCKED(m->object); 1323 } 1324 1325 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1326 1327 /* 1328 * If there is an existing mapping, and the physical address has not 1329 * changed, must be protection or wiring change. 1330 */ 1331 if (((pte = pte_find(mmu, pmap, va)) != NULL) && 1332 (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) { 1333 1334 /* 1335 * Before actually updating pte->flags we calculate and 1336 * prepare its new value in a helper var. 1337 */ 1338 flags = *pte; 1339 flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED); 1340 1341 /* Wiring change, just update stats. */ 1342 if ((pmap_flags & PMAP_ENTER_WIRED) != 0) { 1343 if (!PTE_ISWIRED(pte)) { 1344 flags |= PTE_WIRED; 1345 pmap->pm_stats.wired_count++; 1346 } 1347 } else { 1348 if (PTE_ISWIRED(pte)) { 1349 flags &= ~PTE_WIRED; 1350 pmap->pm_stats.wired_count--; 1351 } 1352 } 1353 1354 if (prot & VM_PROT_WRITE) { 1355 /* Add write permissions. */ 1356 flags |= PTE_SW; 1357 if (!su) 1358 flags |= PTE_UW; 1359 1360 if ((flags & PTE_MANAGED) != 0) 1361 vm_page_aflag_set(m, PGA_WRITEABLE); 1362 } else { 1363 /* Handle modified pages, sense modify status. 
*/ 1364 1365 /* 1366 * The PTE_MODIFIED flag could be set by underlying 1367 * TLB misses since we last read it (above), possibly 1368 * other CPUs could update it so we check in the PTE 1369 * directly rather than rely on that saved local flags 1370 * copy. 1371 */ 1372 if (PTE_ISMODIFIED(pte)) 1373 vm_page_dirty(m); 1374 } 1375 1376 if (prot & VM_PROT_EXECUTE) { 1377 flags |= PTE_SX; 1378 if (!su) 1379 flags |= PTE_UX; 1380 1381 /* 1382 * Check existing flags for execute permissions: if we 1383 * are turning execute permissions on, icache should 1384 * be flushed. 1385 */ 1386 if ((*pte & (PTE_UX | PTE_SX)) == 0) 1387 sync++; 1388 } 1389 1390 flags &= ~PTE_REFERENCED; 1391 1392 /* 1393 * The new flags value is all calculated -- only now actually 1394 * update the PTE. 1395 */ 1396 mtx_lock_spin(&tlbivax_mutex); 1397 tlb_miss_lock(); 1398 1399 tlb0_flush_entry(va); 1400 *pte &= ~PTE_FLAGS_MASK; 1401 *pte |= flags; 1402 1403 tlb_miss_unlock(); 1404 mtx_unlock_spin(&tlbivax_mutex); 1405 1406 } else { 1407 /* 1408 * If there is an existing mapping, but it's for a different 1409 * physical address, pte_enter() will delete the old mapping. 1410 */ 1411 //if ((pte != NULL) && PTE_ISVALID(pte)) 1412 // debugf("mmu_booke_enter_locked: replace\n"); 1413 //else 1414 // debugf("mmu_booke_enter_locked: new\n"); 1415 1416 /* Now set up the flags and install the new mapping. */ 1417 flags = (PTE_SR | PTE_VALID); 1418 flags |= PTE_M; 1419 1420 if (!su) 1421 flags |= PTE_UR; 1422 1423 if (prot & VM_PROT_WRITE) { 1424 flags |= PTE_SW; 1425 if (!su) 1426 flags |= PTE_UW; 1427 1428 if ((m->oflags & VPO_UNMANAGED) == 0) 1429 vm_page_aflag_set(m, PGA_WRITEABLE); 1430 } 1431 1432 if (prot & VM_PROT_EXECUTE) { 1433 flags |= PTE_SX; 1434 if (!su) 1435 flags |= PTE_UX; 1436 } 1437 1438 /* If its wired update stats. */ 1439 if ((pmap_flags & PMAP_ENTER_WIRED) != 0) 1440 flags |= PTE_WIRED; 1441 1442 error = pte_enter(mmu, pmap, m, va, flags, 1443 (pmap_flags & PMAP_ENTER_NOSLEEP) != 0); 1444 if (error != 0) 1445 return (KERN_RESOURCE_SHORTAGE); 1446 1447 if ((flags & PMAP_ENTER_WIRED) != 0) 1448 pmap->pm_stats.wired_count++; 1449 1450 /* Flush the real memory from the instruction cache. */ 1451 if (prot & VM_PROT_EXECUTE) 1452 sync++; 1453 } 1454 1455 if (sync && (su || pmap == PCPU_GET(curpmap))) { 1456 __syncicache((void *)va, PAGE_SIZE); 1457 sync = 0; 1458 } 1459 1460 return (KERN_SUCCESS); 1461 } 1462 1463 /* 1464 * Maps a sequence of resident pages belonging to the same object. 1465 * The sequence begins with the given page m_start. This page is 1466 * mapped at the given virtual address start. Each subsequent page is 1467 * mapped at a virtual address that is offset from start by the same 1468 * amount as the page is offset from m_start within the object. The 1469 * last page in the sequence is the page with the largest offset from 1470 * m_start that can be mapped at a virtual address less than the given 1471 * virtual address end. Not every virtual page between start and end 1472 * is mapped; only those for which a resident page exists with the 1473 * corresponding offset from m_start are mapped. 
1474 */ 1475 static void 1476 mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start, 1477 vm_offset_t end, vm_page_t m_start, vm_prot_t prot) 1478 { 1479 vm_page_t m; 1480 vm_pindex_t diff, psize; 1481 1482 VM_OBJECT_ASSERT_LOCKED(m_start->object); 1483 1484 psize = atop(end - start); 1485 m = m_start; 1486 rw_wlock(&pvh_global_lock); 1487 PMAP_LOCK(pmap); 1488 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 1489 mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m, 1490 prot & (VM_PROT_READ | VM_PROT_EXECUTE), 1491 PMAP_ENTER_NOSLEEP | PMAP_ENTER_QUICK_LOCKED, 0); 1492 m = TAILQ_NEXT(m, listq); 1493 } 1494 PMAP_UNLOCK(pmap); 1495 rw_wunlock(&pvh_global_lock); 1496 } 1497 1498 static void 1499 mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1500 vm_prot_t prot) 1501 { 1502 1503 rw_wlock(&pvh_global_lock); 1504 PMAP_LOCK(pmap); 1505 mmu_booke_enter_locked(mmu, pmap, va, m, 1506 prot & (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP | 1507 PMAP_ENTER_QUICK_LOCKED, 0); 1508 PMAP_UNLOCK(pmap); 1509 rw_wunlock(&pvh_global_lock); 1510 } 1511 1512 /* 1513 * Remove the given range of addresses from the specified map. 1514 * 1515 * It is assumed that the start and end are properly rounded to the page size. 1516 */ 1517 static void 1518 mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva) 1519 { 1520 pte_t *pte; 1521 uint8_t hold_flag; 1522 1523 int su = (pmap == kernel_pmap); 1524 1525 //debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n", 1526 // su, (u_int32_t)pmap, pmap->pm_tid, va, endva); 1527 1528 if (su) { 1529 KASSERT(((va >= virtual_avail) && 1530 (va <= VM_MAX_KERNEL_ADDRESS)), 1531 ("mmu_booke_remove: kernel pmap, non kernel va")); 1532 } else { 1533 KASSERT((va <= VM_MAXUSER_ADDRESS), 1534 ("mmu_booke_remove: user pmap, non user va")); 1535 } 1536 1537 if (PMAP_REMOVE_DONE(pmap)) { 1538 //debugf("mmu_booke_remove: e (empty)\n"); 1539 return; 1540 } 1541 1542 hold_flag = PTBL_HOLD_FLAG(pmap); 1543 //debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag); 1544 1545 rw_wlock(&pvh_global_lock); 1546 PMAP_LOCK(pmap); 1547 for (; va < endva; va += PAGE_SIZE) { 1548 pte = pte_find_next(mmu, pmap, &va); 1549 if ((pte == NULL) || !PTE_ISVALID(pte)) 1550 break; 1551 if (va >= endva) 1552 break; 1553 pte_remove(mmu, pmap, va, hold_flag); 1554 } 1555 PMAP_UNLOCK(pmap); 1556 rw_wunlock(&pvh_global_lock); 1557 1558 //debugf("mmu_booke_remove: e\n"); 1559 } 1560 1561 /* 1562 * Remove physical page from all pmaps in which it resides. 1563 */ 1564 static void 1565 mmu_booke_remove_all(mmu_t mmu, vm_page_t m) 1566 { 1567 pv_entry_t pv, pvn; 1568 uint8_t hold_flag; 1569 1570 rw_wlock(&pvh_global_lock); 1571 TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_link, pvn) { 1572 PMAP_LOCK(pv->pv_pmap); 1573 hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap); 1574 pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag); 1575 PMAP_UNLOCK(pv->pv_pmap); 1576 } 1577 vm_page_aflag_clear(m, PGA_WRITEABLE); 1578 rw_wunlock(&pvh_global_lock); 1579 } 1580 1581 /* 1582 * Map a range of physical addresses into kernel virtual address space. 1583 */ 1584 static vm_offset_t 1585 mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start, 1586 vm_paddr_t pa_end, int prot) 1587 { 1588 vm_offset_t sva = *virt; 1589 vm_offset_t va = sva; 1590 1591 #ifdef __powerpc64__ 1592 /* XXX: Handle memory not starting at 0x0. 
*/ 1593 if (pa_end < ctob(Maxmem)) 1594 return (PHYS_TO_DMAP(pa_start)); 1595 #endif 1596 1597 while (pa_start < pa_end) { 1598 mmu_booke_kenter(mmu, va, pa_start); 1599 va += PAGE_SIZE; 1600 pa_start += PAGE_SIZE; 1601 } 1602 *virt = va; 1603 1604 return (sva); 1605 } 1606 1607 /* 1608 * The pmap must be activated before it's address space can be accessed in any 1609 * way. 1610 */ 1611 static void 1612 mmu_booke_activate(mmu_t mmu, struct thread *td) 1613 { 1614 pmap_t pmap; 1615 u_int cpuid; 1616 1617 pmap = &td->td_proc->p_vmspace->vm_pmap; 1618 1619 CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x%"PRI0ptrX")", 1620 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap); 1621 1622 KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!")); 1623 1624 sched_pin(); 1625 1626 cpuid = PCPU_GET(cpuid); 1627 CPU_SET_ATOMIC(cpuid, &pmap->pm_active); 1628 PCPU_SET(curpmap, pmap); 1629 1630 if (pmap->pm_tid[cpuid] == TID_NONE) 1631 tid_alloc(pmap); 1632 1633 /* Load PID0 register with pmap tid value. */ 1634 mtspr(SPR_PID0, pmap->pm_tid[cpuid]); 1635 __asm __volatile("isync"); 1636 1637 mtspr(SPR_DBCR0, td->td_pcb->pcb_cpu.booke.dbcr0); 1638 1639 sched_unpin(); 1640 1641 CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__, 1642 pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm); 1643 } 1644 1645 /* 1646 * Deactivate the specified process's address space. 1647 */ 1648 static void 1649 mmu_booke_deactivate(mmu_t mmu, struct thread *td) 1650 { 1651 pmap_t pmap; 1652 1653 pmap = &td->td_proc->p_vmspace->vm_pmap; 1654 1655 CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%"PRI0ptrX, 1656 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap); 1657 1658 td->td_pcb->pcb_cpu.booke.dbcr0 = mfspr(SPR_DBCR0); 1659 1660 CPU_CLR_ATOMIC(PCPU_GET(cpuid), &pmap->pm_active); 1661 PCPU_SET(curpmap, NULL); 1662 } 1663 1664 /* 1665 * Copy the range specified by src_addr/len 1666 * from the source map to the range dst_addr/len 1667 * in the destination map. 1668 * 1669 * This routine is only advisory and need not do anything. 1670 */ 1671 static void 1672 mmu_booke_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap, 1673 vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr) 1674 { 1675 1676 } 1677 1678 /* 1679 * Set the physical protection on the specified range of this map as requested. 1680 */ 1681 static void 1682 mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 1683 vm_prot_t prot) 1684 { 1685 vm_offset_t va; 1686 vm_page_t m; 1687 pte_t *pte; 1688 1689 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1690 mmu_booke_remove(mmu, pmap, sva, eva); 1691 return; 1692 } 1693 1694 if (prot & VM_PROT_WRITE) 1695 return; 1696 1697 PMAP_LOCK(pmap); 1698 for (va = sva; va < eva; va += PAGE_SIZE) { 1699 if ((pte = pte_find(mmu, pmap, va)) != NULL) { 1700 if (PTE_ISVALID(pte)) { 1701 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 1702 1703 mtx_lock_spin(&tlbivax_mutex); 1704 tlb_miss_lock(); 1705 1706 /* Handle modified pages. */ 1707 if (PTE_ISMODIFIED(pte) && PTE_ISMANAGED(pte)) 1708 vm_page_dirty(m); 1709 1710 tlb0_flush_entry(va); 1711 *pte &= ~(PTE_UW | PTE_SW | PTE_MODIFIED); 1712 1713 tlb_miss_unlock(); 1714 mtx_unlock_spin(&tlbivax_mutex); 1715 } 1716 } 1717 } 1718 PMAP_UNLOCK(pmap); 1719 } 1720 1721 /* 1722 * Clear the write and modified bits in each of the given page's mappings. 
1723 */ 1724 static void 1725 mmu_booke_remove_write(mmu_t mmu, vm_page_t m) 1726 { 1727 pv_entry_t pv; 1728 pte_t *pte; 1729 1730 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1731 ("mmu_booke_remove_write: page %p is not managed", m)); 1732 vm_page_assert_busied(m); 1733 1734 if (!pmap_page_is_write_mapped(m)) 1735 return; 1736 rw_wlock(&pvh_global_lock); 1737 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 1738 PMAP_LOCK(pv->pv_pmap); 1739 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) { 1740 if (PTE_ISVALID(pte)) { 1741 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 1742 1743 mtx_lock_spin(&tlbivax_mutex); 1744 tlb_miss_lock(); 1745 1746 /* Handle modified pages. */ 1747 if (PTE_ISMODIFIED(pte)) 1748 vm_page_dirty(m); 1749 1750 /* Flush mapping from TLB0. */ 1751 *pte &= ~(PTE_UW | PTE_SW | PTE_MODIFIED); 1752 1753 tlb_miss_unlock(); 1754 mtx_unlock_spin(&tlbivax_mutex); 1755 } 1756 } 1757 PMAP_UNLOCK(pv->pv_pmap); 1758 } 1759 vm_page_aflag_clear(m, PGA_WRITEABLE); 1760 rw_wunlock(&pvh_global_lock); 1761 } 1762 1763 /* 1764 * Atomically extract and hold the physical page with the given 1765 * pmap and virtual address pair if that mapping permits the given 1766 * protection. 1767 */ 1768 static vm_page_t 1769 mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, 1770 vm_prot_t prot) 1771 { 1772 pte_t *pte; 1773 vm_page_t m; 1774 uint32_t pte_wbit; 1775 1776 m = NULL; 1777 PMAP_LOCK(pmap); 1778 pte = pte_find(mmu, pmap, va); 1779 if ((pte != NULL) && PTE_ISVALID(pte)) { 1780 if (pmap == kernel_pmap) 1781 pte_wbit = PTE_SW; 1782 else 1783 pte_wbit = PTE_UW; 1784 1785 if ((*pte & pte_wbit) != 0 || (prot & VM_PROT_WRITE) == 0) { 1786 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 1787 if (!vm_page_wire_mapped(m)) 1788 m = NULL; 1789 } 1790 } 1791 PMAP_UNLOCK(pmap); 1792 return (m); 1793 } 1794 1795 /* 1796 * Initialize a vm_page's machine-dependent fields. 1797 */ 1798 static void 1799 mmu_booke_page_init(mmu_t mmu, vm_page_t m) 1800 { 1801 1802 m->md.pv_tracked = 0; 1803 TAILQ_INIT(&m->md.pv_list); 1804 } 1805 1806 /* 1807 * Return whether or not the specified physical page was modified 1808 * in any of physical maps. 1809 */ 1810 static boolean_t 1811 mmu_booke_is_modified(mmu_t mmu, vm_page_t m) 1812 { 1813 pte_t *pte; 1814 pv_entry_t pv; 1815 boolean_t rv; 1816 1817 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1818 ("mmu_booke_is_modified: page %p is not managed", m)); 1819 rv = FALSE; 1820 1821 /* 1822 * If the page is not busied then this check is racy. 1823 */ 1824 if (!pmap_page_is_write_mapped(m)) 1825 return (FALSE); 1826 1827 rw_wlock(&pvh_global_lock); 1828 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 1829 PMAP_LOCK(pv->pv_pmap); 1830 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 1831 PTE_ISVALID(pte)) { 1832 if (PTE_ISMODIFIED(pte)) 1833 rv = TRUE; 1834 } 1835 PMAP_UNLOCK(pv->pv_pmap); 1836 if (rv) 1837 break; 1838 } 1839 rw_wunlock(&pvh_global_lock); 1840 return (rv); 1841 } 1842 1843 /* 1844 * Return whether or not the specified virtual address is eligible 1845 * for prefault. 1846 */ 1847 static boolean_t 1848 mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr) 1849 { 1850 1851 return (FALSE); 1852 } 1853 1854 /* 1855 * Return whether or not the specified physical page was referenced 1856 * in any physical maps. 
1857 */ 1858 static boolean_t 1859 mmu_booke_is_referenced(mmu_t mmu, vm_page_t m) 1860 { 1861 pte_t *pte; 1862 pv_entry_t pv; 1863 boolean_t rv; 1864 1865 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1866 ("mmu_booke_is_referenced: page %p is not managed", m)); 1867 rv = FALSE; 1868 rw_wlock(&pvh_global_lock); 1869 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 1870 PMAP_LOCK(pv->pv_pmap); 1871 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 1872 PTE_ISVALID(pte)) { 1873 if (PTE_ISREFERENCED(pte)) 1874 rv = TRUE; 1875 } 1876 PMAP_UNLOCK(pv->pv_pmap); 1877 if (rv) 1878 break; 1879 } 1880 rw_wunlock(&pvh_global_lock); 1881 return (rv); 1882 } 1883 1884 /* 1885 * Clear the modify bits on the specified physical page. 1886 */ 1887 static void 1888 mmu_booke_clear_modify(mmu_t mmu, vm_page_t m) 1889 { 1890 pte_t *pte; 1891 pv_entry_t pv; 1892 1893 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1894 ("mmu_booke_clear_modify: page %p is not managed", m)); 1895 vm_page_assert_busied(m); 1896 1897 if (!pmap_page_is_write_mapped(m)) 1898 return; 1899 1900 rw_wlock(&pvh_global_lock); 1901 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 1902 PMAP_LOCK(pv->pv_pmap); 1903 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 1904 PTE_ISVALID(pte)) { 1905 mtx_lock_spin(&tlbivax_mutex); 1906 tlb_miss_lock(); 1907 1908 if (*pte & (PTE_SW | PTE_UW | PTE_MODIFIED)) { 1909 tlb0_flush_entry(pv->pv_va); 1910 *pte &= ~(PTE_SW | PTE_UW | PTE_MODIFIED | 1911 PTE_REFERENCED); 1912 } 1913 1914 tlb_miss_unlock(); 1915 mtx_unlock_spin(&tlbivax_mutex); 1916 } 1917 PMAP_UNLOCK(pv->pv_pmap); 1918 } 1919 rw_wunlock(&pvh_global_lock); 1920 } 1921 1922 /* 1923 * Return a count of reference bits for a page, clearing those bits. 1924 * It is not necessary for every reference bit to be cleared, but it 1925 * is necessary that 0 only be returned when there are truly no 1926 * reference bits set. 1927 * 1928 * As an optimization, update the page's dirty field if a modified bit is 1929 * found while counting reference bits. This opportunistic update can be 1930 * performed at low cost and can eliminate the need for some future calls 1931 * to pmap_is_modified(). However, since this function stops after 1932 * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some 1933 * dirty pages. Those dirty pages will only be detected by a future call 1934 * to pmap_is_modified(). 1935 */ 1936 static int 1937 mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m) 1938 { 1939 pte_t *pte; 1940 pv_entry_t pv; 1941 int count; 1942 1943 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1944 ("mmu_booke_ts_referenced: page %p is not managed", m)); 1945 count = 0; 1946 rw_wlock(&pvh_global_lock); 1947 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 1948 PMAP_LOCK(pv->pv_pmap); 1949 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 1950 PTE_ISVALID(pte)) { 1951 if (PTE_ISMODIFIED(pte)) 1952 vm_page_dirty(m); 1953 if (PTE_ISREFERENCED(pte)) { 1954 mtx_lock_spin(&tlbivax_mutex); 1955 tlb_miss_lock(); 1956 1957 tlb0_flush_entry(pv->pv_va); 1958 *pte &= ~PTE_REFERENCED; 1959 1960 tlb_miss_unlock(); 1961 mtx_unlock_spin(&tlbivax_mutex); 1962 1963 if (++count >= PMAP_TS_REFERENCED_MAX) { 1964 PMAP_UNLOCK(pv->pv_pmap); 1965 break; 1966 } 1967 } 1968 } 1969 PMAP_UNLOCK(pv->pv_pmap); 1970 } 1971 rw_wunlock(&pvh_global_lock); 1972 return (count); 1973 } 1974 1975 /* 1976 * Clear the wired attribute from the mappings for the specified range of 1977 * addresses in the given pmap. 
Every valid mapping within that range must 1978 * have the wired attribute set. In contrast, invalid mappings cannot have 1979 * the wired attribute set, so they are ignored. 1980 * 1981 * The wired attribute of the page table entry is not a hardware feature, so 1982 * there is no need to invalidate any TLB entries. 1983 */ 1984 static void 1985 mmu_booke_unwire(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 1986 { 1987 vm_offset_t va; 1988 pte_t *pte; 1989 1990 PMAP_LOCK(pmap); 1991 for (va = sva; va < eva; va += PAGE_SIZE) { 1992 if ((pte = pte_find(mmu, pmap, va)) != NULL && 1993 PTE_ISVALID(pte)) { 1994 if (!PTE_ISWIRED(pte)) 1995 panic("mmu_booke_unwire: pte %p isn't wired", 1996 pte); 1997 *pte &= ~PTE_WIRED; 1998 pmap->pm_stats.wired_count--; 1999 } 2000 } 2001 PMAP_UNLOCK(pmap); 2002 2003 } 2004 2005 /* 2006 * Return true if the pmap's pv is one of the first 16 pvs linked to from this 2007 * page. This count may be changed upwards or downwards in the future; it is 2008 * only necessary that true be returned for a small subset of pmaps for proper 2009 * page aging. 2010 */ 2011 static boolean_t 2012 mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m) 2013 { 2014 pv_entry_t pv; 2015 int loops; 2016 boolean_t rv; 2017 2018 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2019 ("mmu_booke_page_exists_quick: page %p is not managed", m)); 2020 loops = 0; 2021 rv = FALSE; 2022 rw_wlock(&pvh_global_lock); 2023 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2024 if (pv->pv_pmap == pmap) { 2025 rv = TRUE; 2026 break; 2027 } 2028 if (++loops >= 16) 2029 break; 2030 } 2031 rw_wunlock(&pvh_global_lock); 2032 return (rv); 2033 } 2034 2035 /* 2036 * Return the number of managed mappings to the given physical page that are 2037 * wired. 2038 */ 2039 static int 2040 mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m) 2041 { 2042 pv_entry_t pv; 2043 pte_t *pte; 2044 int count = 0; 2045 2046 if ((m->oflags & VPO_UNMANAGED) != 0) 2047 return (count); 2048 rw_wlock(&pvh_global_lock); 2049 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2050 PMAP_LOCK(pv->pv_pmap); 2051 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) 2052 if (PTE_ISVALID(pte) && PTE_ISWIRED(pte)) 2053 count++; 2054 PMAP_UNLOCK(pv->pv_pmap); 2055 } 2056 rw_wunlock(&pvh_global_lock); 2057 return (count); 2058 } 2059 2060 static int 2061 mmu_booke_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size) 2062 { 2063 int i; 2064 vm_offset_t va; 2065 2066 /* 2067 * This currently does not work for entries that 2068 * overlap TLB1 entries. 2069 */ 2070 for (i = 0; i < TLB1_ENTRIES; i ++) { 2071 if (tlb1_iomapped(i, pa, size, &va) == 0) 2072 return (0); 2073 } 2074 2075 return (EFAULT); 2076 } 2077 2078 void 2079 mmu_booke_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va) 2080 { 2081 vm_paddr_t ppa; 2082 vm_offset_t ofs; 2083 vm_size_t gran; 2084 2085 /* Minidumps are based on virtual memory addresses. */ 2086 if (do_minidump) { 2087 *va = (void *)(vm_offset_t)pa; 2088 return; 2089 } 2090 2091 /* Raw physical memory dumps don't have a virtual address. */ 2092 /* We always map a 256MB page at 256M. 
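 * The mapping granule is aligned down to a 256MB boundary and the caller's
 * offset into that granule is carried along; a second granule is mapped only
 * when the requested window crosses the first boundary. The same arithmetic
 * in standalone form (the physical address is an arbitrary example and
 * DEMO_ROUNDDOWN2() stands in for rounddown2() from <sys/param.h>):
 *
 *	#include <assert.h>
 *	#include <stdint.h>
 *
 *	#define DEMO_GRAN		(256u * 1024 * 1024)
 *	#define DEMO_ROUNDDOWN2(x, y)	((x) & ~((uint64_t)(y) - 1))
 *
 *	int
 *	main(void)
 *	{
 *		uint64_t pa = 0x23456789;
 *		uint64_t ppa = DEMO_ROUNDDOWN2(pa, DEMO_GRAN);	// 0x20000000
 *		uint64_t ofs = pa - ppa;			// 0x03456789
 *
 *		assert(ppa % DEMO_GRAN == 0);
 *		assert(ofs < DEMO_GRAN);
 *		// A dump window of sz bytes needs the second granule exactly
 *		// when sz > DEMO_GRAN - ofs.
 *		return (0);
 *	}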
*/ 2093 gran = 256 * 1024 * 1024; 2094 ppa = rounddown2(pa, gran); 2095 ofs = pa - ppa; 2096 *va = (void *)gran; 2097 tlb1_set_entry((vm_offset_t)va, ppa, gran, _TLB_ENTRY_IO); 2098 2099 if (sz > (gran - ofs)) 2100 tlb1_set_entry((vm_offset_t)(va + gran), ppa + gran, gran, 2101 _TLB_ENTRY_IO); 2102 } 2103 2104 void 2105 mmu_booke_dumpsys_unmap(mmu_t mmu, vm_paddr_t pa, size_t sz, void *va) 2106 { 2107 vm_paddr_t ppa; 2108 vm_offset_t ofs; 2109 vm_size_t gran; 2110 tlb_entry_t e; 2111 int i; 2112 2113 /* Minidumps are based on virtual memory addresses. */ 2114 /* Nothing to do... */ 2115 if (do_minidump) 2116 return; 2117 2118 for (i = 0; i < TLB1_ENTRIES; i++) { 2119 tlb1_read_entry(&e, i); 2120 if (!(e.mas1 & MAS1_VALID)) 2121 break; 2122 } 2123 2124 /* Raw physical memory dumps don't have a virtual address. */ 2125 i--; 2126 e.mas1 = 0; 2127 e.mas2 = 0; 2128 e.mas3 = 0; 2129 tlb1_write_entry(&e, i); 2130 2131 gran = 256 * 1024 * 1024; 2132 ppa = rounddown2(pa, gran); 2133 ofs = pa - ppa; 2134 if (sz > (gran - ofs)) { 2135 i--; 2136 e.mas1 = 0; 2137 e.mas2 = 0; 2138 e.mas3 = 0; 2139 tlb1_write_entry(&e, i); 2140 } 2141 } 2142 2143 extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1]; 2144 2145 void 2146 mmu_booke_scan_init(mmu_t mmu) 2147 { 2148 vm_offset_t va; 2149 pte_t *pte; 2150 int i; 2151 2152 if (!do_minidump) { 2153 /* Initialize phys. segments for dumpsys(). */ 2154 memset(&dump_map, 0, sizeof(dump_map)); 2155 mem_regions(&physmem_regions, &physmem_regions_sz, &availmem_regions, 2156 &availmem_regions_sz); 2157 for (i = 0; i < physmem_regions_sz; i++) { 2158 dump_map[i].pa_start = physmem_regions[i].mr_start; 2159 dump_map[i].pa_size = physmem_regions[i].mr_size; 2160 } 2161 return; 2162 } 2163 2164 /* Virtual segments for minidumps: */ 2165 memset(&dump_map, 0, sizeof(dump_map)); 2166 2167 /* 1st: kernel .data and .bss. */ 2168 dump_map[0].pa_start = trunc_page((uintptr_t)_etext); 2169 dump_map[0].pa_size = 2170 round_page((uintptr_t)_end) - dump_map[0].pa_start; 2171 2172 /* 2nd: msgbuf and tables (see pmap_bootstrap()). */ 2173 dump_map[1].pa_start = data_start; 2174 dump_map[1].pa_size = data_end - data_start; 2175 2176 /* 3rd: kernel VM. */ 2177 va = dump_map[1].pa_start + dump_map[1].pa_size; 2178 /* Find start of next chunk (from va). */ 2179 while (va < virtual_end) { 2180 /* Don't dump the buffer cache. */ 2181 if (va >= kmi.buffer_sva && va < kmi.buffer_eva) { 2182 va = kmi.buffer_eva; 2183 continue; 2184 } 2185 pte = pte_find(mmu, kernel_pmap, va); 2186 if (pte != NULL && PTE_ISVALID(pte)) 2187 break; 2188 va += PAGE_SIZE; 2189 } 2190 if (va < virtual_end) { 2191 dump_map[2].pa_start = va; 2192 va += PAGE_SIZE; 2193 /* Find last page in chunk. */ 2194 while (va < virtual_end) { 2195 /* Don't run into the buffer cache. */ 2196 if (va == kmi.buffer_sva) 2197 break; 2198 pte = pte_find(mmu, kernel_pmap, va); 2199 if (pte == NULL || !PTE_ISVALID(pte)) 2200 break; 2201 va += PAGE_SIZE; 2202 } 2203 dump_map[2].pa_size = va - dump_map[2].pa_start; 2204 } 2205 } 2206 2207 /* 2208 * Map a set of physical memory pages into the kernel virtual address space. 2209 * Return a pointer to where it is mapped. This routine is intended to be used 2210 * for mapping device memory, NOT real memory. 
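 *
 * Consumers normally reach this through the machine-independent
 * pmap_mapdev()/pmap_unmapdev() wrappers (or the bus_space mapping routines
 * layered on top of them). A hypothetical direct use, with an invented
 * device address and size:
 *
 *	void *regs;
 *
 *	regs = pmap_mapdev(0xfe000000ul, PAGE_SIZE);
 *	if (regs != NULL) {
 *		// ... poke device registers through 'regs' ...
 *		pmap_unmapdev((vm_offset_t)regs, PAGE_SIZE);
 *	}
 *
 * Note that on this pmap the unmap side is currently a no-op unless
 * SUPPORTS_SHRINKING_TLB1 is defined (see mmu_booke_unmapdev() below).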
2211 */ 2212 static void * 2213 mmu_booke_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size) 2214 { 2215 2216 return (mmu_booke_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT)); 2217 } 2218 2219 static int 2220 tlb1_find_pa(vm_paddr_t pa, tlb_entry_t *e) 2221 { 2222 int i; 2223 2224 for (i = 0; i < TLB1_ENTRIES; i++) { 2225 tlb1_read_entry(e, i); 2226 if ((e->mas1 & MAS1_VALID) == 0) 2227 continue; 2228 if (e->phys == pa) 2229 return (i); 2230 } 2231 return (-1); 2232 } 2233 2234 static void * 2235 mmu_booke_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma) 2236 { 2237 tlb_entry_t e; 2238 vm_paddr_t tmppa; 2239 #ifndef __powerpc64__ 2240 uintptr_t tmpva; 2241 #endif 2242 uintptr_t va, retva; 2243 vm_size_t sz; 2244 int i; 2245 int wimge; 2246 2247 /* 2248 * Check if this is premapped in TLB1. 2249 */ 2250 sz = size; 2251 tmppa = pa; 2252 va = ~0; 2253 wimge = tlb_calc_wimg(pa, ma); 2254 for (i = 0; i < TLB1_ENTRIES; i++) { 2255 tlb1_read_entry(&e, i); 2256 if (!(e.mas1 & MAS1_VALID)) 2257 continue; 2258 if (wimge != (e.mas2 & (MAS2_WIMGE_MASK & ~_TLB_ENTRY_SHARED))) 2259 continue; 2260 if (tmppa >= e.phys && tmppa < e.phys + e.size) { 2261 va = e.virt + (pa - e.phys); 2262 tmppa = e.phys + e.size; 2263 sz -= MIN(sz, e.size - (pa - e.phys)); 2264 while (sz > 0 && (i = tlb1_find_pa(tmppa, &e)) != -1) { 2265 if (wimge != (e.mas2 & (MAS2_WIMGE_MASK & ~_TLB_ENTRY_SHARED))) 2266 break; 2267 sz -= MIN(sz, e.size); 2268 tmppa = e.phys + e.size; 2269 } 2270 if (sz != 0) 2271 break; 2272 return ((void *)va); 2273 } 2274 } 2275 2276 size = roundup(size, PAGE_SIZE); 2277 2278 #ifdef __powerpc64__ 2279 KASSERT(pa < VM_MAPDEV_PA_MAX, 2280 ("Unsupported physical address! %lx", pa)); 2281 va = VM_MAPDEV_BASE + pa; 2282 retva = va; 2283 #ifdef POW2_MAPPINGS 2284 /* 2285 * Align the mapping to a power of 2 size, taking into account that we 2286 * may need to increase the size multiple times to satisfy the size and 2287 * alignment requirements. 2288 * 2289 * This works in the general case because it's very rare (near never?) 2290 * to have different access properties (WIMG) within a single 2291 * power-of-two region. If a design does call for that, POW2_MAPPINGS 2292 * can be undefined, and exact mappings will be used instead. 2293 */ 2294 sz = size; 2295 size = roundup2(size, 1 << ilog2(size)); 2296 while (rounddown2(va, size) + size < va + sz) 2297 size <<= 1; 2298 va = rounddown2(va, size); 2299 pa = rounddown2(pa, size); 2300 #endif 2301 #else 2302 /* 2303 * The device mapping area is between VM_MAXUSER_ADDRESS and 2304 * VM_MIN_KERNEL_ADDRESS. This gives 1GB of device addressing. 2305 */ 2306 #ifdef SPARSE_MAPDEV 2307 /* 2308 * With a sparse mapdev, align to the largest starting region. This 2309 * could feasibly be optimized for a 'best-fit' alignment, but that 2310 * calculation could be very costly. 2311 * Align to the smaller of: 2312 * - first set bit in overlap of (pa & size mask) 2313 * - largest size envelope 2314 * 2315 * It's possible the device mapping may start at a PA that's not larger 2316 * than the size mask, so we need to offset in to maximize the TLB entry 2317 * range and minimize the number of used TLB entries. 2318 */ 2319 do { 2320 tmpva = tlb1_map_base; 2321 sz = ffsl((~((1 << flsl(size-1)) - 1)) & pa); 2322 sz = sz ? 
min(roundup(sz + 3, 4), flsl(size) - 1) : flsl(size) - 1; 2323 va = roundup(tlb1_map_base, 1 << sz) | (((1 << sz) - 1) & pa); 2324 } while (!atomic_cmpset_int(&tlb1_map_base, tmpva, va + size)); 2325 #endif 2326 va = atomic_fetchadd_int(&tlb1_map_base, size); 2327 retva = va; 2328 #endif 2329 2330 if (tlb1_mapin_region(va, pa, size, tlb_calc_wimg(pa, ma)) != size) 2331 return (NULL); 2332 2333 return ((void *)retva); 2334 } 2335 2336 /* 2337 * 'Unmap' a range mapped by mmu_booke_mapdev(). 2338 */ 2339 static void 2340 mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size) 2341 { 2342 #ifdef SUPPORTS_SHRINKING_TLB1 2343 vm_offset_t base, offset; 2344 2345 /* 2346 * Unmap only if this is inside kernel virtual space. 2347 */ 2348 if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) { 2349 base = trunc_page(va); 2350 offset = va & PAGE_MASK; 2351 size = roundup(offset + size, PAGE_SIZE); 2352 kva_free(base, size); 2353 } 2354 #endif 2355 } 2356 2357 /* 2358 * mmu_booke_object_init_pt preloads the ptes for a given object into the 2359 * specified pmap. This eliminates the blast of soft faults on process startup 2360 * and immediately after an mmap. 2361 */ 2362 static void 2363 mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr, 2364 vm_object_t object, vm_pindex_t pindex, vm_size_t size) 2365 { 2366 2367 VM_OBJECT_ASSERT_WLOCKED(object); 2368 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, 2369 ("mmu_booke_object_init_pt: non-device object")); 2370 } 2371 2372 /* 2373 * Perform the pmap work for mincore. 2374 */ 2375 static int 2376 mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr, 2377 vm_paddr_t *pap) 2378 { 2379 2380 /* XXX: this should be implemented at some point */ 2381 return (0); 2382 } 2383 2384 static int 2385 mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr, vm_size_t sz, 2386 vm_memattr_t mode) 2387 { 2388 vm_offset_t va; 2389 pte_t *pte; 2390 int i, j; 2391 tlb_entry_t e; 2392 2393 addr = trunc_page(addr); 2394 2395 /* Only allow changes to mapped kernel addresses. This includes: 2396 * - KVA 2397 * - DMAP (powerpc64) 2398 * - Device mappings 2399 */ 2400 if (addr <= VM_MAXUSER_ADDRESS || 2401 #ifdef __powerpc64__ 2402 (addr >= tlb1_map_base && addr < DMAP_BASE_ADDRESS) || 2403 (addr > DMAP_MAX_ADDRESS && addr < VM_MIN_KERNEL_ADDRESS) || 2404 #else 2405 (addr >= tlb1_map_base && addr < VM_MIN_KERNEL_ADDRESS) || 2406 #endif 2407 (addr > VM_MAX_KERNEL_ADDRESS)) 2408 return (EINVAL); 2409 2410 /* Check TLB1 mappings */ 2411 for (i = 0; i < TLB1_ENTRIES; i++) { 2412 tlb1_read_entry(&e, i); 2413 if (!(e.mas1 & MAS1_VALID)) 2414 continue; 2415 if (addr >= e.virt && addr < e.virt + e.size) 2416 break; 2417 } 2418 if (i < TLB1_ENTRIES) { 2419 /* Only allow full mappings to be modified for now. */ 2420 /* Validate the range. */ 2421 for (j = i, va = addr; va < addr + sz; va += e.size, j++) { 2422 tlb1_read_entry(&e, j); 2423 if (va != e.virt || (sz - (va - addr) < e.size)) 2424 return (EINVAL); 2425 } 2426 for (va = addr; va < addr + sz; va += e.size, i++) { 2427 tlb1_read_entry(&e, i); 2428 e.mas2 &= ~MAS2_WIMGE_MASK; 2429 e.mas2 |= tlb_calc_wimg(e.phys, mode); 2430 2431 /* 2432 * Write it out to the TLB. Should really re-sync with other 2433 * cores. 2434 */ 2435 tlb1_write_entry(&e, i); 2436 } 2437 return (0); 2438 } 2439 2440 /* Not in TLB1, try through pmap */ 2441 /* First validate the range. 
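 * The code below walks the range twice on purpose: one pass proves that
 * every page is mapped before anything is touched, and only then does a
 * second pass rewrite the WIMG bits, so a bad argument cannot leave the
 * range half-converted. The control-flow pattern in isolation (the helper
 * names are invented for the sketch):
 *
 *	#include <errno.h>
 *	#include <stdbool.h>
 *	#include <stddef.h>
 *
 *	static int
 *	demo_all_or_nothing(bool (*valid)(size_t), void (*apply)(size_t),
 *	    size_t npages)
 *	{
 *		size_t i;
 *
 *		for (i = 0; i < npages; i++)
 *			if (!valid(i))
 *				return (EINVAL);	// nothing modified yet
 *		for (i = 0; i < npages; i++)
 *			apply(i);			// now safe to commit
 *		return (0);
 *	}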
*/ 2442 for (va = addr; va < addr + sz; va += PAGE_SIZE) { 2443 pte = pte_find(mmu, kernel_pmap, va); 2444 if (pte == NULL || !PTE_ISVALID(pte)) 2445 return (EINVAL); 2446 } 2447 2448 mtx_lock_spin(&tlbivax_mutex); 2449 tlb_miss_lock(); 2450 for (va = addr; va < addr + sz; va += PAGE_SIZE) { 2451 pte = pte_find(mmu, kernel_pmap, va); 2452 *pte &= ~(PTE_MAS2_MASK << PTE_MAS2_SHIFT); 2453 *pte |= tlb_calc_wimg(PTE_PA(pte), mode) << PTE_MAS2_SHIFT; 2454 tlb0_flush_entry(va); 2455 } 2456 tlb_miss_unlock(); 2457 mtx_unlock_spin(&tlbivax_mutex); 2458 2459 return (0); 2460 } 2461 2462 static void 2463 mmu_booke_page_array_startup(mmu_t mmu, long pages) 2464 { 2465 vm_page_array_size = pages; 2466 } 2467 2468 /**************************************************************************/ 2469 /* TID handling */ 2470 /**************************************************************************/ 2471 2472 /* 2473 * Allocate a TID. If necessary, steal one from someone else. 2474 * The new TID is flushed from the TLB before returning. 2475 */ 2476 static tlbtid_t 2477 tid_alloc(pmap_t pmap) 2478 { 2479 tlbtid_t tid; 2480 int thiscpu; 2481 2482 KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap")); 2483 2484 CTR2(KTR_PMAP, "%s: s (pmap = %p)", __func__, pmap); 2485 2486 thiscpu = PCPU_GET(cpuid); 2487 2488 tid = PCPU_GET(booke.tid_next); 2489 if (tid > TID_MAX) 2490 tid = TID_MIN; 2491 PCPU_SET(booke.tid_next, tid + 1); 2492 2493 /* If we are stealing TID then clear the relevant pmap's field */ 2494 if (tidbusy[thiscpu][tid] != NULL) { 2495 2496 CTR2(KTR_PMAP, "%s: warning: stealing tid %d", __func__, tid); 2497 2498 tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE; 2499 2500 /* Flush all entries from TLB0 matching this TID. */ 2501 tid_flush(tid); 2502 } 2503 2504 tidbusy[thiscpu][tid] = pmap; 2505 pmap->pm_tid[thiscpu] = tid; 2506 __asm __volatile("msync; isync"); 2507 2508 CTR3(KTR_PMAP, "%s: e (%02d next = %02d)", __func__, tid, 2509 PCPU_GET(booke.tid_next)); 2510 2511 return (tid); 2512 } 2513 2514 /**************************************************************************/ 2515 /* TLB0 handling */ 2516 /**************************************************************************/ 2517 2518 /* Convert TLB0 va and way number to tlb0[] table index. */ 2519 static inline unsigned int 2520 tlb0_tableidx(vm_offset_t va, unsigned int way) 2521 { 2522 unsigned int idx; 2523 2524 idx = (way * TLB0_ENTRIES_PER_WAY); 2525 idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT; 2526 return (idx); 2527 } 2528 2529 /* 2530 * Invalidate TLB0 entry. 2531 */ 2532 static inline void 2533 tlb0_flush_entry(vm_offset_t va) 2534 { 2535 2536 CTR2(KTR_PMAP, "%s: s va=0x%08x", __func__, va); 2537 2538 mtx_assert(&tlbivax_mutex, MA_OWNED); 2539 2540 __asm __volatile("tlbivax 0, %0" :: "r"(va & MAS2_EPN_MASK)); 2541 __asm __volatile("isync; msync"); 2542 __asm __volatile("tlbsync; msync"); 2543 2544 CTR1(KTR_PMAP, "%s: e", __func__); 2545 } 2546 2547 2548 /**************************************************************************/ 2549 /* TLB1 handling */ 2550 /**************************************************************************/ 2551 2552 /* 2553 * TLB1 mapping notes: 2554 * 2555 * TLB1[0] Kernel text and data. 2556 * TLB1[1-15] Additional kernel text and data mappings (if required), PCI 2557 * windows, other devices mappings. 2558 */ 2559 2560 /* 2561 * Read an entry from given TLB1 slot. 
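 *
 * The sequence is: disable external interrupts, point MAS0 at TLB1 and the
 * requested slot, execute tlbre, then copy MAS1/MAS2/MAS3 (and MAS7 where
 * the core has one) into the software descriptor. The physical address is
 * reassembled from two registers; that piece on its own, with illustrative
 * stand-ins for the MAS3_RPN/MAS7_RPN masks from <machine/tlb.h>:
 *
 *	#include <assert.h>
 *	#include <stdint.h>
 *
 *	#define DEMO_MAS3_RPN	0xfffff000u	// low RPN bits live in MAS3
 *	#define DEMO_MAS7_RPN	0x0000000fu	// high RPN bits live in MAS7
 *
 *	static uint64_t
 *	demo_mas_to_pa(uint32_t mas3, uint32_t mas7)
 *	{
 *		return (((uint64_t)(mas7 & DEMO_MAS7_RPN) << 32) |
 *		    (mas3 & DEMO_MAS3_RPN));
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		// PA 0xf_c000_0000 split across MAS7:MAS3.
 *		assert(demo_mas_to_pa(0xc0000000u, 0xfu) == 0xfc0000000ull);
 *		return (0);
 *	}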
2562 */ 2563 void 2564 tlb1_read_entry(tlb_entry_t *entry, unsigned int slot) 2565 { 2566 register_t msr; 2567 uint32_t mas0; 2568 2569 KASSERT((entry != NULL), ("%s(): Entry is NULL!", __func__)); 2570 2571 msr = mfmsr(); 2572 __asm __volatile("wrteei 0"); 2573 2574 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(slot); 2575 mtspr(SPR_MAS0, mas0); 2576 __asm __volatile("isync; tlbre"); 2577 2578 entry->mas1 = mfspr(SPR_MAS1); 2579 entry->mas2 = mfspr(SPR_MAS2); 2580 entry->mas3 = mfspr(SPR_MAS3); 2581 2582 switch ((mfpvr() >> 16) & 0xFFFF) { 2583 case FSL_E500v2: 2584 case FSL_E500mc: 2585 case FSL_E5500: 2586 case FSL_E6500: 2587 entry->mas7 = mfspr(SPR_MAS7); 2588 break; 2589 default: 2590 entry->mas7 = 0; 2591 break; 2592 } 2593 __asm __volatile("wrtee %0" :: "r"(msr)); 2594 2595 entry->virt = entry->mas2 & MAS2_EPN_MASK; 2596 entry->phys = ((vm_paddr_t)(entry->mas7 & MAS7_RPN) << 32) | 2597 (entry->mas3 & MAS3_RPN); 2598 entry->size = 2599 tsize2size((entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT); 2600 } 2601 2602 struct tlbwrite_args { 2603 tlb_entry_t *e; 2604 unsigned int idx; 2605 }; 2606 2607 static uint32_t 2608 tlb1_find_free(void) 2609 { 2610 tlb_entry_t e; 2611 int i; 2612 2613 for (i = 0; i < TLB1_ENTRIES; i++) { 2614 tlb1_read_entry(&e, i); 2615 if ((e.mas1 & MAS1_VALID) == 0) 2616 return (i); 2617 } 2618 return (-1); 2619 } 2620 2621 static void 2622 tlb1_purge_va_range(vm_offset_t va, vm_size_t size) 2623 { 2624 tlb_entry_t e; 2625 int i; 2626 2627 for (i = 0; i < TLB1_ENTRIES; i++) { 2628 tlb1_read_entry(&e, i); 2629 if ((e.mas1 & MAS1_VALID) == 0) 2630 continue; 2631 if ((e.mas2 & MAS2_EPN_MASK) >= va && 2632 (e.mas2 & MAS2_EPN_MASK) < va + size) { 2633 mtspr(SPR_MAS1, e.mas1 & ~MAS1_VALID); 2634 __asm __volatile("isync; tlbwe; isync; msync"); 2635 } 2636 } 2637 } 2638 2639 static void 2640 tlb1_write_entry_int(void *arg) 2641 { 2642 struct tlbwrite_args *args = arg; 2643 uint32_t idx, mas0; 2644 2645 idx = args->idx; 2646 if (idx == -1) { 2647 tlb1_purge_va_range(args->e->virt, args->e->size); 2648 idx = tlb1_find_free(); 2649 if (idx == -1) 2650 panic("No free TLB1 entries!\n"); 2651 } 2652 /* Select entry */ 2653 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(idx); 2654 2655 mtspr(SPR_MAS0, mas0); 2656 mtspr(SPR_MAS1, args->e->mas1); 2657 mtspr(SPR_MAS2, args->e->mas2); 2658 mtspr(SPR_MAS3, args->e->mas3); 2659 switch ((mfpvr() >> 16) & 0xFFFF) { 2660 case FSL_E500mc: 2661 case FSL_E5500: 2662 case FSL_E6500: 2663 mtspr(SPR_MAS8, 0); 2664 /* FALLTHROUGH */ 2665 case FSL_E500v2: 2666 mtspr(SPR_MAS7, args->e->mas7); 2667 break; 2668 default: 2669 break; 2670 } 2671 2672 __asm __volatile("isync; tlbwe; isync; msync"); 2673 2674 } 2675 2676 static void 2677 tlb1_write_entry_sync(void *arg) 2678 { 2679 /* Empty synchronization point for smp_rendezvous(). */ 2680 } 2681 2682 /* 2683 * Write given entry to TLB1 hardware. 2684 */ 2685 static void 2686 tlb1_write_entry(tlb_entry_t *e, unsigned int idx) 2687 { 2688 struct tlbwrite_args args; 2689 2690 args.e = e; 2691 args.idx = idx; 2692 2693 #ifdef SMP 2694 if ((e->mas2 & _TLB_ENTRY_SHARED) && smp_started) { 2695 mb(); 2696 smp_rendezvous(tlb1_write_entry_sync, 2697 tlb1_write_entry_int, 2698 tlb1_write_entry_sync, &args); 2699 } else 2700 #endif 2701 { 2702 register_t msr; 2703 2704 msr = mfmsr(); 2705 __asm __volatile("wrteei 0"); 2706 tlb1_write_entry_int(&args); 2707 __asm __volatile("wrtee %0" :: "r"(msr)); 2708 } 2709 } 2710 2711 /* 2712 * Convert TLB TSIZE value to mapped region size. 
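 *
 * The encoding is the power-of-4 one used by these cores: a TSIZE of t
 * selects a 4^t KB page, so t = 1 is 4KB, t = 7 is 16MB and t = 9 is 256MB.
 * A self-checking sketch of the conversion and of the size2tsize() inverse
 * defined just below (demo_ilog2() is written out longhand so the snippet
 * stands alone):
 *
 *	#include <assert.h>
 *	#include <stdint.h>
 *
 *	static unsigned
 *	demo_ilog2(uint64_t x)
 *	{
 *		unsigned n = 0;
 *
 *		while ((x >>= 1) != 0)
 *			n++;
 *		return (n);
 *	}
 *
 *	static uint64_t
 *	demo_tsize2size(unsigned tsize)
 *	{
 *		return ((uint64_t)1024 << (2 * tsize));
 *	}
 *
 *	static unsigned
 *	demo_size2tsize(uint64_t size)
 *	{
 *		return (demo_ilog2(size) / 2 - 5);
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		assert(demo_tsize2size(1) == 4 * 1024);
 *		assert(demo_tsize2size(7) == 16 * 1024 * 1024);
 *		assert(demo_size2tsize(256 * 1024 * 1024) == 9);
 *		assert(demo_size2tsize(demo_tsize2size(5)) == 5);
 *		return (0);
 *	}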
2713 */ 2714 static vm_size_t 2715 tsize2size(unsigned int tsize) 2716 { 2717 2718 /* 2719 * size = 4^tsize KB 2720 * size = 4^tsize * 2^10 = 2^(2 * tsize - 10) 2721 */ 2722 2723 return ((1 << (2 * tsize)) * 1024); 2724 } 2725 2726 /* 2727 * Convert region size (must be power of 4) to TLB TSIZE value. 2728 */ 2729 static unsigned int 2730 size2tsize(vm_size_t size) 2731 { 2732 2733 return (ilog2(size) / 2 - 5); 2734 } 2735 2736 /* 2737 * Register permanent kernel mapping in TLB1. 2738 * 2739 * Entries are created starting from index 0 (current free entry is 2740 * kept in tlb1_idx) and are not supposed to be invalidated. 2741 */ 2742 int 2743 tlb1_set_entry(vm_offset_t va, vm_paddr_t pa, vm_size_t size, 2744 uint32_t flags) 2745 { 2746 tlb_entry_t e; 2747 uint32_t ts, tid; 2748 int tsize, index; 2749 2750 /* First try to update an existing entry. */ 2751 for (index = 0; index < TLB1_ENTRIES; index++) { 2752 tlb1_read_entry(&e, index); 2753 /* Check if we're just updating the flags, and update them. */ 2754 if (e.phys == pa && e.virt == va && e.size == size) { 2755 e.mas2 = (va & MAS2_EPN_MASK) | flags; 2756 tlb1_write_entry(&e, index); 2757 return (0); 2758 } 2759 } 2760 2761 /* Convert size to TSIZE */ 2762 tsize = size2tsize(size); 2763 2764 tid = (TID_KERNEL << MAS1_TID_SHIFT) & MAS1_TID_MASK; 2765 /* XXX TS is hard coded to 0 for now as we only use single address space */ 2766 ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK; 2767 2768 e.phys = pa; 2769 e.virt = va; 2770 e.size = size; 2771 e.mas1 = MAS1_VALID | MAS1_IPROT | ts | tid; 2772 e.mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK); 2773 e.mas2 = (va & MAS2_EPN_MASK) | flags; 2774 2775 /* Set supervisor RWX permission bits */ 2776 e.mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX; 2777 e.mas7 = (pa >> 32) & MAS7_RPN; 2778 2779 tlb1_write_entry(&e, -1); 2780 2781 return (0); 2782 } 2783 2784 /* 2785 * Map in contiguous RAM region into the TLB1. 2786 */ 2787 static vm_size_t 2788 tlb1_mapin_region(vm_offset_t va, vm_paddr_t pa, vm_size_t size, int wimge) 2789 { 2790 vm_offset_t base; 2791 vm_size_t mapped, sz, ssize; 2792 2793 mapped = 0; 2794 base = va; 2795 ssize = size; 2796 2797 while (size > 0) { 2798 sz = 1UL << (ilog2(size) & ~1); 2799 /* Align size to PA */ 2800 if (pa % sz != 0) { 2801 do { 2802 sz >>= 2; 2803 } while (pa % sz != 0); 2804 } 2805 /* Now align from there to VA */ 2806 if (va % sz != 0) { 2807 do { 2808 sz >>= 2; 2809 } while (va % sz != 0); 2810 } 2811 #ifdef __powerpc64__ 2812 /* 2813 * Clamp TLB1 entries to 4G. 2814 * 2815 * While the e6500 supports up to 1TB mappings, the e5500 2816 * only supports up to 4G mappings. (0b1011) 2817 * 2818 * If any e6500 machines capable of supporting a very 2819 * large amount of memory appear in the future, we can 2820 * revisit this. 2821 * 2822 * For now, though, since we have plenty of space in TLB1, 2823 * always avoid creating entries larger than 4GB. 
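 *
 * Apart from the 4GB clamp, each iteration of the surrounding loop picks
 * the largest power-of-4 chunk that the remaining size and the current
 * PA/VA alignment both allow, which is what keeps the number of TLB1
 * entries small. That selection step on its own (demo code, with the
 * ilog2()-style floor log2 written out by hand):
 *
 *	#include <assert.h>
 *	#include <stdint.h>
 *
 *	static uint64_t
 *	demo_pick_chunk(uint64_t va, uint64_t pa, uint64_t size)
 *	{
 *		unsigned log2sz = 0;
 *		uint64_t sz;
 *
 *		while (((uint64_t)2 << log2sz) <= size)
 *			log2sz++;
 *		sz = (uint64_t)1 << (log2sz & ~1u);	// power of 4 <= size
 *		while ((pa % sz) != 0 || (va % sz) != 0)
 *			sz >>= 2;			// honour alignment
 *		return (sz);
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		// A 16MB request whose PA is only 1MB aligned has to start
 *		// with a 1MB entry.
 *		assert(demo_pick_chunk(0x10000000, 0x00100000, 0x1000000) ==
 *		    0x100000);
 *		// Fully aligned requests get the whole power-of-4 chunk.
 *		assert(demo_pick_chunk(0x10000000, 0x40000000, 0x1000000) ==
 *		    0x1000000);
 *		return (0);
 *	}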
2824 */ 2825 sz = MIN(sz, 1UL << 32); 2826 #endif 2827 if (bootverbose) 2828 printf("Wiring VA=%p to PA=%jx (size=%lx)\n", 2829 (void *)va, (uintmax_t)pa, (long)sz); 2830 if (tlb1_set_entry(va, pa, sz, 2831 _TLB_ENTRY_SHARED | wimge) < 0) 2832 return (mapped); 2833 size -= sz; 2834 pa += sz; 2835 va += sz; 2836 } 2837 2838 mapped = (va - base); 2839 if (bootverbose) 2840 printf("mapped size 0x%"PRIxPTR" (wasted space 0x%"PRIxPTR")\n", 2841 mapped, mapped - ssize); 2842 2843 return (mapped); 2844 } 2845 2846 /* 2847 * TLB1 initialization routine, to be called after the very first 2848 * assembler level setup done in locore.S. 2849 */ 2850 void 2851 tlb1_init() 2852 { 2853 vm_offset_t mas2; 2854 uint32_t mas0, mas1, mas3, mas7; 2855 uint32_t tsz; 2856 2857 tlb1_get_tlbconf(); 2858 2859 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(0); 2860 mtspr(SPR_MAS0, mas0); 2861 __asm __volatile("isync; tlbre"); 2862 2863 mas1 = mfspr(SPR_MAS1); 2864 mas2 = mfspr(SPR_MAS2); 2865 mas3 = mfspr(SPR_MAS3); 2866 mas7 = mfspr(SPR_MAS7); 2867 2868 kernload = ((vm_paddr_t)(mas7 & MAS7_RPN) << 32) | 2869 (mas3 & MAS3_RPN); 2870 2871 tsz = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 2872 kernsize += (tsz > 0) ? tsize2size(tsz) : 0; 2873 kernstart = trunc_page(mas2); 2874 2875 /* Setup TLB miss defaults */ 2876 set_mas4_defaults(); 2877 } 2878 2879 /* 2880 * pmap_early_io_unmap() should be used in short conjunction with 2881 * pmap_early_io_map(), as in the following snippet: 2882 * 2883 * x = pmap_early_io_map(...); 2884 * <do something with x> 2885 * pmap_early_io_unmap(x, size); 2886 * 2887 * And avoiding more allocations between. 2888 */ 2889 void 2890 pmap_early_io_unmap(vm_offset_t va, vm_size_t size) 2891 { 2892 int i; 2893 tlb_entry_t e; 2894 vm_size_t isize; 2895 2896 size = roundup(size, PAGE_SIZE); 2897 isize = size; 2898 for (i = 0; i < TLB1_ENTRIES && size > 0; i++) { 2899 tlb1_read_entry(&e, i); 2900 if (!(e.mas1 & MAS1_VALID)) 2901 continue; 2902 if (va <= e.virt && (va + isize) >= (e.virt + e.size)) { 2903 size -= e.size; 2904 e.mas1 &= ~MAS1_VALID; 2905 tlb1_write_entry(&e, i); 2906 } 2907 } 2908 if (tlb1_map_base == va + isize) 2909 tlb1_map_base -= isize; 2910 } 2911 2912 vm_offset_t 2913 pmap_early_io_map(vm_paddr_t pa, vm_size_t size) 2914 { 2915 vm_paddr_t pa_base; 2916 vm_offset_t va, sz; 2917 int i; 2918 tlb_entry_t e; 2919 2920 KASSERT(!pmap_bootstrapped, ("Do not use after PMAP is up!")); 2921 2922 for (i = 0; i < TLB1_ENTRIES; i++) { 2923 tlb1_read_entry(&e, i); 2924 if (!(e.mas1 & MAS1_VALID)) 2925 continue; 2926 if (pa >= e.phys && (pa + size) <= 2927 (e.phys + e.size)) 2928 return (e.virt + (pa - e.phys)); 2929 } 2930 2931 pa_base = rounddown(pa, PAGE_SIZE); 2932 size = roundup(size + (pa - pa_base), PAGE_SIZE); 2933 tlb1_map_base = roundup2(tlb1_map_base, 1 << (ilog2(size) & ~1)); 2934 va = tlb1_map_base + (pa - pa_base); 2935 2936 do { 2937 sz = 1 << (ilog2(size) & ~1); 2938 tlb1_set_entry(tlb1_map_base, pa_base, sz, 2939 _TLB_ENTRY_SHARED | _TLB_ENTRY_IO); 2940 size -= sz; 2941 pa_base += sz; 2942 tlb1_map_base += sz; 2943 } while (size > 0); 2944 2945 return (va); 2946 } 2947 2948 void 2949 pmap_track_page(pmap_t pmap, vm_offset_t va) 2950 { 2951 vm_paddr_t pa; 2952 vm_page_t page; 2953 struct pv_entry *pve; 2954 2955 va = trunc_page(va); 2956 pa = pmap_kextract(va); 2957 page = PHYS_TO_VM_PAGE(pa); 2958 2959 rw_wlock(&pvh_global_lock); 2960 PMAP_LOCK(pmap); 2961 2962 TAILQ_FOREACH(pve, &page->md.pv_list, pv_link) { 2963 if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) { 2964 goto out; 
2965 } 2966 } 2967 page->md.pv_tracked = true; 2968 pv_insert(pmap, va, page); 2969 out: 2970 PMAP_UNLOCK(pmap); 2971 rw_wunlock(&pvh_global_lock); 2972 } 2973 2974 2975 /* 2976 * Setup MAS4 defaults. 2977 * These values are loaded to MAS0-2 on a TLB miss. 2978 */ 2979 static void 2980 set_mas4_defaults(void) 2981 { 2982 uint32_t mas4; 2983 2984 /* Defaults: TLB0, PID0, TSIZED=4K */ 2985 mas4 = MAS4_TLBSELD0; 2986 mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK; 2987 #ifdef SMP 2988 mas4 |= MAS4_MD; 2989 #endif 2990 mtspr(SPR_MAS4, mas4); 2991 __asm __volatile("isync"); 2992 } 2993 2994 2995 /* 2996 * Return 0 if the physical IO range is encompassed by one of the 2997 * the TLB1 entries, otherwise return related error code. 2998 */ 2999 static int 3000 tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va) 3001 { 3002 uint32_t prot; 3003 vm_paddr_t pa_start; 3004 vm_paddr_t pa_end; 3005 unsigned int entry_tsize; 3006 vm_size_t entry_size; 3007 tlb_entry_t e; 3008 3009 *va = (vm_offset_t)NULL; 3010 3011 tlb1_read_entry(&e, i); 3012 /* Skip invalid entries */ 3013 if (!(e.mas1 & MAS1_VALID)) 3014 return (EINVAL); 3015 3016 /* 3017 * The entry must be cache-inhibited, guarded, and r/w 3018 * so it can function as an i/o page 3019 */ 3020 prot = e.mas2 & (MAS2_I | MAS2_G); 3021 if (prot != (MAS2_I | MAS2_G)) 3022 return (EPERM); 3023 3024 prot = e.mas3 & (MAS3_SR | MAS3_SW); 3025 if (prot != (MAS3_SR | MAS3_SW)) 3026 return (EPERM); 3027 3028 /* The address should be within the entry range. */ 3029 entry_tsize = (e.mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 3030 KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize")); 3031 3032 entry_size = tsize2size(entry_tsize); 3033 pa_start = (((vm_paddr_t)e.mas7 & MAS7_RPN) << 32) | 3034 (e.mas3 & MAS3_RPN); 3035 pa_end = pa_start + entry_size; 3036 3037 if ((pa < pa_start) || ((pa + size) > pa_end)) 3038 return (ERANGE); 3039 3040 /* Return virtual address of this mapping. */ 3041 *va = (e.mas2 & MAS2_EPN_MASK) + (pa - pa_start); 3042 return (0); 3043 } 3044 3045 #ifdef DDB 3046 /* Print out contents of the MAS registers for each TLB0 entry */ 3047 static void 3048 #ifdef __powerpc64__ 3049 tlb_print_entry(int i, uint32_t mas1, uint64_t mas2, uint32_t mas3, 3050 #else 3051 tlb_print_entry(int i, uint32_t mas1, uint32_t mas2, uint32_t mas3, 3052 #endif 3053 uint32_t mas7) 3054 { 3055 int as; 3056 char desc[3]; 3057 tlbtid_t tid; 3058 vm_size_t size; 3059 unsigned int tsize; 3060 3061 desc[2] = '\0'; 3062 if (mas1 & MAS1_VALID) 3063 desc[0] = 'V'; 3064 else 3065 desc[0] = ' '; 3066 3067 if (mas1 & MAS1_IPROT) 3068 desc[1] = 'P'; 3069 else 3070 desc[1] = ' '; 3071 3072 as = (mas1 & MAS1_TS_MASK) ? 
1 : 0; 3073 tid = MAS1_GETTID(mas1); 3074 3075 tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 3076 size = 0; 3077 if (tsize) 3078 size = tsize2size(tsize); 3079 3080 printf("%3d: (%s) [AS=%d] " 3081 "sz = 0x%jx tsz = %d tid = %d mas1 = 0x%08x " 3082 "mas2(va) = 0x%"PRI0ptrX" mas3(pa) = 0x%08x mas7 = 0x%08x\n", 3083 i, desc, as, (uintmax_t)size, tsize, tid, mas1, mas2, mas3, mas7); 3084 } 3085 3086 DB_SHOW_COMMAND(tlb0, tlb0_print_tlbentries) 3087 { 3088 uint32_t mas0, mas1, mas3, mas7; 3089 #ifdef __powerpc64__ 3090 uint64_t mas2; 3091 #else 3092 uint32_t mas2; 3093 #endif 3094 int entryidx, way, idx; 3095 3096 printf("TLB0 entries:\n"); 3097 for (way = 0; way < TLB0_WAYS; way ++) 3098 for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) { 3099 3100 mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way); 3101 mtspr(SPR_MAS0, mas0); 3102 3103 mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT; 3104 mtspr(SPR_MAS2, mas2); 3105 3106 __asm __volatile("isync; tlbre"); 3107 3108 mas1 = mfspr(SPR_MAS1); 3109 mas2 = mfspr(SPR_MAS2); 3110 mas3 = mfspr(SPR_MAS3); 3111 mas7 = mfspr(SPR_MAS7); 3112 3113 idx = tlb0_tableidx(mas2, way); 3114 tlb_print_entry(idx, mas1, mas2, mas3, mas7); 3115 } 3116 } 3117 3118 /* 3119 * Print out contents of the MAS registers for each TLB1 entry 3120 */ 3121 DB_SHOW_COMMAND(tlb1, tlb1_print_tlbentries) 3122 { 3123 uint32_t mas0, mas1, mas3, mas7; 3124 #ifdef __powerpc64__ 3125 uint64_t mas2; 3126 #else 3127 uint32_t mas2; 3128 #endif 3129 int i; 3130 3131 printf("TLB1 entries:\n"); 3132 for (i = 0; i < TLB1_ENTRIES; i++) { 3133 3134 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i); 3135 mtspr(SPR_MAS0, mas0); 3136 3137 __asm __volatile("isync; tlbre"); 3138 3139 mas1 = mfspr(SPR_MAS1); 3140 mas2 = mfspr(SPR_MAS2); 3141 mas3 = mfspr(SPR_MAS3); 3142 mas7 = mfspr(SPR_MAS7); 3143 3144 tlb_print_entry(i, mas1, mas2, mas3, mas7); 3145 } 3146 } 3147 #endif 3148
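
/*
 * Usage note for the DDB commands above: from the in-kernel debugger the
 * hardware TLB arrays can be inspected with
 *
 *	db> show tlb0
 *	db> show tlb1
 *
 * which read the entries back through the MAS registers (tlbre) and print
 * them with tlb_print_entry().
 */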