/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Some hw specific parts of this pmap were derived or influenced
 * by NetBSD's ibm4xx pmap module. More generic code is shared with
 * a few other pmap modules from the FreeBSD tree.
 */

/*
 * VM layout notes:
 *
 * Kernel and user threads run within one common virtual address space
 * defined by AS=0.
 *
 * 32-bit pmap:
 * Virtual address space layout:
 * -----------------------------
 * 0x0000_0000 - 0x7fff_ffff	: user process
 * 0x8000_0000 - 0xbfff_ffff	: pmap_mapdev()-ed area (PCI/PCIE etc.)
 * 0xc000_0000 - 0xc0ff_ffff	: kernel reserved
 *   0xc000_0000 - data_end	: kernel code+data, env, metadata etc.
 * 0xc100_0000 - 0xffff_ffff	: KVA
 *   0xc100_0000 - 0xc100_3fff	: reserved for page zero/copy
 *   0xc100_4000 - 0xc200_3fff	: reserved for ptbl bufs
 *   0xc200_4000 - 0xc200_8fff	: guard page + kstack0
 *   0xc200_9000 - 0xfeef_ffff	: actual free KVA space
 *
 * 64-bit pmap:
 * Virtual address space layout:
 * -----------------------------
 * 0x0000_0000_0000_0000 - 0xbfff_ffff_ffff_ffff      : user process
 *   0x0000_0000_0000_0000 - 0x8fff_ffff_ffff_ffff    : text, data, heap, maps, libraries
 *   0x9000_0000_0000_0000 - 0xafff_ffff_ffff_ffff    : mmio region
 *   0xb000_0000_0000_0000 - 0xbfff_ffff_ffff_ffff    : stack
 * 0xc000_0000_0000_0000 - 0xcfff_ffff_ffff_ffff      : kernel reserved
 *   0xc000_0000_0000_0000 - endkernel-1              : kernel code & data
 *               endkernel - msgbufp-1                : flat device tree
 *                 msgbufp - kernel_pdir-1            : message buffer
 *             kernel_pdir - kernel_pp2d-1            : kernel page directory
 *             kernel_pp2d -                          : kernel pointers to page directory
 *      pmap_zero_copy_min - crashdumpmap-1           : reserved for page zero/copy
 *            crashdumpmap - ptbl_buf_pool_vabase-1   : reserved for ptbl bufs
 *    ptbl_buf_pool_vabase - virtual_avail-1          : user page directories and page tables
 *           virtual_avail - 0xcfff_ffff_ffff_ffff    : actual free KVA space
 * 0xd000_0000_0000_0000 - 0xdfff_ffff_ffff_ffff      : coprocessor region
 * 0xe000_0000_0000_0000 - 0xefff_ffff_ffff_ffff      : mmio region
 * 0xf000_0000_0000_0000 - 0xffff_ffff_ffff_ffff      : direct map
 *   0xf000_0000_0000_0000 - +Maxmem                  : physmem map
 *                         - 0xffff_ffff_ffff_ffff    : device direct map
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kerneldump.h>
#include <sys/linker.h>
#include <sys/msgbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/uma.h>

#include <machine/_inttypes.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/platform.h>

#include <machine/tlb.h>
#include <machine/spr.h>
#include <machine/md_var.h>
#include <machine/mmuvar.h>
#include <machine/pmap.h>
#include <machine/pte.h>

#include <ddb/ddb.h>

#include "mmu_if.h"

#define	SPARSE_MAPDEV
#ifdef  DEBUG
#define debugf(fmt, args...) printf(fmt, ##args)
#else
#define debugf(fmt, args...)
#endif

#ifdef __powerpc64__
#define	PRI0ptrX	"016lx"
#else
#define	PRI0ptrX	"08x"
#endif

#define TODO			panic("%s: not implemented", __func__);

extern unsigned char _etext[];
extern unsigned char _end[];

extern uint32_t *bootinfo;

vm_paddr_t kernload;
vm_offset_t kernstart;
vm_size_t kernsize;

/* Message buffer and tables. */
static vm_offset_t data_start;
static vm_size_t data_end;

/* Phys/avail memory regions. */
static struct mem_region *availmem_regions;
static int availmem_regions_sz;
static struct mem_region *physmem_regions;
static int physmem_regions_sz;

#ifndef __powerpc64__
/* Reserved KVA space and mutex for mmu_booke_zero_page. */
static vm_offset_t zero_page_va;
static struct mtx zero_page_mutex;

/* Reserved KVA space and mutex for mmu_booke_copy_page. */
static vm_offset_t copy_page_src_va;
static vm_offset_t copy_page_dst_va;
static struct mtx copy_page_mutex;
#endif

static struct mtx tlbivax_mutex;

/**************************************************************************/
/* PMAP */
/**************************************************************************/

static int mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t, u_int flags, int8_t psind);

unsigned int kptbl_min;		/* Index of the first kernel ptbl. */
unsigned int kernel_ptbls;	/* Number of KVA ptbls. */
#ifdef __powerpc64__
unsigned int kernel_pdirs;
#endif
static uma_zone_t ptbl_root_zone;

/*
 * If user pmap is processed with mmu_booke_remove and the resident count
 * drops to 0, there are no more pages to remove, so we need not continue.
 */
#define PMAP_REMOVE_DONE(pmap) \
	((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0)

#if defined(COMPAT_FREEBSD32) || !defined(__powerpc64__)
extern int elf32_nxstack;
#endif

/**************************************************************************/
/* TLB and TID handling */
/**************************************************************************/

/* Translation ID busy table */
static volatile pmap_t tidbusy[MAXCPU][TID_MAX + 1];

/*
 * TLB0 capabilities (entry, way numbers etc.). These can vary between e500
 * core revisions and should be read from h/w registers during early config.
 */
uint32_t tlb0_entries;
uint32_t tlb0_ways;
uint32_t tlb0_entries_per_way;
uint32_t tlb1_entries;

#define TLB0_ENTRIES		(tlb0_entries)
#define TLB0_WAYS		(tlb0_ways)
#define TLB0_ENTRIES_PER_WAY	(tlb0_entries_per_way)

#define TLB1_ENTRIES		(tlb1_entries)

static vm_offset_t tlb1_map_base = (vm_offset_t)VM_MAXUSER_ADDRESS + PAGE_SIZE;

static tlbtid_t tid_alloc(struct pmap *);
static void tid_flush(tlbtid_t tid);

#ifdef DDB
#ifdef __powerpc64__
static void tlb_print_entry(int, uint32_t, uint64_t, uint32_t, uint32_t);
#else
static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t);
#endif
#endif

static void tlb1_read_entry(tlb_entry_t *, unsigned int);
static void tlb1_write_entry(tlb_entry_t *, unsigned int);
static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *);
static vm_size_t tlb1_mapin_region(vm_offset_t, vm_paddr_t, vm_size_t);

static vm_size_t tsize2size(unsigned int);
static unsigned int size2tsize(vm_size_t);
static unsigned int ilog2(unsigned long);

static void set_mas4_defaults(void);

static inline void tlb0_flush_entry(vm_offset_t);
static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int);

/**************************************************************************/
/* Page table management */
/**************************************************************************/

static struct rwlock_padalign pvh_global_lock;

/* Data for the pv entry allocation mechanism */
static uma_zone_t pvzone;
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;

#define PV_ENTRY_ZONE_MIN	2048	/* min pv entries in uma zone */

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC	200
#endif

#ifdef __powerpc64__
#define PMAP_ROOT_SIZE	(sizeof(pte_t***) * PP2D_NENTRIES)
static pte_t *ptbl_alloc(mmu_t, pmap_t, pte_t **,
    unsigned int, boolean_t);
static void ptbl_free(mmu_t, pmap_t, pte_t **, unsigned int, vm_page_t);
static void ptbl_hold(mmu_t, pmap_t, pte_t **, unsigned int);
static int ptbl_unhold(mmu_t, pmap_t, vm_offset_t);
#else
#define PMAP_ROOT_SIZE	(sizeof(pte_t**) * PDIR_NENTRIES)
static void ptbl_init(void);
static struct ptbl_buf *ptbl_buf_alloc(void);
static void ptbl_buf_free(struct ptbl_buf *);
static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);

static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int, boolean_t);
static void ptbl_free(mmu_t, pmap_t, unsigned int);
static void ptbl_hold(mmu_t, pmap_t, unsigned int);
static int ptbl_unhold(mmu_t, pmap_t, unsigned int);
#endif

static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
static int pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);
static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
static void kernel_pte_alloc(vm_offset_t, vm_offset_t, vm_offset_t);

static pv_entry_t pv_alloc(void);
static void pv_free(pv_entry_t);
static void pv_insert(pmap_t, vm_offset_t, vm_page_t);
static void pv_remove(pmap_t, vm_offset_t, vm_page_t);

static void booke_pmap_init_qpages(void);

struct ptbl_buf {
	TAILQ_ENTRY(ptbl_buf) link;	/* list link */
	vm_offset_t kva;		/* va of mapping */
};

#ifndef __powerpc64__
/* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */
#define PTBL_BUFS		(128 * 16)

/* ptbl free list and a lock used for access synchronization. */
static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist;
static struct mtx ptbl_buf_freelist_lock;

/* Base address of kva space allocated for ptbl bufs. */
static vm_offset_t ptbl_buf_pool_vabase;

/* Pointer to ptbl_buf structures. */
static struct ptbl_buf *ptbl_bufs;
#endif

#ifdef SMP
extern tlb_entry_t __boot_tlb1[];
void pmap_bootstrap_ap(volatile uint32_t *);
#endif

/*
 * Kernel MMU interface
 */
static void		mmu_booke_clear_modify(mmu_t, vm_page_t);
static void		mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t,
    vm_size_t, vm_offset_t);
static void		mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
static void		mmu_booke_copy_pages(mmu_t, vm_page_t *,
    vm_offset_t, vm_page_t *, vm_offset_t, int);
static int		mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t, u_int flags, int8_t psind);
static void		mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
    vm_page_t, vm_prot_t);
static void		mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t);
static vm_paddr_t	mmu_booke_extract(mmu_t, pmap_t, vm_offset_t);
static vm_page_t	mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t,
    vm_prot_t);
static void		mmu_booke_init(mmu_t);
static boolean_t	mmu_booke_is_modified(mmu_t, vm_page_t);
static boolean_t	mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
static boolean_t	mmu_booke_is_referenced(mmu_t, vm_page_t);
static int		mmu_booke_ts_referenced(mmu_t, vm_page_t);
static vm_offset_t	mmu_booke_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t,
    int);
static int		mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t,
    vm_paddr_t *);
static void		mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t,
    vm_object_t, vm_pindex_t, vm_size_t);
static boolean_t	mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t);
static void		mmu_booke_page_init(mmu_t, vm_page_t);
static int		mmu_booke_page_wired_mappings(mmu_t, vm_page_t);
static void		mmu_booke_pinit(mmu_t, pmap_t);
static void		mmu_booke_pinit0(mmu_t, pmap_t);
static void		mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
    vm_prot_t);
static void		mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
static void		mmu_booke_qremove(mmu_t, vm_offset_t, int);
static void		mmu_booke_release(mmu_t, pmap_t);
static void		mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
static void		mmu_booke_remove_all(mmu_t, vm_page_t);
static void		mmu_booke_remove_write(mmu_t, vm_page_t);
static void		mmu_booke_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
static void		mmu_booke_zero_page(mmu_t, vm_page_t);
static void		mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
static void		mmu_booke_activate(mmu_t, struct thread *);
static void		mmu_booke_deactivate(mmu_t, struct thread *);
static void		mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
static void		*mmu_booke_mapdev(mmu_t, vm_paddr_t, vm_size_t);
static void		*mmu_booke_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t);
static void		mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t);
static vm_paddr_t	mmu_booke_kextract(mmu_t, vm_offset_t);
static void		mmu_booke_kenter(mmu_t, vm_offset_t, vm_paddr_t);
static void		mmu_booke_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t);
static void		mmu_booke_kremove(mmu_t, vm_offset_t);
static boolean_t	mmu_booke_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
static void		mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t,
    vm_size_t);
static void		mmu_booke_dumpsys_map(mmu_t, vm_paddr_t pa, size_t,
    void **);
static void		mmu_booke_dumpsys_unmap(mmu_t, vm_paddr_t pa, size_t,
    void *);
static void		mmu_booke_scan_init(mmu_t);
static vm_offset_t	mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m);
static void		mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr);
static int		mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr,
    vm_size_t sz, vm_memattr_t mode);
static int		mmu_booke_map_user_ptr(mmu_t mmu, pmap_t pm,
    volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
static int		mmu_booke_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr,
    int *is_user, vm_offset_t *decoded_addr);


static mmu_method_t mmu_booke_methods[] = {
	/* pmap dispatcher interface */
	MMUMETHOD(mmu_clear_modify,	mmu_booke_clear_modify),
	MMUMETHOD(mmu_copy,		mmu_booke_copy),
	MMUMETHOD(mmu_copy_page,	mmu_booke_copy_page),
	MMUMETHOD(mmu_copy_pages,	mmu_booke_copy_pages),
	MMUMETHOD(mmu_enter,		mmu_booke_enter),
	MMUMETHOD(mmu_enter_object,	mmu_booke_enter_object),
	MMUMETHOD(mmu_enter_quick,	mmu_booke_enter_quick),
	MMUMETHOD(mmu_extract,		mmu_booke_extract),
	MMUMETHOD(mmu_extract_and_hold,	mmu_booke_extract_and_hold),
	MMUMETHOD(mmu_init,		mmu_booke_init),
	MMUMETHOD(mmu_is_modified,	mmu_booke_is_modified),
	MMUMETHOD(mmu_is_prefaultable,	mmu_booke_is_prefaultable),
	MMUMETHOD(mmu_is_referenced,	mmu_booke_is_referenced),
	MMUMETHOD(mmu_ts_referenced,	mmu_booke_ts_referenced),
	MMUMETHOD(mmu_map,		mmu_booke_map),
	MMUMETHOD(mmu_mincore,		mmu_booke_mincore),
	MMUMETHOD(mmu_object_init_pt,	mmu_booke_object_init_pt),
	MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick),
	MMUMETHOD(mmu_page_init,	mmu_booke_page_init),
	MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings),
	MMUMETHOD(mmu_pinit,		mmu_booke_pinit),
	MMUMETHOD(mmu_pinit0,		mmu_booke_pinit0),
	MMUMETHOD(mmu_protect,		mmu_booke_protect),
	MMUMETHOD(mmu_qenter,		mmu_booke_qenter),
	MMUMETHOD(mmu_qremove,		mmu_booke_qremove),
	MMUMETHOD(mmu_release,		mmu_booke_release),
	MMUMETHOD(mmu_remove,		mmu_booke_remove),
	MMUMETHOD(mmu_remove_all,	mmu_booke_remove_all),
	MMUMETHOD(mmu_remove_write,	mmu_booke_remove_write),
	MMUMETHOD(mmu_sync_icache,	mmu_booke_sync_icache),
	MMUMETHOD(mmu_unwire,		mmu_booke_unwire),
	MMUMETHOD(mmu_zero_page,	mmu_booke_zero_page),
	MMUMETHOD(mmu_zero_page_area,	mmu_booke_zero_page_area),
	MMUMETHOD(mmu_activate,		mmu_booke_activate),
	MMUMETHOD(mmu_deactivate,	mmu_booke_deactivate),
	MMUMETHOD(mmu_quick_enter_page, mmu_booke_quick_enter_page),
	MMUMETHOD(mmu_quick_remove_page, mmu_booke_quick_remove_page),

	/* Internal interfaces */
	MMUMETHOD(mmu_bootstrap,	mmu_booke_bootstrap),
	MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped),
	MMUMETHOD(mmu_mapdev,		mmu_booke_mapdev),
	MMUMETHOD(mmu_mapdev_attr,	mmu_booke_mapdev_attr),
	MMUMETHOD(mmu_kenter,		mmu_booke_kenter),
	MMUMETHOD(mmu_kenter_attr,	mmu_booke_kenter_attr),
	MMUMETHOD(mmu_kextract,		mmu_booke_kextract),
	MMUMETHOD(mmu_kremove,		mmu_booke_kremove),
	MMUMETHOD(mmu_unmapdev,		mmu_booke_unmapdev),
	MMUMETHOD(mmu_change_attr,	mmu_booke_change_attr),
	MMUMETHOD(mmu_map_user_ptr,	mmu_booke_map_user_ptr),
	MMUMETHOD(mmu_decode_kernel_ptr, mmu_booke_decode_kernel_ptr),

	/* dumpsys() support */
	MMUMETHOD(mmu_dumpsys_map,	mmu_booke_dumpsys_map),
	MMUMETHOD(mmu_dumpsys_unmap,	mmu_booke_dumpsys_unmap),
	MMUMETHOD(mmu_scan_init,	mmu_booke_scan_init),

	{ 0, 0 }
};

MMU_DEF(booke_mmu, MMU_TYPE_BOOKE, mmu_booke_methods, 0);

static __inline uint32_t
tlb_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
{
	uint32_t attrib;
	int i;

	if (ma != VM_MEMATTR_DEFAULT) {
		switch (ma) {
		case VM_MEMATTR_UNCACHEABLE:
			return (MAS2_I | MAS2_G);
		case VM_MEMATTR_WRITE_COMBINING:
		case VM_MEMATTR_WRITE_BACK:
		case VM_MEMATTR_PREFETCHABLE:
			return (MAS2_I);
		case VM_MEMATTR_WRITE_THROUGH:
			return (MAS2_W | MAS2_M);
		case VM_MEMATTR_CACHEABLE:
			return (MAS2_M);
		}
	}

	/*
	 * Assume the page is cache inhibited and access is guarded unless
	 * it's in our available memory array.
	 */
	attrib = _TLB_ENTRY_IO;
	for (i = 0; i < physmem_regions_sz; i++) {
		if ((pa >= physmem_regions[i].mr_start) &&
		    (pa < (physmem_regions[i].mr_start +
		     physmem_regions[i].mr_size))) {
			attrib = _TLB_ENTRY_MEM;
			break;
		}
	}

	return (attrib);
}

static inline void
tlb_miss_lock(void)
{
#ifdef SMP
	struct pcpu *pc;

	if (!smp_started)
		return;

	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (pc != pcpup) {

			CTR3(KTR_PMAP, "%s: tlb miss LOCK of CPU=%d, "
			    "tlb_lock=%p", __func__, pc->pc_cpuid,
			    pc->pc_booke.tlb_lock);

			KASSERT((pc->pc_cpuid != PCPU_GET(cpuid)),
			    ("tlb_miss_lock: tried to lock self"));

			tlb_lock(pc->pc_booke.tlb_lock);

			CTR1(KTR_PMAP, "%s: locked", __func__);
		}
	}
#endif
}

static inline void
tlb_miss_unlock(void)
{
#ifdef SMP
	struct pcpu *pc;

	if (!smp_started)
		return;

	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (pc != pcpup) {
			CTR2(KTR_PMAP, "%s: tlb miss UNLOCK of CPU=%d",
			    __func__, pc->pc_cpuid);

			tlb_unlock(pc->pc_booke.tlb_lock);

			CTR1(KTR_PMAP, "%s: unlocked", __func__);
		}
	}
#endif
}
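
/*
 * Editor's note (illustrative sketch, not from the original sources): the
 * TLBnCFG decode below extracts the entry count and associativity fields.
 * For a hypothetical e500 core reporting TLB0CFG with NENTRY = 512 and
 * ASSOC = 4, the helpers would derive:
 *
 *	tlb0_entries         = 512;
 *	tlb0_ways            = 4;
 *	tlb0_entries_per_way = 512 / 4;		(128 sets)
 *
 * tlb0_tableidx() then selects one of those sets from the low EPN bits of
 * the faulting VA.
 */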
/* Read TLB0 configuration: number of entries and associativity. */
static __inline void
tlb0_get_tlbconf(void)
{
	uint32_t tlb0_cfg;

	tlb0_cfg = mfspr(SPR_TLB0CFG);
	tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK;
	tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT;
	tlb0_entries_per_way = tlb0_entries / tlb0_ways;
}

/* Read TLB1 configuration: number of entries. */
static __inline void
tlb1_get_tlbconf(void)
{
	uint32_t tlb1_cfg;

	tlb1_cfg = mfspr(SPR_TLB1CFG);
	tlb1_entries = tlb1_cfg & TLBCFG_NENTRY_MASK;
}

/**************************************************************************/
/* Page table related */
/**************************************************************************/

#ifdef __powerpc64__
/* Initialize pool of kva ptbl buffers. */
static void
ptbl_init(void)
{
}

/* Get a pointer to a PTE in a page table. */
static __inline pte_t *
pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	pte_t **pdir;
	pte_t *ptbl;

	KASSERT((pmap != NULL), ("pte_find: invalid pmap"));

	pdir = pmap->pm_pp2d[PP2D_IDX(va)];
	if (!pdir)
		return NULL;
	ptbl = pdir[PDIR_IDX(va)];
	return ((ptbl != NULL) ? &ptbl[PTBL_IDX(va)] : NULL);
}

/*
 * Allocate a page of pointers to page directories; do not preallocate the
 * page tables.
 */
static pte_t **
pdir_alloc(mmu_t mmu, pmap_t pmap, unsigned int pp2d_idx, bool nosleep)
{
	vm_page_t m;
	pte_t **pdir;
	int req;

	req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
	while ((m = vm_page_alloc(NULL, pp2d_idx, req)) == NULL) {
		PMAP_UNLOCK(pmap);
		if (nosleep) {
			return (NULL);
		}
		vm_wait(NULL);
		PMAP_LOCK(pmap);
	}

	/* Zero whole pdir page. */
	pdir = (pte_t **)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	mmu_booke_zero_page(mmu, m);

	return (pdir);
}

/* Free pdir pages and invalidate pdir entry. */
static void
pdir_free(mmu_t mmu, pmap_t pmap, unsigned int pp2d_idx, vm_page_t m)
{
	pte_t **pdir;

	pdir = pmap->pm_pp2d[pp2d_idx];

	KASSERT((pdir != NULL), ("pdir_free: null pdir"));

	pmap->pm_pp2d[pp2d_idx] = NULL;

	vm_wire_sub(1);
	vm_page_free_zero(m);
}

/*
 * Decrement pdir pages hold count and attempt to free pdir pages. Called
 * when removing directory entry from pdir.
 *
 * Return 1 if pdir pages were freed.
 */
static int
pdir_unhold(mmu_t mmu, pmap_t pmap, u_int pp2d_idx)
{
	pte_t **pdir;
	vm_paddr_t pa;
	vm_page_t m;

	KASSERT((pmap != kernel_pmap),
	    ("pdir_unhold: unholding kernel pdir!"));

	pdir = pmap->pm_pp2d[pp2d_idx];

	/* decrement hold count */
	pa = DMAP_TO_PHYS((vm_offset_t) pdir);
	m = PHYS_TO_VM_PAGE(pa);

	/*
	 * Free pdir page if there are no dir entries in this pdir.
	 */
	m->wire_count--;
	if (m->wire_count == 0) {
		pdir_free(mmu, pmap, pp2d_idx, m);
		return (1);
	}
	return (0);
}

/*
 * Increment hold count for pdir pages. This routine is used when a new ptbl
 * entry is being inserted into pdir.
 */
static void
pdir_hold(mmu_t mmu, pmap_t pmap, pte_t ** pdir)
{
	vm_page_t m;

	KASSERT((pmap != kernel_pmap),
	    ("pdir_hold: holding kernel pdir!"));

	KASSERT((pdir != NULL), ("pdir_hold: null pdir"));

	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pdir));
	m->wire_count++;
}
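
/*
 * Editor's sketch (assumption, mirroring pte_find() above): the 64-bit pmap
 * resolves a VA through three levels, allocated top-down on demand:
 *
 *	pte_t ***pp2d = pmap->pm_pp2d;		root, PP2D_NENTRIES slots
 *	pte_t **pdir  = pp2d[PP2D_IDX(va)];	from pdir_alloc()
 *	pte_t *ptbl   = pdir[PDIR_IDX(va)];	from ptbl_alloc() below
 *	pte_t *pte    = &ptbl[PTBL_IDX(va)];
 *
 * pdir_hold()/pdir_unhold() keep a directory page wired while any of its
 * page tables remain populated.
 */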
/* Allocate page table. */
static pte_t *
ptbl_alloc(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx,
    boolean_t nosleep)
{
	vm_page_t m;
	pte_t *ptbl;
	int req;

	KASSERT((pdir[pdir_idx] == NULL),
	    ("%s: valid ptbl entry exists!", __func__));

	req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
	while ((m = vm_page_alloc(NULL, pdir_idx, req)) == NULL) {
		PMAP_UNLOCK(pmap);
		rw_wunlock(&pvh_global_lock);
		if (nosleep) {
			return (NULL);
		}
		vm_wait(NULL);
		rw_wlock(&pvh_global_lock);
		PMAP_LOCK(pmap);
	}

	/* Zero whole ptbl. */
	ptbl = (pte_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	mmu_booke_zero_page(mmu, m);

	return (ptbl);
}

/* Free ptbl pages and invalidate pdir entry. */
static void
ptbl_free(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx,
    vm_page_t m)
{
	pte_t *ptbl;

	ptbl = pdir[pdir_idx];

	KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));

	pdir[pdir_idx] = NULL;

	vm_wire_sub(1);
	vm_page_free_zero(m);
}

/*
 * Decrement ptbl pages hold count and attempt to free ptbl pages. Called
 * when removing pte entry from ptbl.
 *
 * Return 1 if ptbl pages were freed.
 */
static int
ptbl_unhold(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	pte_t *ptbl;
	vm_page_t m;
	u_int pp2d_idx;
	pte_t **pdir;
	u_int pdir_idx;

	pp2d_idx = PP2D_IDX(va);
	pdir_idx = PDIR_IDX(va);

	KASSERT((pmap != kernel_pmap),
	    ("ptbl_unhold: unholding kernel ptbl!"));

	pdir = pmap->pm_pp2d[pp2d_idx];
	ptbl = pdir[pdir_idx];

	/* decrement hold count */
	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) ptbl));

	/*
	 * Free ptbl pages if there are no pte entries in this ptbl.
	 * wire_count has the same value for all ptbl pages, so check the
	 * last page.
	 */
	m->wire_count--;
	if (m->wire_count == 0) {
		ptbl_free(mmu, pmap, pdir, pdir_idx, m);
		pdir_unhold(mmu, pmap, pp2d_idx);
		return (1);
	}
	return (0);
}

/*
 * Increment hold count for ptbl pages. This routine is used when new pte
 * entry is being inserted into ptbl.
 */
static void
ptbl_hold(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx)
{
	pte_t *ptbl;
	vm_page_t m;

	KASSERT((pmap != kernel_pmap),
	    ("ptbl_hold: holding kernel ptbl!"));

	ptbl = pdir[pdir_idx];

	KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));

	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) ptbl));
	m->wire_count++;
}
#else

/* Initialize pool of kva ptbl buffers. */
static void
ptbl_init(void)
{
	int i;

	CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__,
	    (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS);
	CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)",
	    __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE);

	mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
	TAILQ_INIT(&ptbl_buf_freelist);

	for (i = 0; i < PTBL_BUFS; i++) {
		ptbl_bufs[i].kva =
		    ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE;
		TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
	}
}
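
/*
 * Editor's note (worked example from the constants above, not original
 * text): with PTBL_BUFS = 128 * 16 = 2048 bufs, each covering PTBL_PAGES
 * pages of KVA, the pool initialized above spans
 * PTBL_BUFS * PTBL_PAGES * PAGE_SIZE bytes starting at
 * ptbl_buf_pool_vabase, and buf i maps the slice beginning at
 * ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE.
 */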
/* Get a ptbl_buf from the freelist. */
static struct ptbl_buf *
ptbl_buf_alloc(void)
{
	struct ptbl_buf *buf;

	mtx_lock(&ptbl_buf_freelist_lock);
	buf = TAILQ_FIRST(&ptbl_buf_freelist);
	if (buf != NULL)
		TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
	mtx_unlock(&ptbl_buf_freelist_lock);

	CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);

	return (buf);
}

/* Return ptbl buf to the free pool. */
static void
ptbl_buf_free(struct ptbl_buf *buf)
{

	CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);

	mtx_lock(&ptbl_buf_freelist_lock);
	TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
	mtx_unlock(&ptbl_buf_freelist_lock);
}

/*
 * Search the pmap's list of allocated ptbl bufs for the buf backing the
 * given ptbl; remove it from that list and return it to the free pool.
 */
static void
ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl)
{
	struct ptbl_buf *pbuf;

	CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link)
		if (pbuf->kva == (vm_offset_t)ptbl) {
			/* Remove from pmap ptbl buf list. */
			TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link);

			/* Free corresponding ptbl buf. */
			ptbl_buf_free(pbuf);
			break;
		}
}

/* Allocate page table. */
static pte_t *
ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx, boolean_t nosleep)
{
	vm_page_t mtbl[PTBL_PAGES];
	vm_page_t m;
	struct ptbl_buf *pbuf;
	unsigned int pidx;
	pte_t *ptbl;
	int i, j;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_alloc: invalid pdir_idx"));
	KASSERT((pmap->pm_pdir[pdir_idx] == NULL),
	    ("pte_alloc: valid ptbl entry exists!"));

	pbuf = ptbl_buf_alloc();
	if (pbuf == NULL)
		panic("pte_alloc: couldn't alloc kernel virtual memory");

	ptbl = (pte_t *)pbuf->kva;

	CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl);

	for (i = 0; i < PTBL_PAGES; i++) {
		pidx = (PTBL_PAGES * pdir_idx) + i;
		while ((m = vm_page_alloc(NULL, pidx,
		    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
			PMAP_UNLOCK(pmap);
			rw_wunlock(&pvh_global_lock);
			if (nosleep) {
				ptbl_free_pmap_ptbl(pmap, ptbl);
				for (j = 0; j < i; j++)
					vm_page_free(mtbl[j]);
				vm_wire_sub(i);
				return (NULL);
			}
			vm_wait(NULL);
			rw_wlock(&pvh_global_lock);
			PMAP_LOCK(pmap);
		}
		mtbl[i] = m;
	}

	/* Map allocated pages into kernel_pmap. */
	mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES);

	/* Zero whole ptbl. */
	bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE);

	/* Add pbuf to the pmap ptbl bufs list. */
	TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link);

	return (ptbl);
}
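
/*
 * Editor's usage sketch (hypothetical caller, patterned after pte_enter()
 * below): ptbl_alloc() must be entered with the pmap lock and the
 * pvh_global_lock write-held, since both are dropped and retaken around
 * vm_wait():
 *
 *	ptbl = ptbl_alloc(mmu, pmap, PDIR_IDX(va), nosleep);
 *	if (ptbl == NULL)		NULL only in the nosleep case
 *		return (ENOMEM);
 *	pmap->pm_pdir[PDIR_IDX(va)] = ptbl;
 */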
/* Free ptbl pages and invalidate pdir entry. */
static void
ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	pte_t *ptbl;
	vm_paddr_t pa;
	vm_offset_t va;
	vm_page_t m;
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_free: invalid pdir_idx"));

	ptbl = pmap->pm_pdir[pdir_idx];

	CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);

	KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));

	/*
	 * Invalidate the pdir entry as soon as possible, so that other CPUs
	 * don't attempt to look up the page tables we are releasing.
	 */
	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	pmap->pm_pdir[pdir_idx] = NULL;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);

	for (i = 0; i < PTBL_PAGES; i++) {
		va = ((vm_offset_t)ptbl + (i * PAGE_SIZE));
		pa = pte_vatopa(mmu, kernel_pmap, va);
		m = PHYS_TO_VM_PAGE(pa);
		vm_page_free_zero(m);
		vm_wire_sub(1);
		mmu_booke_kremove(mmu, va);
	}

	ptbl_free_pmap_ptbl(pmap, ptbl);
}

/*
 * Decrement ptbl pages hold count and attempt to free ptbl pages.
 * Called when removing pte entry from ptbl.
 *
 * Return 1 if ptbl pages were freed.
 */
static int
ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	pte_t *ptbl;
	vm_paddr_t pa;
	vm_page_t m;
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_unhold: invalid pdir_idx"));
	KASSERT((pmap != kernel_pmap),
	    ("ptbl_unhold: unholding kernel ptbl!"));

	ptbl = pmap->pm_pdir[pdir_idx];

	KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS),
	    ("ptbl_unhold: non kva ptbl"));

	/* decrement hold count */
	for (i = 0; i < PTBL_PAGES; i++) {
		pa = pte_vatopa(mmu, kernel_pmap,
		    (vm_offset_t)ptbl + (i * PAGE_SIZE));
		m = PHYS_TO_VM_PAGE(pa);
		m->wire_count--;
	}

	/*
	 * Free ptbl pages if there are no pte entries in this ptbl.
	 * wire_count has the same value for all ptbl pages, so check the last
	 * page.
	 */
	if (m->wire_count == 0) {
		ptbl_free(mmu, pmap, pdir_idx);
		return (1);
	}

	return (0);
}

/*
 * Increment hold count for ptbl pages. This routine is used when a new pte
 * entry is being inserted into the ptbl.
 */
static void
ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	vm_paddr_t pa;
	pte_t *ptbl;
	vm_page_t m;
	int i;

	CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap,
	    pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_hold: invalid pdir_idx"));
	KASSERT((pmap != kernel_pmap),
	    ("ptbl_hold: holding kernel ptbl!"));

	ptbl = pmap->pm_pdir[pdir_idx];

	KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));

	for (i = 0; i < PTBL_PAGES; i++) {
		pa = pte_vatopa(mmu, kernel_pmap,
		    (vm_offset_t)ptbl + (i * PAGE_SIZE));
		m = PHYS_TO_VM_PAGE(pa);
		m->wire_count++;
	}
}
#endif
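
/*
 * Editor's note (assumption drawn from the counters declared above):
 * pv_alloc() advances pv_entry_count toward pv_entry_high_water (set to
 * 9/10 of pv_entry_max in mmu_booke_init()) and pokes the pagedaemon once
 * that mark is crossed. The allocation itself is M_NOWAIT, so callers such
 * as pv_insert() must tolerate a NULL return.
 */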
/* Allocate pv_entry structure. */
pv_entry_t
pv_alloc(void)
{
	pv_entry_t pv;

	pv_entry_count++;
	if (pv_entry_count > pv_entry_high_water)
		pagedaemon_wakeup(0); /* XXX powerpc NUMA */
	pv = uma_zalloc(pvzone, M_NOWAIT);

	return (pv);
}

/* Free pv_entry structure. */
static __inline void
pv_free(pv_entry_t pve)
{

	pv_entry_count--;
	uma_zfree(pvzone, pve);
}

/* Allocate and initialize pv_entry structure. */
static void
pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	pv_entry_t pve;

	pve = pv_alloc();
	if (pve == NULL)
		panic("pv_insert: no pv entries!");

	pve->pv_pmap = pmap;
	pve->pv_va = va;

	/* add to pv_list */
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	rw_assert(&pvh_global_lock, RA_WLOCKED);

	TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link);
}

/* Destroy pv entry. */
static void
pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	pv_entry_t pve;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	rw_assert(&pvh_global_lock, RA_WLOCKED);

	/* find pv entry */
	TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) {
		if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
			/* remove from pv_list */
			TAILQ_REMOVE(&m->md.pv_list, pve, pv_link);
			if (TAILQ_EMPTY(&m->md.pv_list))
				vm_page_aflag_clear(m, PGA_WRITEABLE);

			/* free pv entry struct */
			pv_free(pve);
			break;
		}
	}
}

#ifdef __powerpc64__
/*
 * Clean pte entry, try to free page table page if requested.
 *
 * Return 1 if ptbl pages were freed, otherwise return 0.
 */
static int
pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, u_int8_t flags)
{
	vm_page_t m;
	pte_t *pte;

	pte = pte_find(mmu, pmap, va);
	KASSERT(pte != NULL, ("%s: NULL pte", __func__));

	if (!PTE_ISVALID(pte))
		return (0);

	/* Get vm_page_t for mapped pte. */
	m = PHYS_TO_VM_PAGE(PTE_PA(pte));

	if (PTE_ISWIRED(pte))
		pmap->pm_stats.wired_count--;

	/* Handle managed entry. */
	if (PTE_ISMANAGED(pte)) {

		/* Handle modified pages. */
		if (PTE_ISMODIFIED(pte))
			vm_page_dirty(m);

		/* Referenced pages. */
		if (PTE_ISREFERENCED(pte))
			vm_page_aflag_set(m, PGA_REFERENCED);

		/* Remove pv_entry from pv_list. */
		pv_remove(pmap, va, m);
	} else if (m->md.pv_tracked) {
		pv_remove(pmap, va, m);
		if (TAILQ_EMPTY(&m->md.pv_list))
			m->md.pv_tracked = false;
	}
	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	tlb0_flush_entry(va);
	*pte = 0;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);

	pmap->pm_stats.resident_count--;

	if (flags & PTBL_UNHOLD) {
		return (ptbl_unhold(mmu, pmap, va));
	}
	return (0);
}
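
/*
 * Editor's sketch (hypothetical caller): the flags argument selects whether
 * the page-table hold counts are dropped along with the mapping, e.g.:
 *
 *	pte_remove(mmu, pmap, va, PTBL_UNHOLD);	  drop ptbl/pdir hold counts
 *	pte_remove(mmu, pmap, va, PTBL_HOLD);	  keep them, as pte_enter()
 *						  does when replacing a va
 */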
/*
 * Insert PTE for a given page and virtual address.
 */
static int
pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
    boolean_t nosleep)
{
	unsigned int pp2d_idx = PP2D_IDX(va);
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	pte_t *ptbl, *pte, pte_tmp;
	pte_t **pdir;

	/* Get the page directory pointer. */
	pdir = pmap->pm_pp2d[pp2d_idx];
	if (pdir == NULL)
		pdir = pdir_alloc(mmu, pmap, pp2d_idx, nosleep);

	/* Get the page table pointer. */
	ptbl = pdir[pdir_idx];

	if (ptbl == NULL) {
		/* Allocate page table pages. */
		ptbl = ptbl_alloc(mmu, pmap, pdir, pdir_idx, nosleep);
		if (ptbl == NULL) {
			KASSERT(nosleep, ("nosleep and NULL ptbl"));
			return (ENOMEM);
		}
		pte = &ptbl[ptbl_idx];
	} else {
		/*
		 * Check if there is valid mapping for requested va, if there
		 * is, remove it.
		 */
		pte = &ptbl[ptbl_idx];
		if (PTE_ISVALID(pte)) {
			pte_remove(mmu, pmap, va, PTBL_HOLD);
		} else {
			/*
			 * pte is not used, increment hold count for ptbl
			 * pages.
			 */
			if (pmap != kernel_pmap)
				ptbl_hold(mmu, pmap, pdir, pdir_idx);
		}
	}

	if (pdir[pdir_idx] == NULL) {
		if (pmap != kernel_pmap && pmap->pm_pp2d[pp2d_idx] != NULL)
			pdir_hold(mmu, pmap, pdir);
		pdir[pdir_idx] = ptbl;
	}
	if (pmap->pm_pp2d[pp2d_idx] == NULL)
		pmap->pm_pp2d[pp2d_idx] = pdir;

	/*
	 * Insert pv_entry into pv_list for mapped page if part of managed
	 * memory.
	 */
	if ((m->oflags & VPO_UNMANAGED) == 0) {
		flags |= PTE_MANAGED;

		/* Create and insert pv entry. */
		pv_insert(pmap, va, m);
	}

	pmap->pm_stats.resident_count++;

	pte_tmp = PTE_RPN_FROM_PA(VM_PAGE_TO_PHYS(m));
	pte_tmp |= (PTE_VALID | flags);

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	tlb0_flush_entry(va);
	*pte = pte_tmp;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);

	return (0);
}

/* Return the pa for the given pmap/va. */
static vm_paddr_t
pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t pa = 0;
	pte_t *pte;

	pte = pte_find(mmu, pmap, va);
	if ((pte != NULL) && PTE_ISVALID(pte))
		pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
	return (pa);
}

/* Allocate pte entries to manage (addr & mask) to (addr & mask) + size. */
static void
kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr, vm_offset_t pdir)
{
	int i, j;
	vm_offset_t va;
	pte_t *pte;

	va = addr;
	/* Initialize kernel pdir */
	for (i = 0; i < kernel_pdirs; i++) {
		kernel_pmap->pm_pp2d[i + PP2D_IDX(va)] =
		    (pte_t **)(pdir + (i * PAGE_SIZE * PDIR_PAGES));
		for (j = PDIR_IDX(va +
		    (i * PAGE_SIZE * PDIR_NENTRIES * PTBL_NENTRIES));
		    j < PDIR_NENTRIES; j++) {
			kernel_pmap->pm_pp2d[i + PP2D_IDX(va)][j] =
			    (pte_t *)(pdir + (kernel_pdirs * PAGE_SIZE) +
			    (((i * PDIR_NENTRIES) + j) * PAGE_SIZE));
		}
	}
	/*
	 * Fill in PTEs covering kernel code and data. They are not required
	 * for address translation, as this area is covered by static TLB1
	 * entries, but for pte_vatopa() to work correctly with kernel area
	 * addresses.
	 */
	for (va = addr; va < data_end; va += PAGE_SIZE) {
		pte = &(kernel_pmap->pm_pp2d[PP2D_IDX(va)][PDIR_IDX(va)][PTBL_IDX(va)]);
		*pte = PTE_RPN_FROM_PA(kernload + (va - kernstart));
		*pte |= PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
		    PTE_VALID | PTE_PS_4KB;
	}
}
#else
/*
 * Clean pte entry, try to free page table page if requested.
 *
 * Return 1 if ptbl pages were freed, otherwise return 0.
 */
static int
pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	vm_page_t m;
	pte_t *ptbl;
	pte_t *pte;

	ptbl = pmap->pm_pdir[pdir_idx];
	KASSERT(ptbl, ("pte_remove: null ptbl"));

	pte = &ptbl[ptbl_idx];

	if (pte == NULL || !PTE_ISVALID(pte))
		return (0);

	if (PTE_ISWIRED(pte))
		pmap->pm_stats.wired_count--;

	/* Get vm_page_t for mapped pte. */
	m = PHYS_TO_VM_PAGE(PTE_PA(pte));

	/* Handle managed entry. */
	if (PTE_ISMANAGED(pte)) {

		if (PTE_ISMODIFIED(pte))
			vm_page_dirty(m);

		if (PTE_ISREFERENCED(pte))
			vm_page_aflag_set(m, PGA_REFERENCED);

		pv_remove(pmap, va, m);
	} else if (m->md.pv_tracked) {
		/*
		 * Always pv_insert()/pv_remove() on MPC85XX, in case DPAA is
		 * used. This is needed by the NCSW support code for fast
		 * VA<->PA translation.
		 */
		pv_remove(pmap, va, m);
		if (TAILQ_EMPTY(&m->md.pv_list))
			m->md.pv_tracked = false;
	}

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	tlb0_flush_entry(va);
	*pte = 0;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);

	pmap->pm_stats.resident_count--;

	if (flags & PTBL_UNHOLD) {
		return (ptbl_unhold(mmu, pmap, pdir_idx));
	}

	return (0);
}

/*
 * Insert PTE for a given page and virtual address.
 */
static int
pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
    boolean_t nosleep)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	pte_t *ptbl, *pte, pte_tmp;

	CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__,
	    pmap == kernel_pmap, pmap, va);

	/* Get the page table pointer. */
	ptbl = pmap->pm_pdir[pdir_idx];

	if (ptbl == NULL) {
		/* Allocate page table pages. */
		ptbl = ptbl_alloc(mmu, pmap, pdir_idx, nosleep);
		if (ptbl == NULL) {
			KASSERT(nosleep, ("nosleep and NULL ptbl"));
			return (ENOMEM);
		}
		pmap->pm_pdir[pdir_idx] = ptbl;
		pte = &ptbl[ptbl_idx];
	} else {
		/*
		 * Check if there is valid mapping for requested
		 * va, if there is, remove it.
		 */
		pte = &pmap->pm_pdir[pdir_idx][ptbl_idx];
		if (PTE_ISVALID(pte)) {
			pte_remove(mmu, pmap, va, PTBL_HOLD);
		} else {
			/*
			 * pte is not used, increment hold count
			 * for ptbl pages.
			 */
			if (pmap != kernel_pmap)
				ptbl_hold(mmu, pmap, pdir_idx);
		}
	}
	/*
	 * Insert pv_entry into pv_list for mapped page if part of managed
	 * memory.
	 */
	if ((m->oflags & VPO_UNMANAGED) == 0) {
		flags |= PTE_MANAGED;

		/* Create and insert pv entry. */
		pv_insert(pmap, va, m);
	}

	pmap->pm_stats.resident_count++;

	pte_tmp = PTE_RPN_FROM_PA(VM_PAGE_TO_PHYS(m));
	pte_tmp |= (PTE_VALID | flags | PTE_PS_4KB); /* 4KB pages only */

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	tlb0_flush_entry(va);
	*pte = pte_tmp;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);
	return (0);
}

/* Return the pa for the given pmap/va. */
static vm_paddr_t
pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t pa = 0;
	pte_t *pte;

	pte = pte_find(mmu, pmap, va);
	if ((pte != NULL) && PTE_ISVALID(pte))
		pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
	return (pa);
}

/* Get a pointer to a PTE in a page table. */
static pte_t *
pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);

	KASSERT((pmap != NULL), ("pte_find: invalid pmap"));

	if (pmap->pm_pdir[pdir_idx])
		return (&(pmap->pm_pdir[pdir_idx][ptbl_idx]));

	return (NULL);
}

/* Set up kernel page tables. */
static void
kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr, vm_offset_t pdir)
{
	int i;
	vm_offset_t va;
	pte_t *pte;

	/* Initialize kernel pdir */
	for (i = 0; i < kernel_ptbls; i++)
		kernel_pmap->pm_pdir[kptbl_min + i] =
		    (pte_t *)(pdir + (i * PAGE_SIZE * PTBL_PAGES));

	/*
	 * Fill in PTEs covering kernel code and data. They are not required
	 * for address translation, as this area is covered by static TLB1
	 * entries, but for pte_vatopa() to work correctly with kernel area
	 * addresses.
	 */
	for (va = addr; va < data_end; va += PAGE_SIZE) {
		pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]);
		*pte = PTE_RPN_FROM_PA(kernload + (va - kernstart));
		*pte |= PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
		    PTE_VALID | PTE_PS_4KB;
	}
}
#endif

/**************************************************************************/
/* PMAP related */
/**************************************************************************/

/*
 * This is called during booke_init, before the system is really initialized.
 */
static void
mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
{
	vm_paddr_t phys_kernelend;
	struct mem_region *mp, *mp1;
	int cnt, i, j;
	vm_paddr_t s, e, sz;
	vm_paddr_t physsz, hwphyssz;
	u_int phys_avail_count;
	vm_size_t kstack0_sz;
	vm_offset_t kernel_pdir, kstack0;
	vm_paddr_t kstack0_phys;
	void *dpcpu;
	vm_offset_t kernel_ptbl_root;

	debugf("mmu_booke_bootstrap: entered\n");

	/* Set interesting system properties */
#ifdef __powerpc64__
	hw_direct_map = 1;
#else
	hw_direct_map = 0;
#endif
#if defined(COMPAT_FREEBSD32) || !defined(__powerpc64__)
	elf32_nxstack = 1;
#endif

	/* Initialize invalidation mutex */
	mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN);

	/* Read TLB0 size and associativity. */
	tlb0_get_tlbconf();
	/*
	 * Align kernel start and end address (kernel image).
	 * Note that kernel end does not necessarily relate to kernsize.
	 * kernsize is the size of the kernel that is actually mapped.
	 */
	data_start = round_page(kernelend);
	data_end = data_start;

	/* Allocate the dynamic per-cpu area. */
	dpcpu = (void *)data_end;
	data_end += DPCPU_SIZE;

	/* Allocate space for the message buffer. */
	msgbufp = (struct msgbuf *)data_end;
	data_end += msgbufsize;
	debugf(" msgbufp at 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n",
	    (uintptr_t)msgbufp, data_end);

	data_end = round_page(data_end);

#ifdef __powerpc64__
	kernel_ptbl_root = data_end;
	data_end += PP2D_NENTRIES * sizeof(pte_t**);
#else
	/* Allocate space for ptbl_bufs. */
	ptbl_bufs = (struct ptbl_buf *)data_end;
	data_end += sizeof(struct ptbl_buf) * PTBL_BUFS;
	debugf(" ptbl_bufs at 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n",
	    (uintptr_t)ptbl_bufs, data_end);

	data_end = round_page(data_end);
	kernel_ptbl_root = data_end;
	data_end += PDIR_NENTRIES * sizeof(pte_t*);
#endif

	/* Allocate PTE tables for kernel KVA. */
	kernel_pdir = data_end;
	kernel_ptbls = howmany(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
	    PDIR_SIZE);
#ifdef __powerpc64__
	kernel_pdirs = howmany(kernel_ptbls, PDIR_NENTRIES);
	data_end += kernel_pdirs * PDIR_PAGES * PAGE_SIZE;
#endif
	data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE;
	debugf(" kernel ptbls: %d\n", kernel_ptbls);
	debugf(" kernel pdir at 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n",
	    kernel_pdir, data_end);

	debugf(" data_end: 0x%"PRI0ptrX"\n", data_end);
	if (data_end - kernstart > kernsize) {
		kernsize += tlb1_mapin_region(kernstart + kernsize,
		    kernload + kernsize, (data_end - kernstart) - kernsize);
	}
	data_end = kernstart + kernsize;
	debugf(" updated data_end: 0x%"PRI0ptrX"\n", data_end);

	/*
	 * Clear the structures - note we can only do it safely after the
	 * possible additional TLB1 translations are in place (above) so that
	 * all range up to the currently calculated 'data_end' is covered.
	 */
	dpcpu_init(dpcpu, 0);
#ifdef __powerpc64__
	memset((void *)kernel_pdir, 0,
	    kernel_pdirs * PDIR_PAGES * PAGE_SIZE +
	    kernel_ptbls * PTBL_PAGES * PAGE_SIZE);
#else
	memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_SIZE);
	memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE);
#endif

	/*******************************************************/
	/* Set the start and end of kva. */
	/*******************************************************/
	virtual_avail = round_page(data_end);
	virtual_end = VM_MAX_KERNEL_ADDRESS;

#ifndef __powerpc64__
	/* Allocate KVA space for page zero/copy operations. */
	zero_page_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	copy_page_src_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	copy_page_dst_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	debugf("zero_page_va = 0x%"PRI0ptrX"\n", zero_page_va);
	debugf("copy_page_src_va = 0x%"PRI0ptrX"\n", copy_page_src_va);
	debugf("copy_page_dst_va = 0x%"PRI0ptrX"\n", copy_page_dst_va);

	/* Initialize page zero/copy mutexes. */
	mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF);
	mtx_init(&copy_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF);
	/* Allocate KVA space for ptbl bufs. */
	ptbl_buf_pool_vabase = virtual_avail;
	virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE;
	debugf("ptbl_buf_pool_vabase = 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n",
	    ptbl_buf_pool_vabase, virtual_avail);
#endif

	/* Calculate corresponding physical addresses for the kernel region. */
	phys_kernelend = kernload + kernsize;
	debugf("kernel image and allocated data:\n");
	debugf(" kernload = 0x%09llx\n", (uint64_t)kernload);
	debugf(" kernstart = 0x%"PRI0ptrX"\n", kernstart);
	debugf(" kernsize = 0x%"PRI0ptrX"\n", kernsize);

	/*
	 * Remove kernel physical address range from avail regions list. Page
	 * align all regions. Non-page aligned memory isn't very interesting
	 * to us. Also, sort the entries for ascending addresses.
	 */

	/* Retrieve phys/avail mem regions */
	mem_regions(&physmem_regions, &physmem_regions_sz,
	    &availmem_regions, &availmem_regions_sz);

	if (PHYS_AVAIL_ENTRIES < availmem_regions_sz)
		panic("mmu_booke_bootstrap: phys_avail too small");

	sz = 0;
	cnt = availmem_regions_sz;
	debugf("processing avail regions:\n");
	for (mp = availmem_regions; mp->mr_size; mp++) {
		s = mp->mr_start;
		e = mp->mr_start + mp->mr_size;
		debugf(" %09jx-%09jx -> ", (uintmax_t)s, (uintmax_t)e);
		/* Check whether this region holds all of the kernel. */
		if (s < kernload && e > phys_kernelend) {
			availmem_regions[cnt].mr_start = phys_kernelend;
			availmem_regions[cnt++].mr_size = e - phys_kernelend;
			e = kernload;
		}
		/* Look whether this region starts within the kernel. */
		if (s >= kernload && s < phys_kernelend) {
			if (e <= phys_kernelend)
				goto empty;
			s = phys_kernelend;
		}
		/* Now look whether this region ends within the kernel. */
		if (e > kernload && e <= phys_kernelend) {
			if (s >= kernload)
				goto empty;
			e = kernload;
		}
		/* Now page align the start and size of the region. */
		s = round_page(s);
		e = trunc_page(e);
		if (e < s)
			e = s;
		sz = e - s;
		debugf("%09jx-%09jx = %jx\n",
		    (uintmax_t)s, (uintmax_t)e, (uintmax_t)sz);

		/* Check whether some memory is left here. */
		if (sz == 0) {
		empty:
			memmove(mp, mp + 1,
			    (cnt - (mp - availmem_regions)) * sizeof(*mp));
			cnt--;
			mp--;
			continue;
		}
		/* Do an insertion sort. */
		for (mp1 = availmem_regions; mp1 < mp; mp1++)
			if (s < mp1->mr_start)
				break;
		if (mp1 < mp) {
			memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
			mp1->mr_start = s;
			mp1->mr_size = sz;
		} else {
			mp->mr_start = s;
			mp->mr_size = sz;
		}
	}
	availmem_regions_sz = cnt;

	/*******************************************************/
	/* Steal physical memory for kernel stack from the end */
	/* of the first avail region                           */
	/*******************************************************/
	kstack0_sz = kstack_pages * PAGE_SIZE;
	kstack0_phys = availmem_regions[0].mr_start +
	    availmem_regions[0].mr_size;
	kstack0_phys -= kstack0_sz;
	availmem_regions[0].mr_size -= kstack0_sz;

	/*******************************************************/
	/* Fill in phys_avail table, based on availmem_regions */
	/*******************************************************/
	phys_avail_count = 0;
	physsz = 0;
	hwphyssz = 0;
	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);

	debugf("fill in phys_avail:\n");
	for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) {

		debugf(" region: 0x%jx - 0x%jx (0x%jx)\n",
		    (uintmax_t)availmem_regions[i].mr_start,
		    (uintmax_t)availmem_regions[i].mr_start +
		        availmem_regions[i].mr_size,
		    (uintmax_t)availmem_regions[i].mr_size);

		if (hwphyssz != 0 &&
		    (physsz + availmem_regions[i].mr_size) >= hwphyssz) {
			debugf(" hw.physmem adjust\n");
			if (physsz < hwphyssz) {
				phys_avail[j] = availmem_regions[i].mr_start;
				phys_avail[j + 1] =
				    availmem_regions[i].mr_start +
				    hwphyssz - physsz;
				physsz = hwphyssz;
				phys_avail_count++;
			}
			break;
		}

		phys_avail[j] = availmem_regions[i].mr_start;
		phys_avail[j + 1] = availmem_regions[i].mr_start +
		    availmem_regions[i].mr_size;
		phys_avail_count++;
		physsz += availmem_regions[i].mr_size;
	}
	physmem = btoc(physsz);

	/* Calculate the last available physical address. */
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	Maxmem = powerpc_btop(phys_avail[i + 1]);

	debugf("Maxmem = 0x%08lx\n", Maxmem);
	debugf("phys_avail_count = %d\n", phys_avail_count);
	debugf("physsz = 0x%09jx physmem = %jd (0x%09jx)\n",
	    (uintmax_t)physsz, (uintmax_t)physmem, (uintmax_t)physmem);

#ifdef __powerpc64__
	/*
	 * Map the physical memory contiguously in TLB1.
	 * Round so it fits into a single mapping.
	 */
	tlb1_mapin_region(DMAP_BASE_ADDRESS, 0,
	    phys_avail[i + 1]);
#endif
	/*******************************************************/
	/* Initialize (statically allocated) kernel pmap. */
	/*******************************************************/
	PMAP_LOCK_INIT(kernel_pmap);
#ifdef __powerpc64__
	kernel_pmap->pm_pp2d = (pte_t ***)kernel_ptbl_root;
#else
	kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE;
	kernel_pmap->pm_pdir = (pte_t **)kernel_ptbl_root;
#endif

	debugf("kernel_pmap = 0x%"PRI0ptrX"\n", (uintptr_t)kernel_pmap);
	kernel_pte_alloc(virtual_avail, kernstart, kernel_pdir);
	for (i = 0; i < MAXCPU; i++) {
		kernel_pmap->pm_tid[i] = TID_KERNEL;

		/* Initialize each CPU's tidbusy entry 0 with kernel_pmap */
		tidbusy[i][TID_KERNEL] = kernel_pmap;
	}

	/* Mark kernel_pmap active on all CPUs */
	CPU_FILL(&kernel_pmap->pm_active);

	/*
	 * Initialize the global pv list lock.
	 */
	rw_init(&pvh_global_lock, "pmap pv global");

	/*******************************************************/
	/* Final setup */
	/*******************************************************/

	/* Enter kstack0 into kernel map, provide guard page */
	kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
	thread0.td_kstack = kstack0;
	thread0.td_kstack_pages = kstack_pages;

	debugf("kstack_sz = 0x%08x\n", kstack0_sz);
	debugf("kstack0_phys at 0x%09llx - 0x%09llx\n",
	    kstack0_phys, kstack0_phys + kstack0_sz);
	debugf("kstack0 at 0x%"PRI0ptrX" - 0x%"PRI0ptrX"\n",
	    kstack0, kstack0 + kstack0_sz);

	virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz;
	for (i = 0; i < kstack_pages; i++) {
		mmu_booke_kenter(mmu, kstack0, kstack0_phys);
		kstack0 += PAGE_SIZE;
		kstack0_phys += PAGE_SIZE;
	}

	pmap_bootstrapped = 1;

	debugf("virtual_avail = %"PRI0ptrX"\n", virtual_avail);
	debugf("virtual_end = %"PRI0ptrX"\n", virtual_end);

	debugf("mmu_booke_bootstrap: exit\n");
}

#ifdef SMP
void
tlb1_ap_prep(void)
{
	tlb_entry_t *e, tmp;
	unsigned int i;

	/* Prepare TLB1 image for AP processors */
	e = __boot_tlb1;
	for (i = 0; i < TLB1_ENTRIES; i++) {
		tlb1_read_entry(&tmp, i);

		if ((tmp.mas1 & MAS1_VALID) && (tmp.mas2 & _TLB_ENTRY_SHARED))
			memcpy(e++, &tmp, sizeof(tmp));
	}
}

void
pmap_bootstrap_ap(volatile uint32_t *trcp __unused)
{
	int i;

	/*
	 * Finish TLB1 configuration: the BSP already set up its TLB1 and we
	 * have the snapshot of its contents in the s/w __boot_tlb1[] table
	 * created by tlb1_ap_prep(), so use these values directly to
	 * (re)program AP's TLB1 hardware.
	 *
	 * Start at index 1 because index 0 has the kernel map.
	 */
	for (i = 1; i < TLB1_ENTRIES; i++) {
		if (__boot_tlb1[i].mas1 & MAS1_VALID)
			tlb1_write_entry(&__boot_tlb1[i], i);
	}

	set_mas4_defaults();
}
#endif

static void
booke_pmap_init_qpages(void)
{
	struct pcpu *pc;
	int i;

	CPU_FOREACH(i) {
		pc = pcpu_find(i);
		pc->pc_qmap_addr = kva_alloc(PAGE_SIZE);
		if (pc->pc_qmap_addr == 0)
			panic("pmap_init_qpages: unable to allocate KVA");
	}
}

SYSINIT(qpages_init, SI_SUB_CPU, SI_ORDER_ANY, booke_pmap_init_qpages, NULL);
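
/*
 * Editor's note (assumption from the surrounding code): the per-CPU
 * pc_qmap_addr page reserved above backs mmu_booke_quick_enter_page() and
 * mmu_booke_quick_remove_page(), giving each CPU a private single-page
 * mapping window that needs no global synchronization.
 */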
1936 */ 1937 static vm_paddr_t 1938 mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va) 1939 { 1940 vm_paddr_t pa; 1941 1942 PMAP_LOCK(pmap); 1943 pa = pte_vatopa(mmu, pmap, va); 1944 PMAP_UNLOCK(pmap); 1945 1946 return (pa); 1947 } 1948 1949 /* 1950 * Extract the physical page address associated with the given 1951 * kernel virtual address. 1952 */ 1953 static vm_paddr_t 1954 mmu_booke_kextract(mmu_t mmu, vm_offset_t va) 1955 { 1956 tlb_entry_t e; 1957 vm_paddr_t p = 0; 1958 int i; 1959 1960 #ifdef __powerpc64__ 1961 if (va >= DMAP_BASE_ADDRESS && va <= DMAP_MAX_ADDRESS) 1962 return (DMAP_TO_PHYS(va)); 1963 #endif 1964 1965 if (va >= VM_MIN_KERNEL_ADDRESS && va <= VM_MAX_KERNEL_ADDRESS) 1966 p = pte_vatopa(mmu, kernel_pmap, va); 1967 1968 if (p == 0) { 1969 /* Check TLB1 mappings */ 1970 for (i = 0; i < TLB1_ENTRIES; i++) { 1971 tlb1_read_entry(&e, i); 1972 if (!(e.mas1 & MAS1_VALID)) 1973 continue; 1974 if (va >= e.virt && va < e.virt + e.size) 1975 return (e.phys + (va - e.virt)); 1976 } 1977 } 1978 1979 return (p); 1980 } 1981 1982 /* 1983 * Initialize the pmap module. 1984 * Called by vm_init, to initialize any structures that the pmap 1985 * system needs to map virtual memory. 1986 */ 1987 static void 1988 mmu_booke_init(mmu_t mmu) 1989 { 1990 int shpgperproc = PMAP_SHPGPERPROC; 1991 1992 /* 1993 * Initialize the address space (zone) for the pv entries. Set a 1994 * high water mark so that the system can recover from excessive 1995 * numbers of pv entries. 1996 */ 1997 pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL, 1998 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); 1999 2000 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); 2001 pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count; 2002 2003 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max); 2004 pv_entry_high_water = 9 * (pv_entry_max / 10); 2005 2006 uma_zone_reserve_kva(pvzone, pv_entry_max); 2007 2008 /* Pre-fill pvzone with initial number of pv entries. */ 2009 uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN); 2010 2011 /* Create a UMA zone for page table roots. */ 2012 ptbl_root_zone = uma_zcreate("pmap root", PMAP_ROOT_SIZE, 2013 NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, UMA_ZONE_VM); 2014 2015 /* Initialize ptbl allocation. */ 2016 ptbl_init(); 2017 } 2018 2019 /* 2020 * Map a list of wired pages into kernel virtual address space. This is 2021 * intended for temporary mappings which do not need page modification or 2022 * references recorded. Existing mappings in the region are overwritten. 2023 */ 2024 static void 2025 mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count) 2026 { 2027 vm_offset_t va; 2028 2029 va = sva; 2030 while (count-- > 0) { 2031 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m)); 2032 va += PAGE_SIZE; 2033 m++; 2034 } 2035 } 2036 2037 /* 2038 * Remove page mappings from kernel virtual address space. Intended for 2039 * temporary mappings entered by mmu_booke_qenter. 2040 */ 2041 static void 2042 mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count) 2043 { 2044 vm_offset_t va; 2045 2046 va = sva; 2047 while (count-- > 0) { 2048 mmu_booke_kremove(mmu, va); 2049 va += PAGE_SIZE; 2050 } 2051 } 2052 2053 /* 2054 * Map a wired page into kernel virtual address space. 
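 * (A usage sketch for the qenter/qremove pair above comes first.)
 */

/*
 * Hypothetical usage sketch for mmu_booke_qenter()/mmu_booke_qremove()
 * above: wire a short run of pages at a caller-provided scratch KVA,
 * access them, then drop the temporary mappings.  The function name
 * and the scratch KVA source are assumptions for illustration only.
 */
static void
qenter_usage_sketch(mmu_t mmu, vm_offset_t scratch_va, vm_page_t *pages,
    int npages)
{

	mmu_booke_qenter(mmu, scratch_va, pages, npages);
	/* ... read or write the pages through scratch_va ... */
	mmu_booke_qremove(mmu, scratch_va, npages);
}

/*
 * Map a wired page into kernel virtual address space.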
2055 */ 2056 static void 2057 mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa) 2058 { 2059 2060 mmu_booke_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT); 2061 } 2062 2063 static void 2064 mmu_booke_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma) 2065 { 2066 uint32_t flags; 2067 pte_t *pte; 2068 2069 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && 2070 (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va")); 2071 2072 flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID; 2073 flags |= tlb_calc_wimg(pa, ma) << PTE_MAS2_SHIFT; 2074 flags |= PTE_PS_4KB; 2075 2076 pte = pte_find(mmu, kernel_pmap, va); 2077 KASSERT((pte != NULL), ("mmu_booke_kenter: invalid va. NULL PTE")); 2078 2079 mtx_lock_spin(&tlbivax_mutex); 2080 tlb_miss_lock(); 2081 2082 if (PTE_ISVALID(pte)) { 2083 2084 CTR1(KTR_PMAP, "%s: replacing entry!", __func__); 2085 2086 /* Flush entry from TLB0 */ 2087 tlb0_flush_entry(va); 2088 } 2089 2090 *pte = PTE_RPN_FROM_PA(pa) | flags; 2091 2092 //debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x " 2093 // "pa=0x%08x rpn=0x%08x flags=0x%08x\n", 2094 // pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags); 2095 2096 /* Flush the real memory from the instruction cache. */ 2097 if ((flags & (PTE_I | PTE_G)) == 0) 2098 __syncicache((void *)va, PAGE_SIZE); 2099 2100 tlb_miss_unlock(); 2101 mtx_unlock_spin(&tlbivax_mutex); 2102 } 2103 2104 /* 2105 * Remove a page from kernel page table. 2106 */ 2107 static void 2108 mmu_booke_kremove(mmu_t mmu, vm_offset_t va) 2109 { 2110 pte_t *pte; 2111 2112 CTR2(KTR_PMAP,"%s: s (va = 0x%"PRI0ptrX")\n", __func__, va); 2113 2114 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && 2115 (va <= VM_MAX_KERNEL_ADDRESS)), 2116 ("mmu_booke_kremove: invalid va")); 2117 2118 pte = pte_find(mmu, kernel_pmap, va); 2119 2120 if (!PTE_ISVALID(pte)) { 2121 2122 CTR1(KTR_PMAP, "%s: invalid pte", __func__); 2123 2124 return; 2125 } 2126 2127 mtx_lock_spin(&tlbivax_mutex); 2128 tlb_miss_lock(); 2129 2130 /* Invalidate entry in TLB0, update PTE. */ 2131 tlb0_flush_entry(va); 2132 *pte = 0; 2133 2134 tlb_miss_unlock(); 2135 mtx_unlock_spin(&tlbivax_mutex); 2136 } 2137 2138 /* 2139 * Provide a kernel pointer corresponding to a given userland pointer. 2140 * The returned pointer is valid until the next time this function is 2141 * called in this thread. This is used internally in copyin/copyout. 2142 */ 2143 int 2144 mmu_booke_map_user_ptr(mmu_t mmu, pmap_t pm, volatile const void *uaddr, 2145 void **kaddr, size_t ulen, size_t *klen) 2146 { 2147 2148 if (trunc_page((uintptr_t)uaddr + ulen) > VM_MAXUSER_ADDRESS) 2149 return (EFAULT); 2150 2151 *kaddr = (void *)(uintptr_t)uaddr; 2152 if (klen) 2153 *klen = ulen; 2154 2155 return (0); 2156 } 2157 2158 /* 2159 * Figure out where a given kernel pointer (usually in a fault) points 2160 * to from the VM's perspective, potentially remapping into userland's 2161 * address space. 2162 */ 2163 static int 2164 mmu_booke_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, int *is_user, 2165 vm_offset_t *decoded_addr) 2166 { 2167 2168 if (trunc_page(addr) <= VM_MAXUSER_ADDRESS) 2169 *is_user = 1; 2170 else 2171 *is_user = 0; 2172 2173 *decoded_addr = addr; 2174 return (0); 2175 } 2176 2177 /* 2178 * Initialize pmap associated with process 0. 
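 * (First, a sketch of how the user-pointer helper above is used.)
 */

/*
 * Hypothetical sketch of a copyin()-style flow built on
 * mmu_booke_map_user_ptr() above: because kernel and user run in the
 * same address space (AS=0), the "kernel" alias is the user address
 * itself once the range check passes.  Fault handling is omitted
 * here; a real copyin() must provide it.
 */
static int
copyin_sketch(mmu_t mmu, pmap_t pm, const void *uaddr, void *kdst,
    size_t len)
{
	void *kaddr;
	int error;

	error = mmu_booke_map_user_ptr(mmu, pm, uaddr, &kaddr, len, NULL);
	if (error != 0)
		return (error);
	memcpy(kdst, kaddr, len);
	return (0);
}

/*
 * Initialize pmap associated with process 0.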
2179  */
2180 static void
2181 mmu_booke_pinit0(mmu_t mmu, pmap_t pmap)
2182 {
2183 
2184 	PMAP_LOCK_INIT(pmap);
2185 	mmu_booke_pinit(mmu, pmap);
2186 	PCPU_SET(curpmap, pmap);
2187 }
2188 
2189 /*
2190  * Initialize a preallocated and zeroed pmap structure,
2191  * such as one in a vmspace structure.
2192  */
2193 static void
2194 mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
2195 {
2196 	int i;
2197 
2198 	CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap,
2199 	    curthread->td_proc->p_pid, curthread->td_proc->p_comm);
2200 
2201 	KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap"));
2202 
2203 	for (i = 0; i < MAXCPU; i++)
2204 		pmap->pm_tid[i] = TID_NONE;
2205 	CPU_ZERO(&pmap->pm_active);
2206 	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
2207 #ifdef __powerpc64__
2208 	pmap->pm_pp2d = uma_zalloc(ptbl_root_zone, M_WAITOK);
2209 	bzero(pmap->pm_pp2d, sizeof(pte_t **) * PP2D_NENTRIES);
2210 #else
2211 	pmap->pm_pdir = uma_zalloc(ptbl_root_zone, M_WAITOK);
2212 	bzero(pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES);
2213 	TAILQ_INIT(&pmap->pm_ptbl_list);
2214 #endif
2215 }
2216 
2217 /*
2218  * Release any resources held by the given physical map.
2219  * Called when a pmap initialized by mmu_booke_pinit is being released.
2220  * Should only be called if the map contains no valid mappings.
2221  */
2222 static void
2223 mmu_booke_release(mmu_t mmu, pmap_t pmap)
2224 {
2225 
2226 	KASSERT(pmap->pm_stats.resident_count == 0,
2227 	    ("pmap_release: pmap resident count %ld != 0",
2228 	    pmap->pm_stats.resident_count));
2229 #ifdef __powerpc64__
2230 	uma_zfree(ptbl_root_zone, pmap->pm_pp2d);
2231 #else
2232 	uma_zfree(ptbl_root_zone, pmap->pm_pdir);
2233 #endif
2234 }
2235 
2236 /*
2237  * Insert the given physical page at the specified virtual address in the
2238  * target physical map with the protection requested.  If specified, the page
2239  * will be wired down.
2240  */
2241 static int
2242 mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
2243     vm_prot_t prot, u_int flags, int8_t psind)
2244 {
2245 	int error;
2246 
2247 	rw_wlock(&pvh_global_lock);
2248 	PMAP_LOCK(pmap);
2249 	error = mmu_booke_enter_locked(mmu, pmap, va, m, prot, flags, psind);
2250 	PMAP_UNLOCK(pmap);
2251 	rw_wunlock(&pvh_global_lock);
2252 	return (error);
2253 }
2254 
2255 static int
2256 mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
2257     vm_prot_t prot, u_int pmap_flags, int8_t psind __unused)
2258 {
2259 	pte_t *pte;
2260 	vm_paddr_t pa;
2261 	uint32_t flags;
2262 	int error, su, sync;
2263 
2264 	pa = VM_PAGE_TO_PHYS(m);
2265 	su = (pmap == kernel_pmap);
2266 	sync = 0;
2267 
2268 	//debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x "
2269 	//		"pa=0x%08x prot=0x%08x flags=%#x)\n",
2270 	//		(u_int32_t)pmap, su, pmap->pm_tid,
2271 	//		(u_int32_t)m, va, pa, prot, flags);
2272 
2273 	if (su) {
2274 		KASSERT(((va >= virtual_avail) &&
2275 		    (va <= VM_MAX_KERNEL_ADDRESS)),
2276 		    ("mmu_booke_enter_locked: kernel pmap, non kernel va"));
2277 	} else {
2278 		KASSERT((va <= VM_MAXUSER_ADDRESS),
2279 		    ("mmu_booke_enter_locked: user pmap, non user va"));
2280 	}
2281 	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
2282 		VM_OBJECT_ASSERT_LOCKED(m->object);
2283 
2284 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2285 
2286 	/*
2287 	 * If there is an existing mapping, and the physical address has not
2288 	 * changed, this must be a protection or wiring change.
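	 * (For instance, vm_fault() re-entering an already-resident page
	 * with added permissions, or vm_map_wire() wiring an existing
	 * mapping, takes this path: only the PTE flag bits change and no
	 * page-table allocation is needed.)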
2289 	 */
2290 	if (((pte = pte_find(mmu, pmap, va)) != NULL) &&
2291 	    (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) {
2292 
2293 		/*
2294 		 * Before actually updating pte->flags we calculate and
2295 		 * prepare its new value in a helper var.
2296 		 */
2297 		flags = *pte;
2298 		flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED);
2299 
2300 		/* Wiring change, just update stats. */
2301 		if ((pmap_flags & PMAP_ENTER_WIRED) != 0) {
2302 			if (!PTE_ISWIRED(pte)) {
2303 				flags |= PTE_WIRED;
2304 				pmap->pm_stats.wired_count++;
2305 			}
2306 		} else {
2307 			if (PTE_ISWIRED(pte)) {
2308 				flags &= ~PTE_WIRED;
2309 				pmap->pm_stats.wired_count--;
2310 			}
2311 		}
2312 
2313 		if (prot & VM_PROT_WRITE) {
2314 			/* Add write permissions. */
2315 			flags |= PTE_SW;
2316 			if (!su)
2317 				flags |= PTE_UW;
2318 
2319 			if ((flags & PTE_MANAGED) != 0)
2320 				vm_page_aflag_set(m, PGA_WRITEABLE);
2321 		} else {
2322 			/* Handle modified pages: sense the modify status. */
2323 
2324 			/*
2325 			 * The PTE_MODIFIED flag could have been set by
2326 			 * underlying TLB misses since we last read it (above),
2327 			 * and other CPUs could update it as well, so check
2328 			 * the PTE directly rather than relying on the saved
2329 			 * local copy of the flags.
2330 			 */
2331 			if (PTE_ISMODIFIED(pte))
2332 				vm_page_dirty(m);
2333 		}
2334 
2335 		if (prot & VM_PROT_EXECUTE) {
2336 			flags |= PTE_SX;
2337 			if (!su)
2338 				flags |= PTE_UX;
2339 
2340 			/*
2341 			 * Check existing flags for execute permissions: if we
2342 			 * are turning execute permissions on, icache should
2343 			 * be flushed.
2344 			 */
2345 			if ((*pte & (PTE_UX | PTE_SX)) == 0)
2346 				sync++;
2347 		}
2348 
2349 		flags &= ~PTE_REFERENCED;
2350 
2351 		/*
2352 		 * The new flags value is all calculated -- only now actually
2353 		 * update the PTE.
2354 		 */
2355 		mtx_lock_spin(&tlbivax_mutex);
2356 		tlb_miss_lock();
2357 
2358 		tlb0_flush_entry(va);
2359 		*pte &= ~PTE_FLAGS_MASK;
2360 		*pte |= flags;
2361 
2362 		tlb_miss_unlock();
2363 		mtx_unlock_spin(&tlbivax_mutex);
2364 
2365 	} else {
2366 		/*
2367 		 * If there is an existing mapping, but it's for a different
2368 		 * physical address, pte_enter() will delete the old mapping.
2369 		 */
2370 		//if ((pte != NULL) && PTE_ISVALID(pte))
2371 		//	debugf("mmu_booke_enter_locked: replace\n");
2372 		//else
2373 		//	debugf("mmu_booke_enter_locked: new\n");
2374 
2375 		/* Now set up the flags and install the new mapping. */
2376 		flags = (PTE_SR | PTE_VALID);
2377 		flags |= PTE_M;
2378 
2379 		if (!su)
2380 			flags |= PTE_UR;
2381 
2382 		if (prot & VM_PROT_WRITE) {
2383 			flags |= PTE_SW;
2384 			if (!su)
2385 				flags |= PTE_UW;
2386 
2387 			if ((m->oflags & VPO_UNMANAGED) == 0)
2388 				vm_page_aflag_set(m, PGA_WRITEABLE);
2389 		}
2390 
2391 		if (prot & VM_PROT_EXECUTE) {
2392 			flags |= PTE_SX;
2393 			if (!su)
2394 				flags |= PTE_UX;
2395 		}
2396 
2397 		/* If it's wired, update stats. */
2398 		if ((pmap_flags & PMAP_ENTER_WIRED) != 0)
2399 			flags |= PTE_WIRED;
2400 
2401 		error = pte_enter(mmu, pmap, m, va, flags,
2402 		    (pmap_flags & PMAP_ENTER_NOSLEEP) != 0);
2403 		if (error != 0)
2404 			return (KERN_RESOURCE_SHORTAGE);
2405 
2406 		if ((pmap_flags & PMAP_ENTER_WIRED) != 0)
2407 			pmap->pm_stats.wired_count++;
2408 
2409 		/* Flush the real memory from the instruction cache. */
2410 		if (prot & VM_PROT_EXECUTE)
2411 			sync++;
2412 	}
2413 
2414 	if (sync && (su || pmap == PCPU_GET(curpmap))) {
2415 		__syncicache((void *)va, PAGE_SIZE);
2416 		sync = 0;
2417 	}
2418 
2419 	return (KERN_SUCCESS);
2420 }
2421 
2422 /*
2423  * Maps a sequence of resident pages belonging to the same object.
2424  * The sequence begins with the given page m_start.  This page is
2425  * mapped at the given virtual address start.
Each subsequent page is 2426 * mapped at a virtual address that is offset from start by the same 2427 * amount as the page is offset from m_start within the object. The 2428 * last page in the sequence is the page with the largest offset from 2429 * m_start that can be mapped at a virtual address less than the given 2430 * virtual address end. Not every virtual page between start and end 2431 * is mapped; only those for which a resident page exists with the 2432 * corresponding offset from m_start are mapped. 2433 */ 2434 static void 2435 mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start, 2436 vm_offset_t end, vm_page_t m_start, vm_prot_t prot) 2437 { 2438 vm_page_t m; 2439 vm_pindex_t diff, psize; 2440 2441 VM_OBJECT_ASSERT_LOCKED(m_start->object); 2442 2443 psize = atop(end - start); 2444 m = m_start; 2445 rw_wlock(&pvh_global_lock); 2446 PMAP_LOCK(pmap); 2447 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 2448 mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m, 2449 prot & (VM_PROT_READ | VM_PROT_EXECUTE), 2450 PMAP_ENTER_NOSLEEP, 0); 2451 m = TAILQ_NEXT(m, listq); 2452 } 2453 rw_wunlock(&pvh_global_lock); 2454 PMAP_UNLOCK(pmap); 2455 } 2456 2457 static void 2458 mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 2459 vm_prot_t prot) 2460 { 2461 2462 rw_wlock(&pvh_global_lock); 2463 PMAP_LOCK(pmap); 2464 mmu_booke_enter_locked(mmu, pmap, va, m, 2465 prot & (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP, 2466 0); 2467 rw_wunlock(&pvh_global_lock); 2468 PMAP_UNLOCK(pmap); 2469 } 2470 2471 /* 2472 * Remove the given range of addresses from the specified map. 2473 * 2474 * It is assumed that the start and end are properly rounded to the page size. 2475 */ 2476 static void 2477 mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva) 2478 { 2479 pte_t *pte; 2480 uint8_t hold_flag; 2481 2482 int su = (pmap == kernel_pmap); 2483 2484 //debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n", 2485 // su, (u_int32_t)pmap, pmap->pm_tid, va, endva); 2486 2487 if (su) { 2488 KASSERT(((va >= virtual_avail) && 2489 (va <= VM_MAX_KERNEL_ADDRESS)), 2490 ("mmu_booke_remove: kernel pmap, non kernel va")); 2491 } else { 2492 KASSERT((va <= VM_MAXUSER_ADDRESS), 2493 ("mmu_booke_remove: user pmap, non user va")); 2494 } 2495 2496 if (PMAP_REMOVE_DONE(pmap)) { 2497 //debugf("mmu_booke_remove: e (empty)\n"); 2498 return; 2499 } 2500 2501 hold_flag = PTBL_HOLD_FLAG(pmap); 2502 //debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag); 2503 2504 rw_wlock(&pvh_global_lock); 2505 PMAP_LOCK(pmap); 2506 for (; va < endva; va += PAGE_SIZE) { 2507 pte = pte_find(mmu, pmap, va); 2508 if ((pte != NULL) && PTE_ISVALID(pte)) 2509 pte_remove(mmu, pmap, va, hold_flag); 2510 } 2511 PMAP_UNLOCK(pmap); 2512 rw_wunlock(&pvh_global_lock); 2513 2514 //debugf("mmu_booke_remove: e\n"); 2515 } 2516 2517 /* 2518 * Remove physical page from all pmaps in which it resides. 
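 * (A small illustration of the pv-list walk it relies on comes first.)
 */

/*
 * Illustrative sketch (hypothetical): count the current mappings of a
 * managed page by walking its pv list, the same structure that
 * mmu_booke_remove_all() below iterates while tearing mappings down.
 */
static int
pv_mapping_count(vm_page_t m)
{
	pv_entry_t pv;
	int n;

	n = 0;
	rw_rlock(&pvh_global_lock);
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link)
		n++;
	rw_runlock(&pvh_global_lock);
	return (n);
}

/*
 * Remove physical page from all pmaps in which it resides.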
2519  */
2520 static void
2521 mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
2522 {
2523 	pv_entry_t pv, pvn;
2524 	uint8_t hold_flag;
2525 
2526 	rw_wlock(&pvh_global_lock);
2527 	for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) {
2528 		pvn = TAILQ_NEXT(pv, pv_link);
2529 
2530 		PMAP_LOCK(pv->pv_pmap);
2531 		hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap);
2532 		pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag);
2533 		PMAP_UNLOCK(pv->pv_pmap);
2534 	}
2535 	vm_page_aflag_clear(m, PGA_WRITEABLE);
2536 	rw_wunlock(&pvh_global_lock);
2537 }
2538 
2539 /*
2540  * Map a range of physical addresses into kernel virtual address space.
2541  */
2542 static vm_offset_t
2543 mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
2544     vm_paddr_t pa_end, int prot)
2545 {
2546 	vm_offset_t sva = *virt;
2547 	vm_offset_t va = sva;
2548 
2549 #ifdef __powerpc64__
2550 	/* XXX: Handle memory not starting at 0x0. */
2551 	if (pa_end < ctob(Maxmem))
2552 		return (PHYS_TO_DMAP(pa_start));
2553 #endif
2554 
2555 	while (pa_start < pa_end) {
2556 		mmu_booke_kenter(mmu, va, pa_start);
2557 		va += PAGE_SIZE;
2558 		pa_start += PAGE_SIZE;
2559 	}
2560 	*virt = va;
2561 
2562 	return (sva);
2563 }
2564 
2565 /*
2566  * The pmap must be activated before its address space can be accessed in any
2567  * way.
2568  */
2569 static void
2570 mmu_booke_activate(mmu_t mmu, struct thread *td)
2571 {
2572 	pmap_t pmap;
2573 	u_int cpuid;
2574 
2575 	pmap = &td->td_proc->p_vmspace->vm_pmap;
2576 
2577 	CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x%"PRI0ptrX")",
2578 	    __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
2579 
2580 	KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!"));
2581 
2582 	sched_pin();
2583 
2584 	cpuid = PCPU_GET(cpuid);
2585 	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
2586 	PCPU_SET(curpmap, pmap);
2587 
2588 	if (pmap->pm_tid[cpuid] == TID_NONE)
2589 		tid_alloc(pmap);
2590 
2591 	/* Load PID0 register with pmap tid value. */
2592 	mtspr(SPR_PID0, pmap->pm_tid[cpuid]);
2593 	__asm __volatile("isync");
2594 
2595 	mtspr(SPR_DBCR0, td->td_pcb->pcb_cpu.booke.dbcr0);
2596 
2597 	sched_unpin();
2598 
2599 	CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__,
2600 	    pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm);
2601 }
2602 
2603 /*
2604  * Deactivate the specified process's address space.
2605  */
2606 static void
2607 mmu_booke_deactivate(mmu_t mmu, struct thread *td)
2608 {
2609 	pmap_t pmap;
2610 
2611 	pmap = &td->td_proc->p_vmspace->vm_pmap;
2612 
2613 	CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%"PRI0ptrX,
2614 	    __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
2615 
2616 	td->td_pcb->pcb_cpu.booke.dbcr0 = mfspr(SPR_DBCR0);
2617 
2618 	CPU_CLR_ATOMIC(PCPU_GET(cpuid), &pmap->pm_active);
2619 	PCPU_SET(curpmap, NULL);
2620 }
2621 
2622 /*
2623  * Copy the range specified by src_addr/len
2624  * from the source map to the range dst_addr/len
2625  * in the destination map.
2626  *
2627  * This routine is only advisory and need not do anything.
2628  */
2629 static void
2630 mmu_booke_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
2631     vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
2632 {
2633 
2634 }
2635 
2636 /*
2637  * Set the physical protection on the specified range of this map as requested.
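 * (A boot-time usage sketch for mmu_booke_map() above comes first.)
 */

/*
 * Hypothetical boot-time caller of mmu_booke_map() above: carve a
 * permanent mapping of a physical range out of a KVA cursor such as
 * virtual_avail.  The wrapper name and cursor argument are assumptions
 * for illustration.
 */
static vm_offset_t
map_phys_range_sketch(mmu_t mmu, vm_offset_t *kva_cursor,
    vm_paddr_t start, vm_paddr_t end)
{

	/* On 64-bit this may be satisfied from the direct map instead. */
	return (mmu_booke_map(mmu, kva_cursor, start, end,
	    VM_PROT_READ | VM_PROT_WRITE));
}

/*
 * Set the physical protection on the specified range of this map as
 * requested.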
2638 */ 2639 static void 2640 mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 2641 vm_prot_t prot) 2642 { 2643 vm_offset_t va; 2644 vm_page_t m; 2645 pte_t *pte; 2646 2647 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 2648 mmu_booke_remove(mmu, pmap, sva, eva); 2649 return; 2650 } 2651 2652 if (prot & VM_PROT_WRITE) 2653 return; 2654 2655 PMAP_LOCK(pmap); 2656 for (va = sva; va < eva; va += PAGE_SIZE) { 2657 if ((pte = pte_find(mmu, pmap, va)) != NULL) { 2658 if (PTE_ISVALID(pte)) { 2659 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 2660 2661 mtx_lock_spin(&tlbivax_mutex); 2662 tlb_miss_lock(); 2663 2664 /* Handle modified pages. */ 2665 if (PTE_ISMODIFIED(pte) && PTE_ISMANAGED(pte)) 2666 vm_page_dirty(m); 2667 2668 tlb0_flush_entry(va); 2669 *pte &= ~(PTE_UW | PTE_SW | PTE_MODIFIED); 2670 2671 tlb_miss_unlock(); 2672 mtx_unlock_spin(&tlbivax_mutex); 2673 } 2674 } 2675 } 2676 PMAP_UNLOCK(pmap); 2677 } 2678 2679 /* 2680 * Clear the write and modified bits in each of the given page's mappings. 2681 */ 2682 static void 2683 mmu_booke_remove_write(mmu_t mmu, vm_page_t m) 2684 { 2685 pv_entry_t pv; 2686 pte_t *pte; 2687 2688 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2689 ("mmu_booke_remove_write: page %p is not managed", m)); 2690 2691 /* 2692 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be 2693 * set by another thread while the object is locked. Thus, 2694 * if PGA_WRITEABLE is clear, no page table entries need updating. 2695 */ 2696 VM_OBJECT_ASSERT_WLOCKED(m->object); 2697 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) 2698 return; 2699 rw_wlock(&pvh_global_lock); 2700 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2701 PMAP_LOCK(pv->pv_pmap); 2702 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) { 2703 if (PTE_ISVALID(pte)) { 2704 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 2705 2706 mtx_lock_spin(&tlbivax_mutex); 2707 tlb_miss_lock(); 2708 2709 /* Handle modified pages. */ 2710 if (PTE_ISMODIFIED(pte)) 2711 vm_page_dirty(m); 2712 2713 /* Flush mapping from TLB0. */ 2714 *pte &= ~(PTE_UW | PTE_SW | PTE_MODIFIED); 2715 2716 tlb_miss_unlock(); 2717 mtx_unlock_spin(&tlbivax_mutex); 2718 } 2719 } 2720 PMAP_UNLOCK(pv->pv_pmap); 2721 } 2722 vm_page_aflag_clear(m, PGA_WRITEABLE); 2723 rw_wunlock(&pvh_global_lock); 2724 } 2725 2726 static void 2727 mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz) 2728 { 2729 pte_t *pte; 2730 vm_paddr_t pa = 0; 2731 int sync_sz, valid; 2732 #ifndef __powerpc64__ 2733 pmap_t pmap; 2734 vm_page_t m; 2735 vm_offset_t addr; 2736 int active; 2737 #endif 2738 2739 #ifndef __powerpc64__ 2740 rw_wlock(&pvh_global_lock); 2741 pmap = PCPU_GET(curpmap); 2742 active = (pm == kernel_pmap || pm == pmap) ? 1 : 0; 2743 #endif 2744 while (sz > 0) { 2745 PMAP_LOCK(pm); 2746 pte = pte_find(mmu, pm, va); 2747 valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0; 2748 if (valid) 2749 pa = PTE_PA(pte); 2750 PMAP_UNLOCK(pm); 2751 sync_sz = PAGE_SIZE - (va & PAGE_MASK); 2752 sync_sz = min(sync_sz, sz); 2753 if (valid) { 2754 #ifdef __powerpc64__ 2755 pa += (va & PAGE_MASK); 2756 __syncicache((void *)PHYS_TO_DMAP(pa), sync_sz); 2757 #else 2758 if (!active) { 2759 /* Create a mapping in the active pmap. 
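			 * The target pmap is not active on this CPU, so va
			 * cannot be dereferenced directly.  Temporarily enter
			 * the page at VA 0 in the currently active pmap, sync
			 * the icache through that alias, and then remove the
			 * alias again.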
			 */
2760 				addr = 0;
2761 				m = PHYS_TO_VM_PAGE(pa);
2762 				PMAP_LOCK(pmap);
2763 				pte_enter(mmu, pmap, m, addr,
2764 				    PTE_SR | PTE_VALID, FALSE);
2765 				addr += (va & PAGE_MASK);
2766 				__syncicache((void *)addr, sync_sz);
2767 				pte_remove(mmu, pmap, addr, PTBL_UNHOLD);
2768 				PMAP_UNLOCK(pmap);
2769 			} else
2770 				__syncicache((void *)va, sync_sz);
2771 #endif
2772 		}
2773 		va += sync_sz;
2774 		sz -= sync_sz;
2775 	}
2776 #ifndef __powerpc64__
2777 	rw_wunlock(&pvh_global_lock);
2778 #endif
2779 }
2780 
2781 /*
2782  * Atomically extract and hold the physical page with the given
2783  * pmap and virtual address pair if that mapping permits the given
2784  * protection.
2785  */
2786 static vm_page_t
2787 mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va,
2788     vm_prot_t prot)
2789 {
2790 	pte_t *pte;
2791 	vm_page_t m;
2792 	uint32_t pte_wbit;
2793 	vm_paddr_t pa;
2794 
2795 	m = NULL;
2796 	pa = 0;
2797 	PMAP_LOCK(pmap);
2798 retry:
2799 	pte = pte_find(mmu, pmap, va);
2800 	if ((pte != NULL) && PTE_ISVALID(pte)) {
2801 		if (pmap == kernel_pmap)
2802 			pte_wbit = PTE_SW;
2803 		else
2804 			pte_wbit = PTE_UW;
2805 
2806 		if ((*pte & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) {
2807 			if (vm_page_pa_tryrelock(pmap, PTE_PA(pte), &pa))
2808 				goto retry;
2809 			m = PHYS_TO_VM_PAGE(PTE_PA(pte));
2810 			vm_page_wire(m);
2811 		}
2812 	}
2813 
2814 	PA_UNLOCK_COND(pa);
2815 	PMAP_UNLOCK(pmap);
2816 	return (m);
2817 }
2818 
2819 /*
2820  * Initialize a vm_page's machine-dependent fields.
2821  */
2822 static void
2823 mmu_booke_page_init(mmu_t mmu, vm_page_t m)
2824 {
2825 
2826 	m->md.pv_tracked = 0;
2827 	TAILQ_INIT(&m->md.pv_list);
2828 }
2829 
2830 /*
2831  * mmu_booke_zero_page_area zeros the specified hardware page by
2832  * mapping it into virtual memory and using bzero to clear
2833  * its contents.
2834  *
2835  * off and size must reside within a single page.
2836  */
2837 static void
2838 mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
2839 {
2840 	vm_offset_t va;
2841 
2842 	KASSERT(off + size <= PAGE_SIZE,
2843 	    ("mmu_booke_zero_page_area: off/size spans pages"));
2844 #ifdef __powerpc64__
2845 	va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
2846 	bzero((caddr_t)va + off, size);
2847 #else
2848 	mtx_lock(&zero_page_mutex);
2849 	va = zero_page_va;
2850 
2851 	mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
2852 	bzero((caddr_t)va + off, size);
2853 	mmu_booke_kremove(mmu, va);
2854 
2855 	mtx_unlock(&zero_page_mutex);
2856 #endif
2857 }
2858 
2859 /*
2860  * mmu_booke_zero_page zeros the specified hardware page.
2861  */
2862 static void
2863 mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
2864 {
2865 	vm_offset_t off, va;
2866 
2867 #ifdef __powerpc64__
2868 	va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
2869 
2870 	for (off = 0; off < PAGE_SIZE; off += cacheline_size)
2871 		__asm __volatile("dcbz 0,%0" :: "r"(va + off));
2872 #else
2873 	va = zero_page_va;
2874 	mtx_lock(&zero_page_mutex);
2875 
2876 	mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
2877 
2878 	for (off = 0; off < PAGE_SIZE; off += cacheline_size)
2879 		__asm __volatile("dcbz 0,%0" :: "r"(va + off));
2880 
2881 	mmu_booke_kremove(mmu, va);
2882 
2883 	mtx_unlock(&zero_page_mutex);
2884 #endif
2885 }
2886 
2887 /*
2888  * mmu_booke_copy_page copies the specified (machine-independent) page by
2889  * mapping the page into virtual memory and using memcpy to copy the page,
2890  * one machine-dependent page at a time.
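 * (A distilled sketch of the shared map-then-access pattern first.)
 */

/*
 * Illustrative sketch (hypothetical) of the pattern shared by the
 * page zero/copy routines above and below: on 64-bit the page is
 * reached through the direct map; on 32-bit it is temporarily wired
 * at a reserved scratch KVA that the caller must own (e.g. by holding
 * the corresponding mutex) and later undo with mmu_booke_kremove().
 */
static void *
page_access_sketch(mmu_t mmu, vm_page_t m, vm_offset_t scratch_va)
{
#ifdef __powerpc64__
	return ((void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
#else
	mmu_booke_kenter(mmu, scratch_va, VM_PAGE_TO_PHYS(m));
	return ((void *)scratch_va);
#endif
}

/*
 * mmu_booke_copy_page, as described above: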
2891 */ 2892 static void 2893 mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm) 2894 { 2895 vm_offset_t sva, dva; 2896 2897 #ifdef __powerpc64__ 2898 sva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(sm)); 2899 dva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dm)); 2900 memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE); 2901 #else 2902 sva = copy_page_src_va; 2903 dva = copy_page_dst_va; 2904 2905 mtx_lock(©_page_mutex); 2906 mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm)); 2907 mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm)); 2908 2909 memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE); 2910 2911 mmu_booke_kremove(mmu, dva); 2912 mmu_booke_kremove(mmu, sva); 2913 mtx_unlock(©_page_mutex); 2914 #endif 2915 } 2916 2917 static inline void 2918 mmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset, 2919 vm_page_t *mb, vm_offset_t b_offset, int xfersize) 2920 { 2921 void *a_cp, *b_cp; 2922 vm_offset_t a_pg_offset, b_pg_offset; 2923 int cnt; 2924 2925 #ifdef __powerpc64__ 2926 vm_page_t pa, pb; 2927 2928 while (xfersize > 0) { 2929 a_pg_offset = a_offset & PAGE_MASK; 2930 pa = ma[a_offset >> PAGE_SHIFT]; 2931 b_pg_offset = b_offset & PAGE_MASK; 2932 pb = mb[b_offset >> PAGE_SHIFT]; 2933 cnt = min(xfersize, PAGE_SIZE - a_pg_offset); 2934 cnt = min(cnt, PAGE_SIZE - b_pg_offset); 2935 a_cp = (caddr_t)((uintptr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pa)) + 2936 a_pg_offset); 2937 b_cp = (caddr_t)((uintptr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pb)) + 2938 b_pg_offset); 2939 bcopy(a_cp, b_cp, cnt); 2940 a_offset += cnt; 2941 b_offset += cnt; 2942 xfersize -= cnt; 2943 } 2944 #else 2945 mtx_lock(©_page_mutex); 2946 while (xfersize > 0) { 2947 a_pg_offset = a_offset & PAGE_MASK; 2948 cnt = min(xfersize, PAGE_SIZE - a_pg_offset); 2949 mmu_booke_kenter(mmu, copy_page_src_va, 2950 VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT])); 2951 a_cp = (char *)copy_page_src_va + a_pg_offset; 2952 b_pg_offset = b_offset & PAGE_MASK; 2953 cnt = min(cnt, PAGE_SIZE - b_pg_offset); 2954 mmu_booke_kenter(mmu, copy_page_dst_va, 2955 VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT])); 2956 b_cp = (char *)copy_page_dst_va + b_pg_offset; 2957 bcopy(a_cp, b_cp, cnt); 2958 mmu_booke_kremove(mmu, copy_page_dst_va); 2959 mmu_booke_kremove(mmu, copy_page_src_va); 2960 a_offset += cnt; 2961 b_offset += cnt; 2962 xfersize -= cnt; 2963 } 2964 mtx_unlock(©_page_mutex); 2965 #endif 2966 } 2967 2968 static vm_offset_t 2969 mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m) 2970 { 2971 #ifdef __powerpc64__ 2972 return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m))); 2973 #else 2974 vm_paddr_t paddr; 2975 vm_offset_t qaddr; 2976 uint32_t flags; 2977 pte_t *pte; 2978 2979 paddr = VM_PAGE_TO_PHYS(m); 2980 2981 flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID; 2982 flags |= tlb_calc_wimg(paddr, pmap_page_get_memattr(m)) << PTE_MAS2_SHIFT; 2983 flags |= PTE_PS_4KB; 2984 2985 critical_enter(); 2986 qaddr = PCPU_GET(qmap_addr); 2987 2988 pte = pte_find(mmu, kernel_pmap, qaddr); 2989 2990 KASSERT(*pte == 0, ("mmu_booke_quick_enter_page: PTE busy")); 2991 2992 /* 2993 * XXX: tlbivax is broadcast to other cores, but qaddr should 2994 * not be present in other TLBs. Is there a better instruction 2995 * sequence to use? Or just forget it & use mmu_booke_kenter()... 2996 */ 2997 __asm __volatile("tlbivax 0, %0" :: "r"(qaddr & MAS2_EPN_MASK)); 2998 __asm __volatile("isync; msync"); 2999 3000 *pte = PTE_RPN_FROM_PA(paddr) | flags; 3001 3002 /* Flush the real memory from the instruction cache. 
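	 * The quick mapping may be used to write instructions into the
	 * page, so keep the icache coherent here unless the mapping is
	 * cache-inhibited or guarded (PTE_I | PTE_G set).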
	 */
3003 	if ((flags & (PTE_I | PTE_G)) == 0)
3004 		__syncicache((void *)qaddr, PAGE_SIZE);
3005 
3006 	return (qaddr);
3007 #endif
3008 }
3009 
3010 static void
3011 mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr)
3012 {
3013 #ifndef __powerpc64__
3014 	pte_t *pte;
3015 
3016 	pte = pte_find(mmu, kernel_pmap, addr);
3017 
3018 	KASSERT(PCPU_GET(qmap_addr) == addr,
3019 	    ("mmu_booke_quick_remove_page: invalid address"));
3020 	KASSERT(*pte != 0,
3021 	    ("mmu_booke_quick_remove_page: PTE not in use"));
3022 
3023 	*pte = 0;
3024 	critical_exit();
3025 #endif
3026 }
3027 
3028 /*
3029  * Return whether or not the specified physical page was modified
3030  * in any physical map.
3031  */
3032 static boolean_t
3033 mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
3034 {
3035 	pte_t *pte;
3036 	pv_entry_t pv;
3037 	boolean_t rv;
3038 
3039 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3040 	    ("mmu_booke_is_modified: page %p is not managed", m));
3041 	rv = FALSE;
3042 
3043 	/*
3044 	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
3045 	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
3046 	 * is clear, no PTEs can be modified.
3047 	 */
3048 	VM_OBJECT_ASSERT_WLOCKED(m->object);
3049 	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
3050 		return (rv);
3051 	rw_wlock(&pvh_global_lock);
3052 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3053 		PMAP_LOCK(pv->pv_pmap);
3054 		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
3055 		    PTE_ISVALID(pte)) {
3056 			if (PTE_ISMODIFIED(pte))
3057 				rv = TRUE;
3058 		}
3059 		PMAP_UNLOCK(pv->pv_pmap);
3060 		if (rv)
3061 			break;
3062 	}
3063 	rw_wunlock(&pvh_global_lock);
3064 	return (rv);
3065 }
3066 
3067 /*
3068  * Return whether or not the specified virtual address is eligible
3069  * for prefault.
3070  */
3071 static boolean_t
3072 mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
3073 {
3074 
3075 	return (FALSE);
3076 }
3077 
3078 /*
3079  * Return whether or not the specified physical page was referenced
3080  * in any physical map.
3081  */
3082 static boolean_t
3083 mmu_booke_is_referenced(mmu_t mmu, vm_page_t m)
3084 {
3085 	pte_t *pte;
3086 	pv_entry_t pv;
3087 	boolean_t rv;
3088 
3089 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3090 	    ("mmu_booke_is_referenced: page %p is not managed", m));
3091 	rv = FALSE;
3092 	rw_wlock(&pvh_global_lock);
3093 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3094 		PMAP_LOCK(pv->pv_pmap);
3095 		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
3096 		    PTE_ISVALID(pte)) {
3097 			if (PTE_ISREFERENCED(pte))
3098 				rv = TRUE;
3099 		}
3100 		PMAP_UNLOCK(pv->pv_pmap);
3101 		if (rv)
3102 			break;
3103 	}
3104 	rw_wunlock(&pvh_global_lock);
3105 	return (rv);
3106 }
3107 
3108 /*
3109  * Clear the modify bits on the specified physical page.
3110  */
3111 static void
3112 mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
3113 {
3114 	pte_t *pte;
3115 	pv_entry_t pv;
3116 
3117 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3118 	    ("mmu_booke_clear_modify: page %p is not managed", m));
3119 	VM_OBJECT_ASSERT_WLOCKED(m->object);
3120 	KASSERT(!vm_page_xbusied(m),
3121 	    ("mmu_booke_clear_modify: page %p is exclusive busied", m));
3122 
3123 	/*
3124 	 * If the page is not PGA_WRITEABLE, then no PTEs can be modified.
3125 	 * If the object containing the page is locked and the page is not
3126 	 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
3127 */ 3128 if ((m->aflags & PGA_WRITEABLE) == 0) 3129 return; 3130 rw_wlock(&pvh_global_lock); 3131 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 3132 PMAP_LOCK(pv->pv_pmap); 3133 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 3134 PTE_ISVALID(pte)) { 3135 mtx_lock_spin(&tlbivax_mutex); 3136 tlb_miss_lock(); 3137 3138 if (*pte & (PTE_SW | PTE_UW | PTE_MODIFIED)) { 3139 tlb0_flush_entry(pv->pv_va); 3140 *pte &= ~(PTE_SW | PTE_UW | PTE_MODIFIED | 3141 PTE_REFERENCED); 3142 } 3143 3144 tlb_miss_unlock(); 3145 mtx_unlock_spin(&tlbivax_mutex); 3146 } 3147 PMAP_UNLOCK(pv->pv_pmap); 3148 } 3149 rw_wunlock(&pvh_global_lock); 3150 } 3151 3152 /* 3153 * Return a count of reference bits for a page, clearing those bits. 3154 * It is not necessary for every reference bit to be cleared, but it 3155 * is necessary that 0 only be returned when there are truly no 3156 * reference bits set. 3157 * 3158 * As an optimization, update the page's dirty field if a modified bit is 3159 * found while counting reference bits. This opportunistic update can be 3160 * performed at low cost and can eliminate the need for some future calls 3161 * to pmap_is_modified(). However, since this function stops after 3162 * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some 3163 * dirty pages. Those dirty pages will only be detected by a future call 3164 * to pmap_is_modified(). 3165 */ 3166 static int 3167 mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m) 3168 { 3169 pte_t *pte; 3170 pv_entry_t pv; 3171 int count; 3172 3173 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3174 ("mmu_booke_ts_referenced: page %p is not managed", m)); 3175 count = 0; 3176 rw_wlock(&pvh_global_lock); 3177 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 3178 PMAP_LOCK(pv->pv_pmap); 3179 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 3180 PTE_ISVALID(pte)) { 3181 if (PTE_ISMODIFIED(pte)) 3182 vm_page_dirty(m); 3183 if (PTE_ISREFERENCED(pte)) { 3184 mtx_lock_spin(&tlbivax_mutex); 3185 tlb_miss_lock(); 3186 3187 tlb0_flush_entry(pv->pv_va); 3188 *pte &= ~PTE_REFERENCED; 3189 3190 tlb_miss_unlock(); 3191 mtx_unlock_spin(&tlbivax_mutex); 3192 3193 if (++count >= PMAP_TS_REFERENCED_MAX) { 3194 PMAP_UNLOCK(pv->pv_pmap); 3195 break; 3196 } 3197 } 3198 } 3199 PMAP_UNLOCK(pv->pv_pmap); 3200 } 3201 rw_wunlock(&pvh_global_lock); 3202 return (count); 3203 } 3204 3205 /* 3206 * Clear the wired attribute from the mappings for the specified range of 3207 * addresses in the given pmap. Every valid mapping within that range must 3208 * have the wired attribute set. In contrast, invalid mappings cannot have 3209 * the wired attribute set, so they are ignored. 3210 * 3211 * The wired attribute of the page table entry is not a hardware feature, so 3212 * there is no need to invalidate any TLB entries. 3213 */ 3214 static void 3215 mmu_booke_unwire(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 3216 { 3217 vm_offset_t va; 3218 pte_t *pte; 3219 3220 PMAP_LOCK(pmap); 3221 for (va = sva; va < eva; va += PAGE_SIZE) { 3222 if ((pte = pte_find(mmu, pmap, va)) != NULL && 3223 PTE_ISVALID(pte)) { 3224 if (!PTE_ISWIRED(pte)) 3225 panic("mmu_booke_unwire: pte %p isn't wired", 3226 pte); 3227 *pte &= ~PTE_WIRED; 3228 pmap->pm_stats.wired_count--; 3229 } 3230 } 3231 PMAP_UNLOCK(pmap); 3232 3233 } 3234 3235 /* 3236 * Return true if the pmap's pv is one of the first 16 pvs linked to from this 3237 * page. 
This count may be changed upwards or downwards in the future; it is
3238  * only necessary that true be returned for a small subset of pmaps for proper
3239  * page aging.
3240  */
3241 static boolean_t
3242 mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
3243 {
3244 	pv_entry_t pv;
3245 	int loops;
3246 	boolean_t rv;
3247 
3248 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3249 	    ("mmu_booke_page_exists_quick: page %p is not managed", m));
3250 	loops = 0;
3251 	rv = FALSE;
3252 	rw_wlock(&pvh_global_lock);
3253 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3254 		if (pv->pv_pmap == pmap) {
3255 			rv = TRUE;
3256 			break;
3257 		}
3258 		if (++loops >= 16)
3259 			break;
3260 	}
3261 	rw_wunlock(&pvh_global_lock);
3262 	return (rv);
3263 }
3264 
3265 /*
3266  * Return the number of managed mappings to the given physical page that are
3267  * wired.
3268  */
3269 static int
3270 mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m)
3271 {
3272 	pv_entry_t pv;
3273 	pte_t *pte;
3274 	int count = 0;
3275 
3276 	if ((m->oflags & VPO_UNMANAGED) != 0)
3277 		return (count);
3278 	rw_wlock(&pvh_global_lock);
3279 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3280 		PMAP_LOCK(pv->pv_pmap);
3281 		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL)
3282 			if (PTE_ISVALID(pte) && PTE_ISWIRED(pte))
3283 				count++;
3284 		PMAP_UNLOCK(pv->pv_pmap);
3285 	}
3286 	rw_wunlock(&pvh_global_lock);
3287 	return (count);
3288 }
3289 
3290 static int
3291 mmu_booke_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
3292 {
3293 	int i;
3294 	vm_offset_t va;
3295 
3296 	/*
3297 	 * This currently does not work for entries that
3298 	 * overlap TLB1 entries.
3299 	 */
3300 	for (i = 0; i < TLB1_ENTRIES; i ++) {
3301 		if (tlb1_iomapped(i, pa, size, &va) == 0)
3302 			return (0);
3303 	}
3304 
3305 	return (EFAULT);
3306 }
3307 
3308 void
3309 mmu_booke_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
3310 {
3311 	vm_paddr_t ppa;
3312 	vm_offset_t ofs;
3313 	vm_size_t gran;
3314 
3315 	/* Minidumps are based on virtual memory addresses. */
3316 	if (do_minidump) {
3317 		*va = (void *)(vm_offset_t)pa;
3318 		return;
3319 	}
3320 
3321 	/* Raw physical memory dumps don't have a virtual address. */
3322 	/* We always map a 256MB page at 256M. */
3323 	gran = 256 * 1024 * 1024;
3324 	ppa = rounddown2(pa, gran);
3325 	ofs = pa - ppa;
3326 	*va = (void *)gran;
3327 	tlb1_set_entry((vm_offset_t)*va, ppa, gran, _TLB_ENTRY_IO);
3328 
3329 	if (sz > (gran - ofs))
3330 		tlb1_set_entry((vm_offset_t)*va + gran, ppa + gran, gran,
3331 		    _TLB_ENTRY_IO);
3332 }
3333 
3334 void
3335 mmu_booke_dumpsys_unmap(mmu_t mmu, vm_paddr_t pa, size_t sz, void *va)
3336 {
3337 	vm_paddr_t ppa;
3338 	vm_offset_t ofs;
3339 	vm_size_t gran;
3340 	tlb_entry_t e;
3341 	int i;
3342 
3343 	/* Minidumps are based on virtual memory addresses. */
3344 	/* Nothing to do... */
3345 	if (do_minidump)
3346 		return;
3347 
3348 	for (i = 0; i < TLB1_ENTRIES; i++) {
3349 		tlb1_read_entry(&e, i);
3350 		if (!(e.mas1 & MAS1_VALID))
3351 			break;
3352 	}
3353 
3354 	/* Raw physical memory dumps don't have a virtual address.
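	 * Tear down the fixed 256MB window(s) installed by
	 * mmu_booke_dumpsys_map() above by invalidating the most
	 * recently written TLB1 entries.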
*/ 3355 i--; 3356 e.mas1 = 0; 3357 e.mas2 = 0; 3358 e.mas3 = 0; 3359 tlb1_write_entry(&e, i); 3360 3361 gran = 256 * 1024 * 1024; 3362 ppa = rounddown2(pa, gran); 3363 ofs = pa - ppa; 3364 if (sz > (gran - ofs)) { 3365 i--; 3366 e.mas1 = 0; 3367 e.mas2 = 0; 3368 e.mas3 = 0; 3369 tlb1_write_entry(&e, i); 3370 } 3371 } 3372 3373 extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1]; 3374 3375 void 3376 mmu_booke_scan_init(mmu_t mmu) 3377 { 3378 vm_offset_t va; 3379 pte_t *pte; 3380 int i; 3381 3382 if (!do_minidump) { 3383 /* Initialize phys. segments for dumpsys(). */ 3384 memset(&dump_map, 0, sizeof(dump_map)); 3385 mem_regions(&physmem_regions, &physmem_regions_sz, &availmem_regions, 3386 &availmem_regions_sz); 3387 for (i = 0; i < physmem_regions_sz; i++) { 3388 dump_map[i].pa_start = physmem_regions[i].mr_start; 3389 dump_map[i].pa_size = physmem_regions[i].mr_size; 3390 } 3391 return; 3392 } 3393 3394 /* Virtual segments for minidumps: */ 3395 memset(&dump_map, 0, sizeof(dump_map)); 3396 3397 /* 1st: kernel .data and .bss. */ 3398 dump_map[0].pa_start = trunc_page((uintptr_t)_etext); 3399 dump_map[0].pa_size = 3400 round_page((uintptr_t)_end) - dump_map[0].pa_start; 3401 3402 /* 2nd: msgbuf and tables (see pmap_bootstrap()). */ 3403 dump_map[1].pa_start = data_start; 3404 dump_map[1].pa_size = data_end - data_start; 3405 3406 /* 3rd: kernel VM. */ 3407 va = dump_map[1].pa_start + dump_map[1].pa_size; 3408 /* Find start of next chunk (from va). */ 3409 while (va < virtual_end) { 3410 /* Don't dump the buffer cache. */ 3411 if (va >= kmi.buffer_sva && va < kmi.buffer_eva) { 3412 va = kmi.buffer_eva; 3413 continue; 3414 } 3415 pte = pte_find(mmu, kernel_pmap, va); 3416 if (pte != NULL && PTE_ISVALID(pte)) 3417 break; 3418 va += PAGE_SIZE; 3419 } 3420 if (va < virtual_end) { 3421 dump_map[2].pa_start = va; 3422 va += PAGE_SIZE; 3423 /* Find last page in chunk. */ 3424 while (va < virtual_end) { 3425 /* Don't run into the buffer cache. */ 3426 if (va == kmi.buffer_sva) 3427 break; 3428 pte = pte_find(mmu, kernel_pmap, va); 3429 if (pte == NULL || !PTE_ISVALID(pte)) 3430 break; 3431 va += PAGE_SIZE; 3432 } 3433 dump_map[2].pa_size = va - dump_map[2].pa_start; 3434 } 3435 } 3436 3437 /* 3438 * Map a set of physical memory pages into the kernel virtual address space. 3439 * Return a pointer to where it is mapped. This routine is intended to be used 3440 * for mapping device memory, NOT real memory. 3441 */ 3442 static void * 3443 mmu_booke_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size) 3444 { 3445 3446 return (mmu_booke_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT)); 3447 } 3448 3449 static void * 3450 mmu_booke_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma) 3451 { 3452 tlb_entry_t e; 3453 void *res; 3454 uintptr_t va, tmpva; 3455 vm_size_t sz; 3456 int i; 3457 3458 /* 3459 * Check if this is premapped in TLB1. Note: this should probably also 3460 * check whether a sequence of TLB1 entries exist that match the 3461 * requirement, but now only checks the easy case. 
3462 */ 3463 for (i = 0; i < TLB1_ENTRIES; i++) { 3464 tlb1_read_entry(&e, i); 3465 if (!(e.mas1 & MAS1_VALID)) 3466 continue; 3467 if (pa >= e.phys && 3468 (pa + size) <= (e.phys + e.size) && 3469 (ma == VM_MEMATTR_DEFAULT || 3470 tlb_calc_wimg(pa, ma) == 3471 (e.mas2 & (MAS2_WIMGE_MASK & ~_TLB_ENTRY_SHARED)))) 3472 return (void *)(e.virt + 3473 (vm_offset_t)(pa - e.phys)); 3474 } 3475 3476 size = roundup(size, PAGE_SIZE); 3477 3478 /* 3479 * The device mapping area is between VM_MAXUSER_ADDRESS and 3480 * VM_MIN_KERNEL_ADDRESS. This gives 1GB of device addressing. 3481 */ 3482 #ifdef SPARSE_MAPDEV 3483 /* 3484 * With a sparse mapdev, align to the largest starting region. This 3485 * could feasibly be optimized for a 'best-fit' alignment, but that 3486 * calculation could be very costly. 3487 * Align to the smaller of: 3488 * - first set bit in overlap of (pa & size mask) 3489 * - largest size envelope 3490 * 3491 * It's possible the device mapping may start at a PA that's not larger 3492 * than the size mask, so we need to offset in to maximize the TLB entry 3493 * range and minimize the number of used TLB entries. 3494 */ 3495 do { 3496 tmpva = tlb1_map_base; 3497 sz = ffsl(((1 << flsl(size-1)) - 1) & pa); 3498 sz = sz ? min(roundup(sz + 3, 4), flsl(size) - 1) : flsl(size) - 1; 3499 va = roundup(tlb1_map_base, 1 << sz) | (((1 << sz) - 1) & pa); 3500 #ifdef __powerpc64__ 3501 } while (!atomic_cmpset_long(&tlb1_map_base, tmpva, va + size)); 3502 #else 3503 } while (!atomic_cmpset_int(&tlb1_map_base, tmpva, va + size)); 3504 #endif 3505 #else 3506 #ifdef __powerpc64__ 3507 va = atomic_fetchadd_long(&tlb1_map_base, size); 3508 #else 3509 va = atomic_fetchadd_int(&tlb1_map_base, size); 3510 #endif 3511 #endif 3512 res = (void *)va; 3513 3514 do { 3515 sz = 1 << (ilog2(size) & ~1); 3516 /* Align size to PA */ 3517 if (pa % sz != 0) { 3518 do { 3519 sz >>= 2; 3520 } while (pa % sz != 0); 3521 } 3522 /* Now align from there to VA */ 3523 if (va % sz != 0) { 3524 do { 3525 sz >>= 2; 3526 } while (va % sz != 0); 3527 } 3528 if (bootverbose) 3529 printf("Wiring VA=%lx to PA=%jx (size=%lx)\n", 3530 va, (uintmax_t)pa, sz); 3531 if (tlb1_set_entry(va, pa, sz, 3532 _TLB_ENTRY_SHARED | tlb_calc_wimg(pa, ma)) < 0) 3533 return (NULL); 3534 size -= sz; 3535 pa += sz; 3536 va += sz; 3537 } while (size > 0); 3538 3539 return (res); 3540 } 3541 3542 /* 3543 * 'Unmap' a range mapped by mmu_booke_mapdev(). 3544 */ 3545 static void 3546 mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size) 3547 { 3548 #ifdef SUPPORTS_SHRINKING_TLB1 3549 vm_offset_t base, offset; 3550 3551 /* 3552 * Unmap only if this is inside kernel virtual space. 3553 */ 3554 if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) { 3555 base = trunc_page(va); 3556 offset = va & PAGE_MASK; 3557 size = roundup(offset + size, PAGE_SIZE); 3558 kva_free(base, size); 3559 } 3560 #endif 3561 } 3562 3563 /* 3564 * mmu_booke_object_init_pt preloads the ptes for a given object into the 3565 * specified pmap. This eliminates the blast of soft faults on process startup 3566 * and immediately after an mmap. 3567 */ 3568 static void 3569 mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr, 3570 vm_object_t object, vm_pindex_t pindex, vm_size_t size) 3571 { 3572 3573 VM_OBJECT_ASSERT_WLOCKED(object); 3574 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, 3575 ("mmu_booke_object_init_pt: non-device object")); 3576 } 3577 3578 /* 3579 * Perform the pmap work for mincore. 
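 * (First, an aside on the TLB1 alignment rule used by mapdev above.)
 */

/*
 * Illustrative sketch (hypothetical) of the alignment rule applied by
 * mmu_booke_mapdev_attr() above: each wired chunk must be a power of
 * four in size and naturally aligned for both PA and VA, so shrink
 * the candidate size until both are aligned.  Relies on the same
 * ilog2() helper the code above uses.
 */
static vm_size_t
tlb1_chunk_size_sketch(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
	vm_size_t sz;

	/* Largest power of four that is <= size. */
	sz = (vm_size_t)1 << (ilog2(size) & ~1);
	while (pa % sz != 0 || va % sz != 0)
		sz >>= 2;
	return (sz);
}

/*
 * Perform the pmap work for mincore.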
3580 */ 3581 static int 3582 mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr, 3583 vm_paddr_t *locked_pa) 3584 { 3585 3586 /* XXX: this should be implemented at some point */ 3587 return (0); 3588 } 3589 3590 static int 3591 mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr, vm_size_t sz, 3592 vm_memattr_t mode) 3593 { 3594 vm_offset_t va; 3595 pte_t *pte; 3596 int i, j; 3597 tlb_entry_t e; 3598 3599 /* Check TLB1 mappings */ 3600 for (i = 0; i < TLB1_ENTRIES; i++) { 3601 tlb1_read_entry(&e, i); 3602 if (!(e.mas1 & MAS1_VALID)) 3603 continue; 3604 if (addr >= e.virt && addr < e.virt + e.size) 3605 break; 3606 } 3607 if (i < TLB1_ENTRIES) { 3608 /* Only allow full mappings to be modified for now. */ 3609 /* Validate the range. */ 3610 for (j = i, va = addr; va < addr + sz; va += e.size, j++) { 3611 tlb1_read_entry(&e, j); 3612 if (va != e.virt || (sz - (va - addr) < e.size)) 3613 return (EINVAL); 3614 } 3615 for (va = addr; va < addr + sz; va += e.size, i++) { 3616 tlb1_read_entry(&e, i); 3617 e.mas2 &= ~MAS2_WIMGE_MASK; 3618 e.mas2 |= tlb_calc_wimg(e.phys, mode); 3619 3620 /* 3621 * Write it out to the TLB. Should really re-sync with other 3622 * cores. 3623 */ 3624 tlb1_write_entry(&e, i); 3625 } 3626 return (0); 3627 } 3628 3629 /* Not in TLB1, try through pmap */ 3630 /* First validate the range. */ 3631 for (va = addr; va < addr + sz; va += PAGE_SIZE) { 3632 pte = pte_find(mmu, kernel_pmap, va); 3633 if (pte == NULL || !PTE_ISVALID(pte)) 3634 return (EINVAL); 3635 } 3636 3637 mtx_lock_spin(&tlbivax_mutex); 3638 tlb_miss_lock(); 3639 for (va = addr; va < addr + sz; va += PAGE_SIZE) { 3640 pte = pte_find(mmu, kernel_pmap, va); 3641 *pte &= ~(PTE_MAS2_MASK << PTE_MAS2_SHIFT); 3642 *pte |= tlb_calc_wimg(PTE_PA(pte), mode) << PTE_MAS2_SHIFT; 3643 tlb0_flush_entry(va); 3644 } 3645 tlb_miss_unlock(); 3646 mtx_unlock_spin(&tlbivax_mutex); 3647 3648 return (0); 3649 } 3650 3651 /**************************************************************************/ 3652 /* TID handling */ 3653 /**************************************************************************/ 3654 3655 /* 3656 * Allocate a TID. If necessary, steal one from someone else. 3657 * The new TID is flushed from the TLB before returning. 3658 */ 3659 static tlbtid_t 3660 tid_alloc(pmap_t pmap) 3661 { 3662 tlbtid_t tid; 3663 int thiscpu; 3664 3665 KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap")); 3666 3667 CTR2(KTR_PMAP, "%s: s (pmap = %p)", __func__, pmap); 3668 3669 thiscpu = PCPU_GET(cpuid); 3670 3671 tid = PCPU_GET(booke.tid_next); 3672 if (tid > TID_MAX) 3673 tid = TID_MIN; 3674 PCPU_SET(booke.tid_next, tid + 1); 3675 3676 /* If we are stealing TID then clear the relevant pmap's field */ 3677 if (tidbusy[thiscpu][tid] != NULL) { 3678 3679 CTR2(KTR_PMAP, "%s: warning: stealing tid %d", __func__, tid); 3680 3681 tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE; 3682 3683 /* Flush all entries from TLB0 matching this TID. */ 3684 tid_flush(tid); 3685 } 3686 3687 tidbusy[thiscpu][tid] = pmap; 3688 pmap->pm_tid[thiscpu] = tid; 3689 __asm __volatile("msync; isync"); 3690 3691 CTR3(KTR_PMAP, "%s: e (%02d next = %02d)", __func__, tid, 3692 PCPU_GET(booke.tid_next)); 3693 3694 return (tid); 3695 } 3696 3697 /**************************************************************************/ 3698 /* TLB0 handling */ 3699 /**************************************************************************/ 3700 3701 /* Convert TLB0 va and way number to tlb0[] table index. 
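 * (A usage note for mmu_booke_change_attr() above comes first.)
 */

/*
 * Hypothetical usage of mmu_booke_change_attr() above: flip a
 * page-aligned kernel buffer to uncacheable, e.g. before handing it
 * to a device that is not cache-coherent.  VM_MEMATTR_UNCACHEABLE is
 * assumed to be available on this platform.
 */
static int
make_uncacheable_sketch(mmu_t mmu, vm_offset_t addr, vm_size_t sz)
{

	return (mmu_booke_change_attr(mmu, addr, sz,
	    VM_MEMATTR_UNCACHEABLE));
}

/* Convert TLB0 va and way number to tlb0[] table index.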
*/ 3702 static inline unsigned int 3703 tlb0_tableidx(vm_offset_t va, unsigned int way) 3704 { 3705 unsigned int idx; 3706 3707 idx = (way * TLB0_ENTRIES_PER_WAY); 3708 idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT; 3709 return (idx); 3710 } 3711 3712 /* 3713 * Invalidate TLB0 entry. 3714 */ 3715 static inline void 3716 tlb0_flush_entry(vm_offset_t va) 3717 { 3718 3719 CTR2(KTR_PMAP, "%s: s va=0x%08x", __func__, va); 3720 3721 mtx_assert(&tlbivax_mutex, MA_OWNED); 3722 3723 __asm __volatile("tlbivax 0, %0" :: "r"(va & MAS2_EPN_MASK)); 3724 __asm __volatile("isync; msync"); 3725 __asm __volatile("tlbsync; msync"); 3726 3727 CTR1(KTR_PMAP, "%s: e", __func__); 3728 } 3729 3730 3731 /**************************************************************************/ 3732 /* TLB1 handling */ 3733 /**************************************************************************/ 3734 3735 /* 3736 * TLB1 mapping notes: 3737 * 3738 * TLB1[0] Kernel text and data. 3739 * TLB1[1-15] Additional kernel text and data mappings (if required), PCI 3740 * windows, other devices mappings. 3741 */ 3742 3743 /* 3744 * Read an entry from given TLB1 slot. 3745 */ 3746 void 3747 tlb1_read_entry(tlb_entry_t *entry, unsigned int slot) 3748 { 3749 register_t msr; 3750 uint32_t mas0; 3751 3752 KASSERT((entry != NULL), ("%s(): Entry is NULL!", __func__)); 3753 3754 msr = mfmsr(); 3755 __asm __volatile("wrteei 0"); 3756 3757 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(slot); 3758 mtspr(SPR_MAS0, mas0); 3759 __asm __volatile("isync; tlbre"); 3760 3761 entry->mas1 = mfspr(SPR_MAS1); 3762 entry->mas2 = mfspr(SPR_MAS2); 3763 entry->mas3 = mfspr(SPR_MAS3); 3764 3765 switch ((mfpvr() >> 16) & 0xFFFF) { 3766 case FSL_E500v2: 3767 case FSL_E500mc: 3768 case FSL_E5500: 3769 case FSL_E6500: 3770 entry->mas7 = mfspr(SPR_MAS7); 3771 break; 3772 default: 3773 entry->mas7 = 0; 3774 break; 3775 } 3776 __asm __volatile("wrtee %0" :: "r"(msr)); 3777 3778 entry->virt = entry->mas2 & MAS2_EPN_MASK; 3779 entry->phys = ((vm_paddr_t)(entry->mas7 & MAS7_RPN) << 32) | 3780 (entry->mas3 & MAS3_RPN); 3781 entry->size = 3782 tsize2size((entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT); 3783 } 3784 3785 struct tlbwrite_args { 3786 tlb_entry_t *e; 3787 unsigned int idx; 3788 }; 3789 3790 static void 3791 tlb1_write_entry_int(void *arg) 3792 { 3793 struct tlbwrite_args *args = arg; 3794 uint32_t mas0; 3795 3796 /* Select entry */ 3797 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(args->idx); 3798 3799 mtspr(SPR_MAS0, mas0); 3800 mtspr(SPR_MAS1, args->e->mas1); 3801 mtspr(SPR_MAS2, args->e->mas2); 3802 mtspr(SPR_MAS3, args->e->mas3); 3803 switch ((mfpvr() >> 16) & 0xFFFF) { 3804 case FSL_E500mc: 3805 case FSL_E5500: 3806 case FSL_E6500: 3807 mtspr(SPR_MAS8, 0); 3808 /* FALLTHROUGH */ 3809 case FSL_E500v2: 3810 mtspr(SPR_MAS7, args->e->mas7); 3811 break; 3812 default: 3813 break; 3814 } 3815 3816 __asm __volatile("isync; tlbwe; isync; msync"); 3817 3818 } 3819 3820 static void 3821 tlb1_write_entry_sync(void *arg) 3822 { 3823 /* Empty synchronization point for smp_rendezvous(). */ 3824 } 3825 3826 /* 3827 * Write given entry to TLB1 hardware. 
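 * (A diagnostic sketch built on tlb1_read_entry() above follows.)
 */

/*
 * Illustrative sketch (hypothetical): dump the valid TLB1 entries via
 * tlb1_read_entry() above, e.g. from a debugger hook.
 */
static void
tlb1_print_valid_sketch(void)
{
	tlb_entry_t e;
	unsigned int i;

	for (i = 0; i < TLB1_ENTRIES; i++) {
		tlb1_read_entry(&e, i);
		if (!(e.mas1 & MAS1_VALID))
			continue;
		printf("TLB1[%u]: va=%#jx pa=%#jx size=%#jx\n", i,
		    (uintmax_t)e.virt, (uintmax_t)e.phys,
		    (uintmax_t)e.size);
	}
}

/*
 * Write given entry to TLB1 hardware.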
3828 */ 3829 static void 3830 tlb1_write_entry(tlb_entry_t *e, unsigned int idx) 3831 { 3832 struct tlbwrite_args args; 3833 3834 args.e = e; 3835 args.idx = idx; 3836 3837 #ifdef SMP 3838 if ((e->mas2 & _TLB_ENTRY_SHARED) && smp_started) { 3839 mb(); 3840 smp_rendezvous(tlb1_write_entry_sync, 3841 tlb1_write_entry_int, 3842 tlb1_write_entry_sync, &args); 3843 } else 3844 #endif 3845 { 3846 register_t msr; 3847 3848 msr = mfmsr(); 3849 __asm __volatile("wrteei 0"); 3850 tlb1_write_entry_int(&args); 3851 __asm __volatile("wrtee %0" :: "r"(msr)); 3852 } 3853 } 3854 3855 /* 3856 * Return the largest uint value log such that 2^log <= num. 3857 */ 3858 static unsigned int 3859 ilog2(unsigned long num) 3860 { 3861 long lz; 3862 3863 #ifdef __powerpc64__ 3864 __asm ("cntlzd %0, %1" : "=r" (lz) : "r" (num)); 3865 return (63 - lz); 3866 #else 3867 __asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num)); 3868 return (31 - lz); 3869 #endif 3870 } 3871 3872 /* 3873 * Convert TLB TSIZE value to mapped region size. 3874 */ 3875 static vm_size_t 3876 tsize2size(unsigned int tsize) 3877 { 3878 3879 /* 3880 * size = 4^tsize KB 3881 * size = 4^tsize * 2^10 = 2^(2 * tsize - 10) 3882 */ 3883 3884 return ((1 << (2 * tsize)) * 1024); 3885 } 3886 3887 /* 3888 * Convert region size (must be power of 4) to TLB TSIZE value. 3889 */ 3890 static unsigned int 3891 size2tsize(vm_size_t size) 3892 { 3893 3894 return (ilog2(size) / 2 - 5); 3895 } 3896 3897 /* 3898 * Register permanent kernel mapping in TLB1. 3899 * 3900 * Entries are created starting from index 0 (current free entry is 3901 * kept in tlb1_idx) and are not supposed to be invalidated. 3902 */ 3903 int 3904 tlb1_set_entry(vm_offset_t va, vm_paddr_t pa, vm_size_t size, 3905 uint32_t flags) 3906 { 3907 tlb_entry_t e; 3908 uint32_t ts, tid; 3909 int tsize, index; 3910 3911 for (index = 0; index < TLB1_ENTRIES; index++) { 3912 tlb1_read_entry(&e, index); 3913 if ((e.mas1 & MAS1_VALID) == 0) 3914 break; 3915 /* Check if we're just updating the flags, and update them. */ 3916 if (e.phys == pa && e.virt == va && e.size == size) { 3917 e.mas2 = (va & MAS2_EPN_MASK) | flags; 3918 tlb1_write_entry(&e, index); 3919 return (0); 3920 } 3921 } 3922 if (index >= TLB1_ENTRIES) { 3923 printf("tlb1_set_entry: TLB1 full!\n"); 3924 return (-1); 3925 } 3926 3927 /* Convert size to TSIZE */ 3928 tsize = size2tsize(size); 3929 3930 tid = (TID_KERNEL << MAS1_TID_SHIFT) & MAS1_TID_MASK; 3931 /* XXX TS is hard coded to 0 for now as we only use single address space */ 3932 ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK; 3933 3934 e.phys = pa; 3935 e.virt = va; 3936 e.size = size; 3937 e.mas1 = MAS1_VALID | MAS1_IPROT | ts | tid; 3938 e.mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK); 3939 e.mas2 = (va & MAS2_EPN_MASK) | flags; 3940 3941 /* Set supervisor RWX permission bits */ 3942 e.mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX; 3943 e.mas7 = (pa >> 32) & MAS7_RPN; 3944 3945 tlb1_write_entry(&e, index); 3946 3947 /* 3948 * XXX in general TLB1 updates should be propagated between CPUs, 3949 * since current design assumes to have the same TLB1 set-up on all 3950 * cores. 3951 */ 3952 return (0); 3953 } 3954 3955 /* 3956 * Map in contiguous RAM region into the TLB1 using maximum of 3957 * KERNEL_REGION_MAX_TLB_ENTRIES entries. 3958 * 3959 * If necessary round up last entry size and return total size 3960 * used by all allocated entries. 
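 * (A worked example of the TSIZE conversions used here comes first.)
 */

/*
 * Worked example (illustrative only) for tsize2size()/size2tsize()
 * above: for a 64 MB region, ilog2(64 MB) = 26, so size2tsize() gives
 * 26 / 2 - 5 = 8, and tsize2size(8) = 4^8 KB = 64 MB.  A hypothetical
 * self-check:
 */
static void
tsize_selftest_sketch(void)
{
	vm_size_t sz = 64UL * 1024 * 1024;

	KASSERT(size2tsize(sz) == 8, ("size2tsize(64MB) != 8"));
	KASSERT(tsize2size(8) == sz, ("tsize2size(8) != 64MB"));
}

/*
 * tlb1_mapin_region, as described above: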
3961  */
3962 vm_size_t
3963 tlb1_mapin_region(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
3964 {
3965 	vm_size_t pgs[KERNEL_REGION_MAX_TLB_ENTRIES];
3966 	vm_size_t mapped, pgsz, base, mask;
3967 	int idx, nents;
3968 
3969 	/* Round up to the next 1M */
3970 	size = roundup2(size, 1 << 20);
3971 
3972 	mapped = 0;
3973 	idx = 0;
3974 	base = va;
3975 	pgsz = 64*1024*1024;
3976 	while (mapped < size) {
3977 		while (mapped < size && idx < KERNEL_REGION_MAX_TLB_ENTRIES) {
3978 			while (pgsz > (size - mapped))
3979 				pgsz >>= 2;
3980 			pgs[idx++] = pgsz;
3981 			mapped += pgsz;
3982 		}
3983 
3984 		/* We under-map. Correct for this. */
3985 		if (mapped < size) {
3986 			while (pgs[idx - 1] == pgsz) {
3987 				idx--;
3988 				mapped -= pgsz;
3989 			}
3990 			/* XXX We may increase beyond our starting point. */
3991 			pgsz <<= 2;
3992 			pgs[idx++] = pgsz;
3993 			mapped += pgsz;
3994 		}
3995 	}
3996 
3997 	nents = idx;
3998 	mask = pgs[0] - 1;
3999 	/* Align address to the boundary */
4000 	if (va & mask) {
4001 		va = (va + mask) & ~mask;
4002 		pa = (pa + mask) & ~mask;
4003 	}
4004 
4005 	for (idx = 0; idx < nents; idx++) {
4006 		pgsz = pgs[idx];
4007 		debugf("%u: %jx -> %jx, size=%jx\n", idx, (uintmax_t)pa,
4008 		    (uintmax_t)va, (uintmax_t)pgsz);
4009 		tlb1_set_entry(va, pa, pgsz,
4010 		    _TLB_ENTRY_SHARED | _TLB_ENTRY_MEM);
4011 		pa += pgsz;
4012 		va += pgsz;
4013 	}
4014 
4015 	mapped = (va - base);
4016 	if (bootverbose)
4017 		printf("mapped size 0x%"PRIxPTR" (wasted space 0x%"PRIxPTR")\n",
4018 		    mapped, mapped - size);
4019 	return (mapped);
4020 }
4021 
4022 /*
4023  * TLB1 initialization routine, to be called after the very first
4024  * assembler level setup done in locore.S.
4025  */
4026 void
4027 tlb1_init(void)
4028 {
4029 	vm_offset_t mas2;
4030 	uint32_t mas0, mas1, mas3, mas7;
4031 	uint32_t tsz;
4032 
4033 	tlb1_get_tlbconf();
4034 
4035 	mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(0);
4036 	mtspr(SPR_MAS0, mas0);
4037 	__asm __volatile("isync; tlbre");
4038 
4039 	mas1 = mfspr(SPR_MAS1);
4040 	mas2 = mfspr(SPR_MAS2);
4041 	mas3 = mfspr(SPR_MAS3);
4042 	mas7 = mfspr(SPR_MAS7);
4043 
4044 	kernload = ((vm_paddr_t)(mas7 & MAS7_RPN) << 32) |
4045 	    (mas3 & MAS3_RPN);
4046 
4047 	tsz = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
4048 	kernsize += (tsz > 0) ? tsize2size(tsz) : 0;
4049 	kernstart = trunc_page(mas2);
4050 
4051 	/* Setup TLB miss defaults */
4052 	set_mas4_defaults();
4053 }
4054 
4055 /*
4056  * pmap_early_io_unmap() should be used in close conjunction with
4057  * pmap_early_io_map(), as in the following snippet:
4058  *
4059  * x = pmap_early_io_map(...);
4060  * <do something with x>
4061  * pmap_early_io_unmap(x, size);
4062  *
4063  * No other early-I/O mappings should be created in between.
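 *
 * For instance (hypothetical early-boot device probe):
 *
 *	va = pmap_early_io_map(dev_pa, PAGE_SIZE);
 *	reg = *(volatile uint32_t *)va;
 *	pmap_early_io_unmap(va, PAGE_SIZE);
 *
 * Unmapping in LIFO order lets the tlb1_map_base cursor be rolled
 * back, as pmap_early_io_unmap() below does.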
 */
void
pmap_early_io_unmap(vm_offset_t va, vm_size_t size)
{
	int i;
	tlb_entry_t e;
	vm_size_t isize;

	size = roundup(size, PAGE_SIZE);
	isize = size;
	for (i = 0; i < TLB1_ENTRIES && size > 0; i++) {
		tlb1_read_entry(&e, i);
		if (!(e.mas1 & MAS1_VALID))
			continue;
		if (va <= e.virt && (va + isize) >= (e.virt + e.size)) {
			size -= e.size;
			e.mas1 &= ~MAS1_VALID;
			tlb1_write_entry(&e, i);
		}
	}
	if (tlb1_map_base == va + isize)
		tlb1_map_base -= isize;
}

vm_offset_t
pmap_early_io_map(vm_paddr_t pa, vm_size_t size)
{
	vm_paddr_t pa_base;
	vm_offset_t va, sz;
	int i;
	tlb_entry_t e;

	KASSERT(!pmap_bootstrapped, ("Do not use after PMAP is up!"));

	for (i = 0; i < TLB1_ENTRIES; i++) {
		tlb1_read_entry(&e, i);
		if (!(e.mas1 & MAS1_VALID))
			continue;
		if (pa >= e.phys && (pa + size) <=
		    (e.phys + e.size))
			return (e.virt + (pa - e.phys));
	}

	pa_base = rounddown(pa, PAGE_SIZE);
	size = roundup(size + (pa - pa_base), PAGE_SIZE);
	tlb1_map_base = roundup2(tlb1_map_base, 1 << (ilog2(size) & ~1));
	va = tlb1_map_base + (pa - pa_base);

	do {
		sz = 1 << (ilog2(size) & ~1);
		tlb1_set_entry(tlb1_map_base, pa_base, sz,
		    _TLB_ENTRY_SHARED | _TLB_ENTRY_IO);
		size -= sz;
		pa_base += sz;
		tlb1_map_base += sz;
	} while (size > 0);

	return (va);
}

void
pmap_track_page(pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t pa;
	vm_page_t page;
	struct pv_entry *pve;

	va = trunc_page(va);
	pa = pmap_kextract(va);
	page = PHYS_TO_VM_PAGE(pa);

	rw_wlock(&pvh_global_lock);
	PMAP_LOCK(pmap);

	TAILQ_FOREACH(pve, &page->md.pv_list, pv_link) {
		if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
			goto out;
		}
	}
	page->md.pv_tracked = true;
	pv_insert(pmap, va, page);
out:
	PMAP_UNLOCK(pmap);
	rw_wunlock(&pvh_global_lock);
}

/*
 * Set up MAS4 defaults.
 * These values are loaded to MAS0-2 on a TLB miss.
 */
static void
set_mas4_defaults(void)
{
	uint32_t mas4;

	/* Defaults: TLB0, PID0, TSIZED=4K */
	mas4 = MAS4_TLBSELD0;
	mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK;
#ifdef SMP
	mas4 |= MAS4_MD;
#endif
	mtspr(SPR_MAS4, mas4);
	__asm __volatile("isync");
}

/*
 * Return 0 if the physical I/O range is encompassed by one of the
 * TLB1 entries, otherwise return the related error code.
 */
static int
tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va)
{
	uint32_t prot;
	vm_paddr_t pa_start;
	vm_paddr_t pa_end;
	unsigned int entry_tsize;
	vm_size_t entry_size;
	tlb_entry_t e;

	*va = (vm_offset_t)NULL;

	tlb1_read_entry(&e, i);
	/* Skip invalid entries */
	if (!(e.mas1 & MAS1_VALID))
		return (EINVAL);

	/*
	 * The entry must be cache-inhibited, guarded and r/w
	 * so it can function as an I/O page.
	 */
	prot = e.mas2 & (MAS2_I | MAS2_G);
	if (prot != (MAS2_I | MAS2_G))
		return (EPERM);

	prot = e.mas3 & (MAS3_SR | MAS3_SW);
	if (prot != (MAS3_SR | MAS3_SW))
		return (EPERM);

	/*
	 * The address should be within the entry range.
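	 * The entry's physical range is reconstructed below from
	 * MAS3/MAS7 (which together hold the 64-bit real page number)
	 * and from its TSIZE field (converted with tsize2size()); the
	 * check then requires [pa, pa + size) to fall entirely within
	 * [pa_start, pa_end).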
	 */
	entry_tsize = (e.mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
	KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize"));

	entry_size = tsize2size(entry_tsize);
	pa_start = (((vm_paddr_t)e.mas7 & MAS7_RPN) << 32) |
	    (e.mas3 & MAS3_RPN);
	pa_end = pa_start + entry_size;

	if ((pa < pa_start) || ((pa + size) > pa_end))
		return (ERANGE);

	/* Return virtual address of this mapping. */
	*va = (e.mas2 & MAS2_EPN_MASK) + (pa - pa_start);
	return (0);
}

/*
 * Invalidate all TLB0 entries which match the given TID. Note this is
 * dedicated for cases when invalidations should NOT be propagated to other
 * CPUs.
 */
static void
tid_flush(tlbtid_t tid)
{
	register_t msr;
	uint32_t mas0, mas1, mas2;
	int entry, way;

	/* Don't evict kernel translations */
	if (tid == TID_KERNEL)
		return;

	msr = mfmsr();
	__asm __volatile("wrteei 0");

	/*
	 * Newer cores (e500mc and later) have tlbilx, which doesn't
	 * broadcast, so use it for PID invalidation.
	 */
	switch ((mfpvr() >> 16) & 0xffff) {
	case FSL_E500mc:
	case FSL_E5500:
	case FSL_E6500:
		mtspr(SPR_MAS6, tid << MAS6_SPID0_SHIFT);
		/* tlbilxpid */
		__asm __volatile("isync; .long 0x7c000024; isync; msync");
		__asm __volatile("wrtee %0" :: "r"(msr));
		return;
	}

	for (way = 0; way < TLB0_WAYS; way++)
		for (entry = 0; entry < TLB0_ENTRIES_PER_WAY; entry++) {

			mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
			mtspr(SPR_MAS0, mas0);

			mas2 = entry << MAS2_TLB0_ENTRY_IDX_SHIFT;
			mtspr(SPR_MAS2, mas2);

			__asm __volatile("isync; tlbre");

			mas1 = mfspr(SPR_MAS1);

			if (!(mas1 & MAS1_VALID))
				continue;
			if (((mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT) != tid)
				continue;
			mas1 &= ~MAS1_VALID;
			mtspr(SPR_MAS1, mas1);
			__asm __volatile("isync; tlbwe; isync; msync");
		}
	__asm __volatile("wrtee %0" :: "r"(msr));
}

#ifdef DDB
/* Print out contents of the MAS registers for each TLB0 entry */
static void
#ifdef __powerpc64__
tlb_print_entry(int i, uint32_t mas1, uint64_t mas2, uint32_t mas3,
#else
tlb_print_entry(int i, uint32_t mas1, uint32_t mas2, uint32_t mas3,
#endif
    uint32_t mas7)
{
	int as;
	char desc[3];
	tlbtid_t tid;
	vm_size_t size;
	unsigned int tsize;

	desc[2] = '\0';
	if (mas1 & MAS1_VALID)
		desc[0] = 'V';
	else
		desc[0] = ' ';

	if (mas1 & MAS1_IPROT)
		desc[1] = 'P';
	else
		desc[1] = ' ';

	as = (mas1 & MAS1_TS_MASK) ? 1 : 0;
	tid = MAS1_GETTID(mas1);

	tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
	size = 0;
	if (tsize)
		size = tsize2size(tsize);

	printf("%3d: (%s) [AS=%d] "
	    "sz = 0x%jx tsz = %d tid = %d mas1 = 0x%08x "
	    "mas2(va) = 0x%"PRI0ptrX" mas3(pa) = 0x%08x mas7 = 0x%08x\n",
	    i, desc, as, (uintmax_t)size, tsize, tid, mas1, mas2, mas3, mas7);
}

DB_SHOW_COMMAND(tlb0, tlb0_print_tlbentries)
{
	uint32_t mas0, mas1, mas3, mas7;
#ifdef __powerpc64__
	uint64_t mas2;
#else
	uint32_t mas2;
#endif
	int entryidx, way, idx;

	printf("TLB0 entries:\n");
	for (way = 0; way < TLB0_WAYS; way++)
		for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) {

			mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
			mtspr(SPR_MAS0, mas0);

			mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT;
			mtspr(SPR_MAS2, mas2);

			__asm __volatile("isync; tlbre");

			mas1 = mfspr(SPR_MAS1);
			mas2 = mfspr(SPR_MAS2);
			mas3 = mfspr(SPR_MAS3);
			mas7 = mfspr(SPR_MAS7);

			idx = tlb0_tableidx(mas2, way);
			tlb_print_entry(idx, mas1, mas2, mas3, mas7);
		}
}

/*
 * Print out contents of the MAS registers for each TLB1 entry
 */
DB_SHOW_COMMAND(tlb1, tlb1_print_tlbentries)
{
	uint32_t mas0, mas1, mas3, mas7;
#ifdef __powerpc64__
	uint64_t mas2;
#else
	uint32_t mas2;
#endif
	int i;

	printf("TLB1 entries:\n");
	for (i = 0; i < TLB1_ENTRIES; i++) {

		mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i);
		mtspr(SPR_MAS0, mas0);

		__asm __volatile("isync; tlbre");

		mas1 = mfspr(SPR_MAS1);
		mas2 = mfspr(SPR_MAS2);
		mas3 = mfspr(SPR_MAS3);
		mas7 = mfspr(SPR_MAS7);

		tlb_print_entry(i, mas1, mas2, mas3, mas7);
	}
}
#endif
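
/*
 * Example usage (a sketch, assuming a kernel built with options DDB):
 * the commands registered above are invoked from the debugger prompt as
 *
 *   db> show tlb0
 *   db> show tlb1
 *
 * each dumping the decoded MAS register contents for every entry of the
 * respective TLB.
 */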