1 /*- 2 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com> 3 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com> 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN 18 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 19 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 20 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 22 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 23 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 24 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 * 26 * Some hw specific parts of this pmap were derived or influenced 27 * by NetBSD's ibm4xx pmap module. More generic code is shared with 28 * a few other pmap modules from the FreeBSD tree. 29 */ 30 31 /* 32 * VM layout notes: 33 * 34 * Kernel and user threads run within one common virtual address space 35 * defined by AS=0. 36 * 37 * Virtual address space layout: 38 * ----------------------------- 39 * 0x0000_0000 - 0xafff_ffff : user process 40 * 0xb000_0000 - 0xbfff_ffff : pmap_mapdev()-ed area (PCI/PCIE etc.) 41 * 0xc000_0000 - 0xc0ff_ffff : kernel reserved 42 * 0xc000_0000 - data_end : kernel code+data, env, metadata etc. 43 * 0xc100_0000 - 0xfeef_ffff : KVA 44 * 0xc100_0000 - 0xc100_3fff : reserved for page zero/copy 45 * 0xc100_4000 - 0xc200_3fff : reserved for ptbl bufs 46 * 0xc200_4000 - 0xc200_8fff : guard page + kstack0 47 * 0xc200_9000 - 0xfeef_ffff : actual free KVA space 48 * 0xfef0_0000 - 0xffff_ffff : I/O devices region 49 */ 50 51 #include <sys/cdefs.h> 52 __FBSDID("$FreeBSD$"); 53 54 #include <sys/types.h> 55 #include <sys/param.h> 56 #include <sys/malloc.h> 57 #include <sys/ktr.h> 58 #include <sys/proc.h> 59 #include <sys/user.h> 60 #include <sys/queue.h> 61 #include <sys/systm.h> 62 #include <sys/kernel.h> 63 #include <sys/msgbuf.h> 64 #include <sys/lock.h> 65 #include <sys/mutex.h> 66 #include <sys/smp.h> 67 #include <sys/vmmeter.h> 68 69 #include <vm/vm.h> 70 #include <vm/vm_page.h> 71 #include <vm/vm_kern.h> 72 #include <vm/vm_pageout.h> 73 #include <vm/vm_extern.h> 74 #include <vm/vm_object.h> 75 #include <vm/vm_param.h> 76 #include <vm/vm_map.h> 77 #include <vm/vm_pager.h> 78 #include <vm/uma.h> 79 80 #include <machine/bootinfo.h> 81 #include <machine/cpu.h> 82 #include <machine/pcb.h> 83 #include <machine/platform.h> 84 85 #include <machine/tlb.h> 86 #include <machine/spr.h> 87 #include <machine/vmparam.h> 88 #include <machine/md_var.h> 89 #include <machine/mmuvar.h> 90 #include <machine/pmap.h> 91 #include <machine/pte.h> 92 93 #include "mmu_if.h" 94 95 #define DEBUG 96 #undef DEBUG 97 98 #ifdef DEBUG 99 #define debugf(fmt, args...) 
printf(fmt, ##args)
#else
#define debugf(fmt, args...)
#endif

#define TODO			panic("%s: not implemented", __func__);

#include "opt_sched.h"
#ifndef SCHED_4BSD
#error "e500 only works with SCHED_4BSD which uses a global scheduler lock."
#endif
extern struct mtx sched_lock;

extern int dumpsys_minidump;

extern unsigned char _etext[];
extern unsigned char _end[];

/* Kernel physical load address. */
extern uint32_t kernload;
vm_offset_t kernstart;
vm_size_t kernsize;

/* Message buffer and tables. */
static vm_offset_t data_start;
static vm_size_t data_end;

/* Phys/avail memory regions. */
static struct mem_region *availmem_regions;
static int availmem_regions_sz;
static struct mem_region *physmem_regions;
static int physmem_regions_sz;

/* Reserved KVA space and mutex for mmu_booke_zero_page. */
static vm_offset_t zero_page_va;
static struct mtx zero_page_mutex;

static struct mtx tlbivax_mutex;

/*
 * Reserved KVA space for mmu_booke_zero_page_idle. This is used
 * by the idle thread only, no lock required.
 */
static vm_offset_t zero_page_idle_va;

/* Reserved KVA space and mutex for mmu_booke_copy_page. */
static vm_offset_t copy_page_src_va;
static vm_offset_t copy_page_dst_va;
static struct mtx copy_page_mutex;

/**************************************************************************/
/* PMAP */
/**************************************************************************/

static void mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t, boolean_t);

unsigned int kptbl_min;		/* Index of the first kernel ptbl. */
unsigned int kernel_ptbls;	/* Number of KVA ptbls. */

/*
 * If user pmap is processed with mmu_booke_remove and the resident count
 * drops to 0, there are no more pages to remove, so we need not continue.
 */
#define PMAP_REMOVE_DONE(pmap) \
	((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0)

extern void tlb_lock(uint32_t *);
extern void tlb_unlock(uint32_t *);
extern void tid_flush(tlbtid_t);

/**************************************************************************/
/* TLB and TID handling */
/**************************************************************************/

/* Translation ID busy table */
static volatile pmap_t tidbusy[MAXCPU][TID_MAX + 1];

/*
 * TLB0 capabilities (entry, way numbers etc.). These can vary between e500
 * core revisions and should be read from h/w registers during early config.
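 *
 * For example, if TLB0CFG reported a 256-entry, 2-way set-associative
 * TLB0, tlb0_entries would be 256, tlb0_ways 2 and tlb0_entries_per_way
 * 128 (illustrative numbers only; the real values are read in
 * tlb0_get_tlbconf() below).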
 */
uint32_t tlb0_entries;
uint32_t tlb0_ways;
uint32_t tlb0_entries_per_way;

#define TLB0_ENTRIES		(tlb0_entries)
#define TLB0_WAYS		(tlb0_ways)
#define TLB0_ENTRIES_PER_WAY	(tlb0_entries_per_way)

#define TLB1_ENTRIES 16

/* In-ram copy of the TLB1 */
static tlb_entry_t tlb1[TLB1_ENTRIES];

/* Next free entry in the TLB1 */
static unsigned int tlb1_idx;

static tlbtid_t tid_alloc(struct pmap *);

static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t);

static int tlb1_set_entry(vm_offset_t, vm_offset_t, vm_size_t, uint32_t);
static void tlb1_write_entry(unsigned int);
static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *);
static vm_size_t tlb1_mapin_region(vm_offset_t, vm_offset_t, vm_size_t);

static vm_size_t tsize2size(unsigned int);
static unsigned int size2tsize(vm_size_t);
static unsigned int ilog2(unsigned int);

static void set_mas4_defaults(void);

static inline void tlb0_flush_entry(vm_offset_t);
static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int);

/**************************************************************************/
/* Page table management */
/**************************************************************************/

/* Data for the pv entry allocation mechanism */
static uma_zone_t pvzone;
static struct vm_object pvzone_obj;
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;

#define PV_ENTRY_ZONE_MIN	2048	/* min pv entries in uma zone */

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC	200
#endif

static void ptbl_init(void);
static struct ptbl_buf *ptbl_buf_alloc(void);
static void ptbl_buf_free(struct ptbl_buf *);
static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);

static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int);
static void ptbl_free(mmu_t, pmap_t, unsigned int);
static void ptbl_hold(mmu_t, pmap_t, unsigned int);
static int ptbl_unhold(mmu_t, pmap_t, unsigned int);

static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
static void pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t);
static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);

static pv_entry_t pv_alloc(void);
static void pv_free(pv_entry_t);
static void pv_insert(pmap_t, vm_offset_t, vm_page_t);
static void pv_remove(pmap_t, vm_offset_t, vm_page_t);

/* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */
#define PTBL_BUFS		(128 * 16)

struct ptbl_buf {
	TAILQ_ENTRY(ptbl_buf) link;	/* list link */
	vm_offset_t kva;		/* va of mapping */
};

/* ptbl free list and a lock used for access synchronization. */
static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist;
static struct mtx ptbl_buf_freelist_lock;

/* Base address of kva space allocated for ptbl bufs. */
static vm_offset_t ptbl_buf_pool_vabase;

/* Pointer to ptbl_buf structures.
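 * This is an array of PTBL_BUFS descriptors; ptbl_init() points each one
 * at its own PTBL_PAGES-sized slice of the KVA window that starts at
 * ptbl_buf_pool_vabase and strings them all onto ptbl_buf_freelist.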
*/ 266 static struct ptbl_buf *ptbl_bufs; 267 268 void pmap_bootstrap_ap(volatile uint32_t *); 269 270 /* 271 * Kernel MMU interface 272 */ 273 static void mmu_booke_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t); 274 static void mmu_booke_clear_modify(mmu_t, vm_page_t); 275 static void mmu_booke_clear_reference(mmu_t, vm_page_t); 276 static void mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t, 277 vm_size_t, vm_offset_t); 278 static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t); 279 static void mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, 280 vm_prot_t, boolean_t); 281 static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, 282 vm_page_t, vm_prot_t); 283 static void mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, 284 vm_prot_t); 285 static vm_paddr_t mmu_booke_extract(mmu_t, pmap_t, vm_offset_t); 286 static vm_page_t mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t, 287 vm_prot_t); 288 static void mmu_booke_init(mmu_t); 289 static boolean_t mmu_booke_is_modified(mmu_t, vm_page_t); 290 static boolean_t mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t); 291 static boolean_t mmu_booke_ts_referenced(mmu_t, vm_page_t); 292 static vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, 293 int); 294 static int mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t); 295 static void mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t, 296 vm_object_t, vm_pindex_t, vm_size_t); 297 static boolean_t mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t); 298 static void mmu_booke_page_init(mmu_t, vm_page_t); 299 static int mmu_booke_page_wired_mappings(mmu_t, vm_page_t); 300 static void mmu_booke_pinit(mmu_t, pmap_t); 301 static void mmu_booke_pinit0(mmu_t, pmap_t); 302 static void mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, 303 vm_prot_t); 304 static void mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int); 305 static void mmu_booke_qremove(mmu_t, vm_offset_t, int); 306 static void mmu_booke_release(mmu_t, pmap_t); 307 static void mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t); 308 static void mmu_booke_remove_all(mmu_t, vm_page_t); 309 static void mmu_booke_remove_write(mmu_t, vm_page_t); 310 static void mmu_booke_zero_page(mmu_t, vm_page_t); 311 static void mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int); 312 static void mmu_booke_zero_page_idle(mmu_t, vm_page_t); 313 static void mmu_booke_activate(mmu_t, struct thread *); 314 static void mmu_booke_deactivate(mmu_t, struct thread *); 315 static void mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t); 316 static void *mmu_booke_mapdev(mmu_t, vm_offset_t, vm_size_t); 317 static void mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t); 318 static vm_offset_t mmu_booke_kextract(mmu_t, vm_offset_t); 319 static void mmu_booke_kenter(mmu_t, vm_offset_t, vm_offset_t); 320 static void mmu_booke_kremove(mmu_t, vm_offset_t); 321 static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t); 322 static void mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t, 323 vm_size_t); 324 static vm_offset_t mmu_booke_dumpsys_map(mmu_t, struct pmap_md *, 325 vm_size_t, vm_size_t *); 326 static void mmu_booke_dumpsys_unmap(mmu_t, struct pmap_md *, 327 vm_size_t, vm_offset_t); 328 static struct pmap_md *mmu_booke_scan_md(mmu_t, struct pmap_md *); 329 330 static mmu_method_t mmu_booke_methods[] = { 331 /* pmap dispatcher interface */ 332 MMUMETHOD(mmu_change_wiring, mmu_booke_change_wiring), 333 MMUMETHOD(mmu_clear_modify, mmu_booke_clear_modify), 334 
MMUMETHOD(mmu_clear_reference, mmu_booke_clear_reference), 335 MMUMETHOD(mmu_copy, mmu_booke_copy), 336 MMUMETHOD(mmu_copy_page, mmu_booke_copy_page), 337 MMUMETHOD(mmu_enter, mmu_booke_enter), 338 MMUMETHOD(mmu_enter_object, mmu_booke_enter_object), 339 MMUMETHOD(mmu_enter_quick, mmu_booke_enter_quick), 340 MMUMETHOD(mmu_extract, mmu_booke_extract), 341 MMUMETHOD(mmu_extract_and_hold, mmu_booke_extract_and_hold), 342 MMUMETHOD(mmu_init, mmu_booke_init), 343 MMUMETHOD(mmu_is_modified, mmu_booke_is_modified), 344 MMUMETHOD(mmu_is_prefaultable, mmu_booke_is_prefaultable), 345 MMUMETHOD(mmu_ts_referenced, mmu_booke_ts_referenced), 346 MMUMETHOD(mmu_map, mmu_booke_map), 347 MMUMETHOD(mmu_mincore, mmu_booke_mincore), 348 MMUMETHOD(mmu_object_init_pt, mmu_booke_object_init_pt), 349 MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick), 350 MMUMETHOD(mmu_page_init, mmu_booke_page_init), 351 MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings), 352 MMUMETHOD(mmu_pinit, mmu_booke_pinit), 353 MMUMETHOD(mmu_pinit0, mmu_booke_pinit0), 354 MMUMETHOD(mmu_protect, mmu_booke_protect), 355 MMUMETHOD(mmu_qenter, mmu_booke_qenter), 356 MMUMETHOD(mmu_qremove, mmu_booke_qremove), 357 MMUMETHOD(mmu_release, mmu_booke_release), 358 MMUMETHOD(mmu_remove, mmu_booke_remove), 359 MMUMETHOD(mmu_remove_all, mmu_booke_remove_all), 360 MMUMETHOD(mmu_remove_write, mmu_booke_remove_write), 361 MMUMETHOD(mmu_sync_icache, mmu_booke_sync_icache), 362 MMUMETHOD(mmu_zero_page, mmu_booke_zero_page), 363 MMUMETHOD(mmu_zero_page_area, mmu_booke_zero_page_area), 364 MMUMETHOD(mmu_zero_page_idle, mmu_booke_zero_page_idle), 365 MMUMETHOD(mmu_activate, mmu_booke_activate), 366 MMUMETHOD(mmu_deactivate, mmu_booke_deactivate), 367 368 /* Internal interfaces */ 369 MMUMETHOD(mmu_bootstrap, mmu_booke_bootstrap), 370 MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped), 371 MMUMETHOD(mmu_mapdev, mmu_booke_mapdev), 372 MMUMETHOD(mmu_kenter, mmu_booke_kenter), 373 MMUMETHOD(mmu_kextract, mmu_booke_kextract), 374 /* MMUMETHOD(mmu_kremove, mmu_booke_kremove), */ 375 MMUMETHOD(mmu_unmapdev, mmu_booke_unmapdev), 376 377 /* dumpsys() support */ 378 MMUMETHOD(mmu_dumpsys_map, mmu_booke_dumpsys_map), 379 MMUMETHOD(mmu_dumpsys_unmap, mmu_booke_dumpsys_unmap), 380 MMUMETHOD(mmu_scan_md, mmu_booke_scan_md), 381 382 { 0, 0 } 383 }; 384 385 static mmu_def_t booke_mmu = { 386 MMU_TYPE_BOOKE, 387 mmu_booke_methods, 388 0 389 }; 390 MMU_DEF(booke_mmu); 391 392 static inline void 393 tlb_miss_lock(void) 394 { 395 #ifdef SMP 396 struct pcpu *pc; 397 398 if (!smp_started) 399 return; 400 401 SLIST_FOREACH(pc, &cpuhead, pc_allcpu) { 402 if (pc != pcpup) { 403 404 CTR3(KTR_PMAP, "%s: tlb miss LOCK of CPU=%d, " 405 "tlb_lock=%p", __func__, pc->pc_cpuid, pc->pc_booke_tlb_lock); 406 407 KASSERT((pc->pc_cpuid != PCPU_GET(cpuid)), 408 ("tlb_miss_lock: tried to lock self")); 409 410 tlb_lock(pc->pc_booke_tlb_lock); 411 412 CTR1(KTR_PMAP, "%s: locked", __func__); 413 } 414 } 415 #endif 416 } 417 418 static inline void 419 tlb_miss_unlock(void) 420 { 421 #ifdef SMP 422 struct pcpu *pc; 423 424 if (!smp_started) 425 return; 426 427 SLIST_FOREACH(pc, &cpuhead, pc_allcpu) { 428 if (pc != pcpup) { 429 CTR2(KTR_PMAP, "%s: tlb miss UNLOCK of CPU=%d", 430 __func__, pc->pc_cpuid); 431 432 tlb_unlock(pc->pc_booke_tlb_lock); 433 434 CTR1(KTR_PMAP, "%s: unlocked", __func__); 435 } 436 } 437 #endif 438 } 439 440 /* Return number of entries in TLB0. 
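 * (More precisely: read the TLB0CFG SPR and fill in tlb0_entries,
 * tlb0_ways and tlb0_entries_per_way; called early from
 * mmu_booke_bootstrap() so the TLB0_* macros are usable afterwards.)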
*/ 441 static __inline void 442 tlb0_get_tlbconf(void) 443 { 444 uint32_t tlb0_cfg; 445 446 tlb0_cfg = mfspr(SPR_TLB0CFG); 447 tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK; 448 tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT; 449 tlb0_entries_per_way = tlb0_entries / tlb0_ways; 450 } 451 452 /* Initialize pool of kva ptbl buffers. */ 453 static void 454 ptbl_init(void) 455 { 456 int i; 457 458 CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__, 459 (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS); 460 CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)", 461 __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE); 462 463 mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF); 464 TAILQ_INIT(&ptbl_buf_freelist); 465 466 for (i = 0; i < PTBL_BUFS; i++) { 467 ptbl_bufs[i].kva = ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE; 468 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link); 469 } 470 } 471 472 /* Get a ptbl_buf from the freelist. */ 473 static struct ptbl_buf * 474 ptbl_buf_alloc(void) 475 { 476 struct ptbl_buf *buf; 477 478 mtx_lock(&ptbl_buf_freelist_lock); 479 buf = TAILQ_FIRST(&ptbl_buf_freelist); 480 if (buf != NULL) 481 TAILQ_REMOVE(&ptbl_buf_freelist, buf, link); 482 mtx_unlock(&ptbl_buf_freelist_lock); 483 484 CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf); 485 486 return (buf); 487 } 488 489 /* Return ptbl buff to free pool. */ 490 static void 491 ptbl_buf_free(struct ptbl_buf *buf) 492 { 493 494 CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf); 495 496 mtx_lock(&ptbl_buf_freelist_lock); 497 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link); 498 mtx_unlock(&ptbl_buf_freelist_lock); 499 } 500 501 /* 502 * Search the list of allocated ptbl bufs and find on list of allocated ptbls 503 */ 504 static void 505 ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl) 506 { 507 struct ptbl_buf *pbuf; 508 509 CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl); 510 511 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 512 513 TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link) 514 if (pbuf->kva == (vm_offset_t)ptbl) { 515 /* Remove from pmap ptbl buf list. */ 516 TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link); 517 518 /* Free corresponding ptbl buf. */ 519 ptbl_buf_free(pbuf); 520 break; 521 } 522 } 523 524 /* Allocate page table. */ 525 static pte_t * 526 ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 527 { 528 vm_page_t mtbl[PTBL_PAGES]; 529 vm_page_t m; 530 struct ptbl_buf *pbuf; 531 unsigned int pidx; 532 pte_t *ptbl; 533 int i; 534 535 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap, 536 (pmap == kernel_pmap), pdir_idx); 537 538 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 539 ("ptbl_alloc: invalid pdir_idx")); 540 KASSERT((pmap->pm_pdir[pdir_idx] == NULL), 541 ("pte_alloc: valid ptbl entry exists!")); 542 543 pbuf = ptbl_buf_alloc(); 544 if (pbuf == NULL) 545 panic("pte_alloc: couldn't alloc kernel virtual memory"); 546 547 ptbl = (pte_t *)pbuf->kva; 548 549 CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl); 550 551 /* Allocate ptbl pages, this will sleep! */ 552 for (i = 0; i < PTBL_PAGES; i++) { 553 pidx = (PTBL_PAGES * pdir_idx) + i; 554 while ((m = vm_page_alloc(NULL, pidx, 555 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) { 556 557 PMAP_UNLOCK(pmap); 558 vm_page_unlock_queues(); 559 VM_WAIT; 560 vm_page_lock_queues(); 561 PMAP_LOCK(pmap); 562 } 563 mtbl[i] = m; 564 } 565 566 /* Map allocated pages into kernel_pmap. 
*/ 567 mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES); 568 569 /* Zero whole ptbl. */ 570 bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE); 571 572 /* Add pbuf to the pmap ptbl bufs list. */ 573 TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link); 574 575 return (ptbl); 576 } 577 578 /* Free ptbl pages and invalidate pdir entry. */ 579 static void 580 ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 581 { 582 pte_t *ptbl; 583 vm_paddr_t pa; 584 vm_offset_t va; 585 vm_page_t m; 586 int i; 587 588 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap, 589 (pmap == kernel_pmap), pdir_idx); 590 591 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 592 ("ptbl_free: invalid pdir_idx")); 593 594 ptbl = pmap->pm_pdir[pdir_idx]; 595 596 CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl); 597 598 KASSERT((ptbl != NULL), ("ptbl_free: null ptbl")); 599 600 /* 601 * Invalidate the pdir entry as soon as possible, so that other CPUs 602 * don't attempt to look up the page tables we are releasing. 603 */ 604 mtx_lock_spin(&tlbivax_mutex); 605 tlb_miss_lock(); 606 607 pmap->pm_pdir[pdir_idx] = NULL; 608 609 tlb_miss_unlock(); 610 mtx_unlock_spin(&tlbivax_mutex); 611 612 for (i = 0; i < PTBL_PAGES; i++) { 613 va = ((vm_offset_t)ptbl + (i * PAGE_SIZE)); 614 pa = pte_vatopa(mmu, kernel_pmap, va); 615 m = PHYS_TO_VM_PAGE(pa); 616 vm_page_free_zero(m); 617 atomic_subtract_int(&cnt.v_wire_count, 1); 618 mmu_booke_kremove(mmu, va); 619 } 620 621 ptbl_free_pmap_ptbl(pmap, ptbl); 622 } 623 624 /* 625 * Decrement ptbl pages hold count and attempt to free ptbl pages. 626 * Called when removing pte entry from ptbl. 627 * 628 * Return 1 if ptbl pages were freed. 629 */ 630 static int 631 ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 632 { 633 pte_t *ptbl; 634 vm_paddr_t pa; 635 vm_page_t m; 636 int i; 637 638 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap, 639 (pmap == kernel_pmap), pdir_idx); 640 641 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 642 ("ptbl_unhold: invalid pdir_idx")); 643 KASSERT((pmap != kernel_pmap), 644 ("ptbl_unhold: unholding kernel ptbl!")); 645 646 ptbl = pmap->pm_pdir[pdir_idx]; 647 648 //debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl); 649 KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS), 650 ("ptbl_unhold: non kva ptbl")); 651 652 /* decrement hold count */ 653 for (i = 0; i < PTBL_PAGES; i++) { 654 pa = pte_vatopa(mmu, kernel_pmap, 655 (vm_offset_t)ptbl + (i * PAGE_SIZE)); 656 m = PHYS_TO_VM_PAGE(pa); 657 m->wire_count--; 658 } 659 660 /* 661 * Free ptbl pages if there are no pte etries in this ptbl. 662 * wire_count has the same value for all ptbl pages, so check the last 663 * page. 664 */ 665 if (m->wire_count == 0) { 666 ptbl_free(mmu, pmap, pdir_idx); 667 668 //debugf("ptbl_unhold: e (freed ptbl)\n"); 669 return (1); 670 } 671 672 return (0); 673 } 674 675 /* 676 * Increment hold count for ptbl pages. This routine is used when a new pte 677 * entry is being inserted into the ptbl. 
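 *
 * Roughly, the wire_count of the ptbl's pages doubles as the hold count:
 * ptbl_alloc() wires them (accounting for the first PTE), ptbl_hold()
 * bumps the count for every further PTE, and ptbl_unhold() drops it,
 * freeing the whole ptbl once the count reaches zero.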
678 */ 679 static void 680 ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 681 { 682 vm_paddr_t pa; 683 pte_t *ptbl; 684 vm_page_t m; 685 int i; 686 687 CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap, 688 pdir_idx); 689 690 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 691 ("ptbl_hold: invalid pdir_idx")); 692 KASSERT((pmap != kernel_pmap), 693 ("ptbl_hold: holding kernel ptbl!")); 694 695 ptbl = pmap->pm_pdir[pdir_idx]; 696 697 KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl")); 698 699 for (i = 0; i < PTBL_PAGES; i++) { 700 pa = pte_vatopa(mmu, kernel_pmap, 701 (vm_offset_t)ptbl + (i * PAGE_SIZE)); 702 m = PHYS_TO_VM_PAGE(pa); 703 m->wire_count++; 704 } 705 } 706 707 /* Allocate pv_entry structure. */ 708 pv_entry_t 709 pv_alloc(void) 710 { 711 pv_entry_t pv; 712 713 pv_entry_count++; 714 if (pv_entry_count > pv_entry_high_water) 715 pagedaemon_wakeup(); 716 pv = uma_zalloc(pvzone, M_NOWAIT); 717 718 return (pv); 719 } 720 721 /* Free pv_entry structure. */ 722 static __inline void 723 pv_free(pv_entry_t pve) 724 { 725 726 pv_entry_count--; 727 uma_zfree(pvzone, pve); 728 } 729 730 731 /* Allocate and initialize pv_entry structure. */ 732 static void 733 pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m) 734 { 735 pv_entry_t pve; 736 737 //int su = (pmap == kernel_pmap); 738 //debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su, 739 // (u_int32_t)pmap, va, (u_int32_t)m); 740 741 pve = pv_alloc(); 742 if (pve == NULL) 743 panic("pv_insert: no pv entries!"); 744 745 pve->pv_pmap = pmap; 746 pve->pv_va = va; 747 748 /* add to pv_list */ 749 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 750 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 751 752 TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link); 753 754 //debugf("pv_insert: e\n"); 755 } 756 757 /* Destroy pv entry. */ 758 static void 759 pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m) 760 { 761 pv_entry_t pve; 762 763 //int su = (pmap == kernel_pmap); 764 //debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va); 765 766 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 767 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 768 769 /* find pv entry */ 770 TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) { 771 if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) { 772 /* remove from pv_list */ 773 TAILQ_REMOVE(&m->md.pv_list, pve, pv_link); 774 if (TAILQ_EMPTY(&m->md.pv_list)) 775 vm_page_flag_clear(m, PG_WRITEABLE); 776 777 /* free pv entry struct */ 778 pv_free(pve); 779 break; 780 } 781 } 782 783 //debugf("pv_remove: e\n"); 784 } 785 786 /* 787 * Clean pte entry, try to free page table page if requested. 788 * 789 * Return 1 if ptbl pages were freed, otherwise return 0. 790 */ 791 static int 792 pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags) 793 { 794 unsigned int pdir_idx = PDIR_IDX(va); 795 unsigned int ptbl_idx = PTBL_IDX(va); 796 vm_page_t m; 797 pte_t *ptbl; 798 pte_t *pte; 799 800 //int su = (pmap == kernel_pmap); 801 //debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n", 802 // su, (u_int32_t)pmap, va, flags); 803 804 ptbl = pmap->pm_pdir[pdir_idx]; 805 KASSERT(ptbl, ("pte_remove: null ptbl")); 806 807 pte = &ptbl[ptbl_idx]; 808 809 if (pte == NULL || !PTE_ISVALID(pte)) 810 return (0); 811 812 if (PTE_ISWIRED(pte)) 813 pmap->pm_stats.wired_count--; 814 815 /* Handle managed entry. */ 816 if (PTE_ISMANAGED(pte)) { 817 /* Get vm_page_t for mapped pte. 
*/ 818 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 819 820 if (PTE_ISMODIFIED(pte)) 821 vm_page_dirty(m); 822 823 if (PTE_ISREFERENCED(pte)) 824 vm_page_flag_set(m, PG_REFERENCED); 825 826 pv_remove(pmap, va, m); 827 } 828 829 mtx_lock_spin(&tlbivax_mutex); 830 tlb_miss_lock(); 831 832 tlb0_flush_entry(va); 833 pte->flags = 0; 834 pte->rpn = 0; 835 836 tlb_miss_unlock(); 837 mtx_unlock_spin(&tlbivax_mutex); 838 839 pmap->pm_stats.resident_count--; 840 841 if (flags & PTBL_UNHOLD) { 842 //debugf("pte_remove: e (unhold)\n"); 843 return (ptbl_unhold(mmu, pmap, pdir_idx)); 844 } 845 846 //debugf("pte_remove: e\n"); 847 return (0); 848 } 849 850 /* 851 * Insert PTE for a given page and virtual address. 852 */ 853 static void 854 pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags) 855 { 856 unsigned int pdir_idx = PDIR_IDX(va); 857 unsigned int ptbl_idx = PTBL_IDX(va); 858 pte_t *ptbl, *pte; 859 860 CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__, 861 pmap == kernel_pmap, pmap, va); 862 863 /* Get the page table pointer. */ 864 ptbl = pmap->pm_pdir[pdir_idx]; 865 866 if (ptbl == NULL) { 867 /* Allocate page table pages. */ 868 ptbl = ptbl_alloc(mmu, pmap, pdir_idx); 869 } else { 870 /* 871 * Check if there is valid mapping for requested 872 * va, if there is, remove it. 873 */ 874 pte = &pmap->pm_pdir[pdir_idx][ptbl_idx]; 875 if (PTE_ISVALID(pte)) { 876 pte_remove(mmu, pmap, va, PTBL_HOLD); 877 } else { 878 /* 879 * pte is not used, increment hold count 880 * for ptbl pages. 881 */ 882 if (pmap != kernel_pmap) 883 ptbl_hold(mmu, pmap, pdir_idx); 884 } 885 } 886 887 /* 888 * Insert pv_entry into pv_list for mapped page if part of managed 889 * memory. 890 */ 891 if ((m->flags & PG_FICTITIOUS) == 0) { 892 if ((m->flags & PG_UNMANAGED) == 0) { 893 flags |= PTE_MANAGED; 894 895 /* Create and insert pv entry. */ 896 pv_insert(pmap, va, m); 897 } 898 } 899 900 pmap->pm_stats.resident_count++; 901 902 mtx_lock_spin(&tlbivax_mutex); 903 tlb_miss_lock(); 904 905 tlb0_flush_entry(va); 906 if (pmap->pm_pdir[pdir_idx] == NULL) { 907 /* 908 * If we just allocated a new page table, hook it in 909 * the pdir. 910 */ 911 pmap->pm_pdir[pdir_idx] = ptbl; 912 } 913 pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]); 914 pte->rpn = VM_PAGE_TO_PHYS(m) & ~PTE_PA_MASK; 915 pte->flags |= (PTE_VALID | flags); 916 917 tlb_miss_unlock(); 918 mtx_unlock_spin(&tlbivax_mutex); 919 } 920 921 /* Return the pa for the given pmap/va. */ 922 static vm_paddr_t 923 pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va) 924 { 925 vm_paddr_t pa = 0; 926 pte_t *pte; 927 928 pte = pte_find(mmu, pmap, va); 929 if ((pte != NULL) && PTE_ISVALID(pte)) 930 pa = (PTE_PA(pte) | (va & PTE_PA_MASK)); 931 return (pa); 932 } 933 934 /* Get a pointer to a PTE in a page table. */ 935 static pte_t * 936 pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va) 937 { 938 unsigned int pdir_idx = PDIR_IDX(va); 939 unsigned int ptbl_idx = PTBL_IDX(va); 940 941 KASSERT((pmap != NULL), ("pte_find: invalid pmap")); 942 943 if (pmap->pm_pdir[pdir_idx]) 944 return (&(pmap->pm_pdir[pdir_idx][ptbl_idx])); 945 946 return (NULL); 947 } 948 949 /**************************************************************************/ 950 /* PMAP related */ 951 /**************************************************************************/ 952 953 /* 954 * This is called during e500_init, before the system is really initialized. 
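 *
 * In outline: read the TLB0 geometry, lay out the msgbuf, dpcpu area,
 * ptbl_bufs array and kernel page tables right after the kernel image,
 * remove the kernel's physical range from the available regions, build
 * phys_avail[], and finally initialize kernel_pmap and map kstack0 for
 * thread0.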
955 */ 956 static void 957 mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend) 958 { 959 vm_offset_t phys_kernelend; 960 struct mem_region *mp, *mp1; 961 int cnt, i, j; 962 u_int s, e, sz; 963 u_int phys_avail_count; 964 vm_size_t physsz, hwphyssz, kstack0_sz; 965 vm_offset_t kernel_pdir, kstack0, va; 966 vm_paddr_t kstack0_phys; 967 void *dpcpu; 968 pte_t *pte; 969 970 debugf("mmu_booke_bootstrap: entered\n"); 971 972 /* Initialize invalidation mutex */ 973 mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN); 974 975 /* Read TLB0 size and associativity. */ 976 tlb0_get_tlbconf(); 977 978 /* Align kernel start and end address (kernel image). */ 979 kernstart = trunc_page(start); 980 data_start = round_page(kernelend); 981 kernsize = data_start - kernstart; 982 983 data_end = data_start; 984 985 /* Allocate space for the message buffer. */ 986 msgbufp = (struct msgbuf *)data_end; 987 data_end += MSGBUF_SIZE; 988 debugf(" msgbufp at 0x%08x end = 0x%08x\n", (uint32_t)msgbufp, 989 data_end); 990 991 data_end = round_page(data_end); 992 993 /* Allocate the dynamic per-cpu area. */ 994 dpcpu = (void *)data_end; 995 data_end += DPCPU_SIZE; 996 dpcpu_init(dpcpu, 0); 997 998 /* Allocate space for ptbl_bufs. */ 999 ptbl_bufs = (struct ptbl_buf *)data_end; 1000 data_end += sizeof(struct ptbl_buf) * PTBL_BUFS; 1001 debugf(" ptbl_bufs at 0x%08x end = 0x%08x\n", (uint32_t)ptbl_bufs, 1002 data_end); 1003 1004 data_end = round_page(data_end); 1005 1006 /* Allocate PTE tables for kernel KVA. */ 1007 kernel_pdir = data_end; 1008 kernel_ptbls = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS + 1009 PDIR_SIZE - 1) / PDIR_SIZE; 1010 data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE; 1011 debugf(" kernel ptbls: %d\n", kernel_ptbls); 1012 debugf(" kernel pdir at 0x%08x end = 0x%08x\n", kernel_pdir, data_end); 1013 1014 debugf(" data_end: 0x%08x\n", data_end); 1015 if (data_end - kernstart > 0x1000000) { 1016 data_end = (data_end + 0x3fffff) & ~0x3fffff; 1017 tlb1_mapin_region(kernstart + 0x1000000, 1018 kernload + 0x1000000, data_end - kernstart - 0x1000000); 1019 } else 1020 data_end = (data_end + 0xffffff) & ~0xffffff; 1021 1022 debugf(" updated data_end: 0x%08x\n", data_end); 1023 1024 kernsize += data_end - data_start; 1025 1026 /* 1027 * Clear the structures - note we can only do it safely after the 1028 * possible additional TLB1 translations are in place (above) so that 1029 * all range up to the currently calculated 'data_end' is covered. 1030 */ 1031 memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_SIZE); 1032 memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE); 1033 1034 /*******************************************************/ 1035 /* Set the start and end of kva. */ 1036 /*******************************************************/ 1037 virtual_avail = round_page(data_end); 1038 virtual_end = VM_MAX_KERNEL_ADDRESS; 1039 1040 /* Allocate KVA space for page zero/copy operations. */ 1041 zero_page_va = virtual_avail; 1042 virtual_avail += PAGE_SIZE; 1043 zero_page_idle_va = virtual_avail; 1044 virtual_avail += PAGE_SIZE; 1045 copy_page_src_va = virtual_avail; 1046 virtual_avail += PAGE_SIZE; 1047 copy_page_dst_va = virtual_avail; 1048 virtual_avail += PAGE_SIZE; 1049 debugf("zero_page_va = 0x%08x\n", zero_page_va); 1050 debugf("zero_page_idle_va = 0x%08x\n", zero_page_idle_va); 1051 debugf("copy_page_src_va = 0x%08x\n", copy_page_src_va); 1052 debugf("copy_page_dst_va = 0x%08x\n", copy_page_dst_va); 1053 1054 /* Initialize page zero/copy mutexes. 
*/ 1055 mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF); 1056 mtx_init(©_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF); 1057 1058 /* Allocate KVA space for ptbl bufs. */ 1059 ptbl_buf_pool_vabase = virtual_avail; 1060 virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE; 1061 debugf("ptbl_buf_pool_vabase = 0x%08x end = 0x%08x\n", 1062 ptbl_buf_pool_vabase, virtual_avail); 1063 1064 /* Calculate corresponding physical addresses for the kernel region. */ 1065 phys_kernelend = kernload + kernsize; 1066 debugf("kernel image and allocated data:\n"); 1067 debugf(" kernload = 0x%08x\n", kernload); 1068 debugf(" kernstart = 0x%08x\n", kernstart); 1069 debugf(" kernsize = 0x%08x\n", kernsize); 1070 1071 if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz) 1072 panic("mmu_booke_bootstrap: phys_avail too small"); 1073 1074 /* 1075 * Remove kernel physical address range from avail regions list. Page 1076 * align all regions. Non-page aligned memory isn't very interesting 1077 * to us. Also, sort the entries for ascending addresses. 1078 */ 1079 1080 /* Retrieve phys/avail mem regions */ 1081 mem_regions(&physmem_regions, &physmem_regions_sz, 1082 &availmem_regions, &availmem_regions_sz); 1083 sz = 0; 1084 cnt = availmem_regions_sz; 1085 debugf("processing avail regions:\n"); 1086 for (mp = availmem_regions; mp->mr_size; mp++) { 1087 s = mp->mr_start; 1088 e = mp->mr_start + mp->mr_size; 1089 debugf(" %08x-%08x -> ", s, e); 1090 /* Check whether this region holds all of the kernel. */ 1091 if (s < kernload && e > phys_kernelend) { 1092 availmem_regions[cnt].mr_start = phys_kernelend; 1093 availmem_regions[cnt++].mr_size = e - phys_kernelend; 1094 e = kernload; 1095 } 1096 /* Look whether this regions starts within the kernel. */ 1097 if (s >= kernload && s < phys_kernelend) { 1098 if (e <= phys_kernelend) 1099 goto empty; 1100 s = phys_kernelend; 1101 } 1102 /* Now look whether this region ends within the kernel. */ 1103 if (e > kernload && e <= phys_kernelend) { 1104 if (s >= kernload) 1105 goto empty; 1106 e = kernload; 1107 } 1108 /* Now page align the start and size of the region. */ 1109 s = round_page(s); 1110 e = trunc_page(e); 1111 if (e < s) 1112 e = s; 1113 sz = e - s; 1114 debugf("%08x-%08x = %x\n", s, e, sz); 1115 1116 /* Check whether some memory is left here. */ 1117 if (sz == 0) { 1118 empty: 1119 memmove(mp, mp + 1, 1120 (cnt - (mp - availmem_regions)) * sizeof(*mp)); 1121 cnt--; 1122 mp--; 1123 continue; 1124 } 1125 1126 /* Do an insertion sort. 
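		 * (This keeps availmem_regions ordered by ascending
		 * mr_start; the Maxmem calculation further down assumes
		 * the last phys_avail[] range is the highest one.)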
*/ 1127 for (mp1 = availmem_regions; mp1 < mp; mp1++) 1128 if (s < mp1->mr_start) 1129 break; 1130 if (mp1 < mp) { 1131 memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1); 1132 mp1->mr_start = s; 1133 mp1->mr_size = sz; 1134 } else { 1135 mp->mr_start = s; 1136 mp->mr_size = sz; 1137 } 1138 } 1139 availmem_regions_sz = cnt; 1140 1141 /*******************************************************/ 1142 /* Steal physical memory for kernel stack from the end */ 1143 /* of the first avail region */ 1144 /*******************************************************/ 1145 kstack0_sz = KSTACK_PAGES * PAGE_SIZE; 1146 kstack0_phys = availmem_regions[0].mr_start + 1147 availmem_regions[0].mr_size; 1148 kstack0_phys -= kstack0_sz; 1149 availmem_regions[0].mr_size -= kstack0_sz; 1150 1151 /*******************************************************/ 1152 /* Fill in phys_avail table, based on availmem_regions */ 1153 /*******************************************************/ 1154 phys_avail_count = 0; 1155 physsz = 0; 1156 hwphyssz = 0; 1157 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz); 1158 1159 debugf("fill in phys_avail:\n"); 1160 for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) { 1161 1162 debugf(" region: 0x%08x - 0x%08x (0x%08x)\n", 1163 availmem_regions[i].mr_start, 1164 availmem_regions[i].mr_start + 1165 availmem_regions[i].mr_size, 1166 availmem_regions[i].mr_size); 1167 1168 if (hwphyssz != 0 && 1169 (physsz + availmem_regions[i].mr_size) >= hwphyssz) { 1170 debugf(" hw.physmem adjust\n"); 1171 if (physsz < hwphyssz) { 1172 phys_avail[j] = availmem_regions[i].mr_start; 1173 phys_avail[j + 1] = 1174 availmem_regions[i].mr_start + 1175 hwphyssz - physsz; 1176 physsz = hwphyssz; 1177 phys_avail_count++; 1178 } 1179 break; 1180 } 1181 1182 phys_avail[j] = availmem_regions[i].mr_start; 1183 phys_avail[j + 1] = availmem_regions[i].mr_start + 1184 availmem_regions[i].mr_size; 1185 phys_avail_count++; 1186 physsz += availmem_regions[i].mr_size; 1187 } 1188 physmem = btoc(physsz); 1189 1190 /* Calculate the last available physical address. */ 1191 for (i = 0; phys_avail[i + 2] != 0; i += 2) 1192 ; 1193 Maxmem = powerpc_btop(phys_avail[i + 1]); 1194 1195 debugf("Maxmem = 0x%08lx\n", Maxmem); 1196 debugf("phys_avail_count = %d\n", phys_avail_count); 1197 debugf("physsz = 0x%08x physmem = %ld (0x%08lx)\n", physsz, physmem, 1198 physmem); 1199 1200 /*******************************************************/ 1201 /* Initialize (statically allocated) kernel pmap. */ 1202 /*******************************************************/ 1203 PMAP_LOCK_INIT(kernel_pmap); 1204 kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE; 1205 1206 debugf("kernel_pmap = 0x%08x\n", (uint32_t)kernel_pmap); 1207 debugf("kptbl_min = %d, kernel_ptbls = %d\n", kptbl_min, kernel_ptbls); 1208 debugf("kernel pdir range: 0x%08x - 0x%08x\n", 1209 kptbl_min * PDIR_SIZE, (kptbl_min + kernel_ptbls) * PDIR_SIZE - 1); 1210 1211 /* Initialize kernel pdir */ 1212 for (i = 0; i < kernel_ptbls; i++) 1213 kernel_pmap->pm_pdir[kptbl_min + i] = 1214 (pte_t *)(kernel_pdir + (i * PAGE_SIZE * PTBL_PAGES)); 1215 1216 for (i = 0; i < MAXCPU; i++) { 1217 kernel_pmap->pm_tid[i] = TID_KERNEL; 1218 1219 /* Initialize each CPU's tidbusy entry 0 with kernel_pmap */ 1220 tidbusy[i][0] = kernel_pmap; 1221 } 1222 1223 /* 1224 * Fill in PTEs covering kernel code and data. They are not required 1225 * for address translation, as this area is covered by static TLB1 1226 * entries, but for pte_vatopa() to work correctly with kernel area 1227 * addresses. 
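	 * For instance, pte_vatopa(mmu, kernel_pmap, KERNBASE + off) will
	 * then resolve to kernload + off for any offset within the mapped
	 * kernel image.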
1228 */ 1229 for (va = KERNBASE; va < data_end; va += PAGE_SIZE) { 1230 pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]); 1231 pte->rpn = kernload + (va - KERNBASE); 1232 pte->flags = PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | 1233 PTE_VALID; 1234 } 1235 /* Mark kernel_pmap active on all CPUs */ 1236 kernel_pmap->pm_active = ~0; 1237 1238 /*******************************************************/ 1239 /* Final setup */ 1240 /*******************************************************/ 1241 1242 /* Enter kstack0 into kernel map, provide guard page */ 1243 kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE; 1244 thread0.td_kstack = kstack0; 1245 thread0.td_kstack_pages = KSTACK_PAGES; 1246 1247 debugf("kstack_sz = 0x%08x\n", kstack0_sz); 1248 debugf("kstack0_phys at 0x%08x - 0x%08x\n", 1249 kstack0_phys, kstack0_phys + kstack0_sz); 1250 debugf("kstack0 at 0x%08x - 0x%08x\n", kstack0, kstack0 + kstack0_sz); 1251 1252 virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz; 1253 for (i = 0; i < KSTACK_PAGES; i++) { 1254 mmu_booke_kenter(mmu, kstack0, kstack0_phys); 1255 kstack0 += PAGE_SIZE; 1256 kstack0_phys += PAGE_SIZE; 1257 } 1258 1259 debugf("virtual_avail = %08x\n", virtual_avail); 1260 debugf("virtual_end = %08x\n", virtual_end); 1261 1262 debugf("mmu_booke_bootstrap: exit\n"); 1263 } 1264 1265 void 1266 pmap_bootstrap_ap(volatile uint32_t *trcp __unused) 1267 { 1268 int i; 1269 1270 /* 1271 * Finish TLB1 configuration: the BSP already set up its TLB1 and we 1272 * have the snapshot of its contents in the s/w tlb1[] table, so use 1273 * these values directly to (re)program AP's TLB1 hardware. 1274 */ 1275 for (i = 0; i < tlb1_idx; i ++) { 1276 /* Skip invalid entries */ 1277 if (!(tlb1[i].mas1 & MAS1_VALID)) 1278 continue; 1279 1280 tlb1_write_entry(i); 1281 } 1282 1283 set_mas4_defaults(); 1284 } 1285 1286 /* 1287 * Get the physical page address for the given pmap/virtual address. 1288 */ 1289 static vm_paddr_t 1290 mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va) 1291 { 1292 vm_paddr_t pa; 1293 1294 PMAP_LOCK(pmap); 1295 pa = pte_vatopa(mmu, pmap, va); 1296 PMAP_UNLOCK(pmap); 1297 1298 return (pa); 1299 } 1300 1301 /* 1302 * Extract the physical page address associated with the given 1303 * kernel virtual address. 1304 */ 1305 static vm_paddr_t 1306 mmu_booke_kextract(mmu_t mmu, vm_offset_t va) 1307 { 1308 1309 return (pte_vatopa(mmu, kernel_pmap, va)); 1310 } 1311 1312 /* 1313 * Initialize the pmap module. 1314 * Called by vm_init, to initialize any structures that the pmap 1315 * system needs to map virtual memory. 1316 */ 1317 static void 1318 mmu_booke_init(mmu_t mmu) 1319 { 1320 int shpgperproc = PMAP_SHPGPERPROC; 1321 1322 /* 1323 * Initialize the address space (zone) for the pv entries. Set a 1324 * high water mark so that the system can recover from excessive 1325 * numbers of pv entries. 1326 */ 1327 pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL, 1328 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); 1329 1330 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); 1331 pv_entry_max = shpgperproc * maxproc + cnt.v_page_count; 1332 1333 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max); 1334 pv_entry_high_water = 9 * (pv_entry_max / 10); 1335 1336 uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max); 1337 1338 /* Pre-fill pvzone with initial number of pv entries. */ 1339 uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN); 1340 1341 /* Initialize ptbl allocation. 
*/ 1342 ptbl_init(); 1343 } 1344 1345 /* 1346 * Map a list of wired pages into kernel virtual address space. This is 1347 * intended for temporary mappings which do not need page modification or 1348 * references recorded. Existing mappings in the region are overwritten. 1349 */ 1350 static void 1351 mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count) 1352 { 1353 vm_offset_t va; 1354 1355 va = sva; 1356 while (count-- > 0) { 1357 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m)); 1358 va += PAGE_SIZE; 1359 m++; 1360 } 1361 } 1362 1363 /* 1364 * Remove page mappings from kernel virtual address space. Intended for 1365 * temporary mappings entered by mmu_booke_qenter. 1366 */ 1367 static void 1368 mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count) 1369 { 1370 vm_offset_t va; 1371 1372 va = sva; 1373 while (count-- > 0) { 1374 mmu_booke_kremove(mmu, va); 1375 va += PAGE_SIZE; 1376 } 1377 } 1378 1379 /* 1380 * Map a wired page into kernel virtual address space. 1381 */ 1382 static void 1383 mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa) 1384 { 1385 unsigned int pdir_idx = PDIR_IDX(va); 1386 unsigned int ptbl_idx = PTBL_IDX(va); 1387 uint32_t flags; 1388 pte_t *pte; 1389 1390 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && 1391 (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va")); 1392 1393 flags = 0; 1394 flags |= (PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID); 1395 flags |= PTE_M; 1396 1397 pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]); 1398 1399 mtx_lock_spin(&tlbivax_mutex); 1400 tlb_miss_lock(); 1401 1402 if (PTE_ISVALID(pte)) { 1403 1404 CTR1(KTR_PMAP, "%s: replacing entry!", __func__); 1405 1406 /* Flush entry from TLB0 */ 1407 tlb0_flush_entry(va); 1408 } 1409 1410 pte->rpn = pa & ~PTE_PA_MASK; 1411 pte->flags = flags; 1412 1413 //debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x " 1414 // "pa=0x%08x rpn=0x%08x flags=0x%08x\n", 1415 // pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags); 1416 1417 /* Flush the real memory from the instruction cache. */ 1418 if ((flags & (PTE_I | PTE_G)) == 0) { 1419 __syncicache((void *)va, PAGE_SIZE); 1420 } 1421 1422 tlb_miss_unlock(); 1423 mtx_unlock_spin(&tlbivax_mutex); 1424 } 1425 1426 /* 1427 * Remove a page from kernel page table. 1428 */ 1429 static void 1430 mmu_booke_kremove(mmu_t mmu, vm_offset_t va) 1431 { 1432 unsigned int pdir_idx = PDIR_IDX(va); 1433 unsigned int ptbl_idx = PTBL_IDX(va); 1434 pte_t *pte; 1435 1436 // CTR2(KTR_PMAP,("%s: s (va = 0x%08x)\n", __func__, va)); 1437 1438 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && 1439 (va <= VM_MAX_KERNEL_ADDRESS)), 1440 ("mmu_booke_kremove: invalid va")); 1441 1442 pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]); 1443 1444 if (!PTE_ISVALID(pte)) { 1445 1446 CTR1(KTR_PMAP, "%s: invalid pte", __func__); 1447 1448 return; 1449 } 1450 1451 mtx_lock_spin(&tlbivax_mutex); 1452 tlb_miss_lock(); 1453 1454 /* Invalidate entry in TLB0, update PTE. */ 1455 tlb0_flush_entry(va); 1456 pte->flags = 0; 1457 pte->rpn = 0; 1458 1459 tlb_miss_unlock(); 1460 mtx_unlock_spin(&tlbivax_mutex); 1461 } 1462 1463 /* 1464 * Initialize pmap associated with process 0. 1465 */ 1466 static void 1467 mmu_booke_pinit0(mmu_t mmu, pmap_t pmap) 1468 { 1469 1470 mmu_booke_pinit(mmu, pmap); 1471 PCPU_SET(curpmap, pmap); 1472 } 1473 1474 /* 1475 * Initialize a preallocated and zeroed pmap structure, 1476 * such as one in a vmspace structure. 
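 *
 * Note that no TID is assigned here; one is allocated lazily by
 * mmu_booke_activate() the first time the pmap runs on a given CPU.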
1477 */ 1478 static void 1479 mmu_booke_pinit(mmu_t mmu, pmap_t pmap) 1480 { 1481 int i; 1482 1483 CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap, 1484 curthread->td_proc->p_pid, curthread->td_proc->p_comm); 1485 1486 KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap")); 1487 1488 PMAP_LOCK_INIT(pmap); 1489 for (i = 0; i < MAXCPU; i++) 1490 pmap->pm_tid[i] = TID_NONE; 1491 pmap->pm_active = 0; 1492 bzero(&pmap->pm_stats, sizeof(pmap->pm_stats)); 1493 bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES); 1494 TAILQ_INIT(&pmap->pm_ptbl_list); 1495 } 1496 1497 /* 1498 * Release any resources held by the given physical map. 1499 * Called when a pmap initialized by mmu_booke_pinit is being released. 1500 * Should only be called if the map contains no valid mappings. 1501 */ 1502 static void 1503 mmu_booke_release(mmu_t mmu, pmap_t pmap) 1504 { 1505 1506 printf("mmu_booke_release: s\n"); 1507 1508 KASSERT(pmap->pm_stats.resident_count == 0, 1509 ("pmap_release: pmap resident count %ld != 0", 1510 pmap->pm_stats.resident_count)); 1511 1512 PMAP_LOCK_DESTROY(pmap); 1513 } 1514 1515 /* 1516 * Insert the given physical page at the specified virtual address in the 1517 * target physical map with the protection requested. If specified the page 1518 * will be wired down. 1519 */ 1520 static void 1521 mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1522 vm_prot_t prot, boolean_t wired) 1523 { 1524 1525 vm_page_lock_queues(); 1526 PMAP_LOCK(pmap); 1527 mmu_booke_enter_locked(mmu, pmap, va, m, prot, wired); 1528 vm_page_unlock_queues(); 1529 PMAP_UNLOCK(pmap); 1530 } 1531 1532 static void 1533 mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1534 vm_prot_t prot, boolean_t wired) 1535 { 1536 pte_t *pte; 1537 vm_paddr_t pa; 1538 uint32_t flags; 1539 int su, sync; 1540 1541 pa = VM_PAGE_TO_PHYS(m); 1542 su = (pmap == kernel_pmap); 1543 sync = 0; 1544 1545 //debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x " 1546 // "pa=0x%08x prot=0x%08x wired=%d)\n", 1547 // (u_int32_t)pmap, su, pmap->pm_tid, 1548 // (u_int32_t)m, va, pa, prot, wired); 1549 1550 if (su) { 1551 KASSERT(((va >= virtual_avail) && 1552 (va <= VM_MAX_KERNEL_ADDRESS)), 1553 ("mmu_booke_enter_locked: kernel pmap, non kernel va")); 1554 } else { 1555 KASSERT((va <= VM_MAXUSER_ADDRESS), 1556 ("mmu_booke_enter_locked: user pmap, non user va")); 1557 } 1558 1559 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1560 1561 /* 1562 * If there is an existing mapping, and the physical address has not 1563 * changed, must be protection or wiring change. 1564 */ 1565 if (((pte = pte_find(mmu, pmap, va)) != NULL) && 1566 (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) { 1567 1568 /* 1569 * Before actually updating pte->flags we calculate and 1570 * prepare its new value in a helper var. 1571 */ 1572 flags = pte->flags; 1573 flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED); 1574 1575 /* Wiring change, just update stats. */ 1576 if (wired) { 1577 if (!PTE_ISWIRED(pte)) { 1578 flags |= PTE_WIRED; 1579 pmap->pm_stats.wired_count++; 1580 } 1581 } else { 1582 if (PTE_ISWIRED(pte)) { 1583 flags &= ~PTE_WIRED; 1584 pmap->pm_stats.wired_count--; 1585 } 1586 } 1587 1588 if (prot & VM_PROT_WRITE) { 1589 /* Add write permissions. */ 1590 flags |= PTE_SW; 1591 if (!su) 1592 flags |= PTE_UW; 1593 1594 vm_page_flag_set(m, PG_WRITEABLE); 1595 } else { 1596 /* Handle modified pages, sense modify status. 
*/ 1597 1598 /* 1599 * The PTE_MODIFIED flag could be set by underlying 1600 * TLB misses since we last read it (above), possibly 1601 * other CPUs could update it so we check in the PTE 1602 * directly rather than rely on that saved local flags 1603 * copy. 1604 */ 1605 if (PTE_ISMODIFIED(pte)) 1606 vm_page_dirty(m); 1607 } 1608 1609 if (prot & VM_PROT_EXECUTE) { 1610 flags |= PTE_SX; 1611 if (!su) 1612 flags |= PTE_UX; 1613 1614 /* 1615 * Check existing flags for execute permissions: if we 1616 * are turning execute permissions on, icache should 1617 * be flushed. 1618 */ 1619 if ((flags & (PTE_UX | PTE_SX)) == 0) 1620 sync++; 1621 } 1622 1623 flags &= ~PTE_REFERENCED; 1624 1625 /* 1626 * The new flags value is all calculated -- only now actually 1627 * update the PTE. 1628 */ 1629 mtx_lock_spin(&tlbivax_mutex); 1630 tlb_miss_lock(); 1631 1632 tlb0_flush_entry(va); 1633 pte->flags = flags; 1634 1635 tlb_miss_unlock(); 1636 mtx_unlock_spin(&tlbivax_mutex); 1637 1638 } else { 1639 /* 1640 * If there is an existing mapping, but it's for a different 1641 * physical address, pte_enter() will delete the old mapping. 1642 */ 1643 //if ((pte != NULL) && PTE_ISVALID(pte)) 1644 // debugf("mmu_booke_enter_locked: replace\n"); 1645 //else 1646 // debugf("mmu_booke_enter_locked: new\n"); 1647 1648 /* Now set up the flags and install the new mapping. */ 1649 flags = (PTE_SR | PTE_VALID); 1650 flags |= PTE_M; 1651 1652 if (!su) 1653 flags |= PTE_UR; 1654 1655 if (prot & VM_PROT_WRITE) { 1656 flags |= PTE_SW; 1657 if (!su) 1658 flags |= PTE_UW; 1659 1660 vm_page_flag_set(m, PG_WRITEABLE); 1661 } 1662 1663 if (prot & VM_PROT_EXECUTE) { 1664 flags |= PTE_SX; 1665 if (!su) 1666 flags |= PTE_UX; 1667 } 1668 1669 /* If its wired update stats. */ 1670 if (wired) { 1671 pmap->pm_stats.wired_count++; 1672 flags |= PTE_WIRED; 1673 } 1674 1675 pte_enter(mmu, pmap, m, va, flags); 1676 1677 /* Flush the real memory from the instruction cache. */ 1678 if (prot & VM_PROT_EXECUTE) 1679 sync++; 1680 } 1681 1682 if (sync && (su || pmap == PCPU_GET(curpmap))) { 1683 __syncicache((void *)va, PAGE_SIZE); 1684 sync = 0; 1685 } 1686 } 1687 1688 /* 1689 * Maps a sequence of resident pages belonging to the same object. 1690 * The sequence begins with the given page m_start. This page is 1691 * mapped at the given virtual address start. Each subsequent page is 1692 * mapped at a virtual address that is offset from start by the same 1693 * amount as the page is offset from m_start within the object. The 1694 * last page in the sequence is the page with the largest offset from 1695 * m_start that can be mapped at a virtual address less than the given 1696 * virtual address end. Not every virtual page between start and end 1697 * is mapped; only those for which a resident page exists with the 1698 * corresponding offset from m_start are mapped. 
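 *
 * For example (assuming 4KB pages), with start = 0xa0000000,
 * end = 0xa0004000 and m_start at pindex 10, resident pages with
 * pindexes 10..13 are entered at 0xa0000000..0xa0003000 respectively,
 * while pages outside that pindex window are skipped.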
1699 */ 1700 static void 1701 mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start, 1702 vm_offset_t end, vm_page_t m_start, vm_prot_t prot) 1703 { 1704 vm_page_t m; 1705 vm_pindex_t diff, psize; 1706 1707 psize = atop(end - start); 1708 m = m_start; 1709 PMAP_LOCK(pmap); 1710 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 1711 mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m, 1712 prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1713 m = TAILQ_NEXT(m, listq); 1714 } 1715 PMAP_UNLOCK(pmap); 1716 } 1717 1718 static void 1719 mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1720 vm_prot_t prot) 1721 { 1722 1723 PMAP_LOCK(pmap); 1724 mmu_booke_enter_locked(mmu, pmap, va, m, 1725 prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1726 PMAP_UNLOCK(pmap); 1727 } 1728 1729 /* 1730 * Remove the given range of addresses from the specified map. 1731 * 1732 * It is assumed that the start and end are properly rounded to the page size. 1733 */ 1734 static void 1735 mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva) 1736 { 1737 pte_t *pte; 1738 uint8_t hold_flag; 1739 1740 int su = (pmap == kernel_pmap); 1741 1742 //debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n", 1743 // su, (u_int32_t)pmap, pmap->pm_tid, va, endva); 1744 1745 if (su) { 1746 KASSERT(((va >= virtual_avail) && 1747 (va <= VM_MAX_KERNEL_ADDRESS)), 1748 ("mmu_booke_remove: kernel pmap, non kernel va")); 1749 } else { 1750 KASSERT((va <= VM_MAXUSER_ADDRESS), 1751 ("mmu_booke_remove: user pmap, non user va")); 1752 } 1753 1754 if (PMAP_REMOVE_DONE(pmap)) { 1755 //debugf("mmu_booke_remove: e (empty)\n"); 1756 return; 1757 } 1758 1759 hold_flag = PTBL_HOLD_FLAG(pmap); 1760 //debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag); 1761 1762 vm_page_lock_queues(); 1763 PMAP_LOCK(pmap); 1764 for (; va < endva; va += PAGE_SIZE) { 1765 pte = pte_find(mmu, pmap, va); 1766 if ((pte != NULL) && PTE_ISVALID(pte)) 1767 pte_remove(mmu, pmap, va, hold_flag); 1768 } 1769 PMAP_UNLOCK(pmap); 1770 vm_page_unlock_queues(); 1771 1772 //debugf("mmu_booke_remove: e\n"); 1773 } 1774 1775 /* 1776 * Remove physical page from all pmaps in which it resides. 1777 */ 1778 static void 1779 mmu_booke_remove_all(mmu_t mmu, vm_page_t m) 1780 { 1781 pv_entry_t pv, pvn; 1782 uint8_t hold_flag; 1783 1784 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1785 1786 for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) { 1787 pvn = TAILQ_NEXT(pv, pv_link); 1788 1789 PMAP_LOCK(pv->pv_pmap); 1790 hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap); 1791 pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag); 1792 PMAP_UNLOCK(pv->pv_pmap); 1793 } 1794 vm_page_flag_clear(m, PG_WRITEABLE); 1795 } 1796 1797 /* 1798 * Map a range of physical addresses into kernel virtual address space. 1799 */ 1800 static vm_offset_t 1801 mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start, 1802 vm_offset_t pa_end, int prot) 1803 { 1804 vm_offset_t sva = *virt; 1805 vm_offset_t va = sva; 1806 1807 //debugf("mmu_booke_map: s (sva = 0x%08x pa_start = 0x%08x pa_end = 0x%08x)\n", 1808 // sva, pa_start, pa_end); 1809 1810 while (pa_start < pa_end) { 1811 mmu_booke_kenter(mmu, va, pa_start); 1812 va += PAGE_SIZE; 1813 pa_start += PAGE_SIZE; 1814 } 1815 *virt = va; 1816 1817 //debugf("mmu_booke_map: e (va = 0x%08x)\n", va); 1818 return (sva); 1819 } 1820 1821 /* 1822 * The pmap must be activated before it's address space can be accessed in any 1823 * way. 
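 * Activation marks the pmap active on the current CPU, allocates a TID
 * for it on first use and loads that TID into the PID0 SPR, so that
 * subsequent TLB misses on this CPU are tagged with it.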
1824 */ 1825 static void 1826 mmu_booke_activate(mmu_t mmu, struct thread *td) 1827 { 1828 pmap_t pmap; 1829 1830 pmap = &td->td_proc->p_vmspace->vm_pmap; 1831 1832 CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x%08x)", 1833 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap); 1834 1835 KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!")); 1836 1837 mtx_lock_spin(&sched_lock); 1838 1839 atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask)); 1840 PCPU_SET(curpmap, pmap); 1841 1842 if (pmap->pm_tid[PCPU_GET(cpuid)] == TID_NONE) 1843 tid_alloc(pmap); 1844 1845 /* Load PID0 register with pmap tid value. */ 1846 mtspr(SPR_PID0, pmap->pm_tid[PCPU_GET(cpuid)]); 1847 __asm __volatile("isync"); 1848 1849 mtx_unlock_spin(&sched_lock); 1850 1851 CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__, 1852 pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm); 1853 } 1854 1855 /* 1856 * Deactivate the specified process's address space. 1857 */ 1858 static void 1859 mmu_booke_deactivate(mmu_t mmu, struct thread *td) 1860 { 1861 pmap_t pmap; 1862 1863 pmap = &td->td_proc->p_vmspace->vm_pmap; 1864 1865 CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%08x", 1866 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap); 1867 1868 atomic_clear_int(&pmap->pm_active, PCPU_GET(cpumask)); 1869 PCPU_SET(curpmap, NULL); 1870 } 1871 1872 /* 1873 * Copy the range specified by src_addr/len 1874 * from the source map to the range dst_addr/len 1875 * in the destination map. 1876 * 1877 * This routine is only advisory and need not do anything. 1878 */ 1879 static void 1880 mmu_booke_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap, 1881 vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr) 1882 { 1883 1884 } 1885 1886 /* 1887 * Set the physical protection on the specified range of this map as requested. 1888 */ 1889 static void 1890 mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 1891 vm_prot_t prot) 1892 { 1893 vm_offset_t va; 1894 vm_page_t m; 1895 pte_t *pte; 1896 1897 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1898 mmu_booke_remove(mmu, pmap, sva, eva); 1899 return; 1900 } 1901 1902 if (prot & VM_PROT_WRITE) 1903 return; 1904 1905 vm_page_lock_queues(); 1906 PMAP_LOCK(pmap); 1907 for (va = sva; va < eva; va += PAGE_SIZE) { 1908 if ((pte = pte_find(mmu, pmap, va)) != NULL) { 1909 if (PTE_ISVALID(pte)) { 1910 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 1911 1912 mtx_lock_spin(&tlbivax_mutex); 1913 tlb_miss_lock(); 1914 1915 /* Handle modified pages. */ 1916 if (PTE_ISMODIFIED(pte)) 1917 vm_page_dirty(m); 1918 1919 /* Referenced pages. */ 1920 if (PTE_ISREFERENCED(pte)) 1921 vm_page_flag_set(m, PG_REFERENCED); 1922 1923 tlb0_flush_entry(va); 1924 pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED | 1925 PTE_REFERENCED); 1926 1927 tlb_miss_unlock(); 1928 mtx_unlock_spin(&tlbivax_mutex); 1929 } 1930 } 1931 } 1932 PMAP_UNLOCK(pmap); 1933 vm_page_unlock_queues(); 1934 } 1935 1936 /* 1937 * Clear the write and modified bits in each of the given page's mappings. 
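 * Modified/referenced state recorded in the PTEs is transferred to the
 * vm_page (vm_page_dirty(), PG_REFERENCED) before the UW/SW bits are
 * cleared, so no dirty information is lost.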
1938 */ 1939 static void 1940 mmu_booke_remove_write(mmu_t mmu, vm_page_t m) 1941 { 1942 pv_entry_t pv; 1943 pte_t *pte; 1944 1945 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1946 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 || 1947 (m->flags & PG_WRITEABLE) == 0) 1948 return; 1949 1950 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 1951 PMAP_LOCK(pv->pv_pmap); 1952 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) { 1953 if (PTE_ISVALID(pte)) { 1954 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 1955 1956 mtx_lock_spin(&tlbivax_mutex); 1957 tlb_miss_lock(); 1958 1959 /* Handle modified pages. */ 1960 if (PTE_ISMODIFIED(pte)) 1961 vm_page_dirty(m); 1962 1963 /* Referenced pages. */ 1964 if (PTE_ISREFERENCED(pte)) 1965 vm_page_flag_set(m, PG_REFERENCED); 1966 1967 /* Flush mapping from TLB0. */ 1968 pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED | 1969 PTE_REFERENCED); 1970 1971 tlb_miss_unlock(); 1972 mtx_unlock_spin(&tlbivax_mutex); 1973 } 1974 } 1975 PMAP_UNLOCK(pv->pv_pmap); 1976 } 1977 vm_page_flag_clear(m, PG_WRITEABLE); 1978 } 1979 1980 static void 1981 mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz) 1982 { 1983 pte_t *pte; 1984 pmap_t pmap; 1985 vm_page_t m; 1986 vm_offset_t addr; 1987 vm_paddr_t pa; 1988 int active, valid; 1989 1990 va = trunc_page(va); 1991 sz = round_page(sz); 1992 1993 vm_page_lock_queues(); 1994 pmap = PCPU_GET(curpmap); 1995 active = (pm == kernel_pmap || pm == pmap) ? 1 : 0; 1996 while (sz > 0) { 1997 PMAP_LOCK(pm); 1998 pte = pte_find(mmu, pm, va); 1999 valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0; 2000 if (valid) 2001 pa = PTE_PA(pte); 2002 PMAP_UNLOCK(pm); 2003 if (valid) { 2004 if (!active) { 2005 /* Create a mapping in the active pmap. */ 2006 addr = 0; 2007 m = PHYS_TO_VM_PAGE(pa); 2008 PMAP_LOCK(pmap); 2009 pte_enter(mmu, pmap, m, addr, 2010 PTE_SR | PTE_VALID | PTE_UR); 2011 __syncicache((void *)addr, PAGE_SIZE); 2012 pte_remove(mmu, pmap, addr, PTBL_UNHOLD); 2013 PMAP_UNLOCK(pmap); 2014 } else 2015 __syncicache((void *)va, PAGE_SIZE); 2016 } 2017 va += PAGE_SIZE; 2018 sz -= PAGE_SIZE; 2019 } 2020 vm_page_unlock_queues(); 2021 } 2022 2023 /* 2024 * Atomically extract and hold the physical page with the given 2025 * pmap and virtual address pair if that mapping permits the given 2026 * protection. 2027 */ 2028 static vm_page_t 2029 mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, 2030 vm_prot_t prot) 2031 { 2032 pte_t *pte; 2033 vm_page_t m; 2034 uint32_t pte_wbit; 2035 2036 m = NULL; 2037 vm_page_lock_queues(); 2038 PMAP_LOCK(pmap); 2039 2040 pte = pte_find(mmu, pmap, va); 2041 if ((pte != NULL) && PTE_ISVALID(pte)) { 2042 if (pmap == kernel_pmap) 2043 pte_wbit = PTE_SW; 2044 else 2045 pte_wbit = PTE_UW; 2046 2047 if ((pte->flags & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) { 2048 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 2049 vm_page_hold(m); 2050 } 2051 } 2052 2053 vm_page_unlock_queues(); 2054 PMAP_UNLOCK(pmap); 2055 return (m); 2056 } 2057 2058 /* 2059 * Initialize a vm_page's machine-dependent fields. 2060 */ 2061 static void 2062 mmu_booke_page_init(mmu_t mmu, vm_page_t m) 2063 { 2064 2065 TAILQ_INIT(&m->md.pv_list); 2066 } 2067 2068 /* 2069 * mmu_booke_zero_page_area zeros the specified hardware page by 2070 * mapping it into virtual memory and using bzero to clear 2071 * its contents. 2072 * 2073 * off and size must reside within a single page. 
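*
* A hypothetical bounds check for the XXX note in the body below (not
* present in the code, shown only as a sketch):
*
*	KASSERT(off >= 0 && size >= 0 && off + size <= PAGE_SIZE,
*	    ("mmu_booke_zero_page_area: off/size crosses a page boundary"));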
2074 */
2075 static void
2076 mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
2077 {
2078 vm_offset_t va;
2079 
2080 /* XXX KASSERT off and size are within a single page? */
2081 
2082 mtx_lock(&zero_page_mutex);
2083 va = zero_page_va;
2084 
2085 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
2086 bzero((caddr_t)va + off, size);
2087 mmu_booke_kremove(mmu, va);
2088 
2089 mtx_unlock(&zero_page_mutex);
2090 }
2091 
2092 /*
2093 * mmu_booke_zero_page zeros the specified hardware page.
2094 */
2095 static void
2096 mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
2097 {
2098 
2099 mmu_booke_zero_page_area(mmu, m, 0, PAGE_SIZE);
2100 }
2101 
2102 /*
2103 * mmu_booke_copy_page copies the specified (machine independent) page by
2104 * mapping the page into virtual memory and using memcpy to copy the page,
2105 * one machine dependent page at a time.
2106 */
2107 static void
2108 mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
2109 {
2110 vm_offset_t sva, dva;
2111 
2112 sva = copy_page_src_va;
2113 dva = copy_page_dst_va;
2114 
2115 mtx_lock(&copy_page_mutex);
2116 mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm));
2117 mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm));
2118 memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
2119 mmu_booke_kremove(mmu, dva);
2120 mmu_booke_kremove(mmu, sva);
2121 mtx_unlock(&copy_page_mutex);
2122 }
2123 
2124 /*
2125 * mmu_booke_zero_page_idle zeros the specified hardware page by mapping it
2126 * into virtual memory and using bzero to clear its contents. This is intended
2127 * to be called from the vm_pagezero process only and outside of Giant. No
2128 * lock is required.
2129 */
2130 static void
2131 mmu_booke_zero_page_idle(mmu_t mmu, vm_page_t m)
2132 {
2133 vm_offset_t va;
2134 
2135 va = zero_page_idle_va;
2136 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
2137 bzero((caddr_t)va, PAGE_SIZE);
2138 mmu_booke_kremove(mmu, va);
2139 }
2140 
2141 /*
2142 * Return whether or not the specified physical page was modified
2143 * in any physical map.
2144 */
2145 static boolean_t
2146 mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
2147 {
2148 pte_t *pte;
2149 pv_entry_t pv;
2150 
2151 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2152 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
2153 return (FALSE);
2154 
2155 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2156 PMAP_LOCK(pv->pv_pmap);
2157 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
2158 if (!PTE_ISVALID(pte))
2159 goto make_sure_to_unlock;
2160 
2161 if (PTE_ISMODIFIED(pte)) {
2162 PMAP_UNLOCK(pv->pv_pmap);
2163 return (TRUE);
2164 }
2165 }
2166 make_sure_to_unlock:
2167 PMAP_UNLOCK(pv->pv_pmap);
2168 }
2169 return (FALSE);
2170 }
2171 
2172 /*
2173 * Return whether or not the specified virtual address is eligible
2174 * for prefault.
2175 */
2176 static boolean_t
2177 mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
2178 {
2179 
2180 return (FALSE);
2181 }
2182 
2183 /*
2184 * Clear the modify bits on the specified physical page.
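*
* Callers hold the page queues lock, matching the mtx_assert() below; an
* illustrative MI-level invocation (sketch only, dispatched here via mmu_if):
*
*	vm_page_lock_queues();
*	pmap_clear_modify(m);
*	vm_page_unlock_queues();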
2185 */ 2186 static void 2187 mmu_booke_clear_modify(mmu_t mmu, vm_page_t m) 2188 { 2189 pte_t *pte; 2190 pv_entry_t pv; 2191 2192 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2193 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 2194 return; 2195 2196 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2197 PMAP_LOCK(pv->pv_pmap); 2198 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) { 2199 if (!PTE_ISVALID(pte)) 2200 goto make_sure_to_unlock; 2201 2202 mtx_lock_spin(&tlbivax_mutex); 2203 tlb_miss_lock(); 2204 2205 if (pte->flags & (PTE_SW | PTE_UW | PTE_MODIFIED)) { 2206 tlb0_flush_entry(pv->pv_va); 2207 pte->flags &= ~(PTE_SW | PTE_UW | PTE_MODIFIED | 2208 PTE_REFERENCED); 2209 } 2210 2211 tlb_miss_unlock(); 2212 mtx_unlock_spin(&tlbivax_mutex); 2213 } 2214 make_sure_to_unlock: 2215 PMAP_UNLOCK(pv->pv_pmap); 2216 } 2217 } 2218 2219 /* 2220 * Return a count of reference bits for a page, clearing those bits. 2221 * It is not necessary for every reference bit to be cleared, but it 2222 * is necessary that 0 only be returned when there are truly no 2223 * reference bits set. 2224 * 2225 * XXX: The exact number of bits to check and clear is a matter that 2226 * should be tested and standardized at some point in the future for 2227 * optimal aging of shared pages. 2228 */ 2229 static int 2230 mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m) 2231 { 2232 pte_t *pte; 2233 pv_entry_t pv; 2234 int count; 2235 2236 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2237 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 2238 return (0); 2239 2240 count = 0; 2241 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2242 PMAP_LOCK(pv->pv_pmap); 2243 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) { 2244 if (!PTE_ISVALID(pte)) 2245 goto make_sure_to_unlock; 2246 2247 if (PTE_ISREFERENCED(pte)) { 2248 mtx_lock_spin(&tlbivax_mutex); 2249 tlb_miss_lock(); 2250 2251 tlb0_flush_entry(pv->pv_va); 2252 pte->flags &= ~PTE_REFERENCED; 2253 2254 tlb_miss_unlock(); 2255 mtx_unlock_spin(&tlbivax_mutex); 2256 2257 if (++count > 4) { 2258 PMAP_UNLOCK(pv->pv_pmap); 2259 break; 2260 } 2261 } 2262 } 2263 make_sure_to_unlock: 2264 PMAP_UNLOCK(pv->pv_pmap); 2265 } 2266 return (count); 2267 } 2268 2269 /* 2270 * Clear the reference bit on the specified physical page. 2271 */ 2272 static void 2273 mmu_booke_clear_reference(mmu_t mmu, vm_page_t m) 2274 { 2275 pte_t *pte; 2276 pv_entry_t pv; 2277 2278 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2279 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 2280 return; 2281 2282 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2283 PMAP_LOCK(pv->pv_pmap); 2284 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) { 2285 if (!PTE_ISVALID(pte)) 2286 goto make_sure_to_unlock; 2287 2288 if (PTE_ISREFERENCED(pte)) { 2289 mtx_lock_spin(&tlbivax_mutex); 2290 tlb_miss_lock(); 2291 2292 tlb0_flush_entry(pv->pv_va); 2293 pte->flags &= ~PTE_REFERENCED; 2294 2295 tlb_miss_unlock(); 2296 mtx_unlock_spin(&tlbivax_mutex); 2297 } 2298 } 2299 make_sure_to_unlock: 2300 PMAP_UNLOCK(pv->pv_pmap); 2301 } 2302 } 2303 2304 /* 2305 * Change wiring attribute for a map/virtual-address pair. 
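*
* An illustrative caller (sketch): the MI fault code, e.g. vm_fault_unwire(),
* drops the wiring on a mapping with
*
*	pmap_change_wiring(pmap, va, FALSE);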
2306 */ 2307 static void 2308 mmu_booke_change_wiring(mmu_t mmu, pmap_t pmap, vm_offset_t va, boolean_t wired) 2309 { 2310 pte_t *pte;; 2311 2312 PMAP_LOCK(pmap); 2313 if ((pte = pte_find(mmu, pmap, va)) != NULL) { 2314 if (wired) { 2315 if (!PTE_ISWIRED(pte)) { 2316 pte->flags |= PTE_WIRED; 2317 pmap->pm_stats.wired_count++; 2318 } 2319 } else { 2320 if (PTE_ISWIRED(pte)) { 2321 pte->flags &= ~PTE_WIRED; 2322 pmap->pm_stats.wired_count--; 2323 } 2324 } 2325 } 2326 PMAP_UNLOCK(pmap); 2327 } 2328 2329 /* 2330 * Return true if the pmap's pv is one of the first 16 pvs linked to from this 2331 * page. This count may be changed upwards or downwards in the future; it is 2332 * only necessary that true be returned for a small subset of pmaps for proper 2333 * page aging. 2334 */ 2335 static boolean_t 2336 mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m) 2337 { 2338 pv_entry_t pv; 2339 int loops; 2340 2341 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2342 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 2343 return (FALSE); 2344 2345 loops = 0; 2346 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2347 if (pv->pv_pmap == pmap) 2348 return (TRUE); 2349 2350 if (++loops >= 16) 2351 break; 2352 } 2353 return (FALSE); 2354 } 2355 2356 /* 2357 * Return the number of managed mappings to the given physical page that are 2358 * wired. 2359 */ 2360 static int 2361 mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m) 2362 { 2363 pv_entry_t pv; 2364 pte_t *pte; 2365 int count = 0; 2366 2367 if ((m->flags & PG_FICTITIOUS) != 0) 2368 return (count); 2369 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2370 2371 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2372 PMAP_LOCK(pv->pv_pmap); 2373 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) 2374 if (PTE_ISVALID(pte) && PTE_ISWIRED(pte)) 2375 count++; 2376 PMAP_UNLOCK(pv->pv_pmap); 2377 } 2378 2379 return (count); 2380 } 2381 2382 static int 2383 mmu_booke_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size) 2384 { 2385 int i; 2386 vm_offset_t va; 2387 2388 /* 2389 * This currently does not work for entries that 2390 * overlap TLB1 entries. 2391 */ 2392 for (i = 0; i < tlb1_idx; i ++) { 2393 if (tlb1_iomapped(i, pa, size, &va) == 0) 2394 return (0); 2395 } 2396 2397 return (EFAULT); 2398 } 2399 2400 vm_offset_t 2401 mmu_booke_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs, 2402 vm_size_t *sz) 2403 { 2404 vm_paddr_t pa, ppa; 2405 vm_offset_t va; 2406 vm_size_t gran; 2407 2408 /* Raw physical memory dumps don't have a virtual address. */ 2409 if (md->md_vaddr == ~0UL) { 2410 /* We always map a 256MB page at 256M. */ 2411 gran = 256 * 1024 * 1024; 2412 pa = md->md_paddr + ofs; 2413 ppa = pa & ~(gran - 1); 2414 ofs = pa - ppa; 2415 va = gran; 2416 tlb1_set_entry(va, ppa, gran, _TLB_ENTRY_IO); 2417 if (*sz > (gran - ofs)) 2418 *sz = gran - ofs; 2419 return (va + ofs); 2420 } 2421 2422 /* Minidumps are based on virtual memory addresses. */ 2423 va = md->md_vaddr + ofs; 2424 if (va >= kernstart + kernsize) { 2425 gran = PAGE_SIZE - (va & PAGE_MASK); 2426 if (*sz > gran) 2427 *sz = gran; 2428 } 2429 return (va); 2430 } 2431 2432 void 2433 mmu_booke_dumpsys_unmap(mmu_t mmu, struct pmap_md *md, vm_size_t ofs, 2434 vm_offset_t va) 2435 { 2436 2437 /* Raw physical memory dumps don't have a virtual address. 
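*
* For the raw (non-minidump) case, mmu_booke_dumpsys_map() above wires a
* 256MB window at VA 256M; e.g. a hypothetical pa = 0x3012_3000 yields
* ppa = 0x3000_0000, ofs = 0x0012_3000 and a returned va = 0x1012_3000.
* The unmap side below then just retires that most recently added TLB1
* entry.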
*/ 2438 if (md->md_vaddr == ~0UL) { 2439 tlb1_idx--; 2440 tlb1[tlb1_idx].mas1 = 0; 2441 tlb1[tlb1_idx].mas2 = 0; 2442 tlb1[tlb1_idx].mas3 = 0; 2443 tlb1_write_entry(tlb1_idx); 2444 return; 2445 } 2446 2447 /* Minidumps are based on virtual memory addresses. */ 2448 /* Nothing to do... */ 2449 } 2450 2451 struct pmap_md * 2452 mmu_booke_scan_md(mmu_t mmu, struct pmap_md *prev) 2453 { 2454 static struct pmap_md md; 2455 struct bi_mem_region *mr; 2456 pte_t *pte; 2457 vm_offset_t va; 2458 2459 if (dumpsys_minidump) { 2460 md.md_paddr = ~0UL; /* Minidumps use virtual addresses. */ 2461 if (prev == NULL) { 2462 /* 1st: kernel .data and .bss. */ 2463 md.md_index = 1; 2464 md.md_vaddr = trunc_page((uintptr_t)_etext); 2465 md.md_size = round_page((uintptr_t)_end) - md.md_vaddr; 2466 return (&md); 2467 } 2468 switch (prev->md_index) { 2469 case 1: 2470 /* 2nd: msgbuf and tables (see pmap_bootstrap()). */ 2471 md.md_index = 2; 2472 md.md_vaddr = data_start; 2473 md.md_size = data_end - data_start; 2474 break; 2475 case 2: 2476 /* 3rd: kernel VM. */ 2477 va = prev->md_vaddr + prev->md_size; 2478 /* Find start of next chunk (from va). */ 2479 while (va < virtual_end) { 2480 /* Don't dump the buffer cache. */ 2481 if (va >= kmi.buffer_sva && 2482 va < kmi.buffer_eva) { 2483 va = kmi.buffer_eva; 2484 continue; 2485 } 2486 pte = pte_find(mmu, kernel_pmap, va); 2487 if (pte != NULL && PTE_ISVALID(pte)) 2488 break; 2489 va += PAGE_SIZE; 2490 } 2491 if (va < virtual_end) { 2492 md.md_vaddr = va; 2493 va += PAGE_SIZE; 2494 /* Find last page in chunk. */ 2495 while (va < virtual_end) { 2496 /* Don't run into the buffer cache. */ 2497 if (va == kmi.buffer_sva) 2498 break; 2499 pte = pte_find(mmu, kernel_pmap, va); 2500 if (pte == NULL || !PTE_ISVALID(pte)) 2501 break; 2502 va += PAGE_SIZE; 2503 } 2504 md.md_size = va - md.md_vaddr; 2505 break; 2506 } 2507 md.md_index = 3; 2508 /* FALLTHROUGH */ 2509 default: 2510 return (NULL); 2511 } 2512 } else { /* minidumps */ 2513 mr = bootinfo_mr(); 2514 if (prev == NULL) { 2515 /* first physical chunk. */ 2516 md.md_paddr = mr->mem_base; 2517 md.md_size = mr->mem_size; 2518 md.md_vaddr = ~0UL; 2519 md.md_index = 1; 2520 } else if (md.md_index < bootinfo->bi_mem_reg_no) { 2521 md.md_paddr = mr[md.md_index].mem_base; 2522 md.md_size = mr[md.md_index].mem_size; 2523 md.md_vaddr = ~0UL; 2524 md.md_index++; 2525 } else { 2526 /* There's no next physical chunk. */ 2527 return (NULL); 2528 } 2529 } 2530 2531 return (&md); 2532 } 2533 2534 /* 2535 * Map a set of physical memory pages into the kernel virtual address space. 2536 * Return a pointer to where it is mapped. This routine is intended to be used 2537 * for mapping device memory, NOT real memory. 2538 */ 2539 static void * 2540 mmu_booke_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size) 2541 { 2542 void *res; 2543 uintptr_t va; 2544 vm_size_t sz; 2545 2546 va = (pa >= 0x80000000) ? pa : (0xe2000000 + pa); 2547 res = (void *)va; 2548 2549 do { 2550 sz = 1 << (ilog2(size) & ~1); 2551 if (bootverbose) 2552 printf("Wiring VA=%x to PA=%x (size=%x), " 2553 "using TLB1[%d]\n", va, pa, sz, tlb1_idx); 2554 tlb1_set_entry(va, pa, sz, _TLB_ENTRY_IO); 2555 size -= sz; 2556 pa += sz; 2557 va += sz; 2558 } while (size > 0); 2559 2560 return (res); 2561 } 2562 2563 /* 2564 * 'Unmap' a range mapped by mmu_booke_mapdev(). 2565 */ 2566 static void 2567 mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size) 2568 { 2569 vm_offset_t base, offset; 2570 2571 /* 2572 * Unmap only if this is inside kernel virtual space. 
2573 */ 2574 if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) { 2575 base = trunc_page(va); 2576 offset = va & PAGE_MASK; 2577 size = roundup(offset + size, PAGE_SIZE); 2578 kmem_free(kernel_map, base, size); 2579 } 2580 } 2581 2582 /* 2583 * mmu_booke_object_init_pt preloads the ptes for a given object into the 2584 * specified pmap. This eliminates the blast of soft faults on process startup 2585 * and immediately after an mmap. 2586 */ 2587 static void 2588 mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr, 2589 vm_object_t object, vm_pindex_t pindex, vm_size_t size) 2590 { 2591 2592 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 2593 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, 2594 ("mmu_booke_object_init_pt: non-device object")); 2595 } 2596 2597 /* 2598 * Perform the pmap work for mincore. 2599 */ 2600 static int 2601 mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr) 2602 { 2603 2604 TODO; 2605 return (0); 2606 } 2607 2608 /**************************************************************************/ 2609 /* TID handling */ 2610 /**************************************************************************/ 2611 2612 /* 2613 * Allocate a TID. If necessary, steal one from someone else. 2614 * The new TID is flushed from the TLB before returning. 2615 */ 2616 static tlbtid_t 2617 tid_alloc(pmap_t pmap) 2618 { 2619 tlbtid_t tid; 2620 int thiscpu; 2621 2622 KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap")); 2623 2624 CTR2(KTR_PMAP, "%s: s (pmap = %p)", __func__, pmap); 2625 2626 thiscpu = PCPU_GET(cpuid); 2627 2628 tid = PCPU_GET(tid_next); 2629 if (tid > TID_MAX) 2630 tid = TID_MIN; 2631 PCPU_SET(tid_next, tid + 1); 2632 2633 /* If we are stealing TID then clear the relevant pmap's field */ 2634 if (tidbusy[thiscpu][tid] != NULL) { 2635 2636 CTR2(KTR_PMAP, "%s: warning: stealing tid %d", __func__, tid); 2637 2638 tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE; 2639 2640 /* Flush all entries from TLB0 matching this TID. */ 2641 tid_flush(tid); 2642 } 2643 2644 tidbusy[thiscpu][tid] = pmap; 2645 pmap->pm_tid[thiscpu] = tid; 2646 __asm __volatile("msync; isync"); 2647 2648 CTR3(KTR_PMAP, "%s: e (%02d next = %02d)", __func__, tid, 2649 PCPU_GET(tid_next)); 2650 2651 return (tid); 2652 } 2653 2654 /**************************************************************************/ 2655 /* TLB0 handling */ 2656 /**************************************************************************/ 2657 2658 static void 2659 tlb_print_entry(int i, uint32_t mas1, uint32_t mas2, uint32_t mas3, 2660 uint32_t mas7) 2661 { 2662 int as; 2663 char desc[3]; 2664 tlbtid_t tid; 2665 vm_size_t size; 2666 unsigned int tsize; 2667 2668 desc[2] = '\0'; 2669 if (mas1 & MAS1_VALID) 2670 desc[0] = 'V'; 2671 else 2672 desc[0] = ' '; 2673 2674 if (mas1 & MAS1_IPROT) 2675 desc[1] = 'P'; 2676 else 2677 desc[1] = ' '; 2678 2679 as = (mas1 & MAS1_TS_MASK) ? 1 : 0; 2680 tid = MAS1_GETTID(mas1); 2681 2682 tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 2683 size = 0; 2684 if (tsize) 2685 size = tsize2size(tsize); 2686 2687 debugf("%3d: (%s) [AS=%d] " 2688 "sz = 0x%08x tsz = %d tid = %d mas1 = 0x%08x " 2689 "mas2(va) = 0x%08x mas3(pa) = 0x%08x mas7 = 0x%08x\n", 2690 i, desc, as, size, tsize, tid, mas1, mas2, mas3, mas7); 2691 } 2692 2693 /* Convert TLB0 va and way number to tlb0[] table index. 
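*
* For example (illustrative, assuming an e500v2-style 2-way, 256-entry TLB0,
* i.e. TLB0_ENTRIES_PER_WAY == 128, with the set index taken from the low
* EPN bits): va = 0xc100_3000 and way = 1 give
*
*	idx = 1 * 128 + ((0xc1003000 >> PAGE_SHIFT) & 0x7f) = 128 + 3 = 131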
*/
2694 static inline unsigned int
2695 tlb0_tableidx(vm_offset_t va, unsigned int way)
2696 {
2697 unsigned int idx;
2698 
2699 idx = (way * TLB0_ENTRIES_PER_WAY);
2700 idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT;
2701 return (idx);
2702 }
2703 
2704 /*
2705 * Invalidate TLB0 entry.
2706 */
2707 static inline void
2708 tlb0_flush_entry(vm_offset_t va)
2709 {
2710 
2711 CTR2(KTR_PMAP, "%s: s va=0x%08x", __func__, va);
2712 
2713 mtx_assert(&tlbivax_mutex, MA_OWNED);
2714 
2715 __asm __volatile("tlbivax 0, %0" :: "r"(va & MAS2_EPN_MASK));
2716 __asm __volatile("isync; msync");
2717 __asm __volatile("tlbsync; msync");
2718 
2719 CTR1(KTR_PMAP, "%s: e", __func__);
2720 }
2721 
2722 /* Print out contents of the MAS registers for each TLB0 entry */
2723 void
2724 tlb0_print_tlbentries(void)
2725 {
2726 uint32_t mas0, mas1, mas2, mas3, mas7;
2727 int entryidx, way, idx;
2728 
2729 debugf("TLB0 entries:\n");
2730 for (way = 0; way < TLB0_WAYS; way ++)
2731 for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) {
2732 
2733 mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
2734 mtspr(SPR_MAS0, mas0);
2735 __asm __volatile("isync");
2736 
2737 mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT;
2738 mtspr(SPR_MAS2, mas2);
2739 
2740 __asm __volatile("isync; tlbre");
2741 
2742 mas1 = mfspr(SPR_MAS1);
2743 mas2 = mfspr(SPR_MAS2);
2744 mas3 = mfspr(SPR_MAS3);
2745 mas7 = mfspr(SPR_MAS7);
2746 
2747 idx = tlb0_tableidx(mas2, way);
2748 tlb_print_entry(idx, mas1, mas2, mas3, mas7);
2749 }
2750 }
2751 
2752 /**************************************************************************/
2753 /* TLB1 handling */
2754 /**************************************************************************/
2755 
2756 /*
2757 * TLB1 mapping notes:
2758 *
2759 * TLB1[0] CCSRBAR
2760 * TLB1[1] Kernel text and data.
2761 * TLB1[2-15] Additional kernel text and data mappings (if required), PCI
2762 * windows, other device mappings.
2763 */
2764 
2765 /*
2766 * Write given entry to TLB1 hardware.
2767 * Use 32 bit pa, clear 4 high-order bits of RPN (mas7).
2768 */
2769 static void
2770 tlb1_write_entry(unsigned int idx)
2771 {
2772 uint32_t mas0, mas7;
2773 
2774 //debugf("tlb1_write_entry: s\n");
2775 
2776 /* Clear high order RPN bits */
2777 mas7 = 0;
2778 
2779 /* Select entry */
2780 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(idx);
2781 //debugf("tlb1_write_entry: mas0 = 0x%08x\n", mas0);
2782 
2783 mtspr(SPR_MAS0, mas0);
2784 __asm __volatile("isync");
2785 mtspr(SPR_MAS1, tlb1[idx].mas1);
2786 __asm __volatile("isync");
2787 mtspr(SPR_MAS2, tlb1[idx].mas2);
2788 __asm __volatile("isync");
2789 mtspr(SPR_MAS3, tlb1[idx].mas3);
2790 __asm __volatile("isync");
2791 mtspr(SPR_MAS7, mas7);
2792 __asm __volatile("isync; tlbwe; isync; msync");
2793 
2794 //debugf("tlb1_write_entry: e\n");
2795 }
2796 
2797 /*
2798 * Return the largest uint value log such that 2^log <= num.
2799 */
2800 static unsigned int
2801 ilog2(unsigned int num)
2802 {
2803 int lz;
2804 
2805 __asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num));
2806 return (31 - lz);
2807 }
2808 
2809 /*
2810 * Convert TLB TSIZE value to mapped region size.
2811 */
2812 static vm_size_t
2813 tsize2size(unsigned int tsize)
2814 {
2815 
2816 /*
2817 * size = 4^tsize KB
2818 * size = 4^tsize * 2^10 = 2^(2 * tsize + 10)
2819 */
2820 
2821 return ((1 << (2 * tsize)) * 1024);
2822 }
2823 
2824 /*
2825 * Convert region size (must be power of 4) to TLB TSIZE value.
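*
* BookE encodes TSIZE as size = 4^TSIZE KB, so e.g. a 16MB region uses
* TSIZE 7 (4^7 KB = 16384 KB). A quick check against the helpers here:
*
*	size2tsize(16 * 1024 * 1024) == ilog2(16MB) / 2 - 5 == 24 / 2 - 5 == 7
*	tsize2size(7) == (1 << 14) * 1024 == 16MB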
2826 */ 2827 static unsigned int 2828 size2tsize(vm_size_t size) 2829 { 2830 2831 return (ilog2(size) / 2 - 5); 2832 } 2833 2834 /* 2835 * Register permanent kernel mapping in TLB1. 2836 * 2837 * Entries are created starting from index 0 (current free entry is 2838 * kept in tlb1_idx) and are not supposed to be invalidated. 2839 */ 2840 static int 2841 tlb1_set_entry(vm_offset_t va, vm_offset_t pa, vm_size_t size, 2842 uint32_t flags) 2843 { 2844 uint32_t ts, tid; 2845 int tsize; 2846 2847 if (tlb1_idx >= TLB1_ENTRIES) { 2848 printf("tlb1_set_entry: TLB1 full!\n"); 2849 return (-1); 2850 } 2851 2852 /* Convert size to TSIZE */ 2853 tsize = size2tsize(size); 2854 2855 tid = (TID_KERNEL << MAS1_TID_SHIFT) & MAS1_TID_MASK; 2856 /* XXX TS is hard coded to 0 for now as we only use single address space */ 2857 ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK; 2858 2859 /* XXX LOCK tlb1[] */ 2860 2861 tlb1[tlb1_idx].mas1 = MAS1_VALID | MAS1_IPROT | ts | tid; 2862 tlb1[tlb1_idx].mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK); 2863 tlb1[tlb1_idx].mas2 = (va & MAS2_EPN_MASK) | flags; 2864 2865 /* Set supervisor RWX permission bits */ 2866 tlb1[tlb1_idx].mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX; 2867 2868 tlb1_write_entry(tlb1_idx++); 2869 2870 /* XXX UNLOCK tlb1[] */ 2871 2872 /* 2873 * XXX in general TLB1 updates should be propagated between CPUs, 2874 * since current design assumes to have the same TLB1 set-up on all 2875 * cores. 2876 */ 2877 return (0); 2878 } 2879 2880 static int 2881 tlb1_entry_size_cmp(const void *a, const void *b) 2882 { 2883 const vm_size_t *sza; 2884 const vm_size_t *szb; 2885 2886 sza = a; 2887 szb = b; 2888 if (*sza > *szb) 2889 return (-1); 2890 else if (*sza < *szb) 2891 return (1); 2892 else 2893 return (0); 2894 } 2895 2896 /* 2897 * Map in contiguous RAM region into the TLB1 using maximum of 2898 * KERNEL_REGION_MAX_TLB_ENTRIES entries. 2899 * 2900 * If necessary round up last entry size and return total size 2901 * used by all allocated entries. 2902 */ 2903 vm_size_t 2904 tlb1_mapin_region(vm_offset_t va, vm_offset_t pa, vm_size_t size) 2905 { 2906 vm_size_t entry_size[KERNEL_REGION_MAX_TLB_ENTRIES]; 2907 vm_size_t mapped_size, sz, esz; 2908 unsigned int log; 2909 int i; 2910 2911 CTR4(KTR_PMAP, "%s: region size = 0x%08x va = 0x%08x pa = 0x%08x", 2912 __func__, size, va, pa); 2913 2914 mapped_size = 0; 2915 sz = size; 2916 memset(entry_size, 0, sizeof(entry_size)); 2917 2918 /* Calculate entry sizes. */ 2919 for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES && sz > 0; i++) { 2920 2921 /* Largest region that is power of 4 and fits within size */ 2922 log = ilog2(sz) / 2; 2923 esz = 1 << (2 * log); 2924 2925 /* If this is last entry cover remaining size. */ 2926 if (i == KERNEL_REGION_MAX_TLB_ENTRIES - 1) { 2927 while (esz < sz) 2928 esz = esz << 2; 2929 } 2930 2931 entry_size[i] = esz; 2932 mapped_size += esz; 2933 if (esz < sz) 2934 sz -= esz; 2935 else 2936 sz = 0; 2937 } 2938 2939 /* Sort entry sizes, required to get proper entry address alignment. */ 2940 qsort(entry_size, KERNEL_REGION_MAX_TLB_ENTRIES, 2941 sizeof(vm_size_t), tlb1_entry_size_cmp); 2942 2943 /* Load TLB1 entries. 
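*
* Worked example (illustrative, assuming KERNEL_REGION_MAX_TLB_ENTRIES is 4):
* a 72MB region is carved into 64MB + 4MB + 4MB power-of-4 chunks and mapped
* exactly; a 341MB region becomes 256MB + 64MB + 16MB + 16MB, where the last
* chunk (nominally 4MB) is rounded up to cover the 5MB remainder, i.e. 352MB
* mapped with 11MB reported as wasted space.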
*/ 2944 for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES; i++) { 2945 esz = entry_size[i]; 2946 if (!esz) 2947 break; 2948 2949 CTR5(KTR_PMAP, "%s: entry %d: sz = 0x%08x (va = 0x%08x " 2950 "pa = 0x%08x)", __func__, tlb1_idx, esz, va, pa); 2951 2952 tlb1_set_entry(va, pa, esz, _TLB_ENTRY_MEM); 2953 2954 va += esz; 2955 pa += esz; 2956 } 2957 2958 CTR3(KTR_PMAP, "%s: mapped size 0x%08x (wasted space 0x%08x)", 2959 __func__, mapped_size, mapped_size - size); 2960 2961 return (mapped_size); 2962 } 2963 2964 /* 2965 * TLB1 initialization routine, to be called after the very first 2966 * assembler level setup done in locore.S. 2967 */ 2968 void 2969 tlb1_init(vm_offset_t ccsrbar) 2970 { 2971 uint32_t mas0; 2972 2973 /* TLB1[1] is used to map the kernel. Save that entry. */ 2974 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(1); 2975 mtspr(SPR_MAS0, mas0); 2976 __asm __volatile("isync; tlbre"); 2977 2978 tlb1[1].mas1 = mfspr(SPR_MAS1); 2979 tlb1[1].mas2 = mfspr(SPR_MAS2); 2980 tlb1[1].mas3 = mfspr(SPR_MAS3); 2981 2982 /* Map in CCSRBAR in TLB1[0] */ 2983 tlb1_idx = 0; 2984 tlb1_set_entry(CCSRBAR_VA, ccsrbar, CCSRBAR_SIZE, _TLB_ENTRY_IO); 2985 /* 2986 * Set the next available TLB1 entry index. Note TLB[1] is reserved 2987 * for initial mapping of kernel text+data, which was set early in 2988 * locore, we need to skip this [busy] entry. 2989 */ 2990 tlb1_idx = 2; 2991 2992 /* Setup TLB miss defaults */ 2993 set_mas4_defaults(); 2994 } 2995 2996 /* 2997 * Setup MAS4 defaults. 2998 * These values are loaded to MAS0-2 on a TLB miss. 2999 */ 3000 static void 3001 set_mas4_defaults(void) 3002 { 3003 uint32_t mas4; 3004 3005 /* Defaults: TLB0, PID0, TSIZED=4K */ 3006 mas4 = MAS4_TLBSELD0; 3007 mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK; 3008 #ifdef SMP 3009 mas4 |= MAS4_MD; 3010 #endif 3011 mtspr(SPR_MAS4, mas4); 3012 __asm __volatile("isync"); 3013 } 3014 3015 /* 3016 * Print out contents of the MAS registers for each TLB1 entry 3017 */ 3018 void 3019 tlb1_print_tlbentries(void) 3020 { 3021 uint32_t mas0, mas1, mas2, mas3, mas7; 3022 int i; 3023 3024 debugf("TLB1 entries:\n"); 3025 for (i = 0; i < TLB1_ENTRIES; i++) { 3026 3027 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i); 3028 mtspr(SPR_MAS0, mas0); 3029 3030 __asm __volatile("isync; tlbre"); 3031 3032 mas1 = mfspr(SPR_MAS1); 3033 mas2 = mfspr(SPR_MAS2); 3034 mas3 = mfspr(SPR_MAS3); 3035 mas7 = mfspr(SPR_MAS7); 3036 3037 tlb_print_entry(i, mas1, mas2, mas3, mas7); 3038 } 3039 } 3040 3041 /* 3042 * Print out contents of the in-ram tlb1 table. 3043 */ 3044 void 3045 tlb1_print_entries(void) 3046 { 3047 int i; 3048 3049 debugf("tlb1[] table entries:\n"); 3050 for (i = 0; i < TLB1_ENTRIES; i++) 3051 tlb_print_entry(i, tlb1[i].mas1, tlb1[i].mas2, tlb1[i].mas3, 0); 3052 } 3053 3054 /* 3055 * Return 0 if the physical IO range is encompassed by one of the 3056 * the TLB1 entries, otherwise return related error code. 
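*
* mmu_booke_dev_direct_mapped() above probes the valid entries with this
* helper; the specific errors are informational only there (EINVAL: entry
* not valid, EPERM: entry lacks the cache-inhibited/guarded r/w attributes
* of an I/O mapping, ERANGE: the physical range is not fully covered).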
3057 */ 3058 static int 3059 tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va) 3060 { 3061 uint32_t prot; 3062 vm_paddr_t pa_start; 3063 vm_paddr_t pa_end; 3064 unsigned int entry_tsize; 3065 vm_size_t entry_size; 3066 3067 *va = (vm_offset_t)NULL; 3068 3069 /* Skip invalid entries */ 3070 if (!(tlb1[i].mas1 & MAS1_VALID)) 3071 return (EINVAL); 3072 3073 /* 3074 * The entry must be cache-inhibited, guarded, and r/w 3075 * so it can function as an i/o page 3076 */ 3077 prot = tlb1[i].mas2 & (MAS2_I | MAS2_G); 3078 if (prot != (MAS2_I | MAS2_G)) 3079 return (EPERM); 3080 3081 prot = tlb1[i].mas3 & (MAS3_SR | MAS3_SW); 3082 if (prot != (MAS3_SR | MAS3_SW)) 3083 return (EPERM); 3084 3085 /* The address should be within the entry range. */ 3086 entry_tsize = (tlb1[i].mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 3087 KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize")); 3088 3089 entry_size = tsize2size(entry_tsize); 3090 pa_start = tlb1[i].mas3 & MAS3_RPN; 3091 pa_end = pa_start + entry_size - 1; 3092 3093 if ((pa < pa_start) || ((pa + size) > pa_end)) 3094 return (ERANGE); 3095 3096 /* Return virtual address of this mapping. */ 3097 *va = (tlb1[i].mas2 & MAS2_EPN_MASK) + (pa - pa_start); 3098 return (0); 3099 } 3100