/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*-
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Manages physical address maps.
 *
 * Since the information managed by this module is also stored by the
 * logical address mapping module, this module may throw away valid virtual
 * to physical mappings at almost any time.  However, invalidations of
 * mappings must be done as requested.
 *
 * In order to cope with hardware architectures which make virtual to
 * physical map invalidates expensive, this module may delay invalidate
 * or reduced protection operations until such time as they are actually
 * necessary.  This module is given full information as to which processors
 * are currently using which maps, and to when physical maps must be made
 * correct.
 */
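/*
 * Rough sketch of the 32-bit OEA MMU that this pmap drives (the PowerPC
 * OEA documents are authoritative):
 *
 *   - The top 4 bits of a 32-bit effective address select one of 16
 *     segment registers; the segment register supplies a 24-bit VSID.
 *   - The VSID and the effective page index are hashed together (see
 *     va_to_pteg() below) to pick a PTE group (PTEG) in the hash table;
 *     each PTEG holds 8 PTEs.
 *   - If the primary PTEG is full, the complement of the hash selects a
 *     secondary PTEG; PTEs placed there carry PTE_HID.
 *
 * This module keeps a software PVO entry for every mapping and treats the
 * hardware PTEG table as a cache of those entries; see moea_pte_spill().
 */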

#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/cpuset.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/uma.h>

#include <machine/cpu.h>
#include <machine/platform.h>
#include <machine/bat.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/pte.h>
#include <machine/smp.h>
#include <machine/sr.h>
#include <machine/mmuvar.h>
#include <machine/trap_aim.h>

#include "mmu_if.h"

#define MOEA_DEBUG

#define TODO    panic("%s: not implemented", __func__);

#define VSID_MAKE(sr, hash)     ((sr) | (((hash) & 0xfffff) << 4))
#define VSID_TO_SR(vsid)        ((vsid) & 0xf)
#define VSID_TO_HASH(vsid)      (((vsid) >> 4) & 0xfffff)

struct ofw_map {
        vm_offset_t     om_va;
        vm_size_t       om_len;
        vm_offset_t     om_pa;
        u_int           om_mode;
};

/*
 * Map of physical memory regions.
 */
static struct mem_region *regions;
static struct mem_region *pregions;
static u_int phys_avail_count;
static int regions_sz, pregions_sz;
static struct ofw_map *translations;

/*
 * Lock for the pteg and pvo tables.
 */
struct mtx moea_table_mutex;
struct mtx moea_vsid_mutex;

/* tlbie instruction synchronization */
static struct mtx tlbie_mtx;

/*
 * PTEG data.
 */
static struct pteg *moea_pteg_table;
u_int moea_pteg_count;
u_int moea_pteg_mask;

/*
 * PVO data.
 */
struct pvo_head *moea_pvo_table;        /* pvo entries by pteg index */
struct pvo_head moea_pvo_kunmanaged =
    LIST_HEAD_INITIALIZER(moea_pvo_kunmanaged);  /* list of unmanaged pages */

static struct rwlock_padalign pvh_global_lock;

uma_zone_t moea_upvo_zone;  /* zone for pvo entries for unmanaged pages */
uma_zone_t moea_mpvo_zone;  /* zone for pvo entries for managed pages */

#define BPVO_POOL_SIZE  32768
static struct pvo_entry *moea_bpvo_pool;
static int moea_bpvo_pool_index = 0;

#define VSID_NBPW       (sizeof(u_int32_t) * 8)
static u_int moea_vsid_bitmap[NPMAPS / VSID_NBPW];
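/*
 * For illustration, the VSID_MAKE()/VSID_TO_SR()/VSID_TO_HASH() macros
 * defined above pack a 4-bit segment register number into the low nibble
 * of the VSID and a 20-bit hash above it:
 *
 *      VSID_MAKE(3, 0x12345)   == 0x123453
 *      VSID_TO_SR(0x123453)    == 3
 *      VSID_TO_HASH(0x123453)  == 0x12345
 *
 * so the 16 segment registers of one pmap share a hash and differ only in
 * the low bits (see moea_pinit()).
 */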
static boolean_t moea_initialized = FALSE;

/*
 * Statistics.
 */
u_int moea_pte_valid = 0;
u_int moea_pte_overflow = 0;
u_int moea_pte_replacements = 0;
u_int moea_pvo_entries = 0;
u_int moea_pvo_enter_calls = 0;
u_int moea_pvo_remove_calls = 0;
u_int moea_pte_spills = 0;
SYSCTL_INT(_machdep, OID_AUTO, moea_pte_valid, CTLFLAG_RD, &moea_pte_valid,
    0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pte_overflow, CTLFLAG_RD,
    &moea_pte_overflow, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pte_replacements, CTLFLAG_RD,
    &moea_pte_replacements, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_entries, CTLFLAG_RD, &moea_pvo_entries,
    0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_enter_calls, CTLFLAG_RD,
    &moea_pvo_enter_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_remove_calls, CTLFLAG_RD,
    &moea_pvo_remove_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pte_spills, CTLFLAG_RD,
    &moea_pte_spills, 0, "");

/*
 * Allocate physical memory for use in moea_bootstrap.
 */
static vm_offset_t moea_bootstrap_alloc(vm_size_t, u_int);

/*
 * PTE calls.
 */
static int moea_pte_insert(u_int, struct pte *);

/*
 * PVO calls.
 */
static int moea_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *,
            vm_offset_t, vm_offset_t, u_int, int);
static void moea_pvo_remove(struct pvo_entry *, int);
static struct pvo_entry *moea_pvo_find_va(pmap_t, vm_offset_t, int *);
static struct pte *moea_pvo_to_pte(const struct pvo_entry *, int);

/*
 * Utility routines.
 */
static void moea_enter_locked(pmap_t, vm_offset_t, vm_page_t,
            vm_prot_t, boolean_t);
static void moea_syncicache(vm_offset_t, vm_size_t);
static boolean_t moea_query_bit(vm_page_t, int);
static u_int moea_clear_bit(vm_page_t, int);
static void moea_kremove(mmu_t, vm_offset_t);
int moea_pte_spill(vm_offset_t);

/*
 * Kernel MMU interface
 */
void moea_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
void moea_clear_modify(mmu_t, vm_page_t);
void moea_clear_reference(mmu_t, vm_page_t);
void moea_copy_page(mmu_t, vm_page_t, vm_page_t);
void moea_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
void moea_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
    vm_prot_t);
void moea_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
vm_paddr_t moea_extract(mmu_t, pmap_t, vm_offset_t);
vm_page_t moea_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
void moea_init(mmu_t);
boolean_t moea_is_modified(mmu_t, vm_page_t);
boolean_t moea_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
boolean_t moea_is_referenced(mmu_t, vm_page_t);
int moea_ts_referenced(mmu_t, vm_page_t);
vm_offset_t moea_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
boolean_t moea_page_exists_quick(mmu_t, pmap_t, vm_page_t);
int moea_page_wired_mappings(mmu_t, vm_page_t);
void moea_pinit(mmu_t, pmap_t);
void moea_pinit0(mmu_t, pmap_t);
void moea_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
void moea_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
void moea_qremove(mmu_t, vm_offset_t, int);
void moea_release(mmu_t, pmap_t);
void moea_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea_remove_all(mmu_t, vm_page_t);
void moea_remove_write(mmu_t, vm_page_t);
void moea_zero_page(mmu_t, vm_page_t);
void moea_zero_page_area(mmu_t, vm_page_t, int, int);
void moea_zero_page_idle(mmu_t, vm_page_t);
void moea_activate(mmu_t, struct thread *);
void moea_deactivate(mmu_t, struct thread *);
void moea_cpu_bootstrap(mmu_t, int);
void moea_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
void *moea_mapdev(mmu_t, vm_paddr_t, vm_size_t);
void *moea_mapdev_attr(mmu_t, vm_offset_t, vm_size_t, vm_memattr_t);
void moea_unmapdev(mmu_t, vm_offset_t, vm_size_t);
vm_paddr_t moea_kextract(mmu_t, vm_offset_t);
void moea_kenter_attr(mmu_t, vm_offset_t, vm_offset_t, vm_memattr_t);
void moea_kenter(mmu_t, vm_offset_t, vm_paddr_t);
void moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma);
boolean_t moea_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
static void moea_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
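/*
 * Roughly speaking, the MMUMETHOD table below wires these functions into
 * the machine-independent pmap layer: mmu_if.m turns each MMUMETHOD entry
 * into a kobj-style method, and MMU_DEF() registers this implementation
 * under MMU_TYPE_OEA so the platform code can select it at boot.
 */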
static mmu_method_t moea_methods[] = {
        MMUMETHOD(mmu_change_wiring,    moea_change_wiring),
        MMUMETHOD(mmu_clear_modify,     moea_clear_modify),
        MMUMETHOD(mmu_clear_reference,  moea_clear_reference),
        MMUMETHOD(mmu_copy_page,        moea_copy_page),
        MMUMETHOD(mmu_enter,            moea_enter),
        MMUMETHOD(mmu_enter_object,     moea_enter_object),
        MMUMETHOD(mmu_enter_quick,      moea_enter_quick),
        MMUMETHOD(mmu_extract,          moea_extract),
        MMUMETHOD(mmu_extract_and_hold, moea_extract_and_hold),
        MMUMETHOD(mmu_init,             moea_init),
        MMUMETHOD(mmu_is_modified,      moea_is_modified),
        MMUMETHOD(mmu_is_prefaultable,  moea_is_prefaultable),
        MMUMETHOD(mmu_is_referenced,    moea_is_referenced),
        MMUMETHOD(mmu_ts_referenced,    moea_ts_referenced),
        MMUMETHOD(mmu_map,              moea_map),
        MMUMETHOD(mmu_page_exists_quick,moea_page_exists_quick),
        MMUMETHOD(mmu_page_wired_mappings,moea_page_wired_mappings),
        MMUMETHOD(mmu_pinit,            moea_pinit),
        MMUMETHOD(mmu_pinit0,           moea_pinit0),
        MMUMETHOD(mmu_protect,          moea_protect),
        MMUMETHOD(mmu_qenter,           moea_qenter),
        MMUMETHOD(mmu_qremove,          moea_qremove),
        MMUMETHOD(mmu_release,          moea_release),
        MMUMETHOD(mmu_remove,           moea_remove),
        MMUMETHOD(mmu_remove_all,       moea_remove_all),
        MMUMETHOD(mmu_remove_write,     moea_remove_write),
        MMUMETHOD(mmu_sync_icache,      moea_sync_icache),
        MMUMETHOD(mmu_zero_page,        moea_zero_page),
        MMUMETHOD(mmu_zero_page_area,   moea_zero_page_area),
        MMUMETHOD(mmu_zero_page_idle,   moea_zero_page_idle),
        MMUMETHOD(mmu_activate,         moea_activate),
        MMUMETHOD(mmu_deactivate,       moea_deactivate),
        MMUMETHOD(mmu_page_set_memattr, moea_page_set_memattr),

        /* Internal interfaces */
        MMUMETHOD(mmu_bootstrap,        moea_bootstrap),
        MMUMETHOD(mmu_cpu_bootstrap,    moea_cpu_bootstrap),
        MMUMETHOD(mmu_mapdev_attr,      moea_mapdev_attr),
        MMUMETHOD(mmu_mapdev,           moea_mapdev),
        MMUMETHOD(mmu_unmapdev,         moea_unmapdev),
        MMUMETHOD(mmu_kextract,         moea_kextract),
        MMUMETHOD(mmu_kenter,           moea_kenter),
        MMUMETHOD(mmu_kenter_attr,      moea_kenter_attr),
        MMUMETHOD(mmu_dev_direct_mapped,moea_dev_direct_mapped),

        { 0, 0 }
};

MMU_DEF(oea_mmu, MMU_TYPE_OEA, moea_methods, 0);

static __inline uint32_t
moea_calc_wimg(vm_offset_t pa, vm_memattr_t ma)
{
        uint32_t pte_lo;
        int i;

        if (ma != VM_MEMATTR_DEFAULT) {
                switch (ma) {
                case VM_MEMATTR_UNCACHEABLE:
                        return (PTE_I | PTE_G);
                case VM_MEMATTR_WRITE_COMBINING:
                case VM_MEMATTR_WRITE_BACK:
                case VM_MEMATTR_PREFETCHABLE:
                        return (PTE_I);
                case VM_MEMATTR_WRITE_THROUGH:
                        return (PTE_W | PTE_M);
                }
        }

        /*
         * Assume the page is cache inhibited and access is guarded unless
         * it's in our available memory array.
         */
        pte_lo = PTE_I | PTE_G;
        for (i = 0; i < pregions_sz; i++) {
                if ((pa >= pregions[i].mr_start) &&
                    (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
                        pte_lo = PTE_M;
                        break;
                }
        }

        return pte_lo;
}
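/*
 * Note on the WIMG bits computed above: W = write-through, I = cache-
 * inhibited, M = memory coherence required, G = guarded.  RAM gets PTE_M;
 * anything outside the physical memory regions is treated as device space
 * (PTE_I | PTE_G).
 *
 * tlbie must be serialized system-wide (hence tlbie_mtx): the
 * ptesync/tlbie/eieio/tlbsync/ptesync sequence below is the usual OEA
 * recipe for broadcasting a TLB invalidation to all processors before any
 * dependent storage access.
 */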
static void
tlbie(vm_offset_t va)
{

        mtx_lock_spin(&tlbie_mtx);
        __asm __volatile("ptesync");
        __asm __volatile("tlbie %0" :: "r"(va));
        __asm __volatile("eieio; tlbsync; ptesync");
        mtx_unlock_spin(&tlbie_mtx);
}

static void
tlbia(void)
{
        vm_offset_t va;

        for (va = 0; va < 0x00040000; va += 0x00001000) {
                __asm __volatile("tlbie %0" :: "r"(va));
                powerpc_sync();
        }
        __asm __volatile("tlbsync");
        powerpc_sync();
}

static __inline int
va_to_sr(u_int *sr, vm_offset_t va)
{
        return (sr[(uintptr_t)va >> ADDR_SR_SHFT]);
}

static __inline u_int
va_to_pteg(u_int sr, vm_offset_t addr)
{
        u_int hash;

        hash = (sr & SR_VSID_MASK) ^ (((u_int)addr & ADDR_PIDX) >>
            ADDR_PIDX_SHFT);
        return (hash & moea_pteg_mask);
}

static __inline struct pvo_head *
vm_page_to_pvoh(vm_page_t m)
{

        return (&m->md.mdpg_pvoh);
}

static __inline void
moea_attr_clear(vm_page_t m, int ptebit)
{

        rw_assert(&pvh_global_lock, RA_WLOCKED);
        m->md.mdpg_attrs &= ~ptebit;
}

static __inline int
moea_attr_fetch(vm_page_t m)
{

        return (m->md.mdpg_attrs);
}

static __inline void
moea_attr_save(vm_page_t m, int ptebit)
{

        rw_assert(&pvh_global_lock, RA_WLOCKED);
        m->md.mdpg_attrs |= ptebit;
}

static __inline int
moea_pte_compare(const struct pte *pt, const struct pte *pvo_pt)
{
        if (pt->pte_hi == pvo_pt->pte_hi)
                return (1);

        return (0);
}

static __inline int
moea_pte_match(struct pte *pt, u_int sr, vm_offset_t va, int which)
{
        return (pt->pte_hi & ~PTE_VALID) ==
            (((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
            ((va >> ADDR_API_SHFT) & PTE_API) | which);
}

static __inline void
moea_pte_create(struct pte *pt, u_int sr, vm_offset_t va, u_int pte_lo)
{

        mtx_assert(&moea_table_mutex, MA_OWNED);

        /*
         * Construct a PTE.  Default to IMB initially.  Valid bit only gets
         * set when the real pte is set in memory.
         *
         * Note: Don't set the valid bit for correct operation of tlb update.
         */
        pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
            (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API);
        pt->pte_lo = pte_lo;
}

static __inline void
moea_pte_synch(struct pte *pt, struct pte *pvo_pt)
{

        mtx_assert(&moea_table_mutex, MA_OWNED);
        pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG);
}

static __inline void
moea_pte_clear(struct pte *pt, vm_offset_t va, int ptebit)
{

        mtx_assert(&moea_table_mutex, MA_OWNED);

        /*
         * As shown in Section 7.6.3.2.3
         */
        pt->pte_lo &= ~ptebit;
        tlbie(va);
}
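/*
 * Ordering matters when turning a PTE on: pte_lo (with the RPN and WIMG
 * bits) is written first, a sync is done, and only then is pte_hi written
 * with PTE_VALID set, so that the hardware table-walker can never observe
 * a valid entry with a stale low word.  moea_pte_unset() below does the
 * mirror image when tearing a PTE down.
 */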
static __inline void
moea_pte_set(struct pte *pt, struct pte *pvo_pt)
{

        mtx_assert(&moea_table_mutex, MA_OWNED);
        pvo_pt->pte_hi |= PTE_VALID;

        /*
         * Update the PTE as defined in section 7.6.3.1.
         * Note that the REF/CHG bits are from pvo_pt and thus should have
         * been saved so this routine can restore them (if desired).
         */
        pt->pte_lo = pvo_pt->pte_lo;
        powerpc_sync();
        pt->pte_hi = pvo_pt->pte_hi;
        powerpc_sync();
        moea_pte_valid++;
}

static __inline void
moea_pte_unset(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
{

        mtx_assert(&moea_table_mutex, MA_OWNED);
        pvo_pt->pte_hi &= ~PTE_VALID;

        /*
         * Force the ref & chg bits back into the PTEs.
         */
        powerpc_sync();

        /*
         * Invalidate the pte.
         */
        pt->pte_hi &= ~PTE_VALID;

        tlbie(va);

        /*
         * Save the ref & chg bits.
         */
        moea_pte_synch(pt, pvo_pt);
        moea_pte_valid--;
}

static __inline void
moea_pte_change(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
{

        /*
         * Invalidate the PTE
         */
        moea_pte_unset(pt, pvo_pt, va);
        moea_pte_set(pt, pvo_pt);
}

/*
 * Quick sort callout for comparing memory regions.
 */
static int om_cmp(const void *a, const void *b);

static int
om_cmp(const void *a, const void *b)
{
        const struct ofw_map *mapa;
        const struct ofw_map *mapb;

        mapa = a;
        mapb = b;
        if (mapa->om_pa < mapb->om_pa)
                return (-1);
        else if (mapa->om_pa > mapb->om_pa)
                return (1);
        else
                return (0);
}

void
moea_cpu_bootstrap(mmu_t mmup, int ap)
{
        u_int sdr;
        int i;

        if (ap) {
                powerpc_sync();
                __asm __volatile("mtdbatu 0,%0" :: "r"(battable[0].batu));
                __asm __volatile("mtdbatl 0,%0" :: "r"(battable[0].batl));
                isync();
                __asm __volatile("mtibatu 0,%0" :: "r"(battable[0].batu));
                __asm __volatile("mtibatl 0,%0" :: "r"(battable[0].batl));
                isync();
        }

#ifdef WII
        /*
         * Special case for the Wii: don't install the PCI BAT.
         */
        if (strcmp(installed_platform(), "wii") != 0) {
#endif
                __asm __volatile("mtdbatu 1,%0" :: "r"(battable[8].batu));
                __asm __volatile("mtdbatl 1,%0" :: "r"(battable[8].batl));
#ifdef WII
        }
#endif
        isync();

        __asm __volatile("mtibatu 1,%0" :: "r"(0));
        __asm __volatile("mtdbatu 2,%0" :: "r"(0));
        __asm __volatile("mtibatu 2,%0" :: "r"(0));
        __asm __volatile("mtdbatu 3,%0" :: "r"(0));
        __asm __volatile("mtibatu 3,%0" :: "r"(0));
        isync();

        for (i = 0; i < 16; i++)
                mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]);
        powerpc_sync();

        sdr = (u_int)moea_pteg_table | (moea_pteg_mask >> 10);
        __asm __volatile("mtsdr1 %0" :: "r"(sdr));
        isync();

        tlbia();
}
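/*
 * Bootstrap sketch: BATs (block address translation registers) provide
 * large (here 256MB) direct mappings that work before the page table
 * exists.  BATU() encodes the effective base and block length, BATL() the
 * physical base plus WIMG and protection bits, so for example
 *
 *      battable[0x8].batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW);
 *      battable[0x8].batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);
 *
 * describes a cache-inhibited, guarded, read/write 256MB block mapping
 * effective 0x80000000 1:1 onto physical 0x80000000, valid in supervisor
 * state.
 */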
void
moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
        ihandle_t mmui;
        phandle_t chosen, mmu;
        int sz;
        int i, j;
        vm_size_t size, physsz, hwphyssz;
        vm_offset_t pa, va, off;
        void *dpcpu;
        register_t msr;

        /*
         * Set up BAT0 to map the lowest 256 MB area
         */
        battable[0x0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
        battable[0x0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);

        /*
         * Map PCI memory space.
         */
        battable[0x8].batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW);
        battable[0x8].batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);

        battable[0x9].batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW);
        battable[0x9].batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs);

        battable[0xa].batl = BATL(0xa0000000, BAT_I|BAT_G, BAT_PP_RW);
        battable[0xa].batu = BATU(0xa0000000, BAT_BL_256M, BAT_Vs);

        battable[0xb].batl = BATL(0xb0000000, BAT_I|BAT_G, BAT_PP_RW);
        battable[0xb].batu = BATU(0xb0000000, BAT_BL_256M, BAT_Vs);

        /*
         * Map obio devices.
         */
        battable[0xf].batl = BATL(0xf0000000, BAT_I|BAT_G, BAT_PP_RW);
        battable[0xf].batu = BATU(0xf0000000, BAT_BL_256M, BAT_Vs);

        /*
         * Use an IBAT and a DBAT to map the bottom segment of memory
         * where we are.  Turn off instruction relocation temporarily
         * to prevent faults while reprogramming the IBAT.
         */
        msr = mfmsr();
        mtmsr(msr & ~PSL_IR);
        __asm (".balign 32; \n"
               "mtibatu 0,%0; mtibatl 0,%1; isync; \n"
               "mtdbatu 0,%0; mtdbatl 0,%1; isync"
            :: "r"(battable[0].batu), "r"(battable[0].batl));
        mtmsr(msr);

#ifdef WII
        if (strcmp(installed_platform(), "wii") != 0) {
#endif
                /* map pci space */
                __asm __volatile("mtdbatu 1,%0" :: "r"(battable[8].batu));
                __asm __volatile("mtdbatl 1,%0" :: "r"(battable[8].batl));
#ifdef WII
        }
#endif
        isync();

        /* set global direct map flag */
        hw_direct_map = 1;

        mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
        CTR0(KTR_PMAP, "moea_bootstrap: physical memory");

        for (i = 0; i < pregions_sz; i++) {
                vm_offset_t pa;
                vm_offset_t end;

                CTR3(KTR_PMAP, "physregion: %#x - %#x (%#x)",
                    pregions[i].mr_start,
                    pregions[i].mr_start + pregions[i].mr_size,
                    pregions[i].mr_size);
                /*
                 * Install entries into the BAT table to allow all
                 * of physmem to be covered by on-demand BAT entries.
                 * The loop will sometimes set the same battable element
                 * twice, but that's fine since they won't be used for
                 * a while yet.
                 */
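                /*
                 * For example, a 768MB region starting at 0x10000000 spans
                 * segments 0x1-0x3, so battable[1] through battable[3] get
                 * 256MB BAT entries (pa >> ADDR_SR_SHFT picks the segment
                 * number, and SEGMENT_LENGTH advances one segment at a
                 * time).
                 */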
                pa = pregions[i].mr_start & 0xf0000000;
                end = pregions[i].mr_start + pregions[i].mr_size;
                do {
                        u_int n = pa >> ADDR_SR_SHFT;

                        battable[n].batl = BATL(pa, BAT_M, BAT_PP_RW);
                        battable[n].batu = BATU(pa, BAT_BL_256M, BAT_Vs);
                        pa += SEGMENT_LENGTH;
                } while (pa < end);
        }

        if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
                panic("moea_bootstrap: phys_avail too small");

        phys_avail_count = 0;
        physsz = 0;
        hwphyssz = 0;
        TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
        for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
                CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
                    regions[i].mr_start + regions[i].mr_size,
                    regions[i].mr_size);
                if (hwphyssz != 0 &&
                    (physsz + regions[i].mr_size) >= hwphyssz) {
                        if (physsz < hwphyssz) {
                                phys_avail[j] = regions[i].mr_start;
                                phys_avail[j + 1] = regions[i].mr_start +
                                    hwphyssz - physsz;
                                physsz = hwphyssz;
                                phys_avail_count++;
                        }
                        break;
                }
                phys_avail[j] = regions[i].mr_start;
                phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
                phys_avail_count++;
                physsz += regions[i].mr_size;
        }

        /* Check for overlap with the kernel and exception vectors */
        for (j = 0; j < 2*phys_avail_count; j+=2) {
                if (phys_avail[j] < EXC_LAST)
                        phys_avail[j] += EXC_LAST;

                if (kernelstart >= phys_avail[j] &&
                    kernelstart < phys_avail[j+1]) {
                        if (kernelend < phys_avail[j+1]) {
                                phys_avail[2*phys_avail_count] =
                                    (kernelend & ~PAGE_MASK) + PAGE_SIZE;
                                phys_avail[2*phys_avail_count + 1] =
                                    phys_avail[j+1];
                                phys_avail_count++;
                        }

                        phys_avail[j+1] = kernelstart & ~PAGE_MASK;
                }

                if (kernelend >= phys_avail[j] &&
                    kernelend < phys_avail[j+1]) {
                        if (kernelstart > phys_avail[j]) {
                                phys_avail[2*phys_avail_count] = phys_avail[j];
                                phys_avail[2*phys_avail_count + 1] =
                                    kernelstart & ~PAGE_MASK;
                                phys_avail_count++;
                        }

                        phys_avail[j] = (kernelend & ~PAGE_MASK) + PAGE_SIZE;
                }
        }

        physmem = btoc(physsz);

        /*
         * Allocate PTEG table.
         */
#ifdef PTEGCOUNT
        moea_pteg_count = PTEGCOUNT;
#else
        moea_pteg_count = 0x1000;

        while (moea_pteg_count < physmem)
                moea_pteg_count <<= 1;

        moea_pteg_count >>= 1;
#endif /* PTEGCOUNT */

        size = moea_pteg_count * sizeof(struct pteg);
        CTR2(KTR_PMAP, "moea_bootstrap: %d PTEGs, %d bytes", moea_pteg_count,
            size);
        moea_pteg_table = (struct pteg *)moea_bootstrap_alloc(size, size);
        CTR1(KTR_PMAP, "moea_bootstrap: PTEG table at %p", moea_pteg_table);
        bzero((void *)moea_pteg_table, moea_pteg_count * sizeof(struct pteg));
        moea_pteg_mask = moea_pteg_count - 1;

        /*
         * Allocate pv/overflow lists.
         */
        size = sizeof(struct pvo_head) * moea_pteg_count;
        moea_pvo_table = (struct pvo_head *)moea_bootstrap_alloc(size,
            PAGE_SIZE);
        CTR1(KTR_PMAP, "moea_bootstrap: PVO table at %p", moea_pvo_table);
        for (i = 0; i < moea_pteg_count; i++)
                LIST_INIT(&moea_pvo_table[i]);

        /*
         * Initialize the lock that synchronizes access to the pteg and pvo
         * tables.
         */
        mtx_init(&moea_table_mutex, "pmap table", NULL, MTX_DEF |
            MTX_RECURSE);
        mtx_init(&moea_vsid_mutex, "VSID table", NULL, MTX_DEF);

        mtx_init(&tlbie_mtx, "tlbie", NULL, MTX_SPIN);
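        /*
         * Allocations made before moea_init() runs cannot come from UMA, so
         * a fixed pool of pvo entries (moea_bpvo_pool, below) is carved out
         * of boot memory; entries taken from it are tagged PVO_BOOTSTRAP
         * and are never returned to a zone.
         */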
        /*
         * Initialize the unmanaged pvo pool.
         */
        moea_bpvo_pool = (struct pvo_entry *)moea_bootstrap_alloc(
            BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
        moea_bpvo_pool_index = 0;

        /*
         * Make sure kernel vsid is allocated as well as VSID 0.
         */
        moea_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW]
            |= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
        moea_vsid_bitmap[0] |= 1;

        /*
         * Initialize the kernel pmap (which is statically allocated).
         */
        PMAP_LOCK_INIT(kernel_pmap);
        for (i = 0; i < 16; i++)
                kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;
        CPU_FILL(&kernel_pmap->pm_active);
        RB_INIT(&kernel_pmap->pmap_pvo);

        /*
         * Initialize the global pv list lock.
         */
        rw_init(&pvh_global_lock, "pmap pv global");

        /*
         * Set up the Open Firmware mappings
         */
        chosen = OF_finddevice("/chosen");
        if (chosen != -1 && OF_getprop(chosen, "mmu", &mmui, 4) != -1 &&
            (mmu = OF_instance_to_package(mmui)) != -1 &&
            (sz = OF_getproplen(mmu, "translations")) != -1) {
                translations = NULL;
                for (i = 0; phys_avail[i] != 0; i += 2) {
                        if (phys_avail[i + 1] >= sz) {
                                translations = (struct ofw_map *)phys_avail[i];
                                break;
                        }
                }
                if (translations == NULL)
                        panic("moea_bootstrap: no space to copy translations");
                bzero(translations, sz);
                if (OF_getprop(mmu, "translations", translations, sz) == -1)
                        panic("moea_bootstrap: can't get ofw translations");
                CTR0(KTR_PMAP, "moea_bootstrap: translations");
                sz /= sizeof(*translations);
                qsort(translations, sz, sizeof (*translations), om_cmp);
                for (i = 0; i < sz; i++) {
                        CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
                            translations[i].om_pa, translations[i].om_va,
                            translations[i].om_len);

                        /*
                         * If the mapping is 1:1, let the RAM and device
                         * on-demand BAT tables take care of the translation.
                         */
                        if (translations[i].om_va == translations[i].om_pa)
                                continue;

                        /* Enter the pages */
                        for (off = 0; off < translations[i].om_len;
                            off += PAGE_SIZE)
                                moea_kenter(mmup, translations[i].om_va + off,
                                    translations[i].om_pa + off);
                }
        }

        /*
         * Calculate the last available physical address.
         */
        for (i = 0; phys_avail[i + 2] != 0; i += 2)
                ;
        Maxmem = powerpc_btop(phys_avail[i + 1]);

        moea_cpu_bootstrap(mmup, 0);

        pmap_bootstrapped++;

        /*
         * Set the start and end of kva.
         */
        virtual_avail = VM_MIN_KERNEL_ADDRESS;
        virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;

        /*
         * Allocate a kernel stack with a guard page for thread0 and map it
         * into the kernel page map.
         */
        pa = moea_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE);
        va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
        virtual_avail = va + KSTACK_PAGES * PAGE_SIZE;
        CTR2(KTR_PMAP, "moea_bootstrap: kstack0 at %#x (%#x)", pa, va);
        thread0.td_kstack = va;
        thread0.td_kstack_pages = KSTACK_PAGES;
        for (i = 0; i < KSTACK_PAGES; i++) {
                moea_kenter(mmup, va, pa);
                pa += PAGE_SIZE;
                va += PAGE_SIZE;
        }

        /*
         * Allocate virtual address space for the message buffer.
         */
        pa = msgbuf_phys = moea_bootstrap_alloc(msgbufsize, PAGE_SIZE);
        msgbufp = (struct msgbuf *)virtual_avail;
        va = virtual_avail;
        virtual_avail += round_page(msgbufsize);
        while (va < virtual_avail) {
                moea_kenter(mmup, va, pa);
                pa += PAGE_SIZE;
                va += PAGE_SIZE;
        }
        /*
         * Allocate virtual address space for the dynamic percpu area.
         */
        pa = moea_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE);
        dpcpu = (void *)virtual_avail;
        va = virtual_avail;
        virtual_avail += DPCPU_SIZE;
        while (va < virtual_avail) {
                moea_kenter(mmup, va, pa);
                pa += PAGE_SIZE;
                va += PAGE_SIZE;
        }
        dpcpu_init(dpcpu, 0);
}

/*
 * Activate a user pmap.  The pmap must be activated before its address
 * space can be accessed in any way.
 */
void
moea_activate(mmu_t mmu, struct thread *td)
{
        pmap_t pm, pmr;

        /*
         * Load all the data we need up front to encourage the compiler to
         * not issue any loads while we have interrupts disabled below.
         */
        pm = &td->td_proc->p_vmspace->vm_pmap;
        pmr = pm->pmap_phys;

        CPU_SET(PCPU_GET(cpuid), &pm->pm_active);
        PCPU_SET(curpmap, pmr);
}

void
moea_deactivate(mmu_t mmu, struct thread *td)
{
        pmap_t pm;

        pm = &td->td_proc->p_vmspace->vm_pmap;
        CPU_CLR(PCPU_GET(cpuid), &pm->pm_active);
        PCPU_SET(curpmap, NULL);
}

void
moea_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired)
{
        struct pvo_entry *pvo;

        PMAP_LOCK(pm);
        pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);

        if (pvo != NULL) {
                if (wired) {
                        if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
                                pm->pm_stats.wired_count++;
                        pvo->pvo_vaddr |= PVO_WIRED;
                } else {
                        if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
                                pm->pm_stats.wired_count--;
                        pvo->pvo_vaddr &= ~PVO_WIRED;
                }
        }
        PMAP_UNLOCK(pm);
}

void
moea_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
{
        vm_offset_t dst;
        vm_offset_t src;

        dst = VM_PAGE_TO_PHYS(mdst);
        src = VM_PAGE_TO_PHYS(msrc);

        bcopy((void *)src, (void *)dst, PAGE_SIZE);
}

/*
 * Zero a page of physical memory by temporarily mapping it into the tlb.
 */
void
moea_zero_page(mmu_t mmu, vm_page_t m)
{
        vm_offset_t pa = VM_PAGE_TO_PHYS(m);
        void *va = (void *)pa;

        bzero(va, PAGE_SIZE);
}

void
moea_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
{
        vm_offset_t pa = VM_PAGE_TO_PHYS(m);
        void *va = (void *)(pa + off);

        bzero(va, size);
}

void
moea_zero_page_idle(mmu_t mmu, vm_page_t m)
{
        vm_offset_t pa = VM_PAGE_TO_PHYS(m);
        void *va = (void *)pa;

        bzero(va, PAGE_SIZE);
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 */
void
moea_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    boolean_t wired)
{

        rw_wlock(&pvh_global_lock);
        PMAP_LOCK(pmap);
        moea_enter_locked(pmap, va, m, prot, wired);
        rw_wunlock(&pvh_global_lock);
        PMAP_UNLOCK(pmap);
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 *
 * The page queues and pmap must be locked.
 */
static void
moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    boolean_t wired)
{
        struct pvo_head *pvo_head;
        uma_zone_t zone;
        vm_page_t pg;
        u_int pte_lo, pvo_flags;
        int error;

        if (!moea_initialized) {
                pvo_head = &moea_pvo_kunmanaged;
                zone = moea_upvo_zone;
                pvo_flags = 0;
                pg = NULL;
        } else {
                pvo_head = vm_page_to_pvoh(m);
                pg = m;
                zone = moea_mpvo_zone;
                pvo_flags = PVO_MANAGED;
        }
        if (pmap_bootstrapped)
                rw_assert(&pvh_global_lock, RA_WLOCKED);
        PMAP_LOCK_ASSERT(pmap, MA_OWNED);
        if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == 0)
                VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);

        /* XXX change the pvo head for fake pages */
        if ((m->oflags & VPO_UNMANAGED) != 0) {
                pvo_flags &= ~PVO_MANAGED;
                pvo_head = &moea_pvo_kunmanaged;
                zone = moea_upvo_zone;
        }

        pte_lo = moea_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m));

        if (prot & VM_PROT_WRITE) {
                pte_lo |= PTE_BW;
                if (pmap_bootstrapped &&
                    (m->oflags & VPO_UNMANAGED) == 0)
                        vm_page_aflag_set(m, PGA_WRITEABLE);
        } else
                pte_lo |= PTE_BR;

        if (prot & VM_PROT_EXECUTE)
                pvo_flags |= PVO_EXECUTABLE;

        if (wired)
                pvo_flags |= PVO_WIRED;

        error = moea_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m),
            pte_lo, pvo_flags);

        /*
         * Flush the real page from the instruction cache.  This has to be
         * done for all user mappings to prevent information leakage via the
         * instruction cache.  moea_pvo_enter() returns ENOENT for the first
         * mapping for a page.
         */
        if (pmap != kernel_pmap && error == ENOENT &&
            (pte_lo & (PTE_I | PTE_G)) == 0)
                moea_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
}

/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start.  This page is
 * mapped at the given virtual address start.  Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object.  The
 * last page in the sequence is the page with the largest offset from
 * m_start that can be mapped at a virtual address less than the given
 * virtual address end.  Not every virtual page between start and end
 * is mapped; only those for which a resident page exists with the
 * corresponding offset from m_start are mapped.
 */
void
moea_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{
        vm_page_t m;
        vm_pindex_t diff, psize;

        psize = atop(end - start);
        m = m_start;
        rw_wlock(&pvh_global_lock);
        PMAP_LOCK(pm);
        while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
                moea_enter_locked(pm, start + ptoa(diff), m, prot &
                    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
                m = TAILQ_NEXT(m, listq);
        }
        rw_wunlock(&pvh_global_lock);
        PMAP_UNLOCK(pm);
}

void
moea_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
    vm_prot_t prot)
{

        rw_wlock(&pvh_global_lock);
        PMAP_LOCK(pm);
        moea_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
            FALSE);
        rw_wunlock(&pvh_global_lock);
        PMAP_UNLOCK(pm);
}

vm_paddr_t
moea_extract(mmu_t mmu, pmap_t pm, vm_offset_t va)
{
        struct pvo_entry *pvo;
        vm_paddr_t pa;

        PMAP_LOCK(pm);
        pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
        if (pvo == NULL)
                pa = 0;
        else
                pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF);
        PMAP_UNLOCK(pm);
        return (pa);
}

/*
 * Atomically extract and hold the physical page with the given
 * pmap and virtual address pair if that mapping permits the given
 * protection.
 */
vm_page_t
moea_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
        struct pvo_entry *pvo;
        vm_page_t m;
        vm_paddr_t pa;

        m = NULL;
        pa = 0;
        PMAP_LOCK(pmap);
retry:
        pvo = moea_pvo_find_va(pmap, va & ~ADDR_POFF, NULL);
        if (pvo != NULL && (pvo->pvo_pte.pte.pte_hi & PTE_VALID) &&
            ((pvo->pvo_pte.pte.pte_lo & PTE_PP) == PTE_RW ||
             (prot & VM_PROT_WRITE) == 0)) {
                if (vm_page_pa_tryrelock(pmap,
                    pvo->pvo_pte.pte.pte_lo & PTE_RPGN, &pa))
                        goto retry;
                m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte.pte_lo & PTE_RPGN);
                vm_page_hold(m);
        }
        PA_UNLOCK_COND(pa);
        PMAP_UNLOCK(pmap);
        return (m);
}

void
moea_init(mmu_t mmu)
{

        moea_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
            UMA_ZONE_VM | UMA_ZONE_NOFREE);
        moea_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
            UMA_ZONE_VM | UMA_ZONE_NOFREE);
        moea_initialized = TRUE;
}

boolean_t
moea_is_referenced(mmu_t mmu, vm_page_t m)
{
        boolean_t rv;

        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("moea_is_referenced: page %p is not managed", m));
        rw_wlock(&pvh_global_lock);
        rv = moea_query_bit(m, PTE_REF);
        rw_wunlock(&pvh_global_lock);
        return (rv);
}
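/*
 * PTE_REF/PTE_CHG are the hardware referenced/changed bits.  The queries
 * below must look at every mapping of the page (and at the per-page
 * attribute cache maintained by moea_attr_save()), since the hardware
 * sets the bits in whichever PTE the access actually went through;
 * moea_query_bit() and moea_clear_bit() do that walk.
 */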
boolean_t
moea_is_modified(mmu_t mmu, vm_page_t m)
{
        boolean_t rv;

        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("moea_is_modified: page %p is not managed", m));

        /*
         * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
         * concurrently set while the object is locked.  Thus, if
         * PGA_WRITEABLE is clear, no PTEs can have PTE_CHG set.
         */
        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
        if ((m->oflags & VPO_BUSY) == 0 &&
            (m->aflags & PGA_WRITEABLE) == 0)
                return (FALSE);
        rw_wlock(&pvh_global_lock);
        rv = moea_query_bit(m, PTE_CHG);
        rw_wunlock(&pvh_global_lock);
        return (rv);
}

boolean_t
moea_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
        struct pvo_entry *pvo;
        boolean_t rv;

        PMAP_LOCK(pmap);
        pvo = moea_pvo_find_va(pmap, va & ~ADDR_POFF, NULL);
        rv = pvo == NULL || (pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0;
        PMAP_UNLOCK(pmap);
        return (rv);
}

void
moea_clear_reference(mmu_t mmu, vm_page_t m)
{

        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("moea_clear_reference: page %p is not managed", m));
        rw_wlock(&pvh_global_lock);
        moea_clear_bit(m, PTE_REF);
        rw_wunlock(&pvh_global_lock);
}

void
moea_clear_modify(mmu_t mmu, vm_page_t m)
{

        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("moea_clear_modify: page %p is not managed", m));
        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
        KASSERT((m->oflags & VPO_BUSY) == 0,
            ("moea_clear_modify: page %p is busy", m));

        /*
         * If the page is not PGA_WRITEABLE, then no PTEs can have PTE_CHG
         * set.  If the object containing the page is locked and the page is
         * not VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
         */
        if ((m->aflags & PGA_WRITEABLE) == 0)
                return;
        rw_wlock(&pvh_global_lock);
        moea_clear_bit(m, PTE_CHG);
        rw_wunlock(&pvh_global_lock);
}

/*
 * Clear the write and modified bits in each of the given page's mappings.
 */
void
moea_remove_write(mmu_t mmu, vm_page_t m)
{
        struct pvo_entry *pvo;
        struct pte *pt;
        pmap_t pmap;
        u_int lo;

        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("moea_remove_write: page %p is not managed", m));

        /*
         * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
         * another thread while the object is locked.  Thus, if PGA_WRITEABLE
         * is clear, no page table entries need updating.
         */
        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
        if ((m->oflags & VPO_BUSY) == 0 &&
            (m->aflags & PGA_WRITEABLE) == 0)
                return;
        rw_wlock(&pvh_global_lock);
        lo = moea_attr_fetch(m);
        powerpc_sync();
        LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
                pmap = pvo->pvo_pmap;
                PMAP_LOCK(pmap);
                if ((pvo->pvo_pte.pte.pte_lo & PTE_PP) != PTE_BR) {
                        pt = moea_pvo_to_pte(pvo, -1);
                        pvo->pvo_pte.pte.pte_lo &= ~PTE_PP;
                        pvo->pvo_pte.pte.pte_lo |= PTE_BR;
                        if (pt != NULL) {
                                moea_pte_synch(pt, &pvo->pvo_pte.pte);
                                lo |= pvo->pvo_pte.pte.pte_lo;
                                pvo->pvo_pte.pte.pte_lo &= ~PTE_CHG;
                                moea_pte_change(pt, &pvo->pvo_pte.pte,
                                    pvo->pvo_vaddr);
                                mtx_unlock(&moea_table_mutex);
                        }
                }
                PMAP_UNLOCK(pmap);
        }
        if ((lo & PTE_CHG) != 0) {
                moea_attr_clear(m, PTE_CHG);
                vm_page_dirty(m);
        }
        vm_page_aflag_clear(m, PGA_WRITEABLE);
        rw_wunlock(&pvh_global_lock);
}

/*
 * moea_ts_referenced:
 *
 *      Return a count of reference bits for a page, clearing those bits.
 *      It is not necessary for every reference bit to be cleared, but it
 *      is necessary that 0 only be returned when there are truly no
 *      reference bits set.
 *
 *      XXX: The exact number of bits to check and clear is a matter that
 *      should be tested and standardized at some point in the future for
 *      optimal aging of shared pages.
 */
int
moea_ts_referenced(mmu_t mmu, vm_page_t m)
{
        int count;

        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("moea_ts_referenced: page %p is not managed", m));
        rw_wlock(&pvh_global_lock);
        count = moea_clear_bit(m, PTE_REF);
        rw_wunlock(&pvh_global_lock);
        return (count);
}

/*
 * Modify the WIMG settings of all mappings for a page.
 */
void
moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
{
        struct pvo_entry *pvo;
        struct pvo_head *pvo_head;
        struct pte *pt;
        pmap_t pmap;
        u_int lo;

        if ((m->oflags & VPO_UNMANAGED) != 0) {
                m->md.mdpg_cache_attrs = ma;
                return;
        }

        rw_wlock(&pvh_global_lock);
        pvo_head = vm_page_to_pvoh(m);
        lo = moea_calc_wimg(VM_PAGE_TO_PHYS(m), ma);

        LIST_FOREACH(pvo, pvo_head, pvo_vlink) {
                pmap = pvo->pvo_pmap;
                PMAP_LOCK(pmap);
                pt = moea_pvo_to_pte(pvo, -1);
                pvo->pvo_pte.pte.pte_lo &= ~PTE_WIMG;
                pvo->pvo_pte.pte.pte_lo |= lo;
                /*
                 * moea_pvo_to_pte() returns with the table lock held only
                 * when it finds a valid PTE, so only drop it in that case.
                 */
                if (pt != NULL) {
                        moea_pte_change(pt, &pvo->pvo_pte.pte,
                            pvo->pvo_vaddr);
                        if (pvo->pvo_pmap == kernel_pmap)
                                isync();
                        mtx_unlock(&moea_table_mutex);
                }
                PMAP_UNLOCK(pmap);
        }
        m->md.mdpg_cache_attrs = ma;
        rw_wunlock(&pvh_global_lock);
}

/*
 * Map a wired page into kernel virtual address space.
 */
void
moea_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
{

        moea_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
}

void
moea_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
{
        u_int pte_lo;
        int error;

#if 0
        if (va < VM_MIN_KERNEL_ADDRESS)
                panic("moea_kenter: attempt to enter non-kernel address %#x",
                    va);
#endif

        pte_lo = moea_calc_wimg(pa, ma);

        PMAP_LOCK(kernel_pmap);
        error = moea_pvo_enter(kernel_pmap, moea_upvo_zone,
            &moea_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED);

        if (error != 0 && error != ENOENT)
                panic("moea_kenter: failed to enter va %#x pa %#x: %d", va,
                    pa, error);

        PMAP_UNLOCK(kernel_pmap);
}

/*
 * Extract the physical page address associated with the given kernel virtual
 * address.
 */
vm_paddr_t
moea_kextract(mmu_t mmu, vm_offset_t va)
{
        struct pvo_entry *pvo;
        vm_paddr_t pa;

        /*
         * Allow direct mappings on 32-bit OEA
         */
        if (va < VM_MIN_KERNEL_ADDRESS) {
                return (va);
        }

        PMAP_LOCK(kernel_pmap);
        pvo = moea_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL);
        KASSERT(pvo != NULL, ("moea_kextract: no addr found"));
        pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF);
        PMAP_UNLOCK(kernel_pmap);
        return (pa);
}

/*
 * Remove a wired page from kernel virtual address space.
 */
void
moea_kremove(mmu_t mmu, vm_offset_t va)
{

        moea_remove(mmu, kernel_pmap, va, va + PAGE_SIZE);
}

/*
 * Map a range of physical addresses into kernel virtual address space.
 *
 * The value passed in *virt is a suggested virtual address for the mapping.
 * Architectures which can support a direct-mapped physical to virtual region
 * can return the appropriate address within that region, leaving '*virt'
 * unchanged.  We cannot and therefore do not; *virt is updated with the
 * first usable address after the mapped region.
 */
vm_offset_t
moea_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
    vm_paddr_t pa_end, int prot)
{
        vm_offset_t sva, va;

        sva = *virt;
        va = sva;
        for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
                moea_kenter(mmu, va, pa_start);
        *virt = va;
        return (sva);
}

/*
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page.  This count may
 * be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
 */
boolean_t
moea_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
{
        int loops;
        struct pvo_entry *pvo;
        boolean_t rv;

        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("moea_page_exists_quick: page %p is not managed", m));
        loops = 0;
        rv = FALSE;
        rw_wlock(&pvh_global_lock);
        LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
                if (pvo->pvo_pmap == pmap) {
                        rv = TRUE;
                        break;
                }
                if (++loops >= 16)
                        break;
        }
        rw_wunlock(&pvh_global_lock);
        return (rv);
}

/*
 * Return the number of managed mappings to the given physical page
 * that are wired.
 */
int
moea_page_wired_mappings(mmu_t mmu, vm_page_t m)
{
        struct pvo_entry *pvo;
        int count;

        count = 0;
        if ((m->oflags & VPO_UNMANAGED) != 0)
                return (count);
        rw_wlock(&pvh_global_lock);
        LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
                if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
                        count++;
        rw_wunlock(&pvh_global_lock);
        return (count);
}

static u_int moea_vsidcontext;
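/*
 * VSID allocation, in outline: each pmap gets a 20-bit hash; the 16
 * segment registers of the pmap are then VSID_MAKE(0..15, hash).  The
 * hash is drawn from moea_vsidcontext (a multiplicative sequence salted
 * with timebase entropy), and moea_vsid_bitmap tracks which of the
 * NPMAPS hash groups are in use so two pmaps never share a VSID.
 */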
void
moea_pinit(mmu_t mmu, pmap_t pmap)
{
        int i, mask;
        u_int entropy;

        KASSERT((int)pmap < VM_MIN_KERNEL_ADDRESS, ("moea_pinit: virt pmap"));
        PMAP_LOCK_INIT(pmap);
        RB_INIT(&pmap->pmap_pvo);

        entropy = 0;
        __asm __volatile("mftb %0" : "=r"(entropy));

        if ((pmap->pmap_phys = (pmap_t)moea_kextract(mmu, (vm_offset_t)pmap))
            == NULL) {
                pmap->pmap_phys = pmap;
        }

        mtx_lock(&moea_vsid_mutex);
        /*
         * Allocate some segment registers for this pmap.
         */
        for (i = 0; i < NPMAPS; i += VSID_NBPW) {
                u_int hash, n;

                /*
                 * Create a new value by multiplying by a prime and adding in
                 * entropy from the timebase register.  This is to make the
                 * VSID more random so that the PT hash function collides
                 * less often.  (Note that the prime causes gcc to do shifts
                 * instead of a multiply.)
                 */
                moea_vsidcontext = (moea_vsidcontext * 0x1105) + entropy;
                hash = moea_vsidcontext & (NPMAPS - 1);
                if (hash == 0)          /* 0 is special, avoid it */
                        continue;
                n = hash >> 5;
                mask = 1 << (hash & (VSID_NBPW - 1));
                hash = (moea_vsidcontext & 0xfffff);
                if (moea_vsid_bitmap[n] & mask) {       /* collision? */
                        /* anything free in this bucket? */
                        if (moea_vsid_bitmap[n] == 0xffffffff) {
                                entropy = (moea_vsidcontext >> 20);
                                continue;
                        }
                        i = ffs(~moea_vsid_bitmap[n]) - 1;
                        mask = 1 << i;
                        hash &= 0xfffff & ~(VSID_NBPW - 1);
                        hash |= i;
                }
                KASSERT(!(moea_vsid_bitmap[n] & mask),
                    ("Allocating in-use VSID group %#x\n", hash));
                moea_vsid_bitmap[n] |= mask;
                for (i = 0; i < 16; i++)
                        pmap->pm_sr[i] = VSID_MAKE(i, hash);
                mtx_unlock(&moea_vsid_mutex);
                return;
        }

        mtx_unlock(&moea_vsid_mutex);
        panic("moea_pinit: out of segments");
}

/*
 * Initialize the pmap associated with process 0.
 */
void
moea_pinit0(mmu_t mmu, pmap_t pm)
{

        moea_pinit(mmu, pm);
        bzero(&pm->pm_stats, sizeof(pm->pm_stats));
}

/*
 * Set the physical protection on the specified range of this map as requested.
 */
void
moea_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
    vm_prot_t prot)
{
        struct pvo_entry *pvo, *tpvo, key;
        struct pte *pt;

        KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
            ("moea_protect: non current pmap"));

        if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
                moea_remove(mmu, pm, sva, eva);
                return;
        }

        rw_wlock(&pvh_global_lock);
        PMAP_LOCK(pm);
        key.pvo_vaddr = sva;
        for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
            pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
                tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
                if ((prot & VM_PROT_EXECUTE) == 0)
                        pvo->pvo_vaddr &= ~PVO_EXECUTABLE;

                /*
                 * Grab the PTE pointer before we diddle with the cached PTE
                 * copy.
                 */
                pt = moea_pvo_to_pte(pvo, -1);
                /*
                 * Change the protection of the page.
                 */
                pvo->pvo_pte.pte.pte_lo &= ~PTE_PP;
                pvo->pvo_pte.pte.pte_lo |= PTE_BR;

                /*
                 * If the PVO is in the page table, update that pte as well.
                 */
                if (pt != NULL) {
                        moea_pte_change(pt, &pvo->pvo_pte.pte, pvo->pvo_vaddr);
                        mtx_unlock(&moea_table_mutex);
                }
        }
        rw_wunlock(&pvh_global_lock);
        PMAP_UNLOCK(pm);
}

/*
 * Map a list of wired pages into kernel virtual address space.  This is
 * intended for temporary mappings which do not need page modification or
 * references recorded.  Existing mappings in the region are overwritten.
 */
void
moea_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
{
        vm_offset_t va;

        va = sva;
        while (count-- > 0) {
                moea_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
                va += PAGE_SIZE;
                m++;
        }
}

/*
 * Remove page mappings from kernel virtual address space.  Intended for
 * temporary mappings entered by moea_qenter.
 */
void
moea_qremove(mmu_t mmu, vm_offset_t sva, int count)
{
        vm_offset_t va;

        va = sva;
        while (count-- > 0) {
                moea_kremove(mmu, va);
                va += PAGE_SIZE;
        }
}

void
moea_release(mmu_t mmu, pmap_t pmap)
{
        int idx, mask;

        /*
         * Free segment register's VSID
         */
        if (pmap->pm_sr[0] == 0)
                panic("moea_release");

        mtx_lock(&moea_vsid_mutex);
        idx = VSID_TO_HASH(pmap->pm_sr[0]) & (NPMAPS-1);
        mask = 1 << (idx % VSID_NBPW);
        idx /= VSID_NBPW;
        moea_vsid_bitmap[idx] &= ~mask;
        mtx_unlock(&moea_vsid_mutex);
        PMAP_LOCK_DESTROY(pmap);
}

/*
 * Remove the given range of addresses from the specified map.
 */
void
moea_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
        struct pvo_entry *pvo, *tpvo, key;

        rw_wlock(&pvh_global_lock);
        PMAP_LOCK(pm);
        key.pvo_vaddr = sva;
        for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
            pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
                tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
                moea_pvo_remove(pvo, -1);
        }
        PMAP_UNLOCK(pm);
        rw_wunlock(&pvh_global_lock);
}

/*
 * Remove physical page from all pmaps in which it resides.  moea_pvo_remove()
 * will reflect changes in pte's back to the vm_page.
 */
void
moea_remove_all(mmu_t mmu, vm_page_t m)
{
        struct pvo_head *pvo_head;
        struct pvo_entry *pvo, *next_pvo;
        pmap_t pmap;

        rw_wlock(&pvh_global_lock);
        pvo_head = vm_page_to_pvoh(m);
        for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
                next_pvo = LIST_NEXT(pvo, pvo_vlink);

                pmap = pvo->pvo_pmap;
                PMAP_LOCK(pmap);
                moea_pvo_remove(pvo, -1);
                PMAP_UNLOCK(pmap);
        }
        if ((m->aflags & PGA_WRITEABLE) && moea_query_bit(m, PTE_CHG)) {
                moea_attr_clear(m, PTE_CHG);
                vm_page_dirty(m);
        }
        vm_page_aflag_clear(m, PGA_WRITEABLE);
        rw_wunlock(&pvh_global_lock);
}
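/*
 * phys_avail[] is the usual FreeBSD layout: (start, end) pairs of
 * available physical memory terminated by a zero entry, so e.g.
 * { 0x3000, 0x8000000, 0, 0 } describes one free range.  The allocator
 * below carves pages off these ranges, splitting a range in two when an
 * aligned allocation has to come from its middle.
 */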
/*
 * Allocate a physical page of memory directly from the phys_avail map.
 * Can only be called from moea_bootstrap before avail start and end are
 * calculated.
 */
static vm_offset_t
moea_bootstrap_alloc(vm_size_t size, u_int align)
{
        vm_offset_t s, e;
        int i, j;

        size = round_page(size);
        for (i = 0; phys_avail[i + 1] != 0; i += 2) {
                if (align != 0)
                        s = (phys_avail[i] + align - 1) & ~(align - 1);
                else
                        s = phys_avail[i];
                e = s + size;

                if (s < phys_avail[i] || e > phys_avail[i + 1])
                        continue;

                if (s == phys_avail[i]) {
                        phys_avail[i] += size;
                } else if (e == phys_avail[i + 1]) {
                        phys_avail[i + 1] -= size;
                } else {
                        for (j = phys_avail_count * 2; j > i; j -= 2) {
                                phys_avail[j] = phys_avail[j - 2];
                                phys_avail[j + 1] = phys_avail[j - 1];
                        }

                        phys_avail[i + 3] = phys_avail[i + 1];
                        phys_avail[i + 1] = s;
                        phys_avail[i + 2] = e;
                        phys_avail_count++;
                }

                return (s);
        }
        panic("moea_bootstrap_alloc: could not allocate memory");
}

static void
moea_syncicache(vm_offset_t pa, vm_size_t len)
{
        __syncicache((void *)pa, len);
}

static int
moea_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
    vm_offset_t va, vm_offset_t pa, u_int pte_lo, int flags)
{
        struct pvo_entry *pvo;
        u_int sr;
        int first;
        u_int ptegidx;
        int i;
        int bootstrap;

        moea_pvo_enter_calls++;
        first = 0;
        bootstrap = 0;

        /*
         * Compute the PTE Group index.
         */
        va &= ~ADDR_POFF;
        sr = va_to_sr(pm->pm_sr, va);
        ptegidx = va_to_pteg(sr, va);

        /*
         * Remove any existing mapping for this page.  Reuse the pvo entry if
         * there is a mapping.
         */
        mtx_lock(&moea_table_mutex);
        LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) {
                if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
                        if ((pvo->pvo_pte.pte.pte_lo & PTE_RPGN) == pa &&
                            (pvo->pvo_pte.pte.pte_lo & PTE_PP) ==
                            (pte_lo & PTE_PP)) {
                                mtx_unlock(&moea_table_mutex);
                                return (0);
                        }
                        moea_pvo_remove(pvo, -1);
                        break;
                }
        }

        /*
         * If we aren't overwriting a mapping, try to allocate.
         */
        if (moea_initialized) {
                pvo = uma_zalloc(zone, M_NOWAIT);
        } else {
                if (moea_bpvo_pool_index >= BPVO_POOL_SIZE) {
                        panic("moea_enter: bpvo pool exhausted, %d, %d, %d",
                            moea_bpvo_pool_index, BPVO_POOL_SIZE,
                            BPVO_POOL_SIZE * sizeof(struct pvo_entry));
                }
                pvo = &moea_bpvo_pool[moea_bpvo_pool_index];
                moea_bpvo_pool_index++;
                bootstrap = 1;
        }

        if (pvo == NULL) {
                mtx_unlock(&moea_table_mutex);
                return (ENOMEM);
        }

        moea_pvo_entries++;
        pvo->pvo_vaddr = va;
        pvo->pvo_pmap = pm;
        LIST_INSERT_HEAD(&moea_pvo_table[ptegidx], pvo, pvo_olink);
        pvo->pvo_vaddr &= ~ADDR_POFF;
        if (flags & VM_PROT_EXECUTE)
                pvo->pvo_vaddr |= PVO_EXECUTABLE;
        if (flags & PVO_WIRED)
                pvo->pvo_vaddr |= PVO_WIRED;
        if (pvo_head != &moea_pvo_kunmanaged)
                pvo->pvo_vaddr |= PVO_MANAGED;
        if (bootstrap)
                pvo->pvo_vaddr |= PVO_BOOTSTRAP;

        moea_pte_create(&pvo->pvo_pte.pte, sr, va, pa | pte_lo);

        /*
         * Add to pmap list
         */
        RB_INSERT(pvo_tree, &pm->pmap_pvo, pvo);

        /*
         * Remember if the list was empty and therefore will be the first
         * item.
         */
	 */
	if (LIST_FIRST(pvo_head) == NULL)
		first = 1;
	LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);

	if (pvo->pvo_vaddr & PVO_WIRED)
		pm->pm_stats.wired_count++;
	pm->pm_stats.resident_count++;

	/*
	 * Insert the PTE into the page table.  An overflow of both PTEG
	 * hash buckets is currently treated as fatal.
	 */
	i = moea_pte_insert(ptegidx, &pvo->pvo_pte.pte);
	if (i >= 0) {
		PVO_PTEGIDX_SET(pvo, i);
	} else {
		panic("moea_pvo_enter: overflow");
	}
	mtx_unlock(&moea_table_mutex);

	return (first ? ENOENT : 0);
}

static void
moea_pvo_remove(struct pvo_entry *pvo, int pteidx)
{
	struct pte *pt;

	/*
	 * If there is an active pte entry, we need to deactivate it (and
	 * save the REF & CHG bits).
	 */
	pt = moea_pvo_to_pte(pvo, pteidx);
	if (pt != NULL) {
		moea_pte_unset(pt, &pvo->pvo_pte.pte, pvo->pvo_vaddr);
		mtx_unlock(&moea_table_mutex);
		PVO_PTEGIDX_CLR(pvo);
	} else {
		moea_pte_overflow--;
	}

	/*
	 * Update our statistics.
	 */
	pvo->pvo_pmap->pm_stats.resident_count--;
	if (pvo->pvo_vaddr & PVO_WIRED)
		pvo->pvo_pmap->pm_stats.wired_count--;

	/*
	 * Save the REF/CHG bits into their cache if the page is managed.
	 */
	if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED) {
		struct vm_page *pg;

		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte.pte_lo & PTE_RPGN);
		if (pg != NULL) {
			moea_attr_save(pg, pvo->pvo_pte.pte.pte_lo &
			    (PTE_REF | PTE_CHG));
		}
	}

	/*
	 * Remove this PVO from the PV and pmap lists.
	 */
	LIST_REMOVE(pvo, pvo_vlink);
	RB_REMOVE(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo);

	/*
	 * Remove this from the overflow list and return it to the pool
	 * if we aren't going to reuse it.
	 */
	LIST_REMOVE(pvo, pvo_olink);
	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
		uma_zfree(pvo->pvo_vaddr & PVO_MANAGED ? moea_mpvo_zone :
		    moea_upvo_zone, pvo);
	moea_pvo_entries--;
	moea_pvo_remove_calls++;
}

static __inline int
moea_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
{
	int pteidx;

	/*
	 * We can find the actual pte entry without searching by grabbing
	 * the PTE's slot within its PTEG from 3 otherwise-unused low bits
	 * of pvo_vaddr and by noticing the HID bit.
	 */
	pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
	if (pvo->pvo_pte.pte.pte_hi & PTE_HID)
		pteidx ^= moea_pteg_mask * 8;

	return (pteidx);
}

static struct pvo_entry *
moea_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p)
{
	struct pvo_entry *pvo;
	int ptegidx;
	u_int sr;

	va &= ~ADDR_POFF;
	sr = va_to_sr(pm->pm_sr, va);
	ptegidx = va_to_pteg(sr, va);

	mtx_lock(&moea_table_mutex);
	LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) {
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
			if (pteidx_p)
				*pteidx_p = moea_pvo_pte_index(pvo, ptegidx);
			break;
		}
	}
	mtx_unlock(&moea_table_mutex);

	return (pvo);
}

static struct pte *
moea_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
{
	struct pte *pt;

	/*
	 * If we haven't been supplied the ptegidx, calculate it.
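	 * The combined index encodes both the PTE group (pteidx >> 3) and
	 * the slot within that group (pteidx & 7); e.g. pteidx 0x1ac3 names
	 * slot 3 of PTEG 0x358 in moea_pteg_table below.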
	 */
	if (pteidx == -1) {
		int ptegidx;
		u_int sr;

		sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr);
		ptegidx = va_to_pteg(sr, pvo->pvo_vaddr);
		pteidx = moea_pvo_pte_index(pvo, ptegidx);
	}

	pt = &moea_pteg_table[pteidx >> 3].pt[pteidx & 7];
	mtx_lock(&moea_table_mutex);

	if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
		panic("moea_pvo_to_pte: pvo %p has valid pte in pvo but no "
		    "valid pte index", pvo);
	}

	if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0 &&
	    PVO_PTEGIDX_ISSET(pvo)) {
		panic("moea_pvo_to_pte: pvo %p has valid pte index in pvo "
		    "but no valid pte", pvo);
	}

	if ((pt->pte_hi ^ (pvo->pvo_pte.pte.pte_hi & ~PTE_VALID)) ==
	    PTE_VALID) {
		if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0) {
			panic("moea_pvo_to_pte: pvo %p has valid pte in "
			    "moea_pteg_table %p but invalid in pvo", pvo, pt);
		}

		if (((pt->pte_lo ^ pvo->pvo_pte.pte.pte_lo) &
		    ~(PTE_CHG|PTE_REF)) != 0) {
			panic("moea_pvo_to_pte: pvo %p pte does not match "
			    "pte %p in moea_pteg_table", pvo, pt);
		}

		mtx_assert(&moea_table_mutex, MA_OWNED);
		return (pt);
	}

	if (pvo->pvo_pte.pte.pte_hi & PTE_VALID) {
		panic("moea_pvo_to_pte: pvo %p has invalid pte %p in "
		    "moea_pteg_table but valid in pvo", pvo, pt);
	}

	mtx_unlock(&moea_table_mutex);
	return (NULL);
}

/*
 * XXX: This stuff should probably live in pte.c.
 */
int
moea_pte_spill(vm_offset_t addr)
{
	struct pvo_entry *source_pvo, *victim_pvo;
	struct pvo_entry *pvo;
	int ptegidx, i, j;
	u_int sr;
	struct pteg *pteg;
	struct pte *pt;

	moea_pte_spills++;

	sr = mfsrin(addr);
	ptegidx = va_to_pteg(sr, addr);

	/*
	 * Have to substitute some entry.  Use the primary hash for this.
	 * Use the low bits of the timebase as a pseudo-random slot picker.
	 */
	pteg = &moea_pteg_table[ptegidx];
	mtx_lock(&moea_table_mutex);
	__asm __volatile("mftb %0" : "=r"(i));
	i &= 7;
	pt = &pteg->pt[i];

	source_pvo = NULL;
	victim_pvo = NULL;
	LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) {
		/*
		 * We need to find a pvo entry for this address.
		 */
		if (source_pvo == NULL &&
		    moea_pte_match(&pvo->pvo_pte.pte, sr, addr,
		    pvo->pvo_pte.pte.pte_hi & PTE_HID)) {
			/*
			 * We have found an entry to be spilled into the
			 * pteg.  If the PTE can be inserted directly, it is
			 * now valid and we know it's active.
			 */
			j = moea_pte_insert(ptegidx, &pvo->pvo_pte.pte);

			if (j >= 0) {
				PVO_PTEGIDX_SET(pvo, j);
				moea_pte_overflow--;
				mtx_unlock(&moea_table_mutex);
				return (1);
			}

			source_pvo = pvo;

			if (victim_pvo != NULL)
				break;
		}

		/*
		 * We also need the pvo entry of the victim we are replacing
		 * so we can save the R & C bits of its PTE.
		 */
		if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
		    moea_pte_compare(pt, &pvo->pvo_pte.pte)) {
			victim_pvo = pvo;
			if (source_pvo != NULL)
				break;
		}
	}

	if (source_pvo == NULL) {
		mtx_unlock(&moea_table_mutex);
		return (0);
	}

	if (victim_pvo == NULL) {
		if ((pt->pte_hi & PTE_HID) == 0)
			panic("moea_pte_spill: victim p-pte (%p) has no "
			    "pvo entry", pt);

		/*
		 * If this is a secondary PTE, we need to search its primary
		 * pvo bucket for the matching PVO.
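		 * PVOs are always linked into the bucket of their primary
		 * hash (see moea_pvo_enter()), so for a PTE installed via
		 * the secondary hash (PTE_HID set) the owning PVO lives
		 * under ptegidx ^ moea_pteg_mask.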
		 */
		LIST_FOREACH(pvo, &moea_pvo_table[ptegidx ^ moea_pteg_mask],
		    pvo_olink) {
			/*
			 * We also need the pvo entry of the victim we are
			 * replacing so we can save the R & C bits of its
			 * PTE.
			 */
			if (moea_pte_compare(pt, &pvo->pvo_pte.pte)) {
				victim_pvo = pvo;
				break;
			}
		}

		if (victim_pvo == NULL)
			panic("moea_pte_spill: victim s-pte (%p) has no "
			    "pvo entry", pt);
	}

	/*
	 * We are invalidating the TLB entry for the EA we are replacing even
	 * though it's valid.  If we don't, we lose any ref/chg bit changes
	 * contained in the TLB entry.
	 */
	source_pvo->pvo_pte.pte.pte_hi &= ~PTE_HID;

	moea_pte_unset(pt, &victim_pvo->pvo_pte.pte, victim_pvo->pvo_vaddr);
	moea_pte_set(pt, &source_pvo->pvo_pte.pte);

	PVO_PTEGIDX_CLR(victim_pvo);
	PVO_PTEGIDX_SET(source_pvo, i);
	moea_pte_replacements++;

	mtx_unlock(&moea_table_mutex);
	return (1);
}

static int
moea_pte_insert(u_int ptegidx, struct pte *pvo_pt)
{
	struct pte *pt;
	int i;

	mtx_assert(&moea_table_mutex, MA_OWNED);

	/*
	 * First try the primary hash.
	 */
	for (pt = moea_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi &= ~PTE_HID;
			moea_pte_set(pt, pvo_pt);
			return (i);
		}
	}

	/*
	 * Now try the secondary hash.
	 */
	ptegidx ^= moea_pteg_mask;

	for (pt = moea_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi |= PTE_HID;
			moea_pte_set(pt, pvo_pt);
			return (i);
		}
	}

	panic("moea_pte_insert: overflow");
	/* NOTREACHED */
	return (-1);
}

static boolean_t
moea_query_bit(vm_page_t m, int ptebit)
{
	struct pvo_entry *pvo;
	struct pte *pt;

	rw_assert(&pvh_global_lock, RA_WLOCKED);
	if (moea_attr_fetch(m) & ptebit)
		return (TRUE);

	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		/*
		 * See if we saved the bit off.  If so, cache it and return
		 * success.
		 */
		if (pvo->pvo_pte.pte.pte_lo & ptebit) {
			moea_attr_save(m, ptebit);
			return (TRUE);
		}
	}

	/*
	 * No luck, now go through the hard part of looking at the PTEs
	 * themselves.  Sync so that any pending REF/CHG bits are flushed to
	 * the PTEs.
	 */
	powerpc_sync();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		/*
		 * See if this pvo has a valid PTE.  If so, fetch the
		 * REF/CHG bits from the valid PTE.  If the appropriate
		 * ptebit is set, cache it and return success.
		 */
		pt = moea_pvo_to_pte(pvo, -1);
		if (pt != NULL) {
			moea_pte_synch(pt, &pvo->pvo_pte.pte);
			mtx_unlock(&moea_table_mutex);
			if (pvo->pvo_pte.pte.pte_lo & ptebit) {
				moea_attr_save(m, ptebit);
				return (TRUE);
			}
		}
	}

	return (FALSE);
}

static u_int
moea_clear_bit(vm_page_t m, int ptebit)
{
	u_int count;
	struct pvo_entry *pvo;
	struct pte *pt;

	rw_assert(&pvh_global_lock, RA_WLOCKED);

	/*
	 * Clear the cached value.
	 */
	moea_attr_clear(m, ptebit);

	/*
	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
	 * we can reset the right ones).
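	 * powerpc_sync() emits a full "sync" instruction, ordering any
	 * earlier hardware R/C updates ahead of the table walk below.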
	 * Note that since the pvo entries and list heads are accessed via
	 * BAT0 and are never placed in the page table, we don't have to
	 * worry about further accesses setting the REF/CHG bits.
	 */
	powerpc_sync();

	/*
	 * For each pvo entry, clear the pvo's ptebit.  If this pvo has a
	 * valid pte, clear the ptebit from the valid pte as well.
	 */
	count = 0;
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		pt = moea_pvo_to_pte(pvo, -1);
		if (pt != NULL) {
			moea_pte_synch(pt, &pvo->pvo_pte.pte);
			if (pvo->pvo_pte.pte.pte_lo & ptebit) {
				count++;
				moea_pte_clear(pt, PVO_VADDR(pvo), ptebit);
			}
			mtx_unlock(&moea_table_mutex);
		}
		pvo->pvo_pte.pte.pte_lo &= ~ptebit;
	}

	return (count);
}

/*
 * Return 0 if the physical range [pa, pa + size) is covered by
 * battable[idx]; otherwise return an errno value describing why it is not.
 */
static int
moea_bat_mapped(int idx, vm_offset_t pa, vm_size_t size)
{
	u_int prot;
	u_int32_t start;
	u_int32_t end;
	u_int32_t bat_ble;

	/*
	 * Return immediately if it is not a valid mapping.
	 */
	if (!(battable[idx].batu & BAT_Vs))
		return (EINVAL);

	/*
	 * The BAT entry must be cache-inhibited, guarded, and r/w
	 * so it can function as an I/O page.
	 */
	prot = battable[idx].batl & (BAT_I|BAT_G|BAT_PP_RW);
	if (prot != (BAT_I|BAT_G|BAT_PP_RW))
		return (EPERM);

	/*
	 * The address should be within the BAT range.  Assume that the
	 * start address in the BAT has the correct alignment (thus
	 * not requiring masking).
	 */
	start = battable[idx].batl & BAT_PBS;
	bat_ble = (battable[idx].batu & ~(BAT_EBS)) | 0x03;
	end = start | (bat_ble << 15) | 0x7fff;

	if ((pa < start) || ((pa + size) > end))
		return (ERANGE);

	return (0);
}

boolean_t
moea_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
{
	int i;

	/*
	 * Returns 0 when some BAT provides a 1:1 mapping of the whole range
	 * and EFAULT otherwise.  This currently does not work for entries
	 * that overlap 256M BAT segments.
	 */
	for (i = 0; i < 16; i++)
		if (moea_bat_mapped(i, pa, size) == 0)
			return (0);

	return (EFAULT);
}

/*
 * Map a set of physical memory pages into the kernel virtual
 * address space.  Return a pointer to where it is mapped.  This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 */
void *
moea_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
{

	return (moea_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
}

void *
moea_mapdev_attr(mmu_t mmu, vm_offset_t pa, vm_size_t size, vm_memattr_t ma)
{
	vm_offset_t va, tmpva, ppa, offset;
	int i;

	ppa = trunc_page(pa);
	offset = pa & PAGE_MASK;
	size = roundup(offset + size, PAGE_SIZE);

	/*
	 * If the physical address lies within a valid BAT table entry,
	 * return the 1:1 mapping.  This currently doesn't work
	 * for regions that overlap 256M BAT segments.
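	 * A hit means the BAT hardware already provides an identity
	 * mapping, so the physical address itself is handed back as the
	 * virtual address.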
	 */
	for (i = 0; i < 16; i++) {
		if (moea_bat_mapped(i, pa, size) == 0)
			return ((void *)pa);
	}

	va = kmem_alloc_nofault(kernel_map, size);
	if (!va)
		panic("moea_mapdev: Couldn't alloc kernel virtual memory");

	for (tmpva = va; size > 0;) {
		moea_kenter_attr(mmu, tmpva, ppa, ma);
		tlbie(tmpva);
		size -= PAGE_SIZE;
		tmpva += PAGE_SIZE;
		ppa += PAGE_SIZE;
	}

	return ((void *)(va + offset));
}

void
moea_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
{
	vm_offset_t base, offset;

	/*
	 * If this is outside kernel virtual space, then it's a
	 * battable entry and doesn't require unmapping.
	 */
	if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= virtual_end)) {
		base = trunc_page(va);
		offset = va & PAGE_MASK;
		size = roundup(offset + size, PAGE_SIZE);
		kmem_free(kernel_map, base, size);
	}
}

static void
moea_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
{
	struct pvo_entry *pvo;
	vm_offset_t lim;
	vm_paddr_t pa;
	vm_size_t len;

	PMAP_LOCK(pm);
	while (sz > 0) {
		/*
		 * Process at most one page per iteration.  Use
		 * round_page(va + 1) so that a page-aligned va still yields
		 * a non-zero length instead of looping forever.
		 */
		lim = round_page(va + 1);
		len = MIN(lim - va, sz);
		pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
		if (pvo != NULL) {
			pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) |
			    (va & ADDR_POFF);
			moea_syncicache(pa, len);
		}
		va += len;
		sz -= len;
	}
	PMAP_UNLOCK(pm);
}