1 /*- 2 * Copyright (c) 2001 The NetBSD Foundation, Inc. 3 * All rights reserved. 4 * 5 * This code is derived from software contributed to The NetBSD Foundation 6 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by the NetBSD 19 * Foundation, Inc. and its contributors. 20 * 4. Neither the name of The NetBSD Foundation nor the names of its 21 * contributors may be used to endorse or promote products derived 22 * from this software without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 35 */ 36 /*- 37 * Copyright (C) 1995, 1996 Wolfgang Solfrank. 38 * Copyright (C) 1995, 1996 TooLs GmbH. 39 * All rights reserved. 40 * 41 * Redistribution and use in source and binary forms, with or without 42 * modification, are permitted provided that the following conditions 43 * are met: 44 * 1. Redistributions of source code must retain the above copyright 45 * notice, this list of conditions and the following disclaimer. 46 * 2. Redistributions in binary form must reproduce the above copyright 47 * notice, this list of conditions and the following disclaimer in the 48 * documentation and/or other materials provided with the distribution. 49 * 3. All advertising materials mentioning features or use of this software 50 * must display the following acknowledgement: 51 * This product includes software developed by TooLs GmbH. 52 * 4. The name of TooLs GmbH may not be used to endorse or promote products 53 * derived from this software without specific prior written permission. 54 * 55 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR 56 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 57 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 58 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 60 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 61 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 62 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 63 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 64 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 65 * 66 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $ 67 */ 68 /*- 69 * Copyright (C) 2001 Benno Rice. 70 * All rights reserved. 71 * 72 * Redistribution and use in source and binary forms, with or without 73 * modification, are permitted provided that the following conditions 74 * are met: 75 * 1. Redistributions of source code must retain the above copyright 76 * notice, this list of conditions and the following disclaimer. 77 * 2. Redistributions in binary form must reproduce the above copyright 78 * notice, this list of conditions and the following disclaimer in the 79 * documentation and/or other materials provided with the distribution. 80 * 81 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR 82 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 83 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 84 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 85 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 86 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 87 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 88 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 89 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 90 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 91 */ 92 93 #include <sys/cdefs.h> 94 __FBSDID("$FreeBSD$"); 95 96 /* 97 * Manages physical address maps. 98 * 99 * Since the information managed by this module is also stored by the 100 * logical address mapping module, this module may throw away valid virtual 101 * to physical mappings at almost any time. However, invalidations of 102 * mappings must be done as requested. 103 * 104 * In order to cope with hardware architectures which make virtual to 105 * physical map invalidates expensive, this module may delay invalidate 106 * reduced protection operations until such time as they are actually 107 * necessary. This module is given full information as to which processors 108 * are currently using which maps, and to when physical maps must be made 109 * correct. 110 */ 111 112 #include "opt_kstack_pages.h" 113 114 #include <sys/param.h> 115 #include <sys/kernel.h> 116 #include <sys/queue.h> 117 #include <sys/cpuset.h> 118 #include <sys/ktr.h> 119 #include <sys/lock.h> 120 #include <sys/msgbuf.h> 121 #include <sys/mutex.h> 122 #include <sys/proc.h> 123 #include <sys/rwlock.h> 124 #include <sys/sched.h> 125 #include <sys/sysctl.h> 126 #include <sys/systm.h> 127 #include <sys/vmmeter.h> 128 129 #include <dev/ofw/openfirm.h> 130 131 #include <vm/vm.h> 132 #include <vm/vm_param.h> 133 #include <vm/vm_kern.h> 134 #include <vm/vm_page.h> 135 #include <vm/vm_map.h> 136 #include <vm/vm_object.h> 137 #include <vm/vm_extern.h> 138 #include <vm/vm_pageout.h> 139 #include <vm/vm_pager.h> 140 #include <vm/uma.h> 141 142 #include <machine/cpu.h> 143 #include <machine/platform.h> 144 #include <machine/bat.h> 145 #include <machine/frame.h> 146 #include <machine/md_var.h> 147 #include <machine/psl.h> 148 #include <machine/pte.h> 149 #include <machine/smp.h> 150 #include <machine/sr.h> 151 #include <machine/mmuvar.h> 152 #include <machine/trap_aim.h> 153 154 #include "mmu_if.h" 155 156 #define MOEA_DEBUG 157 158 #define TODO panic("%s: not implemented", __func__); 159 160 #define VSID_MAKE(sr, hash) ((sr) | (((hash) & 0xfffff) << 4)) 161 #define VSID_TO_SR(vsid) ((vsid) & 0xf) 162 #define VSID_TO_HASH(vsid) (((vsid) >> 4) & 0xfffff) 163 164 struct ofw_map { 165 vm_offset_t om_va; 166 vm_size_t om_len; 167 vm_offset_t om_pa; 168 u_int om_mode; 169 }; 170 171 /* 172 * Map of physical memory regions. 173 */ 174 static struct mem_region *regions; 175 static struct mem_region *pregions; 176 static u_int phys_avail_count; 177 static int regions_sz, pregions_sz; 178 static struct ofw_map *translations; 179 180 /* 181 * Lock for the pteg and pvo tables. 182 */ 183 struct mtx moea_table_mutex; 184 struct mtx moea_vsid_mutex; 185 186 /* tlbie instruction synchronization */ 187 static struct mtx tlbie_mtx; 188 189 /* 190 * PTEG data. 191 */ 192 static struct pteg *moea_pteg_table; 193 u_int moea_pteg_count; 194 u_int moea_pteg_mask; 195 196 /* 197 * PVO data. 198 */ 199 struct pvo_head *moea_pvo_table; /* pvo entries by pteg index */ 200 struct pvo_head moea_pvo_kunmanaged = 201 LIST_HEAD_INITIALIZER(moea_pvo_kunmanaged); /* list of unmanaged pages */ 202 203 /* 204 * Isolate the global pv list lock from data and other locks to prevent false 205 * sharing within the cache. 206 */ 207 static struct { 208 struct rwlock lock; 209 char padding[CACHE_LINE_SIZE - sizeof(struct rwlock)]; 210 } pvh_global __aligned(CACHE_LINE_SIZE); 211 212 #define pvh_global_lock pvh_global.lock 213 214 uma_zone_t moea_upvo_zone; /* zone for pvo entries for unmanaged pages */ 215 uma_zone_t moea_mpvo_zone; /* zone for pvo entries for managed pages */ 216 217 #define BPVO_POOL_SIZE 32768 218 static struct pvo_entry *moea_bpvo_pool; 219 static int moea_bpvo_pool_index = 0; 220 221 #define VSID_NBPW (sizeof(u_int32_t) * 8) 222 static u_int moea_vsid_bitmap[NPMAPS / VSID_NBPW]; 223 224 static boolean_t moea_initialized = FALSE; 225 226 /* 227 * Statistics. 228 */ 229 u_int moea_pte_valid = 0; 230 u_int moea_pte_overflow = 0; 231 u_int moea_pte_replacements = 0; 232 u_int moea_pvo_entries = 0; 233 u_int moea_pvo_enter_calls = 0; 234 u_int moea_pvo_remove_calls = 0; 235 u_int moea_pte_spills = 0; 236 SYSCTL_INT(_machdep, OID_AUTO, moea_pte_valid, CTLFLAG_RD, &moea_pte_valid, 237 0, ""); 238 SYSCTL_INT(_machdep, OID_AUTO, moea_pte_overflow, CTLFLAG_RD, 239 &moea_pte_overflow, 0, ""); 240 SYSCTL_INT(_machdep, OID_AUTO, moea_pte_replacements, CTLFLAG_RD, 241 &moea_pte_replacements, 0, ""); 242 SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_entries, CTLFLAG_RD, &moea_pvo_entries, 243 0, ""); 244 SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_enter_calls, CTLFLAG_RD, 245 &moea_pvo_enter_calls, 0, ""); 246 SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_remove_calls, CTLFLAG_RD, 247 &moea_pvo_remove_calls, 0, ""); 248 SYSCTL_INT(_machdep, OID_AUTO, moea_pte_spills, CTLFLAG_RD, 249 &moea_pte_spills, 0, ""); 250 251 /* 252 * Allocate physical memory for use in moea_bootstrap. 253 */ 254 static vm_offset_t moea_bootstrap_alloc(vm_size_t, u_int); 255 256 /* 257 * PTE calls. 258 */ 259 static int moea_pte_insert(u_int, struct pte *); 260 261 /* 262 * PVO calls. 263 */ 264 static int moea_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *, 265 vm_offset_t, vm_offset_t, u_int, int); 266 static void moea_pvo_remove(struct pvo_entry *, int); 267 static struct pvo_entry *moea_pvo_find_va(pmap_t, vm_offset_t, int *); 268 static struct pte *moea_pvo_to_pte(const struct pvo_entry *, int); 269 270 /* 271 * Utility routines. 272 */ 273 static void moea_enter_locked(pmap_t, vm_offset_t, vm_page_t, 274 vm_prot_t, boolean_t); 275 static void moea_syncicache(vm_offset_t, vm_size_t); 276 static boolean_t moea_query_bit(vm_page_t, int); 277 static u_int moea_clear_bit(vm_page_t, int); 278 static void moea_kremove(mmu_t, vm_offset_t); 279 int moea_pte_spill(vm_offset_t); 280 281 /* 282 * Kernel MMU interface 283 */ 284 void moea_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t); 285 void moea_clear_modify(mmu_t, vm_page_t); 286 void moea_clear_reference(mmu_t, vm_page_t); 287 void moea_copy_page(mmu_t, vm_page_t, vm_page_t); 288 void moea_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t); 289 void moea_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t, 290 vm_prot_t); 291 void moea_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t); 292 vm_paddr_t moea_extract(mmu_t, pmap_t, vm_offset_t); 293 vm_page_t moea_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t); 294 void moea_init(mmu_t); 295 boolean_t moea_is_modified(mmu_t, vm_page_t); 296 boolean_t moea_is_prefaultable(mmu_t, pmap_t, vm_offset_t); 297 boolean_t moea_is_referenced(mmu_t, vm_page_t); 298 int moea_ts_referenced(mmu_t, vm_page_t); 299 vm_offset_t moea_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, int); 300 boolean_t moea_page_exists_quick(mmu_t, pmap_t, vm_page_t); 301 int moea_page_wired_mappings(mmu_t, vm_page_t); 302 void moea_pinit(mmu_t, pmap_t); 303 void moea_pinit0(mmu_t, pmap_t); 304 void moea_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t); 305 void moea_qenter(mmu_t, vm_offset_t, vm_page_t *, int); 306 void moea_qremove(mmu_t, vm_offset_t, int); 307 void moea_release(mmu_t, pmap_t); 308 void moea_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t); 309 void moea_remove_all(mmu_t, vm_page_t); 310 void moea_remove_write(mmu_t, vm_page_t); 311 void moea_zero_page(mmu_t, vm_page_t); 312 void moea_zero_page_area(mmu_t, vm_page_t, int, int); 313 void moea_zero_page_idle(mmu_t, vm_page_t); 314 void moea_activate(mmu_t, struct thread *); 315 void moea_deactivate(mmu_t, struct thread *); 316 void moea_cpu_bootstrap(mmu_t, int); 317 void moea_bootstrap(mmu_t, vm_offset_t, vm_offset_t); 318 void *moea_mapdev(mmu_t, vm_paddr_t, vm_size_t); 319 void *moea_mapdev_attr(mmu_t, vm_offset_t, vm_size_t, vm_memattr_t); 320 void moea_unmapdev(mmu_t, vm_offset_t, vm_size_t); 321 vm_paddr_t moea_kextract(mmu_t, vm_offset_t); 322 void moea_kenter_attr(mmu_t, vm_offset_t, vm_offset_t, vm_memattr_t); 323 void moea_kenter(mmu_t, vm_offset_t, vm_paddr_t); 324 void moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma); 325 boolean_t moea_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t); 326 static void moea_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t); 327 328 static mmu_method_t moea_methods[] = { 329 MMUMETHOD(mmu_change_wiring, moea_change_wiring), 330 MMUMETHOD(mmu_clear_modify, moea_clear_modify), 331 MMUMETHOD(mmu_clear_reference, moea_clear_reference), 332 MMUMETHOD(mmu_copy_page, moea_copy_page), 333 MMUMETHOD(mmu_enter, moea_enter), 334 MMUMETHOD(mmu_enter_object, moea_enter_object), 335 MMUMETHOD(mmu_enter_quick, moea_enter_quick), 336 MMUMETHOD(mmu_extract, moea_extract), 337 MMUMETHOD(mmu_extract_and_hold, moea_extract_and_hold), 338 MMUMETHOD(mmu_init, moea_init), 339 MMUMETHOD(mmu_is_modified, moea_is_modified), 340 MMUMETHOD(mmu_is_prefaultable, moea_is_prefaultable), 341 MMUMETHOD(mmu_is_referenced, moea_is_referenced), 342 MMUMETHOD(mmu_ts_referenced, moea_ts_referenced), 343 MMUMETHOD(mmu_map, moea_map), 344 MMUMETHOD(mmu_page_exists_quick,moea_page_exists_quick), 345 MMUMETHOD(mmu_page_wired_mappings,moea_page_wired_mappings), 346 MMUMETHOD(mmu_pinit, moea_pinit), 347 MMUMETHOD(mmu_pinit0, moea_pinit0), 348 MMUMETHOD(mmu_protect, moea_protect), 349 MMUMETHOD(mmu_qenter, moea_qenter), 350 MMUMETHOD(mmu_qremove, moea_qremove), 351 MMUMETHOD(mmu_release, moea_release), 352 MMUMETHOD(mmu_remove, moea_remove), 353 MMUMETHOD(mmu_remove_all, moea_remove_all), 354 MMUMETHOD(mmu_remove_write, moea_remove_write), 355 MMUMETHOD(mmu_sync_icache, moea_sync_icache), 356 MMUMETHOD(mmu_zero_page, moea_zero_page), 357 MMUMETHOD(mmu_zero_page_area, moea_zero_page_area), 358 MMUMETHOD(mmu_zero_page_idle, moea_zero_page_idle), 359 MMUMETHOD(mmu_activate, moea_activate), 360 MMUMETHOD(mmu_deactivate, moea_deactivate), 361 MMUMETHOD(mmu_page_set_memattr, moea_page_set_memattr), 362 363 /* Internal interfaces */ 364 MMUMETHOD(mmu_bootstrap, moea_bootstrap), 365 MMUMETHOD(mmu_cpu_bootstrap, moea_cpu_bootstrap), 366 MMUMETHOD(mmu_mapdev_attr, moea_mapdev_attr), 367 MMUMETHOD(mmu_mapdev, moea_mapdev), 368 MMUMETHOD(mmu_unmapdev, moea_unmapdev), 369 MMUMETHOD(mmu_kextract, moea_kextract), 370 MMUMETHOD(mmu_kenter, moea_kenter), 371 MMUMETHOD(mmu_kenter_attr, moea_kenter_attr), 372 MMUMETHOD(mmu_dev_direct_mapped,moea_dev_direct_mapped), 373 374 { 0, 0 } 375 }; 376 377 MMU_DEF(oea_mmu, MMU_TYPE_OEA, moea_methods, 0); 378 379 static __inline uint32_t 380 moea_calc_wimg(vm_offset_t pa, vm_memattr_t ma) 381 { 382 uint32_t pte_lo; 383 int i; 384 385 if (ma != VM_MEMATTR_DEFAULT) { 386 switch (ma) { 387 case VM_MEMATTR_UNCACHEABLE: 388 return (PTE_I | PTE_G); 389 case VM_MEMATTR_WRITE_COMBINING: 390 case VM_MEMATTR_WRITE_BACK: 391 case VM_MEMATTR_PREFETCHABLE: 392 return (PTE_I); 393 case VM_MEMATTR_WRITE_THROUGH: 394 return (PTE_W | PTE_M); 395 } 396 } 397 398 /* 399 * Assume the page is cache inhibited and access is guarded unless 400 * it's in our available memory array. 401 */ 402 pte_lo = PTE_I | PTE_G; 403 for (i = 0; i < pregions_sz; i++) { 404 if ((pa >= pregions[i].mr_start) && 405 (pa < (pregions[i].mr_start + pregions[i].mr_size))) { 406 pte_lo = PTE_M; 407 break; 408 } 409 } 410 411 return pte_lo; 412 } 413 414 static void 415 tlbie(vm_offset_t va) 416 { 417 418 mtx_lock_spin(&tlbie_mtx); 419 __asm __volatile("ptesync"); 420 __asm __volatile("tlbie %0" :: "r"(va)); 421 __asm __volatile("eieio; tlbsync; ptesync"); 422 mtx_unlock_spin(&tlbie_mtx); 423 } 424 425 static void 426 tlbia(void) 427 { 428 vm_offset_t va; 429 430 for (va = 0; va < 0x00040000; va += 0x00001000) { 431 __asm __volatile("tlbie %0" :: "r"(va)); 432 powerpc_sync(); 433 } 434 __asm __volatile("tlbsync"); 435 powerpc_sync(); 436 } 437 438 static __inline int 439 va_to_sr(u_int *sr, vm_offset_t va) 440 { 441 return (sr[(uintptr_t)va >> ADDR_SR_SHFT]); 442 } 443 444 static __inline u_int 445 va_to_pteg(u_int sr, vm_offset_t addr) 446 { 447 u_int hash; 448 449 hash = (sr & SR_VSID_MASK) ^ (((u_int)addr & ADDR_PIDX) >> 450 ADDR_PIDX_SHFT); 451 return (hash & moea_pteg_mask); 452 } 453 454 static __inline struct pvo_head * 455 vm_page_to_pvoh(vm_page_t m) 456 { 457 458 return (&m->md.mdpg_pvoh); 459 } 460 461 static __inline void 462 moea_attr_clear(vm_page_t m, int ptebit) 463 { 464 465 rw_assert(&pvh_global_lock, RA_WLOCKED); 466 m->md.mdpg_attrs &= ~ptebit; 467 } 468 469 static __inline int 470 moea_attr_fetch(vm_page_t m) 471 { 472 473 return (m->md.mdpg_attrs); 474 } 475 476 static __inline void 477 moea_attr_save(vm_page_t m, int ptebit) 478 { 479 480 rw_assert(&pvh_global_lock, RA_WLOCKED); 481 m->md.mdpg_attrs |= ptebit; 482 } 483 484 static __inline int 485 moea_pte_compare(const struct pte *pt, const struct pte *pvo_pt) 486 { 487 if (pt->pte_hi == pvo_pt->pte_hi) 488 return (1); 489 490 return (0); 491 } 492 493 static __inline int 494 moea_pte_match(struct pte *pt, u_int sr, vm_offset_t va, int which) 495 { 496 return (pt->pte_hi & ~PTE_VALID) == 497 (((sr & SR_VSID_MASK) << PTE_VSID_SHFT) | 498 ((va >> ADDR_API_SHFT) & PTE_API) | which); 499 } 500 501 static __inline void 502 moea_pte_create(struct pte *pt, u_int sr, vm_offset_t va, u_int pte_lo) 503 { 504 505 mtx_assert(&moea_table_mutex, MA_OWNED); 506 507 /* 508 * Construct a PTE. Default to IMB initially. Valid bit only gets 509 * set when the real pte is set in memory. 510 * 511 * Note: Don't set the valid bit for correct operation of tlb update. 512 */ 513 pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) | 514 (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API); 515 pt->pte_lo = pte_lo; 516 } 517 518 static __inline void 519 moea_pte_synch(struct pte *pt, struct pte *pvo_pt) 520 { 521 522 mtx_assert(&moea_table_mutex, MA_OWNED); 523 pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG); 524 } 525 526 static __inline void 527 moea_pte_clear(struct pte *pt, vm_offset_t va, int ptebit) 528 { 529 530 mtx_assert(&moea_table_mutex, MA_OWNED); 531 532 /* 533 * As shown in Section 7.6.3.2.3 534 */ 535 pt->pte_lo &= ~ptebit; 536 tlbie(va); 537 } 538 539 static __inline void 540 moea_pte_set(struct pte *pt, struct pte *pvo_pt) 541 { 542 543 mtx_assert(&moea_table_mutex, MA_OWNED); 544 pvo_pt->pte_hi |= PTE_VALID; 545 546 /* 547 * Update the PTE as defined in section 7.6.3.1. 548 * Note that the REF/CHG bits are from pvo_pt and thus should havce 549 * been saved so this routine can restore them (if desired). 550 */ 551 pt->pte_lo = pvo_pt->pte_lo; 552 powerpc_sync(); 553 pt->pte_hi = pvo_pt->pte_hi; 554 powerpc_sync(); 555 moea_pte_valid++; 556 } 557 558 static __inline void 559 moea_pte_unset(struct pte *pt, struct pte *pvo_pt, vm_offset_t va) 560 { 561 562 mtx_assert(&moea_table_mutex, MA_OWNED); 563 pvo_pt->pte_hi &= ~PTE_VALID; 564 565 /* 566 * Force the reg & chg bits back into the PTEs. 567 */ 568 powerpc_sync(); 569 570 /* 571 * Invalidate the pte. 572 */ 573 pt->pte_hi &= ~PTE_VALID; 574 575 tlbie(va); 576 577 /* 578 * Save the reg & chg bits. 579 */ 580 moea_pte_synch(pt, pvo_pt); 581 moea_pte_valid--; 582 } 583 584 static __inline void 585 moea_pte_change(struct pte *pt, struct pte *pvo_pt, vm_offset_t va) 586 { 587 588 /* 589 * Invalidate the PTE 590 */ 591 moea_pte_unset(pt, pvo_pt, va); 592 moea_pte_set(pt, pvo_pt); 593 } 594 595 /* 596 * Quick sort callout for comparing memory regions. 597 */ 598 static int om_cmp(const void *a, const void *b); 599 600 static int 601 om_cmp(const void *a, const void *b) 602 { 603 const struct ofw_map *mapa; 604 const struct ofw_map *mapb; 605 606 mapa = a; 607 mapb = b; 608 if (mapa->om_pa < mapb->om_pa) 609 return (-1); 610 else if (mapa->om_pa > mapb->om_pa) 611 return (1); 612 else 613 return (0); 614 } 615 616 void 617 moea_cpu_bootstrap(mmu_t mmup, int ap) 618 { 619 u_int sdr; 620 int i; 621 622 if (ap) { 623 powerpc_sync(); 624 __asm __volatile("mtdbatu 0,%0" :: "r"(battable[0].batu)); 625 __asm __volatile("mtdbatl 0,%0" :: "r"(battable[0].batl)); 626 isync(); 627 __asm __volatile("mtibatu 0,%0" :: "r"(battable[0].batu)); 628 __asm __volatile("mtibatl 0,%0" :: "r"(battable[0].batl)); 629 isync(); 630 } 631 632 __asm __volatile("mtdbatu 1,%0" :: "r"(battable[8].batu)); 633 __asm __volatile("mtdbatl 1,%0" :: "r"(battable[8].batl)); 634 isync(); 635 636 __asm __volatile("mtibatu 1,%0" :: "r"(0)); 637 __asm __volatile("mtdbatu 2,%0" :: "r"(0)); 638 __asm __volatile("mtibatu 2,%0" :: "r"(0)); 639 __asm __volatile("mtdbatu 3,%0" :: "r"(0)); 640 __asm __volatile("mtibatu 3,%0" :: "r"(0)); 641 isync(); 642 643 for (i = 0; i < 16; i++) 644 mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]); 645 powerpc_sync(); 646 647 sdr = (u_int)moea_pteg_table | (moea_pteg_mask >> 10); 648 __asm __volatile("mtsdr1 %0" :: "r"(sdr)); 649 isync(); 650 651 tlbia(); 652 } 653 654 void 655 moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend) 656 { 657 ihandle_t mmui; 658 phandle_t chosen, mmu; 659 int sz; 660 int i, j; 661 vm_size_t size, physsz, hwphyssz; 662 vm_offset_t pa, va, off; 663 void *dpcpu; 664 register_t msr; 665 666 /* 667 * Set up BAT0 to map the lowest 256 MB area 668 */ 669 battable[0x0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW); 670 battable[0x0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs); 671 672 /* 673 * Map PCI memory space. 674 */ 675 battable[0x8].batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW); 676 battable[0x8].batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs); 677 678 battable[0x9].batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW); 679 battable[0x9].batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs); 680 681 battable[0xa].batl = BATL(0xa0000000, BAT_I|BAT_G, BAT_PP_RW); 682 battable[0xa].batu = BATU(0xa0000000, BAT_BL_256M, BAT_Vs); 683 684 battable[0xb].batl = BATL(0xb0000000, BAT_I|BAT_G, BAT_PP_RW); 685 battable[0xb].batu = BATU(0xb0000000, BAT_BL_256M, BAT_Vs); 686 687 /* 688 * Map obio devices. 689 */ 690 battable[0xf].batl = BATL(0xf0000000, BAT_I|BAT_G, BAT_PP_RW); 691 battable[0xf].batu = BATU(0xf0000000, BAT_BL_256M, BAT_Vs); 692 693 /* 694 * Use an IBAT and a DBAT to map the bottom segment of memory 695 * where we are. Turn off instruction relocation temporarily 696 * to prevent faults while reprogramming the IBAT. 697 */ 698 msr = mfmsr(); 699 mtmsr(msr & ~PSL_IR); 700 __asm (".balign 32; \n" 701 "mtibatu 0,%0; mtibatl 0,%1; isync; \n" 702 "mtdbatu 0,%0; mtdbatl 0,%1; isync" 703 :: "r"(battable[0].batu), "r"(battable[0].batl)); 704 mtmsr(msr); 705 706 /* map pci space */ 707 __asm __volatile("mtdbatu 1,%0" :: "r"(battable[8].batu)); 708 __asm __volatile("mtdbatl 1,%0" :: "r"(battable[8].batl)); 709 isync(); 710 711 /* set global direct map flag */ 712 hw_direct_map = 1; 713 714 mem_regions(&pregions, &pregions_sz, ®ions, ®ions_sz); 715 CTR0(KTR_PMAP, "moea_bootstrap: physical memory"); 716 717 for (i = 0; i < pregions_sz; i++) { 718 vm_offset_t pa; 719 vm_offset_t end; 720 721 CTR3(KTR_PMAP, "physregion: %#x - %#x (%#x)", 722 pregions[i].mr_start, 723 pregions[i].mr_start + pregions[i].mr_size, 724 pregions[i].mr_size); 725 /* 726 * Install entries into the BAT table to allow all 727 * of physmem to be convered by on-demand BAT entries. 728 * The loop will sometimes set the same battable element 729 * twice, but that's fine since they won't be used for 730 * a while yet. 731 */ 732 pa = pregions[i].mr_start & 0xf0000000; 733 end = pregions[i].mr_start + pregions[i].mr_size; 734 do { 735 u_int n = pa >> ADDR_SR_SHFT; 736 737 battable[n].batl = BATL(pa, BAT_M, BAT_PP_RW); 738 battable[n].batu = BATU(pa, BAT_BL_256M, BAT_Vs); 739 pa += SEGMENT_LENGTH; 740 } while (pa < end); 741 } 742 743 if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz) 744 panic("moea_bootstrap: phys_avail too small"); 745 746 phys_avail_count = 0; 747 physsz = 0; 748 hwphyssz = 0; 749 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz); 750 for (i = 0, j = 0; i < regions_sz; i++, j += 2) { 751 CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start, 752 regions[i].mr_start + regions[i].mr_size, 753 regions[i].mr_size); 754 if (hwphyssz != 0 && 755 (physsz + regions[i].mr_size) >= hwphyssz) { 756 if (physsz < hwphyssz) { 757 phys_avail[j] = regions[i].mr_start; 758 phys_avail[j + 1] = regions[i].mr_start + 759 hwphyssz - physsz; 760 physsz = hwphyssz; 761 phys_avail_count++; 762 } 763 break; 764 } 765 phys_avail[j] = regions[i].mr_start; 766 phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size; 767 phys_avail_count++; 768 physsz += regions[i].mr_size; 769 } 770 771 /* Check for overlap with the kernel and exception vectors */ 772 for (j = 0; j < 2*phys_avail_count; j+=2) { 773 if (phys_avail[j] < EXC_LAST) 774 phys_avail[j] += EXC_LAST; 775 776 if (kernelstart >= phys_avail[j] && 777 kernelstart < phys_avail[j+1]) { 778 if (kernelend < phys_avail[j+1]) { 779 phys_avail[2*phys_avail_count] = 780 (kernelend & ~PAGE_MASK) + PAGE_SIZE; 781 phys_avail[2*phys_avail_count + 1] = 782 phys_avail[j+1]; 783 phys_avail_count++; 784 } 785 786 phys_avail[j+1] = kernelstart & ~PAGE_MASK; 787 } 788 789 if (kernelend >= phys_avail[j] && 790 kernelend < phys_avail[j+1]) { 791 if (kernelstart > phys_avail[j]) { 792 phys_avail[2*phys_avail_count] = phys_avail[j]; 793 phys_avail[2*phys_avail_count + 1] = 794 kernelstart & ~PAGE_MASK; 795 phys_avail_count++; 796 } 797 798 phys_avail[j] = (kernelend & ~PAGE_MASK) + PAGE_SIZE; 799 } 800 } 801 802 physmem = btoc(physsz); 803 804 /* 805 * Allocate PTEG table. 806 */ 807 #ifdef PTEGCOUNT 808 moea_pteg_count = PTEGCOUNT; 809 #else 810 moea_pteg_count = 0x1000; 811 812 while (moea_pteg_count < physmem) 813 moea_pteg_count <<= 1; 814 815 moea_pteg_count >>= 1; 816 #endif /* PTEGCOUNT */ 817 818 size = moea_pteg_count * sizeof(struct pteg); 819 CTR2(KTR_PMAP, "moea_bootstrap: %d PTEGs, %d bytes", moea_pteg_count, 820 size); 821 moea_pteg_table = (struct pteg *)moea_bootstrap_alloc(size, size); 822 CTR1(KTR_PMAP, "moea_bootstrap: PTEG table at %p", moea_pteg_table); 823 bzero((void *)moea_pteg_table, moea_pteg_count * sizeof(struct pteg)); 824 moea_pteg_mask = moea_pteg_count - 1; 825 826 /* 827 * Allocate pv/overflow lists. 828 */ 829 size = sizeof(struct pvo_head) * moea_pteg_count; 830 moea_pvo_table = (struct pvo_head *)moea_bootstrap_alloc(size, 831 PAGE_SIZE); 832 CTR1(KTR_PMAP, "moea_bootstrap: PVO table at %p", moea_pvo_table); 833 for (i = 0; i < moea_pteg_count; i++) 834 LIST_INIT(&moea_pvo_table[i]); 835 836 /* 837 * Initialize the lock that synchronizes access to the pteg and pvo 838 * tables. 839 */ 840 mtx_init(&moea_table_mutex, "pmap table", NULL, MTX_DEF | 841 MTX_RECURSE); 842 mtx_init(&moea_vsid_mutex, "VSID table", NULL, MTX_DEF); 843 844 mtx_init(&tlbie_mtx, "tlbie", NULL, MTX_SPIN); 845 846 /* 847 * Initialise the unmanaged pvo pool. 848 */ 849 moea_bpvo_pool = (struct pvo_entry *)moea_bootstrap_alloc( 850 BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0); 851 moea_bpvo_pool_index = 0; 852 853 /* 854 * Make sure kernel vsid is allocated as well as VSID 0. 855 */ 856 moea_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW] 857 |= 1 << (KERNEL_VSIDBITS % VSID_NBPW); 858 moea_vsid_bitmap[0] |= 1; 859 860 /* 861 * Initialize the kernel pmap (which is statically allocated). 862 */ 863 PMAP_LOCK_INIT(kernel_pmap); 864 for (i = 0; i < 16; i++) 865 kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i; 866 CPU_FILL(&kernel_pmap->pm_active); 867 RB_INIT(&kernel_pmap->pmap_pvo); 868 869 /* 870 * Initialize the global pv list lock. 871 */ 872 rw_init(&pvh_global_lock, "pmap pv global"); 873 874 /* 875 * Set up the Open Firmware mappings 876 */ 877 chosen = OF_finddevice("/chosen"); 878 if (chosen != -1 && OF_getprop(chosen, "mmu", &mmui, 4) != -1 && 879 (mmu = OF_instance_to_package(mmui)) != -1 && 880 (sz = OF_getproplen(mmu, "translations")) != -1) { 881 translations = NULL; 882 for (i = 0; phys_avail[i] != 0; i += 2) { 883 if (phys_avail[i + 1] >= sz) { 884 translations = (struct ofw_map *)phys_avail[i]; 885 break; 886 } 887 } 888 if (translations == NULL) 889 panic("moea_bootstrap: no space to copy translations"); 890 bzero(translations, sz); 891 if (OF_getprop(mmu, "translations", translations, sz) == -1) 892 panic("moea_bootstrap: can't get ofw translations"); 893 CTR0(KTR_PMAP, "moea_bootstrap: translations"); 894 sz /= sizeof(*translations); 895 qsort(translations, sz, sizeof (*translations), om_cmp); 896 for (i = 0; i < sz; i++) { 897 CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x", 898 translations[i].om_pa, translations[i].om_va, 899 translations[i].om_len); 900 901 /* 902 * If the mapping is 1:1, let the RAM and device 903 * on-demand BAT tables take care of the translation. 904 */ 905 if (translations[i].om_va == translations[i].om_pa) 906 continue; 907 908 /* Enter the pages */ 909 for (off = 0; off < translations[i].om_len; 910 off += PAGE_SIZE) 911 moea_kenter(mmup, translations[i].om_va + off, 912 translations[i].om_pa + off); 913 } 914 } 915 916 /* 917 * Calculate the last available physical address. 918 */ 919 for (i = 0; phys_avail[i + 2] != 0; i += 2) 920 ; 921 Maxmem = powerpc_btop(phys_avail[i + 1]); 922 923 moea_cpu_bootstrap(mmup,0); 924 925 pmap_bootstrapped++; 926 927 /* 928 * Set the start and end of kva. 929 */ 930 virtual_avail = VM_MIN_KERNEL_ADDRESS; 931 virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS; 932 933 /* 934 * Allocate a kernel stack with a guard page for thread0 and map it 935 * into the kernel page map. 936 */ 937 pa = moea_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE); 938 va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE; 939 virtual_avail = va + KSTACK_PAGES * PAGE_SIZE; 940 CTR2(KTR_PMAP, "moea_bootstrap: kstack0 at %#x (%#x)", pa, va); 941 thread0.td_kstack = va; 942 thread0.td_kstack_pages = KSTACK_PAGES; 943 for (i = 0; i < KSTACK_PAGES; i++) { 944 moea_kenter(mmup, va, pa); 945 pa += PAGE_SIZE; 946 va += PAGE_SIZE; 947 } 948 949 /* 950 * Allocate virtual address space for the message buffer. 951 */ 952 pa = msgbuf_phys = moea_bootstrap_alloc(msgbufsize, PAGE_SIZE); 953 msgbufp = (struct msgbuf *)virtual_avail; 954 va = virtual_avail; 955 virtual_avail += round_page(msgbufsize); 956 while (va < virtual_avail) { 957 moea_kenter(mmup, va, pa); 958 pa += PAGE_SIZE; 959 va += PAGE_SIZE; 960 } 961 962 /* 963 * Allocate virtual address space for the dynamic percpu area. 964 */ 965 pa = moea_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE); 966 dpcpu = (void *)virtual_avail; 967 va = virtual_avail; 968 virtual_avail += DPCPU_SIZE; 969 while (va < virtual_avail) { 970 moea_kenter(mmup, va, pa); 971 pa += PAGE_SIZE; 972 va += PAGE_SIZE; 973 } 974 dpcpu_init(dpcpu, 0); 975 } 976 977 /* 978 * Activate a user pmap. The pmap must be activated before it's address 979 * space can be accessed in any way. 980 */ 981 void 982 moea_activate(mmu_t mmu, struct thread *td) 983 { 984 pmap_t pm, pmr; 985 986 /* 987 * Load all the data we need up front to encourage the compiler to 988 * not issue any loads while we have interrupts disabled below. 989 */ 990 pm = &td->td_proc->p_vmspace->vm_pmap; 991 pmr = pm->pmap_phys; 992 993 CPU_SET(PCPU_GET(cpuid), &pm->pm_active); 994 PCPU_SET(curpmap, pmr); 995 } 996 997 void 998 moea_deactivate(mmu_t mmu, struct thread *td) 999 { 1000 pmap_t pm; 1001 1002 pm = &td->td_proc->p_vmspace->vm_pmap; 1003 CPU_CLR(PCPU_GET(cpuid), &pm->pm_active); 1004 PCPU_SET(curpmap, NULL); 1005 } 1006 1007 void 1008 moea_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired) 1009 { 1010 struct pvo_entry *pvo; 1011 1012 PMAP_LOCK(pm); 1013 pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL); 1014 1015 if (pvo != NULL) { 1016 if (wired) { 1017 if ((pvo->pvo_vaddr & PVO_WIRED) == 0) 1018 pm->pm_stats.wired_count++; 1019 pvo->pvo_vaddr |= PVO_WIRED; 1020 } else { 1021 if ((pvo->pvo_vaddr & PVO_WIRED) != 0) 1022 pm->pm_stats.wired_count--; 1023 pvo->pvo_vaddr &= ~PVO_WIRED; 1024 } 1025 } 1026 PMAP_UNLOCK(pm); 1027 } 1028 1029 void 1030 moea_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst) 1031 { 1032 vm_offset_t dst; 1033 vm_offset_t src; 1034 1035 dst = VM_PAGE_TO_PHYS(mdst); 1036 src = VM_PAGE_TO_PHYS(msrc); 1037 1038 bcopy((void *)src, (void *)dst, PAGE_SIZE); 1039 } 1040 1041 /* 1042 * Zero a page of physical memory by temporarily mapping it into the tlb. 1043 */ 1044 void 1045 moea_zero_page(mmu_t mmu, vm_page_t m) 1046 { 1047 vm_offset_t pa = VM_PAGE_TO_PHYS(m); 1048 void *va = (void *)pa; 1049 1050 bzero(va, PAGE_SIZE); 1051 } 1052 1053 void 1054 moea_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size) 1055 { 1056 vm_offset_t pa = VM_PAGE_TO_PHYS(m); 1057 void *va = (void *)(pa + off); 1058 1059 bzero(va, size); 1060 } 1061 1062 void 1063 moea_zero_page_idle(mmu_t mmu, vm_page_t m) 1064 { 1065 vm_offset_t pa = VM_PAGE_TO_PHYS(m); 1066 void *va = (void *)pa; 1067 1068 bzero(va, PAGE_SIZE); 1069 } 1070 1071 /* 1072 * Map the given physical page at the specified virtual address in the 1073 * target pmap with the protection requested. If specified the page 1074 * will be wired down. 1075 */ 1076 void 1077 moea_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, 1078 boolean_t wired) 1079 { 1080 1081 rw_wlock(&pvh_global_lock); 1082 PMAP_LOCK(pmap); 1083 moea_enter_locked(pmap, va, m, prot, wired); 1084 rw_wunlock(&pvh_global_lock); 1085 PMAP_UNLOCK(pmap); 1086 } 1087 1088 /* 1089 * Map the given physical page at the specified virtual address in the 1090 * target pmap with the protection requested. If specified the page 1091 * will be wired down. 1092 * 1093 * The page queues and pmap must be locked. 1094 */ 1095 static void 1096 moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, 1097 boolean_t wired) 1098 { 1099 struct pvo_head *pvo_head; 1100 uma_zone_t zone; 1101 vm_page_t pg; 1102 u_int pte_lo, pvo_flags; 1103 int error; 1104 1105 if (!moea_initialized) { 1106 pvo_head = &moea_pvo_kunmanaged; 1107 zone = moea_upvo_zone; 1108 pvo_flags = 0; 1109 pg = NULL; 1110 } else { 1111 pvo_head = vm_page_to_pvoh(m); 1112 pg = m; 1113 zone = moea_mpvo_zone; 1114 pvo_flags = PVO_MANAGED; 1115 } 1116 if (pmap_bootstrapped) 1117 rw_assert(&pvh_global_lock, RA_WLOCKED); 1118 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1119 KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 || 1120 VM_OBJECT_LOCKED(m->object), 1121 ("moea_enter_locked: page %p is not busy", m)); 1122 1123 /* XXX change the pvo head for fake pages */ 1124 if ((m->oflags & VPO_UNMANAGED) != 0) { 1125 pvo_flags &= ~PVO_MANAGED; 1126 pvo_head = &moea_pvo_kunmanaged; 1127 zone = moea_upvo_zone; 1128 } 1129 1130 pte_lo = moea_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m)); 1131 1132 if (prot & VM_PROT_WRITE) { 1133 pte_lo |= PTE_BW; 1134 if (pmap_bootstrapped && 1135 (m->oflags & VPO_UNMANAGED) == 0) 1136 vm_page_aflag_set(m, PGA_WRITEABLE); 1137 } else 1138 pte_lo |= PTE_BR; 1139 1140 if (prot & VM_PROT_EXECUTE) 1141 pvo_flags |= PVO_EXECUTABLE; 1142 1143 if (wired) 1144 pvo_flags |= PVO_WIRED; 1145 1146 error = moea_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m), 1147 pte_lo, pvo_flags); 1148 1149 /* 1150 * Flush the real page from the instruction cache. This has be done 1151 * for all user mappings to prevent information leakage via the 1152 * instruction cache. moea_pvo_enter() returns ENOENT for the first 1153 * mapping for a page. 1154 */ 1155 if (pmap != kernel_pmap && error == ENOENT && 1156 (pte_lo & (PTE_I | PTE_G)) == 0) 1157 moea_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE); 1158 } 1159 1160 /* 1161 * Maps a sequence of resident pages belonging to the same object. 1162 * The sequence begins with the given page m_start. This page is 1163 * mapped at the given virtual address start. Each subsequent page is 1164 * mapped at a virtual address that is offset from start by the same 1165 * amount as the page is offset from m_start within the object. The 1166 * last page in the sequence is the page with the largest offset from 1167 * m_start that can be mapped at a virtual address less than the given 1168 * virtual address end. Not every virtual page between start and end 1169 * is mapped; only those for which a resident page exists with the 1170 * corresponding offset from m_start are mapped. 1171 */ 1172 void 1173 moea_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end, 1174 vm_page_t m_start, vm_prot_t prot) 1175 { 1176 vm_page_t m; 1177 vm_pindex_t diff, psize; 1178 1179 psize = atop(end - start); 1180 m = m_start; 1181 rw_wlock(&pvh_global_lock); 1182 PMAP_LOCK(pm); 1183 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 1184 moea_enter_locked(pm, start + ptoa(diff), m, prot & 1185 (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1186 m = TAILQ_NEXT(m, listq); 1187 } 1188 rw_wunlock(&pvh_global_lock); 1189 PMAP_UNLOCK(pm); 1190 } 1191 1192 void 1193 moea_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m, 1194 vm_prot_t prot) 1195 { 1196 1197 rw_wlock(&pvh_global_lock); 1198 PMAP_LOCK(pm); 1199 moea_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), 1200 FALSE); 1201 rw_wunlock(&pvh_global_lock); 1202 PMAP_UNLOCK(pm); 1203 } 1204 1205 vm_paddr_t 1206 moea_extract(mmu_t mmu, pmap_t pm, vm_offset_t va) 1207 { 1208 struct pvo_entry *pvo; 1209 vm_paddr_t pa; 1210 1211 PMAP_LOCK(pm); 1212 pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL); 1213 if (pvo == NULL) 1214 pa = 0; 1215 else 1216 pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF); 1217 PMAP_UNLOCK(pm); 1218 return (pa); 1219 } 1220 1221 /* 1222 * Atomically extract and hold the physical page with the given 1223 * pmap and virtual address pair if that mapping permits the given 1224 * protection. 1225 */ 1226 vm_page_t 1227 moea_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot) 1228 { 1229 struct pvo_entry *pvo; 1230 vm_page_t m; 1231 vm_paddr_t pa; 1232 1233 m = NULL; 1234 pa = 0; 1235 PMAP_LOCK(pmap); 1236 retry: 1237 pvo = moea_pvo_find_va(pmap, va & ~ADDR_POFF, NULL); 1238 if (pvo != NULL && (pvo->pvo_pte.pte.pte_hi & PTE_VALID) && 1239 ((pvo->pvo_pte.pte.pte_lo & PTE_PP) == PTE_RW || 1240 (prot & VM_PROT_WRITE) == 0)) { 1241 if (vm_page_pa_tryrelock(pmap, pvo->pvo_pte.pte.pte_lo & PTE_RPGN, &pa)) 1242 goto retry; 1243 m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte.pte_lo & PTE_RPGN); 1244 vm_page_hold(m); 1245 } 1246 PA_UNLOCK_COND(pa); 1247 PMAP_UNLOCK(pmap); 1248 return (m); 1249 } 1250 1251 void 1252 moea_init(mmu_t mmu) 1253 { 1254 1255 moea_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry), 1256 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 1257 UMA_ZONE_VM | UMA_ZONE_NOFREE); 1258 moea_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry), 1259 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 1260 UMA_ZONE_VM | UMA_ZONE_NOFREE); 1261 moea_initialized = TRUE; 1262 } 1263 1264 boolean_t 1265 moea_is_referenced(mmu_t mmu, vm_page_t m) 1266 { 1267 boolean_t rv; 1268 1269 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1270 ("moea_is_referenced: page %p is not managed", m)); 1271 rw_wlock(&pvh_global_lock); 1272 rv = moea_query_bit(m, PTE_REF); 1273 rw_wunlock(&pvh_global_lock); 1274 return (rv); 1275 } 1276 1277 boolean_t 1278 moea_is_modified(mmu_t mmu, vm_page_t m) 1279 { 1280 boolean_t rv; 1281 1282 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1283 ("moea_is_modified: page %p is not managed", m)); 1284 1285 /* 1286 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be 1287 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE 1288 * is clear, no PTEs can have PTE_CHG set. 1289 */ 1290 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 1291 if ((m->oflags & VPO_BUSY) == 0 && 1292 (m->aflags & PGA_WRITEABLE) == 0) 1293 return (FALSE); 1294 rw_wlock(&pvh_global_lock); 1295 rv = moea_query_bit(m, PTE_CHG); 1296 rw_wunlock(&pvh_global_lock); 1297 return (rv); 1298 } 1299 1300 boolean_t 1301 moea_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va) 1302 { 1303 struct pvo_entry *pvo; 1304 boolean_t rv; 1305 1306 PMAP_LOCK(pmap); 1307 pvo = moea_pvo_find_va(pmap, va & ~ADDR_POFF, NULL); 1308 rv = pvo == NULL || (pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0; 1309 PMAP_UNLOCK(pmap); 1310 return (rv); 1311 } 1312 1313 void 1314 moea_clear_reference(mmu_t mmu, vm_page_t m) 1315 { 1316 1317 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1318 ("moea_clear_reference: page %p is not managed", m)); 1319 rw_wlock(&pvh_global_lock); 1320 moea_clear_bit(m, PTE_REF); 1321 rw_wunlock(&pvh_global_lock); 1322 } 1323 1324 void 1325 moea_clear_modify(mmu_t mmu, vm_page_t m) 1326 { 1327 1328 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1329 ("moea_clear_modify: page %p is not managed", m)); 1330 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 1331 KASSERT((m->oflags & VPO_BUSY) == 0, 1332 ("moea_clear_modify: page %p is busy", m)); 1333 1334 /* 1335 * If the page is not PGA_WRITEABLE, then no PTEs can have PTE_CHG 1336 * set. If the object containing the page is locked and the page is 1337 * not VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set. 1338 */ 1339 if ((m->aflags & PGA_WRITEABLE) == 0) 1340 return; 1341 rw_wlock(&pvh_global_lock); 1342 moea_clear_bit(m, PTE_CHG); 1343 rw_wunlock(&pvh_global_lock); 1344 } 1345 1346 /* 1347 * Clear the write and modified bits in each of the given page's mappings. 1348 */ 1349 void 1350 moea_remove_write(mmu_t mmu, vm_page_t m) 1351 { 1352 struct pvo_entry *pvo; 1353 struct pte *pt; 1354 pmap_t pmap; 1355 u_int lo; 1356 1357 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1358 ("moea_remove_write: page %p is not managed", m)); 1359 1360 /* 1361 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by 1362 * another thread while the object is locked. Thus, if PGA_WRITEABLE 1363 * is clear, no page table entries need updating. 1364 */ 1365 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 1366 if ((m->oflags & VPO_BUSY) == 0 && 1367 (m->aflags & PGA_WRITEABLE) == 0) 1368 return; 1369 rw_wlock(&pvh_global_lock); 1370 lo = moea_attr_fetch(m); 1371 powerpc_sync(); 1372 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 1373 pmap = pvo->pvo_pmap; 1374 PMAP_LOCK(pmap); 1375 if ((pvo->pvo_pte.pte.pte_lo & PTE_PP) != PTE_BR) { 1376 pt = moea_pvo_to_pte(pvo, -1); 1377 pvo->pvo_pte.pte.pte_lo &= ~PTE_PP; 1378 pvo->pvo_pte.pte.pte_lo |= PTE_BR; 1379 if (pt != NULL) { 1380 moea_pte_synch(pt, &pvo->pvo_pte.pte); 1381 lo |= pvo->pvo_pte.pte.pte_lo; 1382 pvo->pvo_pte.pte.pte_lo &= ~PTE_CHG; 1383 moea_pte_change(pt, &pvo->pvo_pte.pte, 1384 pvo->pvo_vaddr); 1385 mtx_unlock(&moea_table_mutex); 1386 } 1387 } 1388 PMAP_UNLOCK(pmap); 1389 } 1390 if ((lo & PTE_CHG) != 0) { 1391 moea_attr_clear(m, PTE_CHG); 1392 vm_page_dirty(m); 1393 } 1394 vm_page_aflag_clear(m, PGA_WRITEABLE); 1395 rw_wunlock(&pvh_global_lock); 1396 } 1397 1398 /* 1399 * moea_ts_referenced: 1400 * 1401 * Return a count of reference bits for a page, clearing those bits. 1402 * It is not necessary for every reference bit to be cleared, but it 1403 * is necessary that 0 only be returned when there are truly no 1404 * reference bits set. 1405 * 1406 * XXX: The exact number of bits to check and clear is a matter that 1407 * should be tested and standardized at some point in the future for 1408 * optimal aging of shared pages. 1409 */ 1410 int 1411 moea_ts_referenced(mmu_t mmu, vm_page_t m) 1412 { 1413 int count; 1414 1415 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1416 ("moea_ts_referenced: page %p is not managed", m)); 1417 rw_wlock(&pvh_global_lock); 1418 count = moea_clear_bit(m, PTE_REF); 1419 rw_wunlock(&pvh_global_lock); 1420 return (count); 1421 } 1422 1423 /* 1424 * Modify the WIMG settings of all mappings for a page. 1425 */ 1426 void 1427 moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma) 1428 { 1429 struct pvo_entry *pvo; 1430 struct pvo_head *pvo_head; 1431 struct pte *pt; 1432 pmap_t pmap; 1433 u_int lo; 1434 1435 if ((m->oflags & VPO_UNMANAGED) != 0) { 1436 m->md.mdpg_cache_attrs = ma; 1437 return; 1438 } 1439 1440 rw_wlock(&pvh_global_lock); 1441 pvo_head = vm_page_to_pvoh(m); 1442 lo = moea_calc_wimg(VM_PAGE_TO_PHYS(m), ma); 1443 1444 LIST_FOREACH(pvo, pvo_head, pvo_vlink) { 1445 pmap = pvo->pvo_pmap; 1446 PMAP_LOCK(pmap); 1447 pt = moea_pvo_to_pte(pvo, -1); 1448 pvo->pvo_pte.pte.pte_lo &= ~PTE_WIMG; 1449 pvo->pvo_pte.pte.pte_lo |= lo; 1450 if (pt != NULL) { 1451 moea_pte_change(pt, &pvo->pvo_pte.pte, 1452 pvo->pvo_vaddr); 1453 if (pvo->pvo_pmap == kernel_pmap) 1454 isync(); 1455 } 1456 mtx_unlock(&moea_table_mutex); 1457 PMAP_UNLOCK(pmap); 1458 } 1459 m->md.mdpg_cache_attrs = ma; 1460 rw_wunlock(&pvh_global_lock); 1461 } 1462 1463 /* 1464 * Map a wired page into kernel virtual address space. 1465 */ 1466 void 1467 moea_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa) 1468 { 1469 1470 moea_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT); 1471 } 1472 1473 void 1474 moea_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma) 1475 { 1476 u_int pte_lo; 1477 int error; 1478 1479 #if 0 1480 if (va < VM_MIN_KERNEL_ADDRESS) 1481 panic("moea_kenter: attempt to enter non-kernel address %#x", 1482 va); 1483 #endif 1484 1485 pte_lo = moea_calc_wimg(pa, ma); 1486 1487 PMAP_LOCK(kernel_pmap); 1488 error = moea_pvo_enter(kernel_pmap, moea_upvo_zone, 1489 &moea_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED); 1490 1491 if (error != 0 && error != ENOENT) 1492 panic("moea_kenter: failed to enter va %#x pa %#x: %d", va, 1493 pa, error); 1494 1495 PMAP_UNLOCK(kernel_pmap); 1496 } 1497 1498 /* 1499 * Extract the physical page address associated with the given kernel virtual 1500 * address. 1501 */ 1502 vm_paddr_t 1503 moea_kextract(mmu_t mmu, vm_offset_t va) 1504 { 1505 struct pvo_entry *pvo; 1506 vm_paddr_t pa; 1507 1508 /* 1509 * Allow direct mappings on 32-bit OEA 1510 */ 1511 if (va < VM_MIN_KERNEL_ADDRESS) { 1512 return (va); 1513 } 1514 1515 PMAP_LOCK(kernel_pmap); 1516 pvo = moea_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL); 1517 KASSERT(pvo != NULL, ("moea_kextract: no addr found")); 1518 pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF); 1519 PMAP_UNLOCK(kernel_pmap); 1520 return (pa); 1521 } 1522 1523 /* 1524 * Remove a wired page from kernel virtual address space. 1525 */ 1526 void 1527 moea_kremove(mmu_t mmu, vm_offset_t va) 1528 { 1529 1530 moea_remove(mmu, kernel_pmap, va, va + PAGE_SIZE); 1531 } 1532 1533 /* 1534 * Map a range of physical addresses into kernel virtual address space. 1535 * 1536 * The value passed in *virt is a suggested virtual address for the mapping. 1537 * Architectures which can support a direct-mapped physical to virtual region 1538 * can return the appropriate address within that region, leaving '*virt' 1539 * unchanged. We cannot and therefore do not; *virt is updated with the 1540 * first usable address after the mapped region. 1541 */ 1542 vm_offset_t 1543 moea_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start, 1544 vm_paddr_t pa_end, int prot) 1545 { 1546 vm_offset_t sva, va; 1547 1548 sva = *virt; 1549 va = sva; 1550 for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE) 1551 moea_kenter(mmu, va, pa_start); 1552 *virt = va; 1553 return (sva); 1554 } 1555 1556 /* 1557 * Returns true if the pmap's pv is one of the first 1558 * 16 pvs linked to from this page. This count may 1559 * be changed upwards or downwards in the future; it 1560 * is only necessary that true be returned for a small 1561 * subset of pmaps for proper page aging. 1562 */ 1563 boolean_t 1564 moea_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m) 1565 { 1566 int loops; 1567 struct pvo_entry *pvo; 1568 boolean_t rv; 1569 1570 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1571 ("moea_page_exists_quick: page %p is not managed", m)); 1572 loops = 0; 1573 rv = FALSE; 1574 rw_wlock(&pvh_global_lock); 1575 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 1576 if (pvo->pvo_pmap == pmap) { 1577 rv = TRUE; 1578 break; 1579 } 1580 if (++loops >= 16) 1581 break; 1582 } 1583 rw_wunlock(&pvh_global_lock); 1584 return (rv); 1585 } 1586 1587 /* 1588 * Return the number of managed mappings to the given physical page 1589 * that are wired. 1590 */ 1591 int 1592 moea_page_wired_mappings(mmu_t mmu, vm_page_t m) 1593 { 1594 struct pvo_entry *pvo; 1595 int count; 1596 1597 count = 0; 1598 if ((m->oflags & VPO_UNMANAGED) != 0) 1599 return (count); 1600 rw_wlock(&pvh_global_lock); 1601 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) 1602 if ((pvo->pvo_vaddr & PVO_WIRED) != 0) 1603 count++; 1604 rw_wunlock(&pvh_global_lock); 1605 return (count); 1606 } 1607 1608 static u_int moea_vsidcontext; 1609 1610 void 1611 moea_pinit(mmu_t mmu, pmap_t pmap) 1612 { 1613 int i, mask; 1614 u_int entropy; 1615 1616 KASSERT((int)pmap < VM_MIN_KERNEL_ADDRESS, ("moea_pinit: virt pmap")); 1617 PMAP_LOCK_INIT(pmap); 1618 RB_INIT(&pmap->pmap_pvo); 1619 1620 entropy = 0; 1621 __asm __volatile("mftb %0" : "=r"(entropy)); 1622 1623 if ((pmap->pmap_phys = (pmap_t)moea_kextract(mmu, (vm_offset_t)pmap)) 1624 == NULL) { 1625 pmap->pmap_phys = pmap; 1626 } 1627 1628 1629 mtx_lock(&moea_vsid_mutex); 1630 /* 1631 * Allocate some segment registers for this pmap. 1632 */ 1633 for (i = 0; i < NPMAPS; i += VSID_NBPW) { 1634 u_int hash, n; 1635 1636 /* 1637 * Create a new value by mutiplying by a prime and adding in 1638 * entropy from the timebase register. This is to make the 1639 * VSID more random so that the PT hash function collides 1640 * less often. (Note that the prime casues gcc to do shifts 1641 * instead of a multiply.) 1642 */ 1643 moea_vsidcontext = (moea_vsidcontext * 0x1105) + entropy; 1644 hash = moea_vsidcontext & (NPMAPS - 1); 1645 if (hash == 0) /* 0 is special, avoid it */ 1646 continue; 1647 n = hash >> 5; 1648 mask = 1 << (hash & (VSID_NBPW - 1)); 1649 hash = (moea_vsidcontext & 0xfffff); 1650 if (moea_vsid_bitmap[n] & mask) { /* collision? */ 1651 /* anything free in this bucket? */ 1652 if (moea_vsid_bitmap[n] == 0xffffffff) { 1653 entropy = (moea_vsidcontext >> 20); 1654 continue; 1655 } 1656 i = ffs(~moea_vsid_bitmap[n]) - 1; 1657 mask = 1 << i; 1658 hash &= 0xfffff & ~(VSID_NBPW - 1); 1659 hash |= i; 1660 } 1661 KASSERT(!(moea_vsid_bitmap[n] & mask), 1662 ("Allocating in-use VSID group %#x\n", hash)); 1663 moea_vsid_bitmap[n] |= mask; 1664 for (i = 0; i < 16; i++) 1665 pmap->pm_sr[i] = VSID_MAKE(i, hash); 1666 mtx_unlock(&moea_vsid_mutex); 1667 return; 1668 } 1669 1670 mtx_unlock(&moea_vsid_mutex); 1671 panic("moea_pinit: out of segments"); 1672 } 1673 1674 /* 1675 * Initialize the pmap associated with process 0. 1676 */ 1677 void 1678 moea_pinit0(mmu_t mmu, pmap_t pm) 1679 { 1680 1681 moea_pinit(mmu, pm); 1682 bzero(&pm->pm_stats, sizeof(pm->pm_stats)); 1683 } 1684 1685 /* 1686 * Set the physical protection on the specified range of this map as requested. 1687 */ 1688 void 1689 moea_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva, 1690 vm_prot_t prot) 1691 { 1692 struct pvo_entry *pvo, *tpvo, key; 1693 struct pte *pt; 1694 1695 KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, 1696 ("moea_protect: non current pmap")); 1697 1698 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1699 moea_remove(mmu, pm, sva, eva); 1700 return; 1701 } 1702 1703 rw_wlock(&pvh_global_lock); 1704 PMAP_LOCK(pm); 1705 key.pvo_vaddr = sva; 1706 for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key); 1707 pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) { 1708 tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo); 1709 if ((prot & VM_PROT_EXECUTE) == 0) 1710 pvo->pvo_vaddr &= ~PVO_EXECUTABLE; 1711 1712 /* 1713 * Grab the PTE pointer before we diddle with the cached PTE 1714 * copy. 1715 */ 1716 pt = moea_pvo_to_pte(pvo, -1); 1717 /* 1718 * Change the protection of the page. 1719 */ 1720 pvo->pvo_pte.pte.pte_lo &= ~PTE_PP; 1721 pvo->pvo_pte.pte.pte_lo |= PTE_BR; 1722 1723 /* 1724 * If the PVO is in the page table, update that pte as well. 1725 */ 1726 if (pt != NULL) { 1727 moea_pte_change(pt, &pvo->pvo_pte.pte, pvo->pvo_vaddr); 1728 mtx_unlock(&moea_table_mutex); 1729 } 1730 } 1731 rw_wunlock(&pvh_global_lock); 1732 PMAP_UNLOCK(pm); 1733 } 1734 1735 /* 1736 * Map a list of wired pages into kernel virtual address space. This is 1737 * intended for temporary mappings which do not need page modification or 1738 * references recorded. Existing mappings in the region are overwritten. 1739 */ 1740 void 1741 moea_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count) 1742 { 1743 vm_offset_t va; 1744 1745 va = sva; 1746 while (count-- > 0) { 1747 moea_kenter(mmu, va, VM_PAGE_TO_PHYS(*m)); 1748 va += PAGE_SIZE; 1749 m++; 1750 } 1751 } 1752 1753 /* 1754 * Remove page mappings from kernel virtual address space. Intended for 1755 * temporary mappings entered by moea_qenter. 1756 */ 1757 void 1758 moea_qremove(mmu_t mmu, vm_offset_t sva, int count) 1759 { 1760 vm_offset_t va; 1761 1762 va = sva; 1763 while (count-- > 0) { 1764 moea_kremove(mmu, va); 1765 va += PAGE_SIZE; 1766 } 1767 } 1768 1769 void 1770 moea_release(mmu_t mmu, pmap_t pmap) 1771 { 1772 int idx, mask; 1773 1774 /* 1775 * Free segment register's VSID 1776 */ 1777 if (pmap->pm_sr[0] == 0) 1778 panic("moea_release"); 1779 1780 mtx_lock(&moea_vsid_mutex); 1781 idx = VSID_TO_HASH(pmap->pm_sr[0]) & (NPMAPS-1); 1782 mask = 1 << (idx % VSID_NBPW); 1783 idx /= VSID_NBPW; 1784 moea_vsid_bitmap[idx] &= ~mask; 1785 mtx_unlock(&moea_vsid_mutex); 1786 PMAP_LOCK_DESTROY(pmap); 1787 } 1788 1789 /* 1790 * Remove the given range of addresses from the specified map. 1791 */ 1792 void 1793 moea_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva) 1794 { 1795 struct pvo_entry *pvo, *tpvo, key; 1796 1797 rw_wlock(&pvh_global_lock); 1798 PMAP_LOCK(pm); 1799 key.pvo_vaddr = sva; 1800 for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key); 1801 pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) { 1802 tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo); 1803 moea_pvo_remove(pvo, -1); 1804 } 1805 PMAP_UNLOCK(pm); 1806 rw_wunlock(&pvh_global_lock); 1807 } 1808 1809 /* 1810 * Remove physical page from all pmaps in which it resides. moea_pvo_remove() 1811 * will reflect changes in pte's back to the vm_page. 1812 */ 1813 void 1814 moea_remove_all(mmu_t mmu, vm_page_t m) 1815 { 1816 struct pvo_head *pvo_head; 1817 struct pvo_entry *pvo, *next_pvo; 1818 pmap_t pmap; 1819 1820 rw_wlock(&pvh_global_lock); 1821 pvo_head = vm_page_to_pvoh(m); 1822 for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) { 1823 next_pvo = LIST_NEXT(pvo, pvo_vlink); 1824 1825 pmap = pvo->pvo_pmap; 1826 PMAP_LOCK(pmap); 1827 moea_pvo_remove(pvo, -1); 1828 PMAP_UNLOCK(pmap); 1829 } 1830 if ((m->aflags & PGA_WRITEABLE) && moea_query_bit(m, PTE_CHG)) { 1831 moea_attr_clear(m, PTE_CHG); 1832 vm_page_dirty(m); 1833 } 1834 vm_page_aflag_clear(m, PGA_WRITEABLE); 1835 rw_wunlock(&pvh_global_lock); 1836 } 1837 1838 /* 1839 * Allocate a physical page of memory directly from the phys_avail map. 1840 * Can only be called from moea_bootstrap before avail start and end are 1841 * calculated. 1842 */ 1843 static vm_offset_t 1844 moea_bootstrap_alloc(vm_size_t size, u_int align) 1845 { 1846 vm_offset_t s, e; 1847 int i, j; 1848 1849 size = round_page(size); 1850 for (i = 0; phys_avail[i + 1] != 0; i += 2) { 1851 if (align != 0) 1852 s = (phys_avail[i] + align - 1) & ~(align - 1); 1853 else 1854 s = phys_avail[i]; 1855 e = s + size; 1856 1857 if (s < phys_avail[i] || e > phys_avail[i + 1]) 1858 continue; 1859 1860 if (s == phys_avail[i]) { 1861 phys_avail[i] += size; 1862 } else if (e == phys_avail[i + 1]) { 1863 phys_avail[i + 1] -= size; 1864 } else { 1865 for (j = phys_avail_count * 2; j > i; j -= 2) { 1866 phys_avail[j] = phys_avail[j - 2]; 1867 phys_avail[j + 1] = phys_avail[j - 1]; 1868 } 1869 1870 phys_avail[i + 3] = phys_avail[i + 1]; 1871 phys_avail[i + 1] = s; 1872 phys_avail[i + 2] = e; 1873 phys_avail_count++; 1874 } 1875 1876 return (s); 1877 } 1878 panic("moea_bootstrap_alloc: could not allocate memory"); 1879 } 1880 1881 static void 1882 moea_syncicache(vm_offset_t pa, vm_size_t len) 1883 { 1884 __syncicache((void *)pa, len); 1885 } 1886 1887 static int 1888 moea_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head, 1889 vm_offset_t va, vm_offset_t pa, u_int pte_lo, int flags) 1890 { 1891 struct pvo_entry *pvo; 1892 u_int sr; 1893 int first; 1894 u_int ptegidx; 1895 int i; 1896 int bootstrap; 1897 1898 moea_pvo_enter_calls++; 1899 first = 0; 1900 bootstrap = 0; 1901 1902 /* 1903 * Compute the PTE Group index. 1904 */ 1905 va &= ~ADDR_POFF; 1906 sr = va_to_sr(pm->pm_sr, va); 1907 ptegidx = va_to_pteg(sr, va); 1908 1909 /* 1910 * Remove any existing mapping for this page. Reuse the pvo entry if 1911 * there is a mapping. 1912 */ 1913 mtx_lock(&moea_table_mutex); 1914 LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) { 1915 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 1916 if ((pvo->pvo_pte.pte.pte_lo & PTE_RPGN) == pa && 1917 (pvo->pvo_pte.pte.pte_lo & PTE_PP) == 1918 (pte_lo & PTE_PP)) { 1919 mtx_unlock(&moea_table_mutex); 1920 return (0); 1921 } 1922 moea_pvo_remove(pvo, -1); 1923 break; 1924 } 1925 } 1926 1927 /* 1928 * If we aren't overwriting a mapping, try to allocate. 1929 */ 1930 if (moea_initialized) { 1931 pvo = uma_zalloc(zone, M_NOWAIT); 1932 } else { 1933 if (moea_bpvo_pool_index >= BPVO_POOL_SIZE) { 1934 panic("moea_enter: bpvo pool exhausted, %d, %d, %d", 1935 moea_bpvo_pool_index, BPVO_POOL_SIZE, 1936 BPVO_POOL_SIZE * sizeof(struct pvo_entry)); 1937 } 1938 pvo = &moea_bpvo_pool[moea_bpvo_pool_index]; 1939 moea_bpvo_pool_index++; 1940 bootstrap = 1; 1941 } 1942 1943 if (pvo == NULL) { 1944 mtx_unlock(&moea_table_mutex); 1945 return (ENOMEM); 1946 } 1947 1948 moea_pvo_entries++; 1949 pvo->pvo_vaddr = va; 1950 pvo->pvo_pmap = pm; 1951 LIST_INSERT_HEAD(&moea_pvo_table[ptegidx], pvo, pvo_olink); 1952 pvo->pvo_vaddr &= ~ADDR_POFF; 1953 if (flags & VM_PROT_EXECUTE) 1954 pvo->pvo_vaddr |= PVO_EXECUTABLE; 1955 if (flags & PVO_WIRED) 1956 pvo->pvo_vaddr |= PVO_WIRED; 1957 if (pvo_head != &moea_pvo_kunmanaged) 1958 pvo->pvo_vaddr |= PVO_MANAGED; 1959 if (bootstrap) 1960 pvo->pvo_vaddr |= PVO_BOOTSTRAP; 1961 1962 moea_pte_create(&pvo->pvo_pte.pte, sr, va, pa | pte_lo); 1963 1964 /* 1965 * Add to pmap list 1966 */ 1967 RB_INSERT(pvo_tree, &pm->pmap_pvo, pvo); 1968 1969 /* 1970 * Remember if the list was empty and therefore will be the first 1971 * item. 1972 */ 1973 if (LIST_FIRST(pvo_head) == NULL) 1974 first = 1; 1975 LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink); 1976 1977 if (pvo->pvo_pte.pte.pte_lo & PVO_WIRED) 1978 pm->pm_stats.wired_count++; 1979 pm->pm_stats.resident_count++; 1980 1981 /* 1982 * We hope this succeeds but it isn't required. 1983 */ 1984 i = moea_pte_insert(ptegidx, &pvo->pvo_pte.pte); 1985 if (i >= 0) { 1986 PVO_PTEGIDX_SET(pvo, i); 1987 } else { 1988 panic("moea_pvo_enter: overflow"); 1989 moea_pte_overflow++; 1990 } 1991 mtx_unlock(&moea_table_mutex); 1992 1993 return (first ? ENOENT : 0); 1994 } 1995 1996 static void 1997 moea_pvo_remove(struct pvo_entry *pvo, int pteidx) 1998 { 1999 struct pte *pt; 2000 2001 /* 2002 * If there is an active pte entry, we need to deactivate it (and 2003 * save the ref & cfg bits). 2004 */ 2005 pt = moea_pvo_to_pte(pvo, pteidx); 2006 if (pt != NULL) { 2007 moea_pte_unset(pt, &pvo->pvo_pte.pte, pvo->pvo_vaddr); 2008 mtx_unlock(&moea_table_mutex); 2009 PVO_PTEGIDX_CLR(pvo); 2010 } else { 2011 moea_pte_overflow--; 2012 } 2013 2014 /* 2015 * Update our statistics. 2016 */ 2017 pvo->pvo_pmap->pm_stats.resident_count--; 2018 if (pvo->pvo_pte.pte.pte_lo & PVO_WIRED) 2019 pvo->pvo_pmap->pm_stats.wired_count--; 2020 2021 /* 2022 * Save the REF/CHG bits into their cache if the page is managed. 2023 */ 2024 if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED) { 2025 struct vm_page *pg; 2026 2027 pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte.pte_lo & PTE_RPGN); 2028 if (pg != NULL) { 2029 moea_attr_save(pg, pvo->pvo_pte.pte.pte_lo & 2030 (PTE_REF | PTE_CHG)); 2031 } 2032 } 2033 2034 /* 2035 * Remove this PVO from the PV and pmap lists. 2036 */ 2037 LIST_REMOVE(pvo, pvo_vlink); 2038 RB_REMOVE(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo); 2039 2040 /* 2041 * Remove this from the overflow list and return it to the pool 2042 * if we aren't going to reuse it. 2043 */ 2044 LIST_REMOVE(pvo, pvo_olink); 2045 if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP)) 2046 uma_zfree(pvo->pvo_vaddr & PVO_MANAGED ? moea_mpvo_zone : 2047 moea_upvo_zone, pvo); 2048 moea_pvo_entries--; 2049 moea_pvo_remove_calls++; 2050 } 2051 2052 static __inline int 2053 moea_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx) 2054 { 2055 int pteidx; 2056 2057 /* 2058 * We can find the actual pte entry without searching by grabbing 2059 * the PTEG index from 3 unused bits in pte_lo[11:9] and by 2060 * noticing the HID bit. 2061 */ 2062 pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo); 2063 if (pvo->pvo_pte.pte.pte_hi & PTE_HID) 2064 pteidx ^= moea_pteg_mask * 8; 2065 2066 return (pteidx); 2067 } 2068 2069 static struct pvo_entry * 2070 moea_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p) 2071 { 2072 struct pvo_entry *pvo; 2073 int ptegidx; 2074 u_int sr; 2075 2076 va &= ~ADDR_POFF; 2077 sr = va_to_sr(pm->pm_sr, va); 2078 ptegidx = va_to_pteg(sr, va); 2079 2080 mtx_lock(&moea_table_mutex); 2081 LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) { 2082 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 2083 if (pteidx_p) 2084 *pteidx_p = moea_pvo_pte_index(pvo, ptegidx); 2085 break; 2086 } 2087 } 2088 mtx_unlock(&moea_table_mutex); 2089 2090 return (pvo); 2091 } 2092 2093 static struct pte * 2094 moea_pvo_to_pte(const struct pvo_entry *pvo, int pteidx) 2095 { 2096 struct pte *pt; 2097 2098 /* 2099 * If we haven't been supplied the ptegidx, calculate it. 2100 */ 2101 if (pteidx == -1) { 2102 int ptegidx; 2103 u_int sr; 2104 2105 sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr); 2106 ptegidx = va_to_pteg(sr, pvo->pvo_vaddr); 2107 pteidx = moea_pvo_pte_index(pvo, ptegidx); 2108 } 2109 2110 pt = &moea_pteg_table[pteidx >> 3].pt[pteidx & 7]; 2111 mtx_lock(&moea_table_mutex); 2112 2113 if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) { 2114 panic("moea_pvo_to_pte: pvo %p has valid pte in pvo but no " 2115 "valid pte index", pvo); 2116 } 2117 2118 if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) { 2119 panic("moea_pvo_to_pte: pvo %p has valid pte index in pvo " 2120 "pvo but no valid pte", pvo); 2121 } 2122 2123 if ((pt->pte_hi ^ (pvo->pvo_pte.pte.pte_hi & ~PTE_VALID)) == PTE_VALID) { 2124 if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0) { 2125 panic("moea_pvo_to_pte: pvo %p has valid pte in " 2126 "moea_pteg_table %p but invalid in pvo", pvo, pt); 2127 } 2128 2129 if (((pt->pte_lo ^ pvo->pvo_pte.pte.pte_lo) & ~(PTE_CHG|PTE_REF)) 2130 != 0) { 2131 panic("moea_pvo_to_pte: pvo %p pte does not match " 2132 "pte %p in moea_pteg_table", pvo, pt); 2133 } 2134 2135 mtx_assert(&moea_table_mutex, MA_OWNED); 2136 return (pt); 2137 } 2138 2139 if (pvo->pvo_pte.pte.pte_hi & PTE_VALID) { 2140 panic("moea_pvo_to_pte: pvo %p has invalid pte %p in " 2141 "moea_pteg_table but valid in pvo", pvo, pt); 2142 } 2143 2144 mtx_unlock(&moea_table_mutex); 2145 return (NULL); 2146 } 2147 2148 /* 2149 * XXX: THIS STUFF SHOULD BE IN pte.c? 2150 */ 2151 int 2152 moea_pte_spill(vm_offset_t addr) 2153 { 2154 struct pvo_entry *source_pvo, *victim_pvo; 2155 struct pvo_entry *pvo; 2156 int ptegidx, i, j; 2157 u_int sr; 2158 struct pteg *pteg; 2159 struct pte *pt; 2160 2161 moea_pte_spills++; 2162 2163 sr = mfsrin(addr); 2164 ptegidx = va_to_pteg(sr, addr); 2165 2166 /* 2167 * Have to substitute some entry. Use the primary hash for this. 2168 * Use low bits of timebase as random generator. 2169 */ 2170 pteg = &moea_pteg_table[ptegidx]; 2171 mtx_lock(&moea_table_mutex); 2172 __asm __volatile("mftb %0" : "=r"(i)); 2173 i &= 7; 2174 pt = &pteg->pt[i]; 2175 2176 source_pvo = NULL; 2177 victim_pvo = NULL; 2178 LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) { 2179 /* 2180 * We need to find a pvo entry for this address. 2181 */ 2182 if (source_pvo == NULL && 2183 moea_pte_match(&pvo->pvo_pte.pte, sr, addr, 2184 pvo->pvo_pte.pte.pte_hi & PTE_HID)) { 2185 /* 2186 * Now found an entry to be spilled into the pteg. 2187 * The PTE is now valid, so we know it's active. 2188 */ 2189 j = moea_pte_insert(ptegidx, &pvo->pvo_pte.pte); 2190 2191 if (j >= 0) { 2192 PVO_PTEGIDX_SET(pvo, j); 2193 moea_pte_overflow--; 2194 mtx_unlock(&moea_table_mutex); 2195 return (1); 2196 } 2197 2198 source_pvo = pvo; 2199 2200 if (victim_pvo != NULL) 2201 break; 2202 } 2203 2204 /* 2205 * We also need the pvo entry of the victim we are replacing 2206 * so save the R & C bits of the PTE. 2207 */ 2208 if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL && 2209 moea_pte_compare(pt, &pvo->pvo_pte.pte)) { 2210 victim_pvo = pvo; 2211 if (source_pvo != NULL) 2212 break; 2213 } 2214 } 2215 2216 if (source_pvo == NULL) { 2217 mtx_unlock(&moea_table_mutex); 2218 return (0); 2219 } 2220 2221 if (victim_pvo == NULL) { 2222 if ((pt->pte_hi & PTE_HID) == 0) 2223 panic("moea_pte_spill: victim p-pte (%p) has no pvo" 2224 "entry", pt); 2225 2226 /* 2227 * If this is a secondary PTE, we need to search it's primary 2228 * pvo bucket for the matching PVO. 2229 */ 2230 LIST_FOREACH(pvo, &moea_pvo_table[ptegidx ^ moea_pteg_mask], 2231 pvo_olink) { 2232 /* 2233 * We also need the pvo entry of the victim we are 2234 * replacing so save the R & C bits of the PTE. 2235 */ 2236 if (moea_pte_compare(pt, &pvo->pvo_pte.pte)) { 2237 victim_pvo = pvo; 2238 break; 2239 } 2240 } 2241 2242 if (victim_pvo == NULL) 2243 panic("moea_pte_spill: victim s-pte (%p) has no pvo" 2244 "entry", pt); 2245 } 2246 2247 /* 2248 * We are invalidating the TLB entry for the EA we are replacing even 2249 * though it's valid. If we don't, we lose any ref/chg bit changes 2250 * contained in the TLB entry. 2251 */ 2252 source_pvo->pvo_pte.pte.pte_hi &= ~PTE_HID; 2253 2254 moea_pte_unset(pt, &victim_pvo->pvo_pte.pte, victim_pvo->pvo_vaddr); 2255 moea_pte_set(pt, &source_pvo->pvo_pte.pte); 2256 2257 PVO_PTEGIDX_CLR(victim_pvo); 2258 PVO_PTEGIDX_SET(source_pvo, i); 2259 moea_pte_replacements++; 2260 2261 mtx_unlock(&moea_table_mutex); 2262 return (1); 2263 } 2264 2265 static int 2266 moea_pte_insert(u_int ptegidx, struct pte *pvo_pt) 2267 { 2268 struct pte *pt; 2269 int i; 2270 2271 mtx_assert(&moea_table_mutex, MA_OWNED); 2272 2273 /* 2274 * First try primary hash. 2275 */ 2276 for (pt = moea_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) { 2277 if ((pt->pte_hi & PTE_VALID) == 0) { 2278 pvo_pt->pte_hi &= ~PTE_HID; 2279 moea_pte_set(pt, pvo_pt); 2280 return (i); 2281 } 2282 } 2283 2284 /* 2285 * Now try secondary hash. 2286 */ 2287 ptegidx ^= moea_pteg_mask; 2288 2289 for (pt = moea_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) { 2290 if ((pt->pte_hi & PTE_VALID) == 0) { 2291 pvo_pt->pte_hi |= PTE_HID; 2292 moea_pte_set(pt, pvo_pt); 2293 return (i); 2294 } 2295 } 2296 2297 panic("moea_pte_insert: overflow"); 2298 return (-1); 2299 } 2300 2301 static boolean_t 2302 moea_query_bit(vm_page_t m, int ptebit) 2303 { 2304 struct pvo_entry *pvo; 2305 struct pte *pt; 2306 2307 rw_assert(&pvh_global_lock, RA_WLOCKED); 2308 if (moea_attr_fetch(m) & ptebit) 2309 return (TRUE); 2310 2311 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2312 2313 /* 2314 * See if we saved the bit off. If so, cache it and return 2315 * success. 2316 */ 2317 if (pvo->pvo_pte.pte.pte_lo & ptebit) { 2318 moea_attr_save(m, ptebit); 2319 return (TRUE); 2320 } 2321 } 2322 2323 /* 2324 * No luck, now go through the hard part of looking at the PTEs 2325 * themselves. Sync so that any pending REF/CHG bits are flushed to 2326 * the PTEs. 2327 */ 2328 powerpc_sync(); 2329 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2330 2331 /* 2332 * See if this pvo has a valid PTE. if so, fetch the 2333 * REF/CHG bits from the valid PTE. If the appropriate 2334 * ptebit is set, cache it and return success. 2335 */ 2336 pt = moea_pvo_to_pte(pvo, -1); 2337 if (pt != NULL) { 2338 moea_pte_synch(pt, &pvo->pvo_pte.pte); 2339 mtx_unlock(&moea_table_mutex); 2340 if (pvo->pvo_pte.pte.pte_lo & ptebit) { 2341 moea_attr_save(m, ptebit); 2342 return (TRUE); 2343 } 2344 } 2345 } 2346 2347 return (FALSE); 2348 } 2349 2350 static u_int 2351 moea_clear_bit(vm_page_t m, int ptebit) 2352 { 2353 u_int count; 2354 struct pvo_entry *pvo; 2355 struct pte *pt; 2356 2357 rw_assert(&pvh_global_lock, RA_WLOCKED); 2358 2359 /* 2360 * Clear the cached value. 2361 */ 2362 moea_attr_clear(m, ptebit); 2363 2364 /* 2365 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so 2366 * we can reset the right ones). note that since the pvo entries and 2367 * list heads are accessed via BAT0 and are never placed in the page 2368 * table, we don't have to worry about further accesses setting the 2369 * REF/CHG bits. 2370 */ 2371 powerpc_sync(); 2372 2373 /* 2374 * For each pvo entry, clear the pvo's ptebit. If this pvo has a 2375 * valid pte clear the ptebit from the valid pte. 2376 */ 2377 count = 0; 2378 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2379 pt = moea_pvo_to_pte(pvo, -1); 2380 if (pt != NULL) { 2381 moea_pte_synch(pt, &pvo->pvo_pte.pte); 2382 if (pvo->pvo_pte.pte.pte_lo & ptebit) { 2383 count++; 2384 moea_pte_clear(pt, PVO_VADDR(pvo), ptebit); 2385 } 2386 mtx_unlock(&moea_table_mutex); 2387 } 2388 pvo->pvo_pte.pte.pte_lo &= ~ptebit; 2389 } 2390 2391 return (count); 2392 } 2393 2394 /* 2395 * Return true if the physical range is encompassed by the battable[idx] 2396 */ 2397 static int 2398 moea_bat_mapped(int idx, vm_offset_t pa, vm_size_t size) 2399 { 2400 u_int prot; 2401 u_int32_t start; 2402 u_int32_t end; 2403 u_int32_t bat_ble; 2404 2405 /* 2406 * Return immediately if not a valid mapping 2407 */ 2408 if (!(battable[idx].batu & BAT_Vs)) 2409 return (EINVAL); 2410 2411 /* 2412 * The BAT entry must be cache-inhibited, guarded, and r/w 2413 * so it can function as an i/o page 2414 */ 2415 prot = battable[idx].batl & (BAT_I|BAT_G|BAT_PP_RW); 2416 if (prot != (BAT_I|BAT_G|BAT_PP_RW)) 2417 return (EPERM); 2418 2419 /* 2420 * The address should be within the BAT range. Assume that the 2421 * start address in the BAT has the correct alignment (thus 2422 * not requiring masking) 2423 */ 2424 start = battable[idx].batl & BAT_PBS; 2425 bat_ble = (battable[idx].batu & ~(BAT_EBS)) | 0x03; 2426 end = start | (bat_ble << 15) | 0x7fff; 2427 2428 if ((pa < start) || ((pa + size) > end)) 2429 return (ERANGE); 2430 2431 return (0); 2432 } 2433 2434 boolean_t 2435 moea_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size) 2436 { 2437 int i; 2438 2439 /* 2440 * This currently does not work for entries that 2441 * overlap 256M BAT segments. 2442 */ 2443 2444 for(i = 0; i < 16; i++) 2445 if (moea_bat_mapped(i, pa, size) == 0) 2446 return (0); 2447 2448 return (EFAULT); 2449 } 2450 2451 /* 2452 * Map a set of physical memory pages into the kernel virtual 2453 * address space. Return a pointer to where it is mapped. This 2454 * routine is intended to be used for mapping device memory, 2455 * NOT real memory. 2456 */ 2457 void * 2458 moea_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size) 2459 { 2460 2461 return (moea_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT)); 2462 } 2463 2464 void * 2465 moea_mapdev_attr(mmu_t mmu, vm_offset_t pa, vm_size_t size, vm_memattr_t ma) 2466 { 2467 vm_offset_t va, tmpva, ppa, offset; 2468 int i; 2469 2470 ppa = trunc_page(pa); 2471 offset = pa & PAGE_MASK; 2472 size = roundup(offset + size, PAGE_SIZE); 2473 2474 /* 2475 * If the physical address lies within a valid BAT table entry, 2476 * return the 1:1 mapping. This currently doesn't work 2477 * for regions that overlap 256M BAT segments. 2478 */ 2479 for (i = 0; i < 16; i++) { 2480 if (moea_bat_mapped(i, pa, size) == 0) 2481 return ((void *) pa); 2482 } 2483 2484 va = kmem_alloc_nofault(kernel_map, size); 2485 if (!va) 2486 panic("moea_mapdev: Couldn't alloc kernel virtual memory"); 2487 2488 for (tmpva = va; size > 0;) { 2489 moea_kenter_attr(mmu, tmpva, ppa, ma); 2490 tlbie(tmpva); 2491 size -= PAGE_SIZE; 2492 tmpva += PAGE_SIZE; 2493 ppa += PAGE_SIZE; 2494 } 2495 2496 return ((void *)(va + offset)); 2497 } 2498 2499 void 2500 moea_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size) 2501 { 2502 vm_offset_t base, offset; 2503 2504 /* 2505 * If this is outside kernel virtual space, then it's a 2506 * battable entry and doesn't require unmapping 2507 */ 2508 if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= virtual_end)) { 2509 base = trunc_page(va); 2510 offset = va & PAGE_MASK; 2511 size = roundup(offset + size, PAGE_SIZE); 2512 kmem_free(kernel_map, base, size); 2513 } 2514 } 2515 2516 static void 2517 moea_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz) 2518 { 2519 struct pvo_entry *pvo; 2520 vm_offset_t lim; 2521 vm_paddr_t pa; 2522 vm_size_t len; 2523 2524 PMAP_LOCK(pm); 2525 while (sz > 0) { 2526 lim = round_page(va); 2527 len = MIN(lim - va, sz); 2528 pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL); 2529 if (pvo != NULL) { 2530 pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) | 2531 (va & ADDR_POFF); 2532 moea_syncicache(pa, len); 2533 } 2534 va += len; 2535 sz -= len; 2536 } 2537 PMAP_UNLOCK(pm); 2538 } 2539