/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD AND BSD-4-Clause
 *
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*-
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Manages physical address maps.
 *
 * Since the information managed by this module is also stored by the
 * logical address mapping module, this module may throw away valid virtual
 * to physical mappings at almost any time.  However, invalidations of
 * mappings must be done as requested.
 *
 * In order to cope with hardware architectures which make virtual to
 * physical map invalidates expensive, this module may delay invalidate or
 * reduced protection operations until such time as they are actually
 * necessary.  This module is given full information as to which processors
 * are currently using which maps, and to when physical maps must be made
 * correct.
 */

#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/queue.h>
#include <sys/cpuset.h>
#include <sys/kerneldump.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_phys.h>
#include <vm/vm_pageout.h>
#include <vm/uma.h>

#include <machine/cpu.h>
#include <machine/platform.h>
#include <machine/bat.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/pte.h>
#include <machine/smp.h>
#include <machine/sr.h>
#include <machine/mmuvar.h>
#include <machine/trap.h>

#define	MOEA_DEBUG

#define	TODO	panic("%s: not implemented", __func__);

#define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
#define	VSID_TO_SR(vsid)	((vsid) & 0xf)
#define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)

/* Get physical address from PVO. */
#define	PVO_PADDR(pvo)		((pvo)->pvo_pte.pte.pte_lo & PTE_RPGN)
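
/*
 * Illustrative example (editorial, values assumed): VSID_MAKE() packs a
 * 4-bit segment register number below a 20-bit hash, and the accessors
 * invert it exactly:
 *
 *	VSID_MAKE(0x3, 0x12345)	== 0x123453
 *	VSID_TO_SR(0x123453)	== 0x3
 *	VSID_TO_HASH(0x123453)	== 0x12345
 */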

struct ofw_map {
	vm_offset_t	om_va;
	vm_size_t	om_len;
	vm_offset_t	om_pa;
	u_int		om_mode;
};

extern unsigned char _etext[];
extern unsigned char _end[];

/*
 * Map of physical memory regions.
 */
static struct	mem_region *regions;
static struct	mem_region *pregions;
static u_int	phys_avail_count;
static int	regions_sz, pregions_sz;
static struct	ofw_map *translations;

/*
 * Lock for the pteg and pvo tables.
 */
struct mtx	moea_table_mutex;
struct mtx	moea_vsid_mutex;

/* tlbie instruction synchronization */
static struct mtx tlbie_mtx;

/*
 * PTEG data.
 */
static struct	pteg *moea_pteg_table;
u_int		moea_pteg_count;
u_int		moea_pteg_mask;

/*
 * PVO data.
 */
struct	pvo_head *moea_pvo_table;		/* pvo entries by pteg index */
struct	pvo_head moea_pvo_kunmanaged =
    LIST_HEAD_INITIALIZER(moea_pvo_kunmanaged);	/* list of unmanaged pages */

static struct rwlock_padalign pvh_global_lock;

uma_zone_t	moea_upvo_zone;	/* zone for pvo entries for unmanaged pages */
uma_zone_t	moea_mpvo_zone;	/* zone for pvo entries for managed pages */

#define	BPVO_POOL_SIZE	32768
static struct	pvo_entry *moea_bpvo_pool;
static int	moea_bpvo_pool_index = 0;

#define	VSID_NBPW	(sizeof(u_int32_t) * 8)
static u_int	moea_vsid_bitmap[NPMAPS / VSID_NBPW];

static boolean_t moea_initialized = FALSE;

/*
 * Statistics.
 */
u_int	moea_pte_valid = 0;
u_int	moea_pte_overflow = 0;
u_int	moea_pte_replacements = 0;
u_int	moea_pvo_entries = 0;
u_int	moea_pvo_enter_calls = 0;
u_int	moea_pvo_remove_calls = 0;
u_int	moea_pte_spills = 0;
SYSCTL_INT(_machdep, OID_AUTO, moea_pte_valid, CTLFLAG_RD, &moea_pte_valid,
    0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pte_overflow, CTLFLAG_RD,
    &moea_pte_overflow, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pte_replacements, CTLFLAG_RD,
    &moea_pte_replacements, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_entries, CTLFLAG_RD, &moea_pvo_entries,
    0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_enter_calls, CTLFLAG_RD,
    &moea_pvo_enter_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_remove_calls, CTLFLAG_RD,
    &moea_pvo_remove_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pte_spills, CTLFLAG_RD,
    &moea_pte_spills, 0, "");
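
/*
 * The counters above are exported read-only under the machdep sysctl
 * tree; illustrative usage from userland:
 *
 *	# sysctl machdep.moea_pte_valid machdep.moea_pte_overflow
 */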

/*
 * Allocate physical memory for use in moea_bootstrap.
 */
static vm_offset_t	moea_bootstrap_alloc(vm_size_t, u_int);

/*
 * PTE calls.
 */
static int		moea_pte_insert(u_int, struct pte *);

/*
 * PVO calls.
 */
static int	moea_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *,
		    vm_offset_t, vm_paddr_t, u_int, int);
static void	moea_pvo_remove(struct pvo_entry *, int);
static struct	pvo_entry *moea_pvo_find_va(pmap_t, vm_offset_t, int *);
static struct	pte *moea_pvo_to_pte(const struct pvo_entry *, int);

/*
 * Utility routines.
 */
static int		moea_enter_locked(pmap_t, vm_offset_t, vm_page_t,
			    vm_prot_t, u_int, int8_t);
static void		moea_syncicache(vm_paddr_t, vm_size_t);
static boolean_t	moea_query_bit(vm_page_t, int);
static u_int		moea_clear_bit(vm_page_t, int);
static void		moea_kremove(vm_offset_t);
int		moea_pte_spill(vm_offset_t);

/*
 * Kernel MMU interface
 */
void moea_clear_modify(vm_page_t);
void moea_copy_page(vm_page_t, vm_page_t);
void moea_copy_pages(vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize);
int moea_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int,
    int8_t);
void moea_enter_object(pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
    vm_prot_t);
void moea_enter_quick(pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
vm_paddr_t moea_extract(pmap_t, vm_offset_t);
vm_page_t moea_extract_and_hold(pmap_t, vm_offset_t, vm_prot_t);
void moea_init(void);
boolean_t moea_is_modified(vm_page_t);
boolean_t moea_is_prefaultable(pmap_t, vm_offset_t);
boolean_t moea_is_referenced(vm_page_t);
int moea_ts_referenced(vm_page_t);
vm_offset_t moea_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
static int moea_mincore(pmap_t, vm_offset_t, vm_paddr_t *);
boolean_t moea_page_exists_quick(pmap_t, vm_page_t);
void moea_page_init(vm_page_t);
int moea_page_wired_mappings(vm_page_t);
int moea_pinit(pmap_t);
void moea_pinit0(pmap_t);
void moea_protect(pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
void moea_qenter(vm_offset_t, vm_page_t *, int);
void moea_qremove(vm_offset_t, int);
void moea_release(pmap_t);
void moea_remove(pmap_t, vm_offset_t, vm_offset_t);
void moea_remove_all(vm_page_t);
void moea_remove_write(vm_page_t);
void moea_unwire(pmap_t, vm_offset_t, vm_offset_t);
void moea_zero_page(vm_page_t);
void moea_zero_page_area(vm_page_t, int, int);
void moea_activate(struct thread *);
void moea_deactivate(struct thread *);
void moea_cpu_bootstrap(int);
void moea_bootstrap(vm_offset_t, vm_offset_t);
void *moea_mapdev(vm_paddr_t, vm_size_t);
void *moea_mapdev_attr(vm_paddr_t, vm_size_t, vm_memattr_t);
void moea_unmapdev(vm_offset_t, vm_size_t);
vm_paddr_t moea_kextract(vm_offset_t);
void moea_kenter_attr(vm_offset_t, vm_paddr_t, vm_memattr_t);
void moea_kenter(vm_offset_t, vm_paddr_t);
void moea_page_set_memattr(vm_page_t m, vm_memattr_t ma);
boolean_t moea_dev_direct_mapped(vm_paddr_t, vm_size_t);
static void moea_sync_icache(pmap_t, vm_offset_t, vm_size_t);
void moea_dumpsys_map(vm_paddr_t pa, size_t sz, void **va);
void moea_scan_init(void);
vm_offset_t moea_quick_enter_page(vm_page_t m);
void moea_quick_remove_page(vm_offset_t addr);
boolean_t moea_page_is_mapped(vm_page_t m);
static int moea_map_user_ptr(pmap_t pm,
    volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
static int moea_decode_kernel_ptr(vm_offset_t addr,
    int *is_user, vm_offset_t *decoded_addr);

static struct pmap_funcs moea_methods = {
	.clear_modify = moea_clear_modify,
	.copy_page = moea_copy_page,
	.copy_pages = moea_copy_pages,
	.enter = moea_enter,
	.enter_object = moea_enter_object,
	.enter_quick = moea_enter_quick,
	.extract = moea_extract,
	.extract_and_hold = moea_extract_and_hold,
	.init = moea_init,
	.is_modified = moea_is_modified,
	.is_prefaultable = moea_is_prefaultable,
	.is_referenced = moea_is_referenced,
	.ts_referenced = moea_ts_referenced,
	.map = moea_map,
	.page_exists_quick = moea_page_exists_quick,
	.page_init = moea_page_init,
	.page_wired_mappings = moea_page_wired_mappings,
	.pinit = moea_pinit,
	.pinit0 = moea_pinit0,
	.protect = moea_protect,
	.qenter = moea_qenter,
	.qremove = moea_qremove,
	.release = moea_release,
	.remove = moea_remove,
	.remove_all = moea_remove_all,
	.mincore = moea_mincore,
	.remove_write = moea_remove_write,
	.sync_icache = moea_sync_icache,
	.unwire = moea_unwire,
	.zero_page = moea_zero_page,
	.zero_page_area = moea_zero_page_area,
	.activate = moea_activate,
	.deactivate = moea_deactivate,
	.page_set_memattr = moea_page_set_memattr,
	.quick_enter_page = moea_quick_enter_page,
	.quick_remove_page = moea_quick_remove_page,
	.page_is_mapped = moea_page_is_mapped,

	/* Internal interfaces */
	.bootstrap = moea_bootstrap,
	.cpu_bootstrap = moea_cpu_bootstrap,
	.mapdev_attr = moea_mapdev_attr,
	.mapdev = moea_mapdev,
	.unmapdev = moea_unmapdev,
	.kextract = moea_kextract,
	.kenter = moea_kenter,
	.kenter_attr = moea_kenter_attr,
	.dev_direct_mapped = moea_dev_direct_mapped,
	.dumpsys_pa_init = moea_scan_init,
	.dumpsys_map_chunk = moea_dumpsys_map,
	.map_user_ptr = moea_map_user_ptr,
	.decode_kernel_ptr = moea_decode_kernel_ptr,
};

MMU_DEF(oea_mmu, MMU_TYPE_OEA, moea_methods);

static __inline uint32_t
moea_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
{
	uint32_t pte_lo;
	int i;

	if (ma != VM_MEMATTR_DEFAULT) {
		switch (ma) {
		case VM_MEMATTR_UNCACHEABLE:
			return (PTE_I | PTE_G);
		case VM_MEMATTR_CACHEABLE:
			return (PTE_M);
		case VM_MEMATTR_WRITE_COMBINING:
		case VM_MEMATTR_WRITE_BACK:
		case VM_MEMATTR_PREFETCHABLE:
			return (PTE_I);
		case VM_MEMATTR_WRITE_THROUGH:
			return (PTE_W | PTE_M);
		}
	}

	/*
	 * Assume the page is cache inhibited and access is guarded unless
	 * it's in our available memory array.
	 */
	pte_lo = PTE_I | PTE_G;
	for (i = 0; i < pregions_sz; i++) {
		if ((pa >= pregions[i].mr_start) &&
		    (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
			pte_lo = PTE_M;
			break;
		}
	}

	return pte_lo;
}

/*
 * Translate OFW translations into VM attributes.
 */
static __inline vm_memattr_t
moea_bootstrap_convert_wimg(uint32_t mode)
{

	switch (mode) {
	case (PTE_I | PTE_G):
		/* PCI device memory */
		return VM_MEMATTR_UNCACHEABLE;
	case (PTE_M):
		/* Explicitly coherent */
		return VM_MEMATTR_CACHEABLE;
	case 0: /* Default claim */
	case 2: /* Alternate PP bits set by OF for the original payload */
		/* "Normal" memory. */
		return VM_MEMATTR_DEFAULT;

	default:
		/* Err on the side of caution for unknowns */
		/* XXX should we panic instead? */
		return VM_MEMATTR_UNCACHEABLE;
	}
}

static void
tlbie(vm_offset_t va)
{

	mtx_lock_spin(&tlbie_mtx);
	__asm __volatile("ptesync");
	__asm __volatile("tlbie %0" :: "r"(va));
	__asm __volatile("eieio; tlbsync; ptesync");
	mtx_unlock_spin(&tlbie_mtx);
}

static void
tlbia(void)
{
	vm_offset_t va;

	for (va = 0; va < 0x00040000; va += 0x00001000) {
		__asm __volatile("tlbie %0" :: "r"(va));
		powerpc_sync();
	}
	__asm __volatile("tlbsync");
	powerpc_sync();
}

static __inline int
va_to_sr(u_int *sr, vm_offset_t va)
{
	return (sr[(uintptr_t)va >> ADDR_SR_SHFT]);
}

static __inline u_int
va_to_pteg(u_int sr, vm_offset_t addr)
{
	u_int hash;

	hash = (sr & SR_VSID_MASK) ^ (((u_int)addr & ADDR_PIDX) >>
	    ADDR_PIDX_SHFT);
	return (hash & moea_pteg_mask);
}
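
/*
 * Illustrative sketch (values assumed): the primary hash XORs the VSID
 * taken from the segment register with the 16-bit page index of the EA,
 * e.g. for sr = 0x00123453 and addr = 0x00345000,
 *
 *	hash = 0x123453 ^ 0x345 = 0x123716
 *
 * and the PTE group index is hash & moea_pteg_mask.
 */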

static __inline struct pvo_head *
vm_page_to_pvoh(vm_page_t m)
{

	return (&m->md.mdpg_pvoh);
}

static __inline void
moea_attr_clear(vm_page_t m, int ptebit)
{

	rw_assert(&pvh_global_lock, RA_WLOCKED);
	m->md.mdpg_attrs &= ~ptebit;
}

static __inline int
moea_attr_fetch(vm_page_t m)
{

	return (m->md.mdpg_attrs);
}

static __inline void
moea_attr_save(vm_page_t m, int ptebit)
{

	rw_assert(&pvh_global_lock, RA_WLOCKED);
	m->md.mdpg_attrs |= ptebit;
}

static __inline int
moea_pte_compare(const struct pte *pt, const struct pte *pvo_pt)
{
	if (pt->pte_hi == pvo_pt->pte_hi)
		return (1);

	return (0);
}

static __inline int
moea_pte_match(struct pte *pt, u_int sr, vm_offset_t va, int which)
{
	return (pt->pte_hi & ~PTE_VALID) ==
	    (((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
	    ((va >> ADDR_API_SHFT) & PTE_API) | which);
}

static __inline void
moea_pte_create(struct pte *pt, u_int sr, vm_offset_t va, u_int pte_lo)
{

	mtx_assert(&moea_table_mutex, MA_OWNED);

	/*
	 * Construct a PTE.  Default to IMB initially.  Valid bit only gets
	 * set when the real pte is set in memory.
	 *
	 * Note: Don't set the valid bit for correct operation of tlb update.
	 */
	pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
	    (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API);
	pt->pte_lo = pte_lo;
}

static __inline void
moea_pte_synch(struct pte *pt, struct pte *pvo_pt)
{

	mtx_assert(&moea_table_mutex, MA_OWNED);
	pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG);
}

static __inline void
moea_pte_clear(struct pte *pt, vm_offset_t va, int ptebit)
{

	mtx_assert(&moea_table_mutex, MA_OWNED);

	/*
	 * As shown in Section 7.6.3.2.3
	 */
	pt->pte_lo &= ~ptebit;
	tlbie(va);
}

static __inline void
moea_pte_set(struct pte *pt, struct pte *pvo_pt)
{

	mtx_assert(&moea_table_mutex, MA_OWNED);
	pvo_pt->pte_hi |= PTE_VALID;

	/*
	 * Update the PTE as defined in section 7.6.3.1.
	 * Note that the REF/CHG bits are from pvo_pt and thus should have
	 * been saved so this routine can restore them (if desired).
	 */
	pt->pte_lo = pvo_pt->pte_lo;
	powerpc_sync();
	pt->pte_hi = pvo_pt->pte_hi;
	powerpc_sync();
	moea_pte_valid++;
}

static __inline void
moea_pte_unset(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
{

	mtx_assert(&moea_table_mutex, MA_OWNED);
	pvo_pt->pte_hi &= ~PTE_VALID;

	/*
	 * Force the ref & chg bits back into the PTEs.
	 */
	powerpc_sync();

	/*
	 * Invalidate the pte.
	 */
	pt->pte_hi &= ~PTE_VALID;

	tlbie(va);

	/*
	 * Save the ref & chg bits.
	 */
	moea_pte_synch(pt, pvo_pt);
	moea_pte_valid--;
}

static __inline void
moea_pte_change(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
{

	/*
	 * Invalidate the PTE
	 */
	moea_pte_unset(pt, pvo_pt, va);
	moea_pte_set(pt, pvo_pt);
}
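
/*
 * Editorial note on the update protocol the helpers above implement: a
 * valid PTE is never rewritten in place.  moea_pte_change() first clears
 * PTE_VALID and flushes the TLB entry (moea_pte_unset), then installs
 * the new contents with the required sync sequence (moea_pte_set),
 * preserving the REF/CHG bits across the transition.
 */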

/*
 * Quick sort callout for comparing memory regions.
 */
static int	om_cmp(const void *a, const void *b);

static int
om_cmp(const void *a, const void *b)
{
	const struct	ofw_map *mapa;
	const struct	ofw_map *mapb;

	mapa = a;
	mapb = b;
	if (mapa->om_pa < mapb->om_pa)
		return (-1);
	else if (mapa->om_pa > mapb->om_pa)
		return (1);
	else
		return (0);
}

void
moea_cpu_bootstrap(int ap)
{
	u_int sdr;
	int i;

	if (ap) {
		powerpc_sync();
		__asm __volatile("mtdbatu 0,%0" :: "r"(battable[0].batu));
		__asm __volatile("mtdbatl 0,%0" :: "r"(battable[0].batl));
		isync();
		__asm __volatile("mtibatu 0,%0" :: "r"(battable[0].batu));
		__asm __volatile("mtibatl 0,%0" :: "r"(battable[0].batl));
		isync();
	}

	__asm __volatile("mtdbatu 1,%0" :: "r"(battable[8].batu));
	__asm __volatile("mtdbatl 1,%0" :: "r"(battable[8].batl));
	isync();

	__asm __volatile("mtibatu 1,%0" :: "r"(0));
	__asm __volatile("mtdbatu 2,%0" :: "r"(0));
	__asm __volatile("mtibatu 2,%0" :: "r"(0));
	__asm __volatile("mtdbatu 3,%0" :: "r"(0));
	__asm __volatile("mtibatu 3,%0" :: "r"(0));
	isync();

	for (i = 0; i < 16; i++)
		mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]);
	powerpc_sync();

	sdr = (u_int)moea_pteg_table | (moea_pteg_mask >> 10);
	__asm __volatile("mtsdr1 %0" :: "r"(sdr));
	isync();

	tlbia();
}
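
/*
 * Illustrative example (values assumed): SDR1 combines the physical base
 * of the PTEG table with a mask of the hash bits beyond the minimum 64 KB
 * table.  A 2 MB table (32768 PTEGs) at physical 0x00400000 gives
 *
 *	sdr = 0x00400000 | (0x7fff >> 10) = 0x0040001f
 */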

void
moea_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
{
	ihandle_t	mmui;
	phandle_t	chosen, mmu;
	int		sz;
	int		i, j;
	vm_size_t	size, physsz, hwphyssz;
	vm_offset_t	pa, va, off;
	void		*dpcpu;

	/*
	 * Map PCI memory space.
	 */
	battable[0x8].batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0x8].batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);

	battable[0x9].batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0x9].batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs);

	battable[0xa].batl = BATL(0xa0000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0xa].batu = BATU(0xa0000000, BAT_BL_256M, BAT_Vs);

	battable[0xb].batl = BATL(0xb0000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0xb].batu = BATU(0xb0000000, BAT_BL_256M, BAT_Vs);

	powerpc_sync();

	/* map pci space */
	__asm __volatile("mtdbatu 1,%0" :: "r"(battable[8].batu));
	__asm __volatile("mtdbatl 1,%0" :: "r"(battable[8].batl));
	isync();

	/* set global direct map flag */
	hw_direct_map = 1;

	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
	CTR0(KTR_PMAP, "moea_bootstrap: physical memory");

	for (i = 0; i < pregions_sz; i++) {
		vm_offset_t pa;
		vm_offset_t end;

		CTR3(KTR_PMAP, "physregion: %#x - %#x (%#x)",
			pregions[i].mr_start,
			pregions[i].mr_start + pregions[i].mr_size,
			pregions[i].mr_size);
		/*
		 * Install entries into the BAT table to allow all
		 * of physmem to be covered by on-demand BAT entries.
		 * The loop will sometimes set the same battable element
		 * twice, but that's fine since they won't be used for
		 * a while yet.
		 */
		pa = pregions[i].mr_start & 0xf0000000;
		end = pregions[i].mr_start + pregions[i].mr_size;
		do {
			u_int n = pa >> ADDR_SR_SHFT;

			battable[n].batl = BATL(pa, BAT_M, BAT_PP_RW);
			battable[n].batu = BATU(pa, BAT_BL_256M, BAT_Vs);
			pa += SEGMENT_LENGTH;
		} while (pa < end);
	}

	if (PHYS_AVAIL_ENTRIES < regions_sz)
		panic("moea_bootstrap: phys_avail too small");

	phys_avail_count = 0;
	physsz = 0;
	hwphyssz = 0;
	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
	for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
		CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
		    regions[i].mr_start + regions[i].mr_size,
		    regions[i].mr_size);
		if (hwphyssz != 0 &&
		    (physsz + regions[i].mr_size) >= hwphyssz) {
			if (physsz < hwphyssz) {
				phys_avail[j] = regions[i].mr_start;
				phys_avail[j + 1] = regions[i].mr_start +
				    hwphyssz - physsz;
				physsz = hwphyssz;
				phys_avail_count++;
			}
			break;
		}
		phys_avail[j] = regions[i].mr_start;
		phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
		phys_avail_count++;
		physsz += regions[i].mr_size;
	}

	/* Check for overlap with the kernel and exception vectors */
	for (j = 0; j < 2*phys_avail_count; j+=2) {
		if (phys_avail[j] < EXC_LAST)
			phys_avail[j] += EXC_LAST;

		if (kernelstart >= phys_avail[j] &&
		    kernelstart < phys_avail[j+1]) {
			if (kernelend < phys_avail[j+1]) {
				phys_avail[2*phys_avail_count] =
				    (kernelend & ~PAGE_MASK) + PAGE_SIZE;
				phys_avail[2*phys_avail_count + 1] =
				    phys_avail[j+1];
				phys_avail_count++;
			}

			phys_avail[j+1] = kernelstart & ~PAGE_MASK;
		}

		if (kernelend >= phys_avail[j] &&
		    kernelend < phys_avail[j+1]) {
			if (kernelstart > phys_avail[j]) {
				phys_avail[2*phys_avail_count] = phys_avail[j];
				phys_avail[2*phys_avail_count + 1] =
				    kernelstart & ~PAGE_MASK;
				phys_avail_count++;
			}

			phys_avail[j] = (kernelend & ~PAGE_MASK) + PAGE_SIZE;
		}
	}
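
	/*
	 * Editorial illustration (layout assumed, not from the source): at
	 * this point phys_avail[] holds (start, end) pairs of usable RAM
	 * with the exception vectors and the kernel image carved out, e.g.:
	 *
	 *	phys_avail[0] = EXC_LAST	phys_avail[1] = kernelstart
	 *	phys_avail[2] = first page past the kernel
	 *	phys_avail[3] = end of the memory region
	 */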

	physmem = btoc(physsz);

	/*
	 * Allocate PTEG table.
	 */
#ifdef PTEGCOUNT
	moea_pteg_count = PTEGCOUNT;
#else
	moea_pteg_count = 0x1000;

	while (moea_pteg_count < physmem)
		moea_pteg_count <<= 1;

	moea_pteg_count >>= 1;
#endif /* PTEGCOUNT */

	size = moea_pteg_count * sizeof(struct pteg);
	CTR2(KTR_PMAP, "moea_bootstrap: %d PTEGs, %d bytes", moea_pteg_count,
	    size);
	moea_pteg_table = (struct pteg *)moea_bootstrap_alloc(size, size);
	CTR1(KTR_PMAP, "moea_bootstrap: PTEG table at %p", moea_pteg_table);
	bzero((void *)moea_pteg_table, moea_pteg_count * sizeof(struct pteg));
	moea_pteg_mask = moea_pteg_count - 1;
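
	/*
	 * Illustrative sizing example (assumed values): with 64 MB of RAM
	 * (physmem = 0x4000 pages) and no PTEGCOUNT override, the loop
	 * above settles on moea_pteg_count = 0x2000, i.e. one PTE group
	 * for every two physical pages, and moea_pteg_mask = 0x1fff.
	 */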

	/*
	 * Allocate pv/overflow lists.
	 */
	size = sizeof(struct pvo_head) * moea_pteg_count;
	moea_pvo_table = (struct pvo_head *)moea_bootstrap_alloc(size,
	    PAGE_SIZE);
	CTR1(KTR_PMAP, "moea_bootstrap: PVO table at %p", moea_pvo_table);
	for (i = 0; i < moea_pteg_count; i++)
		LIST_INIT(&moea_pvo_table[i]);

	/*
	 * Initialize the lock that synchronizes access to the pteg and pvo
	 * tables.
	 */
	mtx_init(&moea_table_mutex, "pmap table", NULL, MTX_DEF |
	    MTX_RECURSE);
	mtx_init(&moea_vsid_mutex, "VSID table", NULL, MTX_DEF);

	mtx_init(&tlbie_mtx, "tlbie", NULL, MTX_SPIN);

	/*
	 * Initialize the unmanaged pvo pool.
	 */
	moea_bpvo_pool = (struct pvo_entry *)moea_bootstrap_alloc(
	    BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
	moea_bpvo_pool_index = 0;

	/*
	 * Make sure kernel vsid is allocated as well as VSID 0.
	 */
	moea_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW]
		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
	moea_vsid_bitmap[0] |= 1;

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
	PMAP_LOCK_INIT(kernel_pmap);
	for (i = 0; i < 16; i++)
		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;
	CPU_FILL(&kernel_pmap->pm_active);
	RB_INIT(&kernel_pmap->pmap_pvo);

	/*
	 * Initialize the global pv list lock.
	 */
	rw_init(&pvh_global_lock, "pmap pv global");

	/*
	 * Set up the Open Firmware mappings
	 */
	chosen = OF_finddevice("/chosen");
	if (chosen != -1 && OF_getprop(chosen, "mmu", &mmui, 4) != -1 &&
	    (mmu = OF_instance_to_package(mmui)) != -1 &&
	    (sz = OF_getproplen(mmu, "translations")) != -1) {
		translations = NULL;
		for (i = 0; phys_avail[i] != 0; i += 2) {
			if (phys_avail[i + 1] >= sz) {
				translations = (struct ofw_map *)phys_avail[i];
				break;
			}
		}
		if (translations == NULL)
			panic("moea_bootstrap: no space to copy translations");
		bzero(translations, sz);
		if (OF_getprop(mmu, "translations", translations, sz) == -1)
			panic("moea_bootstrap: can't get ofw translations");
		CTR0(KTR_PMAP, "moea_bootstrap: translations");
		sz /= sizeof(*translations);
		qsort(translations, sz, sizeof (*translations), om_cmp);
		for (i = 0; i < sz; i++) {
			CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
			    translations[i].om_pa, translations[i].om_va,
			    translations[i].om_len);

			/*
			 * If the mapping is 1:1, let the RAM and device
			 * on-demand BAT tables take care of the translation.
			 *
			 * However, always enter mappings for segment 16,
			 * which is mixed-protection and therefore not
			 * compatible with a BAT entry.
			 */
			if ((translations[i].om_va >> ADDR_SR_SHFT) != 0xf &&
			    translations[i].om_va == translations[i].om_pa)
				continue;

			/* Enter the pages */
			for (off = 0; off < translations[i].om_len;
			    off += PAGE_SIZE)
				moea_kenter_attr(translations[i].om_va + off,
				    translations[i].om_pa + off,
				    moea_bootstrap_convert_wimg(translations[i].om_mode));
		}
	}

	/*
	 * Calculate the last available physical address.
	 */
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	Maxmem = powerpc_btop(phys_avail[i + 1]);

	moea_cpu_bootstrap(0);
	mtmsr(mfmsr() | PSL_DR | PSL_IR);
	pmap_bootstrapped++;

	/*
	 * Set the start and end of kva.
	 */
	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;

	/*
	 * Allocate a kernel stack with a guard page for thread0 and map it
	 * into the kernel page map.
	 */
	pa = moea_bootstrap_alloc(kstack_pages * PAGE_SIZE, PAGE_SIZE);
	va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
	virtual_avail = va + kstack_pages * PAGE_SIZE;
	CTR2(KTR_PMAP, "moea_bootstrap: kstack0 at %#x (%#x)", pa, va);
	thread0.td_kstack = va;
	thread0.td_kstack_pages = kstack_pages;
	for (i = 0; i < kstack_pages; i++) {
		moea_kenter(va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}
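
	/*
	 * Note: the KSTACK_GUARD_PAGES page(s) below the stack are skipped
	 * rather than mapped, so a thread0 stack overflow faults instead
	 * of silently overwriting the preceding KVA.
	 */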

	/*
	 * Allocate virtual address space for the message buffer.
	 */
	pa = msgbuf_phys = moea_bootstrap_alloc(msgbufsize, PAGE_SIZE);
	msgbufp = (struct msgbuf *)virtual_avail;
	va = virtual_avail;
	virtual_avail += round_page(msgbufsize);
	while (va < virtual_avail) {
		moea_kenter(va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}

	/*
	 * Allocate virtual address space for the dynamic percpu area.
	 */
	pa = moea_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE);
	dpcpu = (void *)virtual_avail;
	va = virtual_avail;
	virtual_avail += DPCPU_SIZE;
	while (va < virtual_avail) {
		moea_kenter(va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	dpcpu_init(dpcpu, 0);
}

/*
 * Activate a user pmap.  The pmap must be activated before its address
 * space can be accessed in any way.
 */
void
moea_activate(struct thread *td)
{
	pmap_t	pm, pmr;

	/*
	 * Load all the data we need up front to encourage the compiler to
	 * not issue any loads while we have interrupts disabled below.
	 */
	pm = &td->td_proc->p_vmspace->vm_pmap;
	pmr = pm->pmap_phys;

	CPU_SET(PCPU_GET(cpuid), &pm->pm_active);
	PCPU_SET(curpmap, pmr);

	mtsrin(USER_SR << ADDR_SR_SHFT, td->td_pcb->pcb_cpu.aim.usr_vsid);
}

void
moea_deactivate(struct thread *td)
{
	pmap_t	pm;

	pm = &td->td_proc->p_vmspace->vm_pmap;
	CPU_CLR(PCPU_GET(cpuid), &pm->pm_active);
	PCPU_SET(curpmap, NULL);
}

void
moea_unwire(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
	struct	pvo_entry key, *pvo;

	PMAP_LOCK(pm);
	key.pvo_vaddr = sva;
	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
	    pvo != NULL && PVO_VADDR(pvo) < eva;
	    pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
		if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
			panic("moea_unwire: pvo %p is missing PVO_WIRED", pvo);
		pvo->pvo_vaddr &= ~PVO_WIRED;
		pm->pm_stats.wired_count--;
	}
	PMAP_UNLOCK(pm);
}

void
moea_copy_page(vm_page_t msrc, vm_page_t mdst)
{
	vm_offset_t	dst;
	vm_offset_t	src;

	dst = VM_PAGE_TO_PHYS(mdst);
	src = VM_PAGE_TO_PHYS(msrc);

	bcopy((void *)src, (void *)dst, PAGE_SIZE);
}

void
moea_copy_pages(vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{
	void *a_cp, *b_cp;
	vm_offset_t a_pg_offset, b_pg_offset;
	int cnt;

	while (xfersize > 0) {
		a_pg_offset = a_offset & PAGE_MASK;
		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
		a_cp = (char *)VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]) +
		    a_pg_offset;
		b_pg_offset = b_offset & PAGE_MASK;
		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
		b_cp = (char *)VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]) +
		    b_pg_offset;
		bcopy(a_cp, b_cp, cnt);
		a_offset += cnt;
		b_offset += cnt;
		xfersize -= cnt;
	}
}

/*
 * Zero a page of physical memory.  The page is addressed through the
 * direct map, one cache block at a time, with the dcbz instruction.
 */
void
moea_zero_page(vm_page_t m)
{
	vm_offset_t off, pa = VM_PAGE_TO_PHYS(m);

	for (off = 0; off < PAGE_SIZE; off += cacheline_size)
		__asm __volatile("dcbz 0,%0" :: "r"(pa + off));
}

void
moea_zero_page_area(vm_page_t m, int off, int size)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
	void *va = (void *)(pa + off);

	bzero(va, size);
}

vm_offset_t
moea_quick_enter_page(vm_page_t m)
{

	return (VM_PAGE_TO_PHYS(m));
}

void
moea_quick_remove_page(vm_offset_t addr)
{
}

boolean_t
moea_page_is_mapped(vm_page_t m)
{
	return (!LIST_EMPTY(&(m)->md.mdpg_pvoh));
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 */
int
moea_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    u_int flags, int8_t psind)
{
	int error;

	for (;;) {
		rw_wlock(&pvh_global_lock);
		PMAP_LOCK(pmap);
		error = moea_enter_locked(pmap, va, m, prot, flags, psind);
		rw_wunlock(&pvh_global_lock);
		PMAP_UNLOCK(pmap);
		if (error != ENOMEM)
			return (KERN_SUCCESS);
		if ((flags & PMAP_ENTER_NOSLEEP) != 0)
			return (KERN_RESOURCE_SHORTAGE);
		VM_OBJECT_ASSERT_UNLOCKED(m->object);
		vm_wait(NULL);
	}
}
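
/*
 * Usage note (editorial): moea_enter() retries until moea_pvo_enter()
 * stops returning ENOMEM, sleeping in vm_wait(); callers that cannot
 * sleep must pass PMAP_ENTER_NOSLEEP and handle KERN_RESOURCE_SHORTAGE
 * themselves.
 */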

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 *
 * The global pvh and pmap must be locked.
 */
static int
moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    u_int flags, int8_t psind __unused)
{
	struct		pvo_head *pvo_head;
	uma_zone_t	zone;
	u_int		pte_lo, pvo_flags;
	int		error;

	if (pmap_bootstrapped)
		rw_assert(&pvh_global_lock, RA_WLOCKED);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	if ((m->oflags & VPO_UNMANAGED) == 0) {
		if ((flags & PMAP_ENTER_QUICK_LOCKED) == 0)
			VM_PAGE_OBJECT_BUSY_ASSERT(m);
		else
			VM_OBJECT_ASSERT_LOCKED(m->object);
	}

	if ((m->oflags & VPO_UNMANAGED) != 0 || !moea_initialized) {
		pvo_head = &moea_pvo_kunmanaged;
		zone = moea_upvo_zone;
		pvo_flags = 0;
	} else {
		pvo_head = vm_page_to_pvoh(m);
		zone = moea_mpvo_zone;
		pvo_flags = PVO_MANAGED;
	}

	pte_lo = moea_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m));

	if (prot & VM_PROT_WRITE) {
		pte_lo |= PTE_BW;
		if (pmap_bootstrapped &&
		    (m->oflags & VPO_UNMANAGED) == 0)
			vm_page_aflag_set(m, PGA_WRITEABLE);
	} else
		pte_lo |= PTE_BR;

	if ((flags & PMAP_ENTER_WIRED) != 0)
		pvo_flags |= PVO_WIRED;

	error = moea_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m),
	    pte_lo, pvo_flags);

	/*
	 * Flush the real page from the instruction cache.  This has to be
	 * done for all user mappings to prevent information leakage via the
	 * instruction cache.  moea_pvo_enter() returns ENOENT for the first
	 * mapping for a page.
	 */
	if (pmap != kernel_pmap && error == ENOENT &&
	    (pte_lo & (PTE_I | PTE_G)) == 0)
		moea_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);

	return (error);
}

/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start.  This page is
 * mapped at the given virtual address start.  Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object.  The
 * last page in the sequence is the page with the largest offset from
 * m_start that can be mapped at a virtual address less than the given
 * virtual address end.  Not every virtual page between start and end
 * is mapped; only those for which a resident page exists with the
 * corresponding offset from m_start are mapped.
 */
void
moea_enter_object(pmap_t pm, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{
	vm_page_t m;
	vm_pindex_t diff, psize;

	VM_OBJECT_ASSERT_LOCKED(m_start->object);

	psize = atop(end - start);
	m = m_start;
	rw_wlock(&pvh_global_lock);
	PMAP_LOCK(pm);
	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
		moea_enter_locked(pm, start + ptoa(diff), m, prot &
		    (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_QUICK_LOCKED,
		    0);
		m = TAILQ_NEXT(m, listq);
	}
	rw_wunlock(&pvh_global_lock);
	PMAP_UNLOCK(pm);
}

void
moea_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m,
    vm_prot_t prot)
{

	rw_wlock(&pvh_global_lock);
	PMAP_LOCK(pm);
	moea_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
	    PMAP_ENTER_QUICK_LOCKED, 0);
	rw_wunlock(&pvh_global_lock);
	PMAP_UNLOCK(pm);
}

vm_paddr_t
moea_extract(pmap_t pm, vm_offset_t va)
{
	struct	pvo_entry *pvo;
	vm_paddr_t pa;

	PMAP_LOCK(pm);
	pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
	if (pvo == NULL)
		pa = 0;
	else
		pa = PVO_PADDR(pvo) | (va & ADDR_POFF);
	PMAP_UNLOCK(pm);
	return (pa);
}
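
/*
 * Caveat (editorial): moea_extract() returns 0 both when va is unmapped
 * and when va genuinely maps physical address 0, so callers that need
 * to distinguish the two cases must do so by other means.
 */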

/*
 * Atomically extract and hold the physical page with the given
 * pmap and virtual address pair if that mapping permits the given
 * protection.
 */
vm_page_t
moea_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
	struct	pvo_entry *pvo;
	vm_page_t m;

	m = NULL;
	PMAP_LOCK(pmap);
	pvo = moea_pvo_find_va(pmap, va & ~ADDR_POFF, NULL);
	if (pvo != NULL && (pvo->pvo_pte.pte.pte_hi & PTE_VALID) &&
	    ((pvo->pvo_pte.pte.pte_lo & PTE_PP) == PTE_RW ||
	     (prot & VM_PROT_WRITE) == 0)) {
		m = PHYS_TO_VM_PAGE(PVO_PADDR(pvo));
		if (!vm_page_wire_mapped(m))
			m = NULL;
	}
	PMAP_UNLOCK(pmap);
	return (m);
}

void
moea_init()
{

	moea_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
	moea_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
	moea_initialized = TRUE;
}

boolean_t
moea_is_referenced(vm_page_t m)
{
	boolean_t rv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea_is_referenced: page %p is not managed", m));
	rw_wlock(&pvh_global_lock);
	rv = moea_query_bit(m, PTE_REF);
	rw_wunlock(&pvh_global_lock);
	return (rv);
}

boolean_t
moea_is_modified(vm_page_t m)
{
	boolean_t rv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea_is_modified: page %p is not managed", m));

	/*
	 * If the page is not busied then this check is racy.
	 */
	if (!pmap_page_is_write_mapped(m))
		return (FALSE);

	rw_wlock(&pvh_global_lock);
	rv = moea_query_bit(m, PTE_CHG);
	rw_wunlock(&pvh_global_lock);
	return (rv);
}

boolean_t
moea_is_prefaultable(pmap_t pmap, vm_offset_t va)
{
	struct pvo_entry *pvo;
	boolean_t rv;

	PMAP_LOCK(pmap);
	pvo = moea_pvo_find_va(pmap, va & ~ADDR_POFF, NULL);
	rv = pvo == NULL || (pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0;
	PMAP_UNLOCK(pmap);
	return (rv);
}

void
moea_clear_modify(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea_clear_modify: page %p is not managed", m));
	vm_page_assert_busied(m);

	if (!pmap_page_is_write_mapped(m))
		return;
	rw_wlock(&pvh_global_lock);
	moea_clear_bit(m, PTE_CHG);
	rw_wunlock(&pvh_global_lock);
}
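
/*
 * Editorial note: the queries above share one pattern.  REF/CHG state
 * lives partly in the hardware PTEs and partly in the per-page attribute
 * cache (mdpg_attrs, maintained by moea_attr_save()), so each takes the
 * global pv lock and defers to moea_query_bit()/moea_clear_bit() to
 * merge the two sources.
 */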

/*
 * Clear the write and modified bits in each of the given page's mappings.
 */
void
moea_remove_write(vm_page_t m)
{
	struct	pvo_entry *pvo;
	struct	pte *pt;
	pmap_t	pmap;
	u_int	lo;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea_remove_write: page %p is not managed", m));
	vm_page_assert_busied(m);

	if (!pmap_page_is_write_mapped(m))
		return;
	rw_wlock(&pvh_global_lock);
	lo = moea_attr_fetch(m);
	powerpc_sync();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		if ((pvo->pvo_pte.pte.pte_lo & PTE_PP) != PTE_BR) {
			pt = moea_pvo_to_pte(pvo, -1);
			pvo->pvo_pte.pte.pte_lo &= ~PTE_PP;
			pvo->pvo_pte.pte.pte_lo |= PTE_BR;
			if (pt != NULL) {
				moea_pte_synch(pt, &pvo->pvo_pte.pte);
				lo |= pvo->pvo_pte.pte.pte_lo;
				pvo->pvo_pte.pte.pte_lo &= ~PTE_CHG;
				moea_pte_change(pt, &pvo->pvo_pte.pte,
				    pvo->pvo_vaddr);
				mtx_unlock(&moea_table_mutex);
			}
		}
		PMAP_UNLOCK(pmap);
	}
	if ((lo & PTE_CHG) != 0) {
		moea_attr_clear(m, PTE_CHG);
		vm_page_dirty(m);
	}
	vm_page_aflag_clear(m, PGA_WRITEABLE);
	rw_wunlock(&pvh_global_lock);
}

/*
 * moea_ts_referenced:
 *
 *	Return a count of reference bits for a page, clearing those bits.
 *	It is not necessary for every reference bit to be cleared, but it
 *	is necessary that 0 only be returned when there are truly no
 *	reference bits set.
 *
 *	XXX: The exact number of bits to check and clear is a matter that
 *	should be tested and standardized at some point in the future for
 *	optimal aging of shared pages.
 */
int
moea_ts_referenced(vm_page_t m)
{
	int count;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea_ts_referenced: page %p is not managed", m));
	rw_wlock(&pvh_global_lock);
	count = moea_clear_bit(m, PTE_REF);
	rw_wunlock(&pvh_global_lock);
	return (count);
}

/*
 * Modify the WIMG settings of all mappings for a page.
 */
void
moea_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{
	struct	pvo_entry *pvo;
	struct	pvo_head *pvo_head;
	struct	pte *pt;
	pmap_t	pmap;
	u_int	lo;

	if ((m->oflags & VPO_UNMANAGED) != 0) {
		m->md.mdpg_cache_attrs = ma;
		return;
	}

	rw_wlock(&pvh_global_lock);
	pvo_head = vm_page_to_pvoh(m);
	lo = moea_calc_wimg(VM_PAGE_TO_PHYS(m), ma);

	LIST_FOREACH(pvo, pvo_head, pvo_vlink) {
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		pt = moea_pvo_to_pte(pvo, -1);
		pvo->pvo_pte.pte.pte_lo &= ~PTE_WIMG;
		pvo->pvo_pte.pte.pte_lo |= lo;
		if (pt != NULL) {
			moea_pte_change(pt, &pvo->pvo_pte.pte,
			    pvo->pvo_vaddr);
			if (pvo->pvo_pmap == kernel_pmap)
				isync();
		}
		mtx_unlock(&moea_table_mutex);
		PMAP_UNLOCK(pmap);
	}
	m->md.mdpg_cache_attrs = ma;
	rw_wunlock(&pvh_global_lock);
}

/*
 * Map a wired page into kernel virtual address space.
 */
void
moea_kenter(vm_offset_t va, vm_paddr_t pa)
{

	moea_kenter_attr(va, pa, VM_MEMATTR_DEFAULT);
}

void
moea_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
{
	u_int		pte_lo;
	int		error;

#if 0
	if (va < VM_MIN_KERNEL_ADDRESS)
		panic("moea_kenter: attempt to enter non-kernel address %#x",
		    va);
#endif

	pte_lo = moea_calc_wimg(pa, ma);

	PMAP_LOCK(kernel_pmap);
	error = moea_pvo_enter(kernel_pmap, moea_upvo_zone,
	    &moea_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED);

	if (error != 0 && error != ENOENT)
		panic("moea_kenter: failed to enter va %#x pa %#x: %d", va,
		    pa, error);

	PMAP_UNLOCK(kernel_pmap);
}
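
/*
 * Illustrative usage (hypothetical device address): a register page can
 * be entered uncached and guarded with
 *
 *	moea_kenter_attr(va, 0x80001000, VM_MEMATTR_UNCACHEABLE);
 *
 * which moea_calc_wimg() translates to PTE_I | PTE_G.
 */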

/*
 * Extract the physical page address associated with the given kernel virtual
 * address.
 */
vm_paddr_t
moea_kextract(vm_offset_t va)
{
	struct		pvo_entry *pvo;
	vm_paddr_t pa;

	/*
	 * Allow direct mappings on 32-bit OEA
	 */
	if (va < VM_MIN_KERNEL_ADDRESS) {
		return (va);
	}

	PMAP_LOCK(kernel_pmap);
	pvo = moea_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL);
	KASSERT(pvo != NULL, ("moea_kextract: no addr found"));
	pa = PVO_PADDR(pvo) | (va & ADDR_POFF);
	PMAP_UNLOCK(kernel_pmap);
	return (pa);
}

/*
 * Remove a wired page from kernel virtual address space.
 */
void
moea_kremove(vm_offset_t va)
{

	moea_remove(kernel_pmap, va, va + PAGE_SIZE);
}

/*
 * Provide a kernel pointer corresponding to a given userland pointer.
 * The returned pointer is valid until the next time this function is
 * called in this thread.  This is used internally in copyin/copyout.
 */
int
moea_map_user_ptr(pmap_t pm, volatile const void *uaddr,
    void **kaddr, size_t ulen, size_t *klen)
{
	size_t l;
	register_t vsid;

	*kaddr = (char *)USER_ADDR + ((uintptr_t)uaddr & ~SEGMENT_MASK);
	l = ((char *)USER_ADDR + SEGMENT_LENGTH) - (char *)(*kaddr);
	if (l > ulen)
		l = ulen;
	if (klen)
		*klen = l;
	else if (l != ulen)
		return (EFAULT);

	vsid = va_to_vsid(pm, (vm_offset_t)uaddr);

	/* Mark segment no-execute */
	vsid |= SR_N;

	/* If we have already set this VSID, we can just return */
	if (curthread->td_pcb->pcb_cpu.aim.usr_vsid == vsid)
		return (0);

	__asm __volatile("isync");
	curthread->td_pcb->pcb_cpu.aim.usr_segm =
	    (uintptr_t)uaddr >> ADDR_SR_SHFT;
	curthread->td_pcb->pcb_cpu.aim.usr_vsid = vsid;
	__asm __volatile("mtsr %0,%1; isync" :: "n"(USER_SR), "r"(vsid));

	return (0);
}

/*
 * Figure out where a given kernel pointer (usually in a fault) points
 * to from the VM's perspective, potentially remapping into userland's
 * address space.
 */
static int
moea_decode_kernel_ptr(vm_offset_t addr, int *is_user,
    vm_offset_t *decoded_addr)
{
	vm_offset_t user_sr;

	if ((addr >> ADDR_SR_SHFT) == (USER_ADDR >> ADDR_SR_SHFT)) {
		user_sr = curthread->td_pcb->pcb_cpu.aim.usr_segm;
		addr &= ADDR_PIDX | ADDR_POFF;
		addr |= user_sr << ADDR_SR_SHFT;
		*decoded_addr = addr;
		*is_user = 1;
	} else {
		*decoded_addr = addr;
		*is_user = 0;
	}

	return (0);
}
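
/*
 * Worked example (illustrative values, 256 MB segments): a user pointer
 * uaddr = 0x18345678 lies in user segment 1, so moea_map_user_ptr() sets
 * usr_segm = 1, loads that segment's VSID into USER_SR, and returns
 * kaddr = USER_ADDR + 0x08345678; moea_decode_kernel_ptr() performs the
 * inverse translation for faults taken on such remapped addresses.
 */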

/*
 * Map a range of physical addresses into kernel virtual address space.
 *
 * The value passed in *virt is a suggested virtual address for the mapping.
 * Architectures which can support a direct-mapped physical to virtual region
 * can return the appropriate address within that region, leaving '*virt'
 * unchanged.  We cannot and therefore do not; *virt is updated with the
 * first usable address after the mapped region.
 */
vm_offset_t
moea_map(vm_offset_t *virt, vm_paddr_t pa_start,
    vm_paddr_t pa_end, int prot)
{
	vm_offset_t	sva, va;

	sva = *virt;
	va = sva;
	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
		moea_kenter(va, pa_start);
	*virt = va;
	return (sva);
}

/*
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page.  This count may
 * be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
 */
boolean_t
moea_page_exists_quick(pmap_t pmap, vm_page_t m)
{
	int loops;
	struct pvo_entry *pvo;
	boolean_t rv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea_page_exists_quick: page %p is not managed", m));
	loops = 0;
	rv = FALSE;
	rw_wlock(&pvh_global_lock);
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		if (pvo->pvo_pmap == pmap) {
			rv = TRUE;
			break;
		}
		if (++loops >= 16)
			break;
	}
	rw_wunlock(&pvh_global_lock);
	return (rv);
}

void
moea_page_init(vm_page_t m)
{

	m->md.mdpg_attrs = 0;
	m->md.mdpg_cache_attrs = VM_MEMATTR_DEFAULT;
	LIST_INIT(&m->md.mdpg_pvoh);
}

/*
 * Return the number of managed mappings to the given physical page
 * that are wired.
 */
int
moea_page_wired_mappings(vm_page_t m)
{
	struct pvo_entry *pvo;
	int count;

	count = 0;
	if ((m->oflags & VPO_UNMANAGED) != 0)
		return (count);
	rw_wlock(&pvh_global_lock);
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
		if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
			count++;
	rw_wunlock(&pvh_global_lock);
	return (count);
}
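
/*
 * Bookkeeping sketch for the VSID allocator below (editorial): each set
 * bit of moea_vsid_bitmap marks one hash value in use, indexed as word
 * n = hash >> 5, bit = hash & (VSID_NBPW - 1); a single hash names the
 * whole group of 16 per-segment VSIDs built by VSID_MAKE().
 */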

static u_int	moea_vsidcontext;

int
moea_pinit(pmap_t pmap)
{
	int	i, mask;
	u_int	entropy;

	RB_INIT(&pmap->pmap_pvo);

	entropy = 0;
	__asm __volatile("mftb %0" : "=r"(entropy));

	if ((pmap->pmap_phys = (pmap_t)moea_kextract((vm_offset_t)pmap))
	    == NULL) {
		pmap->pmap_phys = pmap;
	}

	mtx_lock(&moea_vsid_mutex);
	/*
	 * Allocate some segment registers for this pmap.
	 */
	for (i = 0; i < NPMAPS; i += VSID_NBPW) {
		u_int	hash, n;

		/*
		 * Create a new value by multiplying by a prime and adding in
		 * entropy from the timebase register.  This is to make the
		 * VSID more random so that the PT hash function collides
		 * less often.  (Note that the prime causes gcc to do shifts
		 * instead of a multiply.)
		 */
		moea_vsidcontext = (moea_vsidcontext * 0x1105) + entropy;
		hash = moea_vsidcontext & (NPMAPS - 1);
		if (hash == 0)		/* 0 is special, avoid it */
			continue;
		n = hash >> 5;
		mask = 1 << (hash & (VSID_NBPW - 1));
		hash = (moea_vsidcontext & 0xfffff);
		if (moea_vsid_bitmap[n] & mask) {	/* collision? */
			/* anything free in this bucket? */
			if (moea_vsid_bitmap[n] == 0xffffffff) {
				entropy = (moea_vsidcontext >> 20);
				continue;
			}
			i = ffs(~moea_vsid_bitmap[n]) - 1;
			mask = 1 << i;
			hash &= rounddown2(0xfffff, VSID_NBPW);
			hash |= i;
		}
		KASSERT(!(moea_vsid_bitmap[n] & mask),
		    ("Allocating in-use VSID group %#x\n", hash));
		moea_vsid_bitmap[n] |= mask;
		for (i = 0; i < 16; i++)
			pmap->pm_sr[i] = VSID_MAKE(i, hash);
		mtx_unlock(&moea_vsid_mutex);
		return (1);
	}

	mtx_unlock(&moea_vsid_mutex);
	panic("moea_pinit: out of segments");
}

/*
 * Initialize the pmap associated with process 0.
 */
void
moea_pinit0(pmap_t pm)
{

	PMAP_LOCK_INIT(pm);
	moea_pinit(pm);
	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
}

/*
 * Set the physical protection on the specified range of this map as requested.
 */
void
moea_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva,
    vm_prot_t prot)
{
	struct	pvo_entry *pvo, *tpvo, key;
	struct	pte *pt;

	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
	    ("moea_protect: non current pmap"));

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		moea_remove(pm, sva, eva);
		return;
	}

	rw_wlock(&pvh_global_lock);
	PMAP_LOCK(pm);
	key.pvo_vaddr = sva;
	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);

		/*
		 * Grab the PTE pointer before we diddle with the cached PTE
		 * copy.
		 */
		pt = moea_pvo_to_pte(pvo, -1);
		/*
		 * Change the protection of the page.
		 */
		pvo->pvo_pte.pte.pte_lo &= ~PTE_PP;
		pvo->pvo_pte.pte.pte_lo |= PTE_BR;

		/*
		 * If the PVO is in the page table, update that pte as well.
		 */
		if (pt != NULL) {
			moea_pte_change(pt, &pvo->pvo_pte.pte, pvo->pvo_vaddr);
			mtx_unlock(&moea_table_mutex);
		}
	}
	rw_wunlock(&pvh_global_lock);
	PMAP_UNLOCK(pm);
}

/*
 * Map a list of wired pages into kernel virtual address space.  This is
 * intended for temporary mappings which do not need page modification or
 * references recorded.  Existing mappings in the region are overwritten.
 */
void
moea_qenter(vm_offset_t sva, vm_page_t *m, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		moea_kenter(va, VM_PAGE_TO_PHYS(*m));
		va += PAGE_SIZE;
		m++;
	}
}

/*
 * Remove page mappings from kernel virtual address space.  Intended for
 * temporary mappings entered by moea_qenter.
 */
void
moea_qremove(vm_offset_t sva, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		moea_kremove(va);
		va += PAGE_SIZE;
	}
}

void
moea_release(pmap_t pmap)
{
	int idx, mask;

	/*
	 * Free segment register's VSID
	 */
	if (pmap->pm_sr[0] == 0)
		panic("moea_release");

	mtx_lock(&moea_vsid_mutex);
	idx = VSID_TO_HASH(pmap->pm_sr[0]) & (NPMAPS-1);
	mask = 1 << (idx % VSID_NBPW);
	idx /= VSID_NBPW;
	moea_vsid_bitmap[idx] &= ~mask;
	mtx_unlock(&moea_vsid_mutex);
}

/*
 * Remove the given range of addresses from the specified map.
 */
void
moea_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
	struct	pvo_entry *pvo, *tpvo, key;

	rw_wlock(&pvh_global_lock);
	PMAP_LOCK(pm);
	key.pvo_vaddr = sva;
	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
		moea_pvo_remove(pvo, -1);
	}
	PMAP_UNLOCK(pm);
	rw_wunlock(&pvh_global_lock);
}

/*
 * Remove physical page from all pmaps in which it resides.  moea_pvo_remove()
 * will reflect changes in pte's back to the vm_page.
 */
void
moea_remove_all(vm_page_t m)
{
	struct	pvo_head *pvo_head;
	struct	pvo_entry *pvo, *next_pvo;
	pmap_t	pmap;

	rw_wlock(&pvh_global_lock);
	pvo_head = vm_page_to_pvoh(m);
	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
		next_pvo = LIST_NEXT(pvo, pvo_vlink);

		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		moea_pvo_remove(pvo, -1);
		PMAP_UNLOCK(pmap);
	}
	if ((m->a.flags & PGA_WRITEABLE) && moea_query_bit(m, PTE_CHG)) {
		moea_attr_clear(m, PTE_CHG);
		vm_page_dirty(m);
	}
	vm_page_aflag_clear(m, PGA_WRITEABLE);
	rw_wunlock(&pvh_global_lock);
}

static int
moea_mincore(pmap_t pm, vm_offset_t va, vm_paddr_t *pap)
{
	struct pvo_entry *pvo;
	vm_paddr_t pa;
	vm_page_t m;
	int val;
	bool managed;

	PMAP_LOCK(pm);

	pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
	if (pvo != NULL) {
		pa = PVO_PADDR(pvo);
		m = PHYS_TO_VM_PAGE(pa);
		managed = (pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED;
		val = MINCORE_INCORE;
	} else {
		PMAP_UNLOCK(pm);
		return (0);
	}

	PMAP_UNLOCK(pm);

	if (m == NULL)
		return (0);

	if (managed) {
		if (moea_is_modified(m))
			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;

		if (moea_is_referenced(m))
			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
	}

	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
	    managed) {
		*pap = pa;
	}

	return (val);
}
static void
moea_syncicache(vm_paddr_t pa, vm_size_t len)
{
	__syncicache((void *)pa, len);
}
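/*
 * Enter a mapping from va to pa into the given pmap.  A PVO entry is
 * allocated from the supplied zone and linked onto the given PV list.
 * Returns ENOENT if this created the first mapping of the page, ENOMEM
 * if no PVO entry could be allocated, and 0 otherwise.
 */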
static int
moea_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
    vm_offset_t va, vm_paddr_t pa, u_int pte_lo, int flags)
{
	struct pvo_entry *pvo;
	u_int sr;
	int first;
	u_int ptegidx;
	int i;
	int bootstrap;

	moea_pvo_enter_calls++;
	first = 0;
	bootstrap = 0;

	/*
	 * Compute the PTE Group index.
	 */
	va &= ~ADDR_POFF;
	sr = va_to_sr(pm->pm_sr, va);
	ptegidx = va_to_pteg(sr, va);

	/*
	 * Remove any existing mapping for this page.  Reuse the pvo entry if
	 * there is a mapping.
	 */
	mtx_lock(&moea_table_mutex);
	LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) {
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
			if (PVO_PADDR(pvo) == pa &&
			    (pvo->pvo_pte.pte.pte_lo & PTE_PP) ==
			    (pte_lo & PTE_PP)) {
				/*
				 * The PTE is not changing.  Instead, this may
				 * be a request to change the mapping's wired
				 * attribute.
				 */
				mtx_unlock(&moea_table_mutex);
				if ((flags & PVO_WIRED) != 0 &&
				    (pvo->pvo_vaddr & PVO_WIRED) == 0) {
					pvo->pvo_vaddr |= PVO_WIRED;
					pm->pm_stats.wired_count++;
				} else if ((flags & PVO_WIRED) == 0 &&
				    (pvo->pvo_vaddr & PVO_WIRED) != 0) {
					pvo->pvo_vaddr &= ~PVO_WIRED;
					pm->pm_stats.wired_count--;
				}
				return (0);
			}
			moea_pvo_remove(pvo, -1);
			break;
		}
	}

	/*
	 * If we aren't overwriting a mapping, try to allocate.
	 */
	if (moea_initialized) {
		pvo = uma_zalloc(zone, M_NOWAIT);
	} else {
		if (moea_bpvo_pool_index >= BPVO_POOL_SIZE) {
			panic("moea_enter: bpvo pool exhausted, %d, %d, %zu",
			    moea_bpvo_pool_index, BPVO_POOL_SIZE,
			    BPVO_POOL_SIZE * sizeof(struct pvo_entry));
		}
		pvo = &moea_bpvo_pool[moea_bpvo_pool_index];
		moea_bpvo_pool_index++;
		bootstrap = 1;
	}

	if (pvo == NULL) {
		mtx_unlock(&moea_table_mutex);
		return (ENOMEM);
	}

	moea_pvo_entries++;
	pvo->pvo_vaddr = va;
	pvo->pvo_pmap = pm;
	LIST_INSERT_HEAD(&moea_pvo_table[ptegidx], pvo, pvo_olink);
	pvo->pvo_vaddr &= ~ADDR_POFF;
	if (flags & PVO_WIRED)
		pvo->pvo_vaddr |= PVO_WIRED;
	if (pvo_head != &moea_pvo_kunmanaged)
		pvo->pvo_vaddr |= PVO_MANAGED;
	if (bootstrap)
		pvo->pvo_vaddr |= PVO_BOOTSTRAP;

	moea_pte_create(&pvo->pvo_pte.pte, sr, va, pa | pte_lo);

	/*
	 * Add to pmap list.
	 */
	RB_INSERT(pvo_tree, &pm->pmap_pvo, pvo);

	/*
	 * Remember if the list was empty and therefore this PVO will be the
	 * first item.
	 */
	if (LIST_FIRST(pvo_head) == NULL)
		first = 1;
	LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);

	if (pvo->pvo_vaddr & PVO_WIRED)
		pm->pm_stats.wired_count++;
	pm->pm_stats.resident_count++;

	i = moea_pte_insert(ptegidx, &pvo->pvo_pte.pte);
	KASSERT(i < 8, ("Invalid PTE index"));
	if (i >= 0) {
		PVO_PTEGIDX_SET(pvo, i);
	} else {
		panic("moea_pvo_enter: overflow");
		moea_pte_overflow++;
	}
	mtx_unlock(&moea_table_mutex);

	return (first ? ENOENT : 0);
}
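/*
 * Tear down the given PVO entry: invalidate its PTE if it is in the page
 * table, save the REF/CHG bits of managed pages, unlink the entry from
 * the PV and pmap lists, and free it unless it came from the bootstrap
 * pool.
 */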
static void
moea_pvo_remove(struct pvo_entry *pvo, int pteidx)
{
	struct pte *pt;

	/*
	 * If there is an active pte entry, we need to deactivate it (and
	 * save the ref & chg bits).
	 */
	pt = moea_pvo_to_pte(pvo, pteidx);
	if (pt != NULL) {
		moea_pte_unset(pt, &pvo->pvo_pte.pte, pvo->pvo_vaddr);
		mtx_unlock(&moea_table_mutex);
		PVO_PTEGIDX_CLR(pvo);
	} else {
		moea_pte_overflow--;
	}

	/*
	 * Update our statistics.
	 */
	pvo->pvo_pmap->pm_stats.resident_count--;
	if (pvo->pvo_vaddr & PVO_WIRED)
		pvo->pvo_pmap->pm_stats.wired_count--;

	/*
	 * Remove this PVO from the PV and pmap lists.
	 */
	LIST_REMOVE(pvo, pvo_vlink);
	RB_REMOVE(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo);

	/*
	 * Save the REF/CHG bits into their cache if the page is managed.
	 * Clear PGA_WRITEABLE if all mappings of the page have been removed.
	 */
	if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED) {
		struct vm_page *pg;

		pg = PHYS_TO_VM_PAGE(PVO_PADDR(pvo));
		if (pg != NULL) {
			moea_attr_save(pg, pvo->pvo_pte.pte.pte_lo &
			    (PTE_REF | PTE_CHG));
			if (LIST_EMPTY(&pg->md.mdpg_pvoh))
				vm_page_aflag_clear(pg, PGA_WRITEABLE);
		}
	}

	/*
	 * Remove this from the overflow list and return it to the pool
	 * if we aren't going to reuse it.
	 */
	LIST_REMOVE(pvo, pvo_olink);
	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
		uma_zfree(pvo->pvo_vaddr & PVO_MANAGED ? moea_mpvo_zone :
		    moea_upvo_zone, pvo);
	moea_pvo_entries--;
	moea_pvo_remove_calls++;
}

static __inline int
moea_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
{
	int pteidx;

	/*
	 * We can find the actual pte entry without searching by grabbing
	 * the PTEG index from 3 unused bits in pte_lo[11:9] and by
	 * noticing the HID bit.
	 */
	pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
	if (pvo->pvo_pte.pte.pte_hi & PTE_HID)
		pteidx ^= moea_pteg_mask * 8;

	return (pteidx);
}
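/*
 * Find the PVO entry matching the given pmap and virtual address, and
 * optionally return the index of its PTE in the page table.
 */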
static struct pvo_entry *
moea_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p)
{
	struct pvo_entry *pvo;
	int ptegidx;
	u_int sr;

	va &= ~ADDR_POFF;
	sr = va_to_sr(pm->pm_sr, va);
	ptegidx = va_to_pteg(sr, va);

	mtx_lock(&moea_table_mutex);
	LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) {
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
			if (pteidx_p)
				*pteidx_p = moea_pvo_pte_index(pvo, ptegidx);
			break;
		}
	}
	mtx_unlock(&moea_table_mutex);

	return (pvo);
}
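/*
 * Return a pointer to the hardware PTE backing the given PVO entry, or
 * NULL if the PVO is not currently in the page table.  On a non-NULL
 * return the page table mutex is held; the caller must release it.
 */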
static struct pte *
moea_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
{
	struct pte *pt;

	/*
	 * If we haven't been supplied the ptegidx, calculate it.
	 */
	if (pteidx == -1) {
		int ptegidx;
		u_int sr;

		sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr);
		ptegidx = va_to_pteg(sr, pvo->pvo_vaddr);
		pteidx = moea_pvo_pte_index(pvo, ptegidx);
	}

	pt = &moea_pteg_table[pteidx >> 3].pt[pteidx & 7];
	mtx_lock(&moea_table_mutex);

	if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
		panic("moea_pvo_to_pte: pvo %p has valid pte in pvo but no "
		    "valid pte index", pvo);
	}

	if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0 &&
	    PVO_PTEGIDX_ISSET(pvo)) {
		panic("moea_pvo_to_pte: pvo %p has valid pte index in pvo "
		    "but no valid pte", pvo);
	}

	if ((pt->pte_hi ^ (pvo->pvo_pte.pte.pte_hi & ~PTE_VALID)) ==
	    PTE_VALID) {
		if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0) {
			panic("moea_pvo_to_pte: pvo %p has valid pte in "
			    "moea_pteg_table %p but invalid in pvo", pvo, pt);
		}

		if (((pt->pte_lo ^ pvo->pvo_pte.pte.pte_lo) &
		    ~(PTE_CHG|PTE_REF)) != 0) {
			panic("moea_pvo_to_pte: pvo %p pte does not match "
			    "pte %p in moea_pteg_table", pvo, pt);
		}

		mtx_assert(&moea_table_mutex, MA_OWNED);
		return (pt);
	}

	if (pvo->pvo_pte.pte.pte_hi & PTE_VALID) {
		panic("moea_pvo_to_pte: pvo %p has invalid pte %p in "
		    "moea_pteg_table but valid in pvo: %8x, %8x", pvo, pt,
		    pvo->pvo_pte.pte.pte_hi, pt->pte_hi);
	}

	mtx_unlock(&moea_table_mutex);
	return (NULL);
}

/*
 * XXX: THIS STUFF SHOULD BE IN pte.c?
 */
int
moea_pte_spill(vm_offset_t addr)
{
	struct pvo_entry *source_pvo, *victim_pvo;
	struct pvo_entry *pvo;
	int ptegidx, i, j;
	u_int sr;
	struct pteg *pteg;
	struct pte *pt;

	moea_pte_spills++;

	sr = mfsrin(addr);
	ptegidx = va_to_pteg(sr, addr);

	/*
	 * Have to substitute some entry.  Use the primary hash for this.
	 * Use low bits of timebase as random generator.
	 */
	pteg = &moea_pteg_table[ptegidx];
	mtx_lock(&moea_table_mutex);
	__asm __volatile("mftb %0" : "=r"(i));
	i &= 7;
	pt = &pteg->pt[i];

	source_pvo = NULL;
	victim_pvo = NULL;
	LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) {
		/*
		 * We need to find a pvo entry for this address.
		 */
		if (source_pvo == NULL &&
		    moea_pte_match(&pvo->pvo_pte.pte, sr, addr,
		    pvo->pvo_pte.pte.pte_hi & PTE_HID)) {
			/*
			 * Now we have found an entry to be spilled into the
			 * pteg.  The PTE is now valid, so we know it's
			 * active.
			 */
			j = moea_pte_insert(ptegidx, &pvo->pvo_pte.pte);

			if (j >= 0) {
				PVO_PTEGIDX_SET(pvo, j);
				moea_pte_overflow--;
				mtx_unlock(&moea_table_mutex);
				return (1);
			}

			source_pvo = pvo;

			if (victim_pvo != NULL)
				break;
		}

		/*
		 * We also need the pvo entry of the victim we are replacing
		 * so save the R & C bits of the PTE.
		 */
		if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
		    moea_pte_compare(pt, &pvo->pvo_pte.pte)) {
			victim_pvo = pvo;
			if (source_pvo != NULL)
				break;
		}
	}

	if (source_pvo == NULL) {
		mtx_unlock(&moea_table_mutex);
		return (0);
	}

	if (victim_pvo == NULL) {
		if ((pt->pte_hi & PTE_HID) == 0)
			panic("moea_pte_spill: victim p-pte (%p) has no pvo "
			    "entry", pt);

		/*
		 * If this is a secondary PTE, we need to search its primary
		 * pvo bucket for the matching PVO.
		 */
		LIST_FOREACH(pvo, &moea_pvo_table[ptegidx ^ moea_pteg_mask],
		    pvo_olink) {
			/*
			 * We also need the pvo entry of the victim we are
			 * replacing so save the R & C bits of the PTE.
			 */
			if (moea_pte_compare(pt, &pvo->pvo_pte.pte)) {
				victim_pvo = pvo;
				break;
			}
		}

		if (victim_pvo == NULL)
			panic("moea_pte_spill: victim s-pte (%p) has no pvo "
			    "entry", pt);
	}

	/*
	 * We are invalidating the TLB entry for the EA we are replacing even
	 * though it's valid.  If we don't, we lose any ref/chg bit changes
	 * contained in the TLB entry.
	 */
	source_pvo->pvo_pte.pte.pte_hi &= ~PTE_HID;

	moea_pte_unset(pt, &victim_pvo->pvo_pte.pte, victim_pvo->pvo_vaddr);
	moea_pte_set(pt, &source_pvo->pvo_pte.pte);

	PVO_PTEGIDX_CLR(victim_pvo);
	PVO_PTEGIDX_SET(source_pvo, i);
	moea_pte_replacements++;

	mtx_unlock(&moea_table_mutex);
	return (1);
}

static __inline struct pvo_entry *
moea_pte_spillable_ident(u_int ptegidx)
{
	struct pte *pt;
	struct pvo_entry *pvo_walk, *pvo = NULL;

	LIST_FOREACH(pvo_walk, &moea_pvo_table[ptegidx], pvo_olink) {
		if (pvo_walk->pvo_vaddr & PVO_WIRED)
			continue;

		if (!(pvo_walk->pvo_pte.pte.pte_hi & PTE_VALID))
			continue;

		pt = moea_pvo_to_pte(pvo_walk, -1);

		if (pt == NULL)
			continue;

		pvo = pvo_walk;

		mtx_unlock(&moea_table_mutex);
		if (!(pt->pte_lo & PTE_REF))
			return (pvo_walk);
	}

	return (pvo);
}
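/*
 * Insert the given PTE into the hardware page table, trying the primary
 * hash first, then the secondary, and finally evicting a spillable entry.
 * Returns the PTE's slot within its group, or panics on overflow.
 */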
static int
moea_pte_insert(u_int ptegidx, struct pte *pvo_pt)
{
	struct pte *pt;
	struct pvo_entry *victim_pvo;
	int i;
	int victim_idx;
	u_int pteg_bkpidx = ptegidx;

	mtx_assert(&moea_table_mutex, MA_OWNED);

	/*
	 * First try primary hash.
	 */
	for (pt = moea_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi &= ~PTE_HID;
			moea_pte_set(pt, pvo_pt);
			return (i);
		}
	}

	/*
	 * Now try secondary hash.
	 */
	ptegidx ^= moea_pteg_mask;

	for (pt = moea_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi |= PTE_HID;
			moea_pte_set(pt, pvo_pt);
			return (i);
		}
	}

	/* Try again, but this time try to force a PTE out. */
	ptegidx = pteg_bkpidx;

	victim_pvo = moea_pte_spillable_ident(ptegidx);
	if (victim_pvo == NULL) {
		ptegidx ^= moea_pteg_mask;
		victim_pvo = moea_pte_spillable_ident(ptegidx);
	}

	if (victim_pvo == NULL) {
		panic("moea_pte_insert: overflow");
		return (-1);
	}

	victim_idx = moea_pvo_pte_index(victim_pvo, ptegidx);

	if (pteg_bkpidx == ptegidx)
		pvo_pt->pte_hi &= ~PTE_HID;
	else
		pvo_pt->pte_hi |= PTE_HID;

	/*
	 * Synchronize the sacrifice PTE with its PVO, then mark both
	 * invalid.  The PVO will be reused when/if the VM system comes
	 * here after a fault.
	 */
	pt = &moea_pteg_table[victim_idx >> 3].pt[victim_idx & 7];

	if (pt->pte_hi != victim_pvo->pvo_pte.pte.pte_hi)
		panic("Victim PVO doesn't match PTE! PVO: %8x, PTE: %8x",
		    victim_pvo->pvo_pte.pte.pte_hi, pt->pte_hi);

	/*
	 * Set the new PTE.
	 */
	moea_pte_unset(pt, &victim_pvo->pvo_pte.pte, victim_pvo->pvo_vaddr);
	PVO_PTEGIDX_CLR(victim_pvo);
	moea_pte_overflow++;
	moea_pte_set(pt, pvo_pt);

	return (victim_idx & 7);
}
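/*
 * Test whether the given REF/CHG bit is set for the page, checking the
 * cached attributes and the saved bits in each PVO before falling back
 * to the hardware PTEs.
 */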
static boolean_t
moea_query_bit(vm_page_t m, int ptebit)
{
	struct pvo_entry *pvo;
	struct pte *pt;

	rw_assert(&pvh_global_lock, RA_WLOCKED);
	if (moea_attr_fetch(m) & ptebit)
		return (TRUE);

	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		/*
		 * See if we saved the bit off.  If so, cache it and return
		 * success.
		 */
		if (pvo->pvo_pte.pte.pte_lo & ptebit) {
			moea_attr_save(m, ptebit);
			return (TRUE);
		}
	}

	/*
	 * No luck, now go through the hard part of looking at the PTEs
	 * themselves.  Sync so that any pending REF/CHG bits are flushed to
	 * the PTEs.
	 */
	powerpc_sync();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		/*
		 * See if this pvo has a valid PTE.  If so, fetch the
		 * REF/CHG bits from the valid PTE.  If the appropriate
		 * ptebit is set, cache it and return success.
		 */
		pt = moea_pvo_to_pte(pvo, -1);
		if (pt != NULL) {
			moea_pte_synch(pt, &pvo->pvo_pte.pte);
			mtx_unlock(&moea_table_mutex);
			if (pvo->pvo_pte.pte.pte_lo & ptebit) {
				moea_attr_save(m, ptebit);
				return (TRUE);
			}
		}
	}

	return (FALSE);
}

static u_int
moea_clear_bit(vm_page_t m, int ptebit)
{
	u_int count;
	struct pvo_entry *pvo;
	struct pte *pt;

	rw_assert(&pvh_global_lock, RA_WLOCKED);

	/*
	 * Clear the cached value.
	 */
	moea_attr_clear(m, ptebit);

	/*
	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
	 * we can reset the right ones).  Note that since the pvo entries and
	 * list heads are accessed via BAT0 and are never placed in the page
	 * table, we don't have to worry about further accesses setting the
	 * REF/CHG bits.
	 */
	powerpc_sync();

	/*
	 * For each pvo entry, clear the pvo's ptebit.  If this pvo has a
	 * valid pte, clear the ptebit from the valid pte.
	 */
	count = 0;
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		pt = moea_pvo_to_pte(pvo, -1);
		if (pt != NULL) {
			moea_pte_synch(pt, &pvo->pvo_pte.pte);
			if (pvo->pvo_pte.pte.pte_lo & ptebit) {
				count++;
				moea_pte_clear(pt, PVO_VADDR(pvo), ptebit);
			}
			mtx_unlock(&moea_table_mutex);
		}
		pvo->pvo_pte.pte.pte_lo &= ~ptebit;
	}

	return (count);
}

/*
 * Return true if the physical range is encompassed by battable[idx].
 */
static int
moea_bat_mapped(int idx, vm_paddr_t pa, vm_size_t size)
{
	u_int prot;
	u_int32_t start;
	u_int32_t end;
	u_int32_t bat_ble;

	/*
	 * Return immediately if not a valid mapping.
	 */
	if (!(battable[idx].batu & BAT_Vs))
		return (EINVAL);

	/*
	 * The BAT entry must be cache-inhibited, guarded, and r/w
	 * so it can function as an i/o page.
	 */
	prot = battable[idx].batl & (BAT_I|BAT_G|BAT_PP_RW);
	if (prot != (BAT_I|BAT_G|BAT_PP_RW))
		return (EPERM);

	/*
	 * The address should be within the BAT range.  Assume that the
	 * start address in the BAT has the correct alignment (thus
	 * not requiring masking).
	 */
	start = battable[idx].batl & BAT_PBS;
	bat_ble = (battable[idx].batu & ~(BAT_EBS)) | 0x03;
	end = start | (bat_ble << 15) | 0x7fff;

	if ((pa < start) || ((pa + size) > end))
		return (ERANGE);

	return (0);
}

boolean_t
moea_dev_direct_mapped(vm_paddr_t pa, vm_size_t size)
{
	int i;

	/*
	 * This currently does not work for entries that
	 * overlap 256M BAT segments.
	 */

	for (i = 0; i < 16; i++)
		if (moea_bat_mapped(i, pa, size) == 0)
			return (0);

	return (EFAULT);
}

/*
 * Map a set of physical memory pages into the kernel virtual
 * address space.  Return a pointer to where it is mapped.  This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 */
void *
moea_mapdev(vm_paddr_t pa, vm_size_t size)
{

	return (moea_mapdev_attr(pa, size, VM_MEMATTR_DEFAULT));
}

void *
moea_mapdev_attr(vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
{
	vm_offset_t va, tmpva, ppa, offset;
	int i;

	ppa = trunc_page(pa);
	offset = pa & PAGE_MASK;
	size = roundup(offset + size, PAGE_SIZE);

	/*
	 * If the physical address lies within a valid BAT table entry,
	 * return the 1:1 mapping.  This currently doesn't work
	 * for regions that overlap 256M BAT segments.
	 */
	for (i = 0; i < 16; i++) {
		if (moea_bat_mapped(i, pa, size) == 0)
			return ((void *)pa);
	}

	va = kva_alloc(size);
	if (!va)
		panic("moea_mapdev: Couldn't alloc kernel virtual memory");

	for (tmpva = va; size > 0;) {
		moea_kenter_attr(tmpva, ppa, ma);
		tlbie(tmpva);
		size -= PAGE_SIZE;
		tmpva += PAGE_SIZE;
		ppa += PAGE_SIZE;
	}

	return ((void *)(va + offset));
}

void
moea_unmapdev(vm_offset_t va, vm_size_t size)
{
	vm_offset_t base, offset;

	/*
	 * If this is outside kernel virtual space, then it's a
	 * battable entry and doesn't require unmapping.
	 */
	if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= virtual_end)) {
		base = trunc_page(va);
		offset = va & PAGE_MASK;
		size = roundup(offset + size, PAGE_SIZE);
		moea_qremove(base, atop(size));
		kva_free(base, size);
	}
}

static void
moea_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{
	struct pvo_entry *pvo;
	vm_offset_t lim;
	vm_paddr_t pa;
	vm_size_t len;

	PMAP_LOCK(pm);
	while (sz > 0) {
		lim = round_page(va + 1);
		len = MIN(lim - va, sz);
		pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
		if (pvo != NULL) {
			pa = PVO_PADDR(pvo) | (va & ADDR_POFF);
			moea_syncicache(pa, len);
		}
		va += len;
		sz -= len;
	}
	PMAP_UNLOCK(pm);
}

void
moea_dumpsys_map(vm_paddr_t pa, size_t sz, void **va)
{

	*va = (void *)pa;
}

extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1];

void
moea_scan_init(void)
{
	struct pvo_entry *pvo;
	vm_offset_t va;
	int i;

	if (!do_minidump) {
		/* Initialize phys. segments for dumpsys(). */
		memset(&dump_map, 0, sizeof(dump_map));
		mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
		for (i = 0; i < pregions_sz; i++) {
			dump_map[i].pa_start = pregions[i].mr_start;
			dump_map[i].pa_size = pregions[i].mr_size;
		}
		return;
	}

	/* Virtual segments for minidumps: */
	memset(&dump_map, 0, sizeof(dump_map));

	/* 1st: kernel .data and .bss. */
	dump_map[0].pa_start = trunc_page((uintptr_t)_etext);
	dump_map[0].pa_size =
	    round_page((uintptr_t)_end) - dump_map[0].pa_start;

	/* 2nd: msgbuf and tables (see pmap_bootstrap()). */
	dump_map[1].pa_start = (vm_paddr_t)msgbufp->msg_ptr;
	dump_map[1].pa_size = round_page(msgbufp->msg_size);

	/* 3rd: kernel VM. */
	va = dump_map[1].pa_start + dump_map[1].pa_size;
	/* Find start of next chunk (from va). */
	while (va < virtual_end) {
		/* Don't dump the buffer cache. */
		if (va >= kmi.buffer_sva && va < kmi.buffer_eva) {
			va = kmi.buffer_eva;
			continue;
		}
		pvo = moea_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL);
		if (pvo != NULL && (pvo->pvo_pte.pte.pte_hi & PTE_VALID))
			break;
		va += PAGE_SIZE;
	}
	if (va < virtual_end) {
		dump_map[2].pa_start = va;
		va += PAGE_SIZE;
		/* Find last page in chunk. */
		while (va < virtual_end) {
			/* Don't run into the buffer cache. */
			if (va == kmi.buffer_sva)
				break;
			pvo = moea_pvo_find_va(kernel_pmap, va & ~ADDR_POFF,
			    NULL);
			if (pvo == NULL ||
			    !(pvo->pvo_pte.pte.pte_hi & PTE_VALID))
				break;
			va += PAGE_SIZE;
		}
		dump_map[2].pa_size = va - dump_map[2].pa_start;
	}
}