/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*-
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this module is called upon to
 * provide software-use-only maps which may or may not be stored in the
 * same form as hardware maps.  These pseudo-maps are used to store
 * intermediate results from copy operations to and from address spaces.
 *
 * Since the information managed by this module is also stored by the
 * logical address mapping module, this module may throw away valid virtual
 * to physical mappings at almost any time.  However, invalidations of
 * mappings must be done as requested.
 *
 * In order to cope with hardware architectures which make virtual to
 * physical map invalidates expensive, this module may delay invalidate
 * reduced protection operations until such time as they are actually
 * necessary.  This module is given full information as to which processors
 * are currently using which maps, and to when physical maps must be made
 * correct.
 */

#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/cpu.h>
#include <machine/platform.h>
#include <machine/bat.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/pte.h>
#include <machine/smp.h>
#include <machine/sr.h>
#include <machine/mmuvar.h>

#include "mmu_if.h"

#define	MOEA_DEBUG

#define TODO	panic("%s: not implemented", __func__);

#define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
#define	VSID_TO_SR(vsid)	((vsid) & 0xf)
#define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)

struct ofw_map {
	vm_offset_t	om_va;
	vm_size_t	om_len;
	vm_offset_t	om_pa;
	u_int		om_mode;
};

/*
 * Map of physical memory regions.
 */
static struct	mem_region *regions;
static struct	mem_region *pregions;
static u_int	phys_avail_count;
static int	regions_sz, pregions_sz;
static struct	ofw_map *translations;

/*
 * Lock for the pteg and pvo tables.
 */
struct mtx	moea_table_mutex;
struct mtx	moea_vsid_mutex;

/* tlbie instruction synchronization */
static struct mtx tlbie_mtx;

/*
 * PTEG data.
 */
static struct	pteg *moea_pteg_table;
u_int		moea_pteg_count;
u_int		moea_pteg_mask;

/*
 * PVO data.
 */
struct	pvo_head *moea_pvo_table;		/* pvo entries by pteg index */
struct	pvo_head moea_pvo_kunmanaged =
    LIST_HEAD_INITIALIZER(moea_pvo_kunmanaged);	/* list of unmanaged pages */

uma_zone_t	moea_upvo_zone;	/* zone for pvo entries for unmanaged pages */
uma_zone_t	moea_mpvo_zone;	/* zone for pvo entries for managed pages */

#define	BPVO_POOL_SIZE	32768
static struct	pvo_entry *moea_bpvo_pool;
static int	moea_bpvo_pool_index = 0;

#define	VSID_NBPW	(sizeof(u_int32_t) * 8)
static u_int	moea_vsid_bitmap[NPMAPS / VSID_NBPW];

static boolean_t moea_initialized = FALSE;
/*
 * Statistics.
 */
u_int	moea_pte_valid = 0;
u_int	moea_pte_overflow = 0;
u_int	moea_pte_replacements = 0;
u_int	moea_pvo_entries = 0;
u_int	moea_pvo_enter_calls = 0;
u_int	moea_pvo_remove_calls = 0;
u_int	moea_pte_spills = 0;
SYSCTL_INT(_machdep, OID_AUTO, moea_pte_valid, CTLFLAG_RD, &moea_pte_valid,
    0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pte_overflow, CTLFLAG_RD,
    &moea_pte_overflow, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pte_replacements, CTLFLAG_RD,
    &moea_pte_replacements, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_entries, CTLFLAG_RD, &moea_pvo_entries,
    0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_enter_calls, CTLFLAG_RD,
    &moea_pvo_enter_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_remove_calls, CTLFLAG_RD,
    &moea_pvo_remove_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pte_spills, CTLFLAG_RD,
    &moea_pte_spills, 0, "");

/*
 * Allocate physical memory for use in moea_bootstrap.
 */
static vm_offset_t	moea_bootstrap_alloc(vm_size_t, u_int);

/*
 * PTE calls.
 */
static int		moea_pte_insert(u_int, struct pte *);

/*
 * PVO calls.
 */
static int	moea_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *,
		    vm_offset_t, vm_offset_t, u_int, int);
static void	moea_pvo_remove(struct pvo_entry *, int);
static struct	pvo_entry *moea_pvo_find_va(pmap_t, vm_offset_t, int *);
static struct	pte *moea_pvo_to_pte(const struct pvo_entry *, int);

/*
 * Utility routines.
 */
static void		moea_enter_locked(pmap_t, vm_offset_t, vm_page_t,
			    vm_prot_t, boolean_t);
static void		moea_syncicache(vm_offset_t, vm_size_t);
static boolean_t	moea_query_bit(vm_page_t, int);
static u_int		moea_clear_bit(vm_page_t, int);
static void		moea_kremove(mmu_t, vm_offset_t);
int		moea_pte_spill(vm_offset_t);

/*
 * Kernel MMU interface
 */
void moea_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
void moea_clear_modify(mmu_t, vm_page_t);
void moea_clear_reference(mmu_t, vm_page_t);
void moea_copy_page(mmu_t, vm_page_t, vm_page_t);
void moea_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
void moea_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
    vm_prot_t);
void moea_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
vm_paddr_t moea_extract(mmu_t, pmap_t, vm_offset_t);
vm_page_t moea_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
void moea_init(mmu_t);
boolean_t moea_is_modified(mmu_t, vm_page_t);
boolean_t moea_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
boolean_t moea_is_referenced(mmu_t, vm_page_t);
boolean_t moea_ts_referenced(mmu_t, vm_page_t);
vm_offset_t moea_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int);
boolean_t moea_page_exists_quick(mmu_t, pmap_t, vm_page_t);
int moea_page_wired_mappings(mmu_t, vm_page_t);
void moea_pinit(mmu_t, pmap_t);
void moea_pinit0(mmu_t, pmap_t);
void moea_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
void moea_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
void moea_qremove(mmu_t, vm_offset_t, int);
void moea_release(mmu_t, pmap_t);
void moea_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea_remove_all(mmu_t, vm_page_t);
void moea_remove_write(mmu_t, vm_page_t);
void moea_zero_page(mmu_t, vm_page_t);
void moea_zero_page_area(mmu_t, vm_page_t, int, int);
void moea_zero_page_idle(mmu_t, vm_page_t);
void moea_activate(mmu_t, struct thread *);
void moea_deactivate(mmu_t, struct thread *);
void moea_cpu_bootstrap(mmu_t, int);
void moea_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
void *moea_mapdev(mmu_t, vm_offset_t, vm_size_t);
void *moea_mapdev_attr(mmu_t, vm_offset_t, vm_size_t, vm_memattr_t);
void moea_unmapdev(mmu_t, vm_offset_t, vm_size_t);
vm_offset_t moea_kextract(mmu_t, vm_offset_t);
void moea_kenter_attr(mmu_t, vm_offset_t, vm_offset_t, vm_memattr_t);
void moea_kenter(mmu_t, vm_offset_t, vm_offset_t);
void moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma);
boolean_t moea_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
static void moea_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);

static mmu_method_t moea_methods[] = {
	MMUMETHOD(mmu_change_wiring,	moea_change_wiring),
	MMUMETHOD(mmu_clear_modify,	moea_clear_modify),
	MMUMETHOD(mmu_clear_reference,	moea_clear_reference),
	MMUMETHOD(mmu_copy_page,	moea_copy_page),
	MMUMETHOD(mmu_enter,		moea_enter),
	MMUMETHOD(mmu_enter_object,	moea_enter_object),
	MMUMETHOD(mmu_enter_quick,	moea_enter_quick),
	MMUMETHOD(mmu_extract,		moea_extract),
	MMUMETHOD(mmu_extract_and_hold,	moea_extract_and_hold),
	MMUMETHOD(mmu_init,		moea_init),
	MMUMETHOD(mmu_is_modified,	moea_is_modified),
	MMUMETHOD(mmu_is_prefaultable,	moea_is_prefaultable),
	MMUMETHOD(mmu_is_referenced,	moea_is_referenced),
	MMUMETHOD(mmu_ts_referenced,	moea_ts_referenced),
	MMUMETHOD(mmu_map,		moea_map),
	MMUMETHOD(mmu_page_exists_quick,moea_page_exists_quick),
	MMUMETHOD(mmu_page_wired_mappings,moea_page_wired_mappings),
	MMUMETHOD(mmu_pinit,		moea_pinit),
	MMUMETHOD(mmu_pinit0,		moea_pinit0),
	MMUMETHOD(mmu_protect,		moea_protect),
	MMUMETHOD(mmu_qenter,		moea_qenter),
	MMUMETHOD(mmu_qremove,		moea_qremove),
	MMUMETHOD(mmu_release,		moea_release),
	MMUMETHOD(mmu_remove,		moea_remove),
	MMUMETHOD(mmu_remove_all,	moea_remove_all),
	MMUMETHOD(mmu_remove_write,	moea_remove_write),
	MMUMETHOD(mmu_sync_icache,	moea_sync_icache),
	MMUMETHOD(mmu_zero_page,	moea_zero_page),
	MMUMETHOD(mmu_zero_page_area,	moea_zero_page_area),
	MMUMETHOD(mmu_zero_page_idle,	moea_zero_page_idle),
	MMUMETHOD(mmu_activate,		moea_activate),
	MMUMETHOD(mmu_deactivate,	moea_deactivate),
	MMUMETHOD(mmu_page_set_memattr,	moea_page_set_memattr),

	/* Internal interfaces */
	MMUMETHOD(mmu_bootstrap,	moea_bootstrap),
	MMUMETHOD(mmu_cpu_bootstrap,	moea_cpu_bootstrap),
	MMUMETHOD(mmu_mapdev_attr,	moea_mapdev_attr),
	MMUMETHOD(mmu_mapdev,		moea_mapdev),
	MMUMETHOD(mmu_unmapdev,		moea_unmapdev),
	MMUMETHOD(mmu_kextract,		moea_kextract),
	MMUMETHOD(mmu_kenter,		moea_kenter),
	MMUMETHOD(mmu_kenter_attr,	moea_kenter_attr),
	MMUMETHOD(mmu_dev_direct_mapped,moea_dev_direct_mapped),

	{ 0, 0 }
};

MMU_DEF(oea_mmu, MMU_TYPE_OEA, moea_methods, 0);

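/*
 * Translate a VM memory attribute into the OEA WIMG bits carried in the
 * low word of a PTE: W (write-through), I (cache-inhibit), M (memory
 * coherence) and G (guarded).  For example, VM_MEMATTR_UNCACHEABLE maps
 * to PTE_I|PTE_G, while addresses that fall inside a known physical
 * memory region default to PTE_M (cached, coherent) below.
 */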
static __inline uint32_t
moea_calc_wimg(vm_offset_t pa, vm_memattr_t ma)
{
	uint32_t pte_lo;
	int i;

	if (ma != VM_MEMATTR_DEFAULT) {
		switch (ma) {
		case VM_MEMATTR_UNCACHEABLE:
			return (PTE_I | PTE_G);
		case VM_MEMATTR_WRITE_COMBINING:
		case VM_MEMATTR_WRITE_BACK:
		case VM_MEMATTR_PREFETCHABLE:
			return (PTE_I);
		case VM_MEMATTR_WRITE_THROUGH:
			return (PTE_W | PTE_M);
		}
	}

	/*
	 * Assume the page is cache inhibited and access is guarded unless
	 * it's in our available memory array.
	 */
	pte_lo = PTE_I | PTE_G;
	for (i = 0; i < pregions_sz; i++) {
		if ((pa >= pregions[i].mr_start) &&
		    (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
			pte_lo = PTE_M;
			break;
		}
	}

	return pte_lo;
}

static void
tlbie(vm_offset_t va)
{

	mtx_lock_spin(&tlbie_mtx);
	__asm __volatile("ptesync");
	__asm __volatile("tlbie %0" :: "r"(va));
	__asm __volatile("eieio; tlbsync; ptesync");
	mtx_unlock_spin(&tlbie_mtx);
}

static void
tlbia(void)
{
	vm_offset_t va;

	for (va = 0; va < 0x00040000; va += 0x00001000) {
		__asm __volatile("tlbie %0" :: "r"(va));
		powerpc_sync();
	}
	__asm __volatile("tlbsync");
	powerpc_sync();
}

static __inline int
va_to_sr(u_int *sr, vm_offset_t va)
{
	return (sr[(uintptr_t)va >> ADDR_SR_SHFT]);
}

static __inline u_int
va_to_pteg(u_int sr, vm_offset_t addr)
{
	u_int hash;

	hash = (sr & SR_VSID_MASK) ^ (((u_int)addr & ADDR_PIDX) >>
	    ADDR_PIDX_SHFT);
	return (hash & moea_pteg_mask);
}

static __inline struct pvo_head *
vm_page_to_pvoh(vm_page_t m)
{

	return (&m->md.mdpg_pvoh);
}

static __inline void
moea_attr_clear(vm_page_t m, int ptebit)
{

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	m->md.mdpg_attrs &= ~ptebit;
}

static __inline int
moea_attr_fetch(vm_page_t m)
{

	return (m->md.mdpg_attrs);
}

static __inline void
moea_attr_save(vm_page_t m, int ptebit)
{

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	m->md.mdpg_attrs |= ptebit;
}

static __inline int
moea_pte_compare(const struct pte *pt, const struct pte *pvo_pt)
{
	if (pt->pte_hi == pvo_pt->pte_hi)
		return (1);

	return (0);
}

static __inline int
moea_pte_match(struct pte *pt, u_int sr, vm_offset_t va, int which)
{
	return (pt->pte_hi & ~PTE_VALID) ==
	    (((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
	    ((va >> ADDR_API_SHFT) & PTE_API) | which);
}

static __inline void
moea_pte_create(struct pte *pt, u_int sr, vm_offset_t va, u_int pte_lo)
{

	mtx_assert(&moea_table_mutex, MA_OWNED);

	/*
	 * Construct a PTE.  Default to IMB initially.  Valid bit only gets
	 * set when the real pte is set in memory.
	 *
	 * Note: Don't set the valid bit for correct operation of tlb update.
	 */
	pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
	    (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API);
	pt->pte_lo = pte_lo;
}

static __inline void
moea_pte_synch(struct pte *pt, struct pte *pvo_pt)
{

	mtx_assert(&moea_table_mutex, MA_OWNED);
	pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG);
}

static __inline void
moea_pte_clear(struct pte *pt, vm_offset_t va, int ptebit)
{

	mtx_assert(&moea_table_mutex, MA_OWNED);

	/*
	 * As shown in Section 7.6.3.2.3
	 */
	pt->pte_lo &= ~ptebit;
	tlbie(va);
}

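/*
 * The PTE update helpers below follow the architecture's recipe for
 * modifying a live page table entry: moea_pte_set() writes pte_lo first,
 * issues a sync, and only then writes pte_hi with PTE_VALID set, so the
 * hardware table walker can never observe a half-formed entry;
 * moea_pte_unset() clears PTE_VALID, flushes the TLB with tlbie(), and
 * then harvests whatever REF/CHG bits the hardware set in the meantime.
 */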
static __inline void
moea_pte_set(struct pte *pt, struct pte *pvo_pt)
{

	mtx_assert(&moea_table_mutex, MA_OWNED);
	pvo_pt->pte_hi |= PTE_VALID;

	/*
	 * Update the PTE as defined in section 7.6.3.1.
	 * Note that the REF/CHG bits are from pvo_pt and thus should have
	 * been saved so this routine can restore them (if desired).
	 */
	pt->pte_lo = pvo_pt->pte_lo;
	powerpc_sync();
	pt->pte_hi = pvo_pt->pte_hi;
	powerpc_sync();
	moea_pte_valid++;
}

static __inline void
moea_pte_unset(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
{

	mtx_assert(&moea_table_mutex, MA_OWNED);
	pvo_pt->pte_hi &= ~PTE_VALID;

	/*
	 * Force the ref & chg bits back into the PTEs.
	 */
	powerpc_sync();

	/*
	 * Invalidate the pte.
	 */
	pt->pte_hi &= ~PTE_VALID;

	tlbie(va);

	/*
	 * Save the ref & chg bits.
	 */
	moea_pte_synch(pt, pvo_pt);
	moea_pte_valid--;
}

static __inline void
moea_pte_change(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
{

	/*
	 * Invalidate the PTE
	 */
	moea_pte_unset(pt, pvo_pt, va);
	moea_pte_set(pt, pvo_pt);
}

/*
 * Quick sort callout for comparing memory regions.
 */
static int	mr_cmp(const void *a, const void *b);
static int	om_cmp(const void *a, const void *b);

static int
mr_cmp(const void *a, const void *b)
{
	const struct	mem_region *regiona;
	const struct	mem_region *regionb;

	regiona = a;
	regionb = b;
	if (regiona->mr_start < regionb->mr_start)
		return (-1);
	else if (regiona->mr_start > regionb->mr_start)
		return (1);
	else
		return (0);
}

static int
om_cmp(const void *a, const void *b)
{
	const struct	ofw_map *mapa;
	const struct	ofw_map *mapb;

	mapa = a;
	mapb = b;
	if (mapa->om_pa < mapb->om_pa)
		return (-1);
	else if (mapa->om_pa > mapb->om_pa)
		return (1);
	else
		return (0);
}

void
moea_cpu_bootstrap(mmu_t mmup, int ap)
{
	u_int sdr;
	int i;

	if (ap) {
		powerpc_sync();
		__asm __volatile("mtdbatu 0,%0" :: "r"(battable[0].batu));
		__asm __volatile("mtdbatl 0,%0" :: "r"(battable[0].batl));
		isync();
		__asm __volatile("mtibatu 0,%0" :: "r"(battable[0].batu));
		__asm __volatile("mtibatl 0,%0" :: "r"(battable[0].batl));
		isync();
	}

	__asm __volatile("mtdbatu 1,%0" :: "r"(battable[8].batu));
	__asm __volatile("mtdbatl 1,%0" :: "r"(battable[8].batl));
	isync();

	__asm __volatile("mtibatu 1,%0" :: "r"(0));
	__asm __volatile("mtdbatu 2,%0" :: "r"(0));
	__asm __volatile("mtibatu 2,%0" :: "r"(0));
	__asm __volatile("mtdbatu 3,%0" :: "r"(0));
	__asm __volatile("mtibatu 3,%0" :: "r"(0));
	isync();

	for (i = 0; i < 16; i++)
		mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]);
	powerpc_sync();

	sdr = (u_int)moea_pteg_table | (moea_pteg_mask >> 10);
	__asm __volatile("mtsdr1 %0" :: "r"(sdr));
	isync();

	tlbia();
}

void
moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	ihandle_t	mmui;
	phandle_t	chosen, mmu;
	int		sz;
	int		i, j;
	vm_size_t	size, physsz, hwphyssz;
	vm_offset_t	pa, va, off;
	void		*dpcpu;
	register_t	msr;

	/*
	 * Set up BAT0 to map the lowest 256 MB area
	 */
	battable[0x0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
	battable[0x0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);

	/*
	 * Map PCI memory space.
	 */
	battable[0x8].batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0x8].batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);

	battable[0x9].batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0x9].batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs);

	battable[0xa].batl = BATL(0xa0000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0xa].batu = BATU(0xa0000000, BAT_BL_256M, BAT_Vs);

	battable[0xb].batl = BATL(0xb0000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0xb].batu = BATU(0xb0000000, BAT_BL_256M, BAT_Vs);

	/*
	 * Map obio devices.
	 */
	battable[0xf].batl = BATL(0xf0000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0xf].batu = BATU(0xf0000000, BAT_BL_256M, BAT_Vs);

	/*
	 * Use an IBAT and a DBAT to map the bottom segment of memory
	 * where we are. Turn off instruction relocation temporarily
	 * to prevent faults while reprogramming the IBAT.
	 */
	msr = mfmsr();
	mtmsr(msr & ~PSL_IR);
	__asm (".balign 32; \n"
	       "mtibatu 0,%0; mtibatl 0,%1; isync; \n"
	       "mtdbatu 0,%0; mtdbatl 0,%1; isync"
	    :: "r"(battable[0].batu), "r"(battable[0].batl));
	mtmsr(msr);

	/* map pci space */
	__asm __volatile("mtdbatu 1,%0" :: "r"(battable[8].batu));
	__asm __volatile("mtdbatl 1,%0" :: "r"(battable[8].batl));
	isync();

	/* set global direct map flag */
	hw_direct_map = 1;

	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
	CTR0(KTR_PMAP, "moea_bootstrap: physical memory");

	qsort(pregions, pregions_sz, sizeof(*pregions), mr_cmp);
	for (i = 0; i < pregions_sz; i++) {
		vm_offset_t pa;
		vm_offset_t end;

		CTR3(KTR_PMAP, "physregion: %#x - %#x (%#x)",
			pregions[i].mr_start,
			pregions[i].mr_start + pregions[i].mr_size,
			pregions[i].mr_size);
		/*
		 * Install entries into the BAT table to allow all
		 * of physmem to be covered by on-demand BAT entries.
		 * The loop will sometimes set the same battable element
		 * twice, but that's fine since they won't be used for
		 * a while yet.
		 */
		pa = pregions[i].mr_start & 0xf0000000;
		end = pregions[i].mr_start + pregions[i].mr_size;
		do {
			u_int n = pa >> ADDR_SR_SHFT;

			battable[n].batl = BATL(pa, BAT_M, BAT_PP_RW);
			battable[n].batu = BATU(pa, BAT_BL_256M, BAT_Vs);
			pa += SEGMENT_LENGTH;
		} while (pa < end);
	}

	if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
		panic("moea_bootstrap: phys_avail too small");
	qsort(regions, regions_sz, sizeof(*regions), mr_cmp);
	phys_avail_count = 0;
	physsz = 0;
	hwphyssz = 0;
	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
	for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
		CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
		    regions[i].mr_start + regions[i].mr_size,
		    regions[i].mr_size);
		if (hwphyssz != 0 &&
		    (physsz + regions[i].mr_size) >= hwphyssz) {
			if (physsz < hwphyssz) {
				phys_avail[j] = regions[i].mr_start;
				phys_avail[j + 1] = regions[i].mr_start +
				    hwphyssz - physsz;
				physsz = hwphyssz;
				phys_avail_count++;
			}
			break;
		}
		phys_avail[j] = regions[i].mr_start;
		phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
		phys_avail_count++;
		physsz += regions[i].mr_size;
	}
	physmem = btoc(physsz);

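	/*
	 * Worked example of the sizing below: with 128 MB of RAM, physmem
	 * is 32768 pages (0x8000), so moea_pteg_count doubles from 0x1000
	 * up to 0x8000 and is then halved to 0x4000.  At eight 8-byte PTEs
	 * per group that is a 1 MB table, naturally aligned because it is
	 * allocated with its own size as the alignment.
	 */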
	/*
	 * Allocate PTEG table.
	 */
#ifdef PTEGCOUNT
	moea_pteg_count = PTEGCOUNT;
#else
	moea_pteg_count = 0x1000;

	while (moea_pteg_count < physmem)
		moea_pteg_count <<= 1;

	moea_pteg_count >>= 1;
#endif /* PTEGCOUNT */

	size = moea_pteg_count * sizeof(struct pteg);
	CTR2(KTR_PMAP, "moea_bootstrap: %d PTEGs, %d bytes", moea_pteg_count,
	    size);
	moea_pteg_table = (struct pteg *)moea_bootstrap_alloc(size, size);
	CTR1(KTR_PMAP, "moea_bootstrap: PTEG table at %p", moea_pteg_table);
	bzero((void *)moea_pteg_table, moea_pteg_count * sizeof(struct pteg));
	moea_pteg_mask = moea_pteg_count - 1;

	/*
	 * Allocate pv/overflow lists.
	 */
	size = sizeof(struct pvo_head) * moea_pteg_count;
	moea_pvo_table = (struct pvo_head *)moea_bootstrap_alloc(size,
	    PAGE_SIZE);
	CTR1(KTR_PMAP, "moea_bootstrap: PVO table at %p", moea_pvo_table);
	for (i = 0; i < moea_pteg_count; i++)
		LIST_INIT(&moea_pvo_table[i]);

	/*
	 * Initialize the lock that synchronizes access to the pteg and pvo
	 * tables.
	 */
	mtx_init(&moea_table_mutex, "pmap table", NULL, MTX_DEF |
	    MTX_RECURSE);
	mtx_init(&moea_vsid_mutex, "VSID table", NULL, MTX_DEF);

	mtx_init(&tlbie_mtx, "tlbie", NULL, MTX_SPIN);

	/*
	 * Initialise the unmanaged pvo pool.
	 */
	moea_bpvo_pool = (struct pvo_entry *)moea_bootstrap_alloc(
		BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
	moea_bpvo_pool_index = 0;

	/*
	 * Make sure kernel vsid is allocated as well as VSID 0.
	 */
	moea_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW]
		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
	moea_vsid_bitmap[0] |= 1;

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
	PMAP_LOCK_INIT(kernel_pmap);
	for (i = 0; i < 16; i++)
		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;
	kernel_pmap->pm_active = ~0;

	/*
	 * Set up the Open Firmware mappings
	 */
	if ((chosen = OF_finddevice("/chosen")) == -1)
		panic("moea_bootstrap: can't find /chosen");
	OF_getprop(chosen, "mmu", &mmui, 4);
	if ((mmu = OF_instance_to_package(mmui)) == -1)
		panic("moea_bootstrap: can't get mmu package");
	if ((sz = OF_getproplen(mmu, "translations")) == -1)
		panic("moea_bootstrap: can't get ofw translation count");
	translations = NULL;
	for (i = 0; phys_avail[i] != 0; i += 2) {
		if (phys_avail[i + 1] >= sz) {
			translations = (struct ofw_map *)phys_avail[i];
			break;
		}
	}
	if (translations == NULL)
		panic("moea_bootstrap: no space to copy translations");
	bzero(translations, sz);
	if (OF_getprop(mmu, "translations", translations, sz) == -1)
		panic("moea_bootstrap: can't get ofw translations");
	CTR0(KTR_PMAP, "moea_bootstrap: translations");
	sz /= sizeof(*translations);
	qsort(translations, sz, sizeof (*translations), om_cmp);
	for (i = 0; i < sz; i++) {
		CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
		    translations[i].om_pa, translations[i].om_va,
		    translations[i].om_len);

		/*
		 * If the mapping is 1:1, let the RAM and device on-demand
		 * BAT tables take care of the translation.
		 */
		if (translations[i].om_va == translations[i].om_pa)
			continue;

		/* Enter the pages */
		for (off = 0; off < translations[i].om_len; off += PAGE_SIZE)
			moea_kenter(mmup, translations[i].om_va + off,
			    translations[i].om_pa + off);
	}

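	/*
	 * phys_avail[] is consumed as (start, end) pairs: phys_avail[2*i]
	 * is the first byte of available region i and phys_avail[2*i + 1]
	 * the byte just past it, with a zero pair terminating the list.
	 * Both the translation copy above and the bootstrap allocator
	 * later in this file rely on that layout.
	 */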
	/*
	 * Calculate the last available physical address.
	 */
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	Maxmem = powerpc_btop(phys_avail[i + 1]);

	moea_cpu_bootstrap(mmup, 0);

	pmap_bootstrapped++;

	/*
	 * Set the start and end of kva.
	 */
	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;

	/*
	 * Allocate a kernel stack with a guard page for thread0 and map it
	 * into the kernel page map.
	 */
	pa = moea_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE);
	va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
	virtual_avail = va + KSTACK_PAGES * PAGE_SIZE;
	CTR2(KTR_PMAP, "moea_bootstrap: kstack0 at %#x (%#x)", pa, va);
	thread0.td_kstack = va;
	thread0.td_kstack_pages = KSTACK_PAGES;
	for (i = 0; i < KSTACK_PAGES; i++) {
		moea_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}

	/*
	 * Allocate virtual address space for the message buffer.
	 */
	pa = msgbuf_phys = moea_bootstrap_alloc(msgbufsize, PAGE_SIZE);
	msgbufp = (struct msgbuf *)virtual_avail;
	va = virtual_avail;
	virtual_avail += round_page(msgbufsize);
	while (va < virtual_avail) {
		moea_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}

	/*
	 * Allocate virtual address space for the dynamic percpu area.
	 */
	pa = moea_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE);
	dpcpu = (void *)virtual_avail;
	va = virtual_avail;
	virtual_avail += DPCPU_SIZE;
	while (va < virtual_avail) {
		moea_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	dpcpu_init(dpcpu, 0);
}

/*
 * Activate a user pmap.  The pmap must be activated before its address
 * space can be accessed in any way.
 */
void
moea_activate(mmu_t mmu, struct thread *td)
{
	pmap_t	pm, pmr;

	/*
	 * Load all the data we need up front to encourage the compiler to
	 * not issue any loads while we have interrupts disabled below.
	 */
	pm = &td->td_proc->p_vmspace->vm_pmap;
	pmr = pm->pmap_phys;

	pm->pm_active |= PCPU_GET(cpumask);
	PCPU_SET(curpmap, pmr);
}

void
moea_deactivate(mmu_t mmu, struct thread *td)
{
	pmap_t	pm;

	pm = &td->td_proc->p_vmspace->vm_pmap;
	pm->pm_active &= ~PCPU_GET(cpumask);
	PCPU_SET(curpmap, NULL);
}

void
moea_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired)
{
	struct	pvo_entry *pvo;

	PMAP_LOCK(pm);
	pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);

	if (pvo != NULL) {
		if (wired) {
			if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
				pm->pm_stats.wired_count++;
			pvo->pvo_vaddr |= PVO_WIRED;
		} else {
			if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
				pm->pm_stats.wired_count--;
			pvo->pvo_vaddr &= ~PVO_WIRED;
		}
	}
	PMAP_UNLOCK(pm);
}

void
moea_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
{
	vm_offset_t	dst;
	vm_offset_t	src;

	dst = VM_PAGE_TO_PHYS(mdst);
	src = VM_PAGE_TO_PHYS(msrc);

	kcopy((void *)src, (void *)dst, PAGE_SIZE);
}

/*
 * Zero a page of physical memory by temporarily mapping it into the tlb.
 */
void
moea_zero_page(mmu_t mmu, vm_page_t m)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
	void *va = (void *)pa;

	bzero(va, PAGE_SIZE);
}

void
moea_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
	void *va = (void *)(pa + off);

	bzero(va, size);
}

void
moea_zero_page_idle(mmu_t mmu, vm_page_t m)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
	void *va = (void *)pa;

	bzero(va, PAGE_SIZE);
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 */
void
moea_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    boolean_t wired)
{

	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	moea_enter_locked(pmap, va, m, prot, wired);
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 *
 * The page queues and pmap must be locked.
 */
static void
moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    boolean_t wired)
{
	struct		pvo_head *pvo_head;
	uma_zone_t	zone;
	vm_page_t	pg;
	u_int		pte_lo, pvo_flags, was_exec;
	int		error;

	if (!moea_initialized) {
		pvo_head = &moea_pvo_kunmanaged;
		zone = moea_upvo_zone;
		pvo_flags = 0;
		pg = NULL;
		was_exec = PTE_EXEC;
	} else {
		pvo_head = vm_page_to_pvoh(m);
		pg = m;
		zone = moea_mpvo_zone;
		pvo_flags = PVO_MANAGED;
		was_exec = 0;
	}
	if (pmap_bootstrapped)
		mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
	    (m->oflags & VPO_BUSY) != 0 || VM_OBJECT_LOCKED(m->object),
	    ("moea_enter_locked: page %p is not busy", m));

	/* XXX change the pvo head for fake pages */
	if ((m->flags & PG_FICTITIOUS) == PG_FICTITIOUS) {
		pvo_flags &= ~PVO_MANAGED;
		pvo_head = &moea_pvo_kunmanaged;
		zone = moea_upvo_zone;
	}

	/*
	 * If this is a managed page, and it's the first reference to the page,
	 * clear the execness of the page.  Otherwise fetch the execness.
	 */
	if ((pg != NULL) && ((m->flags & PG_FICTITIOUS) == 0)) {
		if (LIST_EMPTY(pvo_head)) {
			moea_attr_clear(pg, PTE_EXEC);
		} else {
			was_exec = moea_attr_fetch(pg) & PTE_EXEC;
		}
	}

	pte_lo = moea_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m));

	if (prot & VM_PROT_WRITE) {
		pte_lo |= PTE_BW;
		if (pmap_bootstrapped &&
		    (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0)
			vm_page_flag_set(m, PG_WRITEABLE);
	} else
		pte_lo |= PTE_BR;

	if (prot & VM_PROT_EXECUTE)
		pvo_flags |= PVO_EXECUTABLE;

	if (wired)
		pvo_flags |= PVO_WIRED;

	if ((m->flags & PG_FICTITIOUS) != 0)
		pvo_flags |= PVO_FAKE;

	error = moea_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m),
	    pte_lo, pvo_flags);

	/*
	 * Flush the real page from the instruction cache if this page is
	 * mapped executable and cacheable and was not previously mapped (or
	 * was not mapped executable).
	 */
	if (error == 0 && (pvo_flags & PVO_EXECUTABLE) &&
	    (pte_lo & PTE_I) == 0 && was_exec == 0) {
		/*
		 * Flush the real memory from the cache.
		 */
		moea_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
		if (pg != NULL)
			moea_attr_save(pg, PTE_EXEC);
	}

	/* XXX syncicache always until problems are sorted */
	moea_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
}

/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start.  This page is
 * mapped at the given virtual address start.  Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object.  The
 * last page in the sequence is the page with the largest offset from
 * m_start that can be mapped at a virtual address less than the given
 * virtual address end.  Not every virtual page between start and end
 * is mapped; only those for which a resident page exists with the
 * corresponding offset from m_start are mapped.
 */
void
moea_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{
	vm_page_t m;
	vm_pindex_t diff, psize;

	psize = atop(end - start);
	m = m_start;
	vm_page_lock_queues();
	PMAP_LOCK(pm);
	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
		moea_enter_locked(pm, start + ptoa(diff), m, prot &
		    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
		m = TAILQ_NEXT(m, listq);
	}
	vm_page_unlock_queues();
	PMAP_UNLOCK(pm);
}

void
moea_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
    vm_prot_t prot)
{

	vm_page_lock_queues();
	PMAP_LOCK(pm);
	moea_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
	    FALSE);
	vm_page_unlock_queues();
	PMAP_UNLOCK(pm);
}

vm_paddr_t
moea_extract(mmu_t mmu, pmap_t pm, vm_offset_t va)
{
	struct	pvo_entry *pvo;
	vm_paddr_t pa;

	PMAP_LOCK(pm);
	pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
	if (pvo == NULL)
		pa = 0;
	else
		pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF);
	PMAP_UNLOCK(pm);
	return (pa);
}

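/*
 * The hold path below may have to drop and retake locks: when
 * vm_page_pa_tryrelock() cannot take the page lock for the PTE's
 * physical address without releasing the pmap lock, it returns non-zero,
 * the mapping may have changed underneath us, and the lookup is retried
 * from scratch.
 */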
/*
 * Atomically extract and hold the physical page with the given
 * pmap and virtual address pair if that mapping permits the given
 * protection.
 */
vm_page_t
moea_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
	struct	pvo_entry *pvo;
	vm_page_t m;
	vm_paddr_t pa;

	m = NULL;
	pa = 0;
	PMAP_LOCK(pmap);
retry:
	pvo = moea_pvo_find_va(pmap, va & ~ADDR_POFF, NULL);
	if (pvo != NULL && (pvo->pvo_pte.pte.pte_hi & PTE_VALID) &&
	    ((pvo->pvo_pte.pte.pte_lo & PTE_PP) == PTE_RW ||
	     (prot & VM_PROT_WRITE) == 0)) {
		if (vm_page_pa_tryrelock(pmap, pvo->pvo_pte.pte.pte_lo & PTE_RPGN, &pa))
			goto retry;
		m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte.pte_lo & PTE_RPGN);
		vm_page_hold(m);
	}
	PA_UNLOCK_COND(pa);
	PMAP_UNLOCK(pmap);
	return (m);
}

void
moea_init(mmu_t mmu)
{

	moea_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
	moea_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
	moea_initialized = TRUE;
}

boolean_t
moea_is_referenced(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
	    ("moea_is_referenced: page %p is not managed", m));
	return (moea_query_bit(m, PTE_REF));
}

boolean_t
moea_is_modified(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
	    ("moea_is_modified: page %p is not managed", m));

	/*
	 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
	 * concurrently set while the object is locked.  Thus, if PG_WRITEABLE
	 * is clear, no PTEs can have PTE_CHG set.
	 */
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if ((m->oflags & VPO_BUSY) == 0 &&
	    (m->flags & PG_WRITEABLE) == 0)
		return (FALSE);
	return (moea_query_bit(m, PTE_CHG));
}

boolean_t
moea_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	struct pvo_entry *pvo;
	boolean_t rv;

	PMAP_LOCK(pmap);
	pvo = moea_pvo_find_va(pmap, va & ~ADDR_POFF, NULL);
	rv = pvo == NULL || (pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0;
	PMAP_UNLOCK(pmap);
	return (rv);
}

void
moea_clear_reference(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
	    ("moea_clear_reference: page %p is not managed", m));
	moea_clear_bit(m, PTE_REF);
}

void
moea_clear_modify(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
	    ("moea_clear_modify: page %p is not managed", m));
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	KASSERT((m->oflags & VPO_BUSY) == 0,
	    ("moea_clear_modify: page %p is busy", m));

	/*
	 * If the page is not PG_WRITEABLE, then no PTEs can have PTE_CHG
	 * set.  If the object containing the page is locked and the page is
	 * not VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
	 */
	if ((m->flags & PG_WRITEABLE) == 0)
		return;
	moea_clear_bit(m, PTE_CHG);
}

/*
 * Clear the write and modified bits in each of the given page's mappings.
 */
void
moea_remove_write(mmu_t mmu, vm_page_t m)
{
	struct	pvo_entry *pvo;
	struct	pte *pt;
	pmap_t	pmap;
	u_int	lo;

	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
	    ("moea_remove_write: page %p is not managed", m));

	/*
	 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
	 * another thread while the object is locked.  Thus, if PG_WRITEABLE
	 * is clear, no page table entries need updating.
	 */
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if ((m->oflags & VPO_BUSY) == 0 &&
	    (m->flags & PG_WRITEABLE) == 0)
		return;
	vm_page_lock_queues();
	lo = moea_attr_fetch(m);
	powerpc_sync();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		if ((pvo->pvo_pte.pte.pte_lo & PTE_PP) != PTE_BR) {
			pt = moea_pvo_to_pte(pvo, -1);
			pvo->pvo_pte.pte.pte_lo &= ~PTE_PP;
			pvo->pvo_pte.pte.pte_lo |= PTE_BR;
			if (pt != NULL) {
				moea_pte_synch(pt, &pvo->pvo_pte.pte);
				lo |= pvo->pvo_pte.pte.pte_lo;
				pvo->pvo_pte.pte.pte_lo &= ~PTE_CHG;
				moea_pte_change(pt, &pvo->pvo_pte.pte,
				    pvo->pvo_vaddr);
				mtx_unlock(&moea_table_mutex);
			}
		}
		PMAP_UNLOCK(pmap);
	}
	if ((lo & PTE_CHG) != 0) {
		moea_attr_clear(m, PTE_CHG);
		vm_page_dirty(m);
	}
	vm_page_flag_clear(m, PG_WRITEABLE);
	vm_page_unlock_queues();
}

/*
 *	moea_ts_referenced:
 *
 *	Return a count of reference bits for a page, clearing those bits.
 *	It is not necessary for every reference bit to be cleared, but it
 *	is necessary that 0 only be returned when there are truly no
 *	reference bits set.
 *
 *	XXX: The exact number of bits to check and clear is a matter that
 *	should be tested and standardized at some point in the future for
 *	optimal aging of shared pages.
 */
boolean_t
moea_ts_referenced(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
	    ("moea_ts_referenced: page %p is not managed", m));
	return (moea_clear_bit(m, PTE_REF));
}

/*
 * Modify the WIMG settings of all mappings for a page.
 */
void
moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
{
	struct	pvo_entry *pvo;
	struct	pvo_head *pvo_head;
	struct	pte *pt;
	pmap_t	pmap;
	u_int	lo;

	if (m->flags & PG_FICTITIOUS) {
		m->md.mdpg_cache_attrs = ma;
		return;
	}

	vm_page_lock_queues();
	pvo_head = vm_page_to_pvoh(m);
	lo = moea_calc_wimg(VM_PAGE_TO_PHYS(m), ma);

	LIST_FOREACH(pvo, pvo_head, pvo_vlink) {
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		pt = moea_pvo_to_pte(pvo, -1);
		pvo->pvo_pte.pte.pte_lo &= ~PTE_WIMG;
		pvo->pvo_pte.pte.pte_lo |= lo;
		if (pt != NULL) {
			moea_pte_change(pt, &pvo->pvo_pte.pte,
			    pvo->pvo_vaddr);
			if (pvo->pvo_pmap == kernel_pmap)
				isync();
			/*
			 * moea_pvo_to_pte() returns with the table mutex
			 * held only when it found a PTE, so only unlock
			 * on that path.
			 */
			mtx_unlock(&moea_table_mutex);
		}
		PMAP_UNLOCK(pmap);
	}
	m->md.mdpg_cache_attrs = ma;
	vm_page_unlock_queues();
}

/*
 * Map a wired page into kernel virtual address space.
 */
void
moea_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
{

	moea_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
}

void
moea_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
{
	u_int		pte_lo;
	int		error;

#if 0
	if (va < VM_MIN_KERNEL_ADDRESS)
		panic("moea_kenter: attempt to enter non-kernel address %#x",
		    va);
#endif

	pte_lo = moea_calc_wimg(pa, ma);

	PMAP_LOCK(kernel_pmap);
	error = moea_pvo_enter(kernel_pmap, moea_upvo_zone,
	    &moea_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED);

	if (error != 0 && error != ENOENT)
		panic("moea_kenter: failed to enter va %#x pa %#x: %d", va,
		    pa, error);

	/*
	 * Flush the real memory from the instruction cache.
	 */
	if ((pte_lo & (PTE_I | PTE_G)) == 0) {
		moea_syncicache(pa, PAGE_SIZE);
	}
	PMAP_UNLOCK(kernel_pmap);
}

/*
 * Extract the physical page address associated with the given kernel virtual
 * address.
 */
vm_offset_t
moea_kextract(mmu_t mmu, vm_offset_t va)
{
	struct	pvo_entry *pvo;
	vm_paddr_t pa;

	/*
	 * Allow direct mappings on 32-bit OEA
	 */
	if (va < VM_MIN_KERNEL_ADDRESS) {
		return (va);
	}

	PMAP_LOCK(kernel_pmap);
	pvo = moea_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL);
	KASSERT(pvo != NULL, ("moea_kextract: no addr found"));
	pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF);
	PMAP_UNLOCK(kernel_pmap);
	return (pa);
}

/*
 * Remove a wired page from kernel virtual address space.
 */
void
moea_kremove(mmu_t mmu, vm_offset_t va)
{

	moea_remove(mmu, kernel_pmap, va, va + PAGE_SIZE);
}

/*
 * Map a range of physical addresses into kernel virtual address space.
 *
 * The value passed in *virt is a suggested virtual address for the mapping.
 * Architectures which can support a direct-mapped physical to virtual region
 * can return the appropriate address within that region, leaving '*virt'
 * unchanged.  We cannot and therefore do not; *virt is updated with the
 * first usable address after the mapped region.
 */
vm_offset_t
moea_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start,
    vm_offset_t pa_end, int prot)
{
	vm_offset_t	sva, va;

	sva = *virt;
	va = sva;
	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
		moea_kenter(mmu, va, pa_start);
	*virt = va;
	return (sva);
}

/*
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page.  This count may
 * be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
 */
boolean_t
moea_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
{
	int loops;
	struct pvo_entry *pvo;
	boolean_t rv;

	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
	    ("moea_page_exists_quick: page %p is not managed", m));
	loops = 0;
	rv = FALSE;
	vm_page_lock_queues();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		if (pvo->pvo_pmap == pmap) {
			rv = TRUE;
			break;
		}
		if (++loops >= 16)
			break;
	}
	vm_page_unlock_queues();
	return (rv);
}

/*
 * Return the number of managed mappings to the given physical page
 * that are wired.
 */
int
moea_page_wired_mappings(mmu_t mmu, vm_page_t m)
{
	struct pvo_entry *pvo;
	int count;

	count = 0;
	if ((m->flags & PG_FICTITIOUS) != 0)
		return (count);
	vm_page_lock_queues();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
		if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
			count++;
	vm_page_unlock_queues();
	return (count);
}

static u_int	moea_vsidcontext;

void
moea_pinit(mmu_t mmu, pmap_t pmap)
{
	int	i, mask;
	u_int	entropy;

	KASSERT((int)pmap < VM_MIN_KERNEL_ADDRESS, ("moea_pinit: virt pmap"));
	PMAP_LOCK_INIT(pmap);

	entropy = 0;
	__asm __volatile("mftb %0" : "=r"(entropy));

	if ((pmap->pmap_phys = (pmap_t)moea_kextract(mmu, (vm_offset_t)pmap))
	    == NULL) {
		pmap->pmap_phys = pmap;
	}

	mtx_lock(&moea_vsid_mutex);
	/*
	 * Allocate some segment registers for this pmap.
	 */
	for (i = 0; i < NPMAPS; i += VSID_NBPW) {
		u_int	hash, n;

		/*
		 * Create a new value by multiplying by a prime and adding in
		 * entropy from the timebase register.  This is to make the
		 * VSID more random so that the PT hash function collides
		 * less often.  (Note that the prime causes gcc to do shifts
		 * instead of a multiply.)
		 */
		moea_vsidcontext = (moea_vsidcontext * 0x1105) + entropy;
		hash = moea_vsidcontext & (NPMAPS - 1);
		if (hash == 0)		/* 0 is special, avoid it */
			continue;
		n = hash >> 5;
		mask = 1 << (hash & (VSID_NBPW - 1));
		hash = (moea_vsidcontext & 0xfffff);
		if (moea_vsid_bitmap[n] & mask) {	/* collision? */
			/* anything free in this bucket? */
			if (moea_vsid_bitmap[n] == 0xffffffff) {
				entropy = (moea_vsidcontext >> 20);
				continue;
			}
			i = ffs(~moea_vsid_bitmap[n]) - 1;
			mask = 1 << i;
			hash &= 0xfffff & ~(VSID_NBPW - 1);
			hash |= i;
		}
		moea_vsid_bitmap[n] |= mask;
		for (i = 0; i < 16; i++)
			pmap->pm_sr[i] = VSID_MAKE(i, hash);
		mtx_unlock(&moea_vsid_mutex);
		return;
	}

	mtx_unlock(&moea_vsid_mutex);
	panic("moea_pinit: out of segments");
}

/*
 * Initialize the pmap associated with process 0.
 */
void
moea_pinit0(mmu_t mmu, pmap_t pm)
{

	moea_pinit(mmu, pm);
	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
}

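/*
 * A worked example of the VSID allocator above: VSID_NBPW is 32, so
 * bucket n = hash >> 5 and bit (hash & 31) of moea_vsid_bitmap track one
 * of the NPMAPS hash slots.  On a collision, the first clear bit of the
 * bucket (ffs(~bitmap) - 1) is substituted into the low bits of the
 * 20-bit hash; each of the 16 segment registers then receives
 * VSID_MAKE(i, hash), the common hash with the segment number in the
 * low four bits.
 */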
/*
 * Set the physical protection on the specified range of this map as requested.
 */
void
moea_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
    vm_prot_t prot)
{
	struct	pvo_entry *pvo;
	struct	pte *pt;
	int	pteidx;

	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
	    ("moea_protect: non current pmap"));

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		moea_remove(mmu, pm, sva, eva);
		return;
	}

	vm_page_lock_queues();
	PMAP_LOCK(pm);
	for (; sva < eva; sva += PAGE_SIZE) {
		pvo = moea_pvo_find_va(pm, sva, &pteidx);
		if (pvo == NULL)
			continue;

		if ((prot & VM_PROT_EXECUTE) == 0)
			pvo->pvo_vaddr &= ~PVO_EXECUTABLE;

		/*
		 * Grab the PTE pointer before we diddle with the cached PTE
		 * copy.
		 */
		pt = moea_pvo_to_pte(pvo, pteidx);
		/*
		 * Change the protection of the page.
		 */
		pvo->pvo_pte.pte.pte_lo &= ~PTE_PP;
		pvo->pvo_pte.pte.pte_lo |= PTE_BR;

		/*
		 * If the PVO is in the page table, update that pte as well.
		 */
		if (pt != NULL) {
			moea_pte_change(pt, &pvo->pvo_pte.pte, pvo->pvo_vaddr);
			mtx_unlock(&moea_table_mutex);
		}
	}
	vm_page_unlock_queues();
	PMAP_UNLOCK(pm);
}

/*
 * Map a list of wired pages into kernel virtual address space.  This is
 * intended for temporary mappings which do not need page modification or
 * references recorded.  Existing mappings in the region are overwritten.
 */
void
moea_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		moea_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
		va += PAGE_SIZE;
		m++;
	}
}

/*
 * Remove page mappings from kernel virtual address space.  Intended for
 * temporary mappings entered by moea_qenter.
 */
void
moea_qremove(mmu_t mmu, vm_offset_t sva, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		moea_kremove(mmu, va);
		va += PAGE_SIZE;
	}
}

void
moea_release(mmu_t mmu, pmap_t pmap)
{
	int idx, mask;

	/*
	 * Free segment register's VSID
	 */
	if (pmap->pm_sr[0] == 0)
		panic("moea_release");

	mtx_lock(&moea_vsid_mutex);
	idx = VSID_TO_HASH(pmap->pm_sr[0]) & (NPMAPS-1);
	mask = 1 << (idx % VSID_NBPW);
	idx /= VSID_NBPW;
	moea_vsid_bitmap[idx] &= ~mask;
	mtx_unlock(&moea_vsid_mutex);
	PMAP_LOCK_DESTROY(pmap);
}

/*
 * Remove the given range of addresses from the specified map.
 */
void
moea_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
	struct	pvo_entry *pvo;
	int	pteidx;

	vm_page_lock_queues();
	PMAP_LOCK(pm);
	for (; sva < eva; sva += PAGE_SIZE) {
		pvo = moea_pvo_find_va(pm, sva, &pteidx);
		if (pvo != NULL) {
			moea_pvo_remove(pvo, pteidx);
		}
	}
	PMAP_UNLOCK(pm);
	vm_page_unlock_queues();
}

/*
 * Remove physical page from all pmaps in which it resides. moea_pvo_remove()
 * will reflect changes in pte's back to the vm_page.
 */
void
moea_remove_all(mmu_t mmu, vm_page_t m)
{
	struct	pvo_head *pvo_head;
	struct	pvo_entry *pvo, *next_pvo;
	pmap_t	pmap;

	vm_page_lock_queues();
	pvo_head = vm_page_to_pvoh(m);
	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
		next_pvo = LIST_NEXT(pvo, pvo_vlink);

		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		moea_pvo_remove(pvo, -1);
		PMAP_UNLOCK(pmap);
	}
	if ((m->flags & PG_WRITEABLE) && moea_is_modified(mmu, m)) {
		moea_attr_clear(m, PTE_CHG);
		vm_page_dirty(m);
	}
	vm_page_flag_clear(m, PG_WRITEABLE);
	vm_page_unlock_queues();
}

/*
 * Allocate a physical page of memory directly from the phys_avail map.
 * Can only be called from moea_bootstrap before avail start and end are
 * calculated.
 */
static vm_offset_t
moea_bootstrap_alloc(vm_size_t size, u_int align)
{
	vm_offset_t	s, e;
	int		i, j;

	size = round_page(size);
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		if (align != 0)
			s = (phys_avail[i] + align - 1) & ~(align - 1);
		else
			s = phys_avail[i];
		e = s + size;

		if (s < phys_avail[i] || e > phys_avail[i + 1])
			continue;

		if (s == phys_avail[i]) {
			phys_avail[i] += size;
		} else if (e == phys_avail[i + 1]) {
			phys_avail[i + 1] -= size;
		} else {
			for (j = phys_avail_count * 2; j > i; j -= 2) {
				phys_avail[j] = phys_avail[j - 2];
				phys_avail[j + 1] = phys_avail[j - 1];
			}

			phys_avail[i + 3] = phys_avail[i + 1];
			phys_avail[i + 1] = s;
			phys_avail[i + 2] = e;
			phys_avail_count++;
		}

		return (s);
	}
	panic("moea_bootstrap_alloc: could not allocate memory");
}

static void
moea_syncicache(vm_offset_t pa, vm_size_t len)
{
	__syncicache((void *)pa, len);
}

static int
moea_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
    vm_offset_t va, vm_offset_t pa, u_int pte_lo, int flags)
{
	struct	pvo_entry *pvo;
	u_int	sr;
	int	first;
	u_int	ptegidx;
	int	i;
	int	bootstrap;

	moea_pvo_enter_calls++;
	first = 0;
	bootstrap = 0;

	/*
	 * Compute the PTE Group index.
	 */
	va &= ~ADDR_POFF;
	sr = va_to_sr(pm->pm_sr, va);
	ptegidx = va_to_pteg(sr, va);

	/*
	 * Remove any existing mapping for this page.  Reuse the pvo entry if
	 * there is a mapping.
	 */
	mtx_lock(&moea_table_mutex);
	LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) {
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
			if ((pvo->pvo_pte.pte.pte_lo & PTE_RPGN) == pa &&
			    (pvo->pvo_pte.pte.pte_lo & PTE_PP) ==
			    (pte_lo & PTE_PP)) {
				mtx_unlock(&moea_table_mutex);
				return (0);
			}
			moea_pvo_remove(pvo, -1);
			break;
		}
	}

/*
 * Create a new PVO for the given mapping and, if possible, insert the
 * corresponding PTE into the page table.  Returns ENOENT if this is the
 * first mapping of the page, ENOMEM if no PVO could be allocated, and 0
 * otherwise.
 */
static int
moea_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
    vm_offset_t va, vm_offset_t pa, u_int pte_lo, int flags)
{
	struct pvo_entry *pvo;
	u_int sr;
	int first;
	u_int ptegidx;
	int i;
	int bootstrap;

	moea_pvo_enter_calls++;
	first = 0;
	bootstrap = 0;

	/*
	 * Compute the PTE Group index.
	 */
	va &= ~ADDR_POFF;
	sr = va_to_sr(pm->pm_sr, va);
	ptegidx = va_to_pteg(sr, va);

	/*
	 * Remove any existing mapping for this page.  Reuse the pvo entry if
	 * there is a mapping.
	 */
	mtx_lock(&moea_table_mutex);
	LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) {
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
			if ((pvo->pvo_pte.pte.pte_lo & PTE_RPGN) == pa &&
			    (pvo->pvo_pte.pte.pte_lo & PTE_PP) ==
			    (pte_lo & PTE_PP)) {
				mtx_unlock(&moea_table_mutex);
				return (0);
			}
			moea_pvo_remove(pvo, -1);
			break;
		}
	}

	/*
	 * If we aren't overwriting a mapping, try to allocate.
	 */
	if (moea_initialized) {
		pvo = uma_zalloc(zone, M_NOWAIT);
	} else {
		if (moea_bpvo_pool_index >= BPVO_POOL_SIZE) {
			panic("moea_pvo_enter: bpvo pool exhausted, %d, %d, %zu",
			    moea_bpvo_pool_index, BPVO_POOL_SIZE,
			    BPVO_POOL_SIZE * sizeof(struct pvo_entry));
		}
		pvo = &moea_bpvo_pool[moea_bpvo_pool_index];
		moea_bpvo_pool_index++;
		bootstrap = 1;
	}

	if (pvo == NULL) {
		mtx_unlock(&moea_table_mutex);
		return (ENOMEM);
	}

	moea_pvo_entries++;
	pvo->pvo_vaddr = va;
	pvo->pvo_pmap = pm;
	LIST_INSERT_HEAD(&moea_pvo_table[ptegidx], pvo, pvo_olink);
	pvo->pvo_vaddr &= ~ADDR_POFF;
	if (flags & VM_PROT_EXECUTE)
		pvo->pvo_vaddr |= PVO_EXECUTABLE;
	if (flags & PVO_WIRED)
		pvo->pvo_vaddr |= PVO_WIRED;
	if (pvo_head != &moea_pvo_kunmanaged)
		pvo->pvo_vaddr |= PVO_MANAGED;
	if (bootstrap)
		pvo->pvo_vaddr |= PVO_BOOTSTRAP;
	if (flags & PVO_FAKE)
		pvo->pvo_vaddr |= PVO_FAKE;

	moea_pte_create(&pvo->pvo_pte.pte, sr, va, pa | pte_lo);

	/*
	 * Remember if the list was empty and therefore will be the first
	 * item.
	 */
	if (LIST_FIRST(pvo_head) == NULL)
		first = 1;
	LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);

	if (pvo->pvo_vaddr & PVO_WIRED)
		pm->pm_stats.wired_count++;
	pm->pm_stats.resident_count++;

	/*
	 * We hope this succeeds but it isn't required.
	 */
	i = moea_pte_insert(ptegidx, &pvo->pvo_pte.pte);
	if (i >= 0)
		PVO_PTEGIDX_SET(pvo, i);
	else
		panic("moea_pvo_enter: overflow");
	mtx_unlock(&moea_table_mutex);

	return (first ? ENOENT : 0);
}

/*
 * Tear down the given PVO: invalidate its PTE (if any), update statistics,
 * save the REF/CHG bits, and unlink it from both lists.
 */
static void
moea_pvo_remove(struct pvo_entry *pvo, int pteidx)
{
	struct pte *pt;

	/*
	 * If there is an active pte entry, we need to deactivate it (and
	 * save the ref & chg bits).
	 */
	pt = moea_pvo_to_pte(pvo, pteidx);
	if (pt != NULL) {
		moea_pte_unset(pt, &pvo->pvo_pte.pte, pvo->pvo_vaddr);
		mtx_unlock(&moea_table_mutex);
		PVO_PTEGIDX_CLR(pvo);
	} else {
		moea_pte_overflow--;
	}

	/*
	 * Update our statistics.
	 */
	pvo->pvo_pmap->pm_stats.resident_count--;
	if (pvo->pvo_vaddr & PVO_WIRED)
		pvo->pvo_pmap->pm_stats.wired_count--;

	/*
	 * Save the REF/CHG bits into their cache if the page is managed.
	 */
	if ((pvo->pvo_vaddr & (PVO_MANAGED|PVO_FAKE)) == PVO_MANAGED) {
		struct vm_page *pg;

		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte.pte_lo & PTE_RPGN);
		if (pg != NULL) {
			moea_attr_save(pg, pvo->pvo_pte.pte.pte_lo &
			    (PTE_REF | PTE_CHG));
		}
	}

	/*
	 * Remove this PVO from the PV list.
	 */
	LIST_REMOVE(pvo, pvo_vlink);

	/*
	 * Remove this from the overflow list and return it to the pool
	 * if we aren't going to reuse it.
	 */
	LIST_REMOVE(pvo, pvo_olink);
	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
		uma_zfree(pvo->pvo_vaddr & PVO_MANAGED ? moea_mpvo_zone :
		    moea_upvo_zone, pvo);
	moea_pvo_entries--;
	moea_pvo_remove_calls++;
}
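/*
 * Editor's note (not in the original source): the PVO_* flags tested and
 * set above (PVO_WIRED, PVO_MANAGED, PVO_EXECUTABLE, PVO_BOOTSTRAP,
 * PVO_FAKE) live in the low, page-offset bits of pvo_vaddr, which are
 * otherwise unused because PVO virtual addresses are page aligned;
 * PVO_VADDR() masks them off whenever the real virtual address is needed.
 */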
/*
 * Compute the index of the PTE slot backing the given PVO.
 *
 * We can find the actual pte entry without searching by grabbing
 * the PTEG index from three unused bits in pvo_vaddr and by
 * noticing the HID bit.
 */
static __inline int
moea_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
{
	int pteidx;

	pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
	if (pvo->pvo_pte.pte.pte_hi & PTE_HID)
		pteidx ^= moea_pteg_mask * 8;

	return (pteidx);
}

/*
 * Find the PVO (if any) backing the given virtual address in the given
 * pmap, optionally returning the index of its PTE slot via pteidx_p.
 */
static struct pvo_entry *
moea_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p)
{
	struct pvo_entry *pvo;
	int ptegidx;
	u_int sr;

	va &= ~ADDR_POFF;
	sr = va_to_sr(pm->pm_sr, va);
	ptegidx = va_to_pteg(sr, va);

	mtx_lock(&moea_table_mutex);
	LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) {
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
			if (pteidx_p)
				*pteidx_p = moea_pvo_pte_index(pvo, ptegidx);
			break;
		}
	}
	mtx_unlock(&moea_table_mutex);

	return (pvo);
}

/*
 * Return the page table entry backing the given PVO, or NULL if the PVO
 * is not currently in the page table.  On success the table mutex is left
 * held; the caller must unlock it when done with the PTE.
 */
static struct pte *
moea_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
{
	struct pte *pt;

	/*
	 * If we haven't been supplied the ptegidx, calculate it.
	 */
	if (pteidx == -1) {
		int ptegidx;
		u_int sr;

		sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr);
		ptegidx = va_to_pteg(sr, pvo->pvo_vaddr);
		pteidx = moea_pvo_pte_index(pvo, ptegidx);
	}

	pt = &moea_pteg_table[pteidx >> 3].pt[pteidx & 7];
	mtx_lock(&moea_table_mutex);

	if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
		panic("moea_pvo_to_pte: pvo %p has valid pte in pvo but no "
		    "valid pte index", pvo);
	}

	if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0 &&
	    PVO_PTEGIDX_ISSET(pvo)) {
		panic("moea_pvo_to_pte: pvo %p has valid pte index in pvo "
		    "but no valid pte", pvo);
	}

	if ((pt->pte_hi ^ (pvo->pvo_pte.pte.pte_hi & ~PTE_VALID)) ==
	    PTE_VALID) {
		if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0) {
			panic("moea_pvo_to_pte: pvo %p has valid pte in "
			    "moea_pteg_table %p but invalid in pvo", pvo, pt);
		}

		if (((pt->pte_lo ^ pvo->pvo_pte.pte.pte_lo) &
		    ~(PTE_CHG|PTE_REF)) != 0) {
			panic("moea_pvo_to_pte: pvo %p pte does not match "
			    "pte %p in moea_pteg_table", pvo, pt);
		}

		mtx_assert(&moea_table_mutex, MA_OWNED);
		return (pt);
	}

	if (pvo->pvo_pte.pte.pte_hi & PTE_VALID) {
		panic("moea_pvo_to_pte: pvo %p has invalid pte %p in "
		    "moea_pteg_table but valid in pvo", pvo, pt);
	}

	mtx_unlock(&moea_table_mutex);
	return (NULL);
}
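/*
 * Editor's sketch (not in the original source): the primary/secondary
 * hashing used by moea_pvo_pte_index() above.  XORing a PTEG index with
 * moea_pteg_mask yields the index of that group's secondary-hash partner,
 * and the same trick at PTE granularity uses moea_pteg_mask * 8.  Kept
 * out of the build; for illustration only.
 */
#ifdef MOEA_EXAMPLES
static int
moea_secondary_pteg_example(int ptegidx)
{

	/* The mapping is symmetric: applying it twice returns ptegidx. */
	return (ptegidx ^ moea_pteg_mask);
}
#endif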
/*
 * XXX: THIS STUFF SHOULD BE IN pte.c?
 */
int
moea_pte_spill(vm_offset_t addr)
{
	struct pvo_entry *source_pvo, *victim_pvo;
	struct pvo_entry *pvo;
	int ptegidx, i, j;
	u_int sr;
	struct pteg *pteg;
	struct pte *pt;

	moea_pte_spills++;

	sr = mfsrin(addr);
	ptegidx = va_to_pteg(sr, addr);

	/*
	 * Have to substitute some entry.  Use the primary hash for this.
	 * Use the low bits of the timebase as a pseudo-random victim index.
	 */
	pteg = &moea_pteg_table[ptegidx];
	mtx_lock(&moea_table_mutex);
	__asm __volatile("mftb %0" : "=r"(i));
	i &= 7;
	pt = &pteg->pt[i];

	source_pvo = NULL;
	victim_pvo = NULL;
	LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) {
		/*
		 * We need to find a pvo entry for this address.
		 */
		if (source_pvo == NULL &&
		    moea_pte_match(&pvo->pvo_pte.pte, sr, addr,
		    pvo->pvo_pte.pte.pte_hi & PTE_HID)) {
			/*
			 * Found an entry to be spilled into the pteg.  If
			 * a slot is still free, the PTE can be inserted
			 * directly and no victim is needed.
			 */
			j = moea_pte_insert(ptegidx, &pvo->pvo_pte.pte);
			if (j >= 0) {
				PVO_PTEGIDX_SET(pvo, j);
				moea_pte_overflow--;
				mtx_unlock(&moea_table_mutex);
				return (1);
			}

			source_pvo = pvo;

			if (victim_pvo != NULL)
				break;
		}

		/*
		 * We also need the pvo entry of the victim we are replacing
		 * so save the R & C bits of the PTE.
		 */
		if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
		    moea_pte_compare(pt, &pvo->pvo_pte.pte)) {
			victim_pvo = pvo;
			if (source_pvo != NULL)
				break;
		}
	}

	if (source_pvo == NULL) {
		mtx_unlock(&moea_table_mutex);
		return (0);
	}

	if (victim_pvo == NULL) {
		if ((pt->pte_hi & PTE_HID) == 0)
			panic("moea_pte_spill: victim p-pte (%p) has no pvo "
			    "entry", pt);

		/*
		 * If this is a secondary PTE, we need to search its primary
		 * pvo bucket for the matching PVO.
		 */
		LIST_FOREACH(pvo, &moea_pvo_table[ptegidx ^ moea_pteg_mask],
		    pvo_olink) {
			if (moea_pte_compare(pt, &pvo->pvo_pte.pte)) {
				victim_pvo = pvo;
				break;
			}
		}

		if (victim_pvo == NULL)
			panic("moea_pte_spill: victim s-pte (%p) has no pvo "
			    "entry", pt);
	}

	/*
	 * We are invalidating the TLB entry for the EA we are replacing even
	 * though it's valid.  If we don't, we lose any ref/chg bit changes
	 * contained in the TLB entry.
	 */
	source_pvo->pvo_pte.pte.pte_hi &= ~PTE_HID;

	moea_pte_unset(pt, &victim_pvo->pvo_pte.pte, victim_pvo->pvo_vaddr);
	moea_pte_set(pt, &source_pvo->pvo_pte.pte);

	PVO_PTEGIDX_CLR(victim_pvo);
	PVO_PTEGIDX_SET(source_pvo, i);
	moea_pte_replacements++;

	mtx_unlock(&moea_table_mutex);
	return (1);
}

/*
 * Insert the given PTE into the first free slot of its primary PTEG or,
 * failing that, of its secondary PTEG, setting the HID bit as appropriate.
 * Returns the slot index used; panics if both groups are full.
 */
static int
moea_pte_insert(u_int ptegidx, struct pte *pvo_pt)
{
	struct pte *pt;
	int i;

	mtx_assert(&moea_table_mutex, MA_OWNED);

	/*
	 * First try primary hash.
	 */
	for (pt = moea_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi &= ~PTE_HID;
			moea_pte_set(pt, pvo_pt);
			return (i);
		}
	}

	/*
	 * Now try secondary hash.
	 */
	ptegidx ^= moea_pteg_mask;

	for (pt = moea_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi |= PTE_HID;
			moea_pte_set(pt, pvo_pt);
			return (i);
		}
	}

	panic("moea_pte_insert: overflow");
	return (-1);
}
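/*
 * Editor's note (not in the original source): moea_pte_spill() above
 * returns 1 if the faulting address was backed by a PVO and its PTE was
 * (re)loaded into the hardware page table, and 0 if the fault is not ours
 * to handle.  The "mftb" read simply uses the low three bits of the
 * timebase as a cheap pseudo-random choice of victim slot within the
 * eight-entry PTEG.
 */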
/*
 * Return TRUE if any mapping of the given page has the given PTE attribute
 * bit (PTE_REF or PTE_CHG) set, caching the result on the page.
 */
static boolean_t
moea_query_bit(vm_page_t m, int ptebit)
{
	struct pvo_entry *pvo;
	struct pte *pt;

	if (moea_attr_fetch(m) & ptebit)
		return (TRUE);

	vm_page_lock_queues();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		/*
		 * See if we saved the bit off.  If so, cache it and return
		 * success.
		 */
		if (pvo->pvo_pte.pte.pte_lo & ptebit) {
			moea_attr_save(m, ptebit);
			vm_page_unlock_queues();
			return (TRUE);
		}
	}

	/*
	 * No luck, now go through the hard part of looking at the PTEs
	 * themselves.  Sync so that any pending REF/CHG bits are flushed to
	 * the PTEs.
	 */
	powerpc_sync();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		/*
		 * See if this pvo has a valid PTE.  If so, fetch the
		 * REF/CHG bits from the valid PTE.  If the appropriate
		 * ptebit is set, cache it and return success.
		 */
		pt = moea_pvo_to_pte(pvo, -1);
		if (pt != NULL) {
			moea_pte_synch(pt, &pvo->pvo_pte.pte);
			mtx_unlock(&moea_table_mutex);
			if (pvo->pvo_pte.pte.pte_lo & ptebit) {
				moea_attr_save(m, ptebit);
				vm_page_unlock_queues();
				return (TRUE);
			}
		}
	}

	vm_page_unlock_queues();
	return (FALSE);
}

/*
 * Clear the given PTE attribute bit in every mapping of the given page,
 * returning the number of valid PTEs in which it was set.
 */
static u_int
moea_clear_bit(vm_page_t m, int ptebit)
{
	u_int count;
	struct pvo_entry *pvo;
	struct pte *pt;

	vm_page_lock_queues();

	/*
	 * Clear the cached value.
	 */
	moea_attr_clear(m, ptebit);

	/*
	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
	 * we can reset the right ones).  Note that since the pvo entries and
	 * list heads are accessed via BAT0 and are never placed in the page
	 * table, we don't have to worry about further accesses setting the
	 * REF/CHG bits.
	 */
	powerpc_sync();

	/*
	 * For each pvo entry, clear the pvo's ptebit.  If this pvo has a
	 * valid pte, clear the ptebit from the valid pte as well.
	 */
	count = 0;
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		pt = moea_pvo_to_pte(pvo, -1);
		if (pt != NULL) {
			moea_pte_synch(pt, &pvo->pvo_pte.pte);
			if (pvo->pvo_pte.pte.pte_lo & ptebit) {
				count++;
				moea_pte_clear(pt, PVO_VADDR(pvo), ptebit);
			}
			mtx_unlock(&moea_table_mutex);
		}
		pvo->pvo_pte.pte.pte_lo &= ~ptebit;
	}

	vm_page_unlock_queues();
	return (count);
}

/*
 * Return 0 if the physical range [pa, pa + size) is encompassed by
 * battable[idx] and the entry is suitable for device access, otherwise
 * an errno describing why it is not.
 */
static int
moea_bat_mapped(int idx, vm_offset_t pa, vm_size_t size)
{
	u_int prot;
	u_int32_t start;
	u_int32_t end;
	u_int32_t bat_ble;

	/*
	 * Return immediately if not a valid mapping.
	 */
	if (!(battable[idx].batu & BAT_Vs))
		return (EINVAL);

	/*
	 * The BAT entry must be cache-inhibited, guarded, and r/w
	 * so it can function as an i/o page.
	 */
	prot = battable[idx].batl & (BAT_I|BAT_G|BAT_PP_RW);
	if (prot != (BAT_I|BAT_G|BAT_PP_RW))
		return (EPERM);

	/*
	 * The address should be within the BAT range.  Assume that the
	 * start address in the BAT has the correct alignment (thus
	 * not requiring masking).
	 */
	start = battable[idx].batl & BAT_PBS;
	bat_ble = (battable[idx].batu & ~(BAT_EBS)) | 0x03;
	end = start | (bat_ble << 15) | 0x7fff;

	if ((pa < start) || ((pa + size) > end))
		return (ERANGE);

	return (0);
}

/*
 * Returns 0 if the given physical range is covered 1:1 by a suitable BAT
 * entry, EFAULT otherwise; callers test against 0 rather than treating
 * the result as a boolean.
 */
boolean_t
moea_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
{
	int i;

	/*
	 * This currently does not work for entries that
	 * overlap 256M BAT segments.
	 */
	for (i = 0; i < 16; i++)
		if (moea_bat_mapped(i, pa, size) == 0)
			return (0);

	return (EFAULT);
}
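/*
 * Editor's note (not in the original source): a worked example of the BAT
 * size arithmetic in moea_bat_mapped() above.  For a 256MB block the BL
 * field is 0x7ff, so bat_ble = (0x7ff << 2) | 0x03 = 0x1fff and
 * end = start | (0x1fff << 15) | 0x7fff = start | 0x0fffffff, i.e. the
 * last byte of the 256MB range.
 */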
/*
 * Map a set of physical memory pages into the kernel virtual
 * address space.  Return a pointer to where it is mapped.  This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 */
void *
moea_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
{

	return (moea_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
}

/*
 * As above, but with an explicit memory attribute for the new mapping.
 */
void *
moea_mapdev_attr(mmu_t mmu, vm_offset_t pa, vm_size_t size, vm_memattr_t ma)
{
	vm_offset_t va, tmpva, ppa, offset;
	int i;

	ppa = trunc_page(pa);
	offset = pa & PAGE_MASK;
	size = roundup(offset + size, PAGE_SIZE);

	/*
	 * If the physical address lies within a valid BAT table entry,
	 * return the 1:1 mapping.  This currently doesn't work
	 * for regions that overlap 256M BAT segments.
	 */
	for (i = 0; i < 16; i++) {
		if (moea_bat_mapped(i, pa, size) == 0)
			return ((void *)pa);
	}

	va = kmem_alloc_nofault(kernel_map, size);
	if (va == 0)
		panic("moea_mapdev: Couldn't alloc kernel virtual memory");

	for (tmpva = va; size > 0;) {
		moea_kenter_attr(mmu, tmpva, ppa, ma);
		tlbie(tmpva);
		size -= PAGE_SIZE;
		tmpva += PAGE_SIZE;
		ppa += PAGE_SIZE;
	}

	return ((void *)(va + offset));
}

/*
 * Undo a mapping created by moea_mapdev(); BAT-covered addresses need no
 * work, otherwise release the kernel virtual range.
 */
void
moea_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
{
	vm_offset_t base, offset;

	/*
	 * If this is outside kernel virtual space, then it's a
	 * battable entry and doesn't require unmapping.
	 */
	if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= virtual_end)) {
		base = trunc_page(va);
		offset = va & PAGE_MASK;
		size = roundup(offset + size, PAGE_SIZE);
		kmem_free(kernel_map, base, size);
	}
}

/*
 * Make the instruction cache coherent over the given range of a pmap's
 * virtual address space, one resident page at a time.
 */
static void
moea_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
{
	struct pvo_entry *pvo;
	vm_offset_t lim;
	vm_paddr_t pa;
	vm_size_t len;

	PMAP_LOCK(pm);
	while (sz > 0) {
		/*
		 * Round up past va so that a page-aligned va still makes
		 * forward progress (round_page(va) alone would yield a
		 * zero-length chunk and loop forever).
		 */
		lim = round_page(va + 1);
		len = MIN(lim - va, sz);
		pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
		if (pvo != NULL) {
			pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) |
			    (va & ADDR_POFF);
			moea_syncicache(pa, len);
		}
		va += len;
		sz -= len;
	}
	PMAP_UNLOCK(pm);
}
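/*
 * Editor's sketch (not in the original source): a typical round trip
 * through moea_mapdev()/moea_unmapdev() above.  The physical address is a
 * placeholder for some device's register window; illustrative only, so
 * the block is kept out of the build.
 */
#ifdef MOEA_EXAMPLES
static void
moea_mapdev_example(mmu_t mmu)
{
	void *regs;

	/* Map one page of (hypothetical) device registers. */
	regs = moea_mapdev(mmu, 0xf0000000, PAGE_SIZE);

	/* ... volatile loads/stores through "regs" ... */

	/* Release the mapping (a no-op if it came from a BAT entry). */
	moea_unmapdev(mmu, (vm_offset_t)regs, PAGE_SIZE);
}
#endif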