/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*-
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this module is called upon to
 * provide software-use-only maps which may or may not be stored in the
 * same form as hardware maps.  These pseudo-maps are used to store
 * intermediate results from copy operations to and from address spaces.
 *
 * Since the information managed by this module is also stored by the
 * logical address mapping module, this module may throw away valid virtual
 * to physical mappings at almost any time.  However, invalidations of
 * mappings must be done as requested.
 *
 * In order to cope with hardware architectures which make virtual to
 * physical map invalidates expensive, this module may delay invalidate
 * reduced protection operations until such time as they are actually
 * necessary.  This module is given full information as to which processors
 * are currently using which maps, and to when physical maps must be made
 * correct.
 */

#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/cpu.h>
#include <machine/powerpc.h>
#include <machine/bat.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/pte.h>
#include <machine/smp.h>
#include <machine/sr.h>
#include <machine/mmuvar.h>

#include "mmu_if.h"

#define	MOEA_DEBUG

#define	TODO	panic("%s: not implemented", __func__);

#define	TLBIE(va)	__asm __volatile("tlbie %0" :: "r"(va))
#define	TLBSYNC()	__asm __volatile("tlbsync");
#define	SYNC()		__asm __volatile("sync");
#define	EIEIO()		__asm __volatile("eieio");

#define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
#define	VSID_TO_SR(vsid)	((vsid) & 0xf)
#define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)

#define	PVO_PTEGIDX_MASK	0x007		/* which PTEG slot */
#define	PVO_PTEGIDX_VALID	0x008		/* slot is valid */
#define	PVO_WIRED		0x010		/* PVO entry is wired */
#define	PVO_MANAGED		0x020		/* PVO entry is managed */
#define	PVO_EXECUTABLE		0x040		/* PVO entry is executable */
#define	PVO_BOOTSTRAP		0x080		/* PVO entry allocated during
						   bootstrap */
#define	PVO_FAKE		0x100		/* fictitious phys page */
#define	PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
#define	PVO_ISEXECUTABLE(pvo)	((pvo)->pvo_vaddr & PVO_EXECUTABLE)
#define	PVO_ISFAKE(pvo)		((pvo)->pvo_vaddr & PVO_FAKE)
#define	PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
#define	PVO_PTEGIDX_ISSET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
#define	PVO_PTEGIDX_CLR(pvo)	\
	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
#define	PVO_PTEGIDX_SET(pvo, i)	\
	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))

#define	MOEA_PVO_CHECK(pvo)

struct ofw_map {
	vm_offset_t	om_va;
	vm_size_t	om_len;
	vm_offset_t	om_pa;
	u_int		om_mode;
};

/*
 * Map of physical memory regions.
 */
static struct mem_region *regions;
static struct mem_region *pregions;
u_int	phys_avail_count;
int	regions_sz, pregions_sz;
static struct ofw_map *translations;

extern struct pmap ofw_pmap;

/*
 * Lock for the pteg and pvo tables.
 */
struct mtx	moea_table_mutex;

/*
 * PTEG data.
 */
static struct pteg *moea_pteg_table;
u_int	moea_pteg_count;
u_int	moea_pteg_mask;

/*
 * PVO data.
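 *
 * Each pvo_entry tracks one virtual-to-physical mapping.  Roughly, an
 * entry lives on two lists at once: the per-PTEG overflow list
 * (pvo_olink), used to recover mappings that were evicted from the
 * hardware page table, and the per-page list (pvo_vlink), used to find
 * every mapping of a given physical page.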
 */
struct	pvo_head *moea_pvo_table;		/* pvo entries by pteg index */
struct	pvo_head moea_pvo_kunmanaged =
    LIST_HEAD_INITIALIZER(moea_pvo_kunmanaged);	/* list of unmanaged pages */
struct	pvo_head moea_pvo_unmanaged =
    LIST_HEAD_INITIALIZER(moea_pvo_unmanaged);	/* list of unmanaged pages */

uma_zone_t	moea_upvo_zone;	/* zone for pvo entries for unmanaged pages */
uma_zone_t	moea_mpvo_zone;	/* zone for pvo entries for managed pages */

#define	BPVO_POOL_SIZE	32768
static struct pvo_entry *moea_bpvo_pool;
static int moea_bpvo_pool_index = 0;

#define	VSID_NBPW	(sizeof(u_int32_t) * 8)
static u_int moea_vsid_bitmap[NPMAPS / VSID_NBPW];

static boolean_t moea_initialized = FALSE;

/*
 * Statistics.
 */
u_int	moea_pte_valid = 0;
u_int	moea_pte_overflow = 0;
u_int	moea_pte_replacements = 0;
u_int	moea_pvo_entries = 0;
u_int	moea_pvo_enter_calls = 0;
u_int	moea_pvo_remove_calls = 0;
u_int	moea_pte_spills = 0;
SYSCTL_INT(_machdep, OID_AUTO, moea_pte_valid, CTLFLAG_RD, &moea_pte_valid,
    0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pte_overflow, CTLFLAG_RD,
    &moea_pte_overflow, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pte_replacements, CTLFLAG_RD,
    &moea_pte_replacements, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_entries, CTLFLAG_RD, &moea_pvo_entries,
    0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_enter_calls, CTLFLAG_RD,
    &moea_pvo_enter_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_remove_calls, CTLFLAG_RD,
    &moea_pvo_remove_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pte_spills, CTLFLAG_RD,
    &moea_pte_spills, 0, "");

/*
 * Allocate physical memory for use in moea_bootstrap.
 */
static vm_offset_t	moea_bootstrap_alloc(vm_size_t, u_int);

/*
 * PTE calls.
 */
static int	moea_pte_insert(u_int, struct pte *);

/*
 * PVO calls.
 */
static int	moea_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *,
		    vm_offset_t, vm_offset_t, u_int, int);
static void	moea_pvo_remove(struct pvo_entry *, int);
static struct pvo_entry *moea_pvo_find_va(pmap_t, vm_offset_t, int *);
static struct pte *moea_pvo_to_pte(const struct pvo_entry *, int);

/*
 * Utility routines.
 */
static void	moea_enter_locked(pmap_t, vm_offset_t, vm_page_t,
		    vm_prot_t, boolean_t);
static void	moea_syncicache(vm_offset_t, vm_size_t);
static boolean_t moea_query_bit(vm_page_t, int);
static u_int	moea_clear_bit(vm_page_t, int, int *);
static void	moea_kremove(mmu_t, vm_offset_t);
static void	tlbia(void);
int		moea_pte_spill(vm_offset_t);

/*
 * Kernel MMU interface
 */
void moea_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
void moea_clear_modify(mmu_t, vm_page_t);
void moea_clear_reference(mmu_t, vm_page_t);
void moea_copy_page(mmu_t, vm_page_t, vm_page_t);
void moea_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
void moea_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
    vm_prot_t);
void moea_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
vm_paddr_t moea_extract(mmu_t, pmap_t, vm_offset_t);
vm_page_t moea_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
void moea_init(mmu_t);
boolean_t moea_is_modified(mmu_t, vm_page_t);
boolean_t moea_ts_referenced(mmu_t, vm_page_t);
vm_offset_t moea_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int);
boolean_t moea_page_exists_quick(mmu_t, pmap_t, vm_page_t);
int moea_page_wired_mappings(mmu_t, vm_page_t);
void moea_pinit(mmu_t, pmap_t);
void moea_pinit0(mmu_t, pmap_t);
void moea_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
void moea_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
void moea_qremove(mmu_t, vm_offset_t, int);
void moea_release(mmu_t, pmap_t);
void moea_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea_remove_all(mmu_t, vm_page_t);
void moea_remove_write(mmu_t, vm_page_t);
void moea_zero_page(mmu_t, vm_page_t);
void moea_zero_page_area(mmu_t, vm_page_t, int, int);
void moea_zero_page_idle(mmu_t, vm_page_t);
void moea_activate(mmu_t, struct thread *);
void moea_deactivate(mmu_t, struct thread *);
void moea_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
void *moea_mapdev(mmu_t, vm_offset_t, vm_size_t);
void moea_unmapdev(mmu_t, vm_offset_t, vm_size_t);
vm_offset_t moea_kextract(mmu_t, vm_offset_t);
void moea_kenter(mmu_t, vm_offset_t, vm_offset_t);
boolean_t moea_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
boolean_t moea_page_executable(mmu_t, vm_page_t);

static mmu_method_t moea_methods[] = {
	MMUMETHOD(mmu_change_wiring,	moea_change_wiring),
	MMUMETHOD(mmu_clear_modify,	moea_clear_modify),
	MMUMETHOD(mmu_clear_reference,	moea_clear_reference),
	MMUMETHOD(mmu_copy_page,	moea_copy_page),
	MMUMETHOD(mmu_enter,		moea_enter),
	MMUMETHOD(mmu_enter_object,	moea_enter_object),
	MMUMETHOD(mmu_enter_quick,	moea_enter_quick),
	MMUMETHOD(mmu_extract,		moea_extract),
	MMUMETHOD(mmu_extract_and_hold,	moea_extract_and_hold),
	MMUMETHOD(mmu_init,		moea_init),
	MMUMETHOD(mmu_is_modified,	moea_is_modified),
	MMUMETHOD(mmu_ts_referenced,	moea_ts_referenced),
	MMUMETHOD(mmu_map,		moea_map),
	MMUMETHOD(mmu_page_exists_quick,moea_page_exists_quick),
	MMUMETHOD(mmu_page_wired_mappings,moea_page_wired_mappings),
	MMUMETHOD(mmu_pinit,		moea_pinit),
	MMUMETHOD(mmu_pinit0,		moea_pinit0),
	MMUMETHOD(mmu_protect,		moea_protect),
	MMUMETHOD(mmu_qenter,		moea_qenter),
	MMUMETHOD(mmu_qremove,		moea_qremove),
	MMUMETHOD(mmu_release,		moea_release),
	MMUMETHOD(mmu_remove,		moea_remove),
	MMUMETHOD(mmu_remove_all,	moea_remove_all),
	MMUMETHOD(mmu_remove_write,	moea_remove_write),
	MMUMETHOD(mmu_zero_page,	moea_zero_page),
	MMUMETHOD(mmu_zero_page_area,	moea_zero_page_area),
	MMUMETHOD(mmu_zero_page_idle,	moea_zero_page_idle),
	MMUMETHOD(mmu_activate,		moea_activate),
	MMUMETHOD(mmu_deactivate,	moea_deactivate),

	/* Internal interfaces */
	MMUMETHOD(mmu_bootstrap,	moea_bootstrap),
	MMUMETHOD(mmu_mapdev,		moea_mapdev),
	MMUMETHOD(mmu_unmapdev,		moea_unmapdev),
	MMUMETHOD(mmu_kextract,		moea_kextract),
	MMUMETHOD(mmu_kenter,		moea_kenter),
	MMUMETHOD(mmu_dev_direct_mapped,moea_dev_direct_mapped),
	MMUMETHOD(mmu_page_executable,	moea_page_executable),

	{ 0, 0 }
};

static mmu_def_t oea_mmu = {
	MMU_TYPE_OEA,
	moea_methods,
	0
};
MMU_DEF(oea_mmu);


static __inline int
va_to_sr(u_int *sr, vm_offset_t va)
{
	return (sr[(uintptr_t)va >> ADDR_SR_SHFT]);
}

static __inline u_int
va_to_pteg(u_int sr, vm_offset_t addr)
{
	u_int hash;

	hash = (sr & SR_VSID_MASK) ^ (((u_int)addr & ADDR_PIDX) >>
	    ADDR_PIDX_SHFT);
	return (hash & moea_pteg_mask);
}

static __inline struct pvo_head *
pa_to_pvoh(vm_offset_t pa, vm_page_t *pg_p)
{
	struct vm_page *pg;

	pg = PHYS_TO_VM_PAGE(pa);

	if (pg_p != NULL)
		*pg_p = pg;

	if (pg == NULL)
		return (&moea_pvo_unmanaged);

	return (&pg->md.mdpg_pvoh);
}

static __inline struct pvo_head *
vm_page_to_pvoh(vm_page_t m)
{

	return (&m->md.mdpg_pvoh);
}

static __inline void
moea_attr_clear(vm_page_t m, int ptebit)
{

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	m->md.mdpg_attrs &= ~ptebit;
}

static __inline int
moea_attr_fetch(vm_page_t m)
{

	return (m->md.mdpg_attrs);
}

static __inline void
moea_attr_save(vm_page_t m, int ptebit)
{

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	m->md.mdpg_attrs |= ptebit;
}

static __inline int
moea_pte_compare(const struct pte *pt, const struct pte *pvo_pt)
{
	if (pt->pte_hi == pvo_pt->pte_hi)
		return (1);

	return (0);
}

static __inline int
moea_pte_match(struct pte *pt, u_int sr, vm_offset_t va, int which)
{
	return (pt->pte_hi & ~PTE_VALID) ==
	    (((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
	    ((va >> ADDR_API_SHFT) & PTE_API) | which);
}

static __inline void
moea_pte_create(struct pte *pt, u_int sr, vm_offset_t va, u_int pte_lo)
{

	mtx_assert(&moea_table_mutex, MA_OWNED);

	/*
	 * Construct a PTE.  Default to IMB initially.  Valid bit only gets
	 * set when the real pte is set in memory.
	 *
	 * Note: Don't set the valid bit for correct operation of tlb update.
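	 *
	 * For reference (32-bit OEA segmented MMU): pte_hi carries the
	 * valid bit, the VSID, the hash identifier (HID) and the
	 * abbreviated page index (API); pte_lo carries the physical page
	 * number together with the R/C (referenced/changed), WIMG and PP
	 * protection fields.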
	 */
	pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
	    (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API);
	pt->pte_lo = pte_lo;
}

static __inline void
moea_pte_synch(struct pte *pt, struct pte *pvo_pt)
{

	mtx_assert(&moea_table_mutex, MA_OWNED);
	pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG);
}

static __inline void
moea_pte_clear(struct pte *pt, vm_offset_t va, int ptebit)
{

	mtx_assert(&moea_table_mutex, MA_OWNED);

	/*
	 * As shown in Section 7.6.3.2.3
	 */
	pt->pte_lo &= ~ptebit;
	TLBIE(va);
	EIEIO();
	TLBSYNC();
	SYNC();
}

static __inline void
moea_pte_set(struct pte *pt, struct pte *pvo_pt)
{

	mtx_assert(&moea_table_mutex, MA_OWNED);
	pvo_pt->pte_hi |= PTE_VALID;

	/*
	 * Update the PTE as defined in section 7.6.3.1.
	 * Note that the REF/CHG bits are from pvo_pt and thus should have
	 * been saved so this routine can restore them (if desired).
	 */
	pt->pte_lo = pvo_pt->pte_lo;
	EIEIO();
	pt->pte_hi = pvo_pt->pte_hi;
	SYNC();
	moea_pte_valid++;
}

static __inline void
moea_pte_unset(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
{

	mtx_assert(&moea_table_mutex, MA_OWNED);
	pvo_pt->pte_hi &= ~PTE_VALID;

	/*
	 * Force the reg & chg bits back into the PTEs.
	 */
	SYNC();

	/*
	 * Invalidate the pte.
	 */
	pt->pte_hi &= ~PTE_VALID;

	SYNC();
	TLBIE(va);
	EIEIO();
	TLBSYNC();
	SYNC();

	/*
	 * Save the reg & chg bits.
	 */
	moea_pte_synch(pt, pvo_pt);
	moea_pte_valid--;
}

static __inline void
moea_pte_change(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
{

	/*
	 * Invalidate the PTE
	 */
	moea_pte_unset(pt, pvo_pt, va);
	moea_pte_set(pt, pvo_pt);
}

/*
 * Quick sort callout for comparing memory regions.
 */
static int	mr_cmp(const void *a, const void *b);
static int	om_cmp(const void *a, const void *b);

static int
mr_cmp(const void *a, const void *b)
{
	const struct mem_region *regiona;
	const struct mem_region *regionb;

	regiona = a;
	regionb = b;
	if (regiona->mr_start < regionb->mr_start)
		return (-1);
	else if (regiona->mr_start > regionb->mr_start)
		return (1);
	else
		return (0);
}

static int
om_cmp(const void *a, const void *b)
{
	const struct ofw_map *mapa;
	const struct ofw_map *mapb;

	mapa = a;
	mapb = b;
	if (mapa->om_pa < mapb->om_pa)
		return (-1);
	else if (mapa->om_pa > mapb->om_pa)
		return (1);
	else
		return (0);
}

void
pmap_cpu_bootstrap(int ap)
{
	u_int sdr;
	int i;

	if (ap) {
		__asm __volatile("mtdbatu 0,%0" :: "r"(battable[0].batu));
		__asm __volatile("mtdbatl 0,%0" :: "r"(battable[0].batl));
		isync();
		__asm __volatile("mtibatu 0,%0" :: "r"(battable[0].batu));
		__asm __volatile("mtibatl 0,%0" :: "r"(battable[0].batl));
		isync();
	}

	__asm __volatile("mtdbatu 1,%0" :: "r"(battable[8].batu));
	__asm __volatile("mtdbatl 1,%0" :: "r"(battable[8].batl));
	isync();

	__asm __volatile("mtibatu 1,%0" :: "r"(0));
	__asm __volatile("mtdbatu 2,%0" :: "r"(0));
	__asm __volatile("mtibatu 2,%0" :: "r"(0));
	__asm __volatile("mtdbatu 3,%0" :: "r"(0));
	__asm __volatile("mtibatu 3,%0" :: "r"(0));
	isync();

	for (i = 0; i < 16; i++)
		mtsrin(i << ADDR_SR_SHFT, EMPTY_SEGMENT);

	__asm __volatile("mtsr %0,%1" :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));
	__asm __volatile("mtsr %0,%1" :: "n"(KERNEL2_SR), "r"(KERNEL2_SEGMENT));
	__asm __volatile("sync");

	sdr = (u_int)moea_pteg_table | (moea_pteg_mask >> 10);
	__asm __volatile("mtsdr1 %0" :: "r"(sdr));
	isync();

	tlbia();
}

void
moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	ihandle_t	mmui;
	phandle_t	chosen, mmu;
	int		sz;
	int		i, j;
	int		ofw_mappings;
	vm_size_t	size, physsz, hwphyssz;
	vm_offset_t	pa, va, off;

	/*
	 * Set up BAT0 to map the lowest 256 MB area
	 */
	battable[0x0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
	battable[0x0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);

	/*
	 * Map PCI memory space.
	 */
	battable[0x8].batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0x8].batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);

	battable[0x9].batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0x9].batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs);

	battable[0xa].batl = BATL(0xa0000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0xa].batu = BATU(0xa0000000, BAT_BL_256M, BAT_Vs);

	battable[0xb].batl = BATL(0xb0000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0xb].batu = BATU(0xb0000000, BAT_BL_256M, BAT_Vs);

	/*
	 * Map obio devices.
	 */
	battable[0xf].batl = BATL(0xf0000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0xf].batu = BATU(0xf0000000, BAT_BL_256M, BAT_Vs);

	/*
	 * Use an IBAT and a DBAT to map the bottom segment of memory
	 * where we are.
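	 *
	 * A BAT register pair maps one naturally aligned, power-of-two
	 * sized block (256MB blocks are used throughout this file) without
	 * any page table lookups, which is what keeps the kernel runnable
	 * before the PTEG table has been built.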
	 */
	__asm (".balign 32; \n"
	       "mtibatu 0,%0; mtibatl 0,%1; isync; \n"
	       "mtdbatu 0,%0; mtdbatl 0,%1; isync"
	    :: "r"(battable[0].batu), "r"(battable[0].batl));

	/* map pci space */
	__asm __volatile("mtdbatu 1,%0" :: "r"(battable[8].batu));
	__asm __volatile("mtdbatl 1,%0" :: "r"(battable[8].batl));
	isync();

	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
	CTR0(KTR_PMAP, "moea_bootstrap: physical memory");

	qsort(pregions, pregions_sz, sizeof(*pregions), mr_cmp);
	for (i = 0; i < pregions_sz; i++) {
		vm_offset_t pa;
		vm_offset_t end;

		CTR3(KTR_PMAP, "physregion: %#x - %#x (%#x)",
		    pregions[i].mr_start,
		    pregions[i].mr_start + pregions[i].mr_size,
		    pregions[i].mr_size);
		/*
		 * Install entries into the BAT table to allow all
		 * of physmem to be covered by on-demand BAT entries.
		 * The loop will sometimes set the same battable element
		 * twice, but that's fine since they won't be used for
		 * a while yet.
		 */
		pa = pregions[i].mr_start & 0xf0000000;
		end = pregions[i].mr_start + pregions[i].mr_size;
		do {
			u_int n = pa >> ADDR_SR_SHFT;

			battable[n].batl = BATL(pa, BAT_M, BAT_PP_RW);
			battable[n].batu = BATU(pa, BAT_BL_256M, BAT_Vs);
			pa += SEGMENT_LENGTH;
		} while (pa < end);
	}

	if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
		panic("moea_bootstrap: phys_avail too small");
	qsort(regions, regions_sz, sizeof(*regions), mr_cmp);
	phys_avail_count = 0;
	physsz = 0;
	hwphyssz = 0;
	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
	for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
		CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
		    regions[i].mr_start + regions[i].mr_size,
		    regions[i].mr_size);
		if (hwphyssz != 0 &&
		    (physsz + regions[i].mr_size) >= hwphyssz) {
			if (physsz < hwphyssz) {
				phys_avail[j] = regions[i].mr_start;
				phys_avail[j + 1] = regions[i].mr_start +
				    hwphyssz - physsz;
				physsz = hwphyssz;
				phys_avail_count++;
			}
			break;
		}
		phys_avail[j] = regions[i].mr_start;
		phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
		phys_avail_count++;
		physsz += regions[i].mr_size;
	}
	physmem = btoc(physsz);

	/*
	 * Allocate PTEG table.
	 */
#ifdef PTEGCOUNT
	moea_pteg_count = PTEGCOUNT;
#else
	moea_pteg_count = 0x1000;

	while (moea_pteg_count < physmem)
		moea_pteg_count <<= 1;

	moea_pteg_count >>= 1;
#endif /* PTEGCOUNT */

	size = moea_pteg_count * sizeof(struct pteg);
	CTR2(KTR_PMAP, "moea_bootstrap: %d PTEGs, %d bytes", moea_pteg_count,
	    size);
	moea_pteg_table = (struct pteg *)moea_bootstrap_alloc(size, size);
	CTR1(KTR_PMAP, "moea_bootstrap: PTEG table at %p", moea_pteg_table);
	bzero((void *)moea_pteg_table, moea_pteg_count * sizeof(struct pteg));
	moea_pteg_mask = moea_pteg_count - 1;

	/*
	 * Allocate pv/overflow lists.
	 */
	size = sizeof(struct pvo_head) * moea_pteg_count;
	moea_pvo_table = (struct pvo_head *)moea_bootstrap_alloc(size,
	    PAGE_SIZE);
	CTR1(KTR_PMAP, "moea_bootstrap: PVO table at %p", moea_pvo_table);
	for (i = 0; i < moea_pteg_count; i++)
		LIST_INIT(&moea_pvo_table[i]);

	/*
	 * Initialize the lock that synchronizes access to the pteg and pvo
	 * tables.
	 */
	mtx_init(&moea_table_mutex, "pmap table", NULL, MTX_DEF |
	    MTX_RECURSE);

	/*
	 * Initialise the unmanaged pvo pool.
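	 *
	 * UMA is not usable this early, so bootstrap-time PVO entries come
	 * from this fixed pool of BPVO_POOL_SIZE entries; such entries are
	 * marked PVO_BOOTSTRAP and are never handed back to a zone.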
	 */
	moea_bpvo_pool = (struct pvo_entry *)moea_bootstrap_alloc(
	    BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
	moea_bpvo_pool_index = 0;

	/*
	 * Make sure kernel vsid is allocated as well as VSID 0.
	 */
	moea_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW]
	    |= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
	moea_vsid_bitmap[0] |= 1;

	/*
	 * Set up the Open Firmware pmap and add its mappings.
	 */
	moea_pinit(mmup, &ofw_pmap);
	ofw_pmap.pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
	ofw_pmap.pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT;
	if ((chosen = OF_finddevice("/chosen")) == -1)
		panic("moea_bootstrap: can't find /chosen");
	OF_getprop(chosen, "mmu", &mmui, 4);
	if ((mmu = OF_instance_to_package(mmui)) == -1)
		panic("moea_bootstrap: can't get mmu package");
	if ((sz = OF_getproplen(mmu, "translations")) == -1)
		panic("moea_bootstrap: can't get ofw translation count");
	translations = NULL;
	for (i = 0; phys_avail[i] != 0; i += 2) {
		if (phys_avail[i + 1] >= sz) {
			translations = (struct ofw_map *)phys_avail[i];
			break;
		}
	}
	if (translations == NULL)
		panic("moea_bootstrap: no space to copy translations");
	bzero(translations, sz);
	if (OF_getprop(mmu, "translations", translations, sz) == -1)
		panic("moea_bootstrap: can't get ofw translations");
	CTR0(KTR_PMAP, "moea_bootstrap: translations");
	sz /= sizeof(*translations);
	qsort(translations, sz, sizeof (*translations), om_cmp);
	for (i = 0, ofw_mappings = 0; i < sz; i++) {
		CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
		    translations[i].om_pa, translations[i].om_va,
		    translations[i].om_len);

		/*
		 * If the mapping is 1:1, let the RAM and device on-demand
		 * BAT tables take care of the translation.
		 */
		if (translations[i].om_va == translations[i].om_pa)
			continue;

		/* Enter the pages */
		for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
			struct vm_page m;

			m.phys_addr = translations[i].om_pa + off;
			PMAP_LOCK(&ofw_pmap);
			moea_enter_locked(&ofw_pmap,
			    translations[i].om_va + off, &m,
			    VM_PROT_ALL, 1);
			PMAP_UNLOCK(&ofw_pmap);
			ofw_mappings++;
		}
	}

	/*
	 * Calculate the last available physical address.
	 */
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	Maxmem = powerpc_btop(phys_avail[i + 1]);

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
	PMAP_LOCK_INIT(kernel_pmap);
	for (i = 0; i < 16; i++) {
		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT;
	}
	kernel_pmap->pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
	kernel_pmap->pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT;
	kernel_pmap->pm_active = ~0;

	pmap_cpu_bootstrap(0);

	pmap_bootstrapped++;

	/*
	 * Set the start and end of kva.
	 */
	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_KERNEL_ADDRESS;

	/*
	 * Allocate a kernel stack with a guard page for thread0 and map it
	 * into the kernel page map.
	 */
	pa = moea_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE);
	va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
	virtual_avail = va + KSTACK_PAGES * PAGE_SIZE;
	CTR2(KTR_PMAP, "moea_bootstrap: kstack0 at %#x (%#x)", pa, va);
	thread0.td_kstack = va;
	thread0.td_kstack_pages = KSTACK_PAGES;
	for (i = 0; i < KSTACK_PAGES; i++) {
		moea_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}

	/*
	 * Allocate virtual address space for the message buffer.
	 */
	pa = msgbuf_phys = moea_bootstrap_alloc(MSGBUF_SIZE, PAGE_SIZE);
	msgbufp = (struct msgbuf *)virtual_avail;
	va = virtual_avail;
	virtual_avail += round_page(MSGBUF_SIZE);
	while (va < virtual_avail) {
		moea_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}
}

/*
 * Activate a user pmap.  The pmap must be activated before its address
 * space can be accessed in any way.
 */
void
moea_activate(mmu_t mmu, struct thread *td)
{
	pmap_t	pm, pmr;

	/*
	 * Load all the data we need up front to encourage the compiler to
	 * not issue any loads while we have interrupts disabled below.
	 */
	pm = &td->td_proc->p_vmspace->vm_pmap;

	if ((pmr = (pmap_t)moea_kextract(mmu, (vm_offset_t)pm)) == NULL)
		pmr = pm;

	pm->pm_active |= PCPU_GET(cpumask);
	PCPU_SET(curpmap, pmr);
}

void
moea_deactivate(mmu_t mmu, struct thread *td)
{
	pmap_t	pm;

	pm = &td->td_proc->p_vmspace->vm_pmap;
	pm->pm_active &= ~(PCPU_GET(cpumask));
	PCPU_SET(curpmap, NULL);
}

void
moea_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired)
{
	struct pvo_entry *pvo;

	PMAP_LOCK(pm);
	pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);

	if (pvo != NULL) {
		if (wired) {
			if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
				pm->pm_stats.wired_count++;
			pvo->pvo_vaddr |= PVO_WIRED;
		} else {
			if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
				pm->pm_stats.wired_count--;
			pvo->pvo_vaddr &= ~PVO_WIRED;
		}
	}
	PMAP_UNLOCK(pm);
}

void
moea_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
{
	vm_offset_t	dst;
	vm_offset_t	src;

	dst = VM_PAGE_TO_PHYS(mdst);
	src = VM_PAGE_TO_PHYS(msrc);

	kcopy((void *)src, (void *)dst, PAGE_SIZE);
}

/*
 * Zero a page of physical memory by temporarily mapping it into the tlb.
 */
void
moea_zero_page(mmu_t mmu, vm_page_t m)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
	void *va = (void *)pa;

	bzero(va, PAGE_SIZE);
}

void
moea_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
	void *va = (void *)(pa + off);

	bzero(va, size);
}

void
moea_zero_page_idle(mmu_t mmu, vm_page_t m)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
	void *va = (void *)pa;

	bzero(va, PAGE_SIZE);
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 */
void
moea_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    boolean_t wired)
{

	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	moea_enter_locked(pmap, va, m, prot, wired);
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 *
 * The page queues and pmap must be locked.
 */
static void
moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    boolean_t wired)
{
	struct pvo_head	*pvo_head;
	uma_zone_t	zone;
	vm_page_t	pg;
	u_int		pte_lo, pvo_flags, was_exec, i;
	int		error;

	if (!moea_initialized) {
		pvo_head = &moea_pvo_kunmanaged;
		zone = moea_upvo_zone;
		pvo_flags = 0;
		pg = NULL;
		was_exec = PTE_EXEC;
	} else {
		pvo_head = vm_page_to_pvoh(m);
		pg = m;
		zone = moea_mpvo_zone;
		pvo_flags = PVO_MANAGED;
		was_exec = 0;
	}
	if (pmap_bootstrapped)
		mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	/* XXX change the pvo head for fake pages */
	if ((m->flags & PG_FICTITIOUS) == PG_FICTITIOUS)
		pvo_head = &moea_pvo_kunmanaged;

	/*
	 * If this is a managed page, and it's the first reference to the page,
	 * clear the execness of the page.  Otherwise fetch the execness.
	 */
	if ((pg != NULL) && ((m->flags & PG_FICTITIOUS) == 0)) {
		if (LIST_EMPTY(pvo_head)) {
			moea_attr_clear(pg, PTE_EXEC);
		} else {
			was_exec = moea_attr_fetch(pg) & PTE_EXEC;
		}
	}

	/*
	 * Assume the page is cache inhibited and access is guarded unless
	 * it's in our available memory array.
	 */
	pte_lo = PTE_I | PTE_G;
	for (i = 0; i < pregions_sz; i++) {
		if ((VM_PAGE_TO_PHYS(m) >= pregions[i].mr_start) &&
		    (VM_PAGE_TO_PHYS(m) <
		    (pregions[i].mr_start + pregions[i].mr_size))) {
			pte_lo &= ~(PTE_I | PTE_G);
			break;
		}
	}

	if (prot & VM_PROT_WRITE) {
		pte_lo |= PTE_BW;
		if (pmap_bootstrapped)
			vm_page_flag_set(m, PG_WRITEABLE);
	} else
		pte_lo |= PTE_BR;

	if (prot & VM_PROT_EXECUTE)
		pvo_flags |= PVO_EXECUTABLE;

	if (wired)
		pvo_flags |= PVO_WIRED;

	if ((m->flags & PG_FICTITIOUS) != 0)
		pvo_flags |= PVO_FAKE;

	error = moea_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m),
	    pte_lo, pvo_flags);

	/*
	 * Flush the real page from the instruction cache if this page is
	 * mapped executable and cacheable and was not previously mapped (or
	 * was not mapped executable).
	 */
	if (error == 0 && (pvo_flags & PVO_EXECUTABLE) &&
	    (pte_lo & PTE_I) == 0 && was_exec == 0) {
		/*
		 * Flush the real memory from the cache.
		 */
		moea_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
		if (pg != NULL)
			moea_attr_save(pg, PTE_EXEC);
	}

	/* XXX syncicache always until problems are sorted */
	moea_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
}

/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start.  This page is
 * mapped at the given virtual address start.  Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object.  The
 * last page in the sequence is the page with the largest offset from
 * m_start that can be mapped at a virtual address less than the given
 * virtual address end.  Not every virtual page between start and end
 * is mapped; only those for which a resident page exists with the
 * corresponding offset from m_start are mapped.
 */
void
moea_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{
	vm_page_t m;
	vm_pindex_t diff, psize;

	psize = atop(end - start);
	m = m_start;
	PMAP_LOCK(pm);
	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
		moea_enter_locked(pm, start + ptoa(diff), m, prot &
		    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
		m = TAILQ_NEXT(m, listq);
	}
	PMAP_UNLOCK(pm);
}

void
moea_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
    vm_prot_t prot)
{

	PMAP_LOCK(pm);
	moea_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
	    FALSE);
	PMAP_UNLOCK(pm);

}

vm_paddr_t
moea_extract(mmu_t mmu, pmap_t pm, vm_offset_t va)
{
	struct pvo_entry *pvo;
	vm_paddr_t pa;

	PMAP_LOCK(pm);
	pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
	if (pvo == NULL)
		pa = 0;
	else
		pa = (pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF);
	PMAP_UNLOCK(pm);
	return (pa);
}

/*
 * Atomically extract and hold the physical page with the given
 * pmap and virtual address pair if that mapping permits the given
 * protection.
 */
vm_page_t
moea_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
	struct pvo_entry *pvo;
	vm_page_t m;

	m = NULL;
	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	pvo = moea_pvo_find_va(pmap, va & ~ADDR_POFF, NULL);
	if (pvo != NULL && (pvo->pvo_pte.pte_hi & PTE_VALID) &&
	    ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_RW ||
	     (prot & VM_PROT_WRITE) == 0)) {
		m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte_lo & PTE_RPGN);
		vm_page_hold(m);
	}
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
	return (m);
}

void
moea_init(mmu_t mmu)
{

	moea_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
	moea_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
	moea_initialized = TRUE;
}

boolean_t
moea_is_modified(mmu_t mmu, vm_page_t m)
{

	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return (FALSE);

	return (moea_query_bit(m, PTE_CHG));
}

void
moea_clear_reference(mmu_t mmu, vm_page_t m)
{

	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return;
	moea_clear_bit(m, PTE_REF, NULL);
}

void
moea_clear_modify(mmu_t mmu, vm_page_t m)
{

	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return;
	moea_clear_bit(m, PTE_CHG, NULL);
}

/*
 * Clear the write and modified bits in each of the given page's mappings.
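 *
 * Downgrading a mapping here means rewriting its PP bits from PTE_BW
 * (read/write) to PTE_BR (read-only) and folding any hardware CHG bit
 * into the cached page attributes, so a later moea_is_modified() still
 * sees that the page was dirtied.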
 */
void
moea_remove_write(mmu_t mmu, vm_page_t m)
{
	struct pvo_entry *pvo;
	struct pte *pt;
	pmap_t	pmap;
	u_int	lo;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
	    (m->flags & PG_WRITEABLE) == 0)
		return;
	lo = moea_attr_fetch(m);
	SYNC();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		if ((pvo->pvo_pte.pte_lo & PTE_PP) != PTE_BR) {
			pt = moea_pvo_to_pte(pvo, -1);
			pvo->pvo_pte.pte_lo &= ~PTE_PP;
			pvo->pvo_pte.pte_lo |= PTE_BR;
			if (pt != NULL) {
				moea_pte_synch(pt, &pvo->pvo_pte);
				lo |= pvo->pvo_pte.pte_lo;
				pvo->pvo_pte.pte_lo &= ~PTE_CHG;
				moea_pte_change(pt, &pvo->pvo_pte,
				    pvo->pvo_vaddr);
				mtx_unlock(&moea_table_mutex);
			}
		}
		PMAP_UNLOCK(pmap);
	}
	if ((lo & PTE_CHG) != 0) {
		moea_attr_clear(m, PTE_CHG);
		vm_page_dirty(m);
	}
	vm_page_flag_clear(m, PG_WRITEABLE);
}

/*
 *	moea_ts_referenced:
 *
 *	Return a count of reference bits for a page, clearing those bits.
 *	It is not necessary for every reference bit to be cleared, but it
 *	is necessary that 0 only be returned when there are truly no
 *	reference bits set.
 *
 *	XXX: The exact number of bits to check and clear is a matter that
 *	should be tested and standardized at some point in the future for
 *	optimal aging of shared pages.
 */
boolean_t
moea_ts_referenced(mmu_t mmu, vm_page_t m)
{
	int count;

	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return (0);

	count = moea_clear_bit(m, PTE_REF, NULL);

	return (count);
}

/*
 * Map a wired page into kernel virtual address space.
 */
void
moea_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
{
	u_int	pte_lo;
	int	error;
	int	i;

#if 0
	if (va < VM_MIN_KERNEL_ADDRESS)
		panic("moea_kenter: attempt to enter non-kernel address %#x",
		    va);
#endif

	pte_lo = PTE_I | PTE_G;
	for (i = 0; i < pregions_sz; i++) {
		if ((pa >= pregions[i].mr_start) &&
		    (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
			pte_lo &= ~(PTE_I | PTE_G);
			break;
		}
	}

	PMAP_LOCK(kernel_pmap);
	error = moea_pvo_enter(kernel_pmap, moea_upvo_zone,
	    &moea_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED);

	if (error != 0 && error != ENOENT)
		panic("moea_kenter: failed to enter va %#x pa %#x: %d", va,
		    pa, error);

	/*
	 * Flush the real memory from the instruction cache.
	 */
	if ((pte_lo & (PTE_I | PTE_G)) == 0) {
		moea_syncicache(pa, PAGE_SIZE);
	}
	PMAP_UNLOCK(kernel_pmap);
}

/*
 * Extract the physical page address associated with the given kernel virtual
 * address.
 */
vm_offset_t
moea_kextract(mmu_t mmu, vm_offset_t va)
{
	struct pvo_entry *pvo;
	vm_paddr_t pa;

#ifdef UMA_MD_SMALL_ALLOC
	/*
	 * Allow direct mappings
	 */
	if (va < VM_MIN_KERNEL_ADDRESS) {
		return (va);
	}
#endif

	PMAP_LOCK(kernel_pmap);
	pvo = moea_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL);
	KASSERT(pvo != NULL, ("moea_kextract: no addr found"));
	pa = (pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF);
	PMAP_UNLOCK(kernel_pmap);
	return (pa);
}

/*
 * Remove a wired page from kernel virtual address space.
 */
void
moea_kremove(mmu_t mmu, vm_offset_t va)
{

	moea_remove(mmu, kernel_pmap, va, va + PAGE_SIZE);
}

/*
 * Map a range of physical addresses into kernel virtual address space.
 *
 * The value passed in *virt is a suggested virtual address for the mapping.
 * Architectures which can support a direct-mapped physical to virtual region
 * can return the appropriate address within that region, leaving '*virt'
 * unchanged.  We cannot and therefore do not; *virt is updated with the
 * first usable address after the mapped region.
 */
vm_offset_t
moea_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start,
    vm_offset_t pa_end, int prot)
{
	vm_offset_t	sva, va;

	sva = *virt;
	va = sva;
	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
		moea_kenter(mmu, va, pa_start);
	*virt = va;
	return (sva);
}

/*
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page.  This count may
 * be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
 */
boolean_t
moea_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
{
	int loops;
	struct pvo_entry *pvo;

	if (!moea_initialized || (m->flags & PG_FICTITIOUS))
		return FALSE;

	loops = 0;
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		if (pvo->pvo_pmap == pmap)
			return (TRUE);
		if (++loops >= 16)
			break;
	}

	return (FALSE);
}

/*
 * Return the number of managed mappings to the given physical page
 * that are wired.
 */
int
moea_page_wired_mappings(mmu_t mmu, vm_page_t m)
{
	struct pvo_entry *pvo;
	int count;

	count = 0;
	if (!moea_initialized || (m->flags & PG_FICTITIOUS) != 0)
		return (count);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
		if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
			count++;
	return (count);
}

static u_int	moea_vsidcontext;

void
moea_pinit(mmu_t mmu, pmap_t pmap)
{
	int	i, mask;
	u_int	entropy;

	KASSERT((int)pmap < VM_MIN_KERNEL_ADDRESS, ("moea_pinit: virt pmap"));
	PMAP_LOCK_INIT(pmap);

	entropy = 0;
	__asm __volatile("mftb %0" : "=r"(entropy));

	/*
	 * Allocate some segment registers for this pmap.
	 */
	for (i = 0; i < NPMAPS; i += VSID_NBPW) {
		u_int	hash, n;

		/*
		 * Create a new value by multiplying by a prime and adding in
		 * entropy from the timebase register.  This is to make the
		 * VSID more random so that the PT hash function collides
		 * less often.  (Note that the prime causes gcc to do shifts
		 * instead of a multiply.)
		 */
		moea_vsidcontext = (moea_vsidcontext * 0x1105) + entropy;
		hash = moea_vsidcontext & (NPMAPS - 1);
		if (hash == 0)		/* 0 is special, avoid it */
			continue;
		n = hash >> 5;
		mask = 1 << (hash & (VSID_NBPW - 1));
		hash = (moea_vsidcontext & 0xfffff);
		if (moea_vsid_bitmap[n] & mask) {	/* collision? */
			/* anything free in this bucket? */
			if (moea_vsid_bitmap[n] == 0xffffffff) {
				entropy = (moea_vsidcontext >> 20);
				continue;
			}
			i = ffs(~moea_vsid_bitmap[n]) - 1;
			mask = 1 << i;
			hash &= 0xfffff & ~(VSID_NBPW - 1);
			hash |= i;
		}
		moea_vsid_bitmap[n] |= mask;
		for (i = 0; i < 16; i++)
			pmap->pm_sr[i] = VSID_MAKE(i, hash);
		return;
	}

	panic("moea_pinit: out of segments");
}

/*
 * Initialize the pmap associated with process 0.
 */
void
moea_pinit0(mmu_t mmu, pmap_t pm)
{

	moea_pinit(mmu, pm);
	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
}

/*
 * Set the physical protection on the specified range of this map as requested.
 */
void
moea_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
    vm_prot_t prot)
{
	struct pvo_entry *pvo;
	struct pte *pt;
	int	pteidx;

	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
	    ("moea_protect: non current pmap"));

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		moea_remove(mmu, pm, sva, eva);
		return;
	}

	vm_page_lock_queues();
	PMAP_LOCK(pm);
	for (; sva < eva; sva += PAGE_SIZE) {
		pvo = moea_pvo_find_va(pm, sva, &pteidx);
		if (pvo == NULL)
			continue;

		if ((prot & VM_PROT_EXECUTE) == 0)
			pvo->pvo_vaddr &= ~PVO_EXECUTABLE;

		/*
		 * Grab the PTE pointer before we diddle with the cached PTE
		 * copy.
		 */
		pt = moea_pvo_to_pte(pvo, pteidx);
		/*
		 * Change the protection of the page.
		 */
		pvo->pvo_pte.pte_lo &= ~PTE_PP;
		pvo->pvo_pte.pte_lo |= PTE_BR;

		/*
		 * If the PVO is in the page table, update that pte as well.
		 */
		if (pt != NULL) {
			moea_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
			mtx_unlock(&moea_table_mutex);
		}
	}
	vm_page_unlock_queues();
	PMAP_UNLOCK(pm);
}

/*
 * Map a list of wired pages into kernel virtual address space.  This is
 * intended for temporary mappings which do not need page modification or
 * references recorded.  Existing mappings in the region are overwritten.
 */
void
moea_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		moea_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
		va += PAGE_SIZE;
		m++;
	}
}

/*
 * Remove page mappings from kernel virtual address space.  Intended for
 * temporary mappings entered by moea_qenter.
 */
void
moea_qremove(mmu_t mmu, vm_offset_t sva, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		moea_kremove(mmu, va);
		va += PAGE_SIZE;
	}
}

void
moea_release(mmu_t mmu, pmap_t pmap)
{
	int idx, mask;

	/*
	 * Free segment register's VSID
	 */
	if (pmap->pm_sr[0] == 0)
		panic("moea_release");

	idx = VSID_TO_HASH(pmap->pm_sr[0]) & (NPMAPS-1);
	mask = 1 << (idx % VSID_NBPW);
	idx /= VSID_NBPW;
	moea_vsid_bitmap[idx] &= ~mask;
	PMAP_LOCK_DESTROY(pmap);
}

/*
 * Remove the given range of addresses from the specified map.
 */
void
moea_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
	struct pvo_entry *pvo;
	int	pteidx;

	vm_page_lock_queues();
	PMAP_LOCK(pm);
	for (; sva < eva; sva += PAGE_SIZE) {
		pvo = moea_pvo_find_va(pm, sva, &pteidx);
		if (pvo != NULL) {
			moea_pvo_remove(pvo, pteidx);
		}
	}
	PMAP_UNLOCK(pm);
	vm_page_unlock_queues();
}

/*
 * Remove physical page from all pmaps in which it resides.  moea_pvo_remove()
 * will reflect changes in pte's back to the vm_page.
 */
void
moea_remove_all(mmu_t mmu, vm_page_t m)
{
	struct pvo_head *pvo_head;
	struct pvo_entry *pvo, *next_pvo;
	pmap_t	pmap;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);

	pvo_head = vm_page_to_pvoh(m);
	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
		next_pvo = LIST_NEXT(pvo, pvo_vlink);

		MOEA_PVO_CHECK(pvo);	/* sanity check */
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		moea_pvo_remove(pvo, -1);
		PMAP_UNLOCK(pmap);
	}
	vm_page_flag_clear(m, PG_WRITEABLE);
}

/*
 * Allocate a physical page of memory directly from the phys_avail map.
 * Can only be called from moea_bootstrap before avail start and end are
 * calculated.
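 *
 * The allocation is carved out of a single phys_avail[] range: if it lines
 * up with the start or the end of the range, that range simply shrinks;
 * otherwise the range is split in two and the entries behind it are
 * shifted down to make room for the new pair.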
 */
static vm_offset_t
moea_bootstrap_alloc(vm_size_t size, u_int align)
{
	vm_offset_t	s, e;
	int		i, j;

	size = round_page(size);
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		if (align != 0)
			s = (phys_avail[i] + align - 1) & ~(align - 1);
		else
			s = phys_avail[i];
		e = s + size;

		if (s < phys_avail[i] || e > phys_avail[i + 1])
			continue;

		if (s == phys_avail[i]) {
			phys_avail[i] += size;
		} else if (e == phys_avail[i + 1]) {
			phys_avail[i + 1] -= size;
		} else {
			for (j = phys_avail_count * 2; j > i; j -= 2) {
				phys_avail[j] = phys_avail[j - 2];
				phys_avail[j + 1] = phys_avail[j - 1];
			}

			phys_avail[i + 3] = phys_avail[i + 1];
			phys_avail[i + 1] = s;
			phys_avail[i + 2] = e;
			phys_avail_count++;
		}

		return (s);
	}
	panic("moea_bootstrap_alloc: could not allocate memory");
}

static void
moea_syncicache(vm_offset_t pa, vm_size_t len)
{
	__syncicache((void *)pa, len);
}

static void
tlbia(void)
{
	caddr_t i;

	SYNC();
	for (i = 0; i < (caddr_t)0x00040000; i += 0x00001000) {
		TLBIE(i);
		EIEIO();
	}
	TLBSYNC();
	SYNC();
}

static int
moea_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
    vm_offset_t va, vm_offset_t pa, u_int pte_lo, int flags)
{
	struct pvo_entry *pvo;
	u_int	sr;
	int	first;
	u_int	ptegidx;
	int	i;
	int	bootstrap;

	moea_pvo_enter_calls++;
	first = 0;
	bootstrap = 0;

	/*
	 * Compute the PTE Group index.
	 */
	va &= ~ADDR_POFF;
	sr = va_to_sr(pm->pm_sr, va);
	ptegidx = va_to_pteg(sr, va);

	/*
	 * Remove any existing mapping for this page.  Reuse the pvo entry if
	 * there is a mapping.
	 */
	mtx_lock(&moea_table_mutex);
	LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) {
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
			if ((pvo->pvo_pte.pte_lo & PTE_RPGN) == pa &&
			    (pvo->pvo_pte.pte_lo & PTE_PP) ==
			    (pte_lo & PTE_PP)) {
				mtx_unlock(&moea_table_mutex);
				return (0);
			}
			moea_pvo_remove(pvo, -1);
			break;
		}
	}

	/*
	 * If we aren't overwriting a mapping, try to allocate.
	 */
	if (moea_initialized) {
		pvo = uma_zalloc(zone, M_NOWAIT);
	} else {
		if (moea_bpvo_pool_index >= BPVO_POOL_SIZE) {
			panic("moea_enter: bpvo pool exhausted, %d, %d, %d",
			    moea_bpvo_pool_index, BPVO_POOL_SIZE,
			    BPVO_POOL_SIZE * sizeof(struct pvo_entry));
		}
		pvo = &moea_bpvo_pool[moea_bpvo_pool_index];
		moea_bpvo_pool_index++;
		bootstrap = 1;
	}

	if (pvo == NULL) {
		mtx_unlock(&moea_table_mutex);
		return (ENOMEM);
	}

	moea_pvo_entries++;
	pvo->pvo_vaddr = va;
	pvo->pvo_pmap = pm;
	LIST_INSERT_HEAD(&moea_pvo_table[ptegidx], pvo, pvo_olink);
	pvo->pvo_vaddr &= ~ADDR_POFF;
	if (flags & VM_PROT_EXECUTE)
		pvo->pvo_vaddr |= PVO_EXECUTABLE;
	if (flags & PVO_WIRED)
		pvo->pvo_vaddr |= PVO_WIRED;
	if (pvo_head != &moea_pvo_kunmanaged)
		pvo->pvo_vaddr |= PVO_MANAGED;
	if (bootstrap)
		pvo->pvo_vaddr |= PVO_BOOTSTRAP;
	if (flags & PVO_FAKE)
		pvo->pvo_vaddr |= PVO_FAKE;

	moea_pte_create(&pvo->pvo_pte, sr, va, pa | pte_lo);

	/*
	 * Remember if the list was empty and therefore will be the first
	 * item.
	 */
	if (LIST_FIRST(pvo_head) == NULL)
		first = 1;
	LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);

	if (pvo->pvo_pte.pte_lo & PVO_WIRED)
		pm->pm_stats.wired_count++;
	pm->pm_stats.resident_count++;

	/*
	 * We hope this succeeds but it isn't required.
	 */
	i = moea_pte_insert(ptegidx, &pvo->pvo_pte);
	if (i >= 0) {
		PVO_PTEGIDX_SET(pvo, i);
	} else {
		panic("moea_pvo_enter: overflow");
		moea_pte_overflow++;
	}
	mtx_unlock(&moea_table_mutex);

	return (first ? ENOENT : 0);
}

static void
moea_pvo_remove(struct pvo_entry *pvo, int pteidx)
{
	struct pte *pt;

	/*
	 * If there is an active pte entry, we need to deactivate it (and
	 * save the ref & chg bits).
	 */
	pt = moea_pvo_to_pte(pvo, pteidx);
	if (pt != NULL) {
		moea_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
		mtx_unlock(&moea_table_mutex);
		PVO_PTEGIDX_CLR(pvo);
	} else {
		moea_pte_overflow--;
	}

	/*
	 * Update our statistics.
	 */
	pvo->pvo_pmap->pm_stats.resident_count--;
	if (pvo->pvo_pte.pte_lo & PVO_WIRED)
		pvo->pvo_pmap->pm_stats.wired_count--;

	/*
	 * Save the REF/CHG bits into their cache if the page is managed.
	 */
	if ((pvo->pvo_vaddr & (PVO_MANAGED|PVO_FAKE)) == PVO_MANAGED) {
		struct vm_page *pg;

		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte_lo & PTE_RPGN);
		if (pg != NULL) {
			moea_attr_save(pg, pvo->pvo_pte.pte_lo &
			    (PTE_REF | PTE_CHG));
		}
	}

	/*
	 * Remove this PVO from the PV list.
	 */
	LIST_REMOVE(pvo, pvo_vlink);

	/*
	 * Remove this from the overflow list and return it to the pool
	 * if we aren't going to reuse it.
	 */
	LIST_REMOVE(pvo, pvo_olink);
	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
		uma_zfree(pvo->pvo_vaddr & PVO_MANAGED ? moea_mpvo_zone :
		    moea_upvo_zone, pvo);
	moea_pvo_entries--;
	moea_pvo_remove_calls++;
}

static __inline int
moea_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
{
	int	pteidx;

	/*
	 * We can find the actual pte entry without searching by grabbing
	 * the PTEG index from 3 unused bits in pte_lo[11:9] and by
	 * noticing the HID bit.
	 */
	pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
	if (pvo->pvo_pte.pte_hi & PTE_HID)
		pteidx ^= moea_pteg_mask * 8;

	return (pteidx);
}

static struct pvo_entry *
moea_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p)
{
	struct pvo_entry *pvo;
	int	ptegidx;
	u_int	sr;

	va &= ~ADDR_POFF;
	sr = va_to_sr(pm->pm_sr, va);
	ptegidx = va_to_pteg(sr, va);

	mtx_lock(&moea_table_mutex);
	LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) {
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
			if (pteidx_p)
				*pteidx_p = moea_pvo_pte_index(pvo, ptegidx);
			break;
		}
	}
	mtx_unlock(&moea_table_mutex);

	return (pvo);
}

static struct pte *
moea_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
{
	struct pte *pt;

	/*
	 * If we haven't been supplied the ptegidx, calculate it.
static struct pte *
moea_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
{
	struct pte *pt;

	/*
	 * If we haven't been supplied the ptegidx, calculate it.
	 */
	if (pteidx == -1) {
		int ptegidx;
		u_int sr;

		sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr);
		ptegidx = va_to_pteg(sr, pvo->pvo_vaddr);
		pteidx = moea_pvo_pte_index(pvo, ptegidx);
	}

	pt = &moea_pteg_table[pteidx >> 3].pt[pteidx & 7];
	mtx_lock(&moea_table_mutex);

	if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
		panic("moea_pvo_to_pte: pvo %p has valid pte in pvo but no "
		    "valid pte index", pvo);
	}

	if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) {
		panic("moea_pvo_to_pte: pvo %p has valid pte index in pvo "
		    "but no valid pte", pvo);
	}

	if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) {
		if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) {
			panic("moea_pvo_to_pte: pvo %p has valid pte in "
			    "moea_pteg_table %p but invalid in pvo", pvo, pt);
		}

		if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF))
		    != 0) {
			panic("moea_pvo_to_pte: pvo %p pte does not match "
			    "pte %p in moea_pteg_table", pvo, pt);
		}

		mtx_assert(&moea_table_mutex, MA_OWNED);
		return (pt);
	}

	if (pvo->pvo_pte.pte_hi & PTE_VALID) {
		panic("moea_pvo_to_pte: pvo %p has invalid pte %p in "
		    "moea_pteg_table but valid in pvo", pvo, pt);
	}

	mtx_unlock(&moea_table_mutex);
	return (NULL);
}

/*
 * XXX: THIS STUFF SHOULD BE IN pte.c?
 */
int
moea_pte_spill(vm_offset_t addr)
{
	struct pvo_entry *source_pvo, *victim_pvo;
	struct pvo_entry *pvo;
	int ptegidx, i, j;
	u_int sr;
	struct pteg *pteg;
	struct pte *pt;

	moea_pte_spills++;

	sr = mfsrin(addr);
	ptegidx = va_to_pteg(sr, addr);

	/*
	 * Have to substitute some entry.  Use the primary hash for this.
	 * Use low bits of timebase as random generator.
	 */
	pteg = &moea_pteg_table[ptegidx];
	mtx_lock(&moea_table_mutex);
	__asm __volatile("mftb %0" : "=r"(i));
	i &= 7;
	pt = &pteg->pt[i];

	source_pvo = NULL;
	victim_pvo = NULL;
	LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) {
		/*
		 * We need to find a pvo entry for this address.
		 */
		MOEA_PVO_CHECK(pvo);
		if (source_pvo == NULL &&
		    moea_pte_match(&pvo->pvo_pte, sr, addr,
		    pvo->pvo_pte.pte_hi & PTE_HID)) {
			/*
			 * Now we have found an entry to be spilled into the
			 * pteg.  The PTE is now valid, so we know it's
			 * active.
			 */
			j = moea_pte_insert(ptegidx, &pvo->pvo_pte);

			if (j >= 0) {
				PVO_PTEGIDX_SET(pvo, j);
				moea_pte_overflow--;
				MOEA_PVO_CHECK(pvo);
				mtx_unlock(&moea_table_mutex);
				return (1);
			}

			source_pvo = pvo;

			if (victim_pvo != NULL)
				break;
		}

		/*
		 * We also need the pvo entry of the victim we are replacing
		 * so save the R & C bits of the PTE.
		 */
		if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
		    moea_pte_compare(pt, &pvo->pvo_pte)) {
			victim_pvo = pvo;
			if (source_pvo != NULL)
				break;
		}
	}

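	/*
	 * If no pvo matches the faulting address there is nothing we can
	 * spill; return failure.  Otherwise identify the victim pvo
	 * (searching the secondary hash bucket below if necessary) and
	 * replace its PTE with the source pvo's.
	 */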
	if (source_pvo == NULL) {
		mtx_unlock(&moea_table_mutex);
		return (0);
	}

	if (victim_pvo == NULL) {
		if ((pt->pte_hi & PTE_HID) == 0)
			panic("moea_pte_spill: victim p-pte (%p) has no pvo "
			    "entry", pt);

		/*
		 * If this is a secondary PTE, we need to search its primary
		 * pvo bucket for the matching PVO.
		 */
		LIST_FOREACH(pvo, &moea_pvo_table[ptegidx ^ moea_pteg_mask],
		    pvo_olink) {
			MOEA_PVO_CHECK(pvo);
			/*
			 * We also need the pvo entry of the victim we are
			 * replacing so save the R & C bits of the PTE.
			 */
			if (moea_pte_compare(pt, &pvo->pvo_pte)) {
				victim_pvo = pvo;
				break;
			}
		}

		if (victim_pvo == NULL)
			panic("moea_pte_spill: victim s-pte (%p) has no pvo "
			    "entry", pt);
	}

	/*
	 * We are invalidating the TLB entry for the EA we are replacing even
	 * though it's valid.  If we don't, we lose any ref/chg bit changes
	 * contained in the TLB entry.
	 */
	source_pvo->pvo_pte.pte_hi &= ~PTE_HID;

	moea_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr);
	moea_pte_set(pt, &source_pvo->pvo_pte);

	PVO_PTEGIDX_CLR(victim_pvo);
	PVO_PTEGIDX_SET(source_pvo, i);
	moea_pte_replacements++;

	MOEA_PVO_CHECK(victim_pvo);
	MOEA_PVO_CHECK(source_pvo);

	mtx_unlock(&moea_table_mutex);
	return (1);
}

static int
moea_pte_insert(u_int ptegidx, struct pte *pvo_pt)
{
	struct pte *pt;
	int i;

	mtx_assert(&moea_table_mutex, MA_OWNED);

	/*
	 * First try primary hash.
	 */
	for (pt = moea_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi &= ~PTE_HID;
			moea_pte_set(pt, pvo_pt);
			return (i);
		}
	}

	/*
	 * Now try secondary hash.
	 */
	ptegidx ^= moea_pteg_mask;

	for (pt = moea_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi |= PTE_HID;
			moea_pte_set(pt, pvo_pt);
			return (i);
		}
	}

	panic("moea_pte_insert: overflow");
	return (-1);
}

static boolean_t
moea_query_bit(vm_page_t m, int ptebit)
{
	struct pvo_entry *pvo;
	struct pte *pt;

#if 0
	if (moea_attr_fetch(m) & ptebit)
		return (TRUE);
#endif

	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		MOEA_PVO_CHECK(pvo);	/* sanity check */

		/*
		 * See if we saved the bit off.  If so, cache it and return
		 * success.
		 */
		if (pvo->pvo_pte.pte_lo & ptebit) {
			moea_attr_save(m, ptebit);
			MOEA_PVO_CHECK(pvo);	/* sanity check */
			return (TRUE);
		}
	}

	/*
	 * No luck, now go through the hard part of looking at the PTEs
	 * themselves.  Sync so that any pending REF/CHG bits are flushed to
	 * the PTEs.
	 */
	SYNC();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		MOEA_PVO_CHECK(pvo);	/* sanity check */

		/*
		 * See if this pvo has a valid PTE.  If so, fetch the
		 * REF/CHG bits from the valid PTE.  If the appropriate
		 * ptebit is set, cache it and return success.
		 */
		pt = moea_pvo_to_pte(pvo, -1);
		if (pt != NULL) {
			moea_pte_synch(pt, &pvo->pvo_pte);
			mtx_unlock(&moea_table_mutex);
			if (pvo->pvo_pte.pte_lo & ptebit) {
				moea_attr_save(m, ptebit);
				MOEA_PVO_CHECK(pvo);	/* sanity check */
				return (TRUE);
			}
		}
	}

	return (FALSE);
}

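/*
 * Clear the given REF/CHG bit in every mapping of the page.  Return the
 * number of mappings in which the bit was found set; if origbit is not
 * NULL, also return the union of the attribute bits seen before clearing.
 */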
static u_int
moea_clear_bit(vm_page_t m, int ptebit, int *origbit)
{
	u_int count;
	struct pvo_entry *pvo;
	struct pte *pt;
	int rv;

	/*
	 * Clear the cached value.
	 */
	rv = moea_attr_fetch(m);
	moea_attr_clear(m, ptebit);

	/*
	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
	 * we can reset the right ones).  Note that since the pvo entries and
	 * list heads are accessed via BAT0 and are never placed in the page
	 * table, we don't have to worry about further accesses setting the
	 * REF/CHG bits.
	 */
	SYNC();

	/*
	 * For each pvo entry, clear the pvo's ptebit.  If this pvo has a
	 * valid pte clear the ptebit from the valid pte.
	 */
	count = 0;
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		MOEA_PVO_CHECK(pvo);	/* sanity check */
		pt = moea_pvo_to_pte(pvo, -1);
		if (pt != NULL) {
			moea_pte_synch(pt, &pvo->pvo_pte);
			if (pvo->pvo_pte.pte_lo & ptebit) {
				count++;
				moea_pte_clear(pt, PVO_VADDR(pvo), ptebit);
			}
			mtx_unlock(&moea_table_mutex);
		}
		rv |= pvo->pvo_pte.pte_lo;
		pvo->pvo_pte.pte_lo &= ~ptebit;
		MOEA_PVO_CHECK(pvo);	/* sanity check */
	}

	if (origbit != NULL) {
		*origbit = rv;
	}

	return (count);
}

/*
 * Return 0 if the physical range is encompassed by battable[idx] and the
 * BAT entry is usable for device access; otherwise return an errno value.
 */
static int
moea_bat_mapped(int idx, vm_offset_t pa, vm_size_t size)
{
	u_int prot;
	u_int32_t start;
	u_int32_t end;
	u_int32_t bat_ble;

	/*
	 * Return immediately if not a valid mapping
	 */
	if (!(battable[idx].batu & BAT_Vs))
		return (EINVAL);

	/*
	 * The BAT entry must be cache-inhibited, guarded, and r/w
	 * so it can function as an i/o page
	 */
	prot = battable[idx].batl & (BAT_I|BAT_G|BAT_PP_RW);
	if (prot != (BAT_I|BAT_G|BAT_PP_RW))
		return (EPERM);

	/*
	 * The address should be within the BAT range.  Assume that the
	 * start address in the BAT has the correct alignment (thus
	 * not requiring masking)
	 */
	start = battable[idx].batl & BAT_PBS;
	bat_ble = (battable[idx].batu & ~(BAT_EBS)) | 0x03;
	end = start | (bat_ble << 15) | 0x7fff;

	if ((pa < start) || ((pa + size) > end))
		return (ERANGE);

	return (0);
}

boolean_t
moea_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
{
	int i;

	/*
	 * This currently does not work for entries that
	 * overlap 256M BAT segments.
	 */

	for (i = 0; i < 16; i++)
		if (moea_bat_mapped(i, pa, size) == 0)
			return (0);

	return (EFAULT);
}

boolean_t
moea_page_executable(mmu_t mmu, vm_page_t pg)
{
	return ((moea_attr_fetch(pg) & PTE_EXEC) == PTE_EXEC);
}

/*
 * Map a set of physical memory pages into the kernel virtual
 * address space.  Return a pointer to where it is mapped.  This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 */
void *
moea_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
{
	vm_offset_t va, tmpva, ppa, offset;
	int i;

	ppa = trunc_page(pa);
	offset = pa & PAGE_MASK;
	size = roundup(offset + size, PAGE_SIZE);

	GIANT_REQUIRED;

	/*
	 * If the physical address lies within a valid BAT table entry,
	 * return the 1:1 mapping.  This currently doesn't work
	 * for regions that overlap 256M BAT segments.
	 */
	for (i = 0; i < 16; i++) {
		if (moea_bat_mapped(i, pa, size) == 0)
			return ((void *) pa);
	}

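	/*
	 * Not covered by a BAT: allocate kernel virtual address space and
	 * enter a mapping for each page with moea_kenter().
	 */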
	va = kmem_alloc_nofault(kernel_map, size);
	if (!va)
		panic("moea_mapdev: Couldn't alloc kernel virtual memory");

	for (tmpva = va; size > 0;) {
		moea_kenter(mmu, tmpva, ppa);
		TLBIE(tmpva); /* XXX or should it be invalidate-all ? */
		size -= PAGE_SIZE;
		tmpva += PAGE_SIZE;
		ppa += PAGE_SIZE;
	}

	return ((void *)(va + offset));
}

void
moea_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
{
	vm_offset_t base, offset;

	/*
	 * If this is outside kernel virtual space, then it's a
	 * battable entry and doesn't require unmapping
	 */
	if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
		base = trunc_page(va);
		offset = va & PAGE_MASK;
		size = roundup(offset + size, PAGE_SIZE);
		kmem_free(kernel_map, base, size);
	}
}