/*
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef lint
static const char rcsid[] =
  "$FreeBSD$";
#endif /* not lint */

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this module is called upon to
 * provide software-use-only maps which may or may not be stored in the
 * same form as hardware maps.  These pseudo-maps are used to store
 * intermediate results from copy operations to and from address spaces.
 *
 * Since the information managed by this module is also stored by the
 * logical address mapping module, this module may throw away valid virtual
 * to physical mappings at almost any time.  However, invalidations of
 * mappings must be done as requested.
 *
 * In order to cope with hardware architectures which make virtual to
 * physical map invalidates expensive, this module may delay invalidate
 * reduced protection operations until such time as they are actually
 * necessary.  This module is given full information as to which processors
 * are currently using which maps, and to when physical maps must be made
 * correct.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/powerpc.h>
#include <machine/bat.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/pte.h>
#include <machine/sr.h>

#define	PMAP_DEBUG

#define	TODO	panic("%s: not implemented", __func__);

#define	PMAP_LOCK(pm)
#define	PMAP_UNLOCK(pm)

#define	TLBIE(va)	__asm __volatile("tlbie %0" :: "r"(va))
#define	TLBSYNC()	__asm __volatile("tlbsync");
#define	SYNC()		__asm __volatile("sync");
#define	EIEIO()		__asm __volatile("eieio");

#define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
#define	VSID_TO_SR(vsid)	((vsid) & 0xf)
#define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)

#define	PVO_PTEGIDX_MASK	0x0007		/* which PTEG slot */
#define	PVO_PTEGIDX_VALID	0x0008		/* slot is valid */
#define	PVO_WIRED		0x0010		/* PVO entry is wired */
#define	PVO_MANAGED		0x0020		/* PVO entry is managed */
#define	PVO_EXECUTABLE		0x0040		/* PVO entry is executable */
#define	PVO_BOOTSTRAP		0x0080		/* PVO entry allocated during
						   bootstrap */
#define	PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
#define	PVO_ISEXECUTABLE(pvo)	((pvo)->pvo_vaddr & PVO_EXECUTABLE)
#define	PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
#define	PVO_PTEGIDX_ISSET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
#define	PVO_PTEGIDX_CLR(pvo)	\
	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
#define	PVO_PTEGIDX_SET(pvo, i)	\
	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))

#define	PMAP_PVO_CHECK(pvo)

struct ofw_map {
	vm_offset_t	om_va;
	vm_size_t	om_len;
	vm_offset_t	om_pa;
	u_int		om_mode;
};

int	pmap_bootstrapped = 0;

/*
 * Virtual and physical address of message buffer.
 */
struct		msgbuf *msgbufp;
vm_offset_t	msgbuf_phys;

/*
 * Physical addresses of first and last available physical page.
 */
vm_offset_t avail_start;
vm_offset_t avail_end;

/*
 * Map of physical memory regions.
 */
vm_offset_t	phys_avail[128];
u_int		phys_avail_count;
static struct	mem_region *regions;
static struct	mem_region *pregions;
int		regions_sz, pregions_sz;
static struct	ofw_map *translations;

/*
 * First and last available kernel virtual addresses.
 */
vm_offset_t virtual_avail;
vm_offset_t virtual_end;
vm_offset_t kernel_vm_end;

/*
 * Kernel pmap.
 */
struct pmap kernel_pmap_store;
extern struct pmap ofw_pmap;

/*
 * PTEG data.
 */
static struct	pteg *pmap_pteg_table;
u_int		pmap_pteg_count;
u_int		pmap_pteg_mask;

/*
 * PVO data.
 */
struct	pvo_head *pmap_pvo_table;		/* pvo entries by pteg index */
struct	pvo_head pmap_pvo_kunmanaged =
    LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged);	/* list of unmanaged pages */
struct	pvo_head pmap_pvo_unmanaged =
    LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged);	/* list of unmanaged pages */

uma_zone_t	pmap_upvo_zone;	/* zone for pvo entries for unmanaged pages */
uma_zone_t	pmap_mpvo_zone;	/* zone for pvo entries for managed pages */
struct		vm_object pmap_upvo_zone_obj;
struct		vm_object pmap_mpvo_zone_obj;
static vm_object_t	pmap_pvo_obj;
static u_int		pmap_pvo_count;

#define	BPVO_POOL_SIZE	32768
static struct	pvo_entry *pmap_bpvo_pool;
static int	pmap_bpvo_pool_index = 0;

#define	VSID_NBPW	(sizeof(u_int32_t) * 8)
static u_int	pmap_vsid_bitmap[NPMAPS / VSID_NBPW];

static boolean_t pmap_initialized = FALSE;

/*
 * Statistics.
 */
u_int	pmap_pte_valid = 0;
u_int	pmap_pte_overflow = 0;
u_int	pmap_pte_replacements = 0;
u_int	pmap_pvo_entries = 0;
u_int	pmap_pvo_enter_calls = 0;
u_int	pmap_pvo_remove_calls = 0;
u_int	pmap_pte_spills = 0;
SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_valid, CTLFLAG_RD, &pmap_pte_valid,
    0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_overflow, CTLFLAG_RD,
    &pmap_pte_overflow, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_replacements, CTLFLAG_RD,
    &pmap_pte_replacements, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_entries, CTLFLAG_RD, &pmap_pvo_entries,
    0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_enter_calls, CTLFLAG_RD,
    &pmap_pvo_enter_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_remove_calls, CTLFLAG_RD,
    &pmap_pvo_remove_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_spills, CTLFLAG_RD,
    &pmap_pte_spills, 0, "");

struct	pvo_entry *pmap_pvo_zeropage;

vm_offset_t	pmap_rkva_start = VM_MIN_KERNEL_ADDRESS;
u_int		pmap_rkva_count = 4;

/*
 * Allocate physical memory for use in pmap_bootstrap.
 */
static vm_offset_t	pmap_bootstrap_alloc(vm_size_t, u_int);

/*
 * PTE calls.
 */
static int		pmap_pte_insert(u_int, struct pte *);

/*
 * PVO calls.
 */
static int	pmap_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *,
		    vm_offset_t, vm_offset_t, u_int, int);
static void	pmap_pvo_remove(struct pvo_entry *, int);
static struct	pvo_entry *pmap_pvo_find_va(pmap_t, vm_offset_t, int *);
static struct	pte *pmap_pvo_to_pte(const struct pvo_entry *, int);

/*
 * Utility routines.
 */
static void *		pmap_pvo_allocf(uma_zone_t, int, u_int8_t *, int);
static struct		pvo_entry *pmap_rkva_alloc(void);
static void		pmap_pa_map(struct pvo_entry *, vm_offset_t,
			    struct pte *, int *);
static void		pmap_pa_unmap(struct pvo_entry *, struct pte *, int *);
static void		pmap_syncicache(vm_offset_t, vm_size_t);
static boolean_t	pmap_query_bit(vm_page_t, int);
static boolean_t	pmap_clear_bit(vm_page_t, int);
static void		tlbia(void);

static __inline int
va_to_sr(u_int *sr, vm_offset_t va)
{
	return (sr[(uintptr_t)va >> ADDR_SR_SHFT]);
}

static __inline u_int
va_to_pteg(u_int sr, vm_offset_t addr)
{
	u_int hash;

	hash = (sr & SR_VSID_MASK) ^ (((u_int)addr & ADDR_PIDX) >>
	    ADDR_PIDX_SHFT);
	return (hash & pmap_pteg_mask);
}

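/*
 * Illustrative note on the hashing above (the numbers are example values,
 * not anything computed by this file): va_to_pteg() implements the usual
 * PowerPC primary hash, XORing the low VSID bits with the page index of
 * the effective address.  A VSID of 0x00123 and a page index of 0x0030f,
 * say, hash to 0x00123 ^ 0x0030f = 0x0022c, which is then masked with
 * pmap_pteg_mask to select the primary PTEG.  The secondary PTEG used
 * elsewhere in this file is simply the complement, hash ^ pmap_pteg_mask.
 */
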
static __inline struct pvo_head *
pa_to_pvoh(vm_offset_t pa, vm_page_t *pg_p)
{
	struct	vm_page *pg;

	pg = PHYS_TO_VM_PAGE(pa);

	if (pg_p != NULL)
		*pg_p = pg;

	if (pg == NULL)
		return (&pmap_pvo_unmanaged);

	return (&pg->md.mdpg_pvoh);
}

static __inline struct pvo_head *
vm_page_to_pvoh(vm_page_t m)
{

	return (&m->md.mdpg_pvoh);
}

static __inline void
pmap_attr_clear(vm_page_t m, int ptebit)
{

	m->md.mdpg_attrs &= ~ptebit;
}

static __inline int
pmap_attr_fetch(vm_page_t m)
{

	return (m->md.mdpg_attrs);
}

static __inline void
pmap_attr_save(vm_page_t m, int ptebit)
{

	m->md.mdpg_attrs |= ptebit;
}

static __inline int
pmap_pte_compare(const struct pte *pt, const struct pte *pvo_pt)
{
	if (pt->pte_hi == pvo_pt->pte_hi)
		return (1);

	return (0);
}

static __inline int
pmap_pte_match(struct pte *pt, u_int sr, vm_offset_t va, int which)
{
	return (pt->pte_hi & ~PTE_VALID) ==
	    (((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
	    ((va >> ADDR_API_SHFT) & PTE_API) | which);
}

static __inline void
pmap_pte_create(struct pte *pt, u_int sr, vm_offset_t va, u_int pte_lo)
{
	/*
	 * Construct a PTE.  Default to IMB initially.  Valid bit only gets
	 * set when the real pte is set in memory.
	 *
	 * Note: Don't set the valid bit for correct operation of tlb update.
	 */
	pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
	    (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API);
	pt->pte_lo = pte_lo;
}

static __inline void
pmap_pte_synch(struct pte *pt, struct pte *pvo_pt)
{

	pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG);
}

static __inline void
pmap_pte_clear(struct pte *pt, vm_offset_t va, int ptebit)
{

	/*
	 * As shown in Section 7.6.3.2.3
	 */
	pt->pte_lo &= ~ptebit;
	TLBIE(va);
	EIEIO();
	TLBSYNC();
	SYNC();
}

static __inline void
pmap_pte_set(struct pte *pt, struct pte *pvo_pt)
{

	pvo_pt->pte_hi |= PTE_VALID;

	/*
	 * Update the PTE as defined in section 7.6.3.1.
	 * Note that the REF/CHG bits are from pvo_pt and thus should have
	 * been saved so this routine can restore them (if desired).
	 */
	pt->pte_lo = pvo_pt->pte_lo;
	EIEIO();
	pt->pte_hi = pvo_pt->pte_hi;
	SYNC();
	pmap_pte_valid++;
}

static __inline void
pmap_pte_unset(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
{

	pvo_pt->pte_hi &= ~PTE_VALID;

	/*
	 * Force the ref & chg bits back into the PTEs.
	 */
	SYNC();

	/*
	 * Invalidate the pte.
	 */
	pt->pte_hi &= ~PTE_VALID;

	SYNC();
	TLBIE(va);
	EIEIO();
	TLBSYNC();
	SYNC();

	/*
	 * Save the ref & chg bits.
	 */
	pmap_pte_synch(pt, pvo_pt);
	pmap_pte_valid--;
}

static __inline void
pmap_pte_change(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
{

	/*
	 * Invalidate the PTE
	 */
	pmap_pte_unset(pt, pvo_pt, va);
	pmap_pte_set(pt, pvo_pt);
}

/*
 * Quick sort callout for comparing memory regions.
 */
static int	mr_cmp(const void *a, const void *b);
static int	om_cmp(const void *a, const void *b);

static int
mr_cmp(const void *a, const void *b)
{
	const struct	mem_region *regiona;
	const struct	mem_region *regionb;

	regiona = a;
	regionb = b;
	if (regiona->mr_start < regionb->mr_start)
		return (-1);
	else if (regiona->mr_start > regionb->mr_start)
		return (1);
	else
		return (0);
}

static int
om_cmp(const void *a, const void *b)
{
	const struct	ofw_map *mapa;
	const struct	ofw_map *mapb;

	mapa = a;
	mapb = b;
	if (mapa->om_pa < mapb->om_pa)
		return (-1);
	else if (mapa->om_pa > mapb->om_pa)
		return (1);
	else
		return (0);
}

void
pmap_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
{
	ihandle_t	mmui;
	phandle_t	chosen, mmu;
	int		sz;
	int		i, j;
	vm_size_t	size, physsz;
	vm_offset_t	pa, va, off;
	u_int		batl, batu;

	/*
	 * Set up BAT0 to only map the lowest 256 MB area
	 */
	battable[0x0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
	battable[0x0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);

	/*
	 * Map PCI memory space.
	 */
	battable[0x8].batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0x8].batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);

	battable[0x9].batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0x9].batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs);

	battable[0xa].batl = BATL(0xa0000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0xa].batu = BATU(0xa0000000, BAT_BL_256M, BAT_Vs);

	battable[0xb].batl = BATL(0xb0000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0xb].batu = BATU(0xb0000000, BAT_BL_256M, BAT_Vs);

	/*
	 * Map obio devices.
	 */
	battable[0xf].batl = BATL(0xf0000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0xf].batu = BATU(0xf0000000, BAT_BL_256M, BAT_Vs);

	/*
	 * Use an IBAT and a DBAT to map the bottom segment of memory
	 * where we are.
	 */
	batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);
	batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
	__asm ("mtibatu 0,%0; mtibatl 0,%1; mtdbatu 0,%0; mtdbatl 0,%1"
	    :: "r"(batu), "r"(batl));

#if 0
	/* map frame buffer */
	batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs);
	batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW);
	__asm ("mtdbatu 1,%0; mtdbatl 1,%1"
	    :: "r"(batu), "r"(batl));
#endif

#if 1
	/* map pci space */
	batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);
	batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW);
	__asm ("mtdbatu 1,%0; mtdbatl 1,%1"
	    :: "r"(batu), "r"(batl));
#endif

	/*
	 * Set the start and end of kva.
	 */
	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_KERNEL_ADDRESS;

	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
	CTR0(KTR_PMAP, "pmap_bootstrap: physical memory");

	qsort(pregions, pregions_sz, sizeof(*pregions), mr_cmp);
	for (i = 0; i < pregions_sz; i++) {
		CTR3(KTR_PMAP, "physregion: %#x - %#x (%#x)",
		    pregions[i].mr_start,
		    pregions[i].mr_start + pregions[i].mr_size,
		    pregions[i].mr_size);
	}

	if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
		panic("pmap_bootstrap: phys_avail too small");
	qsort(regions, regions_sz, sizeof(*regions), mr_cmp);
	phys_avail_count = 0;
	physsz = 0;
	for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
		CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
		    regions[i].mr_start + regions[i].mr_size,
		    regions[i].mr_size);
		phys_avail[j] = regions[i].mr_start;
		phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
		phys_avail_count++;
		physsz += regions[i].mr_size;
	}
	physmem = btoc(physsz);

	/*
	 * Allocate PTEG table.
	 */
#ifdef PTEGCOUNT
	pmap_pteg_count = PTEGCOUNT;
#else
	pmap_pteg_count = 0x1000;

	while (pmap_pteg_count < physmem)
		pmap_pteg_count <<= 1;

	pmap_pteg_count >>= 1;
#endif /* PTEGCOUNT */

	size = pmap_pteg_count * sizeof(struct pteg);
	CTR2(KTR_PMAP, "pmap_bootstrap: %d PTEGs, %d bytes", pmap_pteg_count,
	    size);
	pmap_pteg_table = (struct pteg *)pmap_bootstrap_alloc(size, size);
	CTR1(KTR_PMAP, "pmap_bootstrap: PTEG table at %p", pmap_pteg_table);
	bzero((void *)pmap_pteg_table, pmap_pteg_count * sizeof(struct pteg));
	pmap_pteg_mask = pmap_pteg_count - 1;

	/*
	 * Allocate pv/overflow lists.
	 */
	size = sizeof(struct pvo_head) * pmap_pteg_count;
	pmap_pvo_table = (struct pvo_head *)pmap_bootstrap_alloc(size,
	    PAGE_SIZE);
	CTR1(KTR_PMAP, "pmap_bootstrap: PVO table at %p", pmap_pvo_table);
	for (i = 0; i < pmap_pteg_count; i++)
		LIST_INIT(&pmap_pvo_table[i]);

	/*
	 * Allocate the message buffer.
	 */
	msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE, 0);

	/*
	 * Initialise the unmanaged pvo pool.
	 */
	pmap_bpvo_pool = (struct pvo_entry *)pmap_bootstrap_alloc(
	    BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
	pmap_bpvo_pool_index = 0;

	/*
	 * Make sure kernel vsid is allocated as well as VSID 0.
	 */
	pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW]
	    |= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
	pmap_vsid_bitmap[0] |= 1;

	/*
	 * Set up the OpenFirmware pmap and add its mappings.
	 */
	pmap_pinit(&ofw_pmap);
	ofw_pmap.pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
	if ((chosen = OF_finddevice("/chosen")) == -1)
		panic("pmap_bootstrap: can't find /chosen");
	OF_getprop(chosen, "mmu", &mmui, 4);
	if ((mmu = OF_instance_to_package(mmui)) == -1)
		panic("pmap_bootstrap: can't get mmu package");
	if ((sz = OF_getproplen(mmu, "translations")) == -1)
		panic("pmap_bootstrap: can't get ofw translation count");
	translations = NULL;
	for (i = 0; phys_avail[i + 2] != 0; i += 2) {
		if (phys_avail[i + 1] >= sz)
			translations = (struct ofw_map *)phys_avail[i];
	}
	if (translations == NULL)
		panic("pmap_bootstrap: no space to copy translations");
	bzero(translations, sz);
	if (OF_getprop(mmu, "translations", translations, sz) == -1)
		panic("pmap_bootstrap: can't get ofw translations");
	CTR0(KTR_PMAP, "pmap_bootstrap: translations");
	sz /= sizeof(*translations);
	qsort(translations, sz, sizeof (*translations), om_cmp);
	for (i = 0; i < sz; i++) {
		CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
		    translations[i].om_pa, translations[i].om_va,
		    translations[i].om_len);

		/* Drop stuff below something? */

		/* Enter the pages? */
		for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
			struct	vm_page m;

			m.phys_addr = translations[i].om_pa + off;
			pmap_enter(&ofw_pmap, translations[i].om_va + off, &m,
			    VM_PROT_ALL, 1);
		}
	}
#ifdef SMP
	TLBSYNC();
#endif

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
	for (i = 0; i < 16; i++) {
		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT;
	}
	kernel_pmap->pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
	kernel_pmap->pm_active = ~0;

	/*
	 * Allocate a kernel stack with a guard page for thread0 and map it
	 * into the kernel page map.
	 */
	pa = pmap_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, 0);
	kstack0_phys = pa;
	kstack0 = virtual_avail + (KSTACK_GUARD_PAGES * PAGE_SIZE);
	CTR2(KTR_PMAP, "pmap_bootstrap: kstack0 at %#x (%#x)", kstack0_phys,
	    kstack0);
	virtual_avail += (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE;
	for (i = 0; i < KSTACK_PAGES; i++) {
		pa = kstack0_phys + i * PAGE_SIZE;
		va = kstack0 + i * PAGE_SIZE;
		pmap_kenter(va, pa);
		TLBIE(va);
	}

	/*
	 * Calculate the first and last available physical addresses.
	 */
	avail_start = phys_avail[0];
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	avail_end = phys_avail[i + 1];
	Maxmem = powerpc_btop(avail_end);

	/*
	 * Allocate virtual address space for the message buffer.
	 */
	msgbufp = (struct msgbuf *)virtual_avail;
	virtual_avail += round_page(MSGBUF_SIZE);

	/*
	 * Initialize hardware.
	 */
	for (i = 0; i < 16; i++) {
		mtsrin(i << ADDR_SR_SHFT, EMPTY_SEGMENT);
	}
	__asm __volatile ("mtsr %0,%1"
	    :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));
	__asm __volatile ("sync; mtsdr1 %0; isync"
	    :: "r"((u_int)pmap_pteg_table | (pmap_pteg_mask >> 10)));
	tlbia();

	pmap_bootstrapped++;
}

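/*
 * Illustrative example of the PTEG table sizing performed in
 * pmap_bootstrap() above (the figures are hypothetical, assuming 4KB pages
 * and a 64-byte PTEG): with 128MB of RAM, physmem is 32768 pages, so the
 * doubling loop settles on pmap_pteg_count = 16384.  That yields a
 * 16384 * 64 = 1MB page table, pmap_pteg_mask = 0x3fff, and an SDR1
 * HTABMASK field of pmap_pteg_mask >> 10 = 0xf, which is what the mtsdr1
 * above loads alongside the table's physical address.
 */
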
/*
 * Activate a user pmap.  The pmap must be activated before its address
 * space can be accessed in any way.
 */
void
pmap_activate(struct thread *td)
{
	pmap_t	pm, pmr;

	/*
	 * Load all the data we need up front to encourage the compiler to
	 * not issue any loads while we have interrupts disabled below.
	 */
	pm = &td->td_proc->p_vmspace->vm_pmap;

	KASSERT(pm->pm_active == 0, ("pmap_activate: pmap already active?"));

	if ((pmr = (pmap_t)pmap_kextract((vm_offset_t)pm)) == NULL)
		pmr = pm;

	pm->pm_active |= PCPU_GET(cpumask);
	PCPU_SET(curpmap, pmr);
}

void
pmap_deactivate(struct thread *td)
{
	pmap_t	pm;

	pm = &td->td_proc->p_vmspace->vm_pmap;
	pm->pm_active &= ~(PCPU_GET(cpumask));
	PCPU_SET(curpmap, NULL);
}

vm_offset_t
pmap_addr_hint(vm_object_t object, vm_offset_t va, vm_size_t size)
{

	return (va);
}

void
pmap_change_wiring(pmap_t pm, vm_offset_t va, boolean_t wired)
{
	struct	pvo_entry *pvo;

	pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL);

	if (pvo != NULL) {
		if (wired) {
			if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
				pm->pm_stats.wired_count++;
			pvo->pvo_vaddr |= PVO_WIRED;
		} else {
			if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
				pm->pm_stats.wired_count--;
			pvo->pvo_vaddr &= ~PVO_WIRED;
		}
	}
}

void
pmap_clear_modify(vm_page_t m)
{

	if (m->flags & PG_FICTITIOUS)
		return;
	pmap_clear_bit(m, PTE_CHG);
}

void
pmap_collect(void)
{
	TODO;
}

void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
	  vm_size_t len, vm_offset_t src_addr)
{

	/*
	 * This is not needed as it's mainly an optimisation.
	 * It may want to be implemented later though.
	 */
}

void
pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
{
	vm_offset_t	dst;
	vm_offset_t	src;

	dst = VM_PAGE_TO_PHYS(mdst);
	src = VM_PAGE_TO_PHYS(msrc);

	kcopy((void *)src, (void *)dst, PAGE_SIZE);
}

/*
 * Zero a page of physical memory by temporarily mapping it into the tlb.
 */
void
pmap_zero_page(vm_page_t m)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
	caddr_t	va;
	int	i;

	if (pa < SEGMENT_LENGTH) {
		va = (caddr_t) pa;
	} else if (pmap_initialized) {
		if (pmap_pvo_zeropage == NULL)
			pmap_pvo_zeropage = pmap_rkva_alloc();
		pmap_pa_map(pmap_pvo_zeropage, pa, NULL, NULL);
		va = (caddr_t)PVO_VADDR(pmap_pvo_zeropage);
	} else {
		panic("pmap_zero_page: can't zero pa %#x", pa);
	}

	bzero(va, PAGE_SIZE);

	for (i = PAGE_SIZE / CACHELINESIZE; i > 0; i--) {
		__asm __volatile("dcbz 0,%0" :: "r"(va));
		va += CACHELINESIZE;
	}

	if (pa >= SEGMENT_LENGTH)
		pmap_pa_unmap(pmap_pvo_zeropage, NULL, NULL);
}

void
pmap_zero_page_area(vm_page_t m, int off, int size)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
	caddr_t	va;
	int	i;

	if (pa < SEGMENT_LENGTH) {
		va = (caddr_t) pa;
	} else if (pmap_initialized) {
		if (pmap_pvo_zeropage == NULL)
			pmap_pvo_zeropage = pmap_rkva_alloc();
		pmap_pa_map(pmap_pvo_zeropage, pa, NULL, NULL);
		va = (caddr_t)PVO_VADDR(pmap_pvo_zeropage);
	} else {
		panic("pmap_zero_page_area: can't zero pa %#x", pa);
	}

	bzero(va, size);

	for (i = size / CACHELINESIZE; i > 0; i--) {
		__asm __volatile("dcbz 0,%0" :: "r"(va));
		va += CACHELINESIZE;
	}

	if (pa >= SEGMENT_LENGTH)
		pmap_pa_unmap(pmap_pvo_zeropage, NULL, NULL);
}

void
pmap_zero_page_idle(vm_page_t m)
{

	/* XXX this is called outside of Giant, is pmap_zero_page safe? */
	/* XXX maybe have a dedicated mapping for this to avoid the problem? */
	mtx_lock(&Giant);
	pmap_zero_page(m);
	mtx_unlock(&Giant);
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 */
void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
	   boolean_t wired)
{
	struct		pvo_head *pvo_head;
	uma_zone_t	zone;
	vm_page_t	pg;
	u_int		pte_lo, pvo_flags, was_exec, i;
	int		error;

	if (!pmap_initialized) {
		pvo_head = &pmap_pvo_kunmanaged;
		zone = pmap_upvo_zone;
		pvo_flags = 0;
		pg = NULL;
		was_exec = PTE_EXEC;
	} else {
		pvo_head = pa_to_pvoh(VM_PAGE_TO_PHYS(m), &pg);
		zone = pmap_mpvo_zone;
		pvo_flags = PVO_MANAGED;
		was_exec = 0;
	}

	/*
	 * If this is a managed page, and it's the first reference to the page,
	 * clear the execness of the page.  Otherwise fetch the execness.
	 */
	if (pg != NULL) {
		if (LIST_EMPTY(pvo_head)) {
			pmap_attr_clear(pg, PTE_EXEC);
		} else {
			was_exec = pmap_attr_fetch(pg) & PTE_EXEC;
		}
	}

	/*
	 * Assume the page is cache inhibited and access is guarded unless
	 * it's in our available memory array.
	 */
	pte_lo = PTE_I | PTE_G;
	for (i = 0; i < pregions_sz; i++) {
		if ((VM_PAGE_TO_PHYS(m) >= pregions[i].mr_start) &&
		    (VM_PAGE_TO_PHYS(m) <
			(pregions[i].mr_start + pregions[i].mr_size))) {
			pte_lo &= ~(PTE_I | PTE_G);
			break;
		}
	}

	if (prot & VM_PROT_WRITE)
		pte_lo |= PTE_BW;
	else
		pte_lo |= PTE_BR;

	pvo_flags |= (prot & VM_PROT_EXECUTE);

	if (wired)
		pvo_flags |= PVO_WIRED;

	error = pmap_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m),
	    pte_lo, pvo_flags);

	/*
	 * Flush the real page from the instruction cache if this page is
	 * mapped executable and cacheable and was not previously mapped (or
	 * was not mapped executable).
	 */
	if (error == 0 && (pvo_flags & PVO_EXECUTABLE) &&
	    (pte_lo & PTE_I) == 0 && was_exec == 0) {
		/*
		 * Flush the real memory from the cache.
		 */
		pmap_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
		if (pg != NULL)
			pmap_attr_save(pg, PTE_EXEC);
	}
}

vm_offset_t
pmap_extract(pmap_t pm, vm_offset_t va)
{
	struct	pvo_entry *pvo;

	pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL);

	if (pvo != NULL) {
		return ((pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF));
	}

	return (0);
}

/*
 * Grow the number of kernel page table entries.  Unneeded.
 */
void
pmap_growkernel(vm_offset_t addr)
{
}

void
pmap_init(vm_offset_t phys_start, vm_offset_t phys_end)
{

	CTR0(KTR_PMAP, "pmap_init");

	pmap_pvo_obj = vm_object_allocate(OBJT_PHYS, 16);
	pmap_pvo_count = 0;
	pmap_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
	uma_zone_set_allocf(pmap_upvo_zone, pmap_pvo_allocf);
	pmap_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
	uma_zone_set_allocf(pmap_mpvo_zone, pmap_pvo_allocf);
	pmap_initialized = TRUE;
}

void
pmap_init2(void)
{

	CTR0(KTR_PMAP, "pmap_init2");
}

boolean_t
pmap_is_modified(vm_page_t m)
{

	if (m->flags & PG_FICTITIOUS)
		return (FALSE);

	return (pmap_query_bit(m, PTE_CHG));
}

void
pmap_clear_reference(vm_page_t m)
{
	TODO;
}

/*
 * pmap_ts_referenced:
 *
 *	Return a count of reference bits for a page, clearing those bits.
 *	It is not necessary for every reference bit to be cleared, but it
 *	is necessary that 0 only be returned when there are truly no
 *	reference bits set.
 *
 *	XXX: The exact number of bits to check and clear is a matter that
 *	should be tested and standardized at some point in the future for
 *	optimal aging of shared pages.
 */

int
pmap_ts_referenced(vm_page_t m)
{
	TODO;
	return (0);
}

/*
 * Map a wired page into kernel virtual address space.
 */
void
pmap_kenter(vm_offset_t va, vm_offset_t pa)
{
	u_int	pte_lo;
	int	error;
	int	i;

#if 0
	if (va < VM_MIN_KERNEL_ADDRESS)
		panic("pmap_kenter: attempt to enter non-kernel address %#x",
		    va);
#endif

	pte_lo = PTE_I | PTE_G | PTE_BW;
	for (i = 0; phys_avail[i + 2] != 0; i += 2) {
		if (pa >= phys_avail[i] && pa < phys_avail[i + 1]) {
			pte_lo &= ~(PTE_I | PTE_G);
			break;
		}
	}

	error = pmap_pvo_enter(kernel_pmap, pmap_upvo_zone,
	    &pmap_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED);

	if (error != 0 && error != ENOENT)
		panic("pmap_kenter: failed to enter va %#x pa %#x: %d", va,
		    pa, error);

	/*
	 * Flush the real memory from the instruction cache.
	 */
	if ((pte_lo & (PTE_I | PTE_G)) == 0) {
		pmap_syncicache(pa, PAGE_SIZE);
	}
}

/*
 * Extract the physical page address associated with the given kernel virtual
 * address.
 */
vm_offset_t
pmap_kextract(vm_offset_t va)
{
	struct	pvo_entry *pvo;

	pvo = pmap_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL);
	if (pvo == NULL) {
		return (0);
	}

	return ((pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF));
}

/*
 * Remove a wired page from kernel virtual address space.
 */
void
pmap_kremove(vm_offset_t va)
{

	pmap_remove(kernel_pmap, va, roundup(va, PAGE_SIZE));
}

/*
 * Map a range of physical addresses into kernel virtual address space.
 *
 * The value passed in *virt is a suggested virtual address for the mapping.
 * Architectures which can support a direct-mapped physical to virtual region
 * can return the appropriate address within that region, leaving '*virt'
 * unchanged.  We cannot and therefore do not; *virt is updated with the
 * first usable address after the mapped region.
 */
vm_offset_t
pmap_map(vm_offset_t *virt, vm_offset_t pa_start, vm_offset_t pa_end, int prot)
{
	vm_offset_t	sva, va;

	sva = *virt;
	va = sva;
	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
		pmap_kenter(va, pa_start);
	*virt = va;
	return (sva);
}

int
pmap_mincore(pmap_t pmap, vm_offset_t addr)
{
	TODO;
	return (0);
}

void
pmap_object_init_pt(pmap_t pm, vm_offset_t addr, vm_object_t object,
		    vm_pindex_t pindex, vm_size_t size, int limit)
{

	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
	    ("pmap_object_init_pt: non current pmap"));
	/* XXX */
}

/*
 * Lower the permission for all mappings to a given page.
 */
void
pmap_page_protect(vm_page_t m, vm_prot_t prot)
{
	struct	pvo_head *pvo_head;
	struct	pvo_entry *pvo, *next_pvo;
	struct	pte *pt;

	/*
	 * Since the routine only downgrades protection, if the
	 * maximal protection is desired, there isn't any change
	 * to be made.
	 */
	if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) ==
	    (VM_PROT_READ|VM_PROT_WRITE))
		return;

	pvo_head = vm_page_to_pvoh(m);
	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
		next_pvo = LIST_NEXT(pvo, pvo_vlink);
		PMAP_PVO_CHECK(pvo);	/* sanity check */

		/*
		 * Downgrading to no mapping at all, we just remove the entry.
		 */
		if ((prot & VM_PROT_READ) == 0) {
			pmap_pvo_remove(pvo, -1);
			continue;
		}

		/*
		 * If EXEC permission is being revoked, just clear the flag
		 * in the PVO.
		 */
		if ((prot & VM_PROT_EXECUTE) == 0)
			pvo->pvo_vaddr &= ~PVO_EXECUTABLE;

		/*
		 * If this entry is already RO, don't diddle with the page
		 * table.
		 */
		if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) {
			PMAP_PVO_CHECK(pvo);
			continue;
		}

		/*
		 * Grab the PTE before we diddle the bits so pvo_to_pte can
		 * verify the pte contents are as expected.
		 */
		pt = pmap_pvo_to_pte(pvo, -1);
		pvo->pvo_pte.pte_lo &= ~PTE_PP;
		pvo->pvo_pte.pte_lo |= PTE_BR;
		if (pt != NULL)
			pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
		PMAP_PVO_CHECK(pvo);	/* sanity check */
	}
}

/*
 * Make the specified page pageable (or not).  Unneeded.
 */
void
pmap_pageable(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
	      boolean_t pageable)
{
}

/*
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page.  This count may
 * be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
 */
boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{
	TODO;
	return (0);
}

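/*
 * Illustrative note for pmap_pinit() below (the hash value is an example,
 * not computed here): each pmap is handed a single 20-bit hash and its 16
 * segment VSIDs are derived from it with VSID_MAKE(sr, hash), i.e.
 * sr | (hash << 4).  A hash of 0x12345, say, gives VSID 0x123450 for
 * segment 0 and 0x123453 for segment 3; VSID_TO_SR() and VSID_TO_HASH()
 * invert the encoding.
 */
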
static u_int pmap_vsidcontext;

void
pmap_pinit(pmap_t pmap)
{
	int	i, mask;
	u_int	entropy;

	entropy = 0;
	__asm __volatile("mftb %0" : "=r"(entropy));

	/*
	 * Allocate some segment registers for this pmap.
	 */
	for (i = 0; i < NPMAPS; i += VSID_NBPW) {
		u_int	hash, n;

		/*
		 * Create a new value by multiplying by a prime and adding in
		 * entropy from the timebase register.  This is to make the
		 * VSID more random so that the PT hash function collides
		 * less often.  (Note that the prime causes gcc to do shifts
		 * instead of a multiply.)
		 */
		pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy;
		hash = pmap_vsidcontext & (NPMAPS - 1);
		if (hash == 0)		/* 0 is special, avoid it */
			continue;
		n = hash >> 5;
		mask = 1 << (hash & (VSID_NBPW - 1));
		hash = (pmap_vsidcontext & 0xfffff);
		if (pmap_vsid_bitmap[n] & mask) {	/* collision? */
			/* anything free in this bucket? */
			if (pmap_vsid_bitmap[n] == 0xffffffff) {
				entropy = (pmap_vsidcontext >> 20);
				continue;
			}
			i = ffs(~pmap_vsid_bitmap[n]) - 1;
			mask = 1 << i;
			hash &= 0xfffff & ~(VSID_NBPW - 1);
			hash |= i;
		}
		pmap_vsid_bitmap[n] |= mask;
		for (i = 0; i < 16; i++)
			pmap->pm_sr[i] = VSID_MAKE(i, hash);
		return;
	}

	panic("pmap_pinit: out of segments");
}

/*
 * Initialize the pmap associated with process 0.
 */
void
pmap_pinit0(pmap_t pm)
{

	pmap_pinit(pm);
	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
}

void
pmap_pinit2(pmap_t pmap)
{
	/* XXX: Remove this stub when no longer called */
}

void
pmap_prefault(pmap_t pm, vm_offset_t va, vm_map_entry_t entry)
{
	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
	    ("pmap_prefault: non current pmap"));
	/* XXX */
}

/*
 * Set the physical protection on the specified range of this map as requested.
 */
void
pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{
	struct	pvo_entry *pvo;
	struct	pte *pt;
	int	pteidx;

	CTR4(KTR_PMAP, "pmap_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, sva,
	    eva, prot);

	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
	    ("pmap_protect: non current pmap"));

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		pmap_remove(pm, sva, eva);
		return;
	}

	for (; sva < eva; sva += PAGE_SIZE) {
		pvo = pmap_pvo_find_va(pm, sva, &pteidx);
		if (pvo == NULL)
			continue;

		if ((prot & VM_PROT_EXECUTE) == 0)
			pvo->pvo_vaddr &= ~PVO_EXECUTABLE;

		/*
		 * Grab the PTE pointer before we diddle with the cached PTE
		 * copy.
		 */
		pt = pmap_pvo_to_pte(pvo, pteidx);
		/*
		 * Change the protection of the page.
		 */
		pvo->pvo_pte.pte_lo &= ~PTE_PP;
		pvo->pvo_pte.pte_lo |= PTE_BR;

		/*
		 * If the PVO is in the page table, update that pte as well.
		 */
		if (pt != NULL)
			pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
	}
}

vm_offset_t
pmap_phys_address(int ppn)
{
	TODO;
	return (0);
}

/*
 * Map a list of wired pages into kernel virtual address space.  This is
 * intended for temporary mappings which do not need page modification or
 * references recorded.  Existing mappings in the region are overwritten.
 */
void
pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
{
	int	i;

	for (i = 0; i < count; i++, va += PAGE_SIZE)
		pmap_kenter(va, VM_PAGE_TO_PHYS(m[i]));
}

/*
 * Remove page mappings from kernel virtual address space.  Intended for
 * temporary mappings entered by pmap_qenter.
 */
void
pmap_qremove(vm_offset_t va, int count)
{
	int	i;

	for (i = 0; i < count; i++, va += PAGE_SIZE)
		pmap_kremove(va);
}

void
pmap_release(pmap_t pmap)
{
	TODO;
}

/*
 * Remove the given range of addresses from the specified map.
 */
void
pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
	struct	pvo_entry *pvo;
	int	pteidx;

	for (; sva < eva; sva += PAGE_SIZE) {
		pvo = pmap_pvo_find_va(pm, sva, &pteidx);
		if (pvo != NULL) {
			pmap_pvo_remove(pvo, pteidx);
		}
	}
}

/*
 * Remove all pages from specified address space, this aids process exit
 * speeds.  This is much faster than pmap_remove in the case of running down
 * an entire address space.  Only works for the current pmap.
 */
void
pmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{

	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
	    ("pmap_remove_pages: non current pmap"));
	pmap_remove(pm, sva, eva);
}

/*
 * Create the kernel stack and pcb for a new thread.
 * This routine directly affects the fork perf for a process and
 * create performance for a thread.
 */
void
pmap_new_thread(struct thread *td)
{
	vm_object_t	ksobj;
	vm_offset_t	ks;
	vm_page_t	m;
	u_int		i;

	/*
	 * Allocate object for the kstack.
	 */
	ksobj = vm_object_allocate(OBJT_DEFAULT, KSTACK_PAGES);
	td->td_kstack_obj = ksobj;

	/*
	 * Get a kernel virtual address for the kstack for this thread.
	 */
	ks = kmem_alloc_nofault(kernel_map,
	    (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE);
	if (ks == 0)
		panic("pmap_new_thread: kstack allocation failed");
	TLBIE(ks);
	ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
	td->td_kstack = ks;

	for (i = 0; i < KSTACK_PAGES; i++) {
		/*
		 * Get a kernel stack page.
		 */
		m = vm_page_grab(ksobj, i,
		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);

		/*
		 * Enter the page into the kernel address space.
		 */
		pmap_kenter(ks + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m));

		vm_page_wakeup(m);
		vm_page_flag_clear(m, PG_ZERO);
		m->valid = VM_PAGE_BITS_ALL;
	}
}

void
pmap_dispose_thread(struct thread *td)
{
	TODO;
}

void
pmap_swapin_thread(struct thread *td)
{
	TODO;
}

void
pmap_swapout_thread(struct thread *td)
{
	TODO;
}

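/*
 * Illustrative note for pmap_bootstrap_alloc() below (the addresses are
 * made-up example values): phys_avail[] holds (start, end) pairs
 * terminated by zero entries.  An allocation that lands in the middle of
 * a region, which can happen when an alignment is requested, splits one
 * pair into two: carving 0x2000 bytes at 0x00100000 out of the pair
 * (0x00000000, 0x08000000) leaves (0x00000000, 0x00100000) and
 * (0x00102000, 0x08000000).
 */
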
/*
 * Allocate a physical page of memory directly from the phys_avail map.
 * Can only be called from pmap_bootstrap before avail start and end are
 * calculated.
 */
static vm_offset_t
pmap_bootstrap_alloc(vm_size_t size, u_int align)
{
	vm_offset_t	s, e;
	int		i, j;

	size = round_page(size);
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		if (align != 0)
			s = (phys_avail[i] + align - 1) & ~(align - 1);
		else
			s = phys_avail[i];
		e = s + size;

		if (s < phys_avail[i] || e > phys_avail[i + 1])
			continue;

		if (s == phys_avail[i]) {
			phys_avail[i] += size;
		} else if (e == phys_avail[i + 1]) {
			phys_avail[i + 1] -= size;
		} else {
			for (j = phys_avail_count * 2; j > i; j -= 2) {
				phys_avail[j] = phys_avail[j - 2];
				phys_avail[j + 1] = phys_avail[j - 1];
			}

			phys_avail[i + 3] = phys_avail[i + 1];
			phys_avail[i + 1] = s;
			phys_avail[i + 2] = e;
			phys_avail_count++;
		}

		return (s);
	}
	panic("pmap_bootstrap_alloc: could not allocate memory");
}

/*
 * Return an unmapped pvo for a kernel virtual address.
 * Used by pmap functions that operate on physical pages.
 */
static struct pvo_entry *
pmap_rkva_alloc(void)
{
	struct	pvo_entry *pvo;
	struct	pte *pt;
	vm_offset_t	kva;
	int	pteidx;

	if (pmap_rkva_count == 0)
		panic("pmap_rkva_alloc: no more reserved KVAs");

	kva = pmap_rkva_start + (PAGE_SIZE * --pmap_rkva_count);
	pmap_kenter(kva, 0);

	pvo = pmap_pvo_find_va(kernel_pmap, kva, &pteidx);

	if (pvo == NULL)
		panic("pmap_kva_alloc: pmap_pvo_find_va failed");

	pt = pmap_pvo_to_pte(pvo, pteidx);

	if (pt == NULL)
		panic("pmap_kva_alloc: pmap_pvo_to_pte failed");

	pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
	PVO_PTEGIDX_CLR(pvo);

	pmap_pte_overflow++;

	return (pvo);
}

static void
pmap_pa_map(struct pvo_entry *pvo, vm_offset_t pa, struct pte *saved_pt,
    int *depth_p)
{
	struct	pte *pt;

	/*
	 * If this pvo already has a valid pte, we need to save it so it can
	 * be restored later.  We then just reload the new PTE over the old
	 * slot.
	 */
	if (saved_pt != NULL) {
		pt = pmap_pvo_to_pte(pvo, -1);

		if (pt != NULL) {
			pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
			PVO_PTEGIDX_CLR(pvo);
			pmap_pte_overflow++;
		}

		*saved_pt = pvo->pvo_pte;

		pvo->pvo_pte.pte_lo &= ~PTE_RPGN;
	}

	pvo->pvo_pte.pte_lo |= pa;

	if (!pmap_pte_spill(pvo->pvo_vaddr))
		panic("pmap_pa_map: could not spill pvo %p", pvo);

	if (depth_p != NULL)
		(*depth_p)++;
}

static void
pmap_pa_unmap(struct pvo_entry *pvo, struct pte *saved_pt, int *depth_p)
{
	struct	pte *pt;

	pt = pmap_pvo_to_pte(pvo, -1);

	if (pt != NULL) {
		pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
		PVO_PTEGIDX_CLR(pvo);
		pmap_pte_overflow++;
	}

	pvo->pvo_pte.pte_lo &= ~PTE_RPGN;

	/*
	 * If there is a saved PTE and it's valid, restore it and return.
1697 */ 1698 if (saved_pt != NULL && (saved_pt->pte_lo & PTE_RPGN) != 0) { 1699 if (depth_p != NULL && --(*depth_p) == 0) 1700 panic("pmap_pa_unmap: restoring but depth == 0"); 1701 1702 pvo->pvo_pte = *saved_pt; 1703 1704 if (!pmap_pte_spill(pvo->pvo_vaddr)) 1705 panic("pmap_pa_unmap: could not spill pvo %p", pvo); 1706 } 1707 } 1708 1709 static void 1710 pmap_syncicache(vm_offset_t pa, vm_size_t len) 1711 { 1712 __syncicache((void *)pa, len); 1713 } 1714 1715 static void 1716 tlbia(void) 1717 { 1718 caddr_t i; 1719 1720 SYNC(); 1721 for (i = 0; i < (caddr_t)0x00040000; i += 0x00001000) { 1722 TLBIE(i); 1723 EIEIO(); 1724 } 1725 TLBSYNC(); 1726 SYNC(); 1727 } 1728 1729 static int 1730 pmap_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head, 1731 vm_offset_t va, vm_offset_t pa, u_int pte_lo, int flags) 1732 { 1733 struct pvo_entry *pvo; 1734 u_int sr; 1735 int first; 1736 u_int ptegidx; 1737 int i; 1738 1739 pmap_pvo_enter_calls++; 1740 first = 0; 1741 1742 /* 1743 * Compute the PTE Group index. 1744 */ 1745 va &= ~ADDR_POFF; 1746 sr = va_to_sr(pm->pm_sr, va); 1747 ptegidx = va_to_pteg(sr, va); 1748 1749 /* 1750 * Remove any existing mapping for this page. Reuse the pvo entry if 1751 * there is a mapping. 1752 */ 1753 LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { 1754 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 1755 if ((pvo->pvo_pte.pte_lo & PTE_RPGN) == pa && 1756 (pvo->pvo_pte.pte_lo & PTE_PP) == 1757 (pte_lo & PTE_PP)) { 1758 return (0); 1759 } 1760 pmap_pvo_remove(pvo, -1); 1761 break; 1762 } 1763 } 1764 1765 /* 1766 * If we aren't overwriting a mapping, try to allocate. 1767 */ 1768 if (pmap_initialized) { 1769 pvo = uma_zalloc(zone, M_NOWAIT); 1770 } else { 1771 if (pmap_bpvo_pool_index >= BPVO_POOL_SIZE) { 1772 panic("pmap_enter: bpvo pool exhausted, %d, %d, %d", 1773 pmap_bpvo_pool_index, BPVO_POOL_SIZE, 1774 BPVO_POOL_SIZE * sizeof(struct pvo_entry)); 1775 } 1776 pvo = &pmap_bpvo_pool[pmap_bpvo_pool_index]; 1777 pmap_bpvo_pool_index++; 1778 pvo->pvo_vaddr |= PVO_BOOTSTRAP; 1779 } 1780 1781 if (pvo == NULL) { 1782 return (ENOMEM); 1783 } 1784 1785 pmap_pvo_entries++; 1786 pvo->pvo_vaddr = va; 1787 pvo->pvo_pmap = pm; 1788 LIST_INSERT_HEAD(&pmap_pvo_table[ptegidx], pvo, pvo_olink); 1789 pvo->pvo_vaddr &= ~ADDR_POFF; 1790 if (flags & VM_PROT_EXECUTE) 1791 pvo->pvo_vaddr |= PVO_EXECUTABLE; 1792 if (flags & PVO_WIRED) 1793 pvo->pvo_vaddr |= PVO_WIRED; 1794 if (pvo_head != &pmap_pvo_kunmanaged) 1795 pvo->pvo_vaddr |= PVO_MANAGED; 1796 pmap_pte_create(&pvo->pvo_pte, sr, va, pa | pte_lo); 1797 1798 /* 1799 * Remember if the list was empty and therefore will be the first 1800 * item. 1801 */ 1802 if (LIST_FIRST(pvo_head) == NULL) 1803 first = 1; 1804 1805 LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink); 1806 if (pvo->pvo_pte.pte_lo & PVO_WIRED) 1807 pvo->pvo_pmap->pm_stats.wired_count++; 1808 pvo->pvo_pmap->pm_stats.resident_count++; 1809 1810 /* 1811 * We hope this succeeds but it isn't required. 1812 */ 1813 i = pmap_pte_insert(ptegidx, &pvo->pvo_pte); 1814 if (i >= 0) { 1815 PVO_PTEGIDX_SET(pvo, i); 1816 } else { 1817 panic("pmap_pvo_enter: overflow"); 1818 pmap_pte_overflow++; 1819 } 1820 1821 return (first ? ENOENT : 0); 1822 } 1823 1824 static void 1825 pmap_pvo_remove(struct pvo_entry *pvo, int pteidx) 1826 { 1827 struct pte *pt; 1828 1829 /* 1830 * If there is an active pte entry, we need to deactivate it (and 1831 * save the ref & cfg bits). 
1832 */ 1833 pt = pmap_pvo_to_pte(pvo, pteidx); 1834 if (pt != NULL) { 1835 pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 1836 PVO_PTEGIDX_CLR(pvo); 1837 } else { 1838 pmap_pte_overflow--; 1839 } 1840 1841 /* 1842 * Update our statistics. 1843 */ 1844 pvo->pvo_pmap->pm_stats.resident_count--; 1845 if (pvo->pvo_pte.pte_lo & PVO_WIRED) 1846 pvo->pvo_pmap->pm_stats.wired_count--; 1847 1848 /* 1849 * Save the REF/CHG bits into their cache if the page is managed. 1850 */ 1851 if (pvo->pvo_vaddr & PVO_MANAGED) { 1852 struct vm_page *pg; 1853 1854 pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte_lo & PTE_RPGN); 1855 if (pg != NULL) { 1856 pmap_attr_save(pg, pvo->pvo_pte.pte_lo & 1857 (PTE_REF | PTE_CHG)); 1858 } 1859 } 1860 1861 /* 1862 * Remove this PVO from the PV list. 1863 */ 1864 LIST_REMOVE(pvo, pvo_vlink); 1865 1866 /* 1867 * Remove this from the overflow list and return it to the pool 1868 * if we aren't going to reuse it. 1869 */ 1870 LIST_REMOVE(pvo, pvo_olink); 1871 if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP)) 1872 uma_zfree(pvo->pvo_vaddr & PVO_MANAGED ? pmap_mpvo_zone : 1873 pmap_upvo_zone, pvo); 1874 pmap_pvo_entries--; 1875 pmap_pvo_remove_calls++; 1876 } 1877 1878 static __inline int 1879 pmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx) 1880 { 1881 int pteidx; 1882 1883 /* 1884 * We can find the actual pte entry without searching by grabbing 1885 * the PTEG index from 3 unused bits in pte_lo[11:9] and by 1886 * noticing the HID bit. 1887 */ 1888 pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo); 1889 if (pvo->pvo_pte.pte_hi & PTE_HID) 1890 pteidx ^= pmap_pteg_mask * 8; 1891 1892 return (pteidx); 1893 } 1894 1895 static struct pvo_entry * 1896 pmap_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p) 1897 { 1898 struct pvo_entry *pvo; 1899 int ptegidx; 1900 u_int sr; 1901 1902 va &= ~ADDR_POFF; 1903 sr = va_to_sr(pm->pm_sr, va); 1904 ptegidx = va_to_pteg(sr, va); 1905 1906 LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { 1907 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 1908 if (pteidx_p) 1909 *pteidx_p = pmap_pvo_pte_index(pvo, ptegidx); 1910 return (pvo); 1911 } 1912 } 1913 1914 return (NULL); 1915 } 1916 1917 static struct pte * 1918 pmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx) 1919 { 1920 struct pte *pt; 1921 1922 /* 1923 * If we haven't been supplied the ptegidx, calculate it. 
1924 */ 1925 if (pteidx == -1) { 1926 int ptegidx; 1927 u_int sr; 1928 1929 sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr); 1930 ptegidx = va_to_pteg(sr, pvo->pvo_vaddr); 1931 pteidx = pmap_pvo_pte_index(pvo, ptegidx); 1932 } 1933 1934 pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7]; 1935 1936 if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) { 1937 panic("pmap_pvo_to_pte: pvo %p has valid pte in pvo but no " 1938 "valid pte index", pvo); 1939 } 1940 1941 if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) { 1942 panic("pmap_pvo_to_pte: pvo %p has valid pte index in pvo " 1943 "pvo but no valid pte", pvo); 1944 } 1945 1946 if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) { 1947 if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) { 1948 panic("pmap_pvo_to_pte: pvo %p has valid pte in " 1949 "pmap_pteg_table %p but invalid in pvo", pvo, pt); 1950 } 1951 1952 if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF)) 1953 != 0) { 1954 panic("pmap_pvo_to_pte: pvo %p pte does not match " 1955 "pte %p in pmap_pteg_table", pvo, pt); 1956 } 1957 1958 return (pt); 1959 } 1960 1961 if (pvo->pvo_pte.pte_hi & PTE_VALID) { 1962 panic("pmap_pvo_to_pte: pvo %p has invalid pte %p in " 1963 "pmap_pteg_table but valid in pvo", pvo, pt); 1964 } 1965 1966 return (NULL); 1967 } 1968 1969 static void * 1970 pmap_pvo_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait) 1971 { 1972 vm_page_t m; 1973 1974 if (bytes != PAGE_SIZE) 1975 panic("pmap_pvo_allocf: benno was shortsighted. hit him."); 1976 1977 *flags = UMA_SLAB_PRIV; 1978 m = vm_page_alloc(pmap_pvo_obj, pmap_pvo_count, VM_ALLOC_SYSTEM); 1979 if (m == NULL) 1980 return (NULL); 1981 pmap_pvo_count++; 1982 return ((void *)VM_PAGE_TO_PHYS(m)); 1983 } 1984 1985 /* 1986 * XXX: THIS STUFF SHOULD BE IN pte.c? 1987 */ 1988 int 1989 pmap_pte_spill(vm_offset_t addr) 1990 { 1991 struct pvo_entry *source_pvo, *victim_pvo; 1992 struct pvo_entry *pvo; 1993 int ptegidx, i, j; 1994 u_int sr; 1995 struct pteg *pteg; 1996 struct pte *pt; 1997 1998 pmap_pte_spills++; 1999 2000 sr = mfsrin(addr); 2001 ptegidx = va_to_pteg(sr, addr); 2002 2003 /* 2004 * Have to substitute some entry. Use the primary hash for this. 2005 * Use low bits of timebase as random generator. 2006 */ 2007 pteg = &pmap_pteg_table[ptegidx]; 2008 __asm __volatile("mftb %0" : "=r"(i)); 2009 i &= 7; 2010 pt = &pteg->pt[i]; 2011 2012 source_pvo = NULL; 2013 victim_pvo = NULL; 2014 LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { 2015 /* 2016 * We need to find a pvo entry for this address. 2017 */ 2018 PMAP_PVO_CHECK(pvo); 2019 if (source_pvo == NULL && 2020 pmap_pte_match(&pvo->pvo_pte, sr, addr, 2021 pvo->pvo_pte.pte_hi & PTE_HID)) { 2022 /* 2023 * Now found an entry to be spilled into the pteg. 2024 * The PTE is now valid, so we know it's active. 2025 */ 2026 j = pmap_pte_insert(ptegidx, &pvo->pvo_pte); 2027 2028 if (j >= 0) { 2029 PVO_PTEGIDX_SET(pvo, j); 2030 pmap_pte_overflow--; 2031 PMAP_PVO_CHECK(pvo); 2032 return (1); 2033 } 2034 2035 source_pvo = pvo; 2036 2037 if (victim_pvo != NULL) 2038 break; 2039 } 2040 2041 /* 2042 * We also need the pvo entry of the victim we are replacing 2043 * so save the R & C bits of the PTE. 
2044 */ 2045 if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL && 2046 pmap_pte_compare(pt, &pvo->pvo_pte)) { 2047 victim_pvo = pvo; 2048 if (source_pvo != NULL) 2049 break; 2050 } 2051 } 2052 2053 if (source_pvo == NULL) 2054 return (0); 2055 2056 if (victim_pvo == NULL) { 2057 if ((pt->pte_hi & PTE_HID) == 0) 2058 panic("pmap_pte_spill: victim p-pte (%p) has no pvo" 2059 "entry", pt); 2060 2061 /* 2062 * If this is a secondary PTE, we need to search it's primary 2063 * pvo bucket for the matching PVO. 2064 */ 2065 LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx ^ pmap_pteg_mask], 2066 pvo_olink) { 2067 PMAP_PVO_CHECK(pvo); 2068 /* 2069 * We also need the pvo entry of the victim we are 2070 * replacing so save the R & C bits of the PTE. 2071 */ 2072 if (pmap_pte_compare(pt, &pvo->pvo_pte)) { 2073 victim_pvo = pvo; 2074 break; 2075 } 2076 } 2077 2078 if (victim_pvo == NULL) 2079 panic("pmap_pte_spill: victim s-pte (%p) has no pvo" 2080 "entry", pt); 2081 } 2082 2083 /* 2084 * We are invalidating the TLB entry for the EA we are replacing even 2085 * though it's valid. If we don't, we lose any ref/chg bit changes 2086 * contained in the TLB entry. 2087 */ 2088 source_pvo->pvo_pte.pte_hi &= ~PTE_HID; 2089 2090 pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr); 2091 pmap_pte_set(pt, &source_pvo->pvo_pte); 2092 2093 PVO_PTEGIDX_CLR(victim_pvo); 2094 PVO_PTEGIDX_SET(source_pvo, i); 2095 pmap_pte_replacements++; 2096 2097 PMAP_PVO_CHECK(victim_pvo); 2098 PMAP_PVO_CHECK(source_pvo); 2099 2100 return (1); 2101 } 2102 2103 static int 2104 pmap_pte_insert(u_int ptegidx, struct pte *pvo_pt) 2105 { 2106 struct pte *pt; 2107 int i; 2108 2109 /* 2110 * First try primary hash. 2111 */ 2112 for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) { 2113 if ((pt->pte_hi & PTE_VALID) == 0) { 2114 pvo_pt->pte_hi &= ~PTE_HID; 2115 pmap_pte_set(pt, pvo_pt); 2116 return (i); 2117 } 2118 } 2119 2120 /* 2121 * Now try secondary hash. 2122 */ 2123 ptegidx ^= pmap_pteg_mask; 2124 ptegidx++; 2125 for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) { 2126 if ((pt->pte_hi & PTE_VALID) == 0) { 2127 pvo_pt->pte_hi |= PTE_HID; 2128 pmap_pte_set(pt, pvo_pt); 2129 return (i); 2130 } 2131 } 2132 2133 panic("pmap_pte_insert: overflow"); 2134 return (-1); 2135 } 2136 2137 static boolean_t 2138 pmap_query_bit(vm_page_t m, int ptebit) 2139 { 2140 struct pvo_entry *pvo; 2141 struct pte *pt; 2142 2143 if (pmap_attr_fetch(m) & ptebit) 2144 return (TRUE); 2145 2146 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2147 PMAP_PVO_CHECK(pvo); /* sanity check */ 2148 2149 /* 2150 * See if we saved the bit off. If so, cache it and return 2151 * success. 2152 */ 2153 if (pvo->pvo_pte.pte_lo & ptebit) { 2154 pmap_attr_save(m, ptebit); 2155 PMAP_PVO_CHECK(pvo); /* sanity check */ 2156 return (TRUE); 2157 } 2158 } 2159 2160 /* 2161 * No luck, now go through the hard part of looking at the PTEs 2162 * themselves. Sync so that any pending REF/CHG bits are flushed to 2163 * the PTEs. 2164 */ 2165 SYNC(); 2166 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2167 PMAP_PVO_CHECK(pvo); /* sanity check */ 2168 2169 /* 2170 * See if this pvo has a valid PTE. if so, fetch the 2171 * REF/CHG bits from the valid PTE. If the appropriate 2172 * ptebit is set, cache it and return success. 
2173 */ 2174 pt = pmap_pvo_to_pte(pvo, -1); 2175 if (pt != NULL) { 2176 pmap_pte_synch(pt, &pvo->pvo_pte); 2177 if (pvo->pvo_pte.pte_lo & ptebit) { 2178 pmap_attr_save(m, ptebit); 2179 PMAP_PVO_CHECK(pvo); /* sanity check */ 2180 return (TRUE); 2181 } 2182 } 2183 } 2184 2185 return (TRUE); 2186 } 2187 2188 static boolean_t 2189 pmap_clear_bit(vm_page_t m, int ptebit) 2190 { 2191 struct pvo_entry *pvo; 2192 struct pte *pt; 2193 int rv; 2194 2195 /* 2196 * Clear the cached value. 2197 */ 2198 rv = pmap_attr_fetch(m); 2199 pmap_attr_clear(m, ptebit); 2200 2201 /* 2202 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so 2203 * we can reset the right ones). note that since the pvo entries and 2204 * list heads are accessed via BAT0 and are never placed in the page 2205 * table, we don't have to worry about further accesses setting the 2206 * REF/CHG bits. 2207 */ 2208 SYNC(); 2209 2210 /* 2211 * For each pvo entry, clear the pvo's ptebit. If this pvo has a 2212 * valid pte clear the ptebit from the valid pte. 2213 */ 2214 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2215 PMAP_PVO_CHECK(pvo); /* sanity check */ 2216 pt = pmap_pvo_to_pte(pvo, -1); 2217 if (pt != NULL) { 2218 pmap_pte_synch(pt, &pvo->pvo_pte); 2219 if (pvo->pvo_pte.pte_lo & ptebit) 2220 pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit); 2221 } 2222 rv |= pvo->pvo_pte.pte_lo; 2223 pvo->pvo_pte.pte_lo &= ~ptebit; 2224 PMAP_PVO_CHECK(pvo); /* sanity check */ 2225 } 2226 2227 return ((rv & ptebit) != 0); 2228 } 2229 2230 /* 2231 * Map a set of physical memory pages into the kernel virtual 2232 * address space. Return a pointer to where it is mapped. This 2233 * routine is intended to be used for mapping device memory, 2234 * NOT real memory. 2235 */ 2236 void * 2237 pmap_mapdev(vm_offset_t pa, vm_size_t size) 2238 { 2239 vm_offset_t va, tmpva, offset; 2240 2241 pa = trunc_page(pa); 2242 offset = pa & PAGE_MASK; 2243 size = roundup(offset + size, PAGE_SIZE); 2244 2245 GIANT_REQUIRED; 2246 2247 va = kmem_alloc_pageable(kernel_map, size); 2248 if (!va) 2249 panic("pmap_mapdev: Couldn't alloc kernel virtual memory"); 2250 2251 for (tmpva = va; size > 0;) { 2252 pmap_kenter(tmpva, pa); 2253 TLBIE(tmpva); /* XXX or should it be invalidate-all ? */ 2254 size -= PAGE_SIZE; 2255 tmpva += PAGE_SIZE; 2256 pa += PAGE_SIZE; 2257 } 2258 2259 return ((void *)(va + offset)); 2260 } 2261 2262 void 2263 pmap_unmapdev(vm_offset_t va, vm_size_t size) 2264 { 2265 vm_offset_t base, offset; 2266 2267 base = trunc_page(va); 2268 offset = va & PAGE_MASK; 2269 size = roundup(offset + size, PAGE_SIZE); 2270 kmem_free(kernel_map, base, size); 2271 } 2272