1 /* 2 * Copyright (c) 2001 The NetBSD Foundation, Inc. 3 * All rights reserved. 4 * 5 * This code is derived from software contributed to The NetBSD Foundation 6 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by the NetBSD 19 * Foundation, Inc. and its contributors. 20 * 4. Neither the name of The NetBSD Foundation nor the names of its 21 * contributors may be used to endorse or promote products derived 22 * from this software without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 35 */ 36 /* 37 * Copyright (C) 1995, 1996 Wolfgang Solfrank. 38 * Copyright (C) 1995, 1996 TooLs GmbH. 39 * All rights reserved. 40 * 41 * Redistribution and use in source and binary forms, with or without 42 * modification, are permitted provided that the following conditions 43 * are met: 44 * 1. Redistributions of source code must retain the above copyright 45 * notice, this list of conditions and the following disclaimer. 46 * 2. Redistributions in binary form must reproduce the above copyright 47 * notice, this list of conditions and the following disclaimer in the 48 * documentation and/or other materials provided with the distribution. 49 * 3. All advertising materials mentioning features or use of this software 50 * must display the following acknowledgement: 51 * This product includes software developed by TooLs GmbH. 52 * 4. The name of TooLs GmbH may not be used to endorse or promote products 53 * derived from this software without specific prior written permission. 54 * 55 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR 56 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 57 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
58 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 60 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 61 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 62 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 63 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 64 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 65 * 66 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $ 67 */ 68 /* 69 * Copyright (C) 2001 Benno Rice. 70 * All rights reserved. 71 * 72 * Redistribution and use in source and binary forms, with or without 73 * modification, are permitted provided that the following conditions 74 * are met: 75 * 1. Redistributions of source code must retain the above copyright 76 * notice, this list of conditions and the following disclaimer. 77 * 2. Redistributions in binary form must reproduce the above copyright 78 * notice, this list of conditions and the following disclaimer in the 79 * documentation and/or other materials provided with the distribution. 80 * 81 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR 82 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 83 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 84 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 85 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 86 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 87 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 88 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 89 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 90 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 91 */ 92 93 #include <sys/cdefs.h> 94 __FBSDID("$FreeBSD$"); 95 96 /* 97 * Manages physical address maps. 98 * 99 * In addition to hardware address maps, this module is called upon to 100 * provide software-use-only maps which may or may not be stored in the 101 * same form as hardware maps. These pseudo-maps are used to store 102 * intermediate results from copy operations to and from address spaces. 103 * 104 * Since the information managed by this module is also stored by the 105 * logical address mapping module, this module may throw away valid virtual 106 * to physical mappings at almost any time. However, invalidations of 107 * mappings must be done as requested. 108 * 109 * In order to cope with hardware architectures which make virtual to 110 * physical map invalidates expensive, this module may delay invalidate 111 * reduced protection operations until such time as they are actually 112 * necessary. This module is given full information as to which processors 113 * are currently using which maps, and to when physical maps must be made 114 * correct. 
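 *
 * On the PowerPC OEA MMU this is built around the hardware hashed page
 * table: translations live in a table of PTE groups (PTEGs), and every
 * mapping is shadowed by a software pvo entry so that evicted PTEs can
 * be reconstructed and their REF/CHG state preserved.  Physical memory
 * is additionally reachable through the on-demand BAT entries set up
 * during bootstrap.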
115 */ 116 117 #include "opt_kstack_pages.h" 118 119 #include <sys/param.h> 120 #include <sys/kernel.h> 121 #include <sys/ktr.h> 122 #include <sys/lock.h> 123 #include <sys/msgbuf.h> 124 #include <sys/mutex.h> 125 #include <sys/proc.h> 126 #include <sys/sysctl.h> 127 #include <sys/systm.h> 128 #include <sys/vmmeter.h> 129 130 #include <dev/ofw/openfirm.h> 131 132 #include <vm/vm.h> 133 #include <vm/vm_param.h> 134 #include <vm/vm_kern.h> 135 #include <vm/vm_page.h> 136 #include <vm/vm_map.h> 137 #include <vm/vm_object.h> 138 #include <vm/vm_extern.h> 139 #include <vm/vm_pageout.h> 140 #include <vm/vm_pager.h> 141 #include <vm/uma.h> 142 143 #include <machine/cpu.h> 144 #include <machine/powerpc.h> 145 #include <machine/bat.h> 146 #include <machine/frame.h> 147 #include <machine/md_var.h> 148 #include <machine/psl.h> 149 #include <machine/pte.h> 150 #include <machine/sr.h> 151 152 #define PMAP_DEBUG 153 154 #define TODO panic("%s: not implemented", __func__); 155 156 #define PMAP_LOCK(pm) 157 #define PMAP_UNLOCK(pm) 158 159 #define TLBIE(va) __asm __volatile("tlbie %0" :: "r"(va)) 160 #define TLBSYNC() __asm __volatile("tlbsync"); 161 #define SYNC() __asm __volatile("sync"); 162 #define EIEIO() __asm __volatile("eieio"); 163 164 #define VSID_MAKE(sr, hash) ((sr) | (((hash) & 0xfffff) << 4)) 165 #define VSID_TO_SR(vsid) ((vsid) & 0xf) 166 #define VSID_TO_HASH(vsid) (((vsid) >> 4) & 0xfffff) 167 168 #define PVO_PTEGIDX_MASK 0x0007 /* which PTEG slot */ 169 #define PVO_PTEGIDX_VALID 0x0008 /* slot is valid */ 170 #define PVO_WIRED 0x0010 /* PVO entry is wired */ 171 #define PVO_MANAGED 0x0020 /* PVO entry is managed */ 172 #define PVO_EXECUTABLE 0x0040 /* PVO entry is executable */ 173 #define PVO_BOOTSTRAP 0x0080 /* PVO entry allocated during 174 bootstrap */ 175 #define PVO_VADDR(pvo) ((pvo)->pvo_vaddr & ~ADDR_POFF) 176 #define PVO_ISEXECUTABLE(pvo) ((pvo)->pvo_vaddr & PVO_EXECUTABLE) 177 #define PVO_PTEGIDX_GET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK) 178 #define PVO_PTEGIDX_ISSET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID) 179 #define PVO_PTEGIDX_CLR(pvo) \ 180 ((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK))) 181 #define PVO_PTEGIDX_SET(pvo, i) \ 182 ((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID)) 183 184 #define PMAP_PVO_CHECK(pvo) 185 186 struct ofw_map { 187 vm_offset_t om_va; 188 vm_size_t om_len; 189 vm_offset_t om_pa; 190 u_int om_mode; 191 }; 192 193 int pmap_bootstrapped = 0; 194 195 /* 196 * Virtual and physical address of message buffer. 197 */ 198 struct msgbuf *msgbufp; 199 vm_offset_t msgbuf_phys; 200 201 int pmap_pagedaemon_waken; 202 203 /* 204 * Map of physical memory regions. 205 */ 206 vm_offset_t phys_avail[128]; 207 u_int phys_avail_count; 208 static struct mem_region *regions; 209 static struct mem_region *pregions; 210 int regions_sz, pregions_sz; 211 static struct ofw_map *translations; 212 213 /* 214 * First and last available kernel virtual addresses. 215 */ 216 vm_offset_t virtual_avail; 217 vm_offset_t virtual_end; 218 vm_offset_t kernel_vm_end; 219 220 /* 221 * Kernel pmap. 222 */ 223 struct pmap kernel_pmap_store; 224 extern struct pmap ofw_pmap; 225 226 /* 227 * PTEG data. 228 */ 229 static struct pteg *pmap_pteg_table; 230 u_int pmap_pteg_count; 231 u_int pmap_pteg_mask; 232 233 /* 234 * PVO data. 
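 *
 * Each pvo_entry describes a single virtual-to-physical mapping.  It is
 * linked into two lists: pmap_pvo_table[ptegidx] via pvo_olink (the
 * overflow list for its PTE group) and the owning page's list via
 * pvo_vlink (used by per-page operations such as pmap_page_protect).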
235 */ 236 struct pvo_head *pmap_pvo_table; /* pvo entries by pteg index */ 237 struct pvo_head pmap_pvo_kunmanaged = 238 LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged); /* list of unmanaged pages */ 239 struct pvo_head pmap_pvo_unmanaged = 240 LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged); /* list of unmanaged pages */ 241 242 uma_zone_t pmap_upvo_zone; /* zone for pvo entries for unmanaged pages */ 243 uma_zone_t pmap_mpvo_zone; /* zone for pvo entries for managed pages */ 244 245 #define BPVO_POOL_SIZE 32768 246 static struct pvo_entry *pmap_bpvo_pool; 247 static int pmap_bpvo_pool_index = 0; 248 249 #define VSID_NBPW (sizeof(u_int32_t) * 8) 250 static u_int pmap_vsid_bitmap[NPMAPS / VSID_NBPW]; 251 252 static boolean_t pmap_initialized = FALSE; 253 254 /* 255 * Statistics. 256 */ 257 u_int pmap_pte_valid = 0; 258 u_int pmap_pte_overflow = 0; 259 u_int pmap_pte_replacements = 0; 260 u_int pmap_pvo_entries = 0; 261 u_int pmap_pvo_enter_calls = 0; 262 u_int pmap_pvo_remove_calls = 0; 263 u_int pmap_pte_spills = 0; 264 SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_valid, CTLFLAG_RD, &pmap_pte_valid, 265 0, ""); 266 SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_overflow, CTLFLAG_RD, 267 &pmap_pte_overflow, 0, ""); 268 SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_replacements, CTLFLAG_RD, 269 &pmap_pte_replacements, 0, ""); 270 SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_entries, CTLFLAG_RD, &pmap_pvo_entries, 271 0, ""); 272 SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_enter_calls, CTLFLAG_RD, 273 &pmap_pvo_enter_calls, 0, ""); 274 SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_remove_calls, CTLFLAG_RD, 275 &pmap_pvo_remove_calls, 0, ""); 276 SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_spills, CTLFLAG_RD, 277 &pmap_pte_spills, 0, ""); 278 279 struct pvo_entry *pmap_pvo_zeropage; 280 281 vm_offset_t pmap_rkva_start = VM_MIN_KERNEL_ADDRESS; 282 u_int pmap_rkva_count = 4; 283 284 /* 285 * Allocate physical memory for use in pmap_bootstrap. 286 */ 287 static vm_offset_t pmap_bootstrap_alloc(vm_size_t, u_int); 288 289 /* 290 * PTE calls. 291 */ 292 static int pmap_pte_insert(u_int, struct pte *); 293 294 /* 295 * PVO calls. 296 */ 297 static int pmap_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *, 298 vm_offset_t, vm_offset_t, u_int, int); 299 static void pmap_pvo_remove(struct pvo_entry *, int); 300 static struct pvo_entry *pmap_pvo_find_va(pmap_t, vm_offset_t, int *); 301 static struct pte *pmap_pvo_to_pte(const struct pvo_entry *, int); 302 303 /* 304 * Utility routines. 
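 *
 * The inline helpers below implement the address translation scheme:
 * the top four bits of a virtual address select a segment register
 * (and thus a VSID), and the primary PTE group index is
 *
 *	ptegidx = ((sr & SR_VSID_MASK) ^ ((va & ADDR_PIDX) >> ADDR_PIDX_SHFT))
 *	    & pmap_pteg_mask
 *
 * The secondary group is reached by XORing with pmap_pteg_mask (see
 * pmap_pte_insert()); entries placed there carry the PTE_HID bit.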
305 */ 306 static struct pvo_entry *pmap_rkva_alloc(void); 307 static void pmap_pa_map(struct pvo_entry *, vm_offset_t, 308 struct pte *, int *); 309 static void pmap_pa_unmap(struct pvo_entry *, struct pte *, int *); 310 static void pmap_syncicache(vm_offset_t, vm_size_t); 311 static boolean_t pmap_query_bit(vm_page_t, int); 312 static u_int pmap_clear_bit(vm_page_t, int, int *); 313 static void tlbia(void); 314 315 static __inline int 316 va_to_sr(u_int *sr, vm_offset_t va) 317 { 318 return (sr[(uintptr_t)va >> ADDR_SR_SHFT]); 319 } 320 321 static __inline u_int 322 va_to_pteg(u_int sr, vm_offset_t addr) 323 { 324 u_int hash; 325 326 hash = (sr & SR_VSID_MASK) ^ (((u_int)addr & ADDR_PIDX) >> 327 ADDR_PIDX_SHFT); 328 return (hash & pmap_pteg_mask); 329 } 330 331 static __inline struct pvo_head * 332 pa_to_pvoh(vm_offset_t pa, vm_page_t *pg_p) 333 { 334 struct vm_page *pg; 335 336 pg = PHYS_TO_VM_PAGE(pa); 337 338 if (pg_p != NULL) 339 *pg_p = pg; 340 341 if (pg == NULL) 342 return (&pmap_pvo_unmanaged); 343 344 return (&pg->md.mdpg_pvoh); 345 } 346 347 static __inline struct pvo_head * 348 vm_page_to_pvoh(vm_page_t m) 349 { 350 351 return (&m->md.mdpg_pvoh); 352 } 353 354 static __inline void 355 pmap_attr_clear(vm_page_t m, int ptebit) 356 { 357 358 m->md.mdpg_attrs &= ~ptebit; 359 } 360 361 static __inline int 362 pmap_attr_fetch(vm_page_t m) 363 { 364 365 return (m->md.mdpg_attrs); 366 } 367 368 static __inline void 369 pmap_attr_save(vm_page_t m, int ptebit) 370 { 371 372 m->md.mdpg_attrs |= ptebit; 373 } 374 375 static __inline int 376 pmap_pte_compare(const struct pte *pt, const struct pte *pvo_pt) 377 { 378 if (pt->pte_hi == pvo_pt->pte_hi) 379 return (1); 380 381 return (0); 382 } 383 384 static __inline int 385 pmap_pte_match(struct pte *pt, u_int sr, vm_offset_t va, int which) 386 { 387 return (pt->pte_hi & ~PTE_VALID) == 388 (((sr & SR_VSID_MASK) << PTE_VSID_SHFT) | 389 ((va >> ADDR_API_SHFT) & PTE_API) | which); 390 } 391 392 static __inline void 393 pmap_pte_create(struct pte *pt, u_int sr, vm_offset_t va, u_int pte_lo) 394 { 395 /* 396 * Construct a PTE. Default to IMB initially. Valid bit only gets 397 * set when the real pte is set in memory. 398 * 399 * Note: Don't set the valid bit for correct operation of tlb update. 400 */ 401 pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) | 402 (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API); 403 pt->pte_lo = pte_lo; 404 } 405 406 static __inline void 407 pmap_pte_synch(struct pte *pt, struct pte *pvo_pt) 408 { 409 410 pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG); 411 } 412 413 static __inline void 414 pmap_pte_clear(struct pte *pt, vm_offset_t va, int ptebit) 415 { 416 417 /* 418 * As shown in Section 7.6.3.2.3 419 */ 420 pt->pte_lo &= ~ptebit; 421 TLBIE(va); 422 EIEIO(); 423 TLBSYNC(); 424 SYNC(); 425 } 426 427 static __inline void 428 pmap_pte_set(struct pte *pt, struct pte *pvo_pt) 429 { 430 431 pvo_pt->pte_hi |= PTE_VALID; 432 433 /* 434 * Update the PTE as defined in section 7.6.3.1. 435 * Note that the REF/CHG bits are from pvo_pt and thus should havce 436 * been saved so this routine can restore them (if desired). 437 */ 438 pt->pte_lo = pvo_pt->pte_lo; 439 EIEIO(); 440 pt->pte_hi = pvo_pt->pte_hi; 441 SYNC(); 442 pmap_pte_valid++; 443 } 444 445 static __inline void 446 pmap_pte_unset(struct pte *pt, struct pte *pvo_pt, vm_offset_t va) 447 { 448 449 pvo_pt->pte_hi &= ~PTE_VALID; 450 451 /* 452 * Force the reg & chg bits back into the PTEs. 453 */ 454 SYNC(); 455 456 /* 457 * Invalidate the pte. 
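 * Clearing PTE_VALID in pte_hi makes the hardware ignore the slot; the
 * tlbie/tlbsync/sync sequence that follows flushes any cached
 * translation for va before the REF/CHG bits are harvested.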
458 */ 459 pt->pte_hi &= ~PTE_VALID; 460 461 SYNC(); 462 TLBIE(va); 463 EIEIO(); 464 TLBSYNC(); 465 SYNC(); 466 467 /* 468 * Save the reg & chg bits. 469 */ 470 pmap_pte_synch(pt, pvo_pt); 471 pmap_pte_valid--; 472 } 473 474 static __inline void 475 pmap_pte_change(struct pte *pt, struct pte *pvo_pt, vm_offset_t va) 476 { 477 478 /* 479 * Invalidate the PTE 480 */ 481 pmap_pte_unset(pt, pvo_pt, va); 482 pmap_pte_set(pt, pvo_pt); 483 } 484 485 /* 486 * Quick sort callout for comparing memory regions. 487 */ 488 static int mr_cmp(const void *a, const void *b); 489 static int om_cmp(const void *a, const void *b); 490 491 static int 492 mr_cmp(const void *a, const void *b) 493 { 494 const struct mem_region *regiona; 495 const struct mem_region *regionb; 496 497 regiona = a; 498 regionb = b; 499 if (regiona->mr_start < regionb->mr_start) 500 return (-1); 501 else if (regiona->mr_start > regionb->mr_start) 502 return (1); 503 else 504 return (0); 505 } 506 507 static int 508 om_cmp(const void *a, const void *b) 509 { 510 const struct ofw_map *mapa; 511 const struct ofw_map *mapb; 512 513 mapa = a; 514 mapb = b; 515 if (mapa->om_pa < mapb->om_pa) 516 return (-1); 517 else if (mapa->om_pa > mapb->om_pa) 518 return (1); 519 else 520 return (0); 521 } 522 523 void 524 pmap_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend) 525 { 526 ihandle_t mmui; 527 phandle_t chosen, mmu; 528 int sz; 529 int i, j; 530 int ofw_mappings; 531 vm_size_t size, physsz; 532 vm_offset_t pa, va, off; 533 u_int batl, batu; 534 535 /* 536 * Set up BAT0 to map the lowest 256 MB area 537 */ 538 battable[0x0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW); 539 battable[0x0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs); 540 541 /* 542 * Map PCI memory space. 543 */ 544 battable[0x8].batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW); 545 battable[0x8].batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs); 546 547 battable[0x9].batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW); 548 battable[0x9].batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs); 549 550 battable[0xa].batl = BATL(0xa0000000, BAT_I|BAT_G, BAT_PP_RW); 551 battable[0xa].batu = BATU(0xa0000000, BAT_BL_256M, BAT_Vs); 552 553 battable[0xb].batl = BATL(0xb0000000, BAT_I|BAT_G, BAT_PP_RW); 554 battable[0xb].batu = BATU(0xb0000000, BAT_BL_256M, BAT_Vs); 555 556 /* 557 * Map obio devices. 558 */ 559 battable[0xf].batl = BATL(0xf0000000, BAT_I|BAT_G, BAT_PP_RW); 560 battable[0xf].batu = BATU(0xf0000000, BAT_BL_256M, BAT_Vs); 561 562 /* 563 * Use an IBAT and a DBAT to map the bottom segment of memory 564 * where we are. 565 */ 566 batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs); 567 batl = BATL(0x00000000, BAT_M, BAT_PP_RW); 568 __asm ("mtibatu 0,%0; mtibatl 0,%1; mtdbatu 0,%0; mtdbatl 0,%1" 569 :: "r"(batu), "r"(batl)); 570 571 #if 0 572 /* map frame buffer */ 573 batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs); 574 batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW); 575 __asm ("mtdbatu 1,%0; mtdbatl 1,%1" 576 :: "r"(batu), "r"(batl)); 577 #endif 578 579 #if 1 580 /* map pci space */ 581 batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs); 582 batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW); 583 __asm ("mtdbatu 1,%0; mtdbatl 1,%1" 584 :: "r"(batu), "r"(batl)); 585 #endif 586 587 /* 588 * Set the start and end of kva. 
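 * The BAT entries installed above give the kernel 1:1 access to
 * physical memory and device space while the page table is still being
 * built; managed KVA itself runs from VM_MIN_KERNEL_ADDRESS to
 * VM_MAX_KERNEL_ADDRESS.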
 */
	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_KERNEL_ADDRESS;

	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
	CTR0(KTR_PMAP, "pmap_bootstrap: physical memory");

	qsort(pregions, pregions_sz, sizeof(*pregions), mr_cmp);
	for (i = 0; i < pregions_sz; i++) {
		vm_offset_t pa;
		vm_offset_t end;

		CTR3(KTR_PMAP, "physregion: %#x - %#x (%#x)",
		    pregions[i].mr_start,
		    pregions[i].mr_start + pregions[i].mr_size,
		    pregions[i].mr_size);
		/*
		 * Install entries into the BAT table to allow all
		 * of physmem to be covered by on-demand BAT entries.
		 * The loop will sometimes set the same battable element
		 * twice, but that's fine since they won't be used for
		 * a while yet.
		 */
		pa = pregions[i].mr_start & 0xf0000000;
		end = pregions[i].mr_start + pregions[i].mr_size;
		do {
			u_int n = pa >> ADDR_SR_SHFT;

			battable[n].batl = BATL(pa, BAT_M, BAT_PP_RW);
			battable[n].batu = BATU(pa, BAT_BL_256M, BAT_Vs);
			pa += SEGMENT_LENGTH;
		} while (pa < end);
	}

	if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
		panic("pmap_bootstrap: phys_avail too small");
	qsort(regions, regions_sz, sizeof(*regions), mr_cmp);
	phys_avail_count = 0;
	physsz = 0;
	for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
		CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
		    regions[i].mr_start + regions[i].mr_size,
		    regions[i].mr_size);
		phys_avail[j] = regions[i].mr_start;
		phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
		phys_avail_count++;
		physsz += regions[i].mr_size;
	}
	physmem = btoc(physsz);

	/*
	 * Allocate PTEG table.
	 */
#ifdef PTEGCOUNT
	pmap_pteg_count = PTEGCOUNT;
#else
	pmap_pteg_count = 0x1000;

	while (pmap_pteg_count < physmem)
		pmap_pteg_count <<= 1;

	pmap_pteg_count >>= 1;
#endif /* PTEGCOUNT */

	size = pmap_pteg_count * sizeof(struct pteg);
	CTR2(KTR_PMAP, "pmap_bootstrap: %d PTEGs, %d bytes", pmap_pteg_count,
	    size);
	pmap_pteg_table = (struct pteg *)pmap_bootstrap_alloc(size, size);
	CTR1(KTR_PMAP, "pmap_bootstrap: PTEG table at %p", pmap_pteg_table);
	bzero((void *)pmap_pteg_table, pmap_pteg_count * sizeof(struct pteg));
	pmap_pteg_mask = pmap_pteg_count - 1;

	/*
	 * Allocate pv/overflow lists.
	 */
	size = sizeof(struct pvo_head) * pmap_pteg_count;
	pmap_pvo_table = (struct pvo_head *)pmap_bootstrap_alloc(size,
	    PAGE_SIZE);
	CTR1(KTR_PMAP, "pmap_bootstrap: PVO table at %p", pmap_pvo_table);
	for (i = 0; i < pmap_pteg_count; i++)
		LIST_INIT(&pmap_pvo_table[i]);

	/*
	 * Allocate the message buffer.
	 */
	msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE, 0);

	/*
	 * Initialise the unmanaged pvo pool.
	 */
	pmap_bpvo_pool = (struct pvo_entry *)pmap_bootstrap_alloc(
		BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
	pmap_bpvo_pool_index = 0;

	/*
	 * Make sure kernel vsid is allocated as well as VSID 0.
	 */
	pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW]
		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
	pmap_vsid_bitmap[0] |= 1;

	/*
	 * Set up the OpenFirmware pmap and add its mappings.
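 * The mappings are read back from the firmware "translations" property
 * and re-entered into ofw_pmap so that Open Firmware calls keep
 * working; 1:1 translations are skipped since the on-demand BAT entries
 * already cover them.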
692 */ 693 pmap_pinit(&ofw_pmap); 694 ofw_pmap.pm_sr[KERNEL_SR] = KERNEL_SEGMENT; 695 ofw_pmap.pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT; 696 if ((chosen = OF_finddevice("/chosen")) == -1) 697 panic("pmap_bootstrap: can't find /chosen"); 698 OF_getprop(chosen, "mmu", &mmui, 4); 699 if ((mmu = OF_instance_to_package(mmui)) == -1) 700 panic("pmap_bootstrap: can't get mmu package"); 701 if ((sz = OF_getproplen(mmu, "translations")) == -1) 702 panic("pmap_bootstrap: can't get ofw translation count"); 703 translations = NULL; 704 for (i = 0; phys_avail[i + 2] != 0; i += 2) { 705 if (phys_avail[i + 1] >= sz) 706 translations = (struct ofw_map *)phys_avail[i]; 707 } 708 if (translations == NULL) 709 panic("pmap_bootstrap: no space to copy translations"); 710 bzero(translations, sz); 711 if (OF_getprop(mmu, "translations", translations, sz) == -1) 712 panic("pmap_bootstrap: can't get ofw translations"); 713 CTR0(KTR_PMAP, "pmap_bootstrap: translations"); 714 sz /= sizeof(*translations); 715 qsort(translations, sz, sizeof (*translations), om_cmp); 716 for (i = 0, ofw_mappings = 0; i < sz; i++) { 717 CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x", 718 translations[i].om_pa, translations[i].om_va, 719 translations[i].om_len); 720 721 /* 722 * If the mapping is 1:1, let the RAM and device on-demand 723 * BAT tables take care of the translation. 724 */ 725 if (translations[i].om_va == translations[i].om_pa) 726 continue; 727 728 /* Enter the pages */ 729 for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) { 730 struct vm_page m; 731 732 m.phys_addr = translations[i].om_pa + off; 733 pmap_enter(&ofw_pmap, translations[i].om_va + off, &m, 734 VM_PROT_ALL, 1); 735 ofw_mappings++; 736 } 737 } 738 #ifdef SMP 739 TLBSYNC(); 740 #endif 741 742 /* 743 * Initialize the kernel pmap (which is statically allocated). 744 */ 745 for (i = 0; i < 16; i++) { 746 kernel_pmap->pm_sr[i] = EMPTY_SEGMENT; 747 } 748 kernel_pmap->pm_sr[KERNEL_SR] = KERNEL_SEGMENT; 749 kernel_pmap->pm_sr[KERNEL2_SR] = KERNEL_SEGMENT; 750 kernel_pmap->pm_active = ~0; 751 752 /* 753 * Allocate a kernel stack with a guard page for thread0 and map it 754 * into the kernel page map. 755 */ 756 pa = pmap_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, 0); 757 kstack0_phys = pa; 758 kstack0 = virtual_avail + (KSTACK_GUARD_PAGES * PAGE_SIZE); 759 CTR2(KTR_PMAP, "pmap_bootstrap: kstack0 at %#x (%#x)", kstack0_phys, 760 kstack0); 761 virtual_avail += (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE; 762 for (i = 0; i < KSTACK_PAGES; i++) { 763 pa = kstack0_phys + i * PAGE_SIZE; 764 va = kstack0 + i * PAGE_SIZE; 765 pmap_kenter(va, pa); 766 TLBIE(va); 767 } 768 769 /* 770 * Calculate the last available physical address. 771 */ 772 for (i = 0; phys_avail[i + 2] != 0; i += 2) 773 ; 774 Maxmem = powerpc_btop(phys_avail[i + 1]); 775 776 /* 777 * Allocate virtual address space for the message buffer. 778 */ 779 msgbufp = (struct msgbuf *)virtual_avail; 780 virtual_avail += round_page(MSGBUF_SIZE); 781 782 /* 783 * Initialize hardware. 784 */ 785 for (i = 0; i < 16; i++) { 786 mtsrin(i << ADDR_SR_SHFT, EMPTY_SEGMENT); 787 } 788 __asm __volatile ("mtsr %0,%1" 789 :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT)); 790 __asm __volatile ("sync; mtsdr1 %0; isync" 791 :: "r"((u_int)pmap_pteg_table | (pmap_pteg_mask >> 10))); 792 tlbia(); 793 794 pmap_bootstrapped++; 795 } 796 797 /* 798 * Activate a user pmap. The pmap must be activated before it's address 799 * space can be accessed in any way. 
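 * Activation records the pmap in the per-CPU curpmap pointer (using
 * the physical address of the pmap structure when pmap_kextract() can
 * resolve one) and marks this CPU in pm_active.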
800 */ 801 void 802 pmap_activate(struct thread *td) 803 { 804 pmap_t pm, pmr; 805 806 /* 807 * Load all the data we need up front to encourage the compiler to 808 * not issue any loads while we have interrupts disabled below. 809 */ 810 pm = &td->td_proc->p_vmspace->vm_pmap; 811 812 if ((pmr = (pmap_t)pmap_kextract((vm_offset_t)pm)) == NULL) 813 pmr = pm; 814 815 pm->pm_active |= PCPU_GET(cpumask); 816 PCPU_SET(curpmap, pmr); 817 } 818 819 void 820 pmap_deactivate(struct thread *td) 821 { 822 pmap_t pm; 823 824 pm = &td->td_proc->p_vmspace->vm_pmap; 825 pm->pm_active &= ~(PCPU_GET(cpumask)); 826 PCPU_SET(curpmap, NULL); 827 } 828 829 vm_offset_t 830 pmap_addr_hint(vm_object_t object, vm_offset_t va, vm_size_t size) 831 { 832 833 return (va); 834 } 835 836 void 837 pmap_change_wiring(pmap_t pm, vm_offset_t va, boolean_t wired) 838 { 839 struct pvo_entry *pvo; 840 841 pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL); 842 843 if (pvo != NULL) { 844 if (wired) { 845 if ((pvo->pvo_vaddr & PVO_WIRED) == 0) 846 pm->pm_stats.wired_count++; 847 pvo->pvo_vaddr |= PVO_WIRED; 848 } else { 849 if ((pvo->pvo_vaddr & PVO_WIRED) != 0) 850 pm->pm_stats.wired_count--; 851 pvo->pvo_vaddr &= ~PVO_WIRED; 852 } 853 } 854 } 855 856 void 857 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, 858 vm_size_t len, vm_offset_t src_addr) 859 { 860 861 /* 862 * This is not needed as it's mainly an optimisation. 863 * It may want to be implemented later though. 864 */ 865 } 866 867 void 868 pmap_copy_page(vm_page_t msrc, vm_page_t mdst) 869 { 870 vm_offset_t dst; 871 vm_offset_t src; 872 873 dst = VM_PAGE_TO_PHYS(mdst); 874 src = VM_PAGE_TO_PHYS(msrc); 875 876 kcopy((void *)src, (void *)dst, PAGE_SIZE); 877 } 878 879 /* 880 * Zero a page of physical memory by temporarily mapping it into the tlb. 881 */ 882 void 883 pmap_zero_page(vm_page_t m) 884 { 885 vm_offset_t pa = VM_PAGE_TO_PHYS(m); 886 caddr_t va; 887 888 if (pa < SEGMENT_LENGTH) { 889 va = (caddr_t) pa; 890 } else if (pmap_initialized) { 891 if (pmap_pvo_zeropage == NULL) 892 pmap_pvo_zeropage = pmap_rkva_alloc(); 893 pmap_pa_map(pmap_pvo_zeropage, pa, NULL, NULL); 894 va = (caddr_t)PVO_VADDR(pmap_pvo_zeropage); 895 } else { 896 panic("pmap_zero_page: can't zero pa %#x", pa); 897 } 898 899 bzero(va, PAGE_SIZE); 900 901 if (pa >= SEGMENT_LENGTH) 902 pmap_pa_unmap(pmap_pvo_zeropage, NULL, NULL); 903 } 904 905 void 906 pmap_zero_page_area(vm_page_t m, int off, int size) 907 { 908 vm_offset_t pa = VM_PAGE_TO_PHYS(m); 909 caddr_t va; 910 911 if (pa < SEGMENT_LENGTH) { 912 va = (caddr_t) pa; 913 } else if (pmap_initialized) { 914 if (pmap_pvo_zeropage == NULL) 915 pmap_pvo_zeropage = pmap_rkva_alloc(); 916 pmap_pa_map(pmap_pvo_zeropage, pa, NULL, NULL); 917 va = (caddr_t)PVO_VADDR(pmap_pvo_zeropage); 918 } else { 919 panic("pmap_zero_page: can't zero pa %#x", pa); 920 } 921 922 bzero(va + off, size); 923 924 if (pa >= SEGMENT_LENGTH) 925 pmap_pa_unmap(pmap_pvo_zeropage, NULL, NULL); 926 } 927 928 void 929 pmap_zero_page_idle(vm_page_t m) 930 { 931 932 /* XXX this is called outside of Giant, is pmap_zero_page safe? */ 933 /* XXX maybe have a dedicated mapping for this to avoid the problem? */ 934 mtx_lock(&Giant); 935 pmap_zero_page(m); 936 mtx_unlock(&Giant); 937 } 938 939 /* 940 * Map the given physical page at the specified virtual address in the 941 * target pmap with the protection requested. If specified the page 942 * will be wired down. 
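 *
 * The new mapping gets a pvo entry from the managed or unmanaged zone,
 * is marked cache-inhibited and guarded unless the physical page lies
 * within a known memory region, and has its protection encoded in the
 * PTE_BW/PTE_BR bits.  Executable mappings also have the instruction
 * cache flushed for the page.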
943 */ 944 void 945 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, 946 boolean_t wired) 947 { 948 struct pvo_head *pvo_head; 949 uma_zone_t zone; 950 vm_page_t pg; 951 u_int pte_lo, pvo_flags, was_exec, i; 952 int error; 953 954 if (!pmap_initialized) { 955 pvo_head = &pmap_pvo_kunmanaged; 956 zone = pmap_upvo_zone; 957 pvo_flags = 0; 958 pg = NULL; 959 was_exec = PTE_EXEC; 960 } else { 961 pvo_head = vm_page_to_pvoh(m); 962 pg = m; 963 zone = pmap_mpvo_zone; 964 pvo_flags = PVO_MANAGED; 965 was_exec = 0; 966 } 967 968 /* 969 * If this is a managed page, and it's the first reference to the page, 970 * clear the execness of the page. Otherwise fetch the execness. 971 */ 972 if (pg != NULL) { 973 if (LIST_EMPTY(pvo_head)) { 974 pmap_attr_clear(pg, PTE_EXEC); 975 } else { 976 was_exec = pmap_attr_fetch(pg) & PTE_EXEC; 977 } 978 } 979 980 981 /* 982 * Assume the page is cache inhibited and access is guarded unless 983 * it's in our available memory array. 984 */ 985 pte_lo = PTE_I | PTE_G; 986 for (i = 0; i < pregions_sz; i++) { 987 if ((VM_PAGE_TO_PHYS(m) >= pregions[i].mr_start) && 988 (VM_PAGE_TO_PHYS(m) < 989 (pregions[i].mr_start + pregions[i].mr_size))) { 990 pte_lo &= ~(PTE_I | PTE_G); 991 break; 992 } 993 } 994 995 if (prot & VM_PROT_WRITE) 996 pte_lo |= PTE_BW; 997 else 998 pte_lo |= PTE_BR; 999 1000 pvo_flags |= (prot & VM_PROT_EXECUTE); 1001 1002 if (wired) 1003 pvo_flags |= PVO_WIRED; 1004 1005 error = pmap_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m), 1006 pte_lo, pvo_flags); 1007 1008 /* 1009 * Flush the real page from the instruction cache if this page is 1010 * mapped executable and cacheable and was not previously mapped (or 1011 * was not mapped executable). 1012 */ 1013 if (error == 0 && (pvo_flags & PVO_EXECUTABLE) && 1014 (pte_lo & PTE_I) == 0 && was_exec == 0) { 1015 /* 1016 * Flush the real memory from the cache. 1017 */ 1018 pmap_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE); 1019 if (pg != NULL) 1020 pmap_attr_save(pg, PTE_EXEC); 1021 } 1022 1023 /* XXX syncicache always until problems are sorted */ 1024 pmap_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE); 1025 } 1026 1027 vm_page_t 1028 pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_page_t mpte) 1029 { 1030 1031 pmap_enter(pm, va, m, VM_PROT_READ | VM_PROT_EXECUTE, FALSE); 1032 return (NULL); 1033 } 1034 1035 vm_offset_t 1036 pmap_extract(pmap_t pm, vm_offset_t va) 1037 { 1038 struct pvo_entry *pvo; 1039 1040 pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL); 1041 1042 if (pvo != NULL) { 1043 return ((pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF)); 1044 } 1045 1046 return (0); 1047 } 1048 1049 /* 1050 * Atomically extract and hold the physical page with the given 1051 * pmap and virtual address pair if that mapping permits the given 1052 * protection. 1053 */ 1054 vm_page_t 1055 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) 1056 { 1057 vm_paddr_t pa; 1058 vm_page_t m; 1059 1060 m = NULL; 1061 mtx_lock(&Giant); 1062 if ((pa = pmap_extract(pmap, va)) != 0) { 1063 m = PHYS_TO_VM_PAGE(pa); 1064 vm_page_lock_queues(); 1065 vm_page_hold(m); 1066 vm_page_unlock_queues(); 1067 } 1068 mtx_unlock(&Giant); 1069 return (m); 1070 } 1071 1072 /* 1073 * Grow the number of kernel page table entries. Unneeded. 
1074 */ 1075 void 1076 pmap_growkernel(vm_offset_t addr) 1077 { 1078 } 1079 1080 void 1081 pmap_init(void) 1082 { 1083 1084 CTR0(KTR_PMAP, "pmap_init"); 1085 1086 pmap_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry), 1087 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 1088 UMA_ZONE_VM | UMA_ZONE_NOFREE); 1089 pmap_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry), 1090 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 1091 UMA_ZONE_VM | UMA_ZONE_NOFREE); 1092 pmap_initialized = TRUE; 1093 } 1094 1095 void 1096 pmap_init2(void) 1097 { 1098 1099 CTR0(KTR_PMAP, "pmap_init2"); 1100 } 1101 1102 boolean_t 1103 pmap_is_modified(vm_page_t m) 1104 { 1105 1106 if ((m->flags & (PG_FICTITIOUS |PG_UNMANAGED)) != 0) 1107 return (FALSE); 1108 1109 return (pmap_query_bit(m, PTE_CHG)); 1110 } 1111 1112 /* 1113 * pmap_is_prefaultable: 1114 * 1115 * Return whether or not the specified virtual address is elgible 1116 * for prefault. 1117 */ 1118 boolean_t 1119 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) 1120 { 1121 1122 return (FALSE); 1123 } 1124 1125 void 1126 pmap_clear_reference(vm_page_t m) 1127 { 1128 1129 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 1130 return; 1131 pmap_clear_bit(m, PTE_REF, NULL); 1132 } 1133 1134 void 1135 pmap_clear_modify(vm_page_t m) 1136 { 1137 1138 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 1139 return; 1140 pmap_clear_bit(m, PTE_CHG, NULL); 1141 } 1142 1143 /* 1144 * pmap_ts_referenced: 1145 * 1146 * Return a count of reference bits for a page, clearing those bits. 1147 * It is not necessary for every reference bit to be cleared, but it 1148 * is necessary that 0 only be returned when there are truly no 1149 * reference bits set. 1150 * 1151 * XXX: The exact number of bits to check and clear is a matter that 1152 * should be tested and standardized at some point in the future for 1153 * optimal aging of shared pages. 1154 */ 1155 int 1156 pmap_ts_referenced(vm_page_t m) 1157 { 1158 int count; 1159 1160 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 1161 return (0); 1162 1163 count = pmap_clear_bit(m, PTE_REF, NULL); 1164 1165 return (count); 1166 } 1167 1168 /* 1169 * Map a wired page into kernel virtual address space. 1170 */ 1171 void 1172 pmap_kenter(vm_offset_t va, vm_offset_t pa) 1173 { 1174 u_int pte_lo; 1175 int error; 1176 int i; 1177 1178 #if 0 1179 if (va < VM_MIN_KERNEL_ADDRESS) 1180 panic("pmap_kenter: attempt to enter non-kernel address %#x", 1181 va); 1182 #endif 1183 1184 pte_lo = PTE_I | PTE_G; 1185 for (i = 0; i < pregions_sz; i++) { 1186 if ((pa >= pregions[i].mr_start) && 1187 (pa < (pregions[i].mr_start + pregions[i].mr_size))) { 1188 pte_lo &= ~(PTE_I | PTE_G); 1189 break; 1190 } 1191 } 1192 1193 error = pmap_pvo_enter(kernel_pmap, pmap_upvo_zone, 1194 &pmap_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED); 1195 1196 if (error != 0 && error != ENOENT) 1197 panic("pmap_kenter: failed to enter va %#x pa %#x: %d", va, 1198 pa, error); 1199 1200 /* 1201 * Flush the real memory from the instruction cache. 1202 */ 1203 if ((pte_lo & (PTE_I | PTE_G)) == 0) { 1204 pmap_syncicache(pa, PAGE_SIZE); 1205 } 1206 } 1207 1208 /* 1209 * Extract the physical page address associated with the given kernel virtual 1210 * address. 
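 * With UMA_MD_SMALL_ALLOC, addresses below VM_MIN_KERNEL_ADDRESS are
 * treated as direct mappings; anything else must be backed by a pvo
 * entry in the kernel pmap.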
1211 */ 1212 vm_offset_t 1213 pmap_kextract(vm_offset_t va) 1214 { 1215 struct pvo_entry *pvo; 1216 1217 #ifdef UMA_MD_SMALL_ALLOC 1218 /* 1219 * Allow direct mappings 1220 */ 1221 if (va < VM_MIN_KERNEL_ADDRESS) { 1222 return (va); 1223 } 1224 #endif 1225 1226 pvo = pmap_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL); 1227 KASSERT(pvo != NULL, ("pmap_kextract: no addr found")); 1228 if (pvo == NULL) { 1229 return (0); 1230 } 1231 1232 return ((pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF)); 1233 } 1234 1235 /* 1236 * Remove a wired page from kernel virtual address space. 1237 */ 1238 void 1239 pmap_kremove(vm_offset_t va) 1240 { 1241 1242 pmap_remove(kernel_pmap, va, va + PAGE_SIZE); 1243 } 1244 1245 /* 1246 * Map a range of physical addresses into kernel virtual address space. 1247 * 1248 * The value passed in *virt is a suggested virtual address for the mapping. 1249 * Architectures which can support a direct-mapped physical to virtual region 1250 * can return the appropriate address within that region, leaving '*virt' 1251 * unchanged. We cannot and therefore do not; *virt is updated with the 1252 * first usable address after the mapped region. 1253 */ 1254 vm_offset_t 1255 pmap_map(vm_offset_t *virt, vm_offset_t pa_start, vm_offset_t pa_end, int prot) 1256 { 1257 vm_offset_t sva, va; 1258 1259 sva = *virt; 1260 va = sva; 1261 for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE) 1262 pmap_kenter(va, pa_start); 1263 *virt = va; 1264 return (sva); 1265 } 1266 1267 int 1268 pmap_mincore(pmap_t pmap, vm_offset_t addr) 1269 { 1270 TODO; 1271 return (0); 1272 } 1273 1274 void 1275 pmap_object_init_pt(pmap_t pm, vm_offset_t addr, vm_object_t object, 1276 vm_pindex_t pindex, vm_size_t size) 1277 { 1278 1279 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 1280 KASSERT(object->type == OBJT_DEVICE, 1281 ("pmap_object_init_pt: non-device object")); 1282 KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, 1283 ("pmap_object_init_pt: non current pmap")); 1284 } 1285 1286 /* 1287 * Lower the permission for all mappings to a given page. 1288 */ 1289 void 1290 pmap_page_protect(vm_page_t m, vm_prot_t prot) 1291 { 1292 struct pvo_head *pvo_head; 1293 struct pvo_entry *pvo, *next_pvo; 1294 struct pte *pt; 1295 1296 /* 1297 * Since the routine only downgrades protection, if the 1298 * maximal protection is desired, there isn't any change 1299 * to be made. 1300 */ 1301 if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) == 1302 (VM_PROT_READ|VM_PROT_WRITE)) 1303 return; 1304 1305 pvo_head = vm_page_to_pvoh(m); 1306 for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) { 1307 next_pvo = LIST_NEXT(pvo, pvo_vlink); 1308 PMAP_PVO_CHECK(pvo); /* sanity check */ 1309 1310 /* 1311 * Downgrading to no mapping at all, we just remove the entry. 1312 */ 1313 if ((prot & VM_PROT_READ) == 0) { 1314 pmap_pvo_remove(pvo, -1); 1315 continue; 1316 } 1317 1318 /* 1319 * If EXEC permission is being revoked, just clear the flag 1320 * in the PVO. 1321 */ 1322 if ((prot & VM_PROT_EXECUTE) == 0) 1323 pvo->pvo_vaddr &= ~PVO_EXECUTABLE; 1324 1325 /* 1326 * If this entry is already RO, don't diddle with the page 1327 * table. 1328 */ 1329 if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) { 1330 PMAP_PVO_CHECK(pvo); 1331 continue; 1332 } 1333 1334 /* 1335 * Grab the PTE before we diddle the bits so pvo_to_pte can 1336 * verify the pte contents are as expected. 
 */
		pt = pmap_pvo_to_pte(pvo, -1);
		pvo->pvo_pte.pte_lo &= ~PTE_PP;
		pvo->pvo_pte.pte_lo |= PTE_BR;
		if (pt != NULL)
			pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
		PMAP_PVO_CHECK(pvo);	/* sanity check */
	}
}

/*
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page.  This count may
 * be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
 */
boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{
	int loops;
	struct pvo_entry *pvo;

	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
		return FALSE;

	loops = 0;
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		if (pvo->pvo_pmap == pmap)
			return (TRUE);
		if (++loops >= 16)
			break;
	}

	return (FALSE);
}

static u_int	pmap_vsidcontext;

void
pmap_pinit(pmap_t pmap)
{
	int	i, mask;
	u_int	entropy;

	KASSERT((int)pmap < VM_MIN_KERNEL_ADDRESS, ("pmap_pinit: virt pmap"));

	entropy = 0;
	__asm __volatile("mftb %0" : "=r"(entropy));

	/*
	 * Allocate some segment registers for this pmap.
	 */
	for (i = 0; i < NPMAPS; i += VSID_NBPW) {
		u_int	hash, n;

		/*
		 * Create a new value by multiplying by a prime and adding in
		 * entropy from the timebase register.  This is to make the
		 * VSID more random so that the PT hash function collides
		 * less often.  (Note that the prime causes gcc to do shifts
		 * instead of a multiply.)
		 */
		pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy;
		hash = pmap_vsidcontext & (NPMAPS - 1);
		if (hash == 0)		/* 0 is special, avoid it */
			continue;
		n = hash >> 5;
		mask = 1 << (hash & (VSID_NBPW - 1));
		hash = (pmap_vsidcontext & 0xfffff);
		if (pmap_vsid_bitmap[n] & mask) {	/* collision? */
			/* anything free in this bucket? */
			if (pmap_vsid_bitmap[n] == 0xffffffff) {
				entropy = (pmap_vsidcontext >> 20);
				continue;
			}
			i = ffs(~pmap_vsid_bitmap[n]) - 1;
			mask = 1 << i;
			hash &= 0xfffff & ~(VSID_NBPW - 1);
			hash |= i;
		}
		pmap_vsid_bitmap[n] |= mask;
		for (i = 0; i < 16; i++)
			pmap->pm_sr[i] = VSID_MAKE(i, hash);
		return;
	}

	panic("pmap_pinit: out of segments");
}

/*
 * Initialize the pmap associated with process 0.
 */
void
pmap_pinit0(pmap_t pm)
{

	pmap_pinit(pm);
	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
}

/*
 * Set the physical protection on the specified range of this map as requested.
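 * If the new protection lacks read permission the range is simply
 * removed; revoking execute permission clears PVO_EXECUTABLE on each
 * pvo; otherwise the PTE protection bits are downgraded to read-only
 * (PTE_BR) and the hardware PTE, if present, is updated in place.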
1440 */ 1441 void 1442 pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) 1443 { 1444 struct pvo_entry *pvo; 1445 struct pte *pt; 1446 int pteidx; 1447 1448 CTR4(KTR_PMAP, "pmap_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, sva, 1449 eva, prot); 1450 1451 1452 KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, 1453 ("pmap_protect: non current pmap")); 1454 1455 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1456 pmap_remove(pm, sva, eva); 1457 return; 1458 } 1459 1460 for (; sva < eva; sva += PAGE_SIZE) { 1461 pvo = pmap_pvo_find_va(pm, sva, &pteidx); 1462 if (pvo == NULL) 1463 continue; 1464 1465 if ((prot & VM_PROT_EXECUTE) == 0) 1466 pvo->pvo_vaddr &= ~PVO_EXECUTABLE; 1467 1468 /* 1469 * Grab the PTE pointer before we diddle with the cached PTE 1470 * copy. 1471 */ 1472 pt = pmap_pvo_to_pte(pvo, pteidx); 1473 /* 1474 * Change the protection of the page. 1475 */ 1476 pvo->pvo_pte.pte_lo &= ~PTE_PP; 1477 pvo->pvo_pte.pte_lo |= PTE_BR; 1478 1479 /* 1480 * If the PVO is in the page table, update that pte as well. 1481 */ 1482 if (pt != NULL) 1483 pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 1484 } 1485 } 1486 1487 /* 1488 * Map a list of wired pages into kernel virtual address space. This is 1489 * intended for temporary mappings which do not need page modification or 1490 * references recorded. Existing mappings in the region are overwritten. 1491 */ 1492 void 1493 pmap_qenter(vm_offset_t sva, vm_page_t *m, int count) 1494 { 1495 vm_offset_t va; 1496 1497 va = sva; 1498 while (count-- > 0) { 1499 pmap_kenter(va, VM_PAGE_TO_PHYS(*m)); 1500 va += PAGE_SIZE; 1501 m++; 1502 } 1503 } 1504 1505 /* 1506 * Remove page mappings from kernel virtual address space. Intended for 1507 * temporary mappings entered by pmap_qenter. 1508 */ 1509 void 1510 pmap_qremove(vm_offset_t sva, int count) 1511 { 1512 vm_offset_t va; 1513 1514 va = sva; 1515 while (count-- > 0) { 1516 pmap_kremove(va); 1517 va += PAGE_SIZE; 1518 } 1519 } 1520 1521 void 1522 pmap_release(pmap_t pmap) 1523 { 1524 int idx, mask; 1525 1526 /* 1527 * Free segment register's VSID 1528 */ 1529 if (pmap->pm_sr[0] == 0) 1530 panic("pmap_release"); 1531 1532 idx = VSID_TO_HASH(pmap->pm_sr[0]) & (NPMAPS-1); 1533 mask = 1 << (idx % VSID_NBPW); 1534 idx /= VSID_NBPW; 1535 pmap_vsid_bitmap[idx] &= ~mask; 1536 } 1537 1538 /* 1539 * Remove the given range of addresses from the specified map. 1540 */ 1541 void 1542 pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva) 1543 { 1544 struct pvo_entry *pvo; 1545 int pteidx; 1546 1547 for (; sva < eva; sva += PAGE_SIZE) { 1548 pvo = pmap_pvo_find_va(pm, sva, &pteidx); 1549 if (pvo != NULL) { 1550 pmap_pvo_remove(pvo, pteidx); 1551 } 1552 } 1553 } 1554 1555 /* 1556 * Remove physical page from all pmaps in which it resides. pmap_pvo_remove() 1557 * will reflect changes in pte's back to the vm_page. 1558 */ 1559 void 1560 pmap_remove_all(vm_page_t m) 1561 { 1562 struct pvo_head *pvo_head; 1563 struct pvo_entry *pvo, *next_pvo; 1564 1565 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1566 1567 pvo_head = vm_page_to_pvoh(m); 1568 for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) { 1569 next_pvo = LIST_NEXT(pvo, pvo_vlink); 1570 1571 PMAP_PVO_CHECK(pvo); /* sanity check */ 1572 pmap_pvo_remove(pvo, -1); 1573 } 1574 vm_page_flag_clear(m, PG_WRITEABLE); 1575 } 1576 1577 /* 1578 * Remove all pages from specified address space, this aids process exit 1579 * speeds. This is much faster than pmap_remove in the case of running down 1580 * an entire address space. 
Only works for the current pmap. 1581 */ 1582 void 1583 pmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva) 1584 { 1585 1586 KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, 1587 ("pmap_remove_pages: non current pmap")); 1588 pmap_remove(pm, sva, eva); 1589 } 1590 1591 /* 1592 * Allocate a physical page of memory directly from the phys_avail map. 1593 * Can only be called from pmap_bootstrap before avail start and end are 1594 * calculated. 1595 */ 1596 static vm_offset_t 1597 pmap_bootstrap_alloc(vm_size_t size, u_int align) 1598 { 1599 vm_offset_t s, e; 1600 int i, j; 1601 1602 size = round_page(size); 1603 for (i = 0; phys_avail[i + 1] != 0; i += 2) { 1604 if (align != 0) 1605 s = (phys_avail[i] + align - 1) & ~(align - 1); 1606 else 1607 s = phys_avail[i]; 1608 e = s + size; 1609 1610 if (s < phys_avail[i] || e > phys_avail[i + 1]) 1611 continue; 1612 1613 if (s == phys_avail[i]) { 1614 phys_avail[i] += size; 1615 } else if (e == phys_avail[i + 1]) { 1616 phys_avail[i + 1] -= size; 1617 } else { 1618 for (j = phys_avail_count * 2; j > i; j -= 2) { 1619 phys_avail[j] = phys_avail[j - 2]; 1620 phys_avail[j + 1] = phys_avail[j - 1]; 1621 } 1622 1623 phys_avail[i + 3] = phys_avail[i + 1]; 1624 phys_avail[i + 1] = s; 1625 phys_avail[i + 2] = e; 1626 phys_avail_count++; 1627 } 1628 1629 return (s); 1630 } 1631 panic("pmap_bootstrap_alloc: could not allocate memory"); 1632 } 1633 1634 /* 1635 * Return an unmapped pvo for a kernel virtual address. 1636 * Used by pmap functions that operate on physical pages. 1637 */ 1638 static struct pvo_entry * 1639 pmap_rkva_alloc(void) 1640 { 1641 struct pvo_entry *pvo; 1642 struct pte *pt; 1643 vm_offset_t kva; 1644 int pteidx; 1645 1646 if (pmap_rkva_count == 0) 1647 panic("pmap_rkva_alloc: no more reserved KVAs"); 1648 1649 kva = pmap_rkva_start + (PAGE_SIZE * --pmap_rkva_count); 1650 pmap_kenter(kva, 0); 1651 1652 pvo = pmap_pvo_find_va(kernel_pmap, kva, &pteidx); 1653 1654 if (pvo == NULL) 1655 panic("pmap_kva_alloc: pmap_pvo_find_va failed"); 1656 1657 pt = pmap_pvo_to_pte(pvo, pteidx); 1658 1659 if (pt == NULL) 1660 panic("pmap_kva_alloc: pmap_pvo_to_pte failed"); 1661 1662 pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 1663 PVO_PTEGIDX_CLR(pvo); 1664 1665 pmap_pte_overflow++; 1666 1667 return (pvo); 1668 } 1669 1670 static void 1671 pmap_pa_map(struct pvo_entry *pvo, vm_offset_t pa, struct pte *saved_pt, 1672 int *depth_p) 1673 { 1674 struct pte *pt; 1675 1676 /* 1677 * If this pvo already has a valid pte, we need to save it so it can 1678 * be restored later. We then just reload the new PTE over the old 1679 * slot. 
1680 */ 1681 if (saved_pt != NULL) { 1682 pt = pmap_pvo_to_pte(pvo, -1); 1683 1684 if (pt != NULL) { 1685 pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 1686 PVO_PTEGIDX_CLR(pvo); 1687 pmap_pte_overflow++; 1688 } 1689 1690 *saved_pt = pvo->pvo_pte; 1691 1692 pvo->pvo_pte.pte_lo &= ~PTE_RPGN; 1693 } 1694 1695 pvo->pvo_pte.pte_lo |= pa; 1696 1697 if (!pmap_pte_spill(pvo->pvo_vaddr)) 1698 panic("pmap_pa_map: could not spill pvo %p", pvo); 1699 1700 if (depth_p != NULL) 1701 (*depth_p)++; 1702 } 1703 1704 static void 1705 pmap_pa_unmap(struct pvo_entry *pvo, struct pte *saved_pt, int *depth_p) 1706 { 1707 struct pte *pt; 1708 1709 pt = pmap_pvo_to_pte(pvo, -1); 1710 1711 if (pt != NULL) { 1712 pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 1713 PVO_PTEGIDX_CLR(pvo); 1714 pmap_pte_overflow++; 1715 } 1716 1717 pvo->pvo_pte.pte_lo &= ~PTE_RPGN; 1718 1719 /* 1720 * If there is a saved PTE and it's valid, restore it and return. 1721 */ 1722 if (saved_pt != NULL && (saved_pt->pte_lo & PTE_RPGN) != 0) { 1723 if (depth_p != NULL && --(*depth_p) == 0) 1724 panic("pmap_pa_unmap: restoring but depth == 0"); 1725 1726 pvo->pvo_pte = *saved_pt; 1727 1728 if (!pmap_pte_spill(pvo->pvo_vaddr)) 1729 panic("pmap_pa_unmap: could not spill pvo %p", pvo); 1730 } 1731 } 1732 1733 static void 1734 pmap_syncicache(vm_offset_t pa, vm_size_t len) 1735 { 1736 __syncicache((void *)pa, len); 1737 } 1738 1739 static void 1740 tlbia(void) 1741 { 1742 caddr_t i; 1743 1744 SYNC(); 1745 for (i = 0; i < (caddr_t)0x00040000; i += 0x00001000) { 1746 TLBIE(i); 1747 EIEIO(); 1748 } 1749 TLBSYNC(); 1750 SYNC(); 1751 } 1752 1753 static int 1754 pmap_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head, 1755 vm_offset_t va, vm_offset_t pa, u_int pte_lo, int flags) 1756 { 1757 struct pvo_entry *pvo; 1758 u_int sr; 1759 int first; 1760 u_int ptegidx; 1761 int i; 1762 int bootstrap; 1763 1764 pmap_pvo_enter_calls++; 1765 first = 0; 1766 1767 bootstrap = 0; 1768 1769 /* 1770 * Compute the PTE Group index. 1771 */ 1772 va &= ~ADDR_POFF; 1773 sr = va_to_sr(pm->pm_sr, va); 1774 ptegidx = va_to_pteg(sr, va); 1775 1776 /* 1777 * Remove any existing mapping for this page. Reuse the pvo entry if 1778 * there is a mapping. 1779 */ 1780 LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { 1781 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 1782 if ((pvo->pvo_pte.pte_lo & PTE_RPGN) == pa && 1783 (pvo->pvo_pte.pte_lo & PTE_PP) == 1784 (pte_lo & PTE_PP)) { 1785 return (0); 1786 } 1787 pmap_pvo_remove(pvo, -1); 1788 break; 1789 } 1790 } 1791 1792 /* 1793 * If we aren't overwriting a mapping, try to allocate. 
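 * The entry comes from the UMA zone once pmap_init() has run, or from
 * the static bootstrap pool before that.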
1794 */ 1795 if (pmap_initialized) { 1796 pvo = uma_zalloc(zone, M_NOWAIT); 1797 } else { 1798 if (pmap_bpvo_pool_index >= BPVO_POOL_SIZE) { 1799 panic("pmap_enter: bpvo pool exhausted, %d, %d, %d", 1800 pmap_bpvo_pool_index, BPVO_POOL_SIZE, 1801 BPVO_POOL_SIZE * sizeof(struct pvo_entry)); 1802 } 1803 pvo = &pmap_bpvo_pool[pmap_bpvo_pool_index]; 1804 pmap_bpvo_pool_index++; 1805 bootstrap = 1; 1806 } 1807 1808 if (pvo == NULL) { 1809 return (ENOMEM); 1810 } 1811 1812 pmap_pvo_entries++; 1813 pvo->pvo_vaddr = va; 1814 pvo->pvo_pmap = pm; 1815 LIST_INSERT_HEAD(&pmap_pvo_table[ptegidx], pvo, pvo_olink); 1816 pvo->pvo_vaddr &= ~ADDR_POFF; 1817 if (flags & VM_PROT_EXECUTE) 1818 pvo->pvo_vaddr |= PVO_EXECUTABLE; 1819 if (flags & PVO_WIRED) 1820 pvo->pvo_vaddr |= PVO_WIRED; 1821 if (pvo_head != &pmap_pvo_kunmanaged) 1822 pvo->pvo_vaddr |= PVO_MANAGED; 1823 if (bootstrap) 1824 pvo->pvo_vaddr |= PVO_BOOTSTRAP; 1825 pmap_pte_create(&pvo->pvo_pte, sr, va, pa | pte_lo); 1826 1827 /* 1828 * Remember if the list was empty and therefore will be the first 1829 * item. 1830 */ 1831 if (LIST_FIRST(pvo_head) == NULL) 1832 first = 1; 1833 1834 LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink); 1835 if (pvo->pvo_pte.pte_lo & PVO_WIRED) 1836 pvo->pvo_pmap->pm_stats.wired_count++; 1837 pvo->pvo_pmap->pm_stats.resident_count++; 1838 1839 /* 1840 * We hope this succeeds but it isn't required. 1841 */ 1842 i = pmap_pte_insert(ptegidx, &pvo->pvo_pte); 1843 if (i >= 0) { 1844 PVO_PTEGIDX_SET(pvo, i); 1845 } else { 1846 panic("pmap_pvo_enter: overflow"); 1847 pmap_pte_overflow++; 1848 } 1849 1850 return (first ? ENOENT : 0); 1851 } 1852 1853 static void 1854 pmap_pvo_remove(struct pvo_entry *pvo, int pteidx) 1855 { 1856 struct pte *pt; 1857 1858 /* 1859 * If there is an active pte entry, we need to deactivate it (and 1860 * save the ref & cfg bits). 1861 */ 1862 pt = pmap_pvo_to_pte(pvo, pteidx); 1863 if (pt != NULL) { 1864 pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 1865 PVO_PTEGIDX_CLR(pvo); 1866 } else { 1867 pmap_pte_overflow--; 1868 } 1869 1870 /* 1871 * Update our statistics. 1872 */ 1873 pvo->pvo_pmap->pm_stats.resident_count--; 1874 if (pvo->pvo_pte.pte_lo & PVO_WIRED) 1875 pvo->pvo_pmap->pm_stats.wired_count--; 1876 1877 /* 1878 * Save the REF/CHG bits into their cache if the page is managed. 1879 */ 1880 if (pvo->pvo_vaddr & PVO_MANAGED) { 1881 struct vm_page *pg; 1882 1883 pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte_lo & PTE_RPGN); 1884 if (pg != NULL) { 1885 pmap_attr_save(pg, pvo->pvo_pte.pte_lo & 1886 (PTE_REF | PTE_CHG)); 1887 } 1888 } 1889 1890 /* 1891 * Remove this PVO from the PV list. 1892 */ 1893 LIST_REMOVE(pvo, pvo_vlink); 1894 1895 /* 1896 * Remove this from the overflow list and return it to the pool 1897 * if we aren't going to reuse it. 1898 */ 1899 LIST_REMOVE(pvo, pvo_olink); 1900 if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP)) 1901 uma_zfree(pvo->pvo_vaddr & PVO_MANAGED ? pmap_mpvo_zone : 1902 pmap_upvo_zone, pvo); 1903 pmap_pvo_entries--; 1904 pmap_pvo_remove_calls++; 1905 } 1906 1907 static __inline int 1908 pmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx) 1909 { 1910 int pteidx; 1911 1912 /* 1913 * We can find the actual pte entry without searching by grabbing 1914 * the PTEG index from 3 unused bits in pte_lo[11:9] and by 1915 * noticing the HID bit. 
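 * In this implementation the slot number is cached in the low bits of
 * pvo_vaddr (see PVO_PTEGIDX_GET/PVO_PTEGIDX_SET); PTE_HID selects
 * between the primary and secondary group.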
1916 */ 1917 pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo); 1918 if (pvo->pvo_pte.pte_hi & PTE_HID) 1919 pteidx ^= pmap_pteg_mask * 8; 1920 1921 return (pteidx); 1922 } 1923 1924 static struct pvo_entry * 1925 pmap_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p) 1926 { 1927 struct pvo_entry *pvo; 1928 int ptegidx; 1929 u_int sr; 1930 1931 va &= ~ADDR_POFF; 1932 sr = va_to_sr(pm->pm_sr, va); 1933 ptegidx = va_to_pteg(sr, va); 1934 1935 LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { 1936 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 1937 if (pteidx_p) 1938 *pteidx_p = pmap_pvo_pte_index(pvo, ptegidx); 1939 return (pvo); 1940 } 1941 } 1942 1943 return (NULL); 1944 } 1945 1946 static struct pte * 1947 pmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx) 1948 { 1949 struct pte *pt; 1950 1951 /* 1952 * If we haven't been supplied the ptegidx, calculate it. 1953 */ 1954 if (pteidx == -1) { 1955 int ptegidx; 1956 u_int sr; 1957 1958 sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr); 1959 ptegidx = va_to_pteg(sr, pvo->pvo_vaddr); 1960 pteidx = pmap_pvo_pte_index(pvo, ptegidx); 1961 } 1962 1963 pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7]; 1964 1965 if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) { 1966 panic("pmap_pvo_to_pte: pvo %p has valid pte in pvo but no " 1967 "valid pte index", pvo); 1968 } 1969 1970 if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) { 1971 panic("pmap_pvo_to_pte: pvo %p has valid pte index in pvo " 1972 "pvo but no valid pte", pvo); 1973 } 1974 1975 if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) { 1976 if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) { 1977 panic("pmap_pvo_to_pte: pvo %p has valid pte in " 1978 "pmap_pteg_table %p but invalid in pvo", pvo, pt); 1979 } 1980 1981 if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF)) 1982 != 0) { 1983 panic("pmap_pvo_to_pte: pvo %p pte does not match " 1984 "pte %p in pmap_pteg_table", pvo, pt); 1985 } 1986 1987 return (pt); 1988 } 1989 1990 if (pvo->pvo_pte.pte_hi & PTE_VALID) { 1991 panic("pmap_pvo_to_pte: pvo %p has invalid pte %p in " 1992 "pmap_pteg_table but valid in pvo", pvo, pt); 1993 } 1994 1995 return (NULL); 1996 } 1997 1998 /* 1999 * XXX: THIS STUFF SHOULD BE IN pte.c? 2000 */ 2001 int 2002 pmap_pte_spill(vm_offset_t addr) 2003 { 2004 struct pvo_entry *source_pvo, *victim_pvo; 2005 struct pvo_entry *pvo; 2006 int ptegidx, i, j; 2007 u_int sr; 2008 struct pteg *pteg; 2009 struct pte *pt; 2010 2011 pmap_pte_spills++; 2012 2013 sr = mfsrin(addr); 2014 ptegidx = va_to_pteg(sr, addr); 2015 2016 /* 2017 * Have to substitute some entry. Use the primary hash for this. 2018 * Use low bits of timebase as random generator. 2019 */ 2020 pteg = &pmap_pteg_table[ptegidx]; 2021 __asm __volatile("mftb %0" : "=r"(i)); 2022 i &= 7; 2023 pt = &pteg->pt[i]; 2024 2025 source_pvo = NULL; 2026 victim_pvo = NULL; 2027 LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { 2028 /* 2029 * We need to find a pvo entry for this address. 2030 */ 2031 PMAP_PVO_CHECK(pvo); 2032 if (source_pvo == NULL && 2033 pmap_pte_match(&pvo->pvo_pte, sr, addr, 2034 pvo->pvo_pte.pte_hi & PTE_HID)) { 2035 /* 2036 * Now found an entry to be spilled into the pteg. 2037 * The PTE is now valid, so we know it's active. 
/*
 * XXX: THIS STUFF SHOULD BE IN pte.c?
 */
int
pmap_pte_spill(vm_offset_t addr)
{
	struct pvo_entry *source_pvo, *victim_pvo;
	struct pvo_entry *pvo;
	int ptegidx, i, j;
	u_int sr;
	struct pteg *pteg;
	struct pte *pt;

	pmap_pte_spills++;

	sr = mfsrin(addr);
	ptegidx = va_to_pteg(sr, addr);

	/*
	 * Have to substitute some entry.  Use the primary hash for this.
	 * Use low bits of timebase as random generator.
	 */
	pteg = &pmap_pteg_table[ptegidx];
	__asm __volatile("mftb %0" : "=r"(i));
	i &= 7;
	pt = &pteg->pt[i];

	source_pvo = NULL;
	victim_pvo = NULL;
	LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
		/*
		 * We need to find a pvo entry for this address.
		 */
		PMAP_PVO_CHECK(pvo);
		if (source_pvo == NULL &&
		    pmap_pte_match(&pvo->pvo_pte, sr, addr,
		    pvo->pvo_pte.pte_hi & PTE_HID)) {
			/*
			 * Now we have found an entry to be spilled into the
			 * pteg.  The PTE is now valid, so we know it's
			 * active.
			 */
			j = pmap_pte_insert(ptegidx, &pvo->pvo_pte);

			if (j >= 0) {
				PVO_PTEGIDX_SET(pvo, j);
				pmap_pte_overflow--;
				PMAP_PVO_CHECK(pvo);
				return (1);
			}

			source_pvo = pvo;

			if (victim_pvo != NULL)
				break;
		}

		/*
		 * We also need the pvo entry of the victim we are replacing
		 * so save the R & C bits of the PTE.
		 */
		if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
		    pmap_pte_compare(pt, &pvo->pvo_pte)) {
			victim_pvo = pvo;
			if (source_pvo != NULL)
				break;
		}
	}

	if (source_pvo == NULL)
		return (0);

	if (victim_pvo == NULL) {
		if ((pt->pte_hi & PTE_HID) == 0)
			panic("pmap_pte_spill: victim p-pte (%p) has no pvo "
			    "entry", pt);

		/*
		 * If this is a secondary PTE, we need to search its primary
		 * pvo bucket for the matching PVO.
		 */
		LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx ^ pmap_pteg_mask],
		    pvo_olink) {
			PMAP_PVO_CHECK(pvo);
			/*
			 * We also need the pvo entry of the victim we are
			 * replacing so save the R & C bits of the PTE.
			 */
			if (pmap_pte_compare(pt, &pvo->pvo_pte)) {
				victim_pvo = pvo;
				break;
			}
		}

		if (victim_pvo == NULL)
			panic("pmap_pte_spill: victim s-pte (%p) has no pvo "
			    "entry", pt);
	}

	/*
	 * We are invalidating the TLB entry for the EA we are replacing even
	 * though it's valid.  If we don't, we lose any ref/chg bit changes
	 * contained in the TLB entry.
	 */
	source_pvo->pvo_pte.pte_hi &= ~PTE_HID;

	pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr);
	pmap_pte_set(pt, &source_pvo->pvo_pte);

	PVO_PTEGIDX_CLR(victim_pvo);
	PVO_PTEGIDX_SET(source_pvo, i);
	pmap_pte_replacements++;

	PMAP_PVO_CHECK(victim_pvo);
	PMAP_PVO_CHECK(source_pvo);

	return (1);
}

static int
pmap_pte_insert(u_int ptegidx, struct pte *pvo_pt)
{
	struct pte *pt;
	int i;

	/*
	 * First try primary hash.
	 */
	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi &= ~PTE_HID;
			pmap_pte_set(pt, pvo_pt);
			return (i);
		}
	}

	/*
	 * Now try secondary hash.
	 */
	ptegidx ^= pmap_pteg_mask;
	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi |= PTE_HID;
			pmap_pte_set(pt, pvo_pt);
			return (i);
		}
	}

	/*
	 * Both PTEGs are full; let the caller count the overflow and rely
	 * on the spill path to make the mapping resident later.
	 */
	return (-1);
}

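/*
 * REF/CHG queries work in two passes: first check the bit cached in each
 * PVO (cheap), and only then sync and inspect the live PTEs.  As a usage
 * sketch (assumed here, not shown in this file), a "has the page been
 * modified?" check reduces to pmap_query_bit(m, PTE_CHG) and a "has it
 * been referenced?" check to pmap_query_bit(m, PTE_REF).
 */
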
static boolean_t
pmap_query_bit(vm_page_t m, int ptebit)
{
	struct pvo_entry *pvo;
	struct pte *pt;

#if 0
	if (pmap_attr_fetch(m) & ptebit)
		return (TRUE);
#endif

	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		PMAP_PVO_CHECK(pvo);	/* sanity check */

		/*
		 * See if we saved the bit off.  If so, cache it and return
		 * success.
		 */
		if (pvo->pvo_pte.pte_lo & ptebit) {
			pmap_attr_save(m, ptebit);
			PMAP_PVO_CHECK(pvo);	/* sanity check */
			return (TRUE);
		}
	}

	/*
	 * No luck, now go through the hard part of looking at the PTEs
	 * themselves.  Sync so that any pending REF/CHG bits are flushed to
	 * the PTEs.
	 */
	SYNC();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		PMAP_PVO_CHECK(pvo);	/* sanity check */

		/*
		 * See if this pvo has a valid PTE.  If so, fetch the
		 * REF/CHG bits from the valid PTE.  If the appropriate
		 * ptebit is set, cache it and return success.
		 */
		pt = pmap_pvo_to_pte(pvo, -1);
		if (pt != NULL) {
			pmap_pte_synch(pt, &pvo->pvo_pte);
			if (pvo->pvo_pte.pte_lo & ptebit) {
				pmap_attr_save(m, ptebit);
				PMAP_PVO_CHECK(pvo);	/* sanity check */
				return (TRUE);
			}
		}
	}

	return (FALSE);
}

static u_int
pmap_clear_bit(vm_page_t m, int ptebit, int *origbit)
{
	u_int count;
	struct pvo_entry *pvo;
	struct pte *pt;
	int rv;

	/*
	 * Clear the cached value.
	 */
	rv = pmap_attr_fetch(m);
	pmap_attr_clear(m, ptebit);

	/*
	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
	 * we can reset the right ones).  Note that since the pvo entries and
	 * list heads are accessed via BAT0 and are never placed in the page
	 * table, we don't have to worry about further accesses setting the
	 * REF/CHG bits.
	 */
	SYNC();

	/*
	 * For each pvo entry, clear the pvo's ptebit.  If this pvo has a
	 * valid pte clear the ptebit from the valid pte.
	 */
	count = 0;
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		PMAP_PVO_CHECK(pvo);	/* sanity check */
		pt = pmap_pvo_to_pte(pvo, -1);
		if (pt != NULL) {
			pmap_pte_synch(pt, &pvo->pvo_pte);
			if (pvo->pvo_pte.pte_lo & ptebit) {
				count++;
				pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit);
			}
		}
		rv |= pvo->pvo_pte.pte_lo;
		pvo->pvo_pte.pte_lo &= ~ptebit;
		PMAP_PVO_CHECK(pvo);	/* sanity check */
	}

	if (origbit != NULL) {
		*origbit = rv;
	}

	return (count);
}

/*
 * Return 0 if the physical range [pa, pa + size) is encompassed by
 * battable[idx], otherwise an error code.
 */
static int
pmap_bat_mapped(int idx, vm_offset_t pa, vm_size_t size)
{
	u_int prot;
	u_int32_t start;
	u_int32_t end;
	u_int32_t bat_ble;

	/*
	 * Return immediately if not a valid mapping.
	 */
	if (!(battable[idx].batu & BAT_Vs))
		return (EINVAL);

	/*
	 * The BAT entry must be cache-inhibited, guarded, and r/w
	 * so it can function as an i/o page.
	 */
	prot = battable[idx].batl & (BAT_I|BAT_G|BAT_PP_RW);
	if (prot != (BAT_I|BAT_G|BAT_PP_RW))
		return (EPERM);

	/*
	 * The address should be within the BAT range.  Assume that the
	 * start address in the BAT has the correct alignment (thus
	 * not requiring masking).
	 */
	start = battable[idx].batl & BAT_PBS;
	bat_ble = (battable[idx].batu & ~(BAT_EBS)) | 0x03;
	end = start | (bat_ble << 15) | 0x7fff;

	if ((pa < start) || ((pa + size) > end))
		return (ERANGE);

	return (0);
}

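/*
 * Worked example of the length arithmetic above: for the smallest BAT
 * (BL == 0, 128KB) bat_ble is 0x03, so end = start | 0x1ffff; for a 256MB
 * BAT (BL == 0x7ff) end = start | 0x0fffffff.  OR'ing in 0x03 before the
 * shift is what guarantees the minimum 128KB span.
 */
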
/*
 * Map a set of physical memory pages into the kernel virtual
 * address space.  Return a pointer to where it is mapped.  This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 */
void *
pmap_mapdev(vm_offset_t pa, vm_size_t size)
{
	vm_offset_t va, tmpva, ppa, offset;
	int i;

	ppa = trunc_page(pa);
	offset = pa & PAGE_MASK;
	size = roundup(offset + size, PAGE_SIZE);

	GIANT_REQUIRED;

	/*
	 * If the physical address lies within a valid BAT table entry,
	 * return the 1:1 mapping.  This currently doesn't work
	 * for regions that overlap 256M BAT segments.
	 */
	for (i = 0; i < 16; i++) {
		if (pmap_bat_mapped(i, pa, size) == 0)
			return ((void *) pa);
	}

	va = kmem_alloc_nofault(kernel_map, size);
	if (!va)
		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");

	for (tmpva = va; size > 0;) {
		pmap_kenter(tmpva, ppa);
		TLBIE(tmpva); /* XXX or should it be invalidate-all ? */
		size -= PAGE_SIZE;
		tmpva += PAGE_SIZE;
		ppa += PAGE_SIZE;
	}

	return ((void *)(va + offset));
}

void
pmap_unmapdev(vm_offset_t va, vm_size_t size)
{
	vm_offset_t base, offset;

	/*
	 * If this is outside kernel virtual space, then it's a
	 * battable entry and doesn't require unmapping.
	 */
	if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
		base = trunc_page(va);
		offset = va & PAGE_MASK;
		size = roundup(offset + size, PAGE_SIZE);
		kmem_free(kernel_map, base, size);
	}
}
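
/*
 * Hypothetical usage sketch of the two routines above (the address and
 * size are made up for illustration):
 *
 *	void *regs;
 *
 *	regs = pmap_mapdev(0xf2000000, PAGE_SIZE);
 *	... access device registers through regs ...
 *	pmap_unmapdev((vm_offset_t)regs, PAGE_SIZE);
 *
 * BAT-covered addresses come back as a 1:1 pointer, so pmap_unmapdev()
 * deliberately frees nothing for them.
 */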