/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef lint
static const char rcsid[] =
  "$FreeBSD$";
#endif /* not lint */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/msgbuf.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <sys/mutex.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>

#include <sys/user.h>

#include <machine/pcb.h>
#include <machine/powerpc.h>
#include <machine/pte.h>

pte_t	*ptable;
int	ptab_cnt;
u_int	ptab_mask;
#define	HTABSIZE	(ptab_cnt * 64)

#define	MINPV	2048

struct pte_ovfl {
	LIST_ENTRY(pte_ovfl) po_list;	/* Linked list of overflow entries */
	struct pte po_pte;		/* PTE for this mapping */
};

LIST_HEAD(pte_ovtab, pte_ovfl) *potable; /* Overflow entries for ptable */

static struct pmap kernel_pmap_store;
pmap_t	kernel_pmap;

static int	npgs;
static u_int	nextavail;

#ifndef MSGBUFADDR
extern vm_offset_t	msgbuf_paddr;
#endif

static struct mem_region	*mem, *avail;

vm_offset_t	avail_start;
vm_offset_t	avail_end;
vm_offset_t	virtual_avail;
vm_offset_t	virtual_end;

vm_offset_t	kernel_vm_end;

static int	pmap_pagedaemon_waken = 0;

extern unsigned int	Maxmem;

#define	ATTRSHFT	4

struct pv_entry	*pv_table;

static vm_zone_t	pvzone;
static struct vm_zone	pvzone_store;
static struct vm_object	pvzone_obj;
static int	pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
static struct pv_entry	*pvinit;

#if !defined(PMAP_SHPGPERPROC)
#define	PMAP_SHPGPERPROC	200
#endif

struct pv_page;
struct pv_page_info {
	LIST_ENTRY(pv_page) pgi_list;
	struct pv_entry	*pgi_freelist;
	int	pgi_nfree;
};
#define	NPVPPG	((PAGE_SIZE - sizeof(struct pv_page_info)) / sizeof(struct pv_entry))
struct pv_page {
	struct pv_page_info	pvp_pgi;
	struct pv_entry		pvp_pv[NPVPPG];
};
LIST_HEAD(pv_page_list, pv_page) pv_page_freelist;
int	pv_nfree;
int	pv_pcnt;
static struct pv_entry	*pmap_alloc_pv(void);
static void		pmap_free_pv(struct pv_entry *);

struct po_page;
struct po_page_info {
	LIST_ENTRY(po_page) pgi_list;
	vm_page_t	pgi_page;
	LIST_HEAD(po_freelist, pte_ovfl) pgi_freelist;
	int	pgi_nfree;
};
#define	NPOPPG	((PAGE_SIZE - sizeof(struct po_page_info)) / sizeof(struct pte_ovfl))
struct po_page {
	struct po_page_info	pop_pgi;
	struct pte_ovfl		pop_po[NPOPPG];
};
LIST_HEAD(po_page_list, po_page) po_page_freelist;
int	po_nfree;
int	po_pcnt;
static struct pte_ovfl	*poalloc(void);
static void		pofree(struct pte_ovfl *, int);

static u_int	usedsr[NPMAPS / sizeof(u_int) / 8];

static int	pmap_initialized;

int	pte_spill(vm_offset_t);

/*
 * These small routines may have to be replaced,
 * if/when we support processors other than the 604.
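 *
 * tlbie invalidates the TLB entry for a single effective address,
 * tlbsync orders that invalidation with respect to other processors,
 * and tlbia below approximates a flush-all by stepping tlbie over a
 * range of effective addresses.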
 */
static __inline void
tlbie(vm_offset_t ea)
{

	__asm __volatile ("tlbie %0" :: "r"(ea));
}

static __inline void
tlbsync(void)
{

	__asm __volatile ("sync; tlbsync; sync");
}

static __inline void
tlbia(void)
{
	vm_offset_t	i;

	__asm __volatile ("sync");
	for (i = 0; i < (vm_offset_t)0x00040000; i += 0x00001000) {
		tlbie(i);
	}
	tlbsync();
}

static __inline int
ptesr(sr_t *sr, vm_offset_t addr)
{

	return sr[(u_int)addr >> ADDR_SR_SHFT];
}

static __inline int
pteidx(sr_t sr, vm_offset_t addr)
{
	int	hash;

	hash = (sr & SR_VSID) ^ (((u_int)addr & ADDR_PIDX) >> ADDR_PIDX_SHFT);
	return hash & ptab_mask;
}

static __inline int
ptematch(pte_t *ptp, sr_t sr, vm_offset_t va, int which)
{

	return ptp->pte_hi == (((sr & SR_VSID) << PTE_VSID_SHFT) |
	    (((u_int)va >> ADDR_API_SHFT) & PTE_API) | which);
}

static __inline struct pv_entry *
pa_to_pv(vm_offset_t pa)
{
#if 0 /* XXX */
	int	bank, pg;

	bank = vm_physseg_find(atop(pa), &pg);
	if (bank == -1)
		return NULL;
	return &vm_physmem[bank].pmseg.pvent[pg];
#endif
	return (NULL);
}

static __inline char *
pa_to_attr(vm_offset_t pa)
{
#if 0 /* XXX */
	int	bank, pg;

	bank = vm_physseg_find(atop(pa), &pg);
	if (bank == -1)
		return NULL;
	return &vm_physmem[bank].pmseg.attrs[pg];
#endif
	return (NULL);
}

/*
 * Try to insert page table entry *pt into the ptable at idx.
 *
 * Note: *pt mustn't have PTE_VALID set.
 * This is done here as required by Book III, 4.12.
 */
static int
pte_insert(int idx, pte_t *pt)
{
	pte_t	*ptp;
	int	i;

	/*
	 * First try primary hash.
	 */
	for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
		if (!(ptp->pte_hi & PTE_VALID)) {
			*ptp = *pt;
			ptp->pte_hi &= ~PTE_HID;
			__asm __volatile ("sync");
			ptp->pte_hi |= PTE_VALID;
			return 1;
		}
	}

	/*
	 * Then try secondary hash.
	 */

	idx ^= ptab_mask;

	for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
		if (!(ptp->pte_hi & PTE_VALID)) {
			*ptp = *pt;
			ptp->pte_hi |= PTE_HID;
			__asm __volatile ("sync");
			ptp->pte_hi |= PTE_VALID;
			return 1;
		}
	}

	return 0;
}

/*
 * Spill handler.
 *
 * Tries to spill a page table entry from the overflow area.
 * Note that this routine runs in real mode on a separate stack,
 * with interrupts disabled.
 */
int
pte_spill(vm_offset_t addr)
{
	int		idx, i;
	sr_t		sr;
	struct pte_ovfl	*po;
	pte_t		ps;
	pte_t		*pt;

	__asm ("mfsrin %0,%1" : "=r"(sr) : "r"(addr));
	idx = pteidx(sr, addr);
	for (po = potable[idx].lh_first; po; po = po->po_list.le_next) {
		if (ptematch(&po->po_pte, sr, addr, 0)) {
			/*
			 * Now found an entry to be spilled into the real
			 * ptable.
			 */
			if (pte_insert(idx, &po->po_pte)) {
				LIST_REMOVE(po, po_list);
				pofree(po, 0);
				return 1;
			}
			/*
			 * Have to substitute some entry. Use the primary
			 * hash for this.
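			 * One of the eight PTEs in the primary PTEG is
			 * evicted at random and the displaced entry is kept
			 * on the overflow list in its place.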
			 *
			 * Use low bits of timebase as random generator.
			 */
			__asm ("mftb %0" : "=r"(i));
			pt = ptable + idx * 8 + (i & 7);
			pt->pte_hi &= ~PTE_VALID;
			ps = *pt;
			__asm __volatile ("sync");
			tlbie(addr);
			tlbsync();
			*pt = po->po_pte;
			__asm __volatile ("sync");
			pt->pte_hi |= PTE_VALID;
			po->po_pte = ps;
			if (ps.pte_hi & PTE_HID) {
				/*
				 * We took an entry that was on the alternate
				 * hash chain, so move it to its original
				 * chain.
				 */
				po->po_pte.pte_hi &= ~PTE_HID;
				LIST_REMOVE(po, po_list);
				LIST_INSERT_HEAD(potable + (idx ^ ptab_mask),
				    po, po_list);
			}
			return 1;
		}
	}

	return 0;
}

/*
 * This is called during powerpc_init, before the system is really initialized.
 */
void
pmap_bootstrap(u_int kernelstart, u_int kernelend)
{
	struct mem_region	*mp, *mp1;
	int			cnt, i;
	u_int			s, e, sz;

	/*
	 * Get memory.
	 */
	mem_regions(&mem, &avail);
	for (mp = mem; mp->size; mp++)
		Maxmem += btoc(mp->size);

	/*
	 * Count the number of available entries.
	 */
	for (cnt = 0, mp = avail; mp->size; mp++) {
		cnt++;
	}

	/*
	 * Page align all regions.
	 * Non-page aligned memory isn't very interesting to us.
	 * Also, sort the entries for ascending addresses.
	 */
	kernelstart &= ~PAGE_MASK;
	kernelend = (kernelend + PAGE_MASK) & ~PAGE_MASK;
	for (mp = avail; mp->size; mp++) {
		s = mp->start;
		e = mp->start + mp->size;
		/*
		 * Check whether this region holds all of the kernel.
		 */
		if (s < kernelstart && e > kernelend) {
			avail[cnt].start = kernelend;
			avail[cnt++].size = e - kernelend;
			e = kernelstart;
		}
		/*
		 * Look whether this region starts within the kernel.
		 */
		if (s >= kernelstart && s < kernelend) {
			if (e <= kernelend)
				goto empty;
			s = kernelend;
		}
		/*
		 * Now look whether this region ends within the kernel.
		 */
		if (e > kernelstart && e <= kernelend) {
			if (s >= kernelstart)
				goto empty;
			e = kernelstart;
		}
		/*
		 * Now page align the start and size of the region.
		 */
		s = round_page(s);
		e = trunc_page(e);
		if (e < s) {
			e = s;
		}
		sz = e - s;
		/*
		 * Check whether some memory is left here.
		 */
		if (sz == 0) {
		empty:
			bcopy(mp + 1, mp,
			    (cnt - (mp - avail)) * sizeof *mp);
			cnt--;
			mp--;
			continue;
		}

		/*
		 * Do an insertion sort.
		 */
		npgs += btoc(sz);

		for (mp1 = avail; mp1 < mp; mp1++) {
			if (s < mp1->start) {
				break;
			}
		}

		if (mp1 < mp) {
			bcopy(mp1, mp1 + 1, (char *)mp - (char *)mp1);
			mp1->start = s;
			mp1->size = sz;
		} else {
			mp->start = s;
			mp->size = sz;
		}
	}

#ifdef HTABENTS
	ptab_cnt = HTABENTS;
#else
	ptab_cnt = (Maxmem + 1) / 2;

	/* The minimum is 1024 PTEGs. */
	if (ptab_cnt < 1024) {
		ptab_cnt = 1024;
	}

	/* Round up to power of 2. */
	__asm ("cntlzw %0,%1" : "=r"(i) : "r"(ptab_cnt - 1));
	ptab_cnt = 1 << (32 - i);
#endif

	/*
	 * Find suitably aligned memory for HTAB.
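	 * The hashed page table must start on a multiple of its own size,
	 * which is what the roundup() against HTABSIZE below provides.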
	 */
	for (mp = avail; mp->size; mp++) {
		s = roundup(mp->start, HTABSIZE) - mp->start;

		if (mp->size < s + HTABSIZE) {
			continue;
		}

		ptable = (pte_t *)(mp->start + s);

		if (mp->size == s + HTABSIZE) {
			if (s)
				mp->size = s;
			else {
				bcopy(mp + 1, mp,
				    (cnt - (mp - avail)) * sizeof *mp);
				mp = avail;
			}
			break;
		}

		if (s != 0) {
			bcopy(mp, mp + 1,
			    (cnt - (mp - avail)) * sizeof *mp);
			mp++->size = s;
			cnt++;
		}

		mp->start += s + HTABSIZE;
		mp->size -= s + HTABSIZE;
		break;
	}

	if (!mp->size) {
		panic("not enough memory?");
	}

	npgs -= btoc(HTABSIZE);
	bzero((void *)ptable, HTABSIZE);
	ptab_mask = ptab_cnt - 1;

	/*
	 * We cannot do pmap_steal_memory here,
	 * since we don't run with translation enabled yet.
	 */
	s = sizeof(struct pte_ovtab) * ptab_cnt;
	sz = round_page(s);

	for (mp = avail; mp->size; mp++) {
		if (mp->size >= sz) {
			break;
		}
	}

	if (!mp->size) {
		panic("not enough memory?");
	}

	npgs -= btoc(sz);
	potable = (struct pte_ovtab *)mp->start;
	mp->size -= sz;
	mp->start += sz;

	if (mp->size <= 0) {
		bcopy(mp + 1, mp, (cnt - (mp - avail)) * sizeof *mp);
	}

	for (i = 0; i < ptab_cnt; i++) {
		LIST_INIT(potable + i);
	}

#ifndef MSGBUFADDR
	/*
	 * allow for msgbuf
	 */
	sz = round_page(MSGBUFSIZE);
	mp = NULL;

	for (mp1 = avail; mp1->size; mp1++) {
		if (mp1->size >= sz) {
			mp = mp1;
		}
	}

	if (mp == NULL) {
		panic("not enough memory?");
	}

	npgs -= btoc(sz);
	msgbuf_paddr = mp->start + mp->size - sz;
	mp->size -= sz;

	if (mp->size <= 0) {
		bcopy(mp + 1, mp, (cnt - (mp - avail)) * sizeof *mp);
	}
#endif

	/*
	 * Initialize kernel pmap and hardware.
	 */
	kernel_pmap = &kernel_pmap_store;

	{
		int	batu, batl;

		batu = 0x80001ffe;
		batl = 0x80000012;

		__asm ("mtdbatu 1,%0; mtdbatl 1,%1" :: "r" (batu), "r" (batl));
	}


#if NPMAPS >= KERNEL_SEGMENT / 16
	usedsr[KERNEL_SEGMENT / 16 / (sizeof usedsr[0] * 8)]
	    |= 1 << ((KERNEL_SEGMENT / 16) % (sizeof usedsr[0] * 8));
#endif

#if 0 /* XXX */
	for (i = 0; i < 16; i++) {
		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT;
		__asm __volatile ("mtsrin %0,%1"
		    :: "r"(EMPTY_SEGMENT), "r"(i << ADDR_SR_SHFT));
	}
#endif

	for (i = 0; i < 16; i++) {
		int	j;

		__asm __volatile ("mfsrin %0,%1"
		    : "=r" (j)
		    : "r" (i << ADDR_SR_SHFT));

		kernel_pmap->pm_sr[i] = j;
	}

	kernel_pmap->pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
	__asm __volatile ("mtsr %0,%1"
	    :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));

	__asm __volatile ("sync; mtsdr1 %0; isync"
	    :: "r"((u_int)ptable | (ptab_mask >> 10)));

	tlbia();

	nextavail = avail->start;
	avail_start = avail->start;
	for (mp = avail, i = 0; mp->size; mp++) {
		avail_end = mp->start + mp->size;
		phys_avail[i++] = mp->start;
		phys_avail[i++] = mp->start + mp->size;
	}

	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_KERNEL_ADDRESS;
}

/*
 * Initialize anything else for pmap handling.
 * Called during vm_init().
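 *
 * This sets up the boot-time pv_entry pool; pmap_init2() later turns
 * it into a full zone with a high-water mark.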
 */
void
pmap_init(vm_offset_t phys_start, vm_offset_t phys_end)
{
	int	initial_pvs;

	/*
	 * init the pv free list
	 */
	initial_pvs = vm_page_array_size;
	if (initial_pvs < MINPV) {
		initial_pvs = MINPV;
	}
	pvzone = &pvzone_store;
	pvinit = (struct pv_entry *) kmem_alloc(kernel_map,
	    initial_pvs * sizeof(struct pv_entry));
	zbootinit(pvzone, "PV ENTRY", sizeof(struct pv_entry), pvinit,
	    vm_page_array_size);

	pmap_initialized = TRUE;
}

/*
 * Initialize a preallocated and zeroed pmap structure.
 */
void
pmap_pinit(struct pmap *pm)
{
	int	i, j;

	/*
	 * Allocate some segment registers for this pmap.
	 */
	pm->pm_refs = 1;
	for (i = 0; i < sizeof usedsr / sizeof usedsr[0]; i++) {
		if (usedsr[i] != 0xffffffff) {
			j = ffs(~usedsr[i]) - 1;
			usedsr[i] |= 1 << j;
			pm->pm_sr[0] = (i * sizeof usedsr[0] * 8 + j) * 16;
			for (i = 1; i < 16; i++) {
				pm->pm_sr[i] = pm->pm_sr[i - 1] + 1;
			}
			return;
		}
	}
	panic("out of segments");
}

void
pmap_pinit2(pmap_t pmap)
{

	/*
	 * Nothing to be done.
	 */
	return;
}

/*
 * Add a reference to the given pmap.
 */
void
pmap_reference(struct pmap *pm)
{

	pm->pm_refs++;
}

/*
 * Retire the given pmap from service.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_destroy(struct pmap *pm)
{

	if (--pm->pm_refs == 0) {
		pmap_release(pm);
		free((caddr_t)pm, M_VMPGDATA);
	}
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 */
void
pmap_release(struct pmap *pm)
{
	int	i, j;

	if (!pm->pm_sr[0]) {
		panic("pmap_release");
	}
	i = pm->pm_sr[0] / 16;
	j = i % (sizeof usedsr[0] * 8);
	i /= sizeof usedsr[0] * 8;
	usedsr[i] &= ~(1 << j);
}

/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */
void
pmap_copy(struct pmap *dst_pmap, struct pmap *src_pmap, vm_offset_t dst_addr,
    vm_size_t len, vm_offset_t src_addr)
{

	return;
}

/*
 * Garbage collects the physical map system for
 * pages which are no longer used.
 * Success need not be guaranteed -- that is, there
 * may well be pages which are not referenced, but
 * others may be collected.
 * Called by the pageout daemon when pages are scarce.
 */
void
pmap_collect(void)
{

	return;
}

/*
 * Fill the given physical page with zeroes.
 */
void
pmap_zero_page(vm_offset_t pa)
{
#if 0
	bzero((caddr_t)pa, PAGE_SIZE);
#else
	int	i;

	for (i = PAGE_SIZE/CACHELINESIZE; i > 0; i--) {
		__asm __volatile ("dcbz 0,%0" :: "r"(pa));
		pa += CACHELINESIZE;
	}
#endif
}

void
pmap_zero_page_area(vm_offset_t pa, int off, int size)
{

	bzero((caddr_t)pa + off, size);
}

/*
 * Copy the given physical source page to its destination.
 */
void
pmap_copy_page(vm_offset_t src, vm_offset_t dst)
{

	bcopy((caddr_t)src, (caddr_t)dst, PAGE_SIZE);
}

static struct pv_entry *
pmap_alloc_pv()
{
	pv_entry_count++;

	if (pv_entry_high_water &&
	    (pv_entry_count > pv_entry_high_water) &&
	    (pmap_pagedaemon_waken == 0)) {
		pmap_pagedaemon_waken = 1;
		wakeup(&vm_pages_needed);
	}

	return zalloc(pvzone);
}

static void
pmap_free_pv(struct pv_entry *pv)
{

	pv_entry_count--;
	zfree(pvzone, pv);
}

/*
 * We really hope that we don't need overflow entries
 * before the VM system is initialized!
 *
 * XXX: Should really be switched over to the zone allocator.
 */
static struct pte_ovfl *
poalloc()
{
	struct po_page	*pop;
	struct pte_ovfl	*po;
	vm_page_t	mem;
	int		i;

	if (!pmap_initialized) {
		panic("poalloc");
	}

	if (po_nfree == 0) {
		/*
		 * Since we cannot use maps for potable allocation,
		 * we have to steal some memory from the VM system. XXX
		 */
		mem = vm_page_alloc(NULL, 0, VM_ALLOC_SYSTEM);
		po_pcnt++;
		pop = (struct po_page *)VM_PAGE_TO_PHYS(mem);
		pop->pop_pgi.pgi_page = mem;
		LIST_INIT(&pop->pop_pgi.pgi_freelist);
		for (i = NPOPPG - 1, po = pop->pop_po + 1; --i >= 0; po++) {
			LIST_INSERT_HEAD(&pop->pop_pgi.pgi_freelist, po,
			    po_list);
		}
		po_nfree += pop->pop_pgi.pgi_nfree = NPOPPG - 1;
		LIST_INSERT_HEAD(&po_page_freelist, pop, pop_pgi.pgi_list);
		po = pop->pop_po;
	} else {
		po_nfree--;
		pop = po_page_freelist.lh_first;
		if (--pop->pop_pgi.pgi_nfree <= 0) {
			LIST_REMOVE(pop, pop_pgi.pgi_list);
		}
		po = pop->pop_pgi.pgi_freelist.lh_first;
		LIST_REMOVE(po, po_list);
	}

	return po;
}

static void
pofree(struct pte_ovfl *po, int freepage)
{
	struct po_page	*pop;

	pop = (struct po_page *)trunc_page((vm_offset_t)po);
	switch (++pop->pop_pgi.pgi_nfree) {
	case NPOPPG:
		if (!freepage) {
			break;
		}
		po_nfree -= NPOPPG - 1;
		po_pcnt--;
		LIST_REMOVE(pop, pop_pgi.pgi_list);
		vm_page_free(pop->pop_pgi.pgi_page);
		return;
	case 1:
		LIST_INSERT_HEAD(&po_page_freelist, pop, pop_pgi.pgi_list);
	default:
		break;
	}
	LIST_INSERT_HEAD(&pop->pop_pgi.pgi_freelist, po, po_list);
	po_nfree++;
}

/*
 * This returns whether this is the first mapping of a page.
 */
static int
pmap_enter_pv(int pteidx, vm_offset_t va, vm_offset_t pa)
{
	struct pv_entry	*pv, *npv;
	int		s, first;

	if (!pmap_initialized) {
		return 0;
	}

	s = splimp();

	pv = pa_to_pv(pa);
	first = pv->pv_idx;
	if (pv->pv_idx == -1) {
		/*
		 * No entries yet, use header as the first entry.
		 */
		pv->pv_va = va;
		pv->pv_idx = pteidx;
		pv->pv_next = NULL;
	} else {
		/*
		 * There is at least one other VA mapping this page.
		 * Place this entry after the header.
		 */
		npv = pmap_alloc_pv();
		npv->pv_va = va;
		npv->pv_idx = pteidx;
		npv->pv_next = pv->pv_next;
		pv->pv_next = npv;
	}
	splx(s);
	return first;
}

static void
pmap_remove_pv(int pteidx, vm_offset_t va, vm_offset_t pa, struct pte *pte)
{
	struct pv_entry	*pv, *npv;
	char		*attr;

	/*
	 * First transfer reference/change bits to cache.
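	 * The REF/CHG bits from the low PTE word are shifted down by
	 * ATTRSHFT and accumulated in the per-page attribute byte, so
	 * the information survives removal of the mapping.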
	 */
	attr = pa_to_attr(pa);
	if (attr == NULL) {
		return;
	}
	*attr |= (pte->pte_lo & (PTE_REF | PTE_CHG)) >> ATTRSHFT;

	/*
	 * Remove from the PV table.
	 */
	pv = pa_to_pv(pa);

	/*
	 * If it is the first entry on the list, it is actually
	 * in the header and we must copy the following entry up
	 * to the header.  Otherwise we must search the list for
	 * the entry.  In either case we free the now unused entry.
	 */
	if (pteidx == pv->pv_idx && va == pv->pv_va) {
		npv = pv->pv_next;
		if (npv) {
			*pv = *npv;
			pmap_free_pv(npv);
		} else {
			pv->pv_idx = -1;
		}
	} else {
		for (; (npv = pv->pv_next); pv = npv) {
			if (pteidx == npv->pv_idx && va == npv->pv_va) {
				break;
			}
		}
		if (npv) {
			pv->pv_next = npv->pv_next;
			pmap_free_pv(npv);
		}
#ifdef DIAGNOSTIC
		else {
			panic("pmap_remove_pv: not on list\n");
		}
#endif
	}
}

/*
 * Insert physical page at pa into the given pmap at virtual address va.
 */
void
pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t pg, vm_prot_t prot,
    boolean_t wired)
{
	sr_t			sr;
	int			idx, s;
	pte_t			pte;
	struct pte_ovfl		*po;
	struct mem_region	*mp;
	vm_offset_t		pa;

	pa = VM_PAGE_TO_PHYS(pg) & ~PAGE_MASK;

	/*
	 * Have to remove any existing mapping first.
	 */
	pmap_remove(pm, va, va + PAGE_SIZE);

	/*
	 * Compute the HTAB index.
	 */
	idx = pteidx(sr = ptesr(pm->pm_sr, va), va);
	/*
	 * Construct the PTE.
	 *
	 * Note: Don't set the valid bit for correct operation of tlb update.
	 */
	pte.pte_hi = ((sr & SR_VSID) << PTE_VSID_SHFT)
	    | ((va & ADDR_PIDX) >> ADDR_API_SHFT);
	pte.pte_lo = (pa & PTE_RPGN) | PTE_M | PTE_I | PTE_G;

	for (mp = mem; mp->size; mp++) {
		if (pa >= mp->start && pa < mp->start + mp->size) {
			pte.pte_lo &= ~(PTE_I | PTE_G);
			break;
		}
	}
	if (prot & VM_PROT_WRITE) {
		pte.pte_lo |= PTE_RW;
	} else {
		pte.pte_lo |= PTE_RO;
	}

	/*
	 * Now record mapping for later back-translation.
	 */
	if (pmap_initialized && (pg->flags & PG_FICTITIOUS) == 0) {
		if (pmap_enter_pv(idx, va, pa)) {
			/*
			 * Flush the real memory from the cache.
			 */
			__syncicache((void *)pa, PAGE_SIZE);
		}
	}

	s = splimp();
	pm->pm_stats.resident_count++;
	/*
	 * Try to insert directly into HTAB.
	 */
	if (pte_insert(idx, &pte)) {
		splx(s);
		return;
	}

	/*
	 * Have to allocate overflow entry.
	 *
	 * Note that we must use real addresses for these.
	 */
	po = poalloc();
	po->po_pte = pte;
	LIST_INSERT_HEAD(potable + idx, po, po_list);
	splx(s);
}

void
pmap_kenter(vm_offset_t va, vm_offset_t pa)
{
	struct vm_page	pg;

	pg.phys_addr = pa;
	pmap_enter(kernel_pmap, va, &pg, VM_PROT_READ|VM_PROT_WRITE, TRUE);
}

void
pmap_kremove(vm_offset_t va)
{
	pmap_remove(kernel_pmap, va, va + PAGE_SIZE);
}

/*
 * Remove the given range of mapping entries.
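 *
 * For each page both hash buckets and the overflow list are searched,
 * so every copy of a mapping is invalidated.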
 */
void
pmap_remove(struct pmap *pm, vm_offset_t va, vm_offset_t endva)
{
	int		idx, i, s;
	sr_t		sr;
	pte_t		*ptp;
	struct pte_ovfl	*po, *npo;

	s = splimp();
	while (va < endva) {
		idx = pteidx(sr = ptesr(pm->pm_sr, va), va);
		for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
			if (ptematch(ptp, sr, va, PTE_VALID)) {
				pmap_remove_pv(idx, va, ptp->pte_lo, ptp);
				ptp->pte_hi &= ~PTE_VALID;
				__asm __volatile ("sync");
				tlbie(va);
				tlbsync();
				pm->pm_stats.resident_count--;
			}
		}
		for (ptp = ptable + (idx ^ ptab_mask) * 8, i = 8; --i >= 0;
		    ptp++) {
			if (ptematch(ptp, sr, va, PTE_VALID | PTE_HID)) {
				pmap_remove_pv(idx, va, ptp->pte_lo, ptp);
				ptp->pte_hi &= ~PTE_VALID;
				__asm __volatile ("sync");
				tlbie(va);
				tlbsync();
				pm->pm_stats.resident_count--;
			}
		}
		for (po = potable[idx].lh_first; po; po = npo) {
			npo = po->po_list.le_next;
			if (ptematch(&po->po_pte, sr, va, 0)) {
				pmap_remove_pv(idx, va, po->po_pte.pte_lo,
				    &po->po_pte);
				LIST_REMOVE(po, po_list);
				pofree(po, 1);
				pm->pm_stats.resident_count--;
			}
		}
		va += PAGE_SIZE;
	}
	splx(s);
}

static pte_t *
pte_find(struct pmap *pm, vm_offset_t va)
{
	int		idx, i;
	sr_t		sr;
	pte_t		*ptp;
	struct pte_ovfl	*po;

	idx = pteidx(sr = ptesr(pm->pm_sr, va), va);
	for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
		if (ptematch(ptp, sr, va, PTE_VALID)) {
			return ptp;
		}
	}
	for (ptp = ptable + (idx ^ ptab_mask) * 8, i = 8; --i >= 0; ptp++) {
		if (ptematch(ptp, sr, va, PTE_VALID | PTE_HID)) {
			return ptp;
		}
	}
	for (po = potable[idx].lh_first; po; po = po->po_list.le_next) {
		if (ptematch(&po->po_pte, sr, va, 0)) {
			return &po->po_pte;
		}
	}
	return 0;
}

/*
 * Get the physical page address for the given pmap/virtual address.
 */
vm_offset_t
pmap_extract(pmap_t pm, vm_offset_t va)
{
	pte_t	*ptp;
	int	s;

	s = splimp();

	if (!(ptp = pte_find(pm, va))) {
		splx(s);
		return (0);
	}
	splx(s);
	return ((ptp->pte_lo & PTE_RPGN) | (va & ADDR_POFF));
}

/*
 * Lower the protection on the specified range of this pmap.
 *
 * There are only two cases: either the protection is going to 0,
 * or it is going to read-only.
 */
void
pmap_protect(struct pmap *pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{
	pte_t	*ptp;
	int	valid, s;

	if (prot & VM_PROT_READ) {
		s = splimp();
		while (sva < eva) {
			ptp = pte_find(pm, sva);
			if (ptp) {
				valid = ptp->pte_hi & PTE_VALID;
				ptp->pte_hi &= ~PTE_VALID;
				__asm __volatile ("sync");
				tlbie(sva);
				tlbsync();
				ptp->pte_lo &= ~PTE_PP;
				ptp->pte_lo |= PTE_RO;
				__asm __volatile ("sync");
				ptp->pte_hi |= valid;
			}
			sva += PAGE_SIZE;
		}
		splx(s);
		return;
	}
	pmap_remove(pm, sva, eva);
}

boolean_t
ptemodify(vm_page_t pg, u_int mask, u_int val)
{
	vm_offset_t	pa;
	struct pv_entry	*pv;
	pte_t		*ptp;
	struct pte_ovfl	*po;
	int		i, s;
	char		*attr;
	int		rv;

	pa = VM_PAGE_TO_PHYS(pg);

	/*
	 * First modify bits in cache.
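	 * The same change is then applied, under splimp(), to every
	 * mapping of the page, both in the hash table and on the
	 * overflow lists.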
	 */
	attr = pa_to_attr(pa);
	if (attr == NULL) {
		return FALSE;
	}

	*attr &= ~mask >> ATTRSHFT;
	*attr |= val >> ATTRSHFT;

	pv = pa_to_pv(pa);
	if (pv->pv_idx < 0) {
		return FALSE;
	}

	rv = FALSE;
	s = splimp();
	for (; pv; pv = pv->pv_next) {
		for (ptp = ptable + pv->pv_idx * 8, i = 8; --i >= 0; ptp++) {
			if ((ptp->pte_hi & PTE_VALID)
			    && (ptp->pte_lo & PTE_RPGN) == pa) {
				ptp->pte_hi &= ~PTE_VALID;
				__asm __volatile ("sync");
				tlbie(pv->pv_va);
				tlbsync();
				rv |= ptp->pte_lo & mask;
				ptp->pte_lo &= ~mask;
				ptp->pte_lo |= val;
				__asm __volatile ("sync");
				ptp->pte_hi |= PTE_VALID;
			}
		}
		for (ptp = ptable + (pv->pv_idx ^ ptab_mask) * 8, i = 8;
		    --i >= 0; ptp++) {
			if ((ptp->pte_hi & PTE_VALID)
			    && (ptp->pte_lo & PTE_RPGN) == pa) {
				ptp->pte_hi &= ~PTE_VALID;
				__asm __volatile ("sync");
				tlbie(pv->pv_va);
				tlbsync();
				rv |= ptp->pte_lo & mask;
				ptp->pte_lo &= ~mask;
				ptp->pte_lo |= val;
				__asm __volatile ("sync");
				ptp->pte_hi |= PTE_VALID;
			}
		}
		for (po = potable[pv->pv_idx].lh_first; po;
		    po = po->po_list.le_next) {
			if ((po->po_pte.pte_lo & PTE_RPGN) == pa) {
				rv |= po->po_pte.pte_lo & mask;
				po->po_pte.pte_lo &= ~mask;
				po->po_pte.pte_lo |= val;
			}
		}
	}
	splx(s);
	return rv != 0;
}

int
ptebits(vm_page_t pg, int bit)
{
	struct pv_entry	*pv;
	pte_t		*ptp;
	struct pte_ovfl	*po;
	int		i, s, bits;
	char		*attr;
	vm_offset_t	pa;

	bits = 0;
	pa = VM_PAGE_TO_PHYS(pg);

	/*
	 * First try the cache.
	 */
	attr = pa_to_attr(pa);
	if (attr == NULL) {
		return 0;
	}
	bits |= (*attr << ATTRSHFT) & bit;
	if (bits == bit) {
		return bits;
	}

	pv = pa_to_pv(pa);
	if (pv->pv_idx < 0) {
		return 0;
	}

	s = splimp();
	for (; pv; pv = pv->pv_next) {
		for (ptp = ptable + pv->pv_idx * 8, i = 8; --i >= 0; ptp++) {
			if ((ptp->pte_hi & PTE_VALID)
			    && (ptp->pte_lo & PTE_RPGN) == pa) {
				bits |= ptp->pte_lo & bit;
				if (bits == bit) {
					splx(s);
					return bits;
				}
			}
		}
		for (ptp = ptable + (pv->pv_idx ^ ptab_mask) * 8, i = 8;
		    --i >= 0; ptp++) {
			if ((ptp->pte_hi & PTE_VALID)
			    && (ptp->pte_lo & PTE_RPGN) == pa) {
				bits |= ptp->pte_lo & bit;
				if (bits == bit) {
					splx(s);
					return bits;
				}
			}
		}
		for (po = potable[pv->pv_idx].lh_first; po;
		    po = po->po_list.le_next) {
			if ((po->po_pte.pte_lo & PTE_RPGN) == pa) {
				bits |= po->po_pte.pte_lo & bit;
				if (bits == bit) {
					splx(s);
					return bits;
				}
			}
		}
	}
	splx(s);
	return bits;
}

/*
 * Lower the protection on the specified physical page.
 *
 * There are only two cases: either the protection is going to 0,
 * or it is going to read-only.
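 * The read-only case is delegated to ptemodify(); going to 0 tears
 * down every remaining mapping of the page.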
 */
void
pmap_page_protect(vm_page_t m, vm_prot_t prot)
{
	vm_offset_t	pa;
	vm_offset_t	va;
	pte_t		*ptp;
	struct pte_ovfl	*po, *npo;
	int		i, s, idx;
	struct pv_entry	*pv;

	pa = VM_PAGE_TO_PHYS(m);

	pa &= ~ADDR_POFF;
	if (prot & VM_PROT_READ) {
		ptemodify(m, PTE_PP, PTE_RO);
		return;
	}

	pv = pa_to_pv(pa);
	if (pv == NULL) {
		return;
	}

	s = splimp();
	while (pv->pv_idx >= 0) {
		idx = pv->pv_idx;
		va = pv->pv_va;
		for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
			if ((ptp->pte_hi & PTE_VALID)
			    && (ptp->pte_lo & PTE_RPGN) == pa) {
				pmap_remove_pv(idx, va, pa, ptp);
				ptp->pte_hi &= ~PTE_VALID;
				__asm __volatile ("sync");
				tlbie(va);
				tlbsync();
				goto next;
			}
		}
		for (ptp = ptable + (idx ^ ptab_mask) * 8, i = 8; --i >= 0;
		    ptp++) {
			if ((ptp->pte_hi & PTE_VALID)
			    && (ptp->pte_lo & PTE_RPGN) == pa) {
				pmap_remove_pv(idx, va, pa, ptp);
				ptp->pte_hi &= ~PTE_VALID;
				__asm __volatile ("sync");
				tlbie(va);
				tlbsync();
				goto next;
			}
		}
		for (po = potable[idx].lh_first; po; po = npo) {
			npo = po->po_list.le_next;
			if ((po->po_pte.pte_lo & PTE_RPGN) == pa) {
				pmap_remove_pv(idx, va, pa, &po->po_pte);
				LIST_REMOVE(po, po_list);
				pofree(po, 1);
				goto next;
			}
		}
	next:
		;
	}
	splx(s);
}

/*
 * Activate the address space for the specified process.  If the process
 * is the current process, load the new MMU context.
 */
void
pmap_activate(struct proc *p)
{
	struct pcb	*pcb;
	pmap_t		pmap;
	pmap_t		rpm;
	int		psl, i, ksr, seg;

	pcb = &p->p_addr->u_pcb;
	pmap = p->p_vmspace->vm_map.pmap;

	/*
	 * XXX Normally performed in cpu_fork().
	 */
	if (pcb->pcb_pm != pmap) {
		pcb->pcb_pm = pmap;
		(vm_offset_t) pcb->pcb_pmreal = pmap_extract(kernel_pmap,
		    (vm_offset_t)pcb->pcb_pm);
	}

	if (p == curproc) {
		/* Disable interrupts while switching. */
		psl = mfmsr();
		mtmsr(psl & ~PSL_EE);

#if 0 /* XXX */
		/* Store pointer to new current pmap. */
		curpm = pcb->pcb_pmreal;
#endif

		/* Save kernel SR. */
		__asm __volatile("mfsr %0,14" : "=r"(ksr) :);

		/*
		 * Set new segment registers.  We use the pmap's real
		 * address to avoid accessibility problems.
		 */
		rpm = pcb->pcb_pmreal;
		for (i = 0; i < 16; i++) {
			seg = rpm->pm_sr[i];
			__asm __volatile("mtsrin %0,%1"
			    :: "r"(seg), "r"(i << ADDR_SR_SHFT));
		}

		/* Restore kernel SR. */
		__asm __volatile("mtsr 14,%0" :: "r"(ksr));

		/* Interrupts are OK again. */
		mtmsr(psl);
	}
}

/*
 * Add a list of wired pages to the kva.  This routine is only used
 * for temporary kernel mappings that do not need to have page
 * modification or references recorded.  Note that old mappings are
 * simply written over.  The page *must* be wired.
 */
void
pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
{
	int	i;

	for (i = 0; i < count; i++) {
		vm_offset_t tva = va + i * PAGE_SIZE;
		pmap_kenter(tva, VM_PAGE_TO_PHYS(m[i]));
	}
}

/*
 * this routine jerks page mappings from the
 * kernel -- it is meant only for temporary mappings.
 */
void
pmap_qremove(vm_offset_t va, int count)
{
	vm_offset_t	end_va;

	end_va = va + count * PAGE_SIZE;

	while (va < end_va) {
		unsigned *pte;

		pte = (unsigned *)vtopte(va);
		*pte = 0;
		tlbie(va);
		va += PAGE_SIZE;
	}
}

/*
 * pmap_ts_referenced:
 *
 *	Return the count of reference bits for a page, clearing all of them.
 */
int
pmap_ts_referenced(vm_page_t m)
{

	/* XXX: coming soon... */
	return (0);
}

/*
 * this routine returns true if a physical page resides
 * in the given pmap.
 */
boolean_t
pmap_page_exists(pmap_t pmap, vm_page_t m)
{
#if 0 /* XXX: This must go! */
	register pv_entry_t pv;
	int s;

	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
		return FALSE;

	s = splvm();

	/*
	 * Not found, check current mappings returning immediately if found.
	 */
	for (pv = pv_table; pv; pv = pv->pv_next) {
		if (pv->pv_pmap == pmap) {
			splx(s);
			return TRUE;
		}
	}
	splx(s);
#endif
	return (FALSE);
}

/*
 * Used to map a range of physical addresses into kernel
 * virtual address space.
 *
 * For now, VM is already on, we only need to map the
 * specified memory.
 */
vm_offset_t
pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
{
	vm_offset_t	sva, va;

	sva = *virt;
	va = sva;

	while (start < end) {
		pmap_kenter(va, start);
		va += PAGE_SIZE;
		start += PAGE_SIZE;
	}

	*virt = va;
	return (sva);
}

vm_offset_t
pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
{

	return (addr);
}

int
pmap_mincore(pmap_t pmap, vm_offset_t addr)
{

	/* XXX: coming soon... */
	return (0);
}

void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
    vm_pindex_t pindex, vm_size_t size, int limit)
{

	/* XXX: coming soon... */
	return;
}

void
pmap_growkernel(vm_offset_t addr)
{

	/* XXX: coming soon... */
	return;
}

/*
 * Initialize the address space (zone) for the pv_entries.  Set a
 * high water mark so that the system can recover from excessive
 * numbers of pv entries.
 */
void
pmap_init2()
{
	int	shpgperproc = PMAP_SHPGPERPROC;

	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
	pv_entry_max = shpgperproc * maxproc + vm_page_array_size;
	pv_entry_high_water = 9 * (pv_entry_max / 10);
	zinitna(pvzone, &pvzone_obj, NULL, 0, pv_entry_max, ZONE_INTERRUPT, 1);
}

void
pmap_swapin_proc(struct proc *p)
{

	/* XXX: coming soon... */
	return;
}

void
pmap_swapout_proc(struct proc *p)
{

	/* XXX: coming soon... */
	return;
}

void
pmap_pageable(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, boolean_t pageable)
{

	return;
}

void
pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
{

	/* XXX: coming soon... */
	return;
}

void
pmap_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
{

	/* XXX: coming soon... */
	return;
}

void
pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{

	/* XXX: coming soon... */
	return;
}

void
pmap_pinit0(pmap_t pmap)
{

	/* XXX: coming soon... */
	return;
}

void
pmap_dispose_proc(struct proc *p)
{

	/* XXX: coming soon... */
	return;
}

vm_offset_t
pmap_steal_memory(vm_size_t size)
{
	vm_size_t	bank_size;
	vm_offset_t	pa;

	size = round_page(size);

	bank_size = phys_avail[1] - phys_avail[0];
	while (size > bank_size) {
		int i;
		for (i = 0; phys_avail[i + 2]; i += 2) {
			phys_avail[i] = phys_avail[i + 2];
			phys_avail[i + 1] = phys_avail[i + 3];
		}
		phys_avail[i] = 0;
		phys_avail[i + 1] = 0;
		if (!phys_avail[0])
			panic("pmap_steal_memory: out of memory");
		bank_size = phys_avail[1] - phys_avail[0];
	}

	pa = phys_avail[0];
	phys_avail[0] += size;

	bzero((caddr_t) pa, size);
	return pa;
}

/*
 * Create the UPAGES for a new process.
 * This routine directly affects the fork perf for a process.
 */
void
pmap_new_proc(struct proc *p)
{
	int		i;
	vm_object_t	upobj;
	vm_page_t	m;
	struct user	*up;
	pte_t		pte;
	sr_t		sr;
	int		idx;

	/*
	 * allocate object for the upages
	 */
	if ((upobj = p->p_upages_obj) == NULL) {
		upobj = vm_object_allocate(OBJT_DEFAULT, UPAGES);
		p->p_upages_obj = upobj;
	}

	/* get a kernel virtual address for the UPAGES for this proc */
	if ((up = p->p_addr) == NULL) {
		up = (struct user *) kmem_alloc_nofault(kernel_map,
		    UPAGES * PAGE_SIZE);
		if (up == NULL)
			panic("pmap_new_proc: u_map allocation failed");
		p->p_addr = up;
	}

	for (i = 0; i < UPAGES; i++) {
		vm_offset_t va;

		/*
		 * Get a kernel stack page.
		 */
		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);

		/*
		 * Wire the page.
		 */
		m->wire_count++;
		cnt.v_wire_count++;

		/*
		 * Enter the page into the kernel address space.
		 */
		va = (vm_offset_t)up + i * PAGE_SIZE;
		idx = pteidx(sr = ptesr(kernel_pmap->pm_sr, va), va);

		pte.pte_hi = ((sr & SR_VSID) << PTE_VSID_SHFT)
		    | ((va & ADDR_PIDX) >> ADDR_API_SHFT);
		pte.pte_lo = (VM_PAGE_TO_PHYS(m) & PTE_RPGN) | PTE_M | PTE_I |
		    PTE_G | PTE_RW;

		if (!pte_insert(idx, &pte)) {
			struct pte_ovfl *po;

			po = poalloc();
			po->po_pte = pte;
			LIST_INSERT_HEAD(potable + idx, po, po_list);
		}

		tlbie(va);

		vm_page_wakeup(m);
		vm_page_flag_clear(m, PG_ZERO);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
		m->valid = VM_PAGE_BITS_ALL;
	}
}