1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #pragma ident "%Z%%M% %I% %E% SMI" 28 29 #include <sys/types.h> 30 #include <sys/sysmacros.h> 31 #include <sys/kmem.h> 32 #include <sys/atomic.h> 33 #include <sys/bitmap.h> 34 #include <sys/machparam.h> 35 #include <sys/machsystm.h> 36 #include <sys/mman.h> 37 #include <sys/systm.h> 38 #include <sys/cpuvar.h> 39 #include <sys/thread.h> 40 #include <sys/proc.h> 41 #include <sys/cpu.h> 42 #include <sys/kmem.h> 43 #include <sys/disp.h> 44 #include <sys/vmem.h> 45 #include <sys/vmsystm.h> 46 #include <sys/promif.h> 47 #include <sys/var.h> 48 #include <sys/x86_archext.h> 49 #include <sys/archsystm.h> 50 #include <sys/bootconf.h> 51 #include <sys/dumphdr.h> 52 #include <vm/seg_kmem.h> 53 #include <vm/seg_kpm.h> 54 #include <vm/hat.h> 55 #include <vm/hat_i86.h> 56 #include <sys/cmn_err.h> 57 58 #include <sys/bootinfo.h> 59 #include <vm/kboot_mmu.h> 60 61 static void x86pte_zero(htable_t *dest, uint_t entry, uint_t count); 62 63 kmem_cache_t *htable_cache; 64 65 /* 66 * The variable htable_reserve_amount, rather than HTABLE_RESERVE_AMOUNT, 67 * is used in order to facilitate testing of the htable_steal() code. 68 * By resetting htable_reserve_amount to a lower value, we can force 69 * stealing to occur. The reserve amount is a guess to get us through boot. 70 */ 71 #define HTABLE_RESERVE_AMOUNT (200) 72 uint_t htable_reserve_amount = HTABLE_RESERVE_AMOUNT; 73 kmutex_t htable_reserve_mutex; 74 uint_t htable_reserve_cnt; 75 htable_t *htable_reserve_pool; 76 77 /* 78 * Used to hand test htable_steal(). 79 */ 80 #ifdef DEBUG 81 ulong_t force_steal = 0; 82 ulong_t ptable_cnt = 0; 83 #endif 84 85 /* 86 * This variable is so that we can tune this via /etc/system 87 * Any value works, but a power of two <= mmu.ptes_per_table is best. 
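 * For example, a line like the following in /etc/system would raise it
 * (the value 16 here is purely illustrative):
 *
 *	set htable_steal_passes = 16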
88 */ 89 uint_t htable_steal_passes = 8; 90 91 /* 92 * mutex stuff for access to htable hash 93 */ 94 #define NUM_HTABLE_MUTEX 128 95 kmutex_t htable_mutex[NUM_HTABLE_MUTEX]; 96 #define HTABLE_MUTEX_HASH(h) ((h) & (NUM_HTABLE_MUTEX - 1)) 97 98 #define HTABLE_ENTER(h) mutex_enter(&htable_mutex[HTABLE_MUTEX_HASH(h)]); 99 #define HTABLE_EXIT(h) mutex_exit(&htable_mutex[HTABLE_MUTEX_HASH(h)]); 100 101 /* 102 * forward declarations 103 */ 104 static void link_ptp(htable_t *higher, htable_t *new, uintptr_t vaddr); 105 static void unlink_ptp(htable_t *higher, htable_t *old, uintptr_t vaddr); 106 static void htable_free(htable_t *ht); 107 static x86pte_t *x86pte_access_pagetable(htable_t *ht, uint_t index); 108 static void x86pte_release_pagetable(htable_t *ht); 109 static x86pte_t x86pte_cas(htable_t *ht, uint_t entry, x86pte_t old, 110 x86pte_t new); 111 112 /* 113 * A counter to track if we are stealing or reaping htables. When non-zero 114 * htable_free() will directly free htables (either to the reserve or kmem) 115 * instead of putting them in a hat's htable cache. 116 */ 117 uint32_t htable_dont_cache = 0; 118 119 /* 120 * Track the number of active pagetables, so we can know how many to reap 121 */ 122 static uint32_t active_ptables = 0; 123 124 /* 125 * Allocate a memory page for a hardware page table. 126 * 127 * A wrapper around page_get_physical(), with some extra checks. 128 */ 129 static pfn_t 130 ptable_alloc(uintptr_t seed) 131 { 132 pfn_t pfn; 133 page_t *pp; 134 135 pfn = PFN_INVALID; 136 atomic_add_32(&active_ptables, 1); 137 138 /* 139 * The first check is to see if there is memory in the system. If we 140 * drop to throttlefree, then fail the ptable_alloc() and let the 141 * stealing code kick in. Note that we have to do this test here, 142 * since the test in page_create_throttle() would let the NOSLEEP 143 * allocation go through and deplete the page reserves. 144 * 145 * The !NOMEMWAIT() lets pageout, fsflush, etc. skip this check. 146 */ 147 if (!NOMEMWAIT() && freemem <= throttlefree + 1) 148 return (PFN_INVALID); 149 150 #ifdef DEBUG 151 /* 152 * This code makes htable_steal() easier to test. By setting 153 * force_steal we force pagetable allocations to fall 154 * into the stealing code. Roughly 1 in ever "force_steal" 155 * page table allocations will fail. 156 */ 157 if (proc_pageout != NULL && force_steal > 1 && 158 ++ptable_cnt > force_steal) { 159 ptable_cnt = 0; 160 return (PFN_INVALID); 161 } 162 #endif /* DEBUG */ 163 164 pp = page_get_physical(seed); 165 if (pp == NULL) 166 return (PFN_INVALID); 167 pfn = pp->p_pagenum; 168 page_downgrade(pp); 169 ASSERT(PAGE_SHARED(pp)); 170 171 if (pfn == PFN_INVALID) 172 panic("ptable_alloc(): Invalid PFN!!"); 173 HATSTAT_INC(hs_ptable_allocs); 174 return (pfn); 175 } 176 177 /* 178 * Free an htable's associated page table page. See the comments 179 * for ptable_alloc(). 180 */ 181 static void 182 ptable_free(pfn_t pfn) 183 { 184 page_t *pp = page_numtopp_nolock(pfn); 185 186 /* 187 * need to destroy the page used for the pagetable 188 */ 189 ASSERT(pfn != PFN_INVALID); 190 HATSTAT_INC(hs_ptable_frees); 191 atomic_add_32(&active_ptables, -1); 192 if (pp == NULL) 193 panic("ptable_free(): no page for pfn!"); 194 ASSERT(PAGE_SHARED(pp)); 195 ASSERT(pfn == pp->p_pagenum); 196 197 /* 198 * Get an exclusive lock, might have to wait for a kmem reader. 199 */ 200 if (!page_tryupgrade(pp)) { 201 page_unlock(pp); 202 /* 203 * RFE: we could change this to not loop forever 204 * George Cameron had some idea on how to do that. 
205 * For now looping works - it's just like sfmmu. 206 */ 207 while (!page_lock(pp, SE_EXCL, (kmutex_t *)NULL, P_RECLAIM)) 208 continue; 209 } 210 page_free(pp, 1); 211 page_unresv(1); 212 } 213 214 /* 215 * Put one htable on the reserve list. 216 */ 217 static void 218 htable_put_reserve(htable_t *ht) 219 { 220 ht->ht_hat = NULL; /* no longer tied to a hat */ 221 ASSERT(ht->ht_pfn == PFN_INVALID); 222 HATSTAT_INC(hs_htable_rputs); 223 mutex_enter(&htable_reserve_mutex); 224 ht->ht_next = htable_reserve_pool; 225 htable_reserve_pool = ht; 226 ++htable_reserve_cnt; 227 mutex_exit(&htable_reserve_mutex); 228 } 229 230 /* 231 * Take one htable from the reserve. 232 */ 233 static htable_t * 234 htable_get_reserve(void) 235 { 236 htable_t *ht = NULL; 237 238 mutex_enter(&htable_reserve_mutex); 239 if (htable_reserve_cnt != 0) { 240 ht = htable_reserve_pool; 241 ASSERT(ht != NULL); 242 ASSERT(ht->ht_pfn == PFN_INVALID); 243 htable_reserve_pool = ht->ht_next; 244 --htable_reserve_cnt; 245 HATSTAT_INC(hs_htable_rgets); 246 } 247 mutex_exit(&htable_reserve_mutex); 248 return (ht); 249 } 250 251 /* 252 * Allocate initial htables and put them on the reserve list 253 */ 254 void 255 htable_initial_reserve(uint_t count) 256 { 257 htable_t *ht; 258 259 count += HTABLE_RESERVE_AMOUNT; 260 while (count > 0) { 261 ht = kmem_cache_alloc(htable_cache, KM_NOSLEEP); 262 ASSERT(ht != NULL); 263 264 ASSERT(use_boot_reserve); 265 ht->ht_pfn = PFN_INVALID; 266 htable_put_reserve(ht); 267 --count; 268 } 269 } 270 271 /* 272 * Readjust the reserves after a thread finishes using them. 273 */ 274 void 275 htable_adjust_reserve() 276 { 277 htable_t *ht; 278 279 /* 280 * Free any excess htables in the reserve list 281 */ 282 while (htable_reserve_cnt > htable_reserve_amount && 283 !USE_HAT_RESERVES()) { 284 ht = htable_get_reserve(); 285 if (ht == NULL) 286 return; 287 ASSERT(ht->ht_pfn == PFN_INVALID); 288 kmem_cache_free(htable_cache, ht); 289 } 290 } 291 292 293 /* 294 * This routine steals htables from user processes for htable_alloc() or 295 * for htable_reap(). 296 */ 297 static htable_t * 298 htable_steal(uint_t cnt) 299 { 300 hat_t *hat = kas.a_hat; /* list starts with khat */ 301 htable_t *list = NULL; 302 htable_t *ht; 303 htable_t *higher; 304 uint_t h; 305 uint_t h_start; 306 static uint_t h_seed = 0; 307 uint_t e; 308 uintptr_t va; 309 x86pte_t pte; 310 uint_t stolen = 0; 311 uint_t pass; 312 uint_t threshold; 313 314 /* 315 * Limit htable_steal_passes to something reasonable 316 */ 317 if (htable_steal_passes == 0) 318 htable_steal_passes = 1; 319 if (htable_steal_passes > mmu.ptes_per_table) 320 htable_steal_passes = mmu.ptes_per_table; 321 322 /* 323 * Loop through all user hats. The 1st pass takes cached htables that 324 * aren't in use. The later passes steal by removing mappings, too. 325 */ 326 atomic_add_32(&htable_dont_cache, 1); 327 for (pass = 0; pass <= htable_steal_passes && stolen < cnt; ++pass) { 328 threshold = pass * mmu.ptes_per_table / htable_steal_passes; 329 hat = kas.a_hat; 330 for (;;) { 331 332 /* 333 * Clear the victim flag and move to next hat 334 */ 335 mutex_enter(&hat_list_lock); 336 if (hat != kas.a_hat) { 337 hat->hat_flags &= ~HAT_VICTIM; 338 cv_broadcast(&hat_list_cv); 339 } 340 hat = hat->hat_next; 341 342 /* 343 * Skip any hat that is already being stolen from. 344 * 345 * We skip SHARED hats, as these are dummy 346 * hats that host ISM shared page tables. 347 * 348 * We also skip if HAT_FREEING because hat_pte_unmap() 349 * won't zero out the PTE's. 
That would lead to hitting 350 * stale PTEs either here or under hat_unload() when we 351 * steal and unload the same page table in competing 352 * threads. 353 */ 354 while (hat != NULL && 355 (hat->hat_flags & 356 (HAT_VICTIM | HAT_SHARED | HAT_FREEING)) != 0) 357 hat = hat->hat_next; 358 359 if (hat == NULL) { 360 mutex_exit(&hat_list_lock); 361 break; 362 } 363 364 /* 365 * Are we finished? 366 */ 367 if (stolen == cnt) { 368 /* 369 * Try to spread the pain of stealing, 370 * move victim HAT to the end of the HAT list. 371 */ 372 if (pass >= 1 && cnt == 1 && 373 kas.a_hat->hat_prev != hat) { 374 375 /* unlink victim hat */ 376 if (hat->hat_prev) 377 hat->hat_prev->hat_next = 378 hat->hat_next; 379 else 380 kas.a_hat->hat_next = 381 hat->hat_next; 382 if (hat->hat_next) 383 hat->hat_next->hat_prev = 384 hat->hat_prev; 385 else 386 kas.a_hat->hat_prev = 387 hat->hat_prev; 388 389 390 /* relink at end of hat list */ 391 hat->hat_next = NULL; 392 hat->hat_prev = kas.a_hat->hat_prev; 393 if (hat->hat_prev) 394 hat->hat_prev->hat_next = hat; 395 else 396 kas.a_hat->hat_next = hat; 397 kas.a_hat->hat_prev = hat; 398 399 } 400 401 mutex_exit(&hat_list_lock); 402 break; 403 } 404 405 /* 406 * Mark the HAT as a stealing victim. 407 */ 408 hat->hat_flags |= HAT_VICTIM; 409 mutex_exit(&hat_list_lock); 410 411 /* 412 * Take any htables from the hat's cached "free" list. 413 */ 414 hat_enter(hat); 415 while ((ht = hat->hat_ht_cached) != NULL && 416 stolen < cnt) { 417 hat->hat_ht_cached = ht->ht_next; 418 ht->ht_next = list; 419 list = ht; 420 ++stolen; 421 } 422 hat_exit(hat); 423 424 /* 425 * Don't steal on first pass. 426 */ 427 if (pass == 0 || stolen == cnt) 428 continue; 429 430 /* 431 * Search the active htables for one to steal. 432 * Start at a different hash bucket every time to 433 * help spread the pain of stealing. 434 */ 435 h = h_start = h_seed++ % hat->hat_num_hash; 436 do { 437 higher = NULL; 438 HTABLE_ENTER(h); 439 for (ht = hat->hat_ht_hash[h]; ht; 440 ht = ht->ht_next) { 441 442 /* 443 * Can we rule out reaping? 444 */ 445 if (ht->ht_busy != 0 || 446 (ht->ht_flags & HTABLE_SHARED_PFN)|| 447 ht->ht_level > 0 || 448 ht->ht_valid_cnt > threshold || 449 ht->ht_lock_cnt != 0) 450 continue; 451 452 /* 453 * Increment busy so the htable can't 454 * disappear. We drop the htable mutex 455 * to avoid deadlocks with 456 * hat_pageunload() and the hment mutex 457 * while we call hat_pte_unmap() 458 */ 459 ++ht->ht_busy; 460 HTABLE_EXIT(h); 461 462 /* 463 * Try stealing. 464 * - unload and invalidate all PTEs 465 */ 466 for (e = 0, va = ht->ht_vaddr; 467 e < HTABLE_NUM_PTES(ht) && 468 ht->ht_valid_cnt > 0 && 469 ht->ht_busy == 1 && 470 ht->ht_lock_cnt == 0; 471 ++e, va += MMU_PAGESIZE) { 472 pte = x86pte_get(ht, e); 473 if (!PTE_ISVALID(pte)) 474 continue; 475 hat_pte_unmap(ht, e, 476 HAT_UNLOAD, pte, NULL); 477 } 478 479 /* 480 * Reacquire htable lock. If we didn't 481 * remove all mappings in the table, 482 * or another thread added a new mapping 483 * behind us, give up on this table. 484 */ 485 HTABLE_ENTER(h); 486 if (ht->ht_busy != 1 || 487 ht->ht_valid_cnt != 0 || 488 ht->ht_lock_cnt != 0) { 489 --ht->ht_busy; 490 continue; 491 } 492 493 /* 494 * Steal it and unlink the page table. 
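					 * (unlink_ptp() below clears the PTP
					 * entry in the parent pagetable, then
					 * the htable is taken off its hash
					 * bucket and put on the stolen list.)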
					 */
					higher = ht->ht_parent;
					unlink_ptp(higher, ht, ht->ht_vaddr);

					/*
					 * remove from the hash list
					 */
					if (ht->ht_next)
						ht->ht_next->ht_prev =
						    ht->ht_prev;

					if (ht->ht_prev) {
						ht->ht_prev->ht_next =
						    ht->ht_next;
					} else {
						ASSERT(hat->hat_ht_hash[h] ==
						    ht);
						hat->hat_ht_hash[h] =
						    ht->ht_next;
					}

					/*
					 * Break to outer loop to release the
					 * higher (ht_parent) pagetable. This
					 * spreads out the pain caused by
					 * pagefaults.
					 */
					ht->ht_next = list;
					list = ht;
					++stolen;
					break;
				}
				HTABLE_EXIT(h);
				if (higher != NULL)
					htable_release(higher);
				if (++h == hat->hat_num_hash)
					h = 0;
			} while (stolen < cnt && h != h_start);
		}
	}
	atomic_add_32(&htable_dont_cache, -1);
	return (list);
}


/*
 * This is invoked from kmem when the system is low on memory. We try
 * to free hments, htables, and ptables to improve the memory situation.
 */
/*ARGSUSED*/
static void
htable_reap(void *handle)
{
	uint_t		reap_cnt;
	htable_t	*list;
	htable_t	*ht;

	HATSTAT_INC(hs_reap_attempts);
	if (!can_steal_post_boot)
		return;

	/*
	 * Try to reap 5% of the page tables bounded by a maximum of
	 * 5% of physmem and a minimum of 10.
	 */
	reap_cnt = MAX(MIN(physmem / 20, active_ptables / 20), 10);

	/*
	 * Let htable_steal() do the work, we just call htable_free()
	 */
	list = htable_steal(reap_cnt);
	while ((ht = list) != NULL) {
		list = ht->ht_next;
		HATSTAT_INC(hs_reaped);
		htable_free(ht);
	}

	/*
	 * Free up excess reserves
	 */
	htable_adjust_reserve();
	hment_adjust_reserve();
}

/*
 * Allocate an htable, stealing one or using the reserve if necessary
 */
static htable_t *
htable_alloc(
	hat_t		*hat,
	uintptr_t	vaddr,
	level_t		level,
	htable_t	*shared)
{
	htable_t	*ht = NULL;
	uint_t		is_vlp;
	uint_t		is_bare = 0;
	uint_t		need_to_zero = 1;
	int		kmflags = (can_steal_post_boot ? KM_NOSLEEP : KM_SLEEP);

	if (level < 0 || level > TOP_LEVEL(hat))
		panic("htable_alloc(): level %d out of range\n", level);

	is_vlp = (hat->hat_flags & HAT_VLP) && level == VLP_LEVEL;
	if (is_vlp || shared != NULL)
		is_bare = 1;

	/*
	 * First reuse a cached htable from the hat_ht_cached field, this
	 * avoids unnecessary trips through kmem/page allocators.
	 */
	if (hat->hat_ht_cached != NULL && !is_bare) {
		hat_enter(hat);
		ht = hat->hat_ht_cached;
		if (ht != NULL) {
			hat->hat_ht_cached = ht->ht_next;
			need_to_zero = 0;
			/* XX64 ASSERT() they're all zero somehow */
			ASSERT(ht->ht_pfn != PFN_INVALID);
		}
		hat_exit(hat);
	}

	if (ht == NULL) {
		/*
		 * Allocate an htable, possibly refilling the reserves.
		 */
		if (USE_HAT_RESERVES()) {
			ht = htable_get_reserve();
		} else {
			/*
			 * Donate successful htable allocations to the reserve.
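			 * (The loop below keeps allocating; each htable is
			 * pushed onto the reserve via htable_put_reserve()
			 * until the reserve holds htable_reserve_amount
			 * entries, and the final allocation is kept for
			 * this caller.)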
627 */ 628 for (;;) { 629 ht = kmem_cache_alloc(htable_cache, kmflags); 630 if (ht == NULL) 631 break; 632 ht->ht_pfn = PFN_INVALID; 633 if (USE_HAT_RESERVES() || 634 htable_reserve_cnt >= htable_reserve_amount) 635 break; 636 htable_put_reserve(ht); 637 } 638 } 639 640 /* 641 * allocate a page for the hardware page table if needed 642 */ 643 if (ht != NULL && !is_bare) { 644 ht->ht_hat = hat; 645 ht->ht_pfn = ptable_alloc((uintptr_t)ht); 646 if (ht->ht_pfn == PFN_INVALID) { 647 if (USE_HAT_RESERVES()) 648 htable_put_reserve(ht); 649 else 650 kmem_cache_free(htable_cache, ht); 651 ht = NULL; 652 } 653 } 654 } 655 656 /* 657 * If allocations failed, kick off a kmem_reap() and resort to 658 * htable steal(). We may spin here if the system is very low on 659 * memory. If the kernel itself has consumed all memory and kmem_reap() 660 * can't free up anything, then we'll really get stuck here. 661 * That should only happen in a system where the administrator has 662 * misconfigured VM parameters via /etc/system. 663 */ 664 while (ht == NULL && can_steal_post_boot) { 665 kmem_reap(); 666 ht = htable_steal(1); 667 HATSTAT_INC(hs_steals); 668 669 /* 670 * If we stole for a bare htable, release the pagetable page. 671 */ 672 if (ht != NULL) { 673 if (is_bare) { 674 ptable_free(ht->ht_pfn); 675 ht->ht_pfn = PFN_INVALID; 676 } 677 } 678 } 679 680 /* 681 * All attempts to allocate or steal failed. This should only happen 682 * if we run out of memory during boot, due perhaps to a huge 683 * boot_archive. At this point there's no way to continue. 684 */ 685 if (ht == NULL) 686 panic("htable_alloc(): couldn't steal\n"); 687 688 /* 689 * Shared page tables have all entries locked and entries may not 690 * be added or deleted. 691 */ 692 ht->ht_flags = 0; 693 if (shared != NULL) { 694 ASSERT(level == 0); 695 ASSERT(shared->ht_valid_cnt > 0); 696 ht->ht_flags |= HTABLE_SHARED_PFN; 697 ht->ht_pfn = shared->ht_pfn; 698 ht->ht_lock_cnt = 0; 699 ht->ht_valid_cnt = 0; /* updated in hat_share() */ 700 ht->ht_shares = shared; 701 need_to_zero = 0; 702 } else { 703 ht->ht_shares = NULL; 704 ht->ht_lock_cnt = 0; 705 ht->ht_valid_cnt = 0; 706 } 707 708 /* 709 * setup flags, etc. for VLP htables 710 */ 711 if (is_vlp) { 712 ht->ht_flags |= HTABLE_VLP; 713 ASSERT(ht->ht_pfn == PFN_INVALID); 714 need_to_zero = 0; 715 } 716 717 /* 718 * fill in the htable 719 */ 720 ht->ht_hat = hat; 721 ht->ht_parent = NULL; 722 ht->ht_vaddr = vaddr; 723 ht->ht_level = level; 724 ht->ht_busy = 1; 725 ht->ht_next = NULL; 726 ht->ht_prev = NULL; 727 728 /* 729 * Zero out any freshly allocated page table 730 */ 731 if (need_to_zero) 732 x86pte_zero(ht, 0, mmu.ptes_per_table); 733 734 return (ht); 735 } 736 737 /* 738 * Free up an htable, either to a hat's cached list, the reserves or 739 * back to kmem. 740 */ 741 static void 742 htable_free(htable_t *ht) 743 { 744 hat_t *hat = ht->ht_hat; 745 746 /* 747 * If the process isn't exiting, cache the free htable in the hat 748 * structure. We always do this for the boot reserve. We don't 749 * do this if the hat is exiting or we are stealing/reaping htables. 750 */ 751 if (hat != NULL && 752 !(ht->ht_flags & HTABLE_SHARED_PFN) && 753 (use_boot_reserve || 754 (!(hat->hat_flags & HAT_FREEING) && !htable_dont_cache))) { 755 ASSERT((ht->ht_flags & HTABLE_VLP) == 0); 756 ASSERT(ht->ht_pfn != PFN_INVALID); 757 hat_enter(hat); 758 ht->ht_next = hat->hat_ht_cached; 759 hat->hat_ht_cached = ht; 760 hat_exit(hat); 761 return; 762 } 763 764 /* 765 * If we have a hardware page table, free it. 
	 * We don't free page tables that are accessed by sharing.
	 */
	if (ht->ht_flags & HTABLE_SHARED_PFN) {
		ASSERT(ht->ht_pfn != PFN_INVALID);
	} else if (!(ht->ht_flags & HTABLE_VLP)) {
		ptable_free(ht->ht_pfn);
	}
	ht->ht_pfn = PFN_INVALID;

	/*
	 * Free htables or put into reserves.
	 */
	if (USE_HAT_RESERVES() || htable_reserve_cnt < htable_reserve_amount) {
		htable_put_reserve(ht);
	} else {
		kmem_cache_free(htable_cache, ht);
		htable_adjust_reserve();
	}
}


/*
 * This is called when a hat is being destroyed or swapped out. We reap all
 * the remaining htables in the hat cache. If the hat is being destroyed, all
 * left over htables are also destroyed.
 *
 * We also don't need to invalidate any of the PTPs nor do any demapping.
 */
void
htable_purge_hat(hat_t *hat)
{
	htable_t	*ht;
	int		h;

	/*
	 * Purge the htable cache if just reaping.
	 */
	if (!(hat->hat_flags & HAT_FREEING)) {
		atomic_add_32(&htable_dont_cache, 1);
		for (;;) {
			hat_enter(hat);
			ht = hat->hat_ht_cached;
			if (ht == NULL) {
				hat_exit(hat);
				break;
			}
			hat->hat_ht_cached = ht->ht_next;
			hat_exit(hat);
			htable_free(ht);
		}
		atomic_add_32(&htable_dont_cache, -1);
		return;
	}

	/*
	 * if freeing, no locking is needed
	 */
	while ((ht = hat->hat_ht_cached) != NULL) {
		hat->hat_ht_cached = ht->ht_next;
		htable_free(ht);
	}

	/*
	 * walk thru the htable hash table and free all the htables in it.
	 */
	for (h = 0; h < hat->hat_num_hash; ++h) {
		while ((ht = hat->hat_ht_hash[h]) != NULL) {
			if (ht->ht_next)
				ht->ht_next->ht_prev = ht->ht_prev;

			if (ht->ht_prev) {
				ht->ht_prev->ht_next = ht->ht_next;
			} else {
				ASSERT(hat->hat_ht_hash[h] == ht);
				hat->hat_ht_hash[h] = ht->ht_next;
			}
			htable_free(ht);
		}
	}
}

/*
 * Unlink an entry for a table at vaddr and level out of the existing table
 * one level higher. We are always holding the HASH_ENTER() when doing this.
 */
static void
unlink_ptp(htable_t *higher, htable_t *old, uintptr_t vaddr)
{
	uint_t		entry = htable_va2entry(vaddr, higher);
	x86pte_t	expect = MAKEPTP(old->ht_pfn, old->ht_level);
	x86pte_t	found;

	ASSERT(higher->ht_busy > 0);
	ASSERT(higher->ht_valid_cnt > 0);
	ASSERT(old->ht_valid_cnt == 0);
	found = x86pte_cas(higher, entry, expect, 0);
	if (found != expect)
		panic("Bad PTP found=" FMT_PTE ", expected=" FMT_PTE,
		    found, expect);
	HTABLE_DEC(higher->ht_valid_cnt);
}

/*
 * Link an entry for a new table at vaddr and level into the existing table
 * one level higher. We are always holding the HASH_ENTER() when doing this.
 */
static void
link_ptp(htable_t *higher, htable_t *new, uintptr_t vaddr)
{
	uint_t		entry = htable_va2entry(vaddr, higher);
	x86pte_t	newptp = MAKEPTP(new->ht_pfn, new->ht_level);
	x86pte_t	found;

	ASSERT(higher->ht_busy > 0);

	ASSERT(new->ht_level != mmu.max_level);

	HTABLE_INC(higher->ht_valid_cnt);

	found = x86pte_cas(higher, entry, 0, newptp);
	if ((found & ~PT_REF) != 0)
		panic("HAT: ptp not 0, found=" FMT_PTE, found);
}

/*
 * Release of hold on an htable. If this is the last use and the pagetable
 * is empty we may want to free it, then recursively look at the pagetable
 * above it. The recursion is handled by the outer while() loop.
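 *
 * A typical caller pattern (an illustrative sketch, not lifted from any
 * particular caller) pairs the hold from htable_lookup() with a release:
 *
 *	ht = htable_lookup(hat, vaddr, level);
 *	if (ht != NULL) {
 *		pte = x86pte_get(ht, htable_va2entry(vaddr, ht));
 *		... examine or update the PTE ...
 *		htable_release(ht);
 *	}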
894 */ 895 void 896 htable_release(htable_t *ht) 897 { 898 uint_t hashval; 899 htable_t *shared; 900 htable_t *higher; 901 hat_t *hat; 902 uintptr_t va; 903 level_t level; 904 905 while (ht != NULL) { 906 shared = NULL; 907 for (;;) { 908 hat = ht->ht_hat; 909 va = ht->ht_vaddr; 910 level = ht->ht_level; 911 hashval = HTABLE_HASH(hat, va, level); 912 913 /* 914 * The common case is that this isn't the last use of 915 * an htable so we don't want to free the htable. 916 */ 917 HTABLE_ENTER(hashval); 918 ASSERT(ht->ht_lock_cnt == 0 || ht->ht_valid_cnt > 0); 919 ASSERT(ht->ht_valid_cnt >= 0); 920 ASSERT(ht->ht_busy > 0); 921 if (ht->ht_valid_cnt > 0) 922 break; 923 if (ht->ht_busy > 1) 924 break; 925 926 /* 927 * we always release empty shared htables 928 */ 929 if (!(ht->ht_flags & HTABLE_SHARED_PFN)) { 930 931 /* 932 * don't release if in address space tear down 933 */ 934 if (hat->hat_flags & HAT_FREEING) 935 break; 936 937 /* 938 * At and above max_page_level, free if it's for 939 * a boot-time kernel mapping below kernelbase. 940 */ 941 if (level >= mmu.max_page_level && 942 (hat != kas.a_hat || va >= kernelbase)) 943 break; 944 } 945 946 /* 947 * Remember if we destroy an htable that shares its PFN 948 * from elsewhere. 949 */ 950 if (ht->ht_flags & HTABLE_SHARED_PFN) { 951 ASSERT(ht->ht_level == 0); 952 ASSERT(shared == NULL); 953 shared = ht->ht_shares; 954 HATSTAT_INC(hs_htable_unshared); 955 } 956 957 /* 958 * Handle release of a table and freeing the htable_t. 959 * Unlink it from the table higher (ie. ht_parent). 960 */ 961 ASSERT(ht->ht_lock_cnt == 0); 962 higher = ht->ht_parent; 963 ASSERT(higher != NULL); 964 965 /* 966 * Unlink the pagetable. 967 */ 968 unlink_ptp(higher, ht, va); 969 970 /* 971 * When any top level VLP page table entry changes, we 972 * must issue a reload of cr3 on all processors. 973 */ 974 if ((hat->hat_flags & HAT_VLP) && 975 level == VLP_LEVEL - 1) 976 hat_tlb_inval(hat, DEMAP_ALL_ADDR); 977 978 /* 979 * remove this htable from its hash list 980 */ 981 if (ht->ht_next) 982 ht->ht_next->ht_prev = ht->ht_prev; 983 984 if (ht->ht_prev) { 985 ht->ht_prev->ht_next = ht->ht_next; 986 } else { 987 ASSERT(hat->hat_ht_hash[hashval] == ht); 988 hat->hat_ht_hash[hashval] = ht->ht_next; 989 } 990 HTABLE_EXIT(hashval); 991 htable_free(ht); 992 ht = higher; 993 } 994 995 ASSERT(ht->ht_busy >= 1); 996 --ht->ht_busy; 997 HTABLE_EXIT(hashval); 998 999 /* 1000 * If we released a shared htable, do a release on the htable 1001 * from which it shared 1002 */ 1003 ht = shared; 1004 } 1005 } 1006 1007 /* 1008 * Find the htable for the pagetable at the given level for the given address. 1009 * If found acquires a hold that eventually needs to be htable_release()d 1010 */ 1011 htable_t * 1012 htable_lookup(hat_t *hat, uintptr_t vaddr, level_t level) 1013 { 1014 uintptr_t base; 1015 uint_t hashval; 1016 htable_t *ht = NULL; 1017 1018 ASSERT(level >= 0); 1019 ASSERT(level <= TOP_LEVEL(hat)); 1020 1021 if (level == TOP_LEVEL(hat)) 1022 base = 0; 1023 else 1024 base = vaddr & LEVEL_MASK(level + 1); 1025 1026 hashval = HTABLE_HASH(hat, base, level); 1027 HTABLE_ENTER(hashval); 1028 for (ht = hat->hat_ht_hash[hashval]; ht; ht = ht->ht_next) { 1029 if (ht->ht_hat == hat && 1030 ht->ht_vaddr == base && 1031 ht->ht_level == level) 1032 break; 1033 } 1034 if (ht) 1035 ++ht->ht_busy; 1036 1037 HTABLE_EXIT(hashval); 1038 return (ht); 1039 } 1040 1041 /* 1042 * Acquires a hold on a known htable (from a locked hment entry). 
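 * As with htable_lookup(), the hold must eventually be dropped with
 * htable_release().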
1043 */ 1044 void 1045 htable_acquire(htable_t *ht) 1046 { 1047 hat_t *hat = ht->ht_hat; 1048 level_t level = ht->ht_level; 1049 uintptr_t base = ht->ht_vaddr; 1050 uint_t hashval = HTABLE_HASH(hat, base, level); 1051 1052 HTABLE_ENTER(hashval); 1053 #ifdef DEBUG 1054 /* 1055 * make sure the htable is there 1056 */ 1057 { 1058 htable_t *h; 1059 1060 for (h = hat->hat_ht_hash[hashval]; 1061 h && h != ht; 1062 h = h->ht_next) 1063 ; 1064 ASSERT(h == ht); 1065 } 1066 #endif /* DEBUG */ 1067 ++ht->ht_busy; 1068 HTABLE_EXIT(hashval); 1069 } 1070 1071 /* 1072 * Find the htable for the pagetable at the given level for the given address. 1073 * If found acquires a hold that eventually needs to be htable_release()d 1074 * If not found the table is created. 1075 * 1076 * Since we can't hold a hash table mutex during allocation, we have to 1077 * drop it and redo the search on a create. Then we may have to free the newly 1078 * allocated htable if another thread raced in and created it ahead of us. 1079 */ 1080 htable_t * 1081 htable_create( 1082 hat_t *hat, 1083 uintptr_t vaddr, 1084 level_t level, 1085 htable_t *shared) 1086 { 1087 uint_t h; 1088 level_t l; 1089 uintptr_t base; 1090 htable_t *ht; 1091 htable_t *higher = NULL; 1092 htable_t *new = NULL; 1093 1094 if (level < 0 || level > TOP_LEVEL(hat)) 1095 panic("htable_create(): level %d out of range\n", level); 1096 1097 /* 1098 * Create the page tables in top down order. 1099 */ 1100 for (l = TOP_LEVEL(hat); l >= level; --l) { 1101 new = NULL; 1102 if (l == TOP_LEVEL(hat)) 1103 base = 0; 1104 else 1105 base = vaddr & LEVEL_MASK(l + 1); 1106 1107 h = HTABLE_HASH(hat, base, l); 1108 try_again: 1109 /* 1110 * look up the htable at this level 1111 */ 1112 HTABLE_ENTER(h); 1113 if (l == TOP_LEVEL(hat)) { 1114 ht = hat->hat_htable; 1115 } else { 1116 for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) { 1117 ASSERT(ht->ht_hat == hat); 1118 if (ht->ht_vaddr == base && 1119 ht->ht_level == l) 1120 break; 1121 } 1122 } 1123 1124 /* 1125 * if we found the htable, increment its busy cnt 1126 * and if we had allocated a new htable, free it. 1127 */ 1128 if (ht != NULL) { 1129 /* 1130 * If we find a pre-existing shared table, it must 1131 * share from the same place. 1132 */ 1133 if (l == level && shared && ht->ht_shares && 1134 ht->ht_shares != shared) { 1135 panic("htable shared from wrong place " 1136 "found htable=%p shared=%p", ht, shared); 1137 } 1138 ++ht->ht_busy; 1139 HTABLE_EXIT(h); 1140 if (new) 1141 htable_free(new); 1142 if (higher != NULL) 1143 htable_release(higher); 1144 higher = ht; 1145 1146 /* 1147 * if we didn't find it on the first search 1148 * allocate a new one and search again 1149 */ 1150 } else if (new == NULL) { 1151 HTABLE_EXIT(h); 1152 new = htable_alloc(hat, base, l, 1153 l == level ? shared : NULL); 1154 goto try_again; 1155 1156 /* 1157 * 2nd search and still not there, use "new" table 1158 * Link new table into higher, when not at top level. 1159 */ 1160 } else { 1161 ht = new; 1162 if (higher != NULL) { 1163 link_ptp(higher, ht, base); 1164 ht->ht_parent = higher; 1165 1166 /* 1167 * When any top level VLP page table changes, 1168 * we must reload cr3 on all processors. 
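 * (The conditional hat_tlb_inval(hat, DEMAP_ALL_ADDR) below is what
 * forces that.)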
1169 */ 1170 #ifdef __i386 1171 if (mmu.pae_hat && 1172 #else /* !__i386 */ 1173 if ((hat->hat_flags & HAT_VLP) && 1174 #endif /* __i386 */ 1175 l == VLP_LEVEL - 1) 1176 hat_tlb_inval(hat, DEMAP_ALL_ADDR); 1177 } 1178 ht->ht_next = hat->hat_ht_hash[h]; 1179 ASSERT(ht->ht_prev == NULL); 1180 if (hat->hat_ht_hash[h]) 1181 hat->hat_ht_hash[h]->ht_prev = ht; 1182 hat->hat_ht_hash[h] = ht; 1183 HTABLE_EXIT(h); 1184 1185 /* 1186 * Note we don't do htable_release(higher). 1187 * That happens recursively when "new" is removed by 1188 * htable_release() or htable_steal(). 1189 */ 1190 higher = ht; 1191 1192 /* 1193 * If we just created a new shared page table we 1194 * increment the shared htable's busy count, so that 1195 * it can't be the victim of a steal even if it's empty. 1196 */ 1197 if (l == level && shared) { 1198 (void) htable_lookup(shared->ht_hat, 1199 shared->ht_vaddr, shared->ht_level); 1200 HATSTAT_INC(hs_htable_shared); 1201 } 1202 } 1203 } 1204 1205 return (ht); 1206 } 1207 1208 /* 1209 * Inherit initial pagetables from the boot program. 1210 */ 1211 void 1212 htable_attach( 1213 hat_t *hat, 1214 uintptr_t base, 1215 level_t level, 1216 htable_t *parent, 1217 pfn_t pfn) 1218 { 1219 htable_t *ht; 1220 uint_t h; 1221 uint_t i; 1222 x86pte_t pte; 1223 x86pte_t *ptep; 1224 page_t *pp; 1225 extern page_t *boot_claim_page(pfn_t); 1226 1227 ht = htable_get_reserve(); 1228 if (level == mmu.max_level) 1229 kas.a_hat->hat_htable = ht; 1230 ht->ht_hat = hat; 1231 ht->ht_parent = parent; 1232 ht->ht_vaddr = base; 1233 ht->ht_level = level; 1234 ht->ht_busy = 1; 1235 ht->ht_next = NULL; 1236 ht->ht_prev = NULL; 1237 ht->ht_flags = 0; 1238 ht->ht_pfn = pfn; 1239 ht->ht_lock_cnt = 0; 1240 ht->ht_valid_cnt = 0; 1241 if (parent != NULL) 1242 ++parent->ht_busy; 1243 1244 h = HTABLE_HASH(hat, base, level); 1245 HTABLE_ENTER(h); 1246 ht->ht_next = hat->hat_ht_hash[h]; 1247 ASSERT(ht->ht_prev == NULL); 1248 if (hat->hat_ht_hash[h]) 1249 hat->hat_ht_hash[h]->ht_prev = ht; 1250 hat->hat_ht_hash[h] = ht; 1251 HTABLE_EXIT(h); 1252 1253 /* 1254 * make sure the page table physical page is not FREE 1255 */ 1256 if (page_resv(1, KM_NOSLEEP) == 0) 1257 panic("page_resv() failed in ptable alloc"); 1258 1259 pp = boot_claim_page(pfn); 1260 ASSERT(pp != NULL); 1261 page_downgrade(pp); 1262 /* 1263 * Record in the page_t that is a pagetable for segkpm setup. 1264 */ 1265 if (kpm_vbase) 1266 pp->p_index = 1; 1267 1268 /* 1269 * Count valid mappings and recursively attach lower level pagetables. 1270 */ 1271 ptep = kbm_remap_window(pfn_to_pa(pfn), 0); 1272 for (i = 0; i < HTABLE_NUM_PTES(ht); ++i) { 1273 if (mmu.pae_hat) 1274 pte = ptep[i]; 1275 else 1276 pte = ((x86pte32_t *)ptep)[i]; 1277 if (!IN_HYPERVISOR_VA(base) && PTE_ISVALID(pte)) { 1278 ++ht->ht_valid_cnt; 1279 if (!PTE_ISPAGE(pte, level)) { 1280 htable_attach(hat, base, level - 1, 1281 ht, PTE2PFN(pte, level)); 1282 ptep = kbm_remap_window(pfn_to_pa(pfn), 0); 1283 } 1284 } 1285 base += LEVEL_SIZE(level); 1286 if (base == mmu.hole_start) 1287 base = (mmu.hole_end + MMU_PAGEOFFSET) & MMU_PAGEMASK; 1288 } 1289 1290 /* 1291 * As long as all the mappings we had were below kernel base 1292 * we can release the htable. 1293 */ 1294 if (base < kernelbase) 1295 htable_release(ht); 1296 } 1297 1298 /* 1299 * Walk through a given htable looking for the first valid entry. This 1300 * routine takes both a starting and ending address. 
The starting address 1301 * is required to be within the htable provided by the caller, but there is 1302 * no such restriction on the ending address. 1303 * 1304 * If the routine finds a valid entry in the htable (at or beyond the 1305 * starting address), the PTE (and its address) will be returned. 1306 * This PTE may correspond to either a page or a pagetable - it is the 1307 * caller's responsibility to determine which. If no valid entry is 1308 * found, 0 (and invalid PTE) and the next unexamined address will be 1309 * returned. 1310 * 1311 * The loop has been carefully coded for optimization. 1312 */ 1313 static x86pte_t 1314 htable_scan(htable_t *ht, uintptr_t *vap, uintptr_t eaddr) 1315 { 1316 uint_t e; 1317 x86pte_t found_pte = (x86pte_t)0; 1318 caddr_t pte_ptr; 1319 caddr_t end_pte_ptr; 1320 int l = ht->ht_level; 1321 uintptr_t va = *vap & LEVEL_MASK(l); 1322 size_t pgsize = LEVEL_SIZE(l); 1323 1324 ASSERT(va >= ht->ht_vaddr); 1325 ASSERT(va <= HTABLE_LAST_PAGE(ht)); 1326 1327 /* 1328 * Compute the starting index and ending virtual address 1329 */ 1330 e = htable_va2entry(va, ht); 1331 1332 /* 1333 * The following page table scan code knows that the valid 1334 * bit of a PTE is in the lowest byte AND that x86 is little endian!! 1335 */ 1336 pte_ptr = (caddr_t)x86pte_access_pagetable(ht, 0); 1337 end_pte_ptr = (caddr_t)PT_INDEX_PTR(pte_ptr, HTABLE_NUM_PTES(ht)); 1338 pte_ptr = (caddr_t)PT_INDEX_PTR((x86pte_t *)pte_ptr, e); 1339 while (!PTE_ISVALID(*pte_ptr)) { 1340 va += pgsize; 1341 if (va >= eaddr) 1342 break; 1343 pte_ptr += mmu.pte_size; 1344 ASSERT(pte_ptr <= end_pte_ptr); 1345 if (pte_ptr == end_pte_ptr) 1346 break; 1347 } 1348 1349 /* 1350 * if we found a valid PTE, load the entire PTE 1351 */ 1352 if (va < eaddr && pte_ptr != end_pte_ptr) 1353 found_pte = GET_PTE((x86pte_t *)pte_ptr); 1354 x86pte_release_pagetable(ht); 1355 1356 #if defined(__amd64) 1357 /* 1358 * deal with VA hole on amd64 1359 */ 1360 if (l == mmu.max_level && va >= mmu.hole_start && va <= mmu.hole_end) 1361 va = mmu.hole_end + va - mmu.hole_start; 1362 #endif /* __amd64 */ 1363 1364 *vap = va; 1365 return (found_pte); 1366 } 1367 1368 /* 1369 * Find the address and htable for the first populated translation at or 1370 * above the given virtual address. The caller may also specify an upper 1371 * limit to the address range to search. Uses level information to quickly 1372 * skip unpopulated sections of virtual address spaces. 1373 * 1374 * If not found returns NULL. When found, returns the htable and virt addr 1375 * and has a hold on the htable. 1376 */ 1377 x86pte_t 1378 htable_walk( 1379 struct hat *hat, 1380 htable_t **htp, 1381 uintptr_t *vaddr, 1382 uintptr_t eaddr) 1383 { 1384 uintptr_t va = *vaddr; 1385 htable_t *ht; 1386 htable_t *prev = *htp; 1387 level_t l; 1388 level_t max_mapped_level; 1389 x86pte_t pte; 1390 1391 ASSERT(eaddr > va); 1392 1393 /* 1394 * If this is a user address, then we know we need not look beyond 1395 * kernelbase. 1396 */ 1397 ASSERT(hat == kas.a_hat || eaddr <= kernelbase || 1398 eaddr == HTABLE_WALK_TO_END); 1399 if (hat != kas.a_hat && eaddr == HTABLE_WALK_TO_END) 1400 eaddr = kernelbase; 1401 1402 /* 1403 * If we're coming in with a previous page table, search it first 1404 * without doing an htable_lookup(), this should be frequent. 
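 * (Callers that iterate over a VA range hand the htable from the previous
 * call back in through *htp, so this fast path is expected to hit most of
 * the time.)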
1405 */ 1406 if (prev) { 1407 ASSERT(prev->ht_busy > 0); 1408 ASSERT(prev->ht_vaddr <= va); 1409 l = prev->ht_level; 1410 if (va <= HTABLE_LAST_PAGE(prev)) { 1411 pte = htable_scan(prev, &va, eaddr); 1412 1413 if (PTE_ISPAGE(pte, l)) { 1414 *vaddr = va; 1415 *htp = prev; 1416 return (pte); 1417 } 1418 } 1419 1420 /* 1421 * We found nothing in the htable provided by the caller, 1422 * so fall through and do the full search 1423 */ 1424 htable_release(prev); 1425 } 1426 1427 /* 1428 * Find the level of the largest pagesize used by this HAT. 1429 */ 1430 max_mapped_level = 0; 1431 for (l = 1; l <= mmu.max_page_level; ++l) 1432 if (hat->hat_pages_mapped[l] != 0) 1433 max_mapped_level = l; 1434 1435 while (va < eaddr && va >= *vaddr) { 1436 ASSERT(!IN_VA_HOLE(va)); 1437 1438 /* 1439 * Find lowest table with any entry for given address. 1440 */ 1441 for (l = 0; l <= TOP_LEVEL(hat); ++l) { 1442 ht = htable_lookup(hat, va, l); 1443 if (ht != NULL) { 1444 pte = htable_scan(ht, &va, eaddr); 1445 if (PTE_ISPAGE(pte, l)) { 1446 *vaddr = va; 1447 *htp = ht; 1448 return (pte); 1449 } 1450 htable_release(ht); 1451 break; 1452 } 1453 1454 /* 1455 * The ht is never NULL at the top level since 1456 * the top level htable is created in hat_alloc(). 1457 */ 1458 ASSERT(l < TOP_LEVEL(hat)); 1459 1460 /* 1461 * No htable covers the address. If there is no 1462 * larger page size that could cover it, we 1463 * skip to the start of the next page table. 1464 */ 1465 if (l >= max_mapped_level) { 1466 va = NEXT_ENTRY_VA(va, l + 1); 1467 break; 1468 } 1469 } 1470 } 1471 1472 *vaddr = 0; 1473 *htp = NULL; 1474 return (0); 1475 } 1476 1477 /* 1478 * Find the htable and page table entry index of the given virtual address 1479 * with pagesize at or below given level. 1480 * If not found returns NULL. When found, returns the htable, sets 1481 * entry, and has a hold on the htable. 1482 */ 1483 htable_t * 1484 htable_getpte( 1485 struct hat *hat, 1486 uintptr_t vaddr, 1487 uint_t *entry, 1488 x86pte_t *pte, 1489 level_t level) 1490 { 1491 htable_t *ht; 1492 level_t l; 1493 uint_t e; 1494 1495 ASSERT(level <= mmu.max_page_level); 1496 1497 for (l = 0; l <= level; ++l) { 1498 ht = htable_lookup(hat, vaddr, l); 1499 if (ht == NULL) 1500 continue; 1501 e = htable_va2entry(vaddr, ht); 1502 if (entry != NULL) 1503 *entry = e; 1504 if (pte != NULL) 1505 *pte = x86pte_get(ht, e); 1506 return (ht); 1507 } 1508 return (NULL); 1509 } 1510 1511 /* 1512 * Find the htable and page table entry index of the given virtual address. 1513 * There must be a valid page mapped at the given address. 1514 * If not found returns NULL. When found, returns the htable, sets 1515 * entry, and has a hold on the htable. 1516 */ 1517 htable_t * 1518 htable_getpage(struct hat *hat, uintptr_t vaddr, uint_t *entry) 1519 { 1520 htable_t *ht; 1521 uint_t e; 1522 x86pte_t pte; 1523 1524 ht = htable_getpte(hat, vaddr, &e, &pte, mmu.max_page_level); 1525 if (ht == NULL) 1526 return (NULL); 1527 1528 if (entry) 1529 *entry = e; 1530 1531 if (PTE_ISPAGE(pte, ht->ht_level)) 1532 return (ht); 1533 htable_release(ht); 1534 return (NULL); 1535 } 1536 1537 1538 void 1539 htable_init() 1540 { 1541 /* 1542 * To save on kernel VA usage, we avoid debug information in 32 bit 1543 * kernels. 
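	 * (Hence KMC_NODEBUG is added to the cache flags only for the
	 * 32 bit __i386 kernel below.)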
	 */
#if defined(__amd64)
	int	kmem_flags = KMC_NOHASH;
#elif defined(__i386)
	int	kmem_flags = KMC_NOHASH | KMC_NODEBUG;
#endif

	/*
	 * initialize kmem caches
	 */
	htable_cache = kmem_cache_create("htable_t",
	    sizeof (htable_t), 0, NULL, NULL,
	    htable_reap, NULL, hat_memload_arena, kmem_flags);
}

/*
 * get the pte index for the virtual address in the given htable's pagetable
 */
uint_t
htable_va2entry(uintptr_t va, htable_t *ht)
{
	level_t	l = ht->ht_level;

	ASSERT(va >= ht->ht_vaddr);
	ASSERT(va <= HTABLE_LAST_PAGE(ht));
	return ((va >> LEVEL_SHIFT(l)) & (HTABLE_NUM_PTES(ht) - 1));
}

/*
 * Given an htable and the index of a pte in it, return the virtual address
 * of the page.
 */
uintptr_t
htable_e2va(htable_t *ht, uint_t entry)
{
	level_t	l = ht->ht_level;
	uintptr_t va;

	ASSERT(entry < HTABLE_NUM_PTES(ht));
	va = ht->ht_vaddr + ((uintptr_t)entry << LEVEL_SHIFT(l));

	/*
	 * Need to skip over any VA hole in top level table
	 */
#if defined(__amd64)
	if (ht->ht_level == mmu.max_level && va >= mmu.hole_start)
		va += ((mmu.hole_end - mmu.hole_start) + 1);
#endif

	return (va);
}

/*
 * The code uses compare and swap instructions to read/write PTE's to
 * avoid atomicity problems, since PTEs can be 8 bytes on 32 bit systems,
 * where a plain load or store of a PTE would not naturally be atomic.
 *
 * The combination of using kpreempt_disable()/_enable() and the hci_mutex
 * is used to ensure that an interrupt won't overwrite a temporary mapping
 * while it's in use. If an interrupt thread tries to access a PTE, it will
 * yield briefly back to the pinned thread which holds the cpu's hci_mutex.
 */
void
x86pte_cpu_init(cpu_t *cpu)
{
	struct hat_cpu_info *hci;

	hci = kmem_zalloc(sizeof (*hci), KM_SLEEP);
	mutex_init(&hci->hci_mutex, NULL, MUTEX_DEFAULT, NULL);
	cpu->cpu_hat_info = hci;
}

void
x86pte_cpu_fini(cpu_t *cpu)
{
	struct hat_cpu_info *hci = cpu->cpu_hat_info;

	kmem_free(hci, sizeof (*hci));
	cpu->cpu_hat_info = NULL;
}

#ifdef __i386
/*
 * On 32 bit kernels, loading a 64 bit PTE is a little tricky
 */
x86pte_t
get_pte64(x86pte_t *ptr)
{
	volatile uint32_t *p = (uint32_t *)ptr;
	x86pte_t t;

	ASSERT(mmu.pae_hat != 0);
	for (;;) {
		t = p[0];
		t |= (uint64_t)p[1] << 32;
		if ((t & 0xffffffff) == p[0])
			return (t);
	}
}
#endif /* __i386 */

/*
 * Disable preemption and establish a mapping to the pagetable with the
 * given pfn. This is optimized for the case where it's the same pfn we
 * last referenced from this CPU.
 */
static x86pte_t *
x86pte_access_pagetable(htable_t *ht, uint_t index)
{
	/*
	 * VLP pagetables are contained in the hat_t
	 */
	if (ht->ht_flags & HTABLE_VLP)
		return (PT_INDEX_PTR(ht->ht_hat->hat_vlp_ptes, index));
	return (x86pte_mapin(ht->ht_pfn, index, ht));
}

/*
 * map the given pfn into the page table window.
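 * (When neither the boot-time remap window nor kpm can be used, each CPU
 * has a private pagetable window: a temporary PTE for the pfn is written
 * through PWIN_PTE_VA() and, if the mapping changed, only that single TLB
 * entry is flushed.)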
1663 */ 1664 /*ARGSUSED*/ 1665 x86pte_t * 1666 x86pte_mapin(pfn_t pfn, uint_t index, htable_t *ht) 1667 { 1668 x86pte_t *pteptr; 1669 x86pte_t pte; 1670 x86pte_t newpte; 1671 int x; 1672 1673 ASSERT(pfn != PFN_INVALID); 1674 1675 if (!khat_running) { 1676 caddr_t va = kbm_remap_window(pfn_to_pa(pfn), 1); 1677 return (PT_INDEX_PTR(va, index)); 1678 } 1679 1680 /* 1681 * If kpm is available, use it. 1682 */ 1683 if (kpm_vbase) 1684 return (PT_INDEX_PTR(hat_kpm_pfn2va(pfn), index)); 1685 1686 /* 1687 * Disable preemption and grab the CPU's hci_mutex 1688 */ 1689 kpreempt_disable(); 1690 ASSERT(CPU->cpu_hat_info != NULL); 1691 mutex_enter(&CPU->cpu_hat_info->hci_mutex); 1692 x = PWIN_TABLE(CPU->cpu_id); 1693 pteptr = (x86pte_t *)PWIN_PTE_VA(x); 1694 if (mmu.pae_hat) 1695 pte = *pteptr; 1696 else 1697 pte = *(x86pte32_t *)pteptr; 1698 1699 newpte = MAKEPTE(pfn, 0) | mmu.pt_global | mmu.pt_nx; 1700 newpte |= PT_WRITABLE; 1701 1702 if (!PTE_EQUIV(newpte, pte)) { 1703 if (mmu.pae_hat) 1704 *pteptr = newpte; 1705 else 1706 *(x86pte32_t *)pteptr = newpte; 1707 mmu_tlbflush_entry((caddr_t)(PWIN_VA(x))); 1708 } 1709 return (PT_INDEX_PTR(PWIN_VA(x), index)); 1710 } 1711 1712 /* 1713 * Release access to a page table. 1714 */ 1715 static void 1716 x86pte_release_pagetable(htable_t *ht) 1717 { 1718 /* 1719 * nothing to do for VLP htables 1720 */ 1721 if (ht->ht_flags & HTABLE_VLP) 1722 return; 1723 1724 x86pte_mapout(); 1725 } 1726 1727 void 1728 x86pte_mapout(void) 1729 { 1730 if (mmu.pwin_base == NULL || !khat_running) 1731 return; 1732 1733 /* 1734 * Drop the CPU's hci_mutex and restore preemption. 1735 */ 1736 mutex_exit(&CPU->cpu_hat_info->hci_mutex); 1737 kpreempt_enable(); 1738 } 1739 1740 /* 1741 * Atomic retrieval of a pagetable entry 1742 */ 1743 x86pte_t 1744 x86pte_get(htable_t *ht, uint_t entry) 1745 { 1746 x86pte_t pte; 1747 x86pte_t *ptep; 1748 1749 /* 1750 * Be careful that loading PAE entries in 32 bit kernel is atomic. 1751 */ 1752 ASSERT(entry < mmu.ptes_per_table); 1753 ptep = x86pte_access_pagetable(ht, entry); 1754 pte = GET_PTE(ptep); 1755 x86pte_release_pagetable(ht); 1756 return (pte); 1757 } 1758 1759 /* 1760 * Atomic unconditional set of a page table entry, it returns the previous 1761 * value. For pre-existing mappings if the PFN changes, then we don't care 1762 * about the old pte's REF / MOD bits. If the PFN remains the same, we leave 1763 * the MOD/REF bits unchanged. 1764 * 1765 * If asked to overwrite a link to a lower page table with a large page 1766 * mapping, this routine returns the special value of LPAGE_ERROR. This 1767 * allows the upper HAT layers to retry with a smaller mapping size. 1768 */ 1769 x86pte_t 1770 x86pte_set(htable_t *ht, uint_t entry, x86pte_t new, void *ptr) 1771 { 1772 x86pte_t old; 1773 x86pte_t prev; 1774 x86pte_t *ptep; 1775 level_t l = ht->ht_level; 1776 x86pte_t pfn_mask = (l != 0) ? PT_PADDR_LGPG : PT_PADDR; 1777 x86pte_t n; 1778 uintptr_t addr = htable_e2va(ht, entry); 1779 hat_t *hat = ht->ht_hat; 1780 1781 ASSERT(new != 0); /* don't use to invalidate a PTE, see x86pte_update */ 1782 ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN)); 1783 if (ptr == NULL) 1784 ptep = x86pte_access_pagetable(ht, entry); 1785 else 1786 ptep = ptr; 1787 1788 /* 1789 * Install the new PTE. If remapping the same PFN, then 1790 * copy existing REF/MOD bits to new mapping. 
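 * (The CAS loop below retries whenever the compare fails, e.g. because
 * the hardware set REF or MOD in the PTE between our read and the CAS.)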
1791 */ 1792 do { 1793 prev = GET_PTE(ptep); 1794 n = new; 1795 if (PTE_ISVALID(n) && (prev & pfn_mask) == (new & pfn_mask)) 1796 n |= prev & (PT_REF | PT_MOD); 1797 1798 /* 1799 * Another thread may have installed this mapping already, 1800 * flush the local TLB and be done. 1801 */ 1802 if (prev == n) { 1803 old = new; 1804 mmu_tlbflush_entry((caddr_t)addr); 1805 goto done; 1806 } 1807 1808 /* 1809 * Detect if we have a collision of installing a large 1810 * page mapping where there already is a lower page table. 1811 */ 1812 if (l > 0 && (prev & PT_VALID) && !(prev & PT_PAGESIZE)) { 1813 old = LPAGE_ERROR; 1814 goto done; 1815 } 1816 1817 old = CAS_PTE(ptep, prev, n); 1818 } while (old != prev); 1819 1820 /* 1821 * Do a TLB demap if needed, ie. the old pte was valid. 1822 * 1823 * Note that a stale TLB writeback to the PTE here either can't happen 1824 * or doesn't matter. The PFN can only change for NOSYNC|NOCONSIST 1825 * mappings, but they were created with REF and MOD already set, so 1826 * no stale writeback will happen. 1827 * 1828 * Segmap is the only place where remaps happen on the same pfn and for 1829 * that we want to preserve the stale REF/MOD bits. 1830 */ 1831 if (old & PT_REF) 1832 hat_tlb_inval(hat, addr); 1833 1834 done: 1835 if (ptr == NULL) 1836 x86pte_release_pagetable(ht); 1837 return (old); 1838 } 1839 1840 /* 1841 * Atomic compare and swap of a page table entry. No TLB invalidates are done. 1842 * This is used for links between pagetables of different levels. 1843 * Note we always create these links with dirty/access set, so they should 1844 * never change. 1845 */ 1846 x86pte_t 1847 x86pte_cas(htable_t *ht, uint_t entry, x86pte_t old, x86pte_t new) 1848 { 1849 x86pte_t pte; 1850 x86pte_t *ptep; 1851 1852 ptep = x86pte_access_pagetable(ht, entry); 1853 pte = CAS_PTE(ptep, old, new); 1854 x86pte_release_pagetable(ht); 1855 return (pte); 1856 } 1857 1858 /* 1859 * data structure for cross call information 1860 */ 1861 typedef struct xcall_inval { 1862 caddr_t xi_addr; 1863 x86pte_t xi_found; 1864 x86pte_t xi_oldpte; 1865 x86pte_t *xi_pteptr; 1866 processorid_t xi_initiator; 1867 } xcall_inval_t; 1868 1869 /* 1870 * Cross call service routine to invalidate TLBs. On the 1871 * initiating CPU, this first clears the PTE in memory. 1872 */ 1873 /*ARGSUSED*/ 1874 static int 1875 x86pte_inval_func(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3) 1876 { 1877 xcall_inval_t *xi = (xcall_inval_t *)a1; 1878 1879 if (CPU->cpu_id == xi->xi_initiator) 1880 xi->xi_found = CAS_PTE(xi->xi_pteptr, xi->xi_oldpte, 0); 1881 1882 mmu_tlbflush_entry(xi->xi_addr); 1883 return (0); 1884 } 1885 1886 /* 1887 * Invalidate a page table entry as long as it currently maps something that 1888 * matches the value determined by expect. 1889 * 1890 * Also invalidates any TLB entries and returns the previous value of the PTE. 
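 * (The PTE is cleared with a compare and swap on the initiating CPU in
 * x86pte_inval_func(), and a cross call makes every CPU using this hat
 * flush the corresponding TLB entry.)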
 */
x86pte_t
x86pte_inval(
	htable_t *ht,
	uint_t entry,
	x86pte_t expect,
	x86pte_t *pte_ptr)
{
	hat_t		*hat = ht->ht_hat;
	x86pte_t	*ptep;
	xcall_inval_t	xi;
	cpuset_t	cpus;

	ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN));
	ASSERT(ht->ht_level != VLP_LEVEL);

	if (pte_ptr != NULL)
		ptep = pte_ptr;
	else
		ptep = x86pte_access_pagetable(ht, entry);
	xi.xi_pteptr = ptep;
	xi.xi_addr = (caddr_t)htable_e2va(ht, entry);

	/*
	 * Setup a cross call to any CPUs using this HAT
	 */
	kpreempt_disable();
	xi.xi_initiator = CPU->cpu_id;
	CPUSET_ZERO(cpus);
	if (hat == kas.a_hat) {
		CPUSET_OR(cpus, khat_cpuset);
	} else {
		mutex_enter(&hat->hat_switch_mutex);
		CPUSET_OR(cpus, hat->hat_cpus);
		CPUSET_ADD(cpus, CPU->cpu_id);
	}

	/*
	 * Do the cross call to invalidate the PTE and flush TLBs.
	 * Note that the loop is needed to handle changes due to h/w updating
	 * of PT_MOD/PT_REF.
	 */
	do {
		xi.xi_oldpte = GET_PTE(ptep);
		if (expect != 0 &&
		    (xi.xi_oldpte & PT_PADDR) != (expect & PT_PADDR))
			break;
		if (panicstr == NULL)
			xc_wait_sync((xc_arg_t)&xi, NULL, NULL, X_CALL_HIPRI,
			    cpus, x86pte_inval_func);
		else
			(void) x86pte_inval_func((xc_arg_t)&xi, NULL, NULL);
	} while (xi.xi_found != xi.xi_oldpte);

	if (hat != kas.a_hat)
		mutex_exit(&hat->hat_switch_mutex);
	kpreempt_enable();

	if (pte_ptr == NULL)
		x86pte_release_pagetable(ht);

	return (xi.xi_oldpte);
}

/*
 * Change a page table entry if it currently matches the value in expect.
 */
x86pte_t
x86pte_update(
	htable_t *ht,
	uint_t entry,
	x86pte_t expect,
	x86pte_t new)
{
	x86pte_t	*ptep;
	x86pte_t	found;

	ASSERT(new != 0);
	ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN));
	ASSERT(ht->ht_level != VLP_LEVEL);

	ptep = x86pte_access_pagetable(ht, entry);
	found = CAS_PTE(ptep, expect, new);
	if (found == expect) {
		hat_tlb_inval(ht->ht_hat, htable_e2va(ht, entry));

		/*
		 * When removing write permission *and* clearing the
		 * MOD bit, check if a write happened via a stale
		 * TLB entry before the TLB shootdown finished.
		 *
		 * If it did happen, simply re-enable write permission and
		 * act like the original CAS failed.
		 */
		if ((expect & (PT_WRITABLE | PT_MOD)) == PT_WRITABLE &&
		    (new & (PT_WRITABLE | PT_MOD)) == 0 &&
		    (GET_PTE(ptep) & PT_MOD) != 0) {
			do {
				found = GET_PTE(ptep);
				found =
				    CAS_PTE(ptep, found, found | PT_WRITABLE);
			} while ((found & PT_WRITABLE) == 0);
		}
	}
	x86pte_release_pagetable(ht);
	return (found);
}

/*
 * Copy page tables - this is just a little more complicated than the
 * previous routines. Note that it's also not atomic! It also is never
 * used for VLP pagetables.
 */
void
x86pte_copy(htable_t *src, htable_t *dest, uint_t entry, uint_t count)
{
	caddr_t	src_va;
	caddr_t dst_va;
	size_t	size;
	x86pte_t *pteptr;
	x86pte_t pte;

	ASSERT(khat_running);
	ASSERT(!(dest->ht_flags & HTABLE_VLP));
	ASSERT(!(src->ht_flags & HTABLE_VLP));
	ASSERT(!(src->ht_flags & HTABLE_SHARED_PFN));
	ASSERT(!(dest->ht_flags & HTABLE_SHARED_PFN));

	/*
	 * Acquire access to the CPU pagetable windows for the dest and source.
2021 */ 2022 dst_va = (caddr_t)x86pte_access_pagetable(dest, entry); 2023 if (kpm_vbase) { 2024 src_va = (caddr_t) 2025 PT_INDEX_PTR(hat_kpm_pfn2va(src->ht_pfn), entry); 2026 } else { 2027 uint_t x = PWIN_SRC(CPU->cpu_id); 2028 2029 /* 2030 * Finish defining the src pagetable mapping 2031 */ 2032 src_va = (caddr_t)PT_INDEX_PTR(PWIN_VA(x), entry); 2033 pte = MAKEPTE(src->ht_pfn, 0) | mmu.pt_global | mmu.pt_nx; 2034 pteptr = (x86pte_t *)PWIN_PTE_VA(x); 2035 if (mmu.pae_hat) 2036 *pteptr = pte; 2037 else 2038 *(x86pte32_t *)pteptr = pte; 2039 mmu_tlbflush_entry((caddr_t)(PWIN_VA(x))); 2040 } 2041 2042 /* 2043 * now do the copy 2044 */ 2045 size = count << mmu.pte_size_shift; 2046 bcopy(src_va, dst_va, size); 2047 2048 x86pte_release_pagetable(dest); 2049 } 2050 2051 /* 2052 * Zero page table entries - Note this doesn't use atomic stores! 2053 */ 2054 static void 2055 x86pte_zero(htable_t *dest, uint_t entry, uint_t count) 2056 { 2057 caddr_t dst_va; 2058 size_t size; 2059 2060 /* 2061 * Map in the page table to be zeroed. 2062 */ 2063 ASSERT(!(dest->ht_flags & HTABLE_SHARED_PFN)); 2064 ASSERT(!(dest->ht_flags & HTABLE_VLP)); 2065 2066 dst_va = (caddr_t)x86pte_access_pagetable(dest, entry); 2067 2068 size = count << mmu.pte_size_shift; 2069 ASSERT(size > BLOCKZEROALIGN); 2070 #ifdef __i386 2071 if ((x86_feature & X86_SSE2) == 0) 2072 bzero(dst_va, size); 2073 else 2074 #endif 2075 block_zero_no_xmm(dst_va, size); 2076 2077 x86pte_release_pagetable(dest); 2078 } 2079 2080 /* 2081 * Called to ensure that all pagetables are in the system dump 2082 */ 2083 void 2084 hat_dump(void) 2085 { 2086 hat_t *hat; 2087 uint_t h; 2088 htable_t *ht; 2089 2090 /* 2091 * Dump all page tables 2092 */ 2093 for (hat = kas.a_hat; hat != NULL; hat = hat->hat_next) { 2094 for (h = 0; h < hat->hat_num_hash; ++h) { 2095 for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) { 2096 if ((ht->ht_flags & HTABLE_VLP) == 0) 2097 dump_page(ht->ht_pfn); 2098 } 2099 } 2100 } 2101 } 2102