/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/kmem.h>
#include <sys/atomic.h>
#include <sys/bitmap.h>
#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/mman.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/disp.h>
#include <sys/vmem.h>
#include <sys/vmsystm.h>
#include <sys/promif.h>
#include <sys/var.h>
#include <sys/x86_archext.h>
#include <sys/archsystm.h>
#include <sys/bootconf.h>
#include <sys/dumphdr.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kpm.h>
#include <vm/hat.h>
#include <vm/hat_i86.h>
#include <sys/cmn_err.h>

#include <sys/bootinfo.h>
#include <vm/kboot_mmu.h>

static void x86pte_zero(htable_t *dest, uint_t entry, uint_t count);

kmem_cache_t *htable_cache;

/*
 * The variable htable_reserve_amount, rather than HTABLE_RESERVE_AMOUNT,
 * is used in order to facilitate testing of the htable_steal() code.
 * By resetting htable_reserve_amount to a lower value, we can force
 * stealing to occur.  The reserve amount is a guess to get us through boot.
 */
#define	HTABLE_RESERVE_AMOUNT	(200)
uint_t htable_reserve_amount = HTABLE_RESERVE_AMOUNT;
kmutex_t htable_reserve_mutex;
uint_t htable_reserve_cnt;
htable_t *htable_reserve_pool;

/*
 * Used to hand test htable_steal().
 */
#ifdef DEBUG
ulong_t force_steal = 0;
ulong_t ptable_cnt = 0;
#endif

/*
 * This variable is so that we can tune this via /etc/system
 * Any value works, but a power of two <= mmu.ptes_per_table is best.
 */
uint_t htable_steal_passes = 8;

/*
 * mutex stuff for access to htable hash
 */
#define	NUM_HTABLE_MUTEX 128
kmutex_t htable_mutex[NUM_HTABLE_MUTEX];
#define	HTABLE_MUTEX_HASH(h)	((h) & (NUM_HTABLE_MUTEX - 1))

#define	HTABLE_ENTER(h)	mutex_enter(&htable_mutex[HTABLE_MUTEX_HASH(h)]);
#define	HTABLE_EXIT(h)	mutex_exit(&htable_mutex[HTABLE_MUTEX_HASH(h)]);
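
/*
 * Note that HTABLE_MUTEX_HASH() relies on NUM_HTABLE_MUTEX being a power
 * of two, so that masking with (NUM_HTABLE_MUTEX - 1) is equivalent to a
 * modulo.  For illustration, the typical pattern for examining a hash
 * bucket under its lock (used throughout this file) is:
 *
 *	HTABLE_ENTER(hashval);
 *	for (ht = hat->hat_ht_hash[hashval]; ht; ht = ht->ht_next) {
 *		... examine ht ...
 *	}
 *	HTABLE_EXIT(hashval);
 */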

/*
 * forward declarations
 */
static void link_ptp(htable_t *higher, htable_t *new, uintptr_t vaddr);
static void unlink_ptp(htable_t *higher, htable_t *old, uintptr_t vaddr);
static void htable_free(htable_t *ht);
static x86pte_t *x86pte_access_pagetable(htable_t *ht, uint_t index);
static void x86pte_release_pagetable(htable_t *ht);
static x86pte_t x86pte_cas(htable_t *ht, uint_t entry, x86pte_t old,
	x86pte_t new);

/*
 * A counter to track if we are stealing or reaping htables. When non-zero
 * htable_free() will directly free htables (either to the reserve or kmem)
 * instead of putting them in a hat's htable cache.
 */
uint32_t htable_dont_cache = 0;

/*
 * Track the number of active pagetables, so we can know how many to reap
 */
static uint32_t active_ptables = 0;

/*
 * Allocate a memory page for a hardware page table.
 *
 * A wrapper around page_get_physical(), with some extra checks.
 */
static pfn_t
ptable_alloc(uintptr_t seed)
{
	pfn_t pfn;
	page_t *pp;

	pfn = PFN_INVALID;
	atomic_add_32(&active_ptables, 1);

	/*
	 * The first check is to see if there is memory in the system. If we
	 * drop to throttlefree, then fail the ptable_alloc() and let the
	 * stealing code kick in. Note that we have to do this test here,
	 * since the test in page_create_throttle() would let the NOSLEEP
	 * allocation go through and deplete the page reserves.
	 *
	 * The !NOMEMWAIT() lets pageout, fsflush, etc. skip this check.
	 */
	if (!NOMEMWAIT() && freemem <= throttlefree + 1)
		return (PFN_INVALID);

#ifdef DEBUG
	/*
	 * This code makes htable_steal() easier to test. By setting
	 * force_steal we force pagetable allocations to fall
	 * into the stealing code. Roughly 1 in every "force_steal"
	 * page table allocations will fail.
	 */
	if (proc_pageout != NULL && force_steal > 1 &&
	    ++ptable_cnt > force_steal) {
		ptable_cnt = 0;
		return (PFN_INVALID);
	}
#endif /* DEBUG */

	pp = page_get_physical(seed);
	if (pp == NULL)
		return (PFN_INVALID);
	pfn = pp->p_pagenum;
	page_downgrade(pp);
	ASSERT(PAGE_SHARED(pp));

	if (pfn == PFN_INVALID)
		panic("ptable_alloc(): Invalid PFN!!");
	HATSTAT_INC(hs_ptable_allocs);
	return (pfn);
}

/*
 * Free an htable's associated page table page.  See the comments
 * for ptable_alloc().
 */
static void
ptable_free(pfn_t pfn)
{
	page_t *pp = page_numtopp_nolock(pfn);

	/*
	 * need to destroy the page used for the pagetable
	 */
	ASSERT(pfn != PFN_INVALID);
	HATSTAT_INC(hs_ptable_frees);
	atomic_add_32(&active_ptables, -1);
	if (pp == NULL)
		panic("ptable_free(): no page for pfn!");
	ASSERT(PAGE_SHARED(pp));
	ASSERT(pfn == pp->p_pagenum);

	/*
	 * Get an exclusive lock, might have to wait for a kmem reader.
	 */
	if (!page_tryupgrade(pp)) {
		page_unlock(pp);
		/*
		 * RFE: we could change this to not loop forever
		 * George Cameron had some idea on how to do that.
		 * For now looping works - it's just like sfmmu.
		 */
		while (!page_lock(pp, SE_EXCL, (kmutex_t *)NULL, P_RECLAIM))
			continue;
	}
	page_free(pp, 1);
	page_unresv(1);
}

/*
 * Put one htable on the reserve list.
 */
static void
htable_put_reserve(htable_t *ht)
{
	ht->ht_hat = NULL;		/* no longer tied to a hat */
	ASSERT(ht->ht_pfn == PFN_INVALID);
	HATSTAT_INC(hs_htable_rputs);
	mutex_enter(&htable_reserve_mutex);
	ht->ht_next = htable_reserve_pool;
	htable_reserve_pool = ht;
	++htable_reserve_cnt;
	mutex_exit(&htable_reserve_mutex);
}

/*
 * Take one htable from the reserve.
 */
static htable_t *
htable_get_reserve(void)
{
	htable_t *ht = NULL;

	mutex_enter(&htable_reserve_mutex);
	if (htable_reserve_cnt != 0) {
		ht = htable_reserve_pool;
		ASSERT(ht != NULL);
		ASSERT(ht->ht_pfn == PFN_INVALID);
		htable_reserve_pool = ht->ht_next;
		--htable_reserve_cnt;
		HATSTAT_INC(hs_htable_rgets);
	}
	mutex_exit(&htable_reserve_mutex);
	return (ht);
}

/*
 * Allocate initial htables and put them on the reserve list
 */
void
htable_initial_reserve(uint_t count)
{
	htable_t *ht;

	count += HTABLE_RESERVE_AMOUNT;
	while (count > 0) {
		ht = kmem_cache_alloc(htable_cache, KM_NOSLEEP);
		ASSERT(ht != NULL);

		ASSERT(use_boot_reserve);
		ht->ht_pfn = PFN_INVALID;
		htable_put_reserve(ht);
		--count;
	}
}

/*
 * Readjust the reserves after a thread finishes using them.
 */
void
htable_adjust_reserve()
{
	htable_t *ht;

	ASSERT(curthread != hat_reserves_thread);

	/*
	 * Free any excess htables in the reserve list
	 */
	while (htable_reserve_cnt > htable_reserve_amount) {
		ht = htable_get_reserve();
		if (ht == NULL)
			return;
		ASSERT(ht->ht_pfn == PFN_INVALID);
		kmem_cache_free(htable_cache, ht);
	}
}


/*
 * This routine steals htables from user processes for htable_alloc() or
 * for htable_reap().
 */
static htable_t *
htable_steal(uint_t cnt)
{
	hat_t		*hat = kas.a_hat;	/* list starts with khat */
	htable_t	*list = NULL;
	htable_t	*ht;
	htable_t	*higher;
	uint_t		h;
	uint_t		h_start;
	static uint_t	h_seed = 0;
	uint_t		e;
	uintptr_t	va;
	x86pte_t	pte;
	uint_t		stolen = 0;
	uint_t		pass;
	uint_t		threshold;

	/*
	 * Limit htable_steal_passes to something reasonable
	 */
	if (htable_steal_passes == 0)
		htable_steal_passes = 1;
	if (htable_steal_passes > mmu.ptes_per_table)
		htable_steal_passes = mmu.ptes_per_table;

	/*
	 * Loop through all user hats. The 1st pass takes cached htables that
	 * aren't in use. The later passes steal by removing mappings, too.
	 */
	atomic_add_32(&htable_dont_cache, 1);
	for (pass = 0; pass <= htable_steal_passes && stolen < cnt; ++pass) {
		threshold = pass * mmu.ptes_per_table / htable_steal_passes;
		hat = kas.a_hat;
		for (;;) {

			/*
			 * Clear the victim flag and move to next hat
			 */
			mutex_enter(&hat_list_lock);
			if (hat != kas.a_hat) {
				hat->hat_flags &= ~HAT_VICTIM;
				cv_broadcast(&hat_list_cv);
			}
			hat = hat->hat_next;

			/*
			 * Skip any hat that is already being stolen from.
			 *
			 * We skip SHARED hats, as these are dummy
			 * hats that host ISM shared page tables.
			 *
			 * We also skip if HAT_FREEING because hat_pte_unmap()
			 * won't zero out the PTE's. That would lead to hitting
			 * stale PTEs either here or under hat_unload() when we
			 * steal and unload the same page table in competing
			 * threads.
			 */
			while (hat != NULL &&
			    (hat->hat_flags &
			    (HAT_VICTIM | HAT_SHARED | HAT_FREEING)) != 0)
				hat = hat->hat_next;

			if (hat == NULL) {
				mutex_exit(&hat_list_lock);
				break;
			}

			/*
			 * Are we finished?
			 */
			if (stolen == cnt) {
				/*
				 * Try to spread the pain of stealing,
				 * move victim HAT to the end of the HAT list.
				 */
				if (pass >= 1 && cnt == 1 &&
				    kas.a_hat->hat_prev != hat) {

					/* unlink victim hat */
					if (hat->hat_prev)
						hat->hat_prev->hat_next =
						    hat->hat_next;
					else
						kas.a_hat->hat_next =
						    hat->hat_next;
					if (hat->hat_next)
						hat->hat_next->hat_prev =
						    hat->hat_prev;
					else
						kas.a_hat->hat_prev =
						    hat->hat_prev;


					/* relink at end of hat list */
					hat->hat_next = NULL;
					hat->hat_prev = kas.a_hat->hat_prev;
					if (hat->hat_prev)
						hat->hat_prev->hat_next = hat;
					else
						kas.a_hat->hat_next = hat;
					kas.a_hat->hat_prev = hat;

				}

				mutex_exit(&hat_list_lock);
				break;
			}

			/*
			 * Mark the HAT as a stealing victim.
			 */
			hat->hat_flags |= HAT_VICTIM;
			mutex_exit(&hat_list_lock);

			/*
			 * Take any htables from the hat's cached "free" list.
			 */
			hat_enter(hat);
			while ((ht = hat->hat_ht_cached) != NULL &&
			    stolen < cnt) {
				hat->hat_ht_cached = ht->ht_next;
				ht->ht_next = list;
				list = ht;
				++stolen;
			}
			hat_exit(hat);

			/*
			 * Don't steal on first pass.
			 */
			if (pass == 0 || stolen == cnt)
				continue;

			/*
			 * Search the active htables for one to steal.
			 * Start at a different hash bucket every time to
			 * help spread the pain of stealing.
			 */
			h = h_start = h_seed++ % hat->hat_num_hash;
			do {
				higher = NULL;
				HTABLE_ENTER(h);
				for (ht = hat->hat_ht_hash[h]; ht;
				    ht = ht->ht_next) {

					/*
					 * Can we rule out reaping?
					 */
					if (ht->ht_busy != 0 ||
					    (ht->ht_flags & HTABLE_SHARED_PFN)||
					    ht->ht_level > 0 ||
					    ht->ht_valid_cnt > threshold ||
					    ht->ht_lock_cnt != 0)
						continue;

					/*
					 * Increment busy so the htable can't
					 * disappear. We drop the htable mutex
					 * to avoid deadlocks with
					 * hat_pageunload() and the hment mutex
					 * while we call hat_pte_unmap()
					 */
					++ht->ht_busy;
					HTABLE_EXIT(h);

					/*
					 * Try stealing.
					 * - unload and invalidate all PTEs
					 */
					for (e = 0, va = ht->ht_vaddr;
					    e < HTABLE_NUM_PTES(ht) &&
					    ht->ht_valid_cnt > 0 &&
					    ht->ht_busy == 1 &&
					    ht->ht_lock_cnt == 0;
					    ++e, va += MMU_PAGESIZE) {
						pte = x86pte_get(ht, e);
						if (!PTE_ISVALID(pte))
							continue;
						hat_pte_unmap(ht, e,
						    HAT_UNLOAD, pte, NULL);
					}

					/*
					 * Reacquire htable lock. If we didn't
					 * remove all mappings in the table,
					 * or another thread added a new mapping
					 * behind us, give up on this table.
					 */
					HTABLE_ENTER(h);
					if (ht->ht_busy != 1 ||
					    ht->ht_valid_cnt != 0 ||
					    ht->ht_lock_cnt != 0) {
						--ht->ht_busy;
						continue;
					}

					/*
					 * Steal it and unlink the page table.
					 */
					higher = ht->ht_parent;
					unlink_ptp(higher, ht, ht->ht_vaddr);

					/*
					 * remove from the hash list
					 */
					if (ht->ht_next)
						ht->ht_next->ht_prev =
						    ht->ht_prev;

					if (ht->ht_prev) {
						ht->ht_prev->ht_next =
						    ht->ht_next;
					} else {
						ASSERT(hat->hat_ht_hash[h] ==
						    ht);
						hat->hat_ht_hash[h] =
						    ht->ht_next;
					}

					/*
					 * Break to outer loop to release the
					 * higher (ht_parent) pagetable. This
					 * spreads out the pain caused by
					 * pagefaults.
					 */
					ht->ht_next = list;
					list = ht;
					++stolen;
					break;
				}
				HTABLE_EXIT(h);
				if (higher != NULL)
					htable_release(higher);
				if (++h == hat->hat_num_hash)
					h = 0;
			} while (stolen < cnt && h != h_start);
		}
	}
	atomic_add_32(&htable_dont_cache, -1);
	return (list);
}


/*
 * This is invoked from kmem when the system is low on memory.  We try
 * to free hments, htables, and ptables to improve the memory situation.
 */
/*ARGSUSED*/
static void
htable_reap(void *handle)
{
	uint_t		reap_cnt;
	htable_t	*list;
	htable_t	*ht;

	HATSTAT_INC(hs_reap_attempts);
	if (!can_steal_post_boot)
		return;

	/*
	 * Try to reap 5% of the page tables bounded by a maximum of
	 * 5% of physmem and a minimum of 10.
	 */
	reap_cnt = MAX(MIN(physmem / 20, active_ptables / 20), 10);
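
	/*
	 * For illustration, with hypothetical numbers: given physmem of
	 * 524288 pages and 1000 active pagetables, this computes
	 * MAX(MIN(26214, 50), 10) == 50, i.e. reap about 5% of the page
	 * tables, bounded above by 5% of physmem and below by 10.
	 */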

	/*
	 * Let htable_steal() do the work, we just call htable_free()
	 */
	list = htable_steal(reap_cnt);
	while ((ht = list) != NULL) {
		list = ht->ht_next;
		HATSTAT_INC(hs_reaped);
		htable_free(ht);
	}

	/*
	 * Free up excess reserves
	 */
	htable_adjust_reserve();
	hment_adjust_reserve();
}

/*
 * Allocate an htable, stealing one or using the reserve if necessary
 */
static htable_t *
htable_alloc(
	hat_t		*hat,
	uintptr_t	vaddr,
	level_t		level,
	htable_t	*shared)
{
	htable_t	*ht = NULL;
	uint_t		is_vlp;
	uint_t		is_bare = 0;
	uint_t		need_to_zero = 1;
	int		kmflags = (can_steal_post_boot ? KM_NOSLEEP : KM_SLEEP);

	if (level < 0 || level > TOP_LEVEL(hat))
		panic("htable_alloc(): level %d out of range\n", level);

	is_vlp = (hat->hat_flags & HAT_VLP) && level == VLP_LEVEL;
	if (is_vlp || shared != NULL)
		is_bare = 1;

	/*
	 * First reuse a cached htable from the hat_ht_cached field, this
	 * avoids unnecessary trips through kmem/page allocators.
	 */
	if (hat->hat_ht_cached != NULL && !is_bare) {
		hat_enter(hat);
		ht = hat->hat_ht_cached;
		if (ht != NULL) {
			hat->hat_ht_cached = ht->ht_next;
			need_to_zero = 0;
			/* XX64 ASSERT() they're all zero somehow */
			ASSERT(ht->ht_pfn != PFN_INVALID);
		}
		hat_exit(hat);
	}

	if (ht == NULL) {
		/*
		 * When allocating for hat_memload_arena, we use the reserve.
		 * Also use reserves if we are in a panic().
		 */
		if (use_boot_reserve || curthread == hat_reserves_thread ||
		    panicstr != NULL) {
			ht = htable_get_reserve();
		} else {
			/*
			 * Donate successful htable allocations to the reserve.
			 */
			for (;;) {
				ASSERT(curthread != hat_reserves_thread);
				ht = kmem_cache_alloc(htable_cache, kmflags);
				if (ht == NULL)
					break;
				ht->ht_pfn = PFN_INVALID;
				if (curthread == hat_reserves_thread ||
				    panicstr != NULL ||
				    htable_reserve_cnt >= htable_reserve_amount)
					break;
				htable_put_reserve(ht);
			}
		}

		/*
		 * allocate a page for the hardware page table if needed
		 */
		if (ht != NULL && !is_bare) {
			ht->ht_hat = hat;
			ht->ht_pfn = ptable_alloc((uintptr_t)ht);
			if (ht->ht_pfn == PFN_INVALID) {
				kmem_cache_free(htable_cache, ht);
				ht = NULL;
			}
		}
	}

	/*
	 * If allocations failed, kick off a kmem_reap() and resort to
	 * htable_steal(). We may spin here if the system is very low on
	 * memory. If the kernel itself has consumed all memory and kmem_reap()
	 * can't free up anything, then we'll really get stuck here.
	 * That should only happen in a system where the administrator has
	 * misconfigured VM parameters via /etc/system.
	 */
	while (ht == NULL && can_steal_post_boot) {
		kmem_reap();
		ht = htable_steal(1);
		HATSTAT_INC(hs_steals);

		/*
		 * If we stole for a bare htable, release the pagetable page.
		 */
		if (ht != NULL) {
			if (is_bare) {
				ptable_free(ht->ht_pfn);
				ht->ht_pfn = PFN_INVALID;
			}
		}
	}

	/*
	 * All attempts to allocate or steal failed. This should only happen
	 * if we run out of memory during boot, due perhaps to a huge
	 * boot_archive. At this point there's no way to continue.
	 */
	if (ht == NULL)
		panic("htable_alloc(): couldn't steal\n");

	/*
	 * Shared page tables have all entries locked and entries may not
	 * be added or deleted.
	 */
	ht->ht_flags = 0;
	if (shared != NULL) {
		ASSERT(level == 0);
		ASSERT(shared->ht_valid_cnt > 0);
		ht->ht_flags |= HTABLE_SHARED_PFN;
		ht->ht_pfn = shared->ht_pfn;
		ht->ht_lock_cnt = 0;
		ht->ht_valid_cnt = 0;		/* updated in hat_share() */
		ht->ht_shares = shared;
		need_to_zero = 0;
	} else {
		ht->ht_shares = NULL;
		ht->ht_lock_cnt = 0;
		ht->ht_valid_cnt = 0;
	}

	/*
	 * setup flags, etc. for VLP htables
	 */
	if (is_vlp) {
		ht->ht_flags |= HTABLE_VLP;
		ASSERT(ht->ht_pfn == PFN_INVALID);
		need_to_zero = 0;
	}

	/*
	 * fill in the htable
	 */
	ht->ht_hat = hat;
	ht->ht_parent = NULL;
	ht->ht_vaddr = vaddr;
	ht->ht_level = level;
	ht->ht_busy = 1;
	ht->ht_next = NULL;
	ht->ht_prev = NULL;

	/*
	 * Zero out any freshly allocated page table
	 */
	if (need_to_zero)
		x86pte_zero(ht, 0, mmu.ptes_per_table);

	return (ht);
}

/*
 * Free up an htable, either to a hat's cached list, the reserves or
 * back to kmem.
 */
static void
htable_free(htable_t *ht)
{
	hat_t	*hat = ht->ht_hat;

	/*
	 * If the process isn't exiting, cache the free htable in the hat
	 * structure. We always do this for the boot reserve. We don't
	 * do this if the hat is exiting or we are stealing/reaping htables.
	 */
	if (hat != NULL &&
	    !(ht->ht_flags & HTABLE_SHARED_PFN) &&
	    (use_boot_reserve ||
	    (!(hat->hat_flags & HAT_FREEING) && !htable_dont_cache))) {
		ASSERT((ht->ht_flags & HTABLE_VLP) == 0);
		ASSERT(ht->ht_pfn != PFN_INVALID);
		hat_enter(hat);
		ht->ht_next = hat->hat_ht_cached;
		hat->hat_ht_cached = ht;
		hat_exit(hat);
		return;
	}

	/*
	 * If we have a hardware page table, free it.
	 * We don't free page tables that are accessed by sharing.
	 */
	if (ht->ht_flags & HTABLE_SHARED_PFN) {
		ASSERT(ht->ht_pfn != PFN_INVALID);
	} else if (!(ht->ht_flags & HTABLE_VLP)) {
		ptable_free(ht->ht_pfn);
	}
	ht->ht_pfn = PFN_INVALID;

	/*
	 * If we are the thread using the reserves, put free htables
	 * into reserves.
	 */
	if (curthread == hat_reserves_thread ||
	    htable_reserve_cnt < htable_reserve_amount)
		htable_put_reserve(ht);
	else
		kmem_cache_free(htable_cache, ht);
}


/*
 * This is called when a hat is being destroyed or swapped out. We reap all
 * the remaining htables in the hat cache. If destroying, all leftover
 * htables are also destroyed.
 *
 * We also don't need to invalidate any of the PTPs nor do any demapping.
 */
void
htable_purge_hat(hat_t *hat)
{
	htable_t	*ht;
	int		h;

	/*
	 * Purge the htable cache if just reaping.
	 */
	if (!(hat->hat_flags & HAT_FREEING)) {
		atomic_add_32(&htable_dont_cache, 1);
		for (;;) {
			hat_enter(hat);
			ht = hat->hat_ht_cached;
			if (ht == NULL) {
				hat_exit(hat);
				break;
			}
			hat->hat_ht_cached = ht->ht_next;
			hat_exit(hat);
			htable_free(ht);
		}
		atomic_add_32(&htable_dont_cache, -1);
		return;
	}

	/*
	 * if freeing, no locking is needed
	 */
	while ((ht = hat->hat_ht_cached) != NULL) {
		hat->hat_ht_cached = ht->ht_next;
		htable_free(ht);
	}

	/*
	 * walk thru the htable hash table and free all the htables in it.
	 */
	for (h = 0; h < hat->hat_num_hash; ++h) {
		while ((ht = hat->hat_ht_hash[h]) != NULL) {
			if (ht->ht_next)
				ht->ht_next->ht_prev = ht->ht_prev;

			if (ht->ht_prev) {
				ht->ht_prev->ht_next = ht->ht_next;
			} else {
				ASSERT(hat->hat_ht_hash[h] == ht);
				hat->hat_ht_hash[h] = ht->ht_next;
			}
			htable_free(ht);
		}
	}
}

/*
 * Unlink an entry for a table at vaddr and level out of the existing table
 * one level higher. We are always holding the HASH_ENTER() when doing this.
 */
static void
unlink_ptp(htable_t *higher, htable_t *old, uintptr_t vaddr)
{
	uint_t		entry = htable_va2entry(vaddr, higher);
	x86pte_t	expect = MAKEPTP(old->ht_pfn, old->ht_level);
	x86pte_t	found;

	ASSERT(higher->ht_busy > 0);
	ASSERT(higher->ht_valid_cnt > 0);
	ASSERT(old->ht_valid_cnt == 0);
	found = x86pte_cas(higher, entry, expect, 0);
	if (found != expect)
		panic("Bad PTP found=" FMT_PTE ", expected=" FMT_PTE,
		    found, expect);
	HTABLE_DEC(higher->ht_valid_cnt);
}

/*
 * Link an entry for a new table at vaddr and level into the existing table
 * one level higher. We are always holding the HASH_ENTER() when doing this.
 */
static void
link_ptp(htable_t *higher, htable_t *new, uintptr_t vaddr)
{
	uint_t		entry = htable_va2entry(vaddr, higher);
	x86pte_t	newptp = MAKEPTP(new->ht_pfn, new->ht_level);
	x86pte_t	found;

	ASSERT(higher->ht_busy > 0);

	ASSERT(new->ht_level != mmu.max_level);

	HTABLE_INC(higher->ht_valid_cnt);

	found = x86pte_cas(higher, entry, 0, newptp);
	if ((found & ~PT_REF) != 0)
		panic("HAT: ptp not 0, found=" FMT_PTE, found);
}

/*
 * Release of hold on an htable. If this is the last use and the pagetable
 * is empty we may want to free it, then recursively look at the pagetable
 * above it. The recursion is handled by the outer while() loop.
 */
void
htable_release(htable_t *ht)
{
	uint_t		hashval;
	htable_t	*shared;
	htable_t	*higher;
	hat_t		*hat;
	uintptr_t	va;
	level_t		level;

	while (ht != NULL) {
		shared = NULL;
		for (;;) {
			hat = ht->ht_hat;
			va = ht->ht_vaddr;
			level = ht->ht_level;
			hashval = HTABLE_HASH(hat, va, level);

			/*
			 * The common case is that this isn't the last use of
			 * an htable so we don't want to free the htable.
			 */
			HTABLE_ENTER(hashval);
			ASSERT(ht->ht_lock_cnt == 0 || ht->ht_valid_cnt > 0);
			ASSERT(ht->ht_valid_cnt >= 0);
			ASSERT(ht->ht_busy > 0);
			if (ht->ht_valid_cnt > 0)
				break;
			if (ht->ht_busy > 1)
				break;

			/*
			 * we always release empty shared htables
			 */
			if (!(ht->ht_flags & HTABLE_SHARED_PFN)) {

				/*
				 * don't release if in address space tear down
				 */
				if (hat->hat_flags & HAT_FREEING)
					break;

				/*
				 * At and above max_page_level, free if it's for
				 * a boot-time kernel mapping below kernelbase.
				 */
				if (level >= mmu.max_page_level &&
				    (hat != kas.a_hat || va >= kernelbase))
					break;
			}

			/*
			 * Remember if we destroy an htable that shares its PFN
			 * from elsewhere.
			 */
			if (ht->ht_flags & HTABLE_SHARED_PFN) {
				ASSERT(ht->ht_level == 0);
				ASSERT(shared == NULL);
				shared = ht->ht_shares;
				HATSTAT_INC(hs_htable_unshared);
			}

			/*
			 * Handle release of a table and freeing the htable_t.
			 * Unlink it from the table higher (ie. ht_parent).
			 */
			ASSERT(ht->ht_lock_cnt == 0);
			higher = ht->ht_parent;
			ASSERT(higher != NULL);

			/*
			 * Unlink the pagetable.
			 */
			unlink_ptp(higher, ht, va);

			/*
			 * When any top level VLP page table entry changes, we
			 * must issue a reload of cr3 on all processors.
			 */
			if ((hat->hat_flags & HAT_VLP) &&
			    level == VLP_LEVEL - 1)
				hat_tlb_inval(hat, DEMAP_ALL_ADDR);

			/*
			 * remove this htable from its hash list
			 */
			if (ht->ht_next)
				ht->ht_next->ht_prev = ht->ht_prev;

			if (ht->ht_prev) {
				ht->ht_prev->ht_next = ht->ht_next;
			} else {
				ASSERT(hat->hat_ht_hash[hashval] == ht);
				hat->hat_ht_hash[hashval] = ht->ht_next;
			}
			HTABLE_EXIT(hashval);
			htable_free(ht);
			ht = higher;
		}

		ASSERT(ht->ht_busy >= 1);
		--ht->ht_busy;
		HTABLE_EXIT(hashval);

		/*
		 * If we released a shared htable, do a release on the htable
		 * from which it shared
		 */
		ht = shared;
	}
}

/*
 * Find the htable for the pagetable at the given level for the given address.
 * If found acquires a hold that eventually needs to be htable_release()d
 */
htable_t *
htable_lookup(hat_t *hat, uintptr_t vaddr, level_t level)
{
	uintptr_t	base;
	uint_t		hashval;
	htable_t	*ht = NULL;

	ASSERT(level >= 0);
	ASSERT(level <= TOP_LEVEL(hat));

	if (level == TOP_LEVEL(hat))
		base = 0;
	else
		base = vaddr & LEVEL_MASK(level + 1);

	hashval = HTABLE_HASH(hat, base, level);
	HTABLE_ENTER(hashval);
	for (ht = hat->hat_ht_hash[hashval]; ht; ht = ht->ht_next) {
		if (ht->ht_hat == hat &&
		    ht->ht_vaddr == base &&
		    ht->ht_level == level)
			break;
	}
	if (ht)
		++ht->ht_busy;

	HTABLE_EXIT(hashval);
	return (ht);
}

/*
 * Acquires a hold on a known htable (from a locked hment entry).
 */
void
htable_acquire(htable_t *ht)
{
	hat_t		*hat = ht->ht_hat;
	level_t		level = ht->ht_level;
	uintptr_t	base = ht->ht_vaddr;
	uint_t		hashval = HTABLE_HASH(hat, base, level);

	HTABLE_ENTER(hashval);
#ifdef DEBUG
	/*
	 * make sure the htable is there
	 */
	{
		htable_t	*h;

		for (h = hat->hat_ht_hash[hashval];
		    h && h != ht;
		    h = h->ht_next)
			;
		ASSERT(h == ht);
	}
#endif /* DEBUG */
	++ht->ht_busy;
	HTABLE_EXIT(hashval);
}

/*
 * Find the htable for the pagetable at the given level for the given address.
 * If found acquires a hold that eventually needs to be htable_release()d
 * If not found the table is created.
 *
 * Since we can't hold a hash table mutex during allocation, we have to
 * drop it and redo the search on a create. Then we may have to free the newly
 * allocated htable if another thread raced in and created it ahead of us.
 */
htable_t *
htable_create(
	hat_t		*hat,
	uintptr_t	vaddr,
	level_t		level,
	htable_t	*shared)
{
	uint_t		h;
	level_t		l;
	uintptr_t	base;
	htable_t	*ht;
	htable_t	*higher = NULL;
	htable_t	*new = NULL;

	if (level < 0 || level > TOP_LEVEL(hat))
		panic("htable_create(): level %d out of range\n", level);

	/*
	 * Create the page tables in top down order.
	 */
	for (l = TOP_LEVEL(hat); l >= level; --l) {
		new = NULL;
		if (l == TOP_LEVEL(hat))
			base = 0;
		else
			base = vaddr & LEVEL_MASK(l + 1);

		h = HTABLE_HASH(hat, base, l);
	try_again:
		/*
		 * look up the htable at this level
		 */
		HTABLE_ENTER(h);
		if (l == TOP_LEVEL(hat)) {
			ht = hat->hat_htable;
		} else {
			for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) {
				ASSERT(ht->ht_hat == hat);
				if (ht->ht_vaddr == base &&
				    ht->ht_level == l)
					break;
			}
		}

		/*
		 * if we found the htable, increment its busy cnt
		 * and if we had allocated a new htable, free it.
		 */
		if (ht != NULL) {
			/*
			 * If we find a pre-existing shared table, it must
			 * share from the same place.
			 */
			if (l == level && shared && ht->ht_shares &&
			    ht->ht_shares != shared) {
				panic("htable shared from wrong place "
				    "found htable=%p shared=%p", ht, shared);
			}
			++ht->ht_busy;
			HTABLE_EXIT(h);
			if (new)
				htable_free(new);
			if (higher != NULL)
				htable_release(higher);
			higher = ht;

		/*
		 * if we didn't find it on the first search
		 * allocate a new one and search again
		 */
		} else if (new == NULL) {
			HTABLE_EXIT(h);
			new = htable_alloc(hat, base, l,
			    l == level ? shared : NULL);
			goto try_again;

		/*
		 * 2nd search and still not there, use "new" table
		 * Link new table into higher, when not at top level.
		 */
		} else {
			ht = new;
			if (higher != NULL) {
				link_ptp(higher, ht, base);
				ht->ht_parent = higher;

				/*
				 * When any top level VLP page table changes,
				 * we must reload cr3 on all processors.
				 */
#ifdef __i386
				if (mmu.pae_hat &&
#else /* !__i386 */
				if ((hat->hat_flags & HAT_VLP) &&
#endif /* __i386 */
				    l == VLP_LEVEL - 1)
					hat_tlb_inval(hat, DEMAP_ALL_ADDR);
			}
			ht->ht_next = hat->hat_ht_hash[h];
			ASSERT(ht->ht_prev == NULL);
			if (hat->hat_ht_hash[h])
				hat->hat_ht_hash[h]->ht_prev = ht;
			hat->hat_ht_hash[h] = ht;
			HTABLE_EXIT(h);

			/*
			 * Note we don't do htable_release(higher).
			 * That happens recursively when "new" is removed by
			 * htable_release() or htable_steal().
			 */
			higher = ht;

			/*
			 * If we just created a new shared page table we
			 * increment the shared htable's busy count, so that
			 * it can't be the victim of a steal even if it's empty.
			 */
			if (l == level && shared) {
				(void) htable_lookup(shared->ht_hat,
				    shared->ht_vaddr, shared->ht_level);
				HATSTAT_INC(hs_htable_shared);
			}
		}
	}

	return (ht);
}
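
/*
 * For illustration only (a simplified sketch, not code taken from
 * hat_i86.c): a caller mapping a page typically brackets the PTE update
 * with htable_create() and htable_release(), e.g.
 *
 *	ht = htable_create(hat, va, 0, NULL);
 *	entry = htable_va2entry(va, ht);
 *	(void) x86pte_set(ht, entry, pte, NULL);
 *	htable_release(ht);
 *
 * The hold returned by htable_create() is what keeps the pagetable from
 * being stolen or freed while the caller is still using it.
 */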

/*
 * Inherit initial pagetables from the boot program.
 */
void
htable_attach(
	hat_t	*hat,
	uintptr_t base,
	level_t	level,
	htable_t *parent,
	pfn_t	pfn)
{
	htable_t	*ht;
	uint_t		h;
	uint_t		i;
	x86pte_t	pte;
	x86pte_t	*ptep;
	page_t		*pp;
	extern page_t	*boot_claim_page(pfn_t);

	ht = htable_get_reserve();
	if (level == mmu.max_level)
		kas.a_hat->hat_htable = ht;
	ht->ht_hat = hat;
	ht->ht_parent = parent;
	ht->ht_vaddr = base;
	ht->ht_level = level;
	ht->ht_busy = 1;
	ht->ht_next = NULL;
	ht->ht_prev = NULL;
	ht->ht_flags = 0;
	ht->ht_pfn = pfn;
	ht->ht_lock_cnt = 0;
	ht->ht_valid_cnt = 0;
	if (parent != NULL)
		++parent->ht_busy;

	h = HTABLE_HASH(hat, base, level);
	HTABLE_ENTER(h);
	ht->ht_next = hat->hat_ht_hash[h];
	ASSERT(ht->ht_prev == NULL);
	if (hat->hat_ht_hash[h])
		hat->hat_ht_hash[h]->ht_prev = ht;
	hat->hat_ht_hash[h] = ht;
	HTABLE_EXIT(h);

	/*
	 * make sure the page table physical page is not FREE
	 */
	if (page_resv(1, KM_NOSLEEP) == 0)
		panic("page_resv() failed in ptable alloc");

	pp = boot_claim_page(pfn);
	ASSERT(pp != NULL);
	page_downgrade(pp);
	/*
	 * Record in the page_t that it is a pagetable, for segkpm setup.
	 */
	if (kpm_vbase)
		pp->p_index = 1;

	/*
	 * Count valid mappings and recursively attach lower level pagetables.
	 */
	ptep = kbm_remap_window(pfn_to_pa(pfn), 0);
	for (i = 0; i < HTABLE_NUM_PTES(ht); ++i) {
		if (mmu.pae_hat)
			pte = ptep[i];
		else
			pte = ((x86pte32_t *)ptep)[i];
		if (!IN_HYPERVISOR_VA(base) && PTE_ISVALID(pte)) {
			++ht->ht_valid_cnt;
			if (!PTE_ISPAGE(pte, level)) {
				htable_attach(hat, base, level - 1,
				    ht, PTE2PFN(pte, level));
				ptep = kbm_remap_window(pfn_to_pa(pfn), 0);
			}
		}
		base += LEVEL_SIZE(level);
		if (base == mmu.hole_start)
			base = (mmu.hole_end + MMU_PAGEOFFSET) & MMU_PAGEMASK;
	}

	/*
	 * As long as all the mappings we had were below kernel base
	 * we can release the htable.
	 */
	if (base < kernelbase)
		htable_release(ht);
}

/*
 * Walk through a given htable looking for the first valid entry.  This
 * routine takes both a starting and ending address.  The starting address
 * is required to be within the htable provided by the caller, but there is
 * no such restriction on the ending address.
 *
 * If the routine finds a valid entry in the htable (at or beyond the
 * starting address), the PTE (and its address) will be returned.
 * This PTE may correspond to either a page or a pagetable - it is the
 * caller's responsibility to determine which.  If no valid entry is
 * found, 0 (and invalid PTE) and the next unexamined address will be
 * returned.
 *
 * The loop has been carefully coded for optimization.
 */
static x86pte_t
htable_scan(htable_t *ht, uintptr_t *vap, uintptr_t eaddr)
{
	uint_t e;
	x86pte_t found_pte = (x86pte_t)0;
	caddr_t pte_ptr;
	caddr_t end_pte_ptr;
	int l = ht->ht_level;
	uintptr_t va = *vap & LEVEL_MASK(l);
	size_t pgsize = LEVEL_SIZE(l);

	ASSERT(va >= ht->ht_vaddr);
	ASSERT(va <= HTABLE_LAST_PAGE(ht));

	/*
	 * Compute the starting index and ending virtual address
	 */
	e = htable_va2entry(va, ht);

	/*
	 * The following page table scan code knows that the valid
	 * bit of a PTE is in the lowest byte AND that x86 is little endian!!
	 */
	pte_ptr = (caddr_t)x86pte_access_pagetable(ht, 0);
	end_pte_ptr = (caddr_t)PT_INDEX_PTR(pte_ptr, HTABLE_NUM_PTES(ht));
	pte_ptr = (caddr_t)PT_INDEX_PTR((x86pte_t *)pte_ptr, e);
	while (!PTE_ISVALID(*pte_ptr)) {
		va += pgsize;
		if (va >= eaddr)
			break;
		pte_ptr += mmu.pte_size;
		ASSERT(pte_ptr <= end_pte_ptr);
		if (pte_ptr == end_pte_ptr)
			break;
	}

	/*
	 * if we found a valid PTE, load the entire PTE
	 */
	if (va < eaddr && pte_ptr != end_pte_ptr)
		found_pte = GET_PTE((x86pte_t *)pte_ptr);
	x86pte_release_pagetable(ht);

#if defined(__amd64)
	/*
	 * deal with VA hole on amd64
	 */
	if (l == mmu.max_level && va >= mmu.hole_start && va <= mmu.hole_end)
		va = mmu.hole_end + va - mmu.hole_start;
#endif /* __amd64 */

	*vap = va;
	return (found_pte);
}

/*
 * Find the address and htable for the first populated translation at or
 * above the given virtual address.  The caller may also specify an upper
 * limit to the address range to search.  Uses level information to quickly
 * skip unpopulated sections of virtual address spaces.
 *
 * If not found returns NULL.  When found, returns the htable and virt addr
 * and has a hold on the htable.
 */
x86pte_t
htable_walk(
	struct hat *hat,
	htable_t **htp,
	uintptr_t *vaddr,
	uintptr_t eaddr)
{
	uintptr_t va = *vaddr;
	htable_t *ht;
	htable_t *prev = *htp;
	level_t l;
	level_t max_mapped_level;
	x86pte_t pte;

	ASSERT(eaddr > va);

	/*
	 * If this is a user address, then we know we need not look beyond
	 * kernelbase.
	 */
	ASSERT(hat == kas.a_hat || eaddr <= kernelbase ||
	    eaddr == HTABLE_WALK_TO_END);
	if (hat != kas.a_hat && eaddr == HTABLE_WALK_TO_END)
		eaddr = kernelbase;

	/*
	 * If we're coming in with a previous page table, search it first
	 * without doing an htable_lookup(), this should be frequent.
	 */
	if (prev) {
		ASSERT(prev->ht_busy > 0);
		ASSERT(prev->ht_vaddr <= va);
		l = prev->ht_level;
		if (va <= HTABLE_LAST_PAGE(prev)) {
			pte = htable_scan(prev, &va, eaddr);

			if (PTE_ISPAGE(pte, l)) {
				*vaddr = va;
				*htp = prev;
				return (pte);
			}
		}

		/*
		 * We found nothing in the htable provided by the caller,
		 * so fall through and do the full search
		 */
		htable_release(prev);
	}

	/*
	 * Find the level of the largest pagesize used by this HAT.
	 */
	max_mapped_level = 0;
	for (l = 1; l <= mmu.max_page_level; ++l)
		if (hat->hat_pages_mapped[l] != 0)
			max_mapped_level = l;

	while (va < eaddr && va >= *vaddr) {
		ASSERT(!IN_VA_HOLE(va));

		/*
		 * Find lowest table with any entry for given address.
		 */
		for (l = 0; l <= TOP_LEVEL(hat); ++l) {
			ht = htable_lookup(hat, va, l);
			if (ht != NULL) {
				pte = htable_scan(ht, &va, eaddr);
				if (PTE_ISPAGE(pte, l)) {
					*vaddr = va;
					*htp = ht;
					return (pte);
				}
				htable_release(ht);
				break;
			}

			/*
			 * The ht is never NULL at the top level since
			 * the top level htable is created in hat_alloc().
			 */
			ASSERT(l < TOP_LEVEL(hat));

			/*
			 * No htable covers the address. If there is no
			 * larger page size that could cover it, we
			 * skip to the start of the next page table.
			 */
			if (l >= max_mapped_level) {
				va = NEXT_ENTRY_VA(va, l + 1);
				break;
			}
		}
	}

	*vaddr = 0;
	*htp = NULL;
	return (0);
}
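
/*
 * For illustration only (a simplified sketch, not code taken from
 * hat_i86.c): callers typically iterate over a VA range with
 * htable_walk() and release the last htable returned when done:
 *
 *	ht = NULL;
 *	for (va = start; va < end; va += LEVEL_SIZE(ht->ht_level)) {
 *		pte = htable_walk(hat, &ht, &va, end);
 *		if (ht == NULL)
 *			break;
 *		... process the mapping at va ...
 *	}
 *	if (ht != NULL)
 *		htable_release(ht);
 */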

/*
 * Find the htable and page table entry index of the given virtual address
 * with pagesize at or below given level.
 * If not found returns NULL.  When found, returns the htable, sets
 * entry, and has a hold on the htable.
 */
htable_t *
htable_getpte(
	struct hat *hat,
	uintptr_t vaddr,
	uint_t *entry,
	x86pte_t *pte,
	level_t level)
{
	htable_t	*ht;
	level_t		l;
	uint_t		e;

	ASSERT(level <= mmu.max_page_level);

	for (l = 0; l <= level; ++l) {
		ht = htable_lookup(hat, vaddr, l);
		if (ht == NULL)
			continue;
		e = htable_va2entry(vaddr, ht);
		if (entry != NULL)
			*entry = e;
		if (pte != NULL)
			*pte = x86pte_get(ht, e);
		return (ht);
	}
	return (NULL);
}

/*
 * Find the htable and page table entry index of the given virtual address.
 * There must be a valid page mapped at the given address.
 * If not found returns NULL.  When found, returns the htable, sets
 * entry, and has a hold on the htable.
 */
htable_t *
htable_getpage(struct hat *hat, uintptr_t vaddr, uint_t *entry)
{
	htable_t	*ht;
	uint_t		e;
	x86pte_t	pte;

	ht = htable_getpte(hat, vaddr, &e, &pte, mmu.max_page_level);
	if (ht == NULL)
		return (NULL);

	if (entry)
		*entry = e;

	if (PTE_ISPAGE(pte, ht->ht_level))
		return (ht);
	htable_release(ht);
	return (NULL);
}


void
htable_init()
{
	/*
	 * To save on kernel VA usage, we avoid debug information in 32 bit
	 * kernels.
	 */
#if defined(__amd64)
	int	kmem_flags = KMC_NOHASH;
#elif defined(__i386)
	int	kmem_flags = KMC_NOHASH | KMC_NODEBUG;
#endif

	/*
	 * initialize kmem caches
	 */
	htable_cache = kmem_cache_create("htable_t",
	    sizeof (htable_t), 0, NULL, NULL,
	    htable_reap, NULL, hat_memload_arena, kmem_flags);
}

/*
 * get the pte index for the virtual address in the given htable's pagetable
 */
uint_t
htable_va2entry(uintptr_t va, htable_t *ht)
{
	level_t	l = ht->ht_level;

	ASSERT(va >= ht->ht_vaddr);
	ASSERT(va <= HTABLE_LAST_PAGE(ht));
	return ((va >> LEVEL_SHIFT(l)) & (HTABLE_NUM_PTES(ht) - 1));
}

/*
 * Given an htable and the index of a pte in it, return the virtual address
 * of the page.
 */
uintptr_t
htable_e2va(htable_t *ht, uint_t entry)
{
	level_t	l = ht->ht_level;
	uintptr_t va;

	ASSERT(entry < HTABLE_NUM_PTES(ht));
	va = ht->ht_vaddr + ((uintptr_t)entry << LEVEL_SHIFT(l));

	/*
	 * Need to skip over any VA hole in top level table
	 */
#if defined(__amd64)
	if (ht->ht_level == mmu.max_level && va >= mmu.hole_start)
		va += ((mmu.hole_end - mmu.hole_start) + 1);
#endif

	return (va);
}

/*
 * The code uses compare and swap instructions to read/write PTE's to
 * avoid atomicity problems, since PTEs can be 8 bytes on 32 bit systems;
 * on 64 bit systems an aligned load or store of a PTE will naturally be
 * atomic.
 *
 * The combination of using kpreempt_disable()/_enable() and the hci_mutex
 * are used to ensure that an interrupt won't overwrite a temporary mapping
 * while it's in use. If an interrupt thread tries to access a PTE, it will
 * yield briefly back to the pinned thread which holds the cpu's hci_mutex.
 */
void
x86pte_cpu_init(cpu_t *cpu)
{
	struct hat_cpu_info *hci;

	hci = kmem_zalloc(sizeof (*hci), KM_SLEEP);
	mutex_init(&hci->hci_mutex, NULL, MUTEX_DEFAULT, NULL);
	cpu->cpu_hat_info = hci;
}

void
x86pte_cpu_fini(cpu_t *cpu)
{
	struct hat_cpu_info *hci = cpu->cpu_hat_info;

	kmem_free(hci, sizeof (*hci));
	cpu->cpu_hat_info = NULL;
}

#ifdef __i386
/*
 * On 32 bit kernels, loading a 64 bit PTE is a little tricky
 */
x86pte_t
get_pte64(x86pte_t *ptr)
{
	volatile uint32_t *p = (uint32_t *)ptr;
	x86pte_t t;

	ASSERT(mmu.pae_hat != 0);
	for (;;) {
		t = p[0];
		t |= (uint64_t)p[1] << 32;
		if ((t & 0xffffffff) == p[0])
			return (t);
	}
}
#endif /* __i386 */
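
/*
 * A note on get_pte64() above: with PAE a PTE is two 32 bit words, so the
 * low and high halves are read separately and could be torn by a
 * concurrent update.  Re-reading the low word and comparing it against
 * the value already gathered detects that race; the loop retries until
 * both halves came from the same version of the PTE.
 */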

/*
 * Disable preemption and establish a mapping to the pagetable with the
 * given pfn. This is optimized for the case where it's the same
 * pfn we last referenced from this CPU.
 */
static x86pte_t *
x86pte_access_pagetable(htable_t *ht, uint_t index)
{
	/*
	 * VLP pagetables are contained in the hat_t
	 */
	if (ht->ht_flags & HTABLE_VLP)
		return (PT_INDEX_PTR(ht->ht_hat->hat_vlp_ptes, index));
	return (x86pte_mapin(ht->ht_pfn, index, ht));
}

/*
 * map the given pfn into the page table window.
 */
/*ARGSUSED*/
x86pte_t *
x86pte_mapin(pfn_t pfn, uint_t index, htable_t *ht)
{
	x86pte_t *pteptr;
	x86pte_t pte;
	x86pte_t newpte;
	int x;

	ASSERT(pfn != PFN_INVALID);

	if (!khat_running) {
		caddr_t va = kbm_remap_window(pfn_to_pa(pfn), 1);
		return (PT_INDEX_PTR(va, index));
	}

	/*
	 * If kpm is available, use it.
	 */
	if (kpm_vbase)
		return (PT_INDEX_PTR(hat_kpm_pfn2va(pfn), index));

	/*
	 * Disable preemption and grab the CPU's hci_mutex
	 */
	kpreempt_disable();
	ASSERT(CPU->cpu_hat_info != NULL);
	mutex_enter(&CPU->cpu_hat_info->hci_mutex);
	x = PWIN_TABLE(CPU->cpu_id);
	pteptr = (x86pte_t *)PWIN_PTE_VA(x);
	if (mmu.pae_hat)
		pte = *pteptr;
	else
		pte = *(x86pte32_t *)pteptr;

	newpte = MAKEPTE(pfn, 0) | mmu.pt_global | mmu.pt_nx;
	newpte |= PT_WRITABLE;

	if (!PTE_EQUIV(newpte, pte)) {
		if (mmu.pae_hat)
			*pteptr = newpte;
		else
			*(x86pte32_t *)pteptr = newpte;
		mmu_tlbflush_entry((caddr_t)(PWIN_VA(x)));
	}
	return (PT_INDEX_PTR(PWIN_VA(x), index));
}
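
/*
 * A note on the mapping window protocol, for illustration: when neither
 * kpm nor the boot-time remap window applies, x86pte_mapin() returns with
 * this CPU pinned (preemption disabled) and its hci_mutex held.  Both are
 * dropped again in x86pte_mapout(), so a successful mapin is always paired
 * with a mapout.  The PTE_EQUIV() check above skips the PTE write and TLB
 * flush when the window already maps the desired pfn.
 */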

/*
 * Release access to a page table.
 */
static void
x86pte_release_pagetable(htable_t *ht)
{
	/*
	 * nothing to do for VLP htables
	 */
	if (ht->ht_flags & HTABLE_VLP)
		return;

	x86pte_mapout();
}

void
x86pte_mapout(void)
{
	if (mmu.pwin_base == NULL || !khat_running)
		return;

	/*
	 * Drop the CPU's hci_mutex and restore preemption.
	 */
	mutex_exit(&CPU->cpu_hat_info->hci_mutex);
	kpreempt_enable();
}

/*
 * Atomic retrieval of a pagetable entry
 */
x86pte_t
x86pte_get(htable_t *ht, uint_t entry)
{
	x86pte_t	pte;
	x86pte_t	*ptep;

	/*
	 * Be careful that loading PAE entries in 32 bit kernel is atomic.
	 */
	ASSERT(entry < mmu.ptes_per_table);
	ptep = x86pte_access_pagetable(ht, entry);
	pte = GET_PTE(ptep);
	x86pte_release_pagetable(ht);
	return (pte);
}

/*
 * Atomic unconditional set of a page table entry, it returns the previous
 * value. For pre-existing mappings if the PFN changes, then we don't care
 * about the old pte's REF / MOD bits. If the PFN remains the same, we leave
 * the MOD/REF bits unchanged.
 *
 * If asked to overwrite a link to a lower page table with a large page
 * mapping, this routine returns the special value of LPAGE_ERROR. This
 * allows the upper HAT layers to retry with a smaller mapping size.
 */
x86pte_t
x86pte_set(htable_t *ht, uint_t entry, x86pte_t new, void *ptr)
{
	x86pte_t	old;
	x86pte_t	prev;
	x86pte_t	*ptep;
	level_t		l = ht->ht_level;
	x86pte_t	pfn_mask = (l != 0) ? PT_PADDR_LGPG : PT_PADDR;
	x86pte_t	n;
	uintptr_t	addr = htable_e2va(ht, entry);
	hat_t		*hat = ht->ht_hat;

	ASSERT(new != 0); /* don't use to invalidate a PTE, see x86pte_update */
	ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN));
	if (ptr == NULL)
		ptep = x86pte_access_pagetable(ht, entry);
	else
		ptep = ptr;

	/*
	 * Install the new PTE. If remapping the same PFN, then
	 * copy existing REF/MOD bits to new mapping.
	 */
	do {
		prev = GET_PTE(ptep);
		n = new;
		if (PTE_ISVALID(n) && (prev & pfn_mask) == (new & pfn_mask))
			n |= prev & (PT_REF | PT_MOD);

		/*
		 * Another thread may have installed this mapping already,
		 * flush the local TLB and be done.
		 */
		if (prev == n) {
			old = new;
			mmu_tlbflush_entry((caddr_t)addr);
			goto done;
		}

		/*
		 * Detect if we have a collision of installing a large
		 * page mapping where there already is a lower page table.
		 */
		if (l > 0 && (prev & PT_VALID) && !(prev & PT_PAGESIZE))
			return (LPAGE_ERROR);

		old = CAS_PTE(ptep, prev, n);
	} while (old != prev);

	/*
	 * Do a TLB demap if needed, ie. the old pte was valid.
	 *
	 * Note that a stale TLB writeback to the PTE here either can't happen
	 * or doesn't matter. The PFN can only change for NOSYNC|NOCONSIST
	 * mappings, but they were created with REF and MOD already set, so
	 * no stale writeback will happen.
	 *
	 * Segmap is the only place where remaps happen on the same pfn and
	 * for that we want to preserve the stale REF/MOD bits.
	 */
	if (old & PT_REF)
		hat_tlb_inval(hat, addr);

done:
	if (ptr == NULL)
		x86pte_release_pagetable(ht);
	return (old);
}

/*
 * Atomic compare and swap of a page table entry. No TLB invalidates are done.
 * This is used for links between pagetables of different levels.
 * Note we always create these links with dirty/access set, so they should
 * never change.
 */
x86pte_t
x86pte_cas(htable_t *ht, uint_t entry, x86pte_t old, x86pte_t new)
{
	x86pte_t	pte;
	x86pte_t	*ptep;

	ptep = x86pte_access_pagetable(ht, entry);
	pte = CAS_PTE(ptep, old, new);
	x86pte_release_pagetable(ht);
	return (pte);
}

/*
 * Make sure the zero we wrote to a page table entry sticks in memory
 * after invalidating all TLB entries on all CPUs.
 */
static x86pte_t
handle_tlbs(x86pte_t oldpte, x86pte_t *ptep, htable_t *ht, uint_t entry)
{
	hat_t		*hat = ht->ht_hat;
	uintptr_t	addr = htable_e2va(ht, entry);
	x86pte_t	found;

	/*
	 * Was the PTE ever used? If not there can't be any TLB entries.
	 */
	if ((oldpte & PT_REF) == 0)
		return (oldpte);

	/*
	 * Do a full global TLB invalidation.
	 * We may have to loop until the new PTE in memory stays zero.
	 * Why? Because Intel/AMD don't document how the REF/MOD bits are
	 * copied back from the TLB to the PTE, sigh. We're protecting
	 * here against a blind write back of the MOD (and other) bits.
	 */
	for (;;) {
		hat_tlb_inval(hat, addr);

		/*
		 * Check for a stale writeback of a oldpte TLB entry.
		 * Done when the PTE stays zero.
		 */
		found = GET_PTE(ptep);
		if (found == 0)
			return (oldpte);

		/*
		 * The only acceptable PTE change must be from a TLB
		 * flush setting the MOD bit in, hence oldpte must
		 * have been writable.
		 */
		if (!(oldpte & PT_WRITABLE) || !(found & PT_MOD))
			break;

		/*
		 * Did we see a complete writeback of oldpte?
		 * or
		 * Did we see the MOD bit set (plus possibly other
		 * bits rewritten) in a still invalid mapping?
		 */
		if (found == (oldpte | PT_MOD) ||
		    (!(found & PT_VALID) &&
		    (oldpte | found) == (oldpte | PT_MOD)))
			oldpte |= PT_MOD;
		else
			break;

		(void) CAS_PTE(ptep, found, 0);
	}

	/*
	 * If we hit this, a processor attempted to set the DIRTY bit of a
	 * page table entry in a way we didn't anticipate.
	 */
	panic("handle_tlbs(): unanticipated TLB shootdown scenario"
	    " oldpte=" FMT_PTE " found=" FMT_PTE, oldpte, found);
	/*LINTED*/
}

/*
 * Invalidate a page table entry as long as it currently maps something that
 * matches the value determined by expect.
 *
 * Also invalidates any TLB entries and returns the previous value of the PTE.
 */
x86pte_t
x86pte_inval(
	htable_t *ht,
	uint_t entry,
	x86pte_t expect,
	x86pte_t *pte_ptr)
{
	x86pte_t	*ptep;
	x86pte_t	oldpte;
	x86pte_t	found;

	ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN));
	ASSERT(ht->ht_level != VLP_LEVEL);
	if (pte_ptr != NULL)
		ptep = pte_ptr;
	else
		ptep = x86pte_access_pagetable(ht, entry);

	/*
	 * This loop deals with REF/MOD bits changing between the
	 * GET_PTE() and the CAS_PTE().
	 */
	do {
		oldpte = GET_PTE(ptep);
		if (expect != 0 && (oldpte & PT_PADDR) != (expect & PT_PADDR))
			goto give_up;
		found = CAS_PTE(ptep, oldpte, 0);
	} while (found != oldpte);
	oldpte = handle_tlbs(oldpte, ptep, ht, entry);

give_up:
	if (pte_ptr == NULL)
		x86pte_release_pagetable(ht);
	return (oldpte);
}

/*
 * Change a page table entry if it currently matches the value in expect.
 */
x86pte_t
x86pte_update(
	htable_t *ht,
	uint_t entry,
	x86pte_t expect,
	x86pte_t new)
{
	x86pte_t	*ptep;
	x86pte_t	found;

	ASSERT(new != 0);
	ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN));
	ASSERT(ht->ht_level != VLP_LEVEL);

	ptep = x86pte_access_pagetable(ht, entry);
	found = CAS_PTE(ptep, expect, new);
	if (found == expect) {
		hat_tlb_inval(ht->ht_hat, htable_e2va(ht, entry));

		/*
		 * When removing write permission *and* clearing the
		 * MOD bit, check if a write happened via a stale
		 * TLB entry before the TLB shootdown finished.
		 *
		 * If it did happen, simply re-enable write permission and
		 * act like the original CAS failed.
		 */
		if ((expect & (PT_WRITABLE | PT_MOD)) == PT_WRITABLE &&
		    (new & (PT_WRITABLE | PT_MOD)) == 0 &&
		    (GET_PTE(ptep) & PT_MOD) != 0) {
			do {
				found = GET_PTE(ptep);
				found =
				    CAS_PTE(ptep, found, found | PT_WRITABLE);
			} while ((found & PT_WRITABLE) == 0);
		}
	}
	x86pte_release_pagetable(ht);
	return (found);
}

/*
 * Copy page tables - this is just a little more complicated than the
 * previous routines. Note that it's also not atomic! It also is never
 * used for VLP pagetables.
 */
void
x86pte_copy(htable_t *src, htable_t *dest, uint_t entry, uint_t count)
{
	caddr_t	src_va;
	caddr_t dst_va;
	size_t size;
	x86pte_t *pteptr;
	x86pte_t pte;

	ASSERT(khat_running);
	ASSERT(!(dest->ht_flags & HTABLE_VLP));
	ASSERT(!(src->ht_flags & HTABLE_VLP));
	ASSERT(!(src->ht_flags & HTABLE_SHARED_PFN));
	ASSERT(!(dest->ht_flags & HTABLE_SHARED_PFN));

	/*
	 * Acquire access to the CPU pagetable windows for the dest and source.
	 */
	dst_va = (caddr_t)x86pte_access_pagetable(dest, entry);
	if (kpm_vbase) {
		src_va = (caddr_t)
		    PT_INDEX_PTR(hat_kpm_pfn2va(src->ht_pfn), entry);
	} else {
		uint_t x = PWIN_SRC(CPU->cpu_id);

		/*
		 * Finish defining the src pagetable mapping
		 */
		src_va = (caddr_t)PT_INDEX_PTR(PWIN_VA(x), entry);
		pte = MAKEPTE(src->ht_pfn, 0) | mmu.pt_global | mmu.pt_nx;
		pteptr = (x86pte_t *)PWIN_PTE_VA(x);
		if (mmu.pae_hat)
			*pteptr = pte;
		else
			*(x86pte32_t *)pteptr = pte;
		mmu_tlbflush_entry((caddr_t)(PWIN_VA(x)));
	}

	/*
	 * now do the copy
	 */
	size = count << mmu.pte_size_shift;
	bcopy(src_va, dst_va, size);

	x86pte_release_pagetable(dest);
}

/*
 * Zero page table entries - Note this doesn't use atomic stores!
 */
static void
x86pte_zero(htable_t *dest, uint_t entry, uint_t count)
{
	caddr_t dst_va;
	size_t size;

	/*
	 * Map in the page table to be zeroed.
	 */
	ASSERT(!(dest->ht_flags & HTABLE_SHARED_PFN));
	ASSERT(!(dest->ht_flags & HTABLE_VLP));

	dst_va = (caddr_t)x86pte_access_pagetable(dest, entry);

	size = count << mmu.pte_size_shift;
	ASSERT(size > BLOCKZEROALIGN);
#ifdef __i386
	if ((x86_feature & X86_SSE2) == 0)
		bzero(dst_va, size);
	else
#endif
		block_zero_no_xmm(dst_va, size);

	x86pte_release_pagetable(dest);
}

/*
 * Called to ensure that all pagetables are in the system dump
 */
void
hat_dump(void)
{
	hat_t *hat;
	uint_t h;
	htable_t *ht;

	/*
	 * Dump all page tables
	 */
	for (hat = kas.a_hat; hat != NULL; hat = hat->hat_next) {
		for (h = 0; h < hat->hat_num_hash; ++h) {
			for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) {
				if ((ht->ht_flags & HTABLE_VLP) == 0)
					dump_page(ht->ht_pfn);
			}
		}
	}
}