/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/kmem.h>
#include <sys/atomic.h>
#include <sys/bitmap.h>
#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/mman.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/cpu.h>
#include <sys/disp.h>
#include <sys/vmem.h>
#include <sys/vmsystm.h>
#include <sys/promif.h>
#include <sys/var.h>
#include <sys/x86_archext.h>
#include <sys/archsystm.h>
#include <sys/bootconf.h>
#include <sys/dumphdr.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kpm.h>
#include <vm/hat.h>
#include <vm/hat_i86.h>
#include <sys/cmn_err.h>

#include <sys/bootinfo.h>
#include <vm/kboot_mmu.h>

static void x86pte_zero(htable_t *dest, uint_t entry, uint_t count);

kmem_cache_t *htable_cache;

/*
 * The variable htable_reserve_amount, rather than HTABLE_RESERVE_AMOUNT,
 * is used in order to facilitate testing of the htable_steal() code.
 * By resetting htable_reserve_amount to a lower value, we can force
 * stealing to occur. The reserve amount is a guess to get us through boot.
 */
#define	HTABLE_RESERVE_AMOUNT	(200)
uint_t htable_reserve_amount = HTABLE_RESERVE_AMOUNT;
kmutex_t htable_reserve_mutex;
uint_t htable_reserve_cnt;
htable_t *htable_reserve_pool;

/*
 * Used to hand test htable_steal().
 */
#ifdef DEBUG
ulong_t force_steal = 0;
ulong_t ptable_cnt = 0;
#endif

/*
 * This variable is so that we can tune this via /etc/system
 * Any value works, but a power of two <= mmu.ptes_per_table is best.
 */
uint_t htable_steal_passes = 8;
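
/*
 * Illustrative only (not part of the build): both of the tunables above
 * can be set from /etc/system. For example, to force htable stealing
 * during testing one might use the (hypothetical) values:
 *
 *	set htable_reserve_amount = 10
 *	set htable_steal_passes = 16
 *
 * Any power of two up to mmu.ptes_per_table is reasonable for
 * htable_steal_passes.
 */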

/*
 * mutex stuff for access to htable hash
 */
#define	NUM_HTABLE_MUTEX 128
kmutex_t htable_mutex[NUM_HTABLE_MUTEX];
#define	HTABLE_MUTEX_HASH(h)	((h) & (NUM_HTABLE_MUTEX - 1))

#define	HTABLE_ENTER(h)	mutex_enter(&htable_mutex[HTABLE_MUTEX_HASH(h)]);
#define	HTABLE_EXIT(h)	mutex_exit(&htable_mutex[HTABLE_MUTEX_HASH(h)]);
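
/*
 * Usage sketch (illustrative, mirrors the pattern used throughout this
 * file): a hash bucket is protected by the mutex chosen from the bucket
 * index, so at most NUM_HTABLE_MUTEX buckets contend on any one lock:
 *
 *	h = HTABLE_HASH(hat, va, level);
 *	HTABLE_ENTER(h);
 *	for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next)
 *		...
 *	HTABLE_EXIT(h);
 */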

/*
 * forward declarations
 */
static void link_ptp(htable_t *higher, htable_t *new, uintptr_t vaddr);
static void unlink_ptp(htable_t *higher, htable_t *old, uintptr_t vaddr);
static void htable_free(htable_t *ht);
static x86pte_t *x86pte_access_pagetable(htable_t *ht, uint_t index);
static void x86pte_release_pagetable(htable_t *ht);
static x86pte_t x86pte_cas(htable_t *ht, uint_t entry, x86pte_t old,
	x86pte_t new);

/*
 * A counter to track if we are stealing or reaping htables. When non-zero
 * htable_free() will directly free htables (either to the reserve or kmem)
 * instead of putting them in a hat's htable cache.
 */
uint32_t htable_dont_cache = 0;

/*
 * Track the number of active pagetables, so we can know how many to reap
 */
static uint32_t active_ptables = 0;

/*
 * Allocate a memory page for a hardware page table.
 *
 * A wrapper around page_get_physical(), with some extra checks.
 */
static pfn_t
ptable_alloc(uintptr_t seed)
{
	pfn_t pfn;
	page_t *pp;

	pfn = PFN_INVALID;
	atomic_add_32(&active_ptables, 1);

	/*
	 * The first check is to see if there is memory in the system. If we
	 * drop to throttlefree, then fail the ptable_alloc() and let the
	 * stealing code kick in. Note that we have to do this test here,
	 * since the test in page_create_throttle() would let the NOSLEEP
	 * allocation go through and deplete the page reserves.
	 *
	 * The !NOMEMWAIT() lets pageout, fsflush, etc. skip this check.
	 */
	if (!NOMEMWAIT() && freemem <= throttlefree + 1)
		return (PFN_INVALID);

#ifdef DEBUG
	/*
	 * This code makes htable_steal() easier to test. By setting
	 * force_steal we force pagetable allocations to fall
	 * into the stealing code. Roughly 1 in every "force_steal"
	 * page table allocations will fail.
	 */
	if (proc_pageout != NULL && force_steal > 1 &&
	    ++ptable_cnt > force_steal) {
		ptable_cnt = 0;
		return (PFN_INVALID);
	}
#endif /* DEBUG */

	pp = page_get_physical(seed);
	if (pp == NULL)
		return (PFN_INVALID);
	pfn = pp->p_pagenum;
	page_downgrade(pp);
	ASSERT(PAGE_SHARED(pp));

	if (pfn == PFN_INVALID)
		panic("ptable_alloc(): Invalid PFN!!");
	HATSTAT_INC(hs_ptable_allocs);
	return (pfn);
}

/*
 * Free an htable's associated page table page. See the comments
 * for ptable_alloc().
 */
static void
ptable_free(pfn_t pfn)
{
	page_t *pp = page_numtopp_nolock(pfn);

	/*
	 * need to destroy the page used for the pagetable
	 */
	ASSERT(pfn != PFN_INVALID);
	HATSTAT_INC(hs_ptable_frees);
	atomic_add_32(&active_ptables, -1);
	if (pp == NULL)
		panic("ptable_free(): no page for pfn!");
	ASSERT(PAGE_SHARED(pp));
	ASSERT(pfn == pp->p_pagenum);

	/*
	 * Get an exclusive lock, might have to wait for a kmem reader.
	 */
	if (!page_tryupgrade(pp)) {
		page_unlock(pp);
		/*
		 * RFE: we could change this to not loop forever
		 * George Cameron had some idea on how to do that.
		 * For now looping works - it's just like sfmmu.
		 */
		while (!page_lock(pp, SE_EXCL, (kmutex_t *)NULL, P_RECLAIM))
			continue;
	}
	page_free(pp, 1);
	page_unresv(1);
}

/*
 * Put one htable on the reserve list.
 */
static void
htable_put_reserve(htable_t *ht)
{
	ht->ht_hat = NULL;		/* no longer tied to a hat */
	ASSERT(ht->ht_pfn == PFN_INVALID);
	HATSTAT_INC(hs_htable_rputs);
	mutex_enter(&htable_reserve_mutex);
	ht->ht_next = htable_reserve_pool;
	htable_reserve_pool = ht;
	++htable_reserve_cnt;
	mutex_exit(&htable_reserve_mutex);
}

/*
 * Take one htable from the reserve.
 */
static htable_t *
htable_get_reserve(void)
{
	htable_t *ht = NULL;

	mutex_enter(&htable_reserve_mutex);
	if (htable_reserve_cnt != 0) {
		ht = htable_reserve_pool;
		ASSERT(ht != NULL);
		ASSERT(ht->ht_pfn == PFN_INVALID);
		htable_reserve_pool = ht->ht_next;
		--htable_reserve_cnt;
		HATSTAT_INC(hs_htable_rgets);
	}
	mutex_exit(&htable_reserve_mutex);
	return (ht);
}

/*
 * Allocate initial htables and put them on the reserve list
 */
void
htable_initial_reserve(uint_t count)
{
	htable_t *ht;

	count += HTABLE_RESERVE_AMOUNT;
	while (count > 0) {
		ht = kmem_cache_alloc(htable_cache, KM_NOSLEEP);
		ASSERT(ht != NULL);

		ASSERT(use_boot_reserve);
		ht->ht_pfn = PFN_INVALID;
		htable_put_reserve(ht);
		--count;
	}
}

/*
 * Readjust the reserves after a thread finishes using them.
 */
void
htable_adjust_reserve()
{
	htable_t *ht;

	/*
	 * Free any excess htables in the reserve list
	 */
	while (htable_reserve_cnt > htable_reserve_amount &&
	    !USE_HAT_RESERVES()) {
		ht = htable_get_reserve();
		if (ht == NULL)
			return;
		ASSERT(ht->ht_pfn == PFN_INVALID);
		kmem_cache_free(htable_cache, ht);
	}
}
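
/*
 * Reserve lifecycle sketch (illustrative only): the pool primed by
 * htable_initial_reserve() during boot is consumed and replenished as
 * follows:
 *
 *	htable_initial_reserve(n);	boot: prime the pool
 *	ht = htable_get_reserve();	take an htable_t (may return NULL)
 *	...
 *	htable_put_reserve(ht);		return it when done
 *	htable_adjust_reserve();	trim back to htable_reserve_amount
 */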

/*
 * This routine steals htables from user processes for htable_alloc() or
 * for htable_reap().
 */
static htable_t *
htable_steal(uint_t cnt)
{
	hat_t		*hat = kas.a_hat;	/* list starts with khat */
	htable_t	*list = NULL;
	htable_t	*ht;
	htable_t	*higher;
	uint_t		h;
	uint_t		h_start;
	static uint_t	h_seed = 0;
	uint_t		e;
	uintptr_t	va;
	x86pte_t	pte;
	uint_t		stolen = 0;
	uint_t		pass;
	uint_t		threshold;

	/*
	 * Limit htable_steal_passes to something reasonable
	 */
	if (htable_steal_passes == 0)
		htable_steal_passes = 1;
	if (htable_steal_passes > mmu.ptes_per_table)
		htable_steal_passes = mmu.ptes_per_table;

	/*
	 * Loop through all user hats. The 1st pass takes cached htables that
	 * aren't in use. The later passes steal by removing mappings, too.
	 */
	atomic_add_32(&htable_dont_cache, 1);
	for (pass = 0; pass <= htable_steal_passes && stolen < cnt; ++pass) {
		threshold = pass * mmu.ptes_per_table / htable_steal_passes;
		hat = kas.a_hat;
		for (;;) {

			/*
			 * Clear the victim flag and move to next hat
			 */
			mutex_enter(&hat_list_lock);
			if (hat != kas.a_hat) {
				hat->hat_flags &= ~HAT_VICTIM;
				cv_broadcast(&hat_list_cv);
			}
			hat = hat->hat_next;

			/*
			 * Skip any hat that is already being stolen from.
			 *
			 * We skip SHARED hats, as these are dummy
			 * hats that host ISM shared page tables.
			 *
			 * We also skip if HAT_FREEING because hat_pte_unmap()
			 * won't zero out the PTE's. That would lead to hitting
			 * stale PTEs either here or under hat_unload() when we
			 * steal and unload the same page table in competing
			 * threads.
			 */
			while (hat != NULL &&
			    (hat->hat_flags &
			    (HAT_VICTIM | HAT_SHARED | HAT_FREEING)) != 0)
				hat = hat->hat_next;

			if (hat == NULL) {
				mutex_exit(&hat_list_lock);
				break;
			}

			/*
			 * Are we finished?
			 */
			if (stolen == cnt) {
				/*
				 * Try to spread the pain of stealing,
				 * move victim HAT to the end of the HAT list.
				 */
				if (pass >= 1 && cnt == 1 &&
				    kas.a_hat->hat_prev != hat) {

					/* unlink victim hat */
					if (hat->hat_prev)
						hat->hat_prev->hat_next =
						    hat->hat_next;
					else
						kas.a_hat->hat_next =
						    hat->hat_next;
					if (hat->hat_next)
						hat->hat_next->hat_prev =
						    hat->hat_prev;
					else
						kas.a_hat->hat_prev =
						    hat->hat_prev;

					/* relink at end of hat list */
					hat->hat_next = NULL;
					hat->hat_prev = kas.a_hat->hat_prev;
					if (hat->hat_prev)
						hat->hat_prev->hat_next = hat;
					else
						kas.a_hat->hat_next = hat;
					kas.a_hat->hat_prev = hat;

				}

				mutex_exit(&hat_list_lock);
				break;
			}

			/*
			 * Mark the HAT as a stealing victim.
			 */
			hat->hat_flags |= HAT_VICTIM;
			mutex_exit(&hat_list_lock);

			/*
			 * Take any htables from the hat's cached "free" list.
			 */
			hat_enter(hat);
			while ((ht = hat->hat_ht_cached) != NULL &&
			    stolen < cnt) {
				hat->hat_ht_cached = ht->ht_next;
				ht->ht_next = list;
				list = ht;
				++stolen;
			}
			hat_exit(hat);

			/*
			 * Don't steal on first pass.
			 */
			if (pass == 0 || stolen == cnt)
				continue;

			/*
			 * Search the active htables for one to steal.
			 * Start at a different hash bucket every time to
			 * help spread the pain of stealing.
			 */
			h = h_start = h_seed++ % hat->hat_num_hash;
			do {
				higher = NULL;
				HTABLE_ENTER(h);
				for (ht = hat->hat_ht_hash[h]; ht;
				    ht = ht->ht_next) {

					/*
					 * Can we rule out reaping?
					 */
					if (ht->ht_busy != 0 ||
					    (ht->ht_flags &
					    HTABLE_SHARED_PFN) ||
					    ht->ht_level > 0 ||
					    ht->ht_valid_cnt > threshold ||
					    ht->ht_lock_cnt != 0)
						continue;

					/*
					 * Increment busy so the htable can't
					 * disappear. We drop the htable mutex
					 * to avoid deadlocks with
					 * hat_pageunload() and the hment mutex
					 * while we call hat_pte_unmap()
					 */
					++ht->ht_busy;
					HTABLE_EXIT(h);

					/*
					 * Try stealing.
					 * - unload and invalidate all PTEs
					 */
					for (e = 0, va = ht->ht_vaddr;
					    e < HTABLE_NUM_PTES(ht) &&
					    ht->ht_valid_cnt > 0 &&
					    ht->ht_busy == 1 &&
					    ht->ht_lock_cnt == 0;
					    ++e, va += MMU_PAGESIZE) {
						pte = x86pte_get(ht, e);
						if (!PTE_ISVALID(pte))
							continue;
						hat_pte_unmap(ht, e,
						    HAT_UNLOAD, pte, NULL);
					}

					/*
					 * Reacquire htable lock. If we didn't
					 * remove all mappings in the table,
					 * or another thread added a new
					 * mapping behind us, give up on this
					 * table.
					 */
					HTABLE_ENTER(h);
					if (ht->ht_busy != 1 ||
					    ht->ht_valid_cnt != 0 ||
					    ht->ht_lock_cnt != 0) {
						--ht->ht_busy;
						continue;
					}

					/*
					 * Steal it and unlink the page table.
					 */
					higher = ht->ht_parent;
					unlink_ptp(higher, ht, ht->ht_vaddr);

					/*
					 * remove from the hash list
					 */
					if (ht->ht_next)
						ht->ht_next->ht_prev =
						    ht->ht_prev;

					if (ht->ht_prev) {
						ht->ht_prev->ht_next =
						    ht->ht_next;
					} else {
						ASSERT(hat->hat_ht_hash[h] ==
						    ht);
						hat->hat_ht_hash[h] =
						    ht->ht_next;
					}

					/*
					 * Break to outer loop to release the
					 * higher (ht_parent) pagetable. This
					 * spreads out the pain caused by
					 * pagefaults.
					 */
					ht->ht_next = list;
					list = ht;
					++stolen;
					break;
				}
				HTABLE_EXIT(h);
				if (higher != NULL)
					htable_release(higher);
				if (++h == hat->hat_num_hash)
					h = 0;
			} while (stolen < cnt && h != h_start);
		}
	}
	atomic_add_32(&htable_dont_cache, -1);
	return (list);
}
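
/*
 * Worked example of the pass threshold (illustrative, amd64 defaults
 * assumed): with mmu.ptes_per_table == 512 and htable_steal_passes == 8,
 * threshold = pass * 512 / 8 = pass * 64. Pass 0 only takes cached
 * htables, pass 1 only unmaps tables with at most 64 valid entries,
 * pass 2 at most 128, and so on until the final pass, whose threshold of
 * 512 makes every level 0 table eligible.
 */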

/*
 * This is invoked from kmem when the system is low on memory.  We try
 * to free hments, htables, and ptables to improve the memory situation.
 */
/*ARGSUSED*/
static void
htable_reap(void *handle)
{
	uint_t		reap_cnt;
	htable_t	*list;
	htable_t	*ht;

	HATSTAT_INC(hs_reap_attempts);
	if (!can_steal_post_boot)
		return;

	/*
	 * Try to reap 5% of the page tables bounded by a maximum of
	 * 5% of physmem and a minimum of 10.
	 */
	reap_cnt = MAX(MIN(physmem / 20, active_ptables / 20), 10);

	/*
	 * Let htable_steal() do the work, we just call htable_free()
	 */
	list = htable_steal(reap_cnt);
	while ((ht = list) != NULL) {
		list = ht->ht_next;
		HATSTAT_INC(hs_reaped);
		htable_free(ht);
	}

	/*
	 * Free up excess reserves
	 */
	htable_adjust_reserve();
	hment_adjust_reserve();
}
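
/*
 * htable_reap() runs as the kmem cache reclaim callback; a minimal sketch
 * of the registration (see htable_init() below for the actual call):
 *
 *	htable_cache = kmem_cache_create("htable_t", sizeof (htable_t), 0,
 *	    NULL, NULL, htable_reap, NULL, hat_memload_arena, kmem_flags);
 */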

/*
 * Allocate an htable, stealing one or using the reserve if necessary
 */
static htable_t *
htable_alloc(
	hat_t		*hat,
	uintptr_t	vaddr,
	level_t		level,
	htable_t	*shared)
{
	htable_t	*ht = NULL;
	uint_t		is_vlp;
	uint_t		is_bare = 0;
	uint_t		need_to_zero = 1;
	int		kmflags = (can_steal_post_boot ? KM_NOSLEEP : KM_SLEEP);

	if (level < 0 || level > TOP_LEVEL(hat))
		panic("htable_alloc(): level %d out of range\n", level);

	is_vlp = (hat->hat_flags & HAT_VLP) && level == VLP_LEVEL;
	if (is_vlp || shared != NULL)
		is_bare = 1;

	/*
	 * First reuse a cached htable from the hat_ht_cached field, this
	 * avoids unnecessary trips through kmem/page allocators.
	 */
	if (hat->hat_ht_cached != NULL && !is_bare) {
		hat_enter(hat);
		ht = hat->hat_ht_cached;
		if (ht != NULL) {
			hat->hat_ht_cached = ht->ht_next;
			need_to_zero = 0;
			/* XX64 ASSERT() they're all zero somehow */
			ASSERT(ht->ht_pfn != PFN_INVALID);
		}
		hat_exit(hat);
	}

	if (ht == NULL) {
		/*
		 * Allocate an htable, possibly refilling the reserves.
		 */
		if (USE_HAT_RESERVES()) {
			ht = htable_get_reserve();
		} else {
			/*
			 * Donate successful htable allocations to the reserve.
			 */
			for (;;) {
				ht = kmem_cache_alloc(htable_cache, kmflags);
				if (ht == NULL)
					break;
				ht->ht_pfn = PFN_INVALID;
				if (USE_HAT_RESERVES() ||
				    htable_reserve_cnt >=
				    htable_reserve_amount)
					break;
				htable_put_reserve(ht);
			}
		}

		/*
		 * allocate a page for the hardware page table if needed
		 */
		if (ht != NULL && !is_bare) {
			ht->ht_hat = hat;
			ht->ht_pfn = ptable_alloc((uintptr_t)ht);
			if (ht->ht_pfn == PFN_INVALID) {
				if (USE_HAT_RESERVES())
					htable_put_reserve(ht);
				else
					kmem_cache_free(htable_cache, ht);
				ht = NULL;
			}
		}
	}

	/*
	 * If allocations failed, kick off a kmem_reap() and resort to
	 * htable_steal(). We may spin here if the system is very low on
	 * memory. If the kernel itself has consumed all memory and kmem_reap()
	 * can't free up anything, then we'll really get stuck here.
	 * That should only happen in a system where the administrator has
	 * misconfigured VM parameters via /etc/system.
	 */
	while (ht == NULL && can_steal_post_boot) {
		kmem_reap();
		ht = htable_steal(1);
		HATSTAT_INC(hs_steals);

		/*
		 * If we stole for a bare htable, release the pagetable page.
		 */
		if (ht != NULL) {
			if (is_bare) {
				ptable_free(ht->ht_pfn);
				ht->ht_pfn = PFN_INVALID;
			}
		}
	}

	/*
	 * All attempts to allocate or steal failed. This should only happen
	 * if we run out of memory during boot, due perhaps to a huge
	 * boot_archive. At this point there's no way to continue.
	 */
	if (ht == NULL)
		panic("htable_alloc(): couldn't steal\n");

	/*
	 * Shared page tables have all entries locked and entries may not
	 * be added or deleted.
	 */
	ht->ht_flags = 0;
	if (shared != NULL) {
		ASSERT(level == 0);
		ASSERT(shared->ht_valid_cnt > 0);
		ht->ht_flags |= HTABLE_SHARED_PFN;
		ht->ht_pfn = shared->ht_pfn;
		ht->ht_lock_cnt = 0;
		ht->ht_valid_cnt = 0;		/* updated in hat_share() */
		ht->ht_shares = shared;
		need_to_zero = 0;
	} else {
		ht->ht_shares = NULL;
		ht->ht_lock_cnt = 0;
		ht->ht_valid_cnt = 0;
	}

	/*
	 * setup flags, etc. for VLP htables
	 */
	if (is_vlp) {
		ht->ht_flags |= HTABLE_VLP;
		ASSERT(ht->ht_pfn == PFN_INVALID);
		need_to_zero = 0;
	}

	/*
	 * fill in the htable
	 */
	ht->ht_hat = hat;
	ht->ht_parent = NULL;
	ht->ht_vaddr = vaddr;
	ht->ht_level = level;
	ht->ht_busy = 1;
	ht->ht_next = NULL;
	ht->ht_prev = NULL;

	/*
	 * Zero out any freshly allocated page table
	 */
	if (need_to_zero)
		x86pte_zero(ht, 0, mmu.ptes_per_table);

	return (ht);
}
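
/*
 * Allocation fallback order, summarized (illustrative): htable_alloc()
 * tries each source in turn until one yields an htable:
 *
 *	ht = hat->hat_ht_cached;		(1) per-hat free list
 *	ht = htable_get_reserve();		(2) boot-time reserve
 *	ht = kmem_cache_alloc(htable_cache);	(3) kmem cache
 *	ht = htable_steal(1);			(4) steal from a user hat
 */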

/*
 * Free up an htable, either to a hat's cached list, the reserves or
 * back to kmem.
 */
static void
htable_free(htable_t *ht)
{
	hat_t *hat = ht->ht_hat;

	/*
	 * If the process isn't exiting, cache the free htable in the hat
	 * structure. We always do this for the boot reserve. We don't
	 * do this if the hat is exiting or we are stealing/reaping htables.
	 */
	if (hat != NULL &&
	    !(ht->ht_flags & HTABLE_SHARED_PFN) &&
	    (use_boot_reserve ||
	    (!(hat->hat_flags & HAT_FREEING) && !htable_dont_cache))) {
		ASSERT((ht->ht_flags & HTABLE_VLP) == 0);
		ASSERT(ht->ht_pfn != PFN_INVALID);
		hat_enter(hat);
		ht->ht_next = hat->hat_ht_cached;
		hat->hat_ht_cached = ht;
		hat_exit(hat);
		return;
	}

	/*
	 * If we have a hardware page table, free it.
	 * We don't free page tables that are accessed by sharing.
	 */
	if (ht->ht_flags & HTABLE_SHARED_PFN) {
		ASSERT(ht->ht_pfn != PFN_INVALID);
	} else if (!(ht->ht_flags & HTABLE_VLP)) {
		ptable_free(ht->ht_pfn);
	}
	ht->ht_pfn = PFN_INVALID;

	/*
	 * Free htables or put into reserves.
	 */
	if (USE_HAT_RESERVES() || htable_reserve_cnt < htable_reserve_amount) {
		htable_put_reserve(ht);
	} else {
		kmem_cache_free(htable_cache, ht);
		htable_adjust_reserve();
	}
}

/*
 * This is called when a hat is being destroyed or swapped out. We reap all
 * the remaining htables in the hat cache. If destroying, all left over
 * htables are also destroyed.
 *
 * We also don't need to invalidate any of the PTPs nor do any demapping.
 */
void
htable_purge_hat(hat_t *hat)
{
	htable_t *ht;
	int h;

	/*
	 * Purge the htable cache if just reaping.
	 */
	if (!(hat->hat_flags & HAT_FREEING)) {
		atomic_add_32(&htable_dont_cache, 1);
		for (;;) {
			hat_enter(hat);
			ht = hat->hat_ht_cached;
			if (ht == NULL) {
				hat_exit(hat);
				break;
			}
			hat->hat_ht_cached = ht->ht_next;
			hat_exit(hat);
			htable_free(ht);
		}
		atomic_add_32(&htable_dont_cache, -1);
		return;
	}

	/*
	 * if freeing, no locking is needed
	 */
	while ((ht = hat->hat_ht_cached) != NULL) {
		hat->hat_ht_cached = ht->ht_next;
		htable_free(ht);
	}

	/*
	 * walk thru the htable hash table and free all the htables in it.
	 */
	for (h = 0; h < hat->hat_num_hash; ++h) {
		while ((ht = hat->hat_ht_hash[h]) != NULL) {
			if (ht->ht_next)
				ht->ht_next->ht_prev = ht->ht_prev;

			if (ht->ht_prev) {
				ht->ht_prev->ht_next = ht->ht_next;
			} else {
				ASSERT(hat->hat_ht_hash[h] == ht);
				hat->hat_ht_hash[h] = ht->ht_next;
			}
			htable_free(ht);
		}
	}
}
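
/*
 * Illustrative caller sketch (hypothetical, cf. hat_i86.c): during address
 * space teardown the hat layer marks the hat as freeing and then purges
 * it, so htable_free() takes the direct-free path rather than caching:
 *
 *	hat->hat_flags |= HAT_FREEING;
 *	...
 *	htable_purge_hat(hat);
 */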

/*
 * Unlink an entry for a table at vaddr and level out of the existing table
 * one level higher. We are always holding the HASH_ENTER() when doing this.
 */
static void
unlink_ptp(htable_t *higher, htable_t *old, uintptr_t vaddr)
{
	uint_t entry = htable_va2entry(vaddr, higher);
	x86pte_t expect = MAKEPTP(old->ht_pfn, old->ht_level);
	x86pte_t found;
	hat_t *hat = old->ht_hat;

	ASSERT(higher->ht_busy > 0);
	ASSERT(higher->ht_valid_cnt > 0);
	ASSERT(old->ht_valid_cnt == 0);
	found = x86pte_cas(higher, entry, expect, 0);
	if (found != expect)
		panic("Bad PTP found=" FMT_PTE ", expected=" FMT_PTE,
		    found, expect);

	/*
	 * When any top level VLP page table entry changes, we must issue
	 * a reload of cr3 on all processors. Also some CPU types require
	 * invalidating when inner table entries are invalidated.
	 */
	if (!(hat->hat_flags & HAT_FREEING)) {
		if (higher->ht_flags & HTABLE_VLP)
			hat_tlb_inval(hat, DEMAP_ALL_ADDR);
		else if (mmu.inval_nonleaf)
			hat_tlb_inval(hat, old->ht_vaddr);
	}

	HTABLE_DEC(higher->ht_valid_cnt);
}

/*
 * Link an entry for a new table at vaddr and level into the existing table
 * one level higher. We are always holding the HASH_ENTER() when doing this.
 */
static void
link_ptp(htable_t *higher, htable_t *new, uintptr_t vaddr)
{
	uint_t entry = htable_va2entry(vaddr, higher);
	x86pte_t newptp = MAKEPTP(new->ht_pfn, new->ht_level);
	x86pte_t found;

	ASSERT(higher->ht_busy > 0);

	ASSERT(new->ht_level != mmu.max_level);

	HTABLE_INC(higher->ht_valid_cnt);

	found = x86pte_cas(higher, entry, 0, newptp);
	if ((found & ~PT_REF) != 0)
		panic("HAT: ptp not 0, found=" FMT_PTE, found);

	/*
	 * When any top level VLP page table entry changes, we must issue
	 * a reload of cr3 on all processors using it.
	 * We also need to do this for the kernel hat on PAE 32 bit kernel.
	 */
	if (
#ifdef __i386
	    (higher->ht_hat == kas.a_hat && higher->ht_level == VLP_LEVEL) ||
#endif
	    (higher->ht_flags & HTABLE_VLP))
		hat_tlb_inval(higher->ht_hat, DEMAP_ALL_ADDR);
}

/*
 * Release of hold on an htable. If this is the last use and the pagetable
 * is empty we may want to free it, then recursively look at the pagetable
 * above it. The recursion is handled by the outer while() loop.
 */
void
htable_release(htable_t *ht)
{
	uint_t		hashval;
	htable_t	*shared;
	htable_t	*higher;
	hat_t		*hat;
	uintptr_t	va;
	level_t		level;

	while (ht != NULL) {
		shared = NULL;
		for (;;) {
			hat = ht->ht_hat;
			va = ht->ht_vaddr;
			level = ht->ht_level;
			hashval = HTABLE_HASH(hat, va, level);

			/*
			 * The common case is that this isn't the last use of
			 * an htable so we don't want to free the htable.
			 */
			HTABLE_ENTER(hashval);
			ASSERT(ht->ht_lock_cnt == 0 || ht->ht_valid_cnt > 0);
			ASSERT(ht->ht_valid_cnt >= 0);
			ASSERT(ht->ht_busy > 0);
			if (ht->ht_valid_cnt > 0)
				break;
			if (ht->ht_busy > 1)
				break;

			/*
			 * we always release empty shared htables
			 */
			if (!(ht->ht_flags & HTABLE_SHARED_PFN)) {

				/*
				 * don't release if in address space tear down
				 */
				if (hat->hat_flags & HAT_FREEING)
					break;

				/*
				 * At and above max_page_level, free if it's
				 * for a boot-time kernel mapping below
				 * kernelbase.
				 */
				if (level >= mmu.max_page_level &&
				    (hat != kas.a_hat || va >= kernelbase))
					break;
			}

			/*
			 * Remember if we destroy an htable that shares its
			 * PFN from elsewhere.
			 */
			if (ht->ht_flags & HTABLE_SHARED_PFN) {
				ASSERT(ht->ht_level == 0);
				ASSERT(shared == NULL);
				shared = ht->ht_shares;
				HATSTAT_INC(hs_htable_unshared);
			}

			/*
			 * Handle release of a table and freeing the htable_t.
			 * Unlink it from the table higher (ie. ht_parent).
			 */
			ASSERT(ht->ht_lock_cnt == 0);
			higher = ht->ht_parent;
			ASSERT(higher != NULL);

			/*
			 * Unlink the pagetable.
			 */
			unlink_ptp(higher, ht, va);

			/*
			 * remove this htable from its hash list
			 */
			if (ht->ht_next)
				ht->ht_next->ht_prev = ht->ht_prev;

			if (ht->ht_prev) {
				ht->ht_prev->ht_next = ht->ht_next;
			} else {
				ASSERT(hat->hat_ht_hash[hashval] == ht);
				hat->hat_ht_hash[hashval] = ht->ht_next;
			}
			HTABLE_EXIT(hashval);
			htable_free(ht);
			ht = higher;
		}

		ASSERT(ht->ht_busy >= 1);
		--ht->ht_busy;
		HTABLE_EXIT(hashval);

		/*
		 * If we released a shared htable, do a release on the htable
		 * from which it shared
		 */
		ht = shared;
	}
}

/*
 * Find the htable for the pagetable at the given level for the given address.
 * If found acquires a hold that eventually needs to be htable_release()d
 */
htable_t *
htable_lookup(hat_t *hat, uintptr_t vaddr, level_t level)
{
	uintptr_t	base;
	uint_t		hashval;
	htable_t	*ht = NULL;

	ASSERT(level >= 0);
	ASSERT(level <= TOP_LEVEL(hat));

	if (level == TOP_LEVEL(hat))
		base = 0;
	else
		base = vaddr & LEVEL_MASK(level + 1);

	hashval = HTABLE_HASH(hat, base, level);
	HTABLE_ENTER(hashval);
	for (ht = hat->hat_ht_hash[hashval]; ht; ht = ht->ht_next) {
		if (ht->ht_hat == hat &&
		    ht->ht_vaddr == base &&
		    ht->ht_level == level)
			break;
	}
	if (ht)
		++ht->ht_busy;

	HTABLE_EXIT(hashval);
	return (ht);
}

/*
 * Acquires a hold on a known htable (from a locked hment entry).
 */
void
htable_acquire(htable_t *ht)
{
	hat_t		*hat = ht->ht_hat;
	level_t		level = ht->ht_level;
	uintptr_t	base = ht->ht_vaddr;
	uint_t		hashval = HTABLE_HASH(hat, base, level);

	HTABLE_ENTER(hashval);
#ifdef DEBUG
	/*
	 * make sure the htable is there
	 */
	{
		htable_t *h;

		for (h = hat->hat_ht_hash[hashval];
		    h && h != ht;
		    h = h->ht_next)
			;
		ASSERT(h == ht);
	}
#endif /* DEBUG */
	++ht->ht_busy;
	HTABLE_EXIT(hashval);
}
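
/*
 * Hold discipline sketch (illustrative): every successful lookup or
 * acquire must be balanced by exactly one htable_release():
 *
 *	ht = htable_lookup(hat, va, l);
 *	if (ht != NULL) {
 *		... examine or modify PTEs via x86pte_*() ...
 *		htable_release(ht);
 *	}
 */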

/*
 * Find the htable for the pagetable at the given level for the given address.
 * If found acquires a hold that eventually needs to be htable_release()d
 * If not found the table is created.
 *
 * Since we can't hold a hash table mutex during allocation, we have to
 * drop it and redo the search on a create. Then we may have to free the newly
 * allocated htable if another thread raced in and created it ahead of us.
 */
htable_t *
htable_create(
	hat_t		*hat,
	uintptr_t	vaddr,
	level_t		level,
	htable_t	*shared)
{
	uint_t		h;
	level_t		l;
	uintptr_t	base;
	htable_t	*ht;
	htable_t	*higher = NULL;
	htable_t	*new = NULL;

	if (level < 0 || level > TOP_LEVEL(hat))
		panic("htable_create(): level %d out of range\n", level);

	/*
	 * Create the page tables in top down order.
	 */
	for (l = TOP_LEVEL(hat); l >= level; --l) {
		new = NULL;
		if (l == TOP_LEVEL(hat))
			base = 0;
		else
			base = vaddr & LEVEL_MASK(l + 1);

		h = HTABLE_HASH(hat, base, l);
try_again:
		/*
		 * look up the htable at this level
		 */
		HTABLE_ENTER(h);
		if (l == TOP_LEVEL(hat)) {
			ht = hat->hat_htable;
		} else {
			for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) {
				ASSERT(ht->ht_hat == hat);
				if (ht->ht_vaddr == base &&
				    ht->ht_level == l)
					break;
			}
		}

		/*
		 * if we found the htable, increment its busy cnt
		 * and if we had allocated a new htable, free it.
		 */
		if (ht != NULL) {
			/*
			 * If we find a pre-existing shared table, it must
			 * share from the same place.
			 */
			if (l == level && shared && ht->ht_shares &&
			    ht->ht_shares != shared) {
				panic("htable shared from wrong place "
				    "found htable=%p shared=%p", ht, shared);
			}
			++ht->ht_busy;
			HTABLE_EXIT(h);
			if (new)
				htable_free(new);
			if (higher != NULL)
				htable_release(higher);
			higher = ht;

		/*
		 * if we didn't find it on the first search
		 * allocate a new one and search again
		 */
		} else if (new == NULL) {
			HTABLE_EXIT(h);
			new = htable_alloc(hat, base, l,
			    l == level ? shared : NULL);
			goto try_again;

		/*
		 * 2nd search and still not there, use "new" table
		 * Link new table into higher, when not at top level.
		 */
		} else {
			ht = new;
			if (higher != NULL) {
				link_ptp(higher, ht, base);
				ht->ht_parent = higher;
			}
			ht->ht_next = hat->hat_ht_hash[h];
			ASSERT(ht->ht_prev == NULL);
			if (hat->hat_ht_hash[h])
				hat->hat_ht_hash[h]->ht_prev = ht;
			hat->hat_ht_hash[h] = ht;
			HTABLE_EXIT(h);

			/*
			 * Note we don't do htable_release(higher).
			 * That happens recursively when "new" is removed by
			 * htable_release() or htable_steal().
			 */
			higher = ht;

			/*
			 * If we just created a new shared page table we
			 * increment the shared htable's busy count, so that
			 * it can't be the victim of a steal even if it's
			 * empty.
			 */
			if (l == level && shared) {
				(void) htable_lookup(shared->ht_hat,
				    shared->ht_vaddr, shared->ht_level);
				HATSTAT_INC(hs_htable_shared);
			}
		}
	}

	return (ht);
}
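
/*
 * Illustrative caller sketch (hypothetical, the real callers live in
 * hat_i86.c): loading a mapping first creates/finds the level 0 table,
 * then installs the PTE and drops the hold when done with the htable:
 *
 *	ht = htable_create(hat, va, 0, NULL);
 *	entry = htable_va2entry(va, ht);
 *	(void) x86pte_set(ht, entry, pte, NULL);
 *	htable_release(ht);
 */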

/*
 * Inherit initial pagetables from the boot program.
 */
void
htable_attach(
	hat_t *hat,
	uintptr_t base,
	level_t level,
	htable_t *parent,
	pfn_t pfn)
{
	htable_t	*ht;
	uint_t		h;
	uint_t		i;
	x86pte_t	pte;
	x86pte_t	*ptep;
	page_t		*pp;
	extern page_t	*boot_claim_page(pfn_t);

	ht = htable_get_reserve();
	if (level == mmu.max_level)
		kas.a_hat->hat_htable = ht;
	ht->ht_hat = hat;
	ht->ht_parent = parent;
	ht->ht_vaddr = base;
	ht->ht_level = level;
	ht->ht_busy = 1;
	ht->ht_next = NULL;
	ht->ht_prev = NULL;
	ht->ht_flags = 0;
	ht->ht_pfn = pfn;
	ht->ht_lock_cnt = 0;
	ht->ht_valid_cnt = 0;
	if (parent != NULL)
		++parent->ht_busy;

	h = HTABLE_HASH(hat, base, level);
	HTABLE_ENTER(h);
	ht->ht_next = hat->hat_ht_hash[h];
	ASSERT(ht->ht_prev == NULL);
	if (hat->hat_ht_hash[h])
		hat->hat_ht_hash[h]->ht_prev = ht;
	hat->hat_ht_hash[h] = ht;
	HTABLE_EXIT(h);

	/*
	 * make sure the page table physical page is not FREE
	 */
	if (page_resv(1, KM_NOSLEEP) == 0)
		panic("page_resv() failed in ptable alloc");

	pp = boot_claim_page(pfn);
	ASSERT(pp != NULL);
	page_downgrade(pp);

	/*
	 * Record in the page_t that this is a pagetable, for segkpm setup.
	 */
	if (kpm_vbase)
		pp->p_index = 1;

	/*
	 * Count valid mappings and recursively attach lower level pagetables.
	 */
	ptep = kbm_remap_window(pfn_to_pa(pfn), 0);
	for (i = 0; i < HTABLE_NUM_PTES(ht); ++i) {
		if (mmu.pae_hat)
			pte = ptep[i];
		else
			pte = ((x86pte32_t *)ptep)[i];
		if (!IN_HYPERVISOR_VA(base) && PTE_ISVALID(pte)) {
			++ht->ht_valid_cnt;
			if (!PTE_ISPAGE(pte, level)) {
				htable_attach(hat, base, level - 1,
				    ht, PTE2PFN(pte, level));
				ptep = kbm_remap_window(pfn_to_pa(pfn), 0);
			}
		}
		base += LEVEL_SIZE(level);
		if (base == mmu.hole_start)
			base = (mmu.hole_end + MMU_PAGEOFFSET) & MMU_PAGEMASK;
	}

	/*
	 * As long as all the mappings we had were below kernel base
	 * we can release the htable.
	 */
	if (base < kernelbase)
		htable_release(ht);
}

/*
 * Walk through a given htable looking for the first valid entry.  This
 * routine takes both a starting and ending address.  The starting address
 * is required to be within the htable provided by the caller, but there is
 * no such restriction on the ending address.
 *
 * If the routine finds a valid entry in the htable (at or beyond the
 * starting address), the PTE (and its address) will be returned.
 * This PTE may correspond to either a page or a pagetable - it is the
 * caller's responsibility to determine which.  If no valid entry is
 * found, 0 (and invalid PTE) and the next unexamined address will be
 * returned.
 *
 * The loop has been carefully coded for optimization.
 */
static x86pte_t
htable_scan(htable_t *ht, uintptr_t *vap, uintptr_t eaddr)
{
	uint_t e;
	x86pte_t found_pte = (x86pte_t)0;
	caddr_t pte_ptr;
	caddr_t end_pte_ptr;
	int l = ht->ht_level;
	uintptr_t va = *vap & LEVEL_MASK(l);
	size_t pgsize = LEVEL_SIZE(l);

	ASSERT(va >= ht->ht_vaddr);
	ASSERT(va <= HTABLE_LAST_PAGE(ht));

	/*
	 * Compute the starting index and ending virtual address
	 */
	e = htable_va2entry(va, ht);

	/*
	 * The following page table scan code knows that the valid
	 * bit of a PTE is in the lowest byte AND that x86 is little endian!!
	 */
	pte_ptr = (caddr_t)x86pte_access_pagetable(ht, 0);
	end_pte_ptr = (caddr_t)PT_INDEX_PTR(pte_ptr, HTABLE_NUM_PTES(ht));
	pte_ptr = (caddr_t)PT_INDEX_PTR((x86pte_t *)pte_ptr, e);
	while (!PTE_ISVALID(*pte_ptr)) {
		va += pgsize;
		if (va >= eaddr)
			break;
		pte_ptr += mmu.pte_size;
		ASSERT(pte_ptr <= end_pte_ptr);
		if (pte_ptr == end_pte_ptr)
			break;
	}

	/*
	 * if we found a valid PTE, load the entire PTE
	 */
	if (va < eaddr && pte_ptr != end_pte_ptr)
		found_pte = GET_PTE((x86pte_t *)pte_ptr);
	x86pte_release_pagetable(ht);

#if defined(__amd64)
	/*
	 * deal with VA hole on amd64
	 */
	if (l == mmu.max_level && va >= mmu.hole_start && va <= mmu.hole_end)
		va = mmu.hole_end + va - mmu.hole_start;
#endif /* __amd64 */

	*vap = va;
	return (found_pte);
}

/*
 * Find the address and htable for the first populated translation at or
 * above the given virtual address.  The caller may also specify an upper
 * limit to the address range to search. Uses level information to quickly
 * skip unpopulated sections of virtual address spaces.
 *
 * If not found returns NULL. When found, returns the htable and virt addr
 * and has a hold on the htable.
 */
x86pte_t
htable_walk(
	struct hat *hat,
	htable_t **htp,
	uintptr_t *vaddr,
	uintptr_t eaddr)
{
	uintptr_t va = *vaddr;
	htable_t *ht;
	htable_t *prev = *htp;
	level_t l;
	level_t max_mapped_level;
	x86pte_t pte;

	ASSERT(eaddr > va);

	/*
	 * If this is a user address, then we know we need not look beyond
	 * kernelbase.
	 */
	ASSERT(hat == kas.a_hat || eaddr <= kernelbase ||
	    eaddr == HTABLE_WALK_TO_END);
	if (hat != kas.a_hat && eaddr == HTABLE_WALK_TO_END)
		eaddr = kernelbase;

	/*
	 * If we're coming in with a previous page table, search it first
	 * without doing an htable_lookup(), this should be frequent.
	 */
	if (prev) {
		ASSERT(prev->ht_busy > 0);
		ASSERT(prev->ht_vaddr <= va);
		l = prev->ht_level;
		if (va <= HTABLE_LAST_PAGE(prev)) {
			pte = htable_scan(prev, &va, eaddr);

			if (PTE_ISPAGE(pte, l)) {
				*vaddr = va;
				*htp = prev;
				return (pte);
			}
		}

		/*
		 * We found nothing in the htable provided by the caller,
		 * so fall through and do the full search
		 */
		htable_release(prev);
	}

	/*
	 * Find the level of the largest pagesize used by this HAT.
	 */
	max_mapped_level = 0;
	for (l = 1; l <= mmu.max_page_level; ++l)
		if (hat->hat_pages_mapped[l] != 0)
			max_mapped_level = l;

	while (va < eaddr && va >= *vaddr) {
		ASSERT(!IN_VA_HOLE(va));

		/*
		 * Find lowest table with any entry for given address.
		 */
		for (l = 0; l <= TOP_LEVEL(hat); ++l) {
			ht = htable_lookup(hat, va, l);
			if (ht != NULL) {
				pte = htable_scan(ht, &va, eaddr);
				if (PTE_ISPAGE(pte, l)) {
					*vaddr = va;
					*htp = ht;
					return (pte);
				}
				htable_release(ht);
				break;
			}

			/*
			 * The ht is never NULL at the top level since
			 * the top level htable is created in hat_alloc().
			 */
			ASSERT(l < TOP_LEVEL(hat));

			/*
			 * No htable covers the address. If there is no
			 * larger page size that could cover it, we
			 * skip to the start of the next page table.
			 */
			if (l >= max_mapped_level) {
				va = NEXT_ENTRY_VA(va, l + 1);
				break;
			}
		}
	}

	*vaddr = 0;
	*htp = NULL;
	return (0);
}
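
/*
 * Typical iteration pattern (illustrative): callers walk a VA range by
 * repeatedly handing the previous htable back in, releasing the final
 * hold if the walk is abandoned early:
 *
 *	ht = NULL;
 *	while ((pte = htable_walk(hat, &ht, &va, eaddr)) != 0) {
 *		... process the mapping at va ...
 *		va += LEVEL_SIZE(ht->ht_level);
 *	}
 *	if (ht != NULL)
 *		htable_release(ht);
 */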

/*
 * Find the htable and page table entry index of the given virtual address
 * with pagesize at or below given level.
 * If not found returns NULL. When found, returns the htable, sets
 * entry, and has a hold on the htable.
 */
htable_t *
htable_getpte(
	struct hat *hat,
	uintptr_t vaddr,
	uint_t *entry,
	x86pte_t *pte,
	level_t level)
{
	htable_t	*ht;
	level_t		l;
	uint_t		e;

	ASSERT(level <= mmu.max_page_level);

	for (l = 0; l <= level; ++l) {
		ht = htable_lookup(hat, vaddr, l);
		if (ht == NULL)
			continue;
		e = htable_va2entry(vaddr, ht);
		if (entry != NULL)
			*entry = e;
		if (pte != NULL)
			*pte = x86pte_get(ht, e);
		return (ht);
	}
	return (NULL);
}

/*
 * Find the htable and page table entry index of the given virtual address.
 * There must be a valid page mapped at the given address.
 * If not found returns NULL. When found, returns the htable, sets
 * entry, and has a hold on the htable.
 */
htable_t *
htable_getpage(struct hat *hat, uintptr_t vaddr, uint_t *entry)
{
	htable_t	*ht;
	uint_t		e;
	x86pte_t	pte;

	ht = htable_getpte(hat, vaddr, &e, &pte, mmu.max_page_level);
	if (ht == NULL)
		return (NULL);

	if (entry)
		*entry = e;

	if (PTE_ISPAGE(pte, ht->ht_level))
		return (ht);
	htable_release(ht);
	return (NULL);
}

void
htable_init()
{
	/*
	 * To save on kernel VA usage, we avoid debug information in 32 bit
	 * kernels.
	 */
#if defined(__amd64)
	int	kmem_flags = KMC_NOHASH;
#elif defined(__i386)
	int	kmem_flags = KMC_NOHASH | KMC_NODEBUG;
#endif

	/*
	 * initialize kmem caches
	 */
	htable_cache = kmem_cache_create("htable_t",
	    sizeof (htable_t), 0, NULL, NULL,
	    htable_reap, NULL, hat_memload_arena, kmem_flags);
}

/*
 * get the pte index for the virtual address in the given htable's pagetable
 */
uint_t
htable_va2entry(uintptr_t va, htable_t *ht)
{
	level_t	l = ht->ht_level;

	ASSERT(va >= ht->ht_vaddr);
	ASSERT(va <= HTABLE_LAST_PAGE(ht));
	return ((va >> LEVEL_SHIFT(l)) & (HTABLE_NUM_PTES(ht) - 1));
}

/*
 * Given an htable and the index of a pte in it, return the virtual address
 * of the page.
 */
uintptr_t
htable_e2va(htable_t *ht, uint_t entry)
{
	level_t	l = ht->ht_level;
	uintptr_t va;

	ASSERT(entry < HTABLE_NUM_PTES(ht));
	va = ht->ht_vaddr + ((uintptr_t)entry << LEVEL_SHIFT(l));

	/*
	 * Need to skip over any VA hole in top level table
	 */
#if defined(__amd64)
	if (ht->ht_level == mmu.max_level && va >= mmu.hole_start)
		va += ((mmu.hole_end - mmu.hole_start) + 1);
#endif

	return (va);
}
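
/*
 * Worked example (illustrative, amd64 level 0 values assumed): with
 * LEVEL_SHIFT(0) == 12 and HTABLE_NUM_PTES(ht) == 512, a va of
 * ht->ht_vaddr + 0x5000 gives entry = (va >> 12) & 511 == 5, and
 * htable_e2va(ht, 5) maps back to ht->ht_vaddr + 0x5000, so the two
 * functions are inverses within one pagetable.
 */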

/*
 * The code uses compare and swap instructions to read/write PTE's to
 * avoid atomicity problems, since PTEs can be 8 bytes on 32 bit systems,
 * where plain loads and stores of a PTE will not naturally be atomic.
 *
 * The combination of using kpreempt_disable()/_enable() and the hci_mutex
 * are used to ensure that an interrupt won't overwrite a temporary mapping
 * while it's in use. If an interrupt thread tries to access a PTE, it will
 * yield briefly back to the pinned thread which holds the cpu's hci_mutex.
 */
void
x86pte_cpu_init(cpu_t *cpu)
{
	struct hat_cpu_info *hci;

	hci = kmem_zalloc(sizeof (*hci), KM_SLEEP);
	mutex_init(&hci->hci_mutex, NULL, MUTEX_DEFAULT, NULL);
	cpu->cpu_hat_info = hci;
}

void
x86pte_cpu_fini(cpu_t *cpu)
{
	struct hat_cpu_info *hci = cpu->cpu_hat_info;

	kmem_free(hci, sizeof (*hci));
	cpu->cpu_hat_info = NULL;
}

#ifdef __i386
/*
 * On 32 bit kernels, loading a 64 bit PTE is a little tricky
 */
x86pte_t
get_pte64(x86pte_t *ptr)
{
	volatile uint32_t *p = (uint32_t *)ptr;
	x86pte_t t;

	ASSERT(mmu.pae_hat != 0);
	for (;;) {
		t = p[0];
		t |= (uint64_t)p[1] << 32;
		if ((t & 0xffffffff) == p[0])
			return (t);
	}
}
#endif /* __i386 */

/*
 * Disable preemption and establish a mapping to the pagetable with the
 * given pfn. This is optimized for the case where it's the same pfn we
 * last referenced from this CPU.
 */
static x86pte_t *
x86pte_access_pagetable(htable_t *ht, uint_t index)
{
	/*
	 * VLP pagetables are contained in the hat_t
	 */
	if (ht->ht_flags & HTABLE_VLP)
		return (PT_INDEX_PTR(ht->ht_hat->hat_vlp_ptes, index));
	return (x86pte_mapin(ht->ht_pfn, index, ht));
}

/*
 * map the given pfn into the page table window.
 */
/*ARGSUSED*/
x86pte_t *
x86pte_mapin(pfn_t pfn, uint_t index, htable_t *ht)
{
	x86pte_t *pteptr;
	x86pte_t pte;
	x86pte_t newpte;
	int x;

	ASSERT(pfn != PFN_INVALID);

	if (!khat_running) {
		caddr_t va = kbm_remap_window(pfn_to_pa(pfn), 1);
		return (PT_INDEX_PTR(va, index));
	}

	/*
	 * If kpm is available, use it.
	 */
	if (kpm_vbase)
		return (PT_INDEX_PTR(hat_kpm_pfn2va(pfn), index));

	/*
	 * Disable preemption and grab the CPU's hci_mutex
	 */
	kpreempt_disable();
	ASSERT(CPU->cpu_hat_info != NULL);
	mutex_enter(&CPU->cpu_hat_info->hci_mutex);
	x = PWIN_TABLE(CPU->cpu_id);
	pteptr = (x86pte_t *)PWIN_PTE_VA(x);
	if (mmu.pae_hat)
		pte = *pteptr;
	else
		pte = *(x86pte32_t *)pteptr;

	newpte = MAKEPTE(pfn, 0) | mmu.pt_global | mmu.pt_nx;
	newpte |= PT_WRITABLE;

	if (!PTE_EQUIV(newpte, pte)) {
		if (mmu.pae_hat)
			*pteptr = newpte;
		else
			*(x86pte32_t *)pteptr = newpte;
		mmu_tlbflush_entry((caddr_t)(PWIN_VA(x)));
	}
	return (PT_INDEX_PTR(PWIN_VA(x), index));
}

/*
 * Release access to a page table.
 */
static void
x86pte_release_pagetable(htable_t *ht)
{
	/*
	 * nothing to do for VLP htables
	 */
	if (ht->ht_flags & HTABLE_VLP)
		return;

	x86pte_mapout();
}

void
x86pte_mapout(void)
{
	if (mmu.pwin_base == NULL || !khat_running)
		return;

	/*
	 * Drop the CPU's hci_mutex and restore preemption.
	 */
	mutex_exit(&CPU->cpu_hat_info->hci_mutex);
	kpreempt_enable();
}
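
/*
 * Access pairing sketch (illustrative): every x86pte_access_pagetable()
 * (or x86pte_mapin()) must be matched by x86pte_release_pagetable()
 * (or x86pte_mapout()) so the CPU's mapping window and hci_mutex are
 * released. This is exactly what x86pte_get() below does:
 *
 *	ptep = x86pte_access_pagetable(ht, entry);
 *	pte = GET_PTE(ptep);
 *	x86pte_release_pagetable(ht);
 */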

/*
 * Atomic retrieval of a pagetable entry
 */
x86pte_t
x86pte_get(htable_t *ht, uint_t entry)
{
	x86pte_t	pte;
	x86pte_t	*ptep;

	/*
	 * Be careful that loading PAE entries in 32 bit kernel is atomic.
	 */
	ASSERT(entry < mmu.ptes_per_table);
	ptep = x86pte_access_pagetable(ht, entry);
	pte = GET_PTE(ptep);
	x86pte_release_pagetable(ht);
	return (pte);
}

/*
 * Atomic unconditional set of a page table entry, it returns the previous
 * value. For pre-existing mappings if the PFN changes, then we don't care
 * about the old pte's REF / MOD bits. If the PFN remains the same, we leave
 * the MOD/REF bits unchanged.
 *
 * If asked to overwrite a link to a lower page table with a large page
 * mapping, this routine returns the special value of LPAGE_ERROR. This
 * allows the upper HAT layers to retry with a smaller mapping size.
 */
x86pte_t
x86pte_set(htable_t *ht, uint_t entry, x86pte_t new, void *ptr)
{
	x86pte_t	old;
	x86pte_t	prev;
	x86pte_t	*ptep;
	level_t		l = ht->ht_level;
	x86pte_t	pfn_mask = (l != 0) ? PT_PADDR_LGPG : PT_PADDR;
	x86pte_t	n;
	uintptr_t	addr = htable_e2va(ht, entry);
	hat_t		*hat = ht->ht_hat;

	ASSERT(new != 0); /* don't use to invalidate a PTE, see x86pte_update */
	ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN));
	if (ptr == NULL)
		ptep = x86pte_access_pagetable(ht, entry);
	else
		ptep = ptr;

	/*
	 * Install the new PTE. If remapping the same PFN, then
	 * copy existing REF/MOD bits to new mapping.
	 */
	do {
		prev = GET_PTE(ptep);
		n = new;
		if (PTE_ISVALID(n) && (prev & pfn_mask) == (new & pfn_mask))
			n |= prev & (PT_REF | PT_MOD);

		/*
		 * Another thread may have installed this mapping already,
		 * flush the local TLB and be done.
		 */
		if (prev == n) {
			old = new;
			mmu_tlbflush_entry((caddr_t)addr);
			goto done;
		}

		/*
		 * Detect if we have a collision of installing a large
		 * page mapping where there already is a lower page table.
		 */
		if (l > 0 && (prev & PT_VALID) && !(prev & PT_PAGESIZE)) {
			old = LPAGE_ERROR;
			goto done;
		}

		old = CAS_PTE(ptep, prev, n);
	} while (old != prev);

	/*
	 * Do a TLB demap if needed, ie. the old pte was valid.
	 *
	 * Note that a stale TLB writeback to the PTE here either can't happen
	 * or doesn't matter. The PFN can only change for NOSYNC|NOCONSIST
	 * mappings, but they were created with REF and MOD already set, so
	 * no stale writeback will happen.
	 *
	 * Segmap is the only place where remaps happen on the same pfn and
	 * for that we want to preserve the stale REF/MOD bits.
	 */
	if (old & PT_REF)
		hat_tlb_inval(hat, addr);

done:
	if (ptr == NULL)
		x86pte_release_pagetable(ht);
	return (old);
}

/*
 * Atomic compare and swap of a page table entry. No TLB invalidates are done.
 * This is used for links between pagetables of different levels.
 * Note we always create these links with dirty/access set, so they should
 * never change.
 */
x86pte_t
x86pte_cas(htable_t *ht, uint_t entry, x86pte_t old, x86pte_t new)
{
	x86pte_t	pte;
	x86pte_t	*ptep;

	ptep = x86pte_access_pagetable(ht, entry);
	pte = CAS_PTE(ptep, old, new);
	x86pte_release_pagetable(ht);
	return (pte);
}

/*
 * Invalidate a page table entry as long as it currently maps something that
 * matches the value determined by expect.
 *
 * Also invalidates any TLB entries and returns the previous value of the PTE.
 */
x86pte_t
x86pte_inval(
	htable_t *ht,
	uint_t entry,
	x86pte_t expect,
	x86pte_t *pte_ptr)
{
	x86pte_t	*ptep;
	x86pte_t	oldpte;
	x86pte_t	found;

	ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN));
	ASSERT(ht->ht_level != VLP_LEVEL);

	if (pte_ptr != NULL)
		ptep = pte_ptr;
	else
		ptep = x86pte_access_pagetable(ht, entry);

	/*
	 * Note that the loop is needed to handle changes due to h/w updating
	 * of PT_MOD/PT_REF.
	 */
	do {
		oldpte = GET_PTE(ptep);
		if (expect != 0 && (oldpte & PT_PADDR) != (expect & PT_PADDR))
			goto done;
		found = CAS_PTE(ptep, oldpte, 0);
	} while (found != oldpte);
	if (oldpte & (PT_REF | PT_MOD))
		hat_tlb_inval(ht->ht_hat, htable_e2va(ht, entry));

done:
	if (pte_ptr == NULL)
		x86pte_release_pagetable(ht);
	return (oldpte);
}

/*
 * Change a page table entry if it currently matches the value in expect.
 */
x86pte_t
x86pte_update(
	htable_t *ht,
	uint_t entry,
	x86pte_t expect,
	x86pte_t new)
{
	x86pte_t	*ptep;
	x86pte_t	found;

	ASSERT(new != 0);
	ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN));
	ASSERT(ht->ht_level != VLP_LEVEL);

	ptep = x86pte_access_pagetable(ht, entry);
	found = CAS_PTE(ptep, expect, new);
	if (found == expect) {
		hat_tlb_inval(ht->ht_hat, htable_e2va(ht, entry));

		/*
		 * When removing write permission *and* clearing the
		 * MOD bit, check if a write happened via a stale
		 * TLB entry before the TLB shootdown finished.
		 *
		 * If it did happen, simply re-enable write permission and
		 * act like the original CAS failed.
		 */
		if ((expect & (PT_WRITABLE | PT_MOD)) == PT_WRITABLE &&
		    (new & (PT_WRITABLE | PT_MOD)) == 0 &&
		    (GET_PTE(ptep) & PT_MOD) != 0) {
			do {
				found = GET_PTE(ptep);
				found =
				    CAS_PTE(ptep, found, found | PT_WRITABLE);
			} while ((found & PT_WRITABLE) == 0);
		}
	}
	x86pte_release_pagetable(ht);
	return (found);
}

/*
 * Copy page tables - this is just a little more complicated than the
 * previous routines. Note that it's also not atomic! It also is never
 * used for VLP pagetables.
 */
void
x86pte_copy(htable_t *src, htable_t *dest, uint_t entry, uint_t count)
{
	caddr_t	src_va;
	caddr_t dst_va;
	size_t size;
	x86pte_t *pteptr;
	x86pte_t pte;

	ASSERT(khat_running);
	ASSERT(!(dest->ht_flags & HTABLE_VLP));
	ASSERT(!(src->ht_flags & HTABLE_VLP));
	ASSERT(!(src->ht_flags & HTABLE_SHARED_PFN));
	ASSERT(!(dest->ht_flags & HTABLE_SHARED_PFN));

	/*
	 * Acquire access to the CPU pagetable windows for the dest and source.
	 */
	dst_va = (caddr_t)x86pte_access_pagetable(dest, entry);
	if (kpm_vbase) {
		src_va = (caddr_t)
		    PT_INDEX_PTR(hat_kpm_pfn2va(src->ht_pfn), entry);
	} else {
		uint_t x = PWIN_SRC(CPU->cpu_id);

		/*
		 * Finish defining the src pagetable mapping
		 */
		src_va = (caddr_t)PT_INDEX_PTR(PWIN_VA(x), entry);
		pte = MAKEPTE(src->ht_pfn, 0) | mmu.pt_global | mmu.pt_nx;
		pteptr = (x86pte_t *)PWIN_PTE_VA(x);
		if (mmu.pae_hat)
			*pteptr = pte;
		else
			*(x86pte32_t *)pteptr = pte;
		mmu_tlbflush_entry((caddr_t)(PWIN_VA(x)));
	}

	/*
	 * now do the copy
	 */
	size = count << mmu.pte_size_shift;
	bcopy(src_va, dst_va, size);

	x86pte_release_pagetable(dest);
}

/*
 * Zero page table entries - Note this doesn't use atomic stores!
 */
static void
x86pte_zero(htable_t *dest, uint_t entry, uint_t count)
{
	caddr_t dst_va;
	size_t size;

	/*
	 * Map in the page table to be zeroed.
	 */
	ASSERT(!(dest->ht_flags & HTABLE_SHARED_PFN));
	ASSERT(!(dest->ht_flags & HTABLE_VLP));

	dst_va = (caddr_t)x86pte_access_pagetable(dest, entry);

	size = count << mmu.pte_size_shift;
	ASSERT(size > BLOCKZEROALIGN);
#ifdef __i386
	if ((x86_feature & X86_SSE2) == 0)
		bzero(dst_va, size);
	else
#endif
		block_zero_no_xmm(dst_va, size);

	x86pte_release_pagetable(dest);
}

/*
 * Called to ensure that all pagetables are in the system dump
 */
void
hat_dump(void)
{
	hat_t *hat;
	uint_t h;
	htable_t *ht;

	/*
	 * Dump all page tables
	 */
	for (hat = kas.a_hat; hat != NULL; hat = hat->hat_next) {
		for (h = 0; h < hat->hat_num_hash; ++h) {
			for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) {
				if ((ht->ht_flags & HTABLE_VLP) == 0)
					dump_page(ht->ht_pfn);
			}
		}
	}
}