1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #pragma ident "%Z%%M% %I% %E% SMI" 28 29 #include <sys/types.h> 30 #include <sys/sysmacros.h> 31 #include <sys/kmem.h> 32 #include <sys/atomic.h> 33 #include <sys/bitmap.h> 34 #include <sys/machparam.h> 35 #include <sys/machsystm.h> 36 #include <sys/mman.h> 37 #include <sys/systm.h> 38 #include <sys/cpuvar.h> 39 #include <sys/thread.h> 40 #include <sys/proc.h> 41 #include <sys/cpu.h> 42 #include <sys/kmem.h> 43 #include <sys/disp.h> 44 #include <sys/vmem.h> 45 #include <sys/vmsystm.h> 46 #include <sys/promif.h> 47 #include <sys/var.h> 48 #include <sys/x86_archext.h> 49 #include <sys/archsystm.h> 50 #include <sys/bootconf.h> 51 #include <sys/dumphdr.h> 52 #include <vm/seg_kmem.h> 53 #include <vm/seg_kpm.h> 54 #include <vm/hat.h> 55 #include <vm/hat_i86.h> 56 #include <sys/cmn_err.h> 57 #include <sys/panic.h> 58 59 #ifdef __xpv 60 #include <sys/hypervisor.h> 61 #include <sys/xpv_panic.h> 62 #endif 63 64 #include <sys/bootinfo.h> 65 #include <vm/kboot_mmu.h> 66 67 static void x86pte_zero(htable_t *dest, uint_t entry, uint_t count); 68 69 kmem_cache_t *htable_cache; 70 71 /* 72 * The variable htable_reserve_amount, rather than HTABLE_RESERVE_AMOUNT, 73 * is used in order to facilitate testing of the htable_steal() code. 74 * By resetting htable_reserve_amount to a lower value, we can force 75 * stealing to occur. The reserve amount is a guess to get us through boot. 76 */ 77 #define HTABLE_RESERVE_AMOUNT (200) 78 uint_t htable_reserve_amount = HTABLE_RESERVE_AMOUNT; 79 kmutex_t htable_reserve_mutex; 80 uint_t htable_reserve_cnt; 81 htable_t *htable_reserve_pool; 82 83 /* 84 * Used to hand test htable_steal(). 85 */ 86 #ifdef DEBUG 87 ulong_t force_steal = 0; 88 ulong_t ptable_cnt = 0; 89 #endif 90 91 /* 92 * This variable is so that we can tune this via /etc/system 93 * Any value works, but a power of two <= mmu.ptes_per_table is best. 
94 */ 95 uint_t htable_steal_passes = 8; 96 97 /* 98 * mutex stuff for access to htable hash 99 */ 100 #define NUM_HTABLE_MUTEX 128 101 kmutex_t htable_mutex[NUM_HTABLE_MUTEX]; 102 #define HTABLE_MUTEX_HASH(h) ((h) & (NUM_HTABLE_MUTEX - 1)) 103 104 #define HTABLE_ENTER(h) mutex_enter(&htable_mutex[HTABLE_MUTEX_HASH(h)]); 105 #define HTABLE_EXIT(h) mutex_exit(&htable_mutex[HTABLE_MUTEX_HASH(h)]); 106 107 /* 108 * forward declarations 109 */ 110 static void link_ptp(htable_t *higher, htable_t *new, uintptr_t vaddr); 111 static void unlink_ptp(htable_t *higher, htable_t *old, uintptr_t vaddr); 112 static void htable_free(htable_t *ht); 113 static x86pte_t *x86pte_access_pagetable(htable_t *ht, uint_t index); 114 static void x86pte_release_pagetable(htable_t *ht); 115 static x86pte_t x86pte_cas(htable_t *ht, uint_t entry, x86pte_t old, 116 x86pte_t new); 117 118 /* 119 * A counter to track if we are stealing or reaping htables. When non-zero 120 * htable_free() will directly free htables (either to the reserve or kmem) 121 * instead of putting them in a hat's htable cache. 122 */ 123 uint32_t htable_dont_cache = 0; 124 125 /* 126 * Track the number of active pagetables, so we can know how many to reap 127 */ 128 static uint32_t active_ptables = 0; 129 130 #ifdef __xpv 131 /* 132 * Deal with hypervisor complications. 133 */ 134 void 135 xen_flush_va(caddr_t va) 136 { 137 struct mmuext_op t; 138 uint_t count; 139 140 if (IN_XPV_PANIC()) { 141 mmu_tlbflush_entry((caddr_t)va); 142 } else { 143 t.cmd = MMUEXT_INVLPG_LOCAL; 144 t.arg1.linear_addr = (uintptr_t)va; 145 if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0) 146 panic("HYPERVISOR_mmuext_op() failed"); 147 ASSERT(count == 1); 148 } 149 } 150 151 void 152 xen_gflush_va(caddr_t va, cpuset_t cpus) 153 { 154 struct mmuext_op t; 155 uint_t count; 156 157 if (IN_XPV_PANIC()) { 158 mmu_tlbflush_entry((caddr_t)va); 159 return; 160 } 161 162 t.cmd = MMUEXT_INVLPG_MULTI; 163 t.arg1.linear_addr = (uintptr_t)va; 164 /*LINTED: constant in conditional context*/ 165 set_xen_guest_handle(t.arg2.vcpumask, &cpus); 166 if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0) 167 panic("HYPERVISOR_mmuext_op() failed"); 168 ASSERT(count == 1); 169 } 170 171 void 172 xen_flush_tlb() 173 { 174 struct mmuext_op t; 175 uint_t count; 176 177 if (IN_XPV_PANIC()) { 178 xpv_panic_reload_cr3(); 179 } else { 180 t.cmd = MMUEXT_TLB_FLUSH_LOCAL; 181 if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0) 182 panic("HYPERVISOR_mmuext_op() failed"); 183 ASSERT(count == 1); 184 } 185 } 186 187 void 188 xen_gflush_tlb(cpuset_t cpus) 189 { 190 struct mmuext_op t; 191 uint_t count; 192 193 ASSERT(!IN_XPV_PANIC()); 194 t.cmd = MMUEXT_TLB_FLUSH_MULTI; 195 /*LINTED: constant in conditional context*/ 196 set_xen_guest_handle(t.arg2.vcpumask, &cpus); 197 if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0) 198 panic("HYPERVISOR_mmuext_op() failed"); 199 ASSERT(count == 1); 200 } 201 202 /* 203 * Install/Adjust a kpm mapping under the hypervisor. 204 * Value of "how" should be: 205 * PT_WRITABLE | PT_VALID - regular kpm mapping 206 * PT_VALID - make mapping read-only 207 * 0 - remove mapping 208 * 209 * returns 0 on success. non-zero for failure. 
210 */ 211 int 212 xen_kpm_page(pfn_t pfn, uint_t how) 213 { 214 paddr_t pa = mmu_ptob((paddr_t)pfn); 215 x86pte_t pte = PT_NOCONSIST | PT_REF | PT_MOD; 216 217 if (kpm_vbase == NULL) 218 return (0); 219 220 if (how) 221 pte |= pa_to_ma(pa) | how; 222 else 223 pte = 0; 224 return (HYPERVISOR_update_va_mapping((uintptr_t)kpm_vbase + pa, 225 pte, UVMF_INVLPG | UVMF_ALL)); 226 } 227 228 void 229 xen_pin(pfn_t pfn, level_t lvl) 230 { 231 struct mmuext_op t; 232 uint_t count; 233 234 t.cmd = MMUEXT_PIN_L1_TABLE + lvl; 235 t.arg1.mfn = pfn_to_mfn(pfn); 236 if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0) 237 panic("HYPERVISOR_mmuext_op() failed"); 238 ASSERT(count == 1); 239 } 240 241 void 242 xen_unpin(pfn_t pfn) 243 { 244 struct mmuext_op t; 245 uint_t count; 246 247 t.cmd = MMUEXT_UNPIN_TABLE; 248 t.arg1.mfn = pfn_to_mfn(pfn); 249 if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0) 250 panic("HYPERVISOR_mmuext_op() failed"); 251 ASSERT(count == 1); 252 } 253 254 static void 255 xen_map(uint64_t pte, caddr_t va) 256 { 257 if (HYPERVISOR_update_va_mapping((uintptr_t)va, pte, 258 UVMF_INVLPG | UVMF_LOCAL)) 259 panic("HYPERVISOR_update_va_mapping() failed"); 260 } 261 #endif /* __xpv */ 262 263 /* 264 * Allocate a memory page for a hardware page table. 265 * 266 * A wrapper around page_get_physical(), with some extra checks. 267 */ 268 static pfn_t 269 ptable_alloc(uintptr_t seed) 270 { 271 pfn_t pfn; 272 page_t *pp; 273 274 pfn = PFN_INVALID; 275 atomic_add_32(&active_ptables, 1); 276 277 /* 278 * The first check is to see if there is memory in the system. If we 279 * drop to throttlefree, then fail the ptable_alloc() and let the 280 * stealing code kick in. Note that we have to do this test here, 281 * since the test in page_create_throttle() would let the NOSLEEP 282 * allocation go through and deplete the page reserves. 283 * 284 * The !NOMEMWAIT() lets pageout, fsflush, etc. skip this check. 285 */ 286 if (!NOMEMWAIT() && freemem <= throttlefree + 1) 287 return (PFN_INVALID); 288 289 #ifdef DEBUG 290 /* 291 * This code makes htable_steal() easier to test. By setting 292 * force_steal we force pagetable allocations to fall 293 * into the stealing code. Roughly 1 in every "force_steal" 294 * page table allocations will fail. 295 */ 296 if (proc_pageout != NULL && force_steal > 1 && 297 ++ptable_cnt > force_steal) { 298 ptable_cnt = 0; 299 return (PFN_INVALID); 300 } 301 #endif /* DEBUG */ 302 303 pp = page_get_physical(seed); 304 if (pp == NULL) 305 return (PFN_INVALID); 306 pfn = pp->p_pagenum; 307 page_downgrade(pp); 308 ASSERT(PAGE_SHARED(pp)); 309 310 if (pfn == PFN_INVALID) 311 panic("ptable_alloc(): Invalid PFN!!"); 312 HATSTAT_INC(hs_ptable_allocs); 313 return (pfn); 314 } 315 316 /* 317 * Free an htable's associated page table page. See the comments 318 * for ptable_alloc(). 319 */ 320 static void 321 ptable_free(pfn_t pfn) 322 { 323 page_t *pp = page_numtopp_nolock(pfn); 324 325 /* 326 * need to destroy the page used for the pagetable 327 */ 328 ASSERT(pfn != PFN_INVALID); 329 HATSTAT_INC(hs_ptable_frees); 330 atomic_add_32(&active_ptables, -1); 331 if (pp == NULL) 332 panic("ptable_free(): no page for pfn!"); 333 ASSERT(PAGE_SHARED(pp)); 334 ASSERT(pfn == pp->p_pagenum); 335 ASSERT(!IN_XPV_PANIC()); 336 337 /* 338 * Get an exclusive lock, might have to wait for a kmem reader. 339 */ 340 if (!page_tryupgrade(pp)) { 341 page_unlock(pp); 342 /* 343 * RFE: we could change this to not loop forever; 344 * George Cameron had some idea on how to do that.
345 * For now looping works - it's just like sfmmu. 346 */ 347 while (!page_lock(pp, SE_EXCL, (kmutex_t *)NULL, P_RECLAIM)) 348 continue; 349 } 350 #ifdef __xpv 351 if (kpm_vbase && xen_kpm_page(pfn, PT_VALID | PT_WRITABLE) < 0) 352 panic("failure making kpm r/w pfn=0x%lx", pfn); 353 #endif 354 page_free(pp, 1); 355 page_unresv(1); 356 } 357 358 /* 359 * Put one htable on the reserve list. 360 */ 361 static void 362 htable_put_reserve(htable_t *ht) 363 { 364 ht->ht_hat = NULL; /* no longer tied to a hat */ 365 ASSERT(ht->ht_pfn == PFN_INVALID); 366 HATSTAT_INC(hs_htable_rputs); 367 mutex_enter(&htable_reserve_mutex); 368 ht->ht_next = htable_reserve_pool; 369 htable_reserve_pool = ht; 370 ++htable_reserve_cnt; 371 mutex_exit(&htable_reserve_mutex); 372 } 373 374 /* 375 * Take one htable from the reserve. 376 */ 377 static htable_t * 378 htable_get_reserve(void) 379 { 380 htable_t *ht = NULL; 381 382 mutex_enter(&htable_reserve_mutex); 383 if (htable_reserve_cnt != 0) { 384 ht = htable_reserve_pool; 385 ASSERT(ht != NULL); 386 ASSERT(ht->ht_pfn == PFN_INVALID); 387 htable_reserve_pool = ht->ht_next; 388 --htable_reserve_cnt; 389 HATSTAT_INC(hs_htable_rgets); 390 } 391 mutex_exit(&htable_reserve_mutex); 392 return (ht); 393 } 394 395 /* 396 * Allocate initial htables and put them on the reserve list 397 */ 398 void 399 htable_initial_reserve(uint_t count) 400 { 401 htable_t *ht; 402 403 count += HTABLE_RESERVE_AMOUNT; 404 while (count > 0) { 405 ht = kmem_cache_alloc(htable_cache, KM_NOSLEEP); 406 ASSERT(ht != NULL); 407 408 ASSERT(use_boot_reserve); 409 ht->ht_pfn = PFN_INVALID; 410 htable_put_reserve(ht); 411 --count; 412 } 413 } 414 415 /* 416 * Readjust the reserves after a thread finishes using them. 417 */ 418 void 419 htable_adjust_reserve() 420 { 421 htable_t *ht; 422 423 /* 424 * Free any excess htables in the reserve list 425 */ 426 while (htable_reserve_cnt > htable_reserve_amount && 427 !USE_HAT_RESERVES()) { 428 ht = htable_get_reserve(); 429 if (ht == NULL) 430 return; 431 ASSERT(ht->ht_pfn == PFN_INVALID); 432 kmem_cache_free(htable_cache, ht); 433 } 434 } 435 436 437 /* 438 * This routine steals htables from user processes for htable_alloc() or 439 * for htable_reap(). 440 */ 441 static htable_t * 442 htable_steal(uint_t cnt) 443 { 444 hat_t *hat = kas.a_hat; /* list starts with khat */ 445 htable_t *list = NULL; 446 htable_t *ht; 447 htable_t *higher; 448 uint_t h; 449 uint_t h_start; 450 static uint_t h_seed = 0; 451 uint_t e; 452 uintptr_t va; 453 x86pte_t pte; 454 uint_t stolen = 0; 455 uint_t pass; 456 uint_t threshold; 457 458 /* 459 * Limit htable_steal_passes to something reasonable 460 */ 461 if (htable_steal_passes == 0) 462 htable_steal_passes = 1; 463 if (htable_steal_passes > mmu.ptes_per_table) 464 htable_steal_passes = mmu.ptes_per_table; 465 466 /* 467 * Loop through all user hats. The 1st pass takes cached htables that 468 * aren't in use. The later passes steal by removing mappings, too. 469 */ 470 atomic_add_32(&htable_dont_cache, 1); 471 for (pass = 0; pass <= htable_steal_passes && stolen < cnt; ++pass) { 472 threshold = pass * mmu.ptes_per_table / htable_steal_passes; 473 hat = kas.a_hat; 474 for (;;) { 475 476 /* 477 * Clear the victim flag and move to next hat 478 */ 479 mutex_enter(&hat_list_lock); 480 if (hat != kas.a_hat) { 481 hat->hat_flags &= ~HAT_VICTIM; 482 cv_broadcast(&hat_list_cv); 483 } 484 hat = hat->hat_next; 485 486 /* 487 * Skip any hat that is already being stolen from. 
488 * 489 * We skip SHARED hats, as these are dummy 490 * hats that host ISM shared page tables. 491 * 492 * We also skip if HAT_FREEING because hat_pte_unmap() 493 * won't zero out the PTE's. That would lead to hitting 494 * stale PTEs either here or under hat_unload() when we 495 * steal and unload the same page table in competing 496 * threads. 497 */ 498 while (hat != NULL && 499 (hat->hat_flags & 500 (HAT_VICTIM | HAT_SHARED | HAT_FREEING)) != 0) 501 hat = hat->hat_next; 502 503 if (hat == NULL) { 504 mutex_exit(&hat_list_lock); 505 break; 506 } 507 508 /* 509 * Are we finished? 510 */ 511 if (stolen == cnt) { 512 /* 513 * Try to spread the pain of stealing, 514 * move victim HAT to the end of the HAT list. 515 */ 516 if (pass >= 1 && cnt == 1 && 517 kas.a_hat->hat_prev != hat) { 518 519 /* unlink victim hat */ 520 if (hat->hat_prev) 521 hat->hat_prev->hat_next = 522 hat->hat_next; 523 else 524 kas.a_hat->hat_next = 525 hat->hat_next; 526 if (hat->hat_next) 527 hat->hat_next->hat_prev = 528 hat->hat_prev; 529 else 530 kas.a_hat->hat_prev = 531 hat->hat_prev; 532 533 534 /* relink at end of hat list */ 535 hat->hat_next = NULL; 536 hat->hat_prev = kas.a_hat->hat_prev; 537 if (hat->hat_prev) 538 hat->hat_prev->hat_next = hat; 539 else 540 kas.a_hat->hat_next = hat; 541 kas.a_hat->hat_prev = hat; 542 543 } 544 545 mutex_exit(&hat_list_lock); 546 break; 547 } 548 549 /* 550 * Mark the HAT as a stealing victim. 551 */ 552 hat->hat_flags |= HAT_VICTIM; 553 mutex_exit(&hat_list_lock); 554 555 /* 556 * Take any htables from the hat's cached "free" list. 557 */ 558 hat_enter(hat); 559 while ((ht = hat->hat_ht_cached) != NULL && 560 stolen < cnt) { 561 hat->hat_ht_cached = ht->ht_next; 562 ht->ht_next = list; 563 list = ht; 564 ++stolen; 565 } 566 hat_exit(hat); 567 568 /* 569 * Don't steal on first pass. 570 */ 571 if (pass == 0 || stolen == cnt) 572 continue; 573 574 /* 575 * Search the active htables for one to steal. 576 * Start at a different hash bucket every time to 577 * help spread the pain of stealing. 578 */ 579 h = h_start = h_seed++ % hat->hat_num_hash; 580 do { 581 higher = NULL; 582 HTABLE_ENTER(h); 583 for (ht = hat->hat_ht_hash[h]; ht; 584 ht = ht->ht_next) { 585 586 /* 587 * Can we rule out reaping? 588 */ 589 if (ht->ht_busy != 0 || 590 (ht->ht_flags & HTABLE_SHARED_PFN)|| 591 ht->ht_level > 0 || 592 ht->ht_valid_cnt > threshold || 593 ht->ht_lock_cnt != 0) 594 continue; 595 596 /* 597 * Increment busy so the htable can't 598 * disappear. We drop the htable mutex 599 * to avoid deadlocks with 600 * hat_pageunload() and the hment mutex 601 * while we call hat_pte_unmap() 602 */ 603 ++ht->ht_busy; 604 HTABLE_EXIT(h); 605 606 /* 607 * Try stealing. 608 * - unload and invalidate all PTEs 609 */ 610 for (e = 0, va = ht->ht_vaddr; 611 e < HTABLE_NUM_PTES(ht) && 612 ht->ht_valid_cnt > 0 && 613 ht->ht_busy == 1 && 614 ht->ht_lock_cnt == 0; 615 ++e, va += MMU_PAGESIZE) { 616 pte = x86pte_get(ht, e); 617 if (!PTE_ISVALID(pte)) 618 continue; 619 hat_pte_unmap(ht, e, 620 HAT_UNLOAD, pte, NULL); 621 } 622 623 /* 624 * Reacquire htable lock. If we didn't 625 * remove all mappings in the table, 626 * or another thread added a new mapping 627 * behind us, give up on this table. 628 */ 629 HTABLE_ENTER(h); 630 if (ht->ht_busy != 1 || 631 ht->ht_valid_cnt != 0 || 632 ht->ht_lock_cnt != 0) { 633 --ht->ht_busy; 634 continue; 635 } 636 637 /* 638 * Steal it and unlink the page table. 
639 */ 640 higher = ht->ht_parent; 641 unlink_ptp(higher, ht, ht->ht_vaddr); 642 643 /* 644 * remove from the hash list 645 */ 646 if (ht->ht_next) 647 ht->ht_next->ht_prev = 648 ht->ht_prev; 649 650 if (ht->ht_prev) { 651 ht->ht_prev->ht_next = 652 ht->ht_next; 653 } else { 654 ASSERT(hat->hat_ht_hash[h] == 655 ht); 656 hat->hat_ht_hash[h] = 657 ht->ht_next; 658 } 659 660 /* 661 * Break to outer loop to release the 662 * higher (ht_parent) pagetable. This 663 * spreads out the pain caused by 664 * pagefaults. 665 */ 666 ht->ht_next = list; 667 list = ht; 668 ++stolen; 669 break; 670 } 671 HTABLE_EXIT(h); 672 if (higher != NULL) 673 htable_release(higher); 674 if (++h == hat->hat_num_hash) 675 h = 0; 676 } while (stolen < cnt && h != h_start); 677 } 678 } 679 atomic_add_32(&htable_dont_cache, -1); 680 return (list); 681 } 682 683 684 /* 685 * This is invoked from kmem when the system is low on memory. We try 686 * to free hments, htables, and ptables to improve the memory situation. 687 */ 688 /*ARGSUSED*/ 689 static void 690 htable_reap(void *handle) 691 { 692 uint_t reap_cnt; 693 htable_t *list; 694 htable_t *ht; 695 696 HATSTAT_INC(hs_reap_attempts); 697 if (!can_steal_post_boot) 698 return; 699 700 /* 701 * Try to reap 5% of the page tables bounded by a maximum of 702 * 5% of physmem and a minimum of 10. 703 */ 704 reap_cnt = MAX(MIN(physmem / 20, active_ptables / 20), 10); 705 706 /* 707 * Let htable_steal() do the work; we just call htable_free(). 708 */ 709 XPV_DISALLOW_MIGRATE(); 710 list = htable_steal(reap_cnt); 711 XPV_ALLOW_MIGRATE(); 712 while ((ht = list) != NULL) { 713 list = ht->ht_next; 714 HATSTAT_INC(hs_reaped); 715 htable_free(ht); 716 } 717 718 /* 719 * Free up excess reserves 720 */ 721 htable_adjust_reserve(); 722 hment_adjust_reserve(); 723 } 724 725 /* 726 * Allocate an htable, stealing one or using the reserve if necessary 727 */ 728 static htable_t * 729 htable_alloc( 730 hat_t *hat, 731 uintptr_t vaddr, 732 level_t level, 733 htable_t *shared) 734 { 735 htable_t *ht = NULL; 736 uint_t is_vlp; 737 uint_t is_bare = 0; 738 uint_t need_to_zero = 1; 739 int kmflags = (can_steal_post_boot ? KM_NOSLEEP : KM_SLEEP); 740 741 if (level < 0 || level > TOP_LEVEL(hat)) 742 panic("htable_alloc(): level %d out of range\n", level); 743 744 is_vlp = (hat->hat_flags & HAT_VLP) && level == VLP_LEVEL; 745 if (is_vlp || shared != NULL) 746 is_bare = 1; 747 748 /* 749 * First reuse a cached htable from the hat_ht_cached field; this 750 * avoids unnecessary trips through kmem/page allocators. 751 */ 752 if (hat->hat_ht_cached != NULL && !is_bare) { 753 hat_enter(hat); 754 ht = hat->hat_ht_cached; 755 if (ht != NULL) { 756 hat->hat_ht_cached = ht->ht_next; 757 need_to_zero = 0; 758 /* XX64 ASSERT() they're all zero somehow */ 759 ASSERT(ht->ht_pfn != PFN_INVALID); 760 } 761 hat_exit(hat); 762 } 763 764 if (ht == NULL) { 765 /* 766 * Allocate an htable, possibly refilling the reserves. 767 */ 768 if (USE_HAT_RESERVES()) { 769 ht = htable_get_reserve(); 770 } else { 771 /* 772 * Donate successful htable allocations to the reserve.
773 */ 774 for (;;) { 775 ht = kmem_cache_alloc(htable_cache, kmflags); 776 if (ht == NULL) 777 break; 778 ht->ht_pfn = PFN_INVALID; 779 if (USE_HAT_RESERVES() || 780 htable_reserve_cnt >= htable_reserve_amount) 781 break; 782 htable_put_reserve(ht); 783 } 784 } 785 786 /* 787 * allocate a page for the hardware page table if needed 788 */ 789 if (ht != NULL && !is_bare) { 790 ht->ht_hat = hat; 791 ht->ht_pfn = ptable_alloc((uintptr_t)ht); 792 if (ht->ht_pfn == PFN_INVALID) { 793 if (USE_HAT_RESERVES()) 794 htable_put_reserve(ht); 795 else 796 kmem_cache_free(htable_cache, ht); 797 ht = NULL; 798 } 799 } 800 } 801 802 /* 803 * If allocations failed, kick off a kmem_reap() and resort to 804 * htable steal(). We may spin here if the system is very low on 805 * memory. If the kernel itself has consumed all memory and kmem_reap() 806 * can't free up anything, then we'll really get stuck here. 807 * That should only happen in a system where the administrator has 808 * misconfigured VM parameters via /etc/system. 809 */ 810 while (ht == NULL && can_steal_post_boot) { 811 kmem_reap(); 812 ht = htable_steal(1); 813 HATSTAT_INC(hs_steals); 814 815 /* 816 * If we stole for a bare htable, release the pagetable page. 817 */ 818 if (ht != NULL) { 819 if (is_bare) { 820 ptable_free(ht->ht_pfn); 821 ht->ht_pfn = PFN_INVALID; 822 #if defined(__xpv) && defined(__amd64) 823 /* 824 * make stolen page table writable again in kpm 825 */ 826 } else if (kpm_vbase && xen_kpm_page(ht->ht_pfn, 827 PT_VALID | PT_WRITABLE) < 0) { 828 panic("failure making kpm r/w pfn=0x%lx", 829 ht->ht_pfn); 830 #endif 831 } 832 } 833 } 834 835 /* 836 * All attempts to allocate or steal failed. This should only happen 837 * if we run out of memory during boot, due perhaps to a huge 838 * boot_archive. At this point there's no way to continue. 839 */ 840 if (ht == NULL) 841 panic("htable_alloc(): couldn't steal\n"); 842 843 #if defined(__amd64) && defined(__xpv) 844 /* 845 * Under the 64-bit hypervisor, we have 2 top level page tables. 846 * If this allocation fails, we'll resort to stealing. 847 * We use the stolen page indirectly, by freeing the 848 * stolen htable first. 849 */ 850 if (level == mmu.max_level) { 851 for (;;) { 852 htable_t *stolen; 853 854 hat->hat_user_ptable = ptable_alloc((uintptr_t)ht + 1); 855 if (hat->hat_user_ptable != PFN_INVALID) 856 break; 857 stolen = htable_steal(1); 858 if (stolen == NULL) 859 panic("2nd steal ptable failed\n"); 860 htable_free(stolen); 861 } 862 block_zero_no_xmm(kpm_vbase + pfn_to_pa(hat->hat_user_ptable), 863 MMU_PAGESIZE); 864 } 865 #endif 866 867 /* 868 * Shared page tables have all entries locked and entries may not 869 * be added or deleted. 870 */ 871 ht->ht_flags = 0; 872 if (shared != NULL) { 873 ASSERT(shared->ht_valid_cnt > 0); 874 ht->ht_flags |= HTABLE_SHARED_PFN; 875 ht->ht_pfn = shared->ht_pfn; 876 ht->ht_lock_cnt = 0; 877 ht->ht_valid_cnt = 0; /* updated in hat_share() */ 878 ht->ht_shares = shared; 879 need_to_zero = 0; 880 } else { 881 ht->ht_shares = NULL; 882 ht->ht_lock_cnt = 0; 883 ht->ht_valid_cnt = 0; 884 } 885 886 /* 887 * setup flags, etc. 
for VLP htables 888 */ 889 if (is_vlp) { 890 ht->ht_flags |= HTABLE_VLP; 891 ASSERT(ht->ht_pfn == PFN_INVALID); 892 need_to_zero = 0; 893 } 894 895 /* 896 * fill in the htable 897 */ 898 ht->ht_hat = hat; 899 ht->ht_parent = NULL; 900 ht->ht_vaddr = vaddr; 901 ht->ht_level = level; 902 ht->ht_busy = 1; 903 ht->ht_next = NULL; 904 ht->ht_prev = NULL; 905 906 /* 907 * Zero out any freshly allocated page table 908 */ 909 if (need_to_zero) 910 x86pte_zero(ht, 0, mmu.ptes_per_table); 911 912 #if defined(__amd64) && defined(__xpv) 913 if (!is_bare && kpm_vbase) { 914 (void) xen_kpm_page(ht->ht_pfn, PT_VALID); 915 if (level == mmu.max_level) 916 (void) xen_kpm_page(hat->hat_user_ptable, PT_VALID); 917 } 918 #endif 919 920 return (ht); 921 } 922 923 /* 924 * Free up an htable, either to a hat's cached list, the reserves or 925 * back to kmem. 926 */ 927 static void 928 htable_free(htable_t *ht) 929 { 930 hat_t *hat = ht->ht_hat; 931 932 /* 933 * If the process isn't exiting, cache the free htable in the hat 934 * structure. We always do this for the boot time reserve. We don't 935 * do this if the hat is exiting or we are stealing/reaping htables. 936 */ 937 if (hat != NULL && 938 !(ht->ht_flags & HTABLE_SHARED_PFN) && 939 (use_boot_reserve || 940 (!(hat->hat_flags & HAT_FREEING) && !htable_dont_cache))) { 941 ASSERT((ht->ht_flags & HTABLE_VLP) == 0); 942 ASSERT(ht->ht_pfn != PFN_INVALID); 943 hat_enter(hat); 944 ht->ht_next = hat->hat_ht_cached; 945 hat->hat_ht_cached = ht; 946 hat_exit(hat); 947 return; 948 } 949 950 /* 951 * If we have a hardware page table, free it. 952 * We don't free page tables that are accessed by sharing. 953 */ 954 if (ht->ht_flags & HTABLE_SHARED_PFN) { 955 ASSERT(ht->ht_pfn != PFN_INVALID); 956 } else if (!(ht->ht_flags & HTABLE_VLP)) { 957 ptable_free(ht->ht_pfn); 958 #if defined(__amd64) && defined(__xpv) 959 if (ht->ht_level == mmu.max_level) { 960 ptable_free(hat->hat_user_ptable); 961 hat->hat_user_ptable = PFN_INVALID; 962 } 963 #endif 964 } 965 ht->ht_pfn = PFN_INVALID; 966 967 /* 968 * Free it or put into reserves. 969 */ 970 if (USE_HAT_RESERVES() || htable_reserve_cnt < htable_reserve_amount) { 971 htable_put_reserve(ht); 972 } else { 973 kmem_cache_free(htable_cache, ht); 974 htable_adjust_reserve(); 975 } 976 } 977 978 979 /* 980 * This is called when a hat is being destroyed or swapped out. We reap all 981 * the remaining htables in the hat cache. If destroying all left over 982 * htables are also destroyed. 983 * 984 * We also don't need to invalidate any of the PTPs nor do any demapping. 985 */ 986 void 987 htable_purge_hat(hat_t *hat) 988 { 989 htable_t *ht; 990 int h; 991 992 /* 993 * Purge the htable cache if just reaping. 994 */ 995 if (!(hat->hat_flags & HAT_FREEING)) { 996 atomic_add_32(&htable_dont_cache, 1); 997 for (;;) { 998 hat_enter(hat); 999 ht = hat->hat_ht_cached; 1000 if (ht == NULL) { 1001 hat_exit(hat); 1002 break; 1003 } 1004 hat->hat_ht_cached = ht->ht_next; 1005 hat_exit(hat); 1006 htable_free(ht); 1007 } 1008 atomic_add_32(&htable_dont_cache, -1); 1009 return; 1010 } 1011 1012 /* 1013 * if freeing, no locking is needed 1014 */ 1015 while ((ht = hat->hat_ht_cached) != NULL) { 1016 hat->hat_ht_cached = ht->ht_next; 1017 htable_free(ht); 1018 } 1019 1020 /* 1021 * walk thru the htable hash table and free all the htables in it. 
1022 */ 1023 for (h = 0; h < hat->hat_num_hash; ++h) { 1024 while ((ht = hat->hat_ht_hash[h]) != NULL) { 1025 if (ht->ht_next) 1026 ht->ht_next->ht_prev = ht->ht_prev; 1027 1028 if (ht->ht_prev) { 1029 ht->ht_prev->ht_next = ht->ht_next; 1030 } else { 1031 ASSERT(hat->hat_ht_hash[h] == ht); 1032 hat->hat_ht_hash[h] = ht->ht_next; 1033 } 1034 htable_free(ht); 1035 } 1036 } 1037 } 1038 1039 /* 1040 * Unlink an entry for a table at vaddr and level out of the existing table 1041 * one level higher. We are always holding the HTABLE_ENTER() mutex when doing this. 1042 */ 1043 static void 1044 unlink_ptp(htable_t *higher, htable_t *old, uintptr_t vaddr) 1045 { 1046 uint_t entry = htable_va2entry(vaddr, higher); 1047 x86pte_t expect = MAKEPTP(old->ht_pfn, old->ht_level); 1048 x86pte_t found; 1049 hat_t *hat = old->ht_hat; 1050 1051 ASSERT(higher->ht_busy > 0); 1052 ASSERT(higher->ht_valid_cnt > 0); 1053 ASSERT(old->ht_valid_cnt == 0); 1054 found = x86pte_cas(higher, entry, expect, 0); 1055 #ifdef __xpv 1056 /* 1057 * This is weird, but Xen apparently automatically unlinks empty 1058 * pagetables from the upper page table. So allow PTP to be 0 already. 1059 */ 1060 if (found != expect && found != 0) 1061 #else 1062 if (found != expect) 1063 #endif 1064 panic("Bad PTP found=" FMT_PTE ", expected=" FMT_PTE, 1065 found, expect); 1066 1067 /* 1068 * When a top level VLP page table entry changes, we must issue 1069 * a reload of cr3 on all processors. 1070 * 1071 * If we don't need to do that, then we still have to INVLPG against 1072 * an address covered by the inner page table, as the latest processors 1073 * have TLB-like caches for non-leaf page table entries. 1074 */ 1075 if (!(hat->hat_flags & HAT_FREEING)) { 1076 hat_tlb_inval(hat, (higher->ht_flags & HTABLE_VLP) ? 1077 DEMAP_ALL_ADDR : old->ht_vaddr); 1078 } 1079 1080 HTABLE_DEC(higher->ht_valid_cnt); 1081 } 1082 1083 /* 1084 * Link an entry for a new table at vaddr and level into the existing table 1085 * one level higher. We are always holding the HTABLE_ENTER() mutex when doing this. 1086 */ 1087 static void 1088 link_ptp(htable_t *higher, htable_t *new, uintptr_t vaddr) 1089 { 1090 uint_t entry = htable_va2entry(vaddr, higher); 1091 x86pte_t newptp = MAKEPTP(new->ht_pfn, new->ht_level); 1092 x86pte_t found; 1093 1094 ASSERT(higher->ht_busy > 0); 1095 1096 ASSERT(new->ht_level != mmu.max_level); 1097 1098 HTABLE_INC(higher->ht_valid_cnt); 1099 1100 found = x86pte_cas(higher, entry, 0, newptp); 1101 if ((found & ~PT_REF) != 0) 1102 panic("HAT: ptp not 0, found=" FMT_PTE, found); 1103 1104 /* 1105 * When any top level VLP page table entry changes, we must issue 1106 * a reload of cr3 on all processors using it. 1107 * We also need to do this for the kernel hat on a PAE 32 bit kernel. 1108 */ 1109 if ( 1110 #ifdef __i386 1111 (higher->ht_hat == kas.a_hat && higher->ht_level == VLP_LEVEL) || 1112 #endif 1113 (higher->ht_flags & HTABLE_VLP)) 1114 hat_tlb_inval(higher->ht_hat, DEMAP_ALL_ADDR); 1115 } 1116 1117 /* 1118 * Release a hold on an htable. If this is the last use and the pagetable 1119 * is empty we may want to free it, then recursively look at the pagetable 1120 * above it. The recursion is handled by the outer while() loop. 1121 * 1122 * On the metal, during process exit, we don't bother unlinking the tables from 1123 * upper level pagetables. They are instead handled in bulk by hat_free_end(). 1124 * We can't do this on the hypervisor as we need the page table to be 1125 * implicitly unpinned before it goes to the free page lists.
This can't 1126 * happen unless we fully unlink it from the page table hierarchy. 1127 */ 1128 void 1129 htable_release(htable_t *ht) 1130 { 1131 uint_t hashval; 1132 htable_t *shared; 1133 htable_t *higher; 1134 hat_t *hat; 1135 uintptr_t va; 1136 level_t level; 1137 1138 while (ht != NULL) { 1139 shared = NULL; 1140 for (;;) { 1141 hat = ht->ht_hat; 1142 va = ht->ht_vaddr; 1143 level = ht->ht_level; 1144 hashval = HTABLE_HASH(hat, va, level); 1145 1146 /* 1147 * The common case is that this isn't the last use of 1148 * an htable so we don't want to free the htable. 1149 */ 1150 HTABLE_ENTER(hashval); 1151 ASSERT(ht->ht_valid_cnt >= 0); 1152 ASSERT(ht->ht_busy > 0); 1153 if (ht->ht_valid_cnt > 0) 1154 break; 1155 if (ht->ht_busy > 1) 1156 break; 1157 ASSERT(ht->ht_lock_cnt == 0); 1158 1159 #if !defined(__xpv) 1160 /* 1161 * we always release empty shared htables 1162 */ 1163 if (!(ht->ht_flags & HTABLE_SHARED_PFN)) { 1164 1165 /* 1166 * don't release if in address space tear down 1167 */ 1168 if (hat->hat_flags & HAT_FREEING) 1169 break; 1170 1171 /* 1172 * At and above max_page_level, free if it's for 1173 * a boot-time kernel mapping below kernelbase. 1174 */ 1175 if (level >= mmu.max_page_level && 1176 (hat != kas.a_hat || va >= kernelbase)) 1177 break; 1178 } 1179 #endif /* __xpv */ 1180 1181 /* 1182 * Remember if we destroy an htable that shares its PFN 1183 * from elsewhere. 1184 */ 1185 if (ht->ht_flags & HTABLE_SHARED_PFN) { 1186 ASSERT(shared == NULL); 1187 shared = ht->ht_shares; 1188 HATSTAT_INC(hs_htable_unshared); 1189 } 1190 1191 /* 1192 * Handle release of a table and freeing the htable_t. 1193 * Unlink it from the table higher (ie. ht_parent). 1194 */ 1195 ASSERT(ht->ht_lock_cnt == 0); 1196 higher = ht->ht_parent; 1197 ASSERT(higher != NULL); 1198 1199 /* 1200 * Unlink the pagetable. 1201 */ 1202 unlink_ptp(higher, ht, va); 1203 1204 /* 1205 * remove this htable from its hash list 1206 */ 1207 if (ht->ht_next) 1208 ht->ht_next->ht_prev = ht->ht_prev; 1209 1210 if (ht->ht_prev) { 1211 ht->ht_prev->ht_next = ht->ht_next; 1212 } else { 1213 ASSERT(hat->hat_ht_hash[hashval] == ht); 1214 hat->hat_ht_hash[hashval] = ht->ht_next; 1215 } 1216 HTABLE_EXIT(hashval); 1217 htable_free(ht); 1218 ht = higher; 1219 } 1220 1221 ASSERT(ht->ht_busy >= 1); 1222 --ht->ht_busy; 1223 HTABLE_EXIT(hashval); 1224 1225 /* 1226 * If we released a shared htable, do a release on the htable 1227 * from which it shared 1228 */ 1229 ht = shared; 1230 } 1231 } 1232 1233 /* 1234 * Find the htable for the pagetable at the given level for the given address. 
1235 * If found acquires a hold that eventually needs to be htable_release()d 1236 */ 1237 htable_t * 1238 htable_lookup(hat_t *hat, uintptr_t vaddr, level_t level) 1239 { 1240 uintptr_t base; 1241 uint_t hashval; 1242 htable_t *ht = NULL; 1243 1244 ASSERT(level >= 0); 1245 ASSERT(level <= TOP_LEVEL(hat)); 1246 1247 if (level == TOP_LEVEL(hat)) { 1248 #if defined(__amd64) 1249 /* 1250 * 32 bit address spaces on 64 bit kernels need to check 1251 * for overflow of the 32 bit address space 1252 */ 1253 if ((hat->hat_flags & HAT_VLP) && vaddr >= ((uint64_t)1 << 32)) 1254 return (NULL); 1255 #endif 1256 base = 0; 1257 } else { 1258 base = vaddr & LEVEL_MASK(level + 1); 1259 } 1260 1261 hashval = HTABLE_HASH(hat, base, level); 1262 HTABLE_ENTER(hashval); 1263 for (ht = hat->hat_ht_hash[hashval]; ht; ht = ht->ht_next) { 1264 if (ht->ht_hat == hat && 1265 ht->ht_vaddr == base && 1266 ht->ht_level == level) 1267 break; 1268 } 1269 if (ht) 1270 ++ht->ht_busy; 1271 1272 HTABLE_EXIT(hashval); 1273 return (ht); 1274 } 1275 1276 /* 1277 * Acquires a hold on a known htable (from a locked hment entry). 1278 */ 1279 void 1280 htable_acquire(htable_t *ht) 1281 { 1282 hat_t *hat = ht->ht_hat; 1283 level_t level = ht->ht_level; 1284 uintptr_t base = ht->ht_vaddr; 1285 uint_t hashval = HTABLE_HASH(hat, base, level); 1286 1287 HTABLE_ENTER(hashval); 1288 #ifdef DEBUG 1289 /* 1290 * make sure the htable is there 1291 */ 1292 { 1293 htable_t *h; 1294 1295 for (h = hat->hat_ht_hash[hashval]; 1296 h && h != ht; 1297 h = h->ht_next) 1298 ; 1299 ASSERT(h == ht); 1300 } 1301 #endif /* DEBUG */ 1302 ++ht->ht_busy; 1303 HTABLE_EXIT(hashval); 1304 } 1305 1306 /* 1307 * Find the htable for the pagetable at the given level for the given address. 1308 * If found acquires a hold that eventually needs to be htable_release()d 1309 * If not found the table is created. 1310 * 1311 * Since we can't hold a hash table mutex during allocation, we have to 1312 * drop it and redo the search on a create. Then we may have to free the newly 1313 * allocated htable if another thread raced in and created it ahead of us. 1314 */ 1315 htable_t * 1316 htable_create( 1317 hat_t *hat, 1318 uintptr_t vaddr, 1319 level_t level, 1320 htable_t *shared) 1321 { 1322 uint_t h; 1323 level_t l; 1324 uintptr_t base; 1325 htable_t *ht; 1326 htable_t *higher = NULL; 1327 htable_t *new = NULL; 1328 1329 if (level < 0 || level > TOP_LEVEL(hat)) 1330 panic("htable_create(): level %d out of range\n", level); 1331 1332 /* 1333 * Create the page tables in top down order. 1334 */ 1335 for (l = TOP_LEVEL(hat); l >= level; --l) { 1336 new = NULL; 1337 if (l == TOP_LEVEL(hat)) 1338 base = 0; 1339 else 1340 base = vaddr & LEVEL_MASK(l + 1); 1341 1342 h = HTABLE_HASH(hat, base, l); 1343 try_again: 1344 /* 1345 * look up the htable at this level 1346 */ 1347 HTABLE_ENTER(h); 1348 if (l == TOP_LEVEL(hat)) { 1349 ht = hat->hat_htable; 1350 } else { 1351 for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) { 1352 ASSERT(ht->ht_hat == hat); 1353 if (ht->ht_vaddr == base && 1354 ht->ht_level == l) 1355 break; 1356 } 1357 } 1358 1359 /* 1360 * if we found the htable, increment its busy cnt 1361 * and if we had allocated a new htable, free it. 1362 */ 1363 if (ht != NULL) { 1364 /* 1365 * If we find a pre-existing shared table, it must 1366 * share from the same place. 
1367 */ 1368 if (l == level && shared && ht->ht_shares && 1369 ht->ht_shares != shared) { 1370 panic("htable shared from wrong place " 1371 "found htable=%p shared=%p", ht, shared); 1372 } 1373 ++ht->ht_busy; 1374 HTABLE_EXIT(h); 1375 if (new) 1376 htable_free(new); 1377 if (higher != NULL) 1378 htable_release(higher); 1379 higher = ht; 1380 1381 /* 1382 * if we didn't find it on the first search 1383 * allocate a new one and search again 1384 */ 1385 } else if (new == NULL) { 1386 HTABLE_EXIT(h); 1387 new = htable_alloc(hat, base, l, 1388 l == level ? shared : NULL); 1389 goto try_again; 1390 1391 /* 1392 * 2nd search and still not there, use "new" table 1393 * Link new table into higher, when not at top level. 1394 */ 1395 } else { 1396 ht = new; 1397 if (higher != NULL) { 1398 link_ptp(higher, ht, base); 1399 ht->ht_parent = higher; 1400 } 1401 ht->ht_next = hat->hat_ht_hash[h]; 1402 ASSERT(ht->ht_prev == NULL); 1403 if (hat->hat_ht_hash[h]) 1404 hat->hat_ht_hash[h]->ht_prev = ht; 1405 hat->hat_ht_hash[h] = ht; 1406 HTABLE_EXIT(h); 1407 1408 /* 1409 * Note we don't do htable_release(higher). 1410 * That happens recursively when "new" is removed by 1411 * htable_release() or htable_steal(). 1412 */ 1413 higher = ht; 1414 1415 /* 1416 * If we just created a new shared page table we 1417 * increment the shared htable's busy count, so that 1418 * it can't be the victim of a steal even if it's empty. 1419 */ 1420 if (l == level && shared) { 1421 (void) htable_lookup(shared->ht_hat, 1422 shared->ht_vaddr, shared->ht_level); 1423 HATSTAT_INC(hs_htable_shared); 1424 } 1425 } 1426 } 1427 1428 return (ht); 1429 } 1430 1431 /* 1432 * Inherit initial pagetables from the boot program. On the 64-bit 1433 * hypervisor we also temporarily mark the p_index field of page table 1434 * pages, so we know not to try making them writable in seg_kpm. 1435 */ 1436 void 1437 htable_attach( 1438 hat_t *hat, 1439 uintptr_t base, 1440 level_t level, 1441 htable_t *parent, 1442 pfn_t pfn) 1443 { 1444 htable_t *ht; 1445 uint_t h; 1446 uint_t i; 1447 x86pte_t pte; 1448 x86pte_t *ptep; 1449 page_t *pp; 1450 extern page_t *boot_claim_page(pfn_t); 1451 1452 ht = htable_get_reserve(); 1453 if (level == mmu.max_level) 1454 kas.a_hat->hat_htable = ht; 1455 ht->ht_hat = hat; 1456 ht->ht_parent = parent; 1457 ht->ht_vaddr = base; 1458 ht->ht_level = level; 1459 ht->ht_busy = 1; 1460 ht->ht_next = NULL; 1461 ht->ht_prev = NULL; 1462 ht->ht_flags = 0; 1463 ht->ht_pfn = pfn; 1464 ht->ht_lock_cnt = 0; 1465 ht->ht_valid_cnt = 0; 1466 if (parent != NULL) 1467 ++parent->ht_busy; 1468 1469 h = HTABLE_HASH(hat, base, level); 1470 HTABLE_ENTER(h); 1471 ht->ht_next = hat->hat_ht_hash[h]; 1472 ASSERT(ht->ht_prev == NULL); 1473 if (hat->hat_ht_hash[h]) 1474 hat->hat_ht_hash[h]->ht_prev = ht; 1475 hat->hat_ht_hash[h] = ht; 1476 HTABLE_EXIT(h); 1477 1478 /* 1479 * make sure the page table physical page is not FREE 1480 */ 1481 if (page_resv(1, KM_NOSLEEP) == 0) 1482 panic("page_resv() failed in ptable alloc"); 1483 1484 pp = boot_claim_page(pfn); 1485 ASSERT(pp != NULL); 1486 page_downgrade(pp); 1487 #if defined(__xpv) && defined(__amd64) 1488 /* 1489 * Record in the page_t that is a pagetable for segkpm setup. 1490 */ 1491 if (kpm_vbase) 1492 pp->p_index = 1; 1493 #endif 1494 1495 /* 1496 * Count valid mappings and recursively attach lower level pagetables. 
1497 */ 1498 ptep = kbm_remap_window(pfn_to_pa(pfn), 0); 1499 for (i = 0; i < HTABLE_NUM_PTES(ht); ++i) { 1500 if (mmu.pae_hat) 1501 pte = ptep[i]; 1502 else 1503 pte = ((x86pte32_t *)ptep)[i]; 1504 if (!IN_HYPERVISOR_VA(base) && PTE_ISVALID(pte)) { 1505 ++ht->ht_valid_cnt; 1506 if (!PTE_ISPAGE(pte, level)) { 1507 htable_attach(hat, base, level - 1, 1508 ht, PTE2PFN(pte, level)); 1509 ptep = kbm_remap_window(pfn_to_pa(pfn), 0); 1510 } 1511 } 1512 base += LEVEL_SIZE(level); 1513 if (base == mmu.hole_start) 1514 base = (mmu.hole_end + MMU_PAGEOFFSET) & MMU_PAGEMASK; 1515 } 1516 1517 /* 1518 * As long as all the mappings we had were below kernel base 1519 * we can release the htable. 1520 */ 1521 if (base < kernelbase) 1522 htable_release(ht); 1523 } 1524 1525 /* 1526 * Walk through a given htable looking for the first valid entry. This 1527 * routine takes both a starting and ending address. The starting address 1528 * is required to be within the htable provided by the caller, but there is 1529 * no such restriction on the ending address. 1530 * 1531 * If the routine finds a valid entry in the htable (at or beyond the 1532 * starting address), the PTE (and its address) will be returned. 1533 * This PTE may correspond to either a page or a pagetable - it is the 1534 * caller's responsibility to determine which. If no valid entry is 1535 * found, 0 (and invalid PTE) and the next unexamined address will be 1536 * returned. 1537 * 1538 * The loop has been carefully coded for optimization. 1539 */ 1540 static x86pte_t 1541 htable_scan(htable_t *ht, uintptr_t *vap, uintptr_t eaddr) 1542 { 1543 uint_t e; 1544 x86pte_t found_pte = (x86pte_t)0; 1545 caddr_t pte_ptr; 1546 caddr_t end_pte_ptr; 1547 int l = ht->ht_level; 1548 uintptr_t va = *vap & LEVEL_MASK(l); 1549 size_t pgsize = LEVEL_SIZE(l); 1550 1551 ASSERT(va >= ht->ht_vaddr); 1552 ASSERT(va <= HTABLE_LAST_PAGE(ht)); 1553 1554 /* 1555 * Compute the starting index and ending virtual address 1556 */ 1557 e = htable_va2entry(va, ht); 1558 1559 /* 1560 * The following page table scan code knows that the valid 1561 * bit of a PTE is in the lowest byte AND that x86 is little endian!! 1562 */ 1563 pte_ptr = (caddr_t)x86pte_access_pagetable(ht, 0); 1564 end_pte_ptr = (caddr_t)PT_INDEX_PTR(pte_ptr, HTABLE_NUM_PTES(ht)); 1565 pte_ptr = (caddr_t)PT_INDEX_PTR((x86pte_t *)pte_ptr, e); 1566 while (!PTE_ISVALID(*pte_ptr)) { 1567 va += pgsize; 1568 if (va >= eaddr) 1569 break; 1570 pte_ptr += mmu.pte_size; 1571 ASSERT(pte_ptr <= end_pte_ptr); 1572 if (pte_ptr == end_pte_ptr) 1573 break; 1574 } 1575 1576 /* 1577 * if we found a valid PTE, load the entire PTE 1578 */ 1579 if (va < eaddr && pte_ptr != end_pte_ptr) 1580 found_pte = GET_PTE((x86pte_t *)pte_ptr); 1581 x86pte_release_pagetable(ht); 1582 1583 #if defined(__amd64) 1584 /* 1585 * deal with VA hole on amd64 1586 */ 1587 if (l == mmu.max_level && va >= mmu.hole_start && va <= mmu.hole_end) 1588 va = mmu.hole_end + va - mmu.hole_start; 1589 #endif /* __amd64 */ 1590 1591 *vap = va; 1592 return (found_pte); 1593 } 1594 1595 /* 1596 * Find the address and htable for the first populated translation at or 1597 * above the given virtual address. The caller may also specify an upper 1598 * limit to the address range to search. Uses level information to quickly 1599 * skip unpopulated sections of virtual address spaces. 1600 * 1601 * If not found returns NULL. When found, returns the htable and virt addr 1602 * and has a hold on the htable. 
1603 */ 1604 x86pte_t 1605 htable_walk( 1606 struct hat *hat, 1607 htable_t **htp, 1608 uintptr_t *vaddr, 1609 uintptr_t eaddr) 1610 { 1611 uintptr_t va = *vaddr; 1612 htable_t *ht; 1613 htable_t *prev = *htp; 1614 level_t l; 1615 level_t max_mapped_level; 1616 x86pte_t pte; 1617 1618 ASSERT(eaddr > va); 1619 1620 /* 1621 * If this is a user address, then we know we need not look beyond 1622 * kernelbase. 1623 */ 1624 ASSERT(hat == kas.a_hat || eaddr <= kernelbase || 1625 eaddr == HTABLE_WALK_TO_END); 1626 if (hat != kas.a_hat && eaddr == HTABLE_WALK_TO_END) 1627 eaddr = kernelbase; 1628 1629 /* 1630 * If we're coming in with a previous page table, search it first 1631 * without doing an htable_lookup(), this should be frequent. 1632 */ 1633 if (prev) { 1634 ASSERT(prev->ht_busy > 0); 1635 ASSERT(prev->ht_vaddr <= va); 1636 l = prev->ht_level; 1637 if (va <= HTABLE_LAST_PAGE(prev)) { 1638 pte = htable_scan(prev, &va, eaddr); 1639 1640 if (PTE_ISPAGE(pte, l)) { 1641 *vaddr = va; 1642 *htp = prev; 1643 return (pte); 1644 } 1645 } 1646 1647 /* 1648 * We found nothing in the htable provided by the caller, 1649 * so fall through and do the full search 1650 */ 1651 htable_release(prev); 1652 } 1653 1654 /* 1655 * Find the level of the largest pagesize used by this HAT. 1656 */ 1657 if (hat->hat_ism_pgcnt > 0) { 1658 max_mapped_level = mmu.max_page_level; 1659 } else { 1660 max_mapped_level = 0; 1661 for (l = 1; l <= mmu.max_page_level; ++l) 1662 if (hat->hat_pages_mapped[l] != 0) 1663 max_mapped_level = l; 1664 } 1665 1666 while (va < eaddr && va >= *vaddr) { 1667 ASSERT(!IN_VA_HOLE(va)); 1668 1669 /* 1670 * Find lowest table with any entry for given address. 1671 */ 1672 for (l = 0; l <= TOP_LEVEL(hat); ++l) { 1673 ht = htable_lookup(hat, va, l); 1674 if (ht != NULL) { 1675 pte = htable_scan(ht, &va, eaddr); 1676 if (PTE_ISPAGE(pte, l)) { 1677 *vaddr = va; 1678 *htp = ht; 1679 return (pte); 1680 } 1681 htable_release(ht); 1682 break; 1683 } 1684 1685 /* 1686 * No htable at this level for the address. If there 1687 * is no larger page size that could cover it, we can 1688 * skip right to the start of the next page table. 1689 */ 1690 ASSERT(l < TOP_LEVEL(hat)); 1691 if (l >= max_mapped_level) { 1692 va = NEXT_ENTRY_VA(va, l + 1); 1693 if (va >= eaddr) 1694 break; 1695 } 1696 } 1697 } 1698 1699 *vaddr = 0; 1700 *htp = NULL; 1701 return (0); 1702 } 1703 1704 /* 1705 * Find the htable and page table entry index of the given virtual address 1706 * with pagesize at or below given level. 1707 * If not found returns NULL. When found, returns the htable, sets 1708 * entry, and has a hold on the htable. 1709 */ 1710 htable_t * 1711 htable_getpte( 1712 struct hat *hat, 1713 uintptr_t vaddr, 1714 uint_t *entry, 1715 x86pte_t *pte, 1716 level_t level) 1717 { 1718 htable_t *ht; 1719 level_t l; 1720 uint_t e; 1721 1722 ASSERT(level <= mmu.max_page_level); 1723 1724 for (l = 0; l <= level; ++l) { 1725 ht = htable_lookup(hat, vaddr, l); 1726 if (ht == NULL) 1727 continue; 1728 e = htable_va2entry(vaddr, ht); 1729 if (entry != NULL) 1730 *entry = e; 1731 if (pte != NULL) 1732 *pte = x86pte_get(ht, e); 1733 return (ht); 1734 } 1735 return (NULL); 1736 } 1737 1738 /* 1739 * Find the htable and page table entry index of the given virtual address. 1740 * There must be a valid page mapped at the given address. 1741 * If not found returns NULL. When found, returns the htable, sets 1742 * entry, and has a hold on the htable. 
1743 */ 1744 htable_t * 1745 htable_getpage(struct hat *hat, uintptr_t vaddr, uint_t *entry) 1746 { 1747 htable_t *ht; 1748 uint_t e; 1749 x86pte_t pte; 1750 1751 ht = htable_getpte(hat, vaddr, &e, &pte, mmu.max_page_level); 1752 if (ht == NULL) 1753 return (NULL); 1754 1755 if (entry) 1756 *entry = e; 1757 1758 if (PTE_ISPAGE(pte, ht->ht_level)) 1759 return (ht); 1760 htable_release(ht); 1761 return (NULL); 1762 } 1763 1764 1765 void 1766 htable_init() 1767 { 1768 /* 1769 * To save on kernel VA usage, we avoid debug information in 32 bit 1770 * kernels. 1771 */ 1772 #if defined(__amd64) 1773 int kmem_flags = KMC_NOHASH; 1774 #elif defined(__i386) 1775 int kmem_flags = KMC_NOHASH | KMC_NODEBUG; 1776 #endif 1777 1778 /* 1779 * initialize kmem caches 1780 */ 1781 htable_cache = kmem_cache_create("htable_t", 1782 sizeof (htable_t), 0, NULL, NULL, 1783 htable_reap, NULL, hat_memload_arena, kmem_flags); 1784 } 1785 1786 /* 1787 * get the pte index for the virtual address in the given htable's pagetable 1788 */ 1789 uint_t 1790 htable_va2entry(uintptr_t va, htable_t *ht) 1791 { 1792 level_t l = ht->ht_level; 1793 1794 ASSERT(va >= ht->ht_vaddr); 1795 ASSERT(va <= HTABLE_LAST_PAGE(ht)); 1796 return ((va >> LEVEL_SHIFT(l)) & (HTABLE_NUM_PTES(ht) - 1)); 1797 } 1798 1799 /* 1800 * Given an htable and the index of a pte in it, return the virtual address 1801 * of the page. 1802 */ 1803 uintptr_t 1804 htable_e2va(htable_t *ht, uint_t entry) 1805 { 1806 level_t l = ht->ht_level; 1807 uintptr_t va; 1808 1809 ASSERT(entry < HTABLE_NUM_PTES(ht)); 1810 va = ht->ht_vaddr + ((uintptr_t)entry << LEVEL_SHIFT(l)); 1811 1812 /* 1813 * Need to skip over any VA hole in top level table 1814 */ 1815 #if defined(__amd64) 1816 if (ht->ht_level == mmu.max_level && va >= mmu.hole_start) 1817 va += ((mmu.hole_end - mmu.hole_start) + 1); 1818 #endif 1819 1820 return (va); 1821 } 1822 1823 /* 1824 * The code uses compare and swap instructions to read/write PTE's to 1825 * avoid atomicity problems, since PTEs can be 8 bytes on 32 bit systems; 1826 * on 64 bit systems aligned 8 byte loads and stores will naturally be atomic. 1827 * 1828 * The combination of using kpreempt_disable()/_enable() and the hci_mutex 1829 * is used to ensure that an interrupt won't overwrite a temporary mapping 1830 * while it's in use. If an interrupt thread tries to access a PTE, it will 1831 * yield briefly back to the pinned thread which holds the cpu's hci_mutex. 1832 */ 1833 void 1834 x86pte_cpu_init(cpu_t *cpu) 1835 { 1836 struct hat_cpu_info *hci; 1837 1838 hci = kmem_zalloc(sizeof (*hci), KM_SLEEP); 1839 mutex_init(&hci->hci_mutex, NULL, MUTEX_DEFAULT, NULL); 1840 cpu->cpu_hat_info = hci; 1841 } 1842 1843 void 1844 x86pte_cpu_fini(cpu_t *cpu) 1845 { 1846 struct hat_cpu_info *hci = cpu->cpu_hat_info; 1847 1848 kmem_free(hci, sizeof (*hci)); 1849 cpu->cpu_hat_info = NULL; 1850 } 1851 1852 #ifdef __i386 1853 /* 1854 * On 32 bit kernels, loading a 64 bit PTE is a little tricky 1855 */ 1856 x86pte_t 1857 get_pte64(x86pte_t *ptr) 1858 { 1859 volatile uint32_t *p = (uint32_t *)ptr; 1860 x86pte_t t; 1861 1862 ASSERT(mmu.pae_hat != 0); 1863 for (;;) { 1864 t = p[0]; 1865 t |= (uint64_t)p[1] << 32; 1866 if ((t & 0xffffffff) == p[0]) 1867 return (t); 1868 } 1869 } 1870 #endif /* __i386 */ 1871 1872 /* 1873 * Disable preemption and establish a mapping to the pagetable with the 1874 * given pfn. This is optimized for the case where it's the same 1875 * pfn we last referenced from this CPU.
1876 */ 1877 static x86pte_t * 1878 x86pte_access_pagetable(htable_t *ht, uint_t index) 1879 { 1880 /* 1881 * VLP pagetables are contained in the hat_t 1882 */ 1883 if (ht->ht_flags & HTABLE_VLP) 1884 return (PT_INDEX_PTR(ht->ht_hat->hat_vlp_ptes, index)); 1885 return (x86pte_mapin(ht->ht_pfn, index, ht)); 1886 } 1887 1888 /* 1889 * map the given pfn into the page table window. 1890 */ 1891 /*ARGSUSED*/ 1892 x86pte_t * 1893 x86pte_mapin(pfn_t pfn, uint_t index, htable_t *ht) 1894 { 1895 x86pte_t *pteptr; 1896 x86pte_t pte = 0; 1897 x86pte_t newpte; 1898 int x; 1899 1900 ASSERT(pfn != PFN_INVALID); 1901 1902 if (!khat_running) { 1903 caddr_t va = kbm_remap_window(pfn_to_pa(pfn), 1); 1904 return (PT_INDEX_PTR(va, index)); 1905 } 1906 1907 /* 1908 * If kpm is available, use it. 1909 */ 1910 if (kpm_vbase) 1911 return (PT_INDEX_PTR(hat_kpm_pfn2va(pfn), index)); 1912 1913 /* 1914 * Disable preemption and grab the CPU's hci_mutex 1915 */ 1916 kpreempt_disable(); 1917 ASSERT(CPU->cpu_hat_info != NULL); 1918 mutex_enter(&CPU->cpu_hat_info->hci_mutex); 1919 x = PWIN_TABLE(CPU->cpu_id); 1920 pteptr = (x86pte_t *)PWIN_PTE_VA(x); 1921 #ifndef __xpv 1922 if (mmu.pae_hat) 1923 pte = *pteptr; 1924 else 1925 pte = *(x86pte32_t *)pteptr; 1926 #endif 1927 1928 newpte = MAKEPTE(pfn, 0) | mmu.pt_global | mmu.pt_nx; 1929 1930 /* 1931 * For hardware we can use a writable mapping. 1932 */ 1933 #ifdef __xpv 1934 if (IN_XPV_PANIC()) 1935 #endif 1936 newpte |= PT_WRITABLE; 1937 1938 if (!PTE_EQUIV(newpte, pte)) { 1939 1940 #ifdef __xpv 1941 if (!IN_XPV_PANIC()) { 1942 xen_map(newpte, PWIN_VA(x)); 1943 } else 1944 #endif 1945 { 1946 XPV_ALLOW_PAGETABLE_UPDATES(); 1947 if (mmu.pae_hat) 1948 *pteptr = newpte; 1949 else 1950 *(x86pte32_t *)pteptr = newpte; 1951 XPV_DISALLOW_PAGETABLE_UPDATES(); 1952 mmu_tlbflush_entry((caddr_t)(PWIN_VA(x))); 1953 } 1954 } 1955 return (PT_INDEX_PTR(PWIN_VA(x), index)); 1956 } 1957 1958 /* 1959 * Release access to a page table. 1960 */ 1961 static void 1962 x86pte_release_pagetable(htable_t *ht) 1963 { 1964 /* 1965 * nothing to do for VLP htables 1966 */ 1967 if (ht->ht_flags & HTABLE_VLP) 1968 return; 1969 1970 x86pte_mapout(); 1971 } 1972 1973 void 1974 x86pte_mapout(void) 1975 { 1976 if (kpm_vbase != NULL || !khat_running) 1977 return; 1978 1979 /* 1980 * Drop the CPU's hci_mutex and restore preemption. 1981 */ 1982 #ifdef __xpv 1983 if (!IN_XPV_PANIC()) { 1984 uintptr_t va; 1985 1986 /* 1987 * We need to always clear the mapping in case a page 1988 * that was once a page table page is ballooned out. 1989 */ 1990 va = (uintptr_t)PWIN_VA(PWIN_TABLE(CPU->cpu_id)); 1991 (void) HYPERVISOR_update_va_mapping(va, 0, 1992 UVMF_INVLPG | UVMF_LOCAL); 1993 } 1994 #endif 1995 mutex_exit(&CPU->cpu_hat_info->hci_mutex); 1996 kpreempt_enable(); 1997 } 1998 1999 /* 2000 * Atomic retrieval of a pagetable entry 2001 */ 2002 x86pte_t 2003 x86pte_get(htable_t *ht, uint_t entry) 2004 { 2005 x86pte_t pte; 2006 x86pte_t *ptep; 2007 2008 /* 2009 * Be careful that loading PAE entries in 32 bit kernel is atomic. 2010 */ 2011 ASSERT(entry < mmu.ptes_per_table); 2012 ptep = x86pte_access_pagetable(ht, entry); 2013 pte = GET_PTE(ptep); 2014 x86pte_release_pagetable(ht); 2015 return (pte); 2016 } 2017 2018 /* 2019 * Atomic unconditional set of a page table entry, it returns the previous 2020 * value. For pre-existing mappings if the PFN changes, then we don't care 2021 * about the old pte's REF / MOD bits. If the PFN remains the same, we leave 2022 * the MOD/REF bits unchanged. 
2023 * 2024 * If asked to overwrite a link to a lower page table with a large page 2025 * mapping, this routine returns the special value of LPAGE_ERROR. This 2026 * allows the upper HAT layers to retry with a smaller mapping size. 2027 */ 2028 x86pte_t 2029 x86pte_set(htable_t *ht, uint_t entry, x86pte_t new, void *ptr) 2030 { 2031 x86pte_t old; 2032 x86pte_t prev; 2033 x86pte_t *ptep; 2034 level_t l = ht->ht_level; 2035 x86pte_t pfn_mask = (l != 0) ? PT_PADDR_LGPG : PT_PADDR; 2036 x86pte_t n; 2037 uintptr_t addr = htable_e2va(ht, entry); 2038 hat_t *hat = ht->ht_hat; 2039 2040 ASSERT(new != 0); /* don't use to invalidate a PTE, see x86pte_update */ 2041 ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN)); 2042 if (ptr == NULL) 2043 ptep = x86pte_access_pagetable(ht, entry); 2044 else 2045 ptep = ptr; 2046 2047 /* 2048 * Install the new PTE. If remapping the same PFN, then 2049 * copy existing REF/MOD bits to new mapping. 2050 */ 2051 do { 2052 prev = GET_PTE(ptep); 2053 n = new; 2054 if (PTE_ISVALID(n) && (prev & pfn_mask) == (new & pfn_mask)) 2055 n |= prev & (PT_REF | PT_MOD); 2056 2057 /* 2058 * Another thread may have installed this mapping already, 2059 * flush the local TLB and be done. 2060 */ 2061 if (prev == n) { 2062 old = new; 2063 #ifdef __xpv 2064 if (!IN_XPV_PANIC()) 2065 xen_flush_va((caddr_t)addr); 2066 else 2067 #endif 2068 mmu_tlbflush_entry((caddr_t)addr); 2069 goto done; 2070 } 2071 2072 /* 2073 * Detect if we have a collision of installing a large 2074 * page mapping where there already is a lower page table. 2075 */ 2076 if (l > 0 && (prev & PT_VALID) && !(prev & PT_PAGESIZE)) { 2077 old = LPAGE_ERROR; 2078 goto done; 2079 } 2080 2081 XPV_ALLOW_PAGETABLE_UPDATES(); 2082 old = CAS_PTE(ptep, prev, n); 2083 XPV_DISALLOW_PAGETABLE_UPDATES(); 2084 } while (old != prev); 2085 2086 /* 2087 * Do a TLB demap if needed, ie. the old pte was valid. 2088 * 2089 * Note that a stale TLB writeback to the PTE here either can't happen 2090 * or doesn't matter. The PFN can only change for NOSYNC|NOCONSIST 2091 * mappings, but they were created with REF and MOD already set, so 2092 * no stale writeback will happen. 2093 * 2094 * Segmap is the only place where remaps happen on the same pfn and for 2095 * that we want to preserve the stale REF/MOD bits. 2096 */ 2097 if (old & PT_REF) 2098 hat_tlb_inval(hat, addr); 2099 2100 done: 2101 if (ptr == NULL) 2102 x86pte_release_pagetable(ht); 2103 return (old); 2104 } 2105 2106 /* 2107 * Atomic compare and swap of a page table entry. No TLB invalidates are done. 2108 * This is used for links between pagetables of different levels. 2109 * Note we always create these links with dirty/access set, so they should 2110 * never change. 2111 */ 2112 x86pte_t 2113 x86pte_cas(htable_t *ht, uint_t entry, x86pte_t old, x86pte_t new) 2114 { 2115 x86pte_t pte; 2116 x86pte_t *ptep; 2117 #ifdef __xpv 2118 /* 2119 * We can't use writable pagetables for upper level tables, so fake it. 2120 */ 2121 mmu_update_t t[2]; 2122 int cnt = 1; 2123 int count; 2124 maddr_t ma; 2125 2126 if (!IN_XPV_PANIC()) { 2127 ASSERT(!(ht->ht_flags & HTABLE_VLP)); /* no VLP yet */ 2128 ma = pa_to_ma(PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry)); 2129 t[0].ptr = ma | MMU_NORMAL_PT_UPDATE; 2130 t[0].val = new; 2131 2132 #if defined(__amd64) 2133 /* 2134 * On the 64-bit hypervisor we need to maintain the user mode 2135 * top page table too. 
2136 */ 2137 if (ht->ht_level == mmu.max_level && ht->ht_hat != kas.a_hat) { 2138 ma = pa_to_ma(PT_INDEX_PHYSADDR(pfn_to_pa( 2139 ht->ht_hat->hat_user_ptable), entry)); 2140 t[1].ptr = ma | MMU_NORMAL_PT_UPDATE; 2141 t[1].val = new; 2142 ++cnt; 2143 } 2144 #endif /* __amd64 */ 2145 2146 if (HYPERVISOR_mmu_update(t, cnt, &count, DOMID_SELF)) 2147 panic("HYPERVISOR_mmu_update() failed"); 2148 ASSERT(count == cnt); 2149 return (old); 2150 } 2151 #endif 2152 ptep = x86pte_access_pagetable(ht, entry); 2153 XPV_ALLOW_PAGETABLE_UPDATES(); 2154 pte = CAS_PTE(ptep, old, new); 2155 XPV_DISALLOW_PAGETABLE_UPDATES(); 2156 x86pte_release_pagetable(ht); 2157 return (pte); 2158 } 2159 2160 /* 2161 * Invalidate a page table entry as long as it currently maps something that 2162 * matches the value determined by expect. 2163 * 2164 * Also invalidates any TLB entries and returns the previous value of the PTE. 2165 */ 2166 x86pte_t 2167 x86pte_inval( 2168 htable_t *ht, 2169 uint_t entry, 2170 x86pte_t expect, 2171 x86pte_t *pte_ptr) 2172 { 2173 x86pte_t *ptep; 2174 x86pte_t oldpte; 2175 x86pte_t found; 2176 2177 ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN)); 2178 ASSERT(ht->ht_level != VLP_LEVEL); 2179 2180 if (pte_ptr != NULL) 2181 ptep = pte_ptr; 2182 else 2183 ptep = x86pte_access_pagetable(ht, entry); 2184 2185 #if defined(__xpv) 2186 /* 2187 * If exit()ing just use HYPERVISOR_mmu_update(), as we can't be racing 2188 * with anything else. 2189 */ 2190 if ((ht->ht_hat->hat_flags & HAT_FREEING) && !IN_XPV_PANIC()) { 2191 int count; 2192 mmu_update_t t[1]; 2193 maddr_t ma; 2194 2195 oldpte = GET_PTE(ptep); 2196 if (expect != 0 && (oldpte & PT_PADDR) != (expect & PT_PADDR)) 2197 goto done; 2198 ma = pa_to_ma(PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry)); 2199 t[0].ptr = ma | MMU_NORMAL_PT_UPDATE; 2200 t[0].val = 0; 2201 if (HYPERVISOR_mmu_update(t, 1, &count, DOMID_SELF)) 2202 panic("HYPERVISOR_mmu_update() failed"); 2203 ASSERT(count == 1); 2204 goto done; 2205 } 2206 #endif /* __xpv */ 2207 2208 /* 2209 * Note that the loop is needed to handle changes due to h/w updating 2210 * of PT_MOD/PT_REF. 2211 */ 2212 do { 2213 oldpte = GET_PTE(ptep); 2214 if (expect != 0 && (oldpte & PT_PADDR) != (expect & PT_PADDR)) 2215 goto done; 2216 XPV_ALLOW_PAGETABLE_UPDATES(); 2217 found = CAS_PTE(ptep, oldpte, 0); 2218 XPV_DISALLOW_PAGETABLE_UPDATES(); 2219 } while (found != oldpte); 2220 if (oldpte & (PT_REF | PT_MOD)) 2221 hat_tlb_inval(ht->ht_hat, htable_e2va(ht, entry)); 2222 2223 done: 2224 if (pte_ptr == NULL) 2225 x86pte_release_pagetable(ht); 2226 return (oldpte); 2227 } 2228 2229 /* 2230 * Change a page table entry if it currently matches the value in expect. 2231 */ 2232 x86pte_t 2233 x86pte_update( 2234 htable_t *ht, 2235 uint_t entry, 2236 x86pte_t expect, 2237 x86pte_t new) 2238 { 2239 x86pte_t *ptep; 2240 x86pte_t found; 2241 2242 ASSERT(new != 0); 2243 ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN)); 2244 ASSERT(ht->ht_level != VLP_LEVEL); 2245 2246 ptep = x86pte_access_pagetable(ht, entry); 2247 XPV_ALLOW_PAGETABLE_UPDATES(); 2248 found = CAS_PTE(ptep, expect, new); 2249 XPV_DISALLOW_PAGETABLE_UPDATES(); 2250 if (found == expect) { 2251 hat_tlb_inval(ht->ht_hat, htable_e2va(ht, entry)); 2252 2253 /* 2254 * When removing write permission *and* clearing the 2255 * MOD bit, check if a write happened via a stale 2256 * TLB entry before the TLB shootdown finished. 2257 * 2258 * If it did happen, simply re-enable write permission and 2259 * act like the original CAS failed.
		if ((expect & (PT_WRITABLE | PT_MOD)) == PT_WRITABLE &&
		    (new & (PT_WRITABLE | PT_MOD)) == 0 &&
		    (GET_PTE(ptep) & PT_MOD) != 0) {
			do {
				found = GET_PTE(ptep);
				XPV_ALLOW_PAGETABLE_UPDATES();
				found =
				    CAS_PTE(ptep, found, found | PT_WRITABLE);
				XPV_DISALLOW_PAGETABLE_UPDATES();
			} while ((found & PT_WRITABLE) == 0);
		}
	}
	x86pte_release_pagetable(ht);
	return (found);
}

#ifndef __xpv
/*
 * Copy page tables - this is just a little more complicated than the
 * previous routines. Note that it's not atomic! It is also never used
 * for VLP pagetables.
 */
void
x86pte_copy(htable_t *src, htable_t *dest, uint_t entry, uint_t count)
{
	caddr_t		src_va;
	caddr_t		dst_va;
	size_t		size;
	x86pte_t	*pteptr;
	x86pte_t	pte;

	ASSERT(khat_running);
	ASSERT(!(dest->ht_flags & HTABLE_VLP));
	ASSERT(!(src->ht_flags & HTABLE_VLP));
	ASSERT(!(src->ht_flags & HTABLE_SHARED_PFN));
	ASSERT(!(dest->ht_flags & HTABLE_SHARED_PFN));

	/*
	 * Acquire access to the CPU pagetable windows for the dest and source.
	 */
	dst_va = (caddr_t)x86pte_access_pagetable(dest, entry);
	if (kpm_vbase) {
		src_va = (caddr_t)
		    PT_INDEX_PTR(hat_kpm_pfn2va(src->ht_pfn), entry);
	} else {
		uint_t x = PWIN_SRC(CPU->cpu_id);

		/*
		 * Finish defining the src pagetable mapping
		 */
		src_va = (caddr_t)PT_INDEX_PTR(PWIN_VA(x), entry);
		pte = MAKEPTE(src->ht_pfn, 0) | mmu.pt_global | mmu.pt_nx;
		pteptr = (x86pte_t *)PWIN_PTE_VA(x);
		if (mmu.pae_hat)
			*pteptr = pte;
		else
			*(x86pte32_t *)pteptr = pte;
		mmu_tlbflush_entry((caddr_t)(PWIN_VA(x)));
	}

	/*
	 * now do the copy
	 */
	size = count << mmu.pte_size_shift;
	bcopy(src_va, dst_va, size);

	x86pte_release_pagetable(dest);
}

#else /* __xpv */

/*
 * The hypervisor only supports writable pagetables at level 0, so we have
 * to install these 1 by 1 the slow way.
 */
void
x86pte_copy(htable_t *src, htable_t *dest, uint_t entry, uint_t count)
{
	caddr_t		src_va;
	x86pte_t	pte;

	ASSERT(!IN_XPV_PANIC());
	src_va = (caddr_t)x86pte_access_pagetable(src, entry);
	while (count) {
		if (mmu.pae_hat)
			pte = *(x86pte_t *)src_va;
		else
			pte = *(x86pte32_t *)src_va;
		if (pte != 0) {
			set_pteval(pfn_to_pa(dest->ht_pfn), entry,
			    dest->ht_level, pte);
#ifdef __amd64
			if (dest->ht_level == mmu.max_level &&
			    htable_e2va(dest, entry) < HYPERVISOR_VIRT_END)
				set_pteval(
				    pfn_to_pa(dest->ht_hat->hat_user_ptable),
				    entry, dest->ht_level, pte);
#endif
		}
		--count;
		++entry;
		src_va += mmu.pte_size;
	}
	x86pte_release_pagetable(src);
}
#endif /* __xpv */

/*
 * Zero page table entries - Note this doesn't use atomic stores!
 */
static void
x86pte_zero(htable_t *dest, uint_t entry, uint_t count)
{
	caddr_t		dst_va;
	size_t		size;
#ifdef __xpv
	int		x;
	x86pte_t	newpte;
#endif

	/*
	 * Map in the page table to be zeroed.
	 */
	ASSERT(!(dest->ht_flags & HTABLE_SHARED_PFN));
	ASSERT(!(dest->ht_flags & HTABLE_VLP));

	/*
	 * On the hypervisor we don't use x86pte_access_pagetable() since
	 * in this case the page is not pinned yet.
	 */
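	/*
	 * Instead the page is mapped writable through this CPU's private
	 * window (PWIN_TABLE) with preemption disabled and hci_mutex held;
	 * the temporary mapping is removed again once the zeroing is done.
	 */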
#ifdef __xpv
	if (kpm_vbase == NULL) {
		kpreempt_disable();
		ASSERT(CPU->cpu_hat_info != NULL);
		mutex_enter(&CPU->cpu_hat_info->hci_mutex);
		x = PWIN_TABLE(CPU->cpu_id);
		newpte = MAKEPTE(dest->ht_pfn, 0) | PT_WRITABLE;
		xen_map(newpte, PWIN_VA(x));
		dst_va = (caddr_t)PT_INDEX_PTR(PWIN_VA(x), entry);
	} else
#endif
		dst_va = (caddr_t)x86pte_access_pagetable(dest, entry);

	size = count << mmu.pte_size_shift;
	ASSERT(size > BLOCKZEROALIGN);
#ifdef __i386
	if ((x86_feature & X86_SSE2) == 0)
		bzero(dst_va, size);
	else
#endif
		block_zero_no_xmm(dst_va, size);

#ifdef __xpv
	if (kpm_vbase == NULL) {
		xen_map(0, PWIN_VA(x));
		mutex_exit(&CPU->cpu_hat_info->hci_mutex);
		kpreempt_enable();
	} else
#endif
		x86pte_release_pagetable(dest);
}

/*
 * Called to ensure that all pagetables are in the system dump
 */
void
hat_dump(void)
{
	hat_t	*hat;
	uint_t	h;
	htable_t *ht;

	/*
	 * Dump all page tables
	 */
	for (hat = kas.a_hat; hat != NULL; hat = hat->hat_next) {
		for (h = 0; h < hat->hat_num_hash; ++h) {
			for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) {
				if ((ht->ht_flags & HTABLE_VLP) == 0)
					dump_page(ht->ht_pfn);
			}
		}
	}
}