/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/kmem.h>
#include <sys/atomic.h>
#include <sys/bitmap.h>
#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/mman.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/disp.h>
#include <sys/vmem.h>
#include <sys/vmsystm.h>
#include <sys/promif.h>
#include <sys/var.h>
#include <sys/x86_archext.h>
#include <sys/archsystm.h>
#include <sys/bootconf.h>
#include <sys/dumphdr.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kpm.h>
#include <vm/hat.h>
#include <vm/hat_i86.h>
#include <sys/cmn_err.h>
#include <sys/panic.h>

#ifdef __xpv
#include <sys/hypervisor.h>
#include <sys/xpv_panic.h>
#endif

#include <sys/bootinfo.h>
#include <vm/kboot_mmu.h>

static void x86pte_zero(htable_t *dest, uint_t entry, uint_t count);

kmem_cache_t *htable_cache;

/*
 * The variable htable_reserve_amount, rather than HTABLE_RESERVE_AMOUNT,
 * is used in order to facilitate testing of the htable_steal() code.
 * By resetting htable_reserve_amount to a lower value, we can force
 * stealing to occur. The reserve amount is a guess to get us through boot.
 */
#define	HTABLE_RESERVE_AMOUNT	(200)
uint_t htable_reserve_amount = HTABLE_RESERVE_AMOUNT;
kmutex_t htable_reserve_mutex;
uint_t htable_reserve_cnt;
htable_t *htable_reserve_pool;

/*
 * Used to hand test htable_steal().
 */
#ifdef DEBUG
ulong_t force_steal = 0;
ulong_t ptable_cnt = 0;
#endif

/*
 * This variable is so that we can tune this via /etc/system
 * Any value works, but a power of two <= mmu.ptes_per_table is best.
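 *
 * For example, it can be set from /etc/system with a line such as
 *	set htable_steal_passes = 16
 * (an illustrative value, not a recommendation) to change how many passes
 * htable_steal() makes over the user hats.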
92 */ 93 uint_t htable_steal_passes = 8; 94 95 /* 96 * mutex stuff for access to htable hash 97 */ 98 #define NUM_HTABLE_MUTEX 128 99 kmutex_t htable_mutex[NUM_HTABLE_MUTEX]; 100 #define HTABLE_MUTEX_HASH(h) ((h) & (NUM_HTABLE_MUTEX - 1)) 101 102 #define HTABLE_ENTER(h) mutex_enter(&htable_mutex[HTABLE_MUTEX_HASH(h)]); 103 #define HTABLE_EXIT(h) mutex_exit(&htable_mutex[HTABLE_MUTEX_HASH(h)]); 104 105 /* 106 * forward declarations 107 */ 108 static void link_ptp(htable_t *higher, htable_t *new, uintptr_t vaddr); 109 static void unlink_ptp(htable_t *higher, htable_t *old, uintptr_t vaddr); 110 static void htable_free(htable_t *ht); 111 static x86pte_t *x86pte_access_pagetable(htable_t *ht, uint_t index); 112 static void x86pte_release_pagetable(htable_t *ht); 113 static x86pte_t x86pte_cas(htable_t *ht, uint_t entry, x86pte_t old, 114 x86pte_t new); 115 116 /* 117 * A counter to track if we are stealing or reaping htables. When non-zero 118 * htable_free() will directly free htables (either to the reserve or kmem) 119 * instead of putting them in a hat's htable cache. 120 */ 121 uint32_t htable_dont_cache = 0; 122 123 /* 124 * Track the number of active pagetables, so we can know how many to reap 125 */ 126 static uint32_t active_ptables = 0; 127 128 #ifdef __xpv 129 /* 130 * Deal with hypervisor complications. 131 */ 132 void 133 xen_flush_va(caddr_t va) 134 { 135 struct mmuext_op t; 136 uint_t count; 137 138 if (IN_XPV_PANIC()) { 139 mmu_tlbflush_entry((caddr_t)va); 140 } else { 141 t.cmd = MMUEXT_INVLPG_LOCAL; 142 t.arg1.linear_addr = (uintptr_t)va; 143 if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0) 144 panic("HYPERVISOR_mmuext_op() failed"); 145 ASSERT(count == 1); 146 } 147 } 148 149 void 150 xen_gflush_va(caddr_t va, cpuset_t cpus) 151 { 152 struct mmuext_op t; 153 uint_t count; 154 155 if (IN_XPV_PANIC()) { 156 mmu_tlbflush_entry((caddr_t)va); 157 return; 158 } 159 160 t.cmd = MMUEXT_INVLPG_MULTI; 161 t.arg1.linear_addr = (uintptr_t)va; 162 /*LINTED: constant in conditional context*/ 163 set_xen_guest_handle(t.arg2.vcpumask, &cpus); 164 if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0) 165 panic("HYPERVISOR_mmuext_op() failed"); 166 ASSERT(count == 1); 167 } 168 169 void 170 xen_flush_tlb() 171 { 172 struct mmuext_op t; 173 uint_t count; 174 175 if (IN_XPV_PANIC()) { 176 xpv_panic_reload_cr3(); 177 } else { 178 t.cmd = MMUEXT_TLB_FLUSH_LOCAL; 179 if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0) 180 panic("HYPERVISOR_mmuext_op() failed"); 181 ASSERT(count == 1); 182 } 183 } 184 185 void 186 xen_gflush_tlb(cpuset_t cpus) 187 { 188 struct mmuext_op t; 189 uint_t count; 190 191 ASSERT(!IN_XPV_PANIC()); 192 t.cmd = MMUEXT_TLB_FLUSH_MULTI; 193 /*LINTED: constant in conditional context*/ 194 set_xen_guest_handle(t.arg2.vcpumask, &cpus); 195 if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0) 196 panic("HYPERVISOR_mmuext_op() failed"); 197 ASSERT(count == 1); 198 } 199 200 /* 201 * Install/Adjust a kpm mapping under the hypervisor. 202 * Value of "how" should be: 203 * PT_WRITABLE | PT_VALID - regular kpm mapping 204 * PT_VALID - make mapping read-only 205 * 0 - remove mapping 206 * 207 * returns 0 on success. non-zero for failure. 
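 *
 * For illustration, the callers later in this file use
 *	xen_kpm_page(pfn, PT_VALID)			read-only mapping
 *	xen_kpm_page(pfn, PT_VALID | PT_WRITABLE)	regular mapping again
 * as a page starts or stops being used as a pagetable.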
 */
int
xen_kpm_page(pfn_t pfn, uint_t how)
{
	paddr_t pa = mmu_ptob((paddr_t)pfn);
	x86pte_t pte = PT_NOCONSIST | PT_REF | PT_MOD;

	if (kpm_vbase == NULL)
		return (0);

	if (how)
		pte |= pa_to_ma(pa) | how;
	else
		pte = 0;
	return (HYPERVISOR_update_va_mapping((uintptr_t)kpm_vbase + pa,
	    pte, UVMF_INVLPG | UVMF_ALL));
}

void
xen_pin(pfn_t pfn, level_t lvl)
{
	struct mmuext_op t;
	uint_t count;

	t.cmd = MMUEXT_PIN_L1_TABLE + lvl;
	t.arg1.mfn = pfn_to_mfn(pfn);
	if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
		panic("HYPERVISOR_mmuext_op() failed");
	ASSERT(count == 1);
}

void
xen_unpin(pfn_t pfn)
{
	struct mmuext_op t;
	uint_t count;

	t.cmd = MMUEXT_UNPIN_TABLE;
	t.arg1.mfn = pfn_to_mfn(pfn);
	if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
		panic("HYPERVISOR_mmuext_op() failed");
	ASSERT(count == 1);
}

static void
xen_map(uint64_t pte, caddr_t va)
{
	if (HYPERVISOR_update_va_mapping((uintptr_t)va, pte,
	    UVMF_INVLPG | UVMF_LOCAL))
		panic("HYPERVISOR_update_va_mapping() failed");
}
#endif /* __xpv */

/*
 * Allocate a memory page for a hardware page table.
 *
 * A wrapper around page_get_physical(), with some extra checks.
 */
static pfn_t
ptable_alloc(uintptr_t seed)
{
	pfn_t pfn;
	page_t *pp;

	pfn = PFN_INVALID;
	atomic_add_32(&active_ptables, 1);

	/*
	 * The first check is to see if there is memory in the system. If we
	 * drop to throttlefree, then fail the ptable_alloc() and let the
	 * stealing code kick in. Note that we have to do this test here,
	 * since the test in page_create_throttle() would let the NOSLEEP
	 * allocation go through and deplete the page reserves.
	 *
	 * The !NOMEMWAIT() lets pageout, fsflush, etc. skip this check.
	 */
	if (!NOMEMWAIT() && freemem <= throttlefree + 1)
		return (PFN_INVALID);

#ifdef DEBUG
	/*
	 * This code makes htable_steal() easier to test. By setting
	 * force_steal we force pagetable allocations to fall
	 * into the stealing code. Roughly 1 in every "force_steal"
	 * page table allocations will fail.
	 */
	if (proc_pageout != NULL && force_steal > 1 &&
	    ++ptable_cnt > force_steal) {
		ptable_cnt = 0;
		return (PFN_INVALID);
	}
#endif /* DEBUG */

	pp = page_get_physical(seed);
	if (pp == NULL)
		return (PFN_INVALID);
	ASSERT(PAGE_SHARED(pp));
	pfn = pp->p_pagenum;
	if (pfn == PFN_INVALID)
		panic("ptable_alloc(): Invalid PFN!!");
	HATSTAT_INC(hs_ptable_allocs);
	return (pfn);
}

/*
 * Free an htable's associated page table page. See the comments
 * for ptable_alloc().
 */
static void
ptable_free(pfn_t pfn)
{
	page_t *pp = page_numtopp_nolock(pfn);

	/*
	 * need to destroy the page used for the pagetable
	 */
	ASSERT(pfn != PFN_INVALID);
	HATSTAT_INC(hs_ptable_frees);
	atomic_add_32(&active_ptables, -1);
	if (pp == NULL)
		panic("ptable_free(): no page for pfn!");
	ASSERT(PAGE_SHARED(pp));
	ASSERT(pfn == pp->p_pagenum);
	ASSERT(!IN_XPV_PANIC());

	/*
	 * Get an exclusive lock, might have to wait for a kmem reader.
	 */
	if (!page_tryupgrade(pp)) {
		page_unlock(pp);
		/*
		 * RFE: we could change this to not loop forever
		 * For now looping works - it's just like sfmmu.
341 */ 342 while (!page_lock(pp, SE_EXCL, (kmutex_t *)NULL, P_RECLAIM)) 343 continue; 344 } 345 #ifdef __xpv 346 if (kpm_vbase && xen_kpm_page(pfn, PT_VALID | PT_WRITABLE) < 0) 347 panic("failure making kpm r/w pfn=0x%lx", pfn); 348 #endif 349 page_free(pp, 1); 350 page_unresv(1); 351 } 352 353 /* 354 * Put one htable on the reserve list. 355 */ 356 static void 357 htable_put_reserve(htable_t *ht) 358 { 359 ht->ht_hat = NULL; /* no longer tied to a hat */ 360 ASSERT(ht->ht_pfn == PFN_INVALID); 361 HATSTAT_INC(hs_htable_rputs); 362 mutex_enter(&htable_reserve_mutex); 363 ht->ht_next = htable_reserve_pool; 364 htable_reserve_pool = ht; 365 ++htable_reserve_cnt; 366 mutex_exit(&htable_reserve_mutex); 367 } 368 369 /* 370 * Take one htable from the reserve. 371 */ 372 static htable_t * 373 htable_get_reserve(void) 374 { 375 htable_t *ht = NULL; 376 377 mutex_enter(&htable_reserve_mutex); 378 if (htable_reserve_cnt != 0) { 379 ht = htable_reserve_pool; 380 ASSERT(ht != NULL); 381 ASSERT(ht->ht_pfn == PFN_INVALID); 382 htable_reserve_pool = ht->ht_next; 383 --htable_reserve_cnt; 384 HATSTAT_INC(hs_htable_rgets); 385 } 386 mutex_exit(&htable_reserve_mutex); 387 return (ht); 388 } 389 390 /* 391 * Allocate initial htables and put them on the reserve list 392 */ 393 void 394 htable_initial_reserve(uint_t count) 395 { 396 htable_t *ht; 397 398 count += HTABLE_RESERVE_AMOUNT; 399 while (count > 0) { 400 ht = kmem_cache_alloc(htable_cache, KM_NOSLEEP); 401 ASSERT(ht != NULL); 402 403 ASSERT(use_boot_reserve); 404 ht->ht_pfn = PFN_INVALID; 405 htable_put_reserve(ht); 406 --count; 407 } 408 } 409 410 /* 411 * Readjust the reserves after a thread finishes using them. 412 */ 413 void 414 htable_adjust_reserve() 415 { 416 htable_t *ht; 417 418 /* 419 * Free any excess htables in the reserve list 420 */ 421 while (htable_reserve_cnt > htable_reserve_amount && 422 !USE_HAT_RESERVES()) { 423 ht = htable_get_reserve(); 424 if (ht == NULL) 425 return; 426 ASSERT(ht->ht_pfn == PFN_INVALID); 427 kmem_cache_free(htable_cache, ht); 428 } 429 } 430 431 432 /* 433 * This routine steals htables from user processes for htable_alloc() or 434 * for htable_reap(). 435 */ 436 static htable_t * 437 htable_steal(uint_t cnt) 438 { 439 hat_t *hat = kas.a_hat; /* list starts with khat */ 440 htable_t *list = NULL; 441 htable_t *ht; 442 htable_t *higher; 443 uint_t h; 444 uint_t h_start; 445 static uint_t h_seed = 0; 446 uint_t e; 447 uintptr_t va; 448 x86pte_t pte; 449 uint_t stolen = 0; 450 uint_t pass; 451 uint_t threshold; 452 453 /* 454 * Limit htable_steal_passes to something reasonable 455 */ 456 if (htable_steal_passes == 0) 457 htable_steal_passes = 1; 458 if (htable_steal_passes > mmu.ptes_per_table) 459 htable_steal_passes = mmu.ptes_per_table; 460 461 /* 462 * Loop through all user hats. The 1st pass takes cached htables that 463 * aren't in use. The later passes steal by removing mappings, too. 464 */ 465 atomic_add_32(&htable_dont_cache, 1); 466 for (pass = 0; pass <= htable_steal_passes && stolen < cnt; ++pass) { 467 threshold = pass * mmu.ptes_per_table / htable_steal_passes; 468 hat = kas.a_hat; 469 for (;;) { 470 471 /* 472 * Clear the victim flag and move to next hat 473 */ 474 mutex_enter(&hat_list_lock); 475 if (hat != kas.a_hat) { 476 hat->hat_flags &= ~HAT_VICTIM; 477 cv_broadcast(&hat_list_cv); 478 } 479 hat = hat->hat_next; 480 481 /* 482 * Skip any hat that is already being stolen from. 483 * 484 * We skip SHARED hats, as these are dummy 485 * hats that host ISM shared page tables. 
486 * 487 * We also skip if HAT_FREEING because hat_pte_unmap() 488 * won't zero out the PTE's. That would lead to hitting 489 * stale PTEs either here or under hat_unload() when we 490 * steal and unload the same page table in competing 491 * threads. 492 */ 493 while (hat != NULL && 494 (hat->hat_flags & 495 (HAT_VICTIM | HAT_SHARED | HAT_FREEING)) != 0) 496 hat = hat->hat_next; 497 498 if (hat == NULL) { 499 mutex_exit(&hat_list_lock); 500 break; 501 } 502 503 /* 504 * Are we finished? 505 */ 506 if (stolen == cnt) { 507 /* 508 * Try to spread the pain of stealing, 509 * move victim HAT to the end of the HAT list. 510 */ 511 if (pass >= 1 && cnt == 1 && 512 kas.a_hat->hat_prev != hat) { 513 514 /* unlink victim hat */ 515 if (hat->hat_prev) 516 hat->hat_prev->hat_next = 517 hat->hat_next; 518 else 519 kas.a_hat->hat_next = 520 hat->hat_next; 521 if (hat->hat_next) 522 hat->hat_next->hat_prev = 523 hat->hat_prev; 524 else 525 kas.a_hat->hat_prev = 526 hat->hat_prev; 527 528 529 /* relink at end of hat list */ 530 hat->hat_next = NULL; 531 hat->hat_prev = kas.a_hat->hat_prev; 532 if (hat->hat_prev) 533 hat->hat_prev->hat_next = hat; 534 else 535 kas.a_hat->hat_next = hat; 536 kas.a_hat->hat_prev = hat; 537 538 } 539 540 mutex_exit(&hat_list_lock); 541 break; 542 } 543 544 /* 545 * Mark the HAT as a stealing victim. 546 */ 547 hat->hat_flags |= HAT_VICTIM; 548 mutex_exit(&hat_list_lock); 549 550 /* 551 * Take any htables from the hat's cached "free" list. 552 */ 553 hat_enter(hat); 554 while ((ht = hat->hat_ht_cached) != NULL && 555 stolen < cnt) { 556 hat->hat_ht_cached = ht->ht_next; 557 ht->ht_next = list; 558 list = ht; 559 ++stolen; 560 } 561 hat_exit(hat); 562 563 /* 564 * Don't steal on first pass. 565 */ 566 if (pass == 0 || stolen == cnt) 567 continue; 568 569 /* 570 * Search the active htables for one to steal. 571 * Start at a different hash bucket every time to 572 * help spread the pain of stealing. 573 */ 574 h = h_start = h_seed++ % hat->hat_num_hash; 575 do { 576 higher = NULL; 577 HTABLE_ENTER(h); 578 for (ht = hat->hat_ht_hash[h]; ht; 579 ht = ht->ht_next) { 580 581 /* 582 * Can we rule out reaping? 583 */ 584 if (ht->ht_busy != 0 || 585 (ht->ht_flags & HTABLE_SHARED_PFN)|| 586 ht->ht_level > 0 || 587 ht->ht_valid_cnt > threshold || 588 ht->ht_lock_cnt != 0) 589 continue; 590 591 /* 592 * Increment busy so the htable can't 593 * disappear. We drop the htable mutex 594 * to avoid deadlocks with 595 * hat_pageunload() and the hment mutex 596 * while we call hat_pte_unmap() 597 */ 598 ++ht->ht_busy; 599 HTABLE_EXIT(h); 600 601 /* 602 * Try stealing. 603 * - unload and invalidate all PTEs 604 */ 605 for (e = 0, va = ht->ht_vaddr; 606 e < HTABLE_NUM_PTES(ht) && 607 ht->ht_valid_cnt > 0 && 608 ht->ht_busy == 1 && 609 ht->ht_lock_cnt == 0; 610 ++e, va += MMU_PAGESIZE) { 611 pte = x86pte_get(ht, e); 612 if (!PTE_ISVALID(pte)) 613 continue; 614 hat_pte_unmap(ht, e, 615 HAT_UNLOAD, pte, NULL); 616 } 617 618 /* 619 * Reacquire htable lock. If we didn't 620 * remove all mappings in the table, 621 * or another thread added a new mapping 622 * behind us, give up on this table. 623 */ 624 HTABLE_ENTER(h); 625 if (ht->ht_busy != 1 || 626 ht->ht_valid_cnt != 0 || 627 ht->ht_lock_cnt != 0) { 628 --ht->ht_busy; 629 continue; 630 } 631 632 /* 633 * Steal it and unlink the page table. 
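					 * At this point we hold the bucket
					 * mutex, ht_busy is our reference only
					 * and ht_valid_cnt is zero, so the
					 * htable can be unlinked safely;
					 * unlink_ptp() below clears the
					 * parent's PTP entry and does any TLB
					 * invalidation that is required.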
					 */
					higher = ht->ht_parent;
					unlink_ptp(higher, ht, ht->ht_vaddr);

					/*
					 * remove from the hash list
					 */
					if (ht->ht_next)
						ht->ht_next->ht_prev =
						    ht->ht_prev;

					if (ht->ht_prev) {
						ht->ht_prev->ht_next =
						    ht->ht_next;
					} else {
						ASSERT(hat->hat_ht_hash[h] ==
						    ht);
						hat->hat_ht_hash[h] =
						    ht->ht_next;
					}

					/*
					 * Break to outer loop to release the
					 * higher (ht_parent) pagetable. This
					 * spreads out the pain caused by
					 * pagefaults.
					 */
					ht->ht_next = list;
					list = ht;
					++stolen;
					break;
				}
				HTABLE_EXIT(h);
				if (higher != NULL)
					htable_release(higher);
				if (++h == hat->hat_num_hash)
					h = 0;
			} while (stolen < cnt && h != h_start);
		}
	}
	atomic_add_32(&htable_dont_cache, -1);
	return (list);
}

/*
 * This is invoked from kmem when the system is low on memory. We try
 * to free hments, htables, and ptables to improve the memory situation.
 */
/*ARGSUSED*/
static void
htable_reap(void *handle)
{
	uint_t reap_cnt;
	htable_t *list;
	htable_t *ht;

	HATSTAT_INC(hs_reap_attempts);
	if (!can_steal_post_boot)
		return;

	/*
	 * Try to reap 5% of the page tables bounded by a maximum of
	 * 5% of physmem and a minimum of 10.
	 */
	reap_cnt = MAX(MIN(active_ptables / 20, physmem / 20), 10);

	/*
	 * Let htable_steal() do the work; we just call htable_free().
	 */
	XPV_DISALLOW_MIGRATE();
	list = htable_steal(reap_cnt);
	XPV_ALLOW_MIGRATE();
	while ((ht = list) != NULL) {
		list = ht->ht_next;
		HATSTAT_INC(hs_reaped);
		htable_free(ht);
	}

	/*
	 * Free up excess reserves
	 */
	htable_adjust_reserve();
	hment_adjust_reserve();
}

/*
 * Allocate an htable, stealing one or using the reserve if necessary
 */
static htable_t *
htable_alloc(
	hat_t *hat,
	uintptr_t vaddr,
	level_t level,
	htable_t *shared)
{
	htable_t *ht = NULL;
	uint_t is_vlp;
	uint_t is_bare = 0;
	uint_t need_to_zero = 1;
	int kmflags = (can_steal_post_boot ? KM_NOSLEEP : KM_SLEEP);

	if (level < 0 || level > TOP_LEVEL(hat))
		panic("htable_alloc(): level %d out of range\n", level);

	is_vlp = (hat->hat_flags & HAT_VLP) && level == VLP_LEVEL;
	if (is_vlp || shared != NULL)
		is_bare = 1;

	/*
	 * First reuse a cached htable from the hat_ht_cached field; this
	 * avoids unnecessary trips through kmem/page allocators.
	 */
	if (hat->hat_ht_cached != NULL && !is_bare) {
		hat_enter(hat);
		ht = hat->hat_ht_cached;
		if (ht != NULL) {
			hat->hat_ht_cached = ht->ht_next;
			need_to_zero = 0;
			/* XX64 ASSERT() they're all zero somehow */
			ASSERT(ht->ht_pfn != PFN_INVALID);
		}
		hat_exit(hat);
	}

	if (ht == NULL) {
		/*
		 * Allocate an htable, possibly refilling the reserves.
		 */
		if (USE_HAT_RESERVES()) {
			ht = htable_get_reserve();
		} else {
			/*
			 * Donate successful htable allocations to the reserve.
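			 * That is, the loop below keeps allocating and
			 * parking htables in the reserve until
			 * htable_reserve_cnt reaches htable_reserve_amount,
			 * and the final allocation is kept for this caller.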
767 */ 768 for (;;) { 769 ht = kmem_cache_alloc(htable_cache, kmflags); 770 if (ht == NULL) 771 break; 772 ht->ht_pfn = PFN_INVALID; 773 if (USE_HAT_RESERVES() || 774 htable_reserve_cnt >= htable_reserve_amount) 775 break; 776 htable_put_reserve(ht); 777 } 778 } 779 780 /* 781 * allocate a page for the hardware page table if needed 782 */ 783 if (ht != NULL && !is_bare) { 784 ht->ht_hat = hat; 785 ht->ht_pfn = ptable_alloc((uintptr_t)ht); 786 if (ht->ht_pfn == PFN_INVALID) { 787 if (USE_HAT_RESERVES()) 788 htable_put_reserve(ht); 789 else 790 kmem_cache_free(htable_cache, ht); 791 ht = NULL; 792 } 793 } 794 } 795 796 /* 797 * If allocations failed, kick off a kmem_reap() and resort to 798 * htable steal(). We may spin here if the system is very low on 799 * memory. If the kernel itself has consumed all memory and kmem_reap() 800 * can't free up anything, then we'll really get stuck here. 801 * That should only happen in a system where the administrator has 802 * misconfigured VM parameters via /etc/system. 803 */ 804 while (ht == NULL && can_steal_post_boot) { 805 kmem_reap(); 806 ht = htable_steal(1); 807 HATSTAT_INC(hs_steals); 808 809 /* 810 * If we stole for a bare htable, release the pagetable page. 811 */ 812 if (ht != NULL) { 813 if (is_bare) { 814 ptable_free(ht->ht_pfn); 815 ht->ht_pfn = PFN_INVALID; 816 #if defined(__xpv) && defined(__amd64) 817 /* 818 * make stolen page table writable again in kpm 819 */ 820 } else if (kpm_vbase && xen_kpm_page(ht->ht_pfn, 821 PT_VALID | PT_WRITABLE) < 0) { 822 panic("failure making kpm r/w pfn=0x%lx", 823 ht->ht_pfn); 824 #endif 825 } 826 } 827 } 828 829 /* 830 * All attempts to allocate or steal failed. This should only happen 831 * if we run out of memory during boot, due perhaps to a huge 832 * boot_archive. At this point there's no way to continue. 833 */ 834 if (ht == NULL) 835 panic("htable_alloc(): couldn't steal\n"); 836 837 #if defined(__amd64) && defined(__xpv) 838 /* 839 * Under the 64-bit hypervisor, we have 2 top level page tables. 840 * If this allocation fails, we'll resort to stealing. 841 * We use the stolen page indirectly, by freeing the 842 * stolen htable first. 843 */ 844 if (level == mmu.max_level) { 845 for (;;) { 846 htable_t *stolen; 847 848 hat->hat_user_ptable = ptable_alloc((uintptr_t)ht + 1); 849 if (hat->hat_user_ptable != PFN_INVALID) 850 break; 851 stolen = htable_steal(1); 852 if (stolen == NULL) 853 panic("2nd steal ptable failed\n"); 854 htable_free(stolen); 855 } 856 block_zero_no_xmm(kpm_vbase + pfn_to_pa(hat->hat_user_ptable), 857 MMU_PAGESIZE); 858 } 859 #endif 860 861 /* 862 * Shared page tables have all entries locked and entries may not 863 * be added or deleted. 864 */ 865 ht->ht_flags = 0; 866 if (shared != NULL) { 867 ASSERT(shared->ht_valid_cnt > 0); 868 ht->ht_flags |= HTABLE_SHARED_PFN; 869 ht->ht_pfn = shared->ht_pfn; 870 ht->ht_lock_cnt = 0; 871 ht->ht_valid_cnt = 0; /* updated in hat_share() */ 872 ht->ht_shares = shared; 873 need_to_zero = 0; 874 } else { 875 ht->ht_shares = NULL; 876 ht->ht_lock_cnt = 0; 877 ht->ht_valid_cnt = 0; 878 } 879 880 /* 881 * setup flags, etc. 
	 * for VLP htables
	 */
	if (is_vlp) {
		ht->ht_flags |= HTABLE_VLP;
		ASSERT(ht->ht_pfn == PFN_INVALID);
		need_to_zero = 0;
	}

	/*
	 * fill in the htable
	 */
	ht->ht_hat = hat;
	ht->ht_parent = NULL;
	ht->ht_vaddr = vaddr;
	ht->ht_level = level;
	ht->ht_busy = 1;
	ht->ht_next = NULL;
	ht->ht_prev = NULL;

	/*
	 * Zero out any freshly allocated page table
	 */
	if (need_to_zero)
		x86pte_zero(ht, 0, mmu.ptes_per_table);

#if defined(__amd64) && defined(__xpv)
	if (!is_bare && kpm_vbase) {
		(void) xen_kpm_page(ht->ht_pfn, PT_VALID);
		if (level == mmu.max_level)
			(void) xen_kpm_page(hat->hat_user_ptable, PT_VALID);
	}
#endif

	return (ht);
}

/*
 * Free up an htable, either to a hat's cached list, the reserves or
 * back to kmem.
 */
static void
htable_free(htable_t *ht)
{
	hat_t *hat = ht->ht_hat;

	/*
	 * If the process isn't exiting, cache the free htable in the hat
	 * structure. We always do this for the boot time reserve. We don't
	 * do this if the hat is exiting or we are stealing/reaping htables.
	 */
	if (hat != NULL &&
	    !(ht->ht_flags & HTABLE_SHARED_PFN) &&
	    (use_boot_reserve ||
	    (!(hat->hat_flags & HAT_FREEING) && !htable_dont_cache))) {
		ASSERT((ht->ht_flags & HTABLE_VLP) == 0);
		ASSERT(ht->ht_pfn != PFN_INVALID);
		hat_enter(hat);
		ht->ht_next = hat->hat_ht_cached;
		hat->hat_ht_cached = ht;
		hat_exit(hat);
		return;
	}

	/*
	 * If we have a hardware page table, free it.
	 * We don't free page tables that are accessed by sharing.
	 */
	if (ht->ht_flags & HTABLE_SHARED_PFN) {
		ASSERT(ht->ht_pfn != PFN_INVALID);
	} else if (!(ht->ht_flags & HTABLE_VLP)) {
		ptable_free(ht->ht_pfn);
#if defined(__amd64) && defined(__xpv)
		if (ht->ht_level == mmu.max_level) {
			ptable_free(hat->hat_user_ptable);
			hat->hat_user_ptable = PFN_INVALID;
		}
#endif
	}
	ht->ht_pfn = PFN_INVALID;

	/*
	 * Free it or put into reserves.
	 */
	if (USE_HAT_RESERVES() || htable_reserve_cnt < htable_reserve_amount) {
		htable_put_reserve(ht);
	} else {
		kmem_cache_free(htable_cache, ht);
		htable_adjust_reserve();
	}
}


/*
 * This is called when a hat is being destroyed or swapped out. We reap all
 * the remaining htables in the hat cache. If destroying, all leftover
 * htables are also destroyed.
 *
 * We also don't need to invalidate any of the PTPs nor do any demapping.
 */
void
htable_purge_hat(hat_t *hat)
{
	htable_t *ht;
	int h;

	/*
	 * Purge the htable cache if just reaping.
	 */
	if (!(hat->hat_flags & HAT_FREEING)) {
		atomic_add_32(&htable_dont_cache, 1);
		for (;;) {
			hat_enter(hat);
			ht = hat->hat_ht_cached;
			if (ht == NULL) {
				hat_exit(hat);
				break;
			}
			hat->hat_ht_cached = ht->ht_next;
			hat_exit(hat);
			htable_free(ht);
		}
		atomic_add_32(&htable_dont_cache, -1);
		return;
	}

	/*
	 * if freeing, no locking is needed
	 */
	while ((ht = hat->hat_ht_cached) != NULL) {
		hat->hat_ht_cached = ht->ht_next;
		htable_free(ht);
	}

	/*
	 * walk thru the htable hash table and free all the htables in it.
	 */
	for (h = 0; h < hat->hat_num_hash; ++h) {
		while ((ht = hat->hat_ht_hash[h]) != NULL) {
			if (ht->ht_next)
				ht->ht_next->ht_prev = ht->ht_prev;

			if (ht->ht_prev) {
				ht->ht_prev->ht_next = ht->ht_next;
			} else {
				ASSERT(hat->hat_ht_hash[h] == ht);
				hat->hat_ht_hash[h] = ht->ht_next;
			}
			htable_free(ht);
		}
	}
}

/*
 * Unlink an entry for a table at vaddr and level out of the existing table
 * one level higher. We are always holding the HASH_ENTER() when doing this.
 */
static void
unlink_ptp(htable_t *higher, htable_t *old, uintptr_t vaddr)
{
	uint_t entry = htable_va2entry(vaddr, higher);
	x86pte_t expect = MAKEPTP(old->ht_pfn, old->ht_level);
	x86pte_t found;
	hat_t *hat = old->ht_hat;

	ASSERT(higher->ht_busy > 0);
	ASSERT(higher->ht_valid_cnt > 0);
	ASSERT(old->ht_valid_cnt == 0);
	found = x86pte_cas(higher, entry, expect, 0);
#ifdef __xpv
	/*
	 * This is weird, but Xen apparently automatically unlinks empty
	 * pagetables from the upper page table. So allow PTP to be 0 already.
	 */
	if (found != expect && found != 0)
#else
	if (found != expect)
#endif
		panic("Bad PTP found=" FMT_PTE ", expected=" FMT_PTE,
		    found, expect);

	/*
	 * When a top level VLP page table entry changes, we must issue
	 * a reload of cr3 on all processors.
	 *
	 * If we don't need to do that, then we still have to INVLPG against
	 * an address covered by the inner page table, as the latest processors
	 * have TLB-like caches for non-leaf page table entries.
	 */
	if (!(hat->hat_flags & HAT_FREEING)) {
		hat_tlb_inval(hat, (higher->ht_flags & HTABLE_VLP) ?
		    DEMAP_ALL_ADDR : old->ht_vaddr);
	}

	HTABLE_DEC(higher->ht_valid_cnt);
}

/*
 * Link an entry for a new table at vaddr and level into the existing table
 * one level higher. We are always holding the HASH_ENTER() when doing this.
 */
static void
link_ptp(htable_t *higher, htable_t *new, uintptr_t vaddr)
{
	uint_t entry = htable_va2entry(vaddr, higher);
	x86pte_t newptp = MAKEPTP(new->ht_pfn, new->ht_level);
	x86pte_t found;

	ASSERT(higher->ht_busy > 0);

	ASSERT(new->ht_level != mmu.max_level);

	HTABLE_INC(higher->ht_valid_cnt);

	found = x86pte_cas(higher, entry, 0, newptp);
	if ((found & ~PT_REF) != 0)
		panic("HAT: ptp not 0, found=" FMT_PTE, found);

	/*
	 * When any top level VLP page table entry changes, we must issue
	 * a reload of cr3 on all processors using it.
	 * We also need to do this for the kernel hat on PAE 32 bit kernel.
	 */
	if (
#ifdef __i386
	    (higher->ht_hat == kas.a_hat && higher->ht_level == VLP_LEVEL) ||
#endif
	    (higher->ht_flags & HTABLE_VLP))
		hat_tlb_inval(higher->ht_hat, DEMAP_ALL_ADDR);
}

/*
 * Release a hold on an htable. If this is the last use and the pagetable
 * is empty we may want to free it, then recursively look at the pagetable
 * above it. The recursion is handled by the outer while() loop.
 *
 * On the metal, during process exit, we don't bother unlinking the tables from
 * upper level pagetables. They are instead handled in bulk by hat_free_end().
 * We can't do this on the hypervisor as we need the page table to be
 * implicitly unpinned before it goes to the free page lists.
This can't 1120 * happen unless we fully unlink it from the page table hierarchy. 1121 */ 1122 void 1123 htable_release(htable_t *ht) 1124 { 1125 uint_t hashval; 1126 htable_t *shared; 1127 htable_t *higher; 1128 hat_t *hat; 1129 uintptr_t va; 1130 level_t level; 1131 1132 while (ht != NULL) { 1133 shared = NULL; 1134 for (;;) { 1135 hat = ht->ht_hat; 1136 va = ht->ht_vaddr; 1137 level = ht->ht_level; 1138 hashval = HTABLE_HASH(hat, va, level); 1139 1140 /* 1141 * The common case is that this isn't the last use of 1142 * an htable so we don't want to free the htable. 1143 */ 1144 HTABLE_ENTER(hashval); 1145 ASSERT(ht->ht_valid_cnt >= 0); 1146 ASSERT(ht->ht_busy > 0); 1147 if (ht->ht_valid_cnt > 0) 1148 break; 1149 if (ht->ht_busy > 1) 1150 break; 1151 ASSERT(ht->ht_lock_cnt == 0); 1152 1153 #if !defined(__xpv) 1154 /* 1155 * we always release empty shared htables 1156 */ 1157 if (!(ht->ht_flags & HTABLE_SHARED_PFN)) { 1158 1159 /* 1160 * don't release if in address space tear down 1161 */ 1162 if (hat->hat_flags & HAT_FREEING) 1163 break; 1164 1165 /* 1166 * At and above max_page_level, free if it's for 1167 * a boot-time kernel mapping below kernelbase. 1168 */ 1169 if (level >= mmu.max_page_level && 1170 (hat != kas.a_hat || va >= kernelbase)) 1171 break; 1172 } 1173 #endif /* __xpv */ 1174 1175 /* 1176 * Remember if we destroy an htable that shares its PFN 1177 * from elsewhere. 1178 */ 1179 if (ht->ht_flags & HTABLE_SHARED_PFN) { 1180 ASSERT(shared == NULL); 1181 shared = ht->ht_shares; 1182 HATSTAT_INC(hs_htable_unshared); 1183 } 1184 1185 /* 1186 * Handle release of a table and freeing the htable_t. 1187 * Unlink it from the table higher (ie. ht_parent). 1188 */ 1189 ASSERT(ht->ht_lock_cnt == 0); 1190 higher = ht->ht_parent; 1191 ASSERT(higher != NULL); 1192 1193 /* 1194 * Unlink the pagetable. 1195 */ 1196 unlink_ptp(higher, ht, va); 1197 1198 /* 1199 * remove this htable from its hash list 1200 */ 1201 if (ht->ht_next) 1202 ht->ht_next->ht_prev = ht->ht_prev; 1203 1204 if (ht->ht_prev) { 1205 ht->ht_prev->ht_next = ht->ht_next; 1206 } else { 1207 ASSERT(hat->hat_ht_hash[hashval] == ht); 1208 hat->hat_ht_hash[hashval] = ht->ht_next; 1209 } 1210 HTABLE_EXIT(hashval); 1211 htable_free(ht); 1212 ht = higher; 1213 } 1214 1215 ASSERT(ht->ht_busy >= 1); 1216 --ht->ht_busy; 1217 HTABLE_EXIT(hashval); 1218 1219 /* 1220 * If we released a shared htable, do a release on the htable 1221 * from which it shared 1222 */ 1223 ht = shared; 1224 } 1225 } 1226 1227 /* 1228 * Find the htable for the pagetable at the given level for the given address. 
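 * The lookup hashes (hat, page table base address, level) to choose a
 * bucket in hat_ht_hash[] and then walks that bucket's chain.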
1229 * If found acquires a hold that eventually needs to be htable_release()d 1230 */ 1231 htable_t * 1232 htable_lookup(hat_t *hat, uintptr_t vaddr, level_t level) 1233 { 1234 uintptr_t base; 1235 uint_t hashval; 1236 htable_t *ht = NULL; 1237 1238 ASSERT(level >= 0); 1239 ASSERT(level <= TOP_LEVEL(hat)); 1240 1241 if (level == TOP_LEVEL(hat)) { 1242 #if defined(__amd64) 1243 /* 1244 * 32 bit address spaces on 64 bit kernels need to check 1245 * for overflow of the 32 bit address space 1246 */ 1247 if ((hat->hat_flags & HAT_VLP) && vaddr >= ((uint64_t)1 << 32)) 1248 return (NULL); 1249 #endif 1250 base = 0; 1251 } else { 1252 base = vaddr & LEVEL_MASK(level + 1); 1253 } 1254 1255 hashval = HTABLE_HASH(hat, base, level); 1256 HTABLE_ENTER(hashval); 1257 for (ht = hat->hat_ht_hash[hashval]; ht; ht = ht->ht_next) { 1258 if (ht->ht_hat == hat && 1259 ht->ht_vaddr == base && 1260 ht->ht_level == level) 1261 break; 1262 } 1263 if (ht) 1264 ++ht->ht_busy; 1265 1266 HTABLE_EXIT(hashval); 1267 return (ht); 1268 } 1269 1270 /* 1271 * Acquires a hold on a known htable (from a locked hment entry). 1272 */ 1273 void 1274 htable_acquire(htable_t *ht) 1275 { 1276 hat_t *hat = ht->ht_hat; 1277 level_t level = ht->ht_level; 1278 uintptr_t base = ht->ht_vaddr; 1279 uint_t hashval = HTABLE_HASH(hat, base, level); 1280 1281 HTABLE_ENTER(hashval); 1282 #ifdef DEBUG 1283 /* 1284 * make sure the htable is there 1285 */ 1286 { 1287 htable_t *h; 1288 1289 for (h = hat->hat_ht_hash[hashval]; 1290 h && h != ht; 1291 h = h->ht_next) 1292 ; 1293 ASSERT(h == ht); 1294 } 1295 #endif /* DEBUG */ 1296 ++ht->ht_busy; 1297 HTABLE_EXIT(hashval); 1298 } 1299 1300 /* 1301 * Find the htable for the pagetable at the given level for the given address. 1302 * If found acquires a hold that eventually needs to be htable_release()d 1303 * If not found the table is created. 1304 * 1305 * Since we can't hold a hash table mutex during allocation, we have to 1306 * drop it and redo the search on a create. Then we may have to free the newly 1307 * allocated htable if another thread raced in and created it ahead of us. 1308 */ 1309 htable_t * 1310 htable_create( 1311 hat_t *hat, 1312 uintptr_t vaddr, 1313 level_t level, 1314 htable_t *shared) 1315 { 1316 uint_t h; 1317 level_t l; 1318 uintptr_t base; 1319 htable_t *ht; 1320 htable_t *higher = NULL; 1321 htable_t *new = NULL; 1322 1323 if (level < 0 || level > TOP_LEVEL(hat)) 1324 panic("htable_create(): level %d out of range\n", level); 1325 1326 /* 1327 * Create the page tables in top down order. 1328 */ 1329 for (l = TOP_LEVEL(hat); l >= level; --l) { 1330 new = NULL; 1331 if (l == TOP_LEVEL(hat)) 1332 base = 0; 1333 else 1334 base = vaddr & LEVEL_MASK(l + 1); 1335 1336 h = HTABLE_HASH(hat, base, l); 1337 try_again: 1338 /* 1339 * look up the htable at this level 1340 */ 1341 HTABLE_ENTER(h); 1342 if (l == TOP_LEVEL(hat)) { 1343 ht = hat->hat_htable; 1344 } else { 1345 for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) { 1346 ASSERT(ht->ht_hat == hat); 1347 if (ht->ht_vaddr == base && 1348 ht->ht_level == l) 1349 break; 1350 } 1351 } 1352 1353 /* 1354 * if we found the htable, increment its busy cnt 1355 * and if we had allocated a new htable, free it. 1356 */ 1357 if (ht != NULL) { 1358 /* 1359 * If we find a pre-existing shared table, it must 1360 * share from the same place. 
1361 */ 1362 if (l == level && shared && ht->ht_shares && 1363 ht->ht_shares != shared) { 1364 panic("htable shared from wrong place " 1365 "found htable=%p shared=%p", 1366 (void *)ht, (void *)shared); 1367 } 1368 ++ht->ht_busy; 1369 HTABLE_EXIT(h); 1370 if (new) 1371 htable_free(new); 1372 if (higher != NULL) 1373 htable_release(higher); 1374 higher = ht; 1375 1376 /* 1377 * if we didn't find it on the first search 1378 * allocate a new one and search again 1379 */ 1380 } else if (new == NULL) { 1381 HTABLE_EXIT(h); 1382 new = htable_alloc(hat, base, l, 1383 l == level ? shared : NULL); 1384 goto try_again; 1385 1386 /* 1387 * 2nd search and still not there, use "new" table 1388 * Link new table into higher, when not at top level. 1389 */ 1390 } else { 1391 ht = new; 1392 if (higher != NULL) { 1393 link_ptp(higher, ht, base); 1394 ht->ht_parent = higher; 1395 } 1396 ht->ht_next = hat->hat_ht_hash[h]; 1397 ASSERT(ht->ht_prev == NULL); 1398 if (hat->hat_ht_hash[h]) 1399 hat->hat_ht_hash[h]->ht_prev = ht; 1400 hat->hat_ht_hash[h] = ht; 1401 HTABLE_EXIT(h); 1402 1403 /* 1404 * Note we don't do htable_release(higher). 1405 * That happens recursively when "new" is removed by 1406 * htable_release() or htable_steal(). 1407 */ 1408 higher = ht; 1409 1410 /* 1411 * If we just created a new shared page table we 1412 * increment the shared htable's busy count, so that 1413 * it can't be the victim of a steal even if it's empty. 1414 */ 1415 if (l == level && shared) { 1416 (void) htable_lookup(shared->ht_hat, 1417 shared->ht_vaddr, shared->ht_level); 1418 HATSTAT_INC(hs_htable_shared); 1419 } 1420 } 1421 } 1422 1423 return (ht); 1424 } 1425 1426 /* 1427 * Inherit initial pagetables from the boot program. On the 64-bit 1428 * hypervisor we also temporarily mark the p_index field of page table 1429 * pages, so we know not to try making them writable in seg_kpm. 1430 */ 1431 void 1432 htable_attach( 1433 hat_t *hat, 1434 uintptr_t base, 1435 level_t level, 1436 htable_t *parent, 1437 pfn_t pfn) 1438 { 1439 htable_t *ht; 1440 uint_t h; 1441 uint_t i; 1442 x86pte_t pte; 1443 x86pte_t *ptep; 1444 page_t *pp; 1445 extern page_t *boot_claim_page(pfn_t); 1446 1447 ht = htable_get_reserve(); 1448 if (level == mmu.max_level) 1449 kas.a_hat->hat_htable = ht; 1450 ht->ht_hat = hat; 1451 ht->ht_parent = parent; 1452 ht->ht_vaddr = base; 1453 ht->ht_level = level; 1454 ht->ht_busy = 1; 1455 ht->ht_next = NULL; 1456 ht->ht_prev = NULL; 1457 ht->ht_flags = 0; 1458 ht->ht_pfn = pfn; 1459 ht->ht_lock_cnt = 0; 1460 ht->ht_valid_cnt = 0; 1461 if (parent != NULL) 1462 ++parent->ht_busy; 1463 1464 h = HTABLE_HASH(hat, base, level); 1465 HTABLE_ENTER(h); 1466 ht->ht_next = hat->hat_ht_hash[h]; 1467 ASSERT(ht->ht_prev == NULL); 1468 if (hat->hat_ht_hash[h]) 1469 hat->hat_ht_hash[h]->ht_prev = ht; 1470 hat->hat_ht_hash[h] = ht; 1471 HTABLE_EXIT(h); 1472 1473 /* 1474 * make sure the page table physical page is not FREE 1475 */ 1476 if (page_resv(1, KM_NOSLEEP) == 0) 1477 panic("page_resv() failed in ptable alloc"); 1478 1479 pp = boot_claim_page(pfn); 1480 ASSERT(pp != NULL); 1481 page_downgrade(pp); 1482 #if defined(__xpv) && defined(__amd64) 1483 /* 1484 * Record in the page_t that is a pagetable for segkpm setup. 1485 */ 1486 if (kpm_vbase) 1487 pp->p_index = 1; 1488 #endif 1489 1490 /* 1491 * Count valid mappings and recursively attach lower level pagetables. 
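	 * Note that the boot remap window is re-established with
	 * kbm_remap_window() after each recursive call, since the recursion
	 * reuses the same window.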
1492 */ 1493 ptep = kbm_remap_window(pfn_to_pa(pfn), 0); 1494 for (i = 0; i < HTABLE_NUM_PTES(ht); ++i) { 1495 if (mmu.pae_hat) 1496 pte = ptep[i]; 1497 else 1498 pte = ((x86pte32_t *)ptep)[i]; 1499 if (!IN_HYPERVISOR_VA(base) && PTE_ISVALID(pte)) { 1500 ++ht->ht_valid_cnt; 1501 if (!PTE_ISPAGE(pte, level)) { 1502 htable_attach(hat, base, level - 1, 1503 ht, PTE2PFN(pte, level)); 1504 ptep = kbm_remap_window(pfn_to_pa(pfn), 0); 1505 } 1506 } 1507 base += LEVEL_SIZE(level); 1508 if (base == mmu.hole_start) 1509 base = (mmu.hole_end + MMU_PAGEOFFSET) & MMU_PAGEMASK; 1510 } 1511 1512 /* 1513 * As long as all the mappings we had were below kernel base 1514 * we can release the htable. 1515 */ 1516 if (base < kernelbase) 1517 htable_release(ht); 1518 } 1519 1520 /* 1521 * Walk through a given htable looking for the first valid entry. This 1522 * routine takes both a starting and ending address. The starting address 1523 * is required to be within the htable provided by the caller, but there is 1524 * no such restriction on the ending address. 1525 * 1526 * If the routine finds a valid entry in the htable (at or beyond the 1527 * starting address), the PTE (and its address) will be returned. 1528 * This PTE may correspond to either a page or a pagetable - it is the 1529 * caller's responsibility to determine which. If no valid entry is 1530 * found, 0 (and invalid PTE) and the next unexamined address will be 1531 * returned. 1532 * 1533 * The loop has been carefully coded for optimization. 1534 */ 1535 static x86pte_t 1536 htable_scan(htable_t *ht, uintptr_t *vap, uintptr_t eaddr) 1537 { 1538 uint_t e; 1539 x86pte_t found_pte = (x86pte_t)0; 1540 caddr_t pte_ptr; 1541 caddr_t end_pte_ptr; 1542 int l = ht->ht_level; 1543 uintptr_t va = *vap & LEVEL_MASK(l); 1544 size_t pgsize = LEVEL_SIZE(l); 1545 1546 ASSERT(va >= ht->ht_vaddr); 1547 ASSERT(va <= HTABLE_LAST_PAGE(ht)); 1548 1549 /* 1550 * Compute the starting index and ending virtual address 1551 */ 1552 e = htable_va2entry(va, ht); 1553 1554 /* 1555 * The following page table scan code knows that the valid 1556 * bit of a PTE is in the lowest byte AND that x86 is little endian!! 1557 */ 1558 pte_ptr = (caddr_t)x86pte_access_pagetable(ht, 0); 1559 end_pte_ptr = (caddr_t)PT_INDEX_PTR(pte_ptr, HTABLE_NUM_PTES(ht)); 1560 pte_ptr = (caddr_t)PT_INDEX_PTR((x86pte_t *)pte_ptr, e); 1561 while (!PTE_ISVALID(*pte_ptr)) { 1562 va += pgsize; 1563 if (va >= eaddr) 1564 break; 1565 pte_ptr += mmu.pte_size; 1566 ASSERT(pte_ptr <= end_pte_ptr); 1567 if (pte_ptr == end_pte_ptr) 1568 break; 1569 } 1570 1571 /* 1572 * if we found a valid PTE, load the entire PTE 1573 */ 1574 if (va < eaddr && pte_ptr != end_pte_ptr) 1575 found_pte = GET_PTE((x86pte_t *)pte_ptr); 1576 x86pte_release_pagetable(ht); 1577 1578 #if defined(__amd64) 1579 /* 1580 * deal with VA hole on amd64 1581 */ 1582 if (l == mmu.max_level && va >= mmu.hole_start && va <= mmu.hole_end) 1583 va = mmu.hole_end + va - mmu.hole_start; 1584 #endif /* __amd64 */ 1585 1586 *vap = va; 1587 return (found_pte); 1588 } 1589 1590 /* 1591 * Find the address and htable for the first populated translation at or 1592 * above the given virtual address. The caller may also specify an upper 1593 * limit to the address range to search. Uses level information to quickly 1594 * skip unpopulated sections of virtual address spaces. 1595 * 1596 * If not found returns NULL. When found, returns the htable and virt addr 1597 * and has a hold on the htable. 
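 *
 * A typical caller pattern looks roughly like this (an illustrative sketch,
 * not copied from a real caller):
 *
 *	ht = NULL;
 *	for (va = start; va < end; va += LEVEL_SIZE(ht->ht_level)) {
 *		pte = htable_walk(hat, &ht, &va, end);
 *		if (ht == NULL)
 *			break;
 *		... process the mapping found at va ...
 *	}
 *	if (ht != NULL)
 *		htable_release(ht);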
1598 */ 1599 x86pte_t 1600 htable_walk( 1601 struct hat *hat, 1602 htable_t **htp, 1603 uintptr_t *vaddr, 1604 uintptr_t eaddr) 1605 { 1606 uintptr_t va = *vaddr; 1607 htable_t *ht; 1608 htable_t *prev = *htp; 1609 level_t l; 1610 level_t max_mapped_level; 1611 x86pte_t pte; 1612 1613 ASSERT(eaddr > va); 1614 1615 /* 1616 * If this is a user address, then we know we need not look beyond 1617 * kernelbase. 1618 */ 1619 ASSERT(hat == kas.a_hat || eaddr <= kernelbase || 1620 eaddr == HTABLE_WALK_TO_END); 1621 if (hat != kas.a_hat && eaddr == HTABLE_WALK_TO_END) 1622 eaddr = kernelbase; 1623 1624 /* 1625 * If we're coming in with a previous page table, search it first 1626 * without doing an htable_lookup(), this should be frequent. 1627 */ 1628 if (prev) { 1629 ASSERT(prev->ht_busy > 0); 1630 ASSERT(prev->ht_vaddr <= va); 1631 l = prev->ht_level; 1632 if (va <= HTABLE_LAST_PAGE(prev)) { 1633 pte = htable_scan(prev, &va, eaddr); 1634 1635 if (PTE_ISPAGE(pte, l)) { 1636 *vaddr = va; 1637 *htp = prev; 1638 return (pte); 1639 } 1640 } 1641 1642 /* 1643 * We found nothing in the htable provided by the caller, 1644 * so fall through and do the full search 1645 */ 1646 htable_release(prev); 1647 } 1648 1649 /* 1650 * Find the level of the largest pagesize used by this HAT. 1651 */ 1652 if (hat->hat_ism_pgcnt > 0) { 1653 max_mapped_level = mmu.umax_page_level; 1654 } else { 1655 max_mapped_level = 0; 1656 for (l = 1; l <= mmu.max_page_level; ++l) 1657 if (hat->hat_pages_mapped[l] != 0) 1658 max_mapped_level = l; 1659 } 1660 1661 while (va < eaddr && va >= *vaddr) { 1662 ASSERT(!IN_VA_HOLE(va)); 1663 1664 /* 1665 * Find lowest table with any entry for given address. 1666 */ 1667 for (l = 0; l <= TOP_LEVEL(hat); ++l) { 1668 ht = htable_lookup(hat, va, l); 1669 if (ht != NULL) { 1670 pte = htable_scan(ht, &va, eaddr); 1671 if (PTE_ISPAGE(pte, l)) { 1672 *vaddr = va; 1673 *htp = ht; 1674 return (pte); 1675 } 1676 htable_release(ht); 1677 break; 1678 } 1679 1680 /* 1681 * No htable at this level for the address. If there 1682 * is no larger page size that could cover it, we can 1683 * skip right to the start of the next page table. 1684 */ 1685 ASSERT(l < TOP_LEVEL(hat)); 1686 if (l >= max_mapped_level) { 1687 va = NEXT_ENTRY_VA(va, l + 1); 1688 if (va >= eaddr) 1689 break; 1690 } 1691 } 1692 } 1693 1694 *vaddr = 0; 1695 *htp = NULL; 1696 return (0); 1697 } 1698 1699 /* 1700 * Find the htable and page table entry index of the given virtual address 1701 * with pagesize at or below given level. 1702 * If not found returns NULL. When found, returns the htable, sets 1703 * entry, and has a hold on the htable. 1704 */ 1705 htable_t * 1706 htable_getpte( 1707 struct hat *hat, 1708 uintptr_t vaddr, 1709 uint_t *entry, 1710 x86pte_t *pte, 1711 level_t level) 1712 { 1713 htable_t *ht; 1714 level_t l; 1715 uint_t e; 1716 1717 ASSERT(level <= mmu.max_page_level); 1718 1719 for (l = 0; l <= level; ++l) { 1720 ht = htable_lookup(hat, vaddr, l); 1721 if (ht == NULL) 1722 continue; 1723 e = htable_va2entry(vaddr, ht); 1724 if (entry != NULL) 1725 *entry = e; 1726 if (pte != NULL) 1727 *pte = x86pte_get(ht, e); 1728 return (ht); 1729 } 1730 return (NULL); 1731 } 1732 1733 /* 1734 * Find the htable and page table entry index of the given virtual address. 1735 * There must be a valid page mapped at the given address. 1736 * If not found returns NULL. When found, returns the htable, sets 1737 * entry, and has a hold on the htable. 
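 *
 * For example (illustrative only), a caller that knows vaddr is mapped can
 * do the following and then drop the hold when done:
 *
 *	ht = htable_getpage(hat, vaddr, &entry);
 *	if (ht != NULL) {
 *		pte = x86pte_get(ht, entry);
 *		...
 *		htable_release(ht);
 *	}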
 */
htable_t *
htable_getpage(struct hat *hat, uintptr_t vaddr, uint_t *entry)
{
	htable_t *ht;
	uint_t e;
	x86pte_t pte;

	ht = htable_getpte(hat, vaddr, &e, &pte, mmu.max_page_level);
	if (ht == NULL)
		return (NULL);

	if (entry)
		*entry = e;

	if (PTE_ISPAGE(pte, ht->ht_level))
		return (ht);
	htable_release(ht);
	return (NULL);
}


void
htable_init()
{
	/*
	 * To save on kernel VA usage, we avoid debug information in 32 bit
	 * kernels.
	 */
#if defined(__amd64)
	int kmem_flags = KMC_NOHASH;
#elif defined(__i386)
	int kmem_flags = KMC_NOHASH | KMC_NODEBUG;
#endif

	/*
	 * initialize kmem caches
	 */
	htable_cache = kmem_cache_create("htable_t",
	    sizeof (htable_t), 0, NULL, NULL,
	    htable_reap, NULL, hat_memload_arena, kmem_flags);
}

/*
 * get the pte index for the virtual address in the given htable's pagetable
 */
uint_t
htable_va2entry(uintptr_t va, htable_t *ht)
{
	level_t l = ht->ht_level;

	ASSERT(va >= ht->ht_vaddr);
	ASSERT(va <= HTABLE_LAST_PAGE(ht));
	return ((va >> LEVEL_SHIFT(l)) & (HTABLE_NUM_PTES(ht) - 1));
}

/*
 * Given an htable and the index of a pte in it, return the virtual address
 * of the page.
 */
uintptr_t
htable_e2va(htable_t *ht, uint_t entry)
{
	level_t l = ht->ht_level;
	uintptr_t va;

	ASSERT(entry < HTABLE_NUM_PTES(ht));
	va = ht->ht_vaddr + ((uintptr_t)entry << LEVEL_SHIFT(l));

	/*
	 * Need to skip over any VA hole in top level table
	 */
#if defined(__amd64)
	if (ht->ht_level == mmu.max_level && va >= mmu.hole_start)
		va += ((mmu.hole_end - mmu.hole_start) + 1);
#endif

	return (va);
}

/*
 * The code uses compare and swap instructions to read/write PTE's to
 * avoid atomicity problems, since a PTE can be 8 bytes on 32 bit (PAE)
 * systems, where a plain load or store of it is not atomic; on 64 bit
 * systems an aligned PTE access will naturally be atomic.
 *
 * The combination of using kpreempt_disable()/_enable() and the hci_mutex
 * is used to ensure that an interrupt won't overwrite a temporary mapping
 * while it's in use. If an interrupt thread tries to access a PTE, it will
 * yield briefly back to the pinned thread which holds the cpu's hci_mutex.
 */
void
x86pte_cpu_init(cpu_t *cpu)
{
	struct hat_cpu_info *hci;

	hci = kmem_zalloc(sizeof (*hci), KM_SLEEP);
	mutex_init(&hci->hci_mutex, NULL, MUTEX_DEFAULT, NULL);
	cpu->cpu_hat_info = hci;
}

void
x86pte_cpu_fini(cpu_t *cpu)
{
	struct hat_cpu_info *hci = cpu->cpu_hat_info;

	kmem_free(hci, sizeof (*hci));
	cpu->cpu_hat_info = NULL;
}

#ifdef __i386
/*
 * On 32 bit kernels, loading a 64 bit PTE is a little tricky
 */
x86pte_t
get_pte64(x86pte_t *ptr)
{
	volatile uint32_t *p = (uint32_t *)ptr;
	x86pte_t t;

	ASSERT(mmu.pae_hat != 0);
	for (;;) {
		t = p[0];
		t |= (uint64_t)p[1] << 32;
		if ((t & 0xffffffff) == p[0])
			return (t);
	}
}
#endif /* __i386 */

/*
 * Disable preemption and establish a mapping to the pagetable with the
 * given pfn. This is optimized for the case where it's the same
 * pfn as was last used from this CPU.
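 *
 * Three cases are handled by the code below: before khat_running the boot
 * remap window is used; when segkpm is available the pagetable is addressed
 * directly through its kpm mapping; otherwise this CPU's pagetable window
 * (PWIN) is borrowed while holding hci_mutex with preemption disabled.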
1871 */ 1872 static x86pte_t * 1873 x86pte_access_pagetable(htable_t *ht, uint_t index) 1874 { 1875 /* 1876 * VLP pagetables are contained in the hat_t 1877 */ 1878 if (ht->ht_flags & HTABLE_VLP) 1879 return (PT_INDEX_PTR(ht->ht_hat->hat_vlp_ptes, index)); 1880 return (x86pte_mapin(ht->ht_pfn, index, ht)); 1881 } 1882 1883 /* 1884 * map the given pfn into the page table window. 1885 */ 1886 /*ARGSUSED*/ 1887 x86pte_t * 1888 x86pte_mapin(pfn_t pfn, uint_t index, htable_t *ht) 1889 { 1890 x86pte_t *pteptr; 1891 x86pte_t pte = 0; 1892 x86pte_t newpte; 1893 int x; 1894 1895 ASSERT(pfn != PFN_INVALID); 1896 1897 if (!khat_running) { 1898 caddr_t va = kbm_remap_window(pfn_to_pa(pfn), 1); 1899 return (PT_INDEX_PTR(va, index)); 1900 } 1901 1902 /* 1903 * If kpm is available, use it. 1904 */ 1905 if (kpm_vbase) 1906 return (PT_INDEX_PTR(hat_kpm_pfn2va(pfn), index)); 1907 1908 /* 1909 * Disable preemption and grab the CPU's hci_mutex 1910 */ 1911 kpreempt_disable(); 1912 ASSERT(CPU->cpu_hat_info != NULL); 1913 mutex_enter(&CPU->cpu_hat_info->hci_mutex); 1914 x = PWIN_TABLE(CPU->cpu_id); 1915 pteptr = (x86pte_t *)PWIN_PTE_VA(x); 1916 #ifndef __xpv 1917 if (mmu.pae_hat) 1918 pte = *pteptr; 1919 else 1920 pte = *(x86pte32_t *)pteptr; 1921 #endif 1922 1923 newpte = MAKEPTE(pfn, 0) | mmu.pt_global | mmu.pt_nx; 1924 1925 /* 1926 * For hardware we can use a writable mapping. 1927 */ 1928 #ifdef __xpv 1929 if (IN_XPV_PANIC()) 1930 #endif 1931 newpte |= PT_WRITABLE; 1932 1933 if (!PTE_EQUIV(newpte, pte)) { 1934 1935 #ifdef __xpv 1936 if (!IN_XPV_PANIC()) { 1937 xen_map(newpte, PWIN_VA(x)); 1938 } else 1939 #endif 1940 { 1941 XPV_ALLOW_PAGETABLE_UPDATES(); 1942 if (mmu.pae_hat) 1943 *pteptr = newpte; 1944 else 1945 *(x86pte32_t *)pteptr = newpte; 1946 XPV_DISALLOW_PAGETABLE_UPDATES(); 1947 mmu_tlbflush_entry((caddr_t)(PWIN_VA(x))); 1948 } 1949 } 1950 return (PT_INDEX_PTR(PWIN_VA(x), index)); 1951 } 1952 1953 /* 1954 * Release access to a page table. 1955 */ 1956 static void 1957 x86pte_release_pagetable(htable_t *ht) 1958 { 1959 /* 1960 * nothing to do for VLP htables 1961 */ 1962 if (ht->ht_flags & HTABLE_VLP) 1963 return; 1964 1965 x86pte_mapout(); 1966 } 1967 1968 void 1969 x86pte_mapout(void) 1970 { 1971 if (kpm_vbase != NULL || !khat_running) 1972 return; 1973 1974 /* 1975 * Drop the CPU's hci_mutex and restore preemption. 1976 */ 1977 #ifdef __xpv 1978 if (!IN_XPV_PANIC()) { 1979 uintptr_t va; 1980 1981 /* 1982 * We need to always clear the mapping in case a page 1983 * that was once a page table page is ballooned out. 1984 */ 1985 va = (uintptr_t)PWIN_VA(PWIN_TABLE(CPU->cpu_id)); 1986 (void) HYPERVISOR_update_va_mapping(va, 0, 1987 UVMF_INVLPG | UVMF_LOCAL); 1988 } 1989 #endif 1990 mutex_exit(&CPU->cpu_hat_info->hci_mutex); 1991 kpreempt_enable(); 1992 } 1993 1994 /* 1995 * Atomic retrieval of a pagetable entry 1996 */ 1997 x86pte_t 1998 x86pte_get(htable_t *ht, uint_t entry) 1999 { 2000 x86pte_t pte; 2001 x86pte_t *ptep; 2002 2003 /* 2004 * Be careful that loading PAE entries in 32 bit kernel is atomic. 2005 */ 2006 ASSERT(entry < mmu.ptes_per_table); 2007 ptep = x86pte_access_pagetable(ht, entry); 2008 pte = GET_PTE(ptep); 2009 x86pte_release_pagetable(ht); 2010 return (pte); 2011 } 2012 2013 /* 2014 * Atomic unconditional set of a page table entry, it returns the previous 2015 * value. For pre-existing mappings if the PFN changes, then we don't care 2016 * about the old pte's REF / MOD bits. If the PFN remains the same, we leave 2017 * the MOD/REF bits unchanged. 
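 * (Preserving REF/MOD on a same-pfn remap matters for segmap, which relies
 * on the stale bits being carried over; see the comment in the function
 * body below.)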
2018 * 2019 * If asked to overwrite a link to a lower page table with a large page 2020 * mapping, this routine returns the special value of LPAGE_ERROR. This 2021 * allows the upper HAT layers to retry with a smaller mapping size. 2022 */ 2023 x86pte_t 2024 x86pte_set(htable_t *ht, uint_t entry, x86pte_t new, void *ptr) 2025 { 2026 x86pte_t old; 2027 x86pte_t prev; 2028 x86pte_t *ptep; 2029 level_t l = ht->ht_level; 2030 x86pte_t pfn_mask = (l != 0) ? PT_PADDR_LGPG : PT_PADDR; 2031 x86pte_t n; 2032 uintptr_t addr = htable_e2va(ht, entry); 2033 hat_t *hat = ht->ht_hat; 2034 2035 ASSERT(new != 0); /* don't use to invalidate a PTE, see x86pte_update */ 2036 ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN)); 2037 if (ptr == NULL) 2038 ptep = x86pte_access_pagetable(ht, entry); 2039 else 2040 ptep = ptr; 2041 2042 /* 2043 * Install the new PTE. If remapping the same PFN, then 2044 * copy existing REF/MOD bits to new mapping. 2045 */ 2046 do { 2047 prev = GET_PTE(ptep); 2048 n = new; 2049 if (PTE_ISVALID(n) && (prev & pfn_mask) == (new & pfn_mask)) 2050 n |= prev & (PT_REF | PT_MOD); 2051 2052 /* 2053 * Another thread may have installed this mapping already, 2054 * flush the local TLB and be done. 2055 */ 2056 if (prev == n) { 2057 old = new; 2058 #ifdef __xpv 2059 if (!IN_XPV_PANIC()) 2060 xen_flush_va((caddr_t)addr); 2061 else 2062 #endif 2063 mmu_tlbflush_entry((caddr_t)addr); 2064 goto done; 2065 } 2066 2067 /* 2068 * Detect if we have a collision of installing a large 2069 * page mapping where there already is a lower page table. 2070 */ 2071 if (l > 0 && (prev & PT_VALID) && !(prev & PT_PAGESIZE)) { 2072 old = LPAGE_ERROR; 2073 goto done; 2074 } 2075 2076 XPV_ALLOW_PAGETABLE_UPDATES(); 2077 old = CAS_PTE(ptep, prev, n); 2078 XPV_DISALLOW_PAGETABLE_UPDATES(); 2079 } while (old != prev); 2080 2081 /* 2082 * Do a TLB demap if needed, ie. the old pte was valid. 2083 * 2084 * Note that a stale TLB writeback to the PTE here either can't happen 2085 * or doesn't matter. The PFN can only change for NOSYNC|NOCONSIST 2086 * mappings, but they were created with REF and MOD already set, so 2087 * no stale writeback will happen. 2088 * 2089 * Segmap is the only place where remaps happen on the same pfn and for 2090 * that we want to preserve the stale REF/MOD bits. 2091 */ 2092 if (old & PT_REF) 2093 hat_tlb_inval(hat, addr); 2094 2095 done: 2096 if (ptr == NULL) 2097 x86pte_release_pagetable(ht); 2098 return (old); 2099 } 2100 2101 /* 2102 * Atomic compare and swap of a page table entry. No TLB invalidates are done. 2103 * This is used for links between pagetables of different levels. 2104 * Note we always create these links with dirty/access set, so they should 2105 * never change. 2106 */ 2107 x86pte_t 2108 x86pte_cas(htable_t *ht, uint_t entry, x86pte_t old, x86pte_t new) 2109 { 2110 x86pte_t pte; 2111 x86pte_t *ptep; 2112 #ifdef __xpv 2113 /* 2114 * We can't use writable pagetables for upper level tables, so fake it. 2115 */ 2116 mmu_update_t t[2]; 2117 int cnt = 1; 2118 int count; 2119 maddr_t ma; 2120 2121 if (!IN_XPV_PANIC()) { 2122 ASSERT(!(ht->ht_flags & HTABLE_VLP)); /* no VLP yet */ 2123 ma = pa_to_ma(PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry)); 2124 t[0].ptr = ma | MMU_NORMAL_PT_UPDATE; 2125 t[0].val = new; 2126 2127 #if defined(__amd64) 2128 /* 2129 * On the 64-bit hypervisor we need to maintain the user mode 2130 * top page table too. 
		 */
		if (ht->ht_level == mmu.max_level && ht->ht_hat != kas.a_hat) {
			ma = pa_to_ma(PT_INDEX_PHYSADDR(pfn_to_pa(
			    ht->ht_hat->hat_user_ptable), entry));
			t[1].ptr = ma | MMU_NORMAL_PT_UPDATE;
			t[1].val = new;
			++cnt;
		}
#endif	/* __amd64 */

		if (HYPERVISOR_mmu_update(t, cnt, &count, DOMID_SELF))
			panic("HYPERVISOR_mmu_update() failed");
		ASSERT(count == cnt);
		return (old);
	}
#endif
	ptep = x86pte_access_pagetable(ht, entry);
	XPV_ALLOW_PAGETABLE_UPDATES();
	pte = CAS_PTE(ptep, old, new);
	XPV_DISALLOW_PAGETABLE_UPDATES();
	x86pte_release_pagetable(ht);
	return (pte);
}

/*
 * Invalidate a page table entry as long as it currently maps something that
 * matches the value determined by expect.
 *
 * Also invalidates any TLB entries and returns the previous value of the PTE.
 */
x86pte_t
x86pte_inval(
	htable_t *ht,
	uint_t entry,
	x86pte_t expect,
	x86pte_t *pte_ptr)
{
	x86pte_t *ptep;
	x86pte_t oldpte;
	x86pte_t found;

	ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN));
	ASSERT(ht->ht_level <= mmu.max_page_level);

	if (pte_ptr != NULL)
		ptep = pte_ptr;
	else
		ptep = x86pte_access_pagetable(ht, entry);

#if defined(__xpv)
	/*
	 * If exit()ing just use HYPERVISOR_mmu_update(), as we can't be racing
	 * with anything else.
	 */
	if ((ht->ht_hat->hat_flags & HAT_FREEING) && !IN_XPV_PANIC()) {
		int count;
		mmu_update_t t[1];
		maddr_t ma;

		oldpte = GET_PTE(ptep);
		if (expect != 0 && (oldpte & PT_PADDR) != (expect & PT_PADDR))
			goto done;
		ma = pa_to_ma(PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry));
		t[0].ptr = ma | MMU_NORMAL_PT_UPDATE;
		t[0].val = 0;
		if (HYPERVISOR_mmu_update(t, 1, &count, DOMID_SELF))
			panic("HYPERVISOR_mmu_update() failed");
		ASSERT(count == 1);
		goto done;
	}
#endif /* __xpv */

	/*
	 * Note that the loop is needed to handle changes due to h/w updating
	 * of PT_MOD/PT_REF.
	 */
	do {
		oldpte = GET_PTE(ptep);
		if (expect != 0 && (oldpte & PT_PADDR) != (expect & PT_PADDR))
			goto done;
		XPV_ALLOW_PAGETABLE_UPDATES();
		found = CAS_PTE(ptep, oldpte, 0);
		XPV_DISALLOW_PAGETABLE_UPDATES();
	} while (found != oldpte);
	if (oldpte & (PT_REF | PT_MOD))
		hat_tlb_inval(ht->ht_hat, htable_e2va(ht, entry));

done:
	if (pte_ptr == NULL)
		x86pte_release_pagetable(ht);
	return (oldpte);
}

/*
 * Change a page table entry if it currently matches the value in expect.
 */
x86pte_t
x86pte_update(
	htable_t *ht,
	uint_t entry,
	x86pte_t expect,
	x86pte_t new)
{
	x86pte_t *ptep;
	x86pte_t found;

	ASSERT(new != 0);
	ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN));
	ASSERT(ht->ht_level <= mmu.max_page_level);

	ptep = x86pte_access_pagetable(ht, entry);
	XPV_ALLOW_PAGETABLE_UPDATES();
	found = CAS_PTE(ptep, expect, new);
	XPV_DISALLOW_PAGETABLE_UPDATES();
	if (found == expect) {
		hat_tlb_inval(ht->ht_hat, htable_e2va(ht, entry));

		/*
		 * When removing write permission *and* clearing the
		 * MOD bit, check if a write happened via a stale
		 * TLB entry before the TLB shootdown finished.
		 *
		 * If it did happen, simply re-enable write permission and
		 * act like the original CAS failed.
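		 * (The caller then sees a return value different from
		 * "expect", just as for a lost CAS race, and can retry
		 * or bail out as appropriate.)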

#ifndef __xpv
/*
 * Copy page tables - this is just a little more complicated than the
 * previous routines. Note that it's also not atomic! It also is never
 * used for VLP pagetables.
 */
void
x86pte_copy(htable_t *src, htable_t *dest, uint_t entry, uint_t count)
{
        caddr_t src_va;
        caddr_t dst_va;
        size_t size;
        x86pte_t *pteptr;
        x86pte_t pte;

        ASSERT(khat_running);
        ASSERT(!(dest->ht_flags & HTABLE_VLP));
        ASSERT(!(src->ht_flags & HTABLE_VLP));
        ASSERT(!(src->ht_flags & HTABLE_SHARED_PFN));
        ASSERT(!(dest->ht_flags & HTABLE_SHARED_PFN));

        /*
         * Acquire access to the CPU pagetable windows for the dest and source.
         */
        dst_va = (caddr_t)x86pte_access_pagetable(dest, entry);
        if (kpm_vbase) {
                src_va = (caddr_t)
                    PT_INDEX_PTR(hat_kpm_pfn2va(src->ht_pfn), entry);
        } else {
                uint_t x = PWIN_SRC(CPU->cpu_id);

                /*
                 * Finish defining the src pagetable mapping
                 */
                src_va = (caddr_t)PT_INDEX_PTR(PWIN_VA(x), entry);
                pte = MAKEPTE(src->ht_pfn, 0) | mmu.pt_global | mmu.pt_nx;
                pteptr = (x86pte_t *)PWIN_PTE_VA(x);
                if (mmu.pae_hat)
                        *pteptr = pte;
                else
                        *(x86pte32_t *)pteptr = pte;
                mmu_tlbflush_entry((caddr_t)(PWIN_VA(x)));
        }

        /*
         * now do the copy
         */
        size = count << mmu.pte_size_shift;
        bcopy(src_va, dst_va, size);

        x86pte_release_pagetable(dest);
}

#else /* __xpv */

/*
 * The hypervisor only supports writable pagetables at level 0, so we have
 * to install these 1 by 1 the slow way.
 */
void
x86pte_copy(htable_t *src, htable_t *dest, uint_t entry, uint_t count)
{
        caddr_t src_va;
        x86pte_t pte;

        ASSERT(!IN_XPV_PANIC());
        src_va = (caddr_t)x86pte_access_pagetable(src, entry);
        while (count) {
                if (mmu.pae_hat)
                        pte = *(x86pte_t *)src_va;
                else
                        pte = *(x86pte32_t *)src_va;
                if (pte != 0) {
                        set_pteval(pfn_to_pa(dest->ht_pfn), entry,
                            dest->ht_level, pte);
#ifdef __amd64
                        if (dest->ht_level == mmu.max_level &&
                            htable_e2va(dest, entry) < HYPERVISOR_VIRT_END)
                                set_pteval(
                                    pfn_to_pa(dest->ht_hat->hat_user_ptable),
                                    entry, dest->ht_level, pte);
#endif
                }
                --count;
                ++entry;
                src_va += mmu.pte_size;
        }
        x86pte_release_pagetable(src);
}
#endif /* __xpv */
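
/*
 * Illustrative note, not part of the original file (assumption: mmu.pte_size
 * is 8 with pae_hat and 4 otherwise, so mmu.pte_size_shift is 3 or 2): both
 * x86pte_copy() variants above rely on the same offset arithmetic.
 *
 *      byte offset of "entry"   =  entry << mmu.pte_size_shift
 *      bytes moved for "count"  =  count << mmu.pte_size_shift
 *
 * For example, copying 16 PAE entries starting at entry 8 moves 128 bytes,
 * covering byte offsets 64 through 191 of the pagetable page.
 */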

/*
 * Zero page table entries - Note this doesn't use atomic stores!
 */
static void
x86pte_zero(htable_t *dest, uint_t entry, uint_t count)
{
        caddr_t dst_va;
        size_t size;
#ifdef __xpv
        int x;
        x86pte_t newpte;
#endif

        /*
         * Map in the page table to be zeroed.
         */
        ASSERT(!(dest->ht_flags & HTABLE_SHARED_PFN));
        ASSERT(!(dest->ht_flags & HTABLE_VLP));

        /*
         * On the hypervisor we don't use x86pte_access_pagetable() since
         * in this case the page is not pinned yet.
         */
#ifdef __xpv
        if (kpm_vbase == NULL) {
                kpreempt_disable();
                ASSERT(CPU->cpu_hat_info != NULL);
                mutex_enter(&CPU->cpu_hat_info->hci_mutex);
                x = PWIN_TABLE(CPU->cpu_id);
                newpte = MAKEPTE(dest->ht_pfn, 0) | PT_WRITABLE;
                xen_map(newpte, PWIN_VA(x));
                dst_va = (caddr_t)PT_INDEX_PTR(PWIN_VA(x), entry);
        } else
#endif
                dst_va = (caddr_t)x86pte_access_pagetable(dest, entry);

        size = count << mmu.pte_size_shift;
        ASSERT(size > BLOCKZEROALIGN);
#ifdef __i386
        if ((x86_feature & X86_SSE2) == 0)
                bzero(dst_va, size);
        else
#endif
                block_zero_no_xmm(dst_va, size);

#ifdef __xpv
        if (kpm_vbase == NULL) {
                xen_map(0, PWIN_VA(x));
                mutex_exit(&CPU->cpu_hat_info->hci_mutex);
                kpreempt_enable();
        } else
#endif
                x86pte_release_pagetable(dest);
}

/*
 * Called to ensure that all pagetables are in the system dump
 */
void
hat_dump(void)
{
        hat_t *hat;
        uint_t h;
        htable_t *ht;

        /*
         * Dump all page tables
         */
        for (hat = kas.a_hat; hat != NULL; hat = hat->hat_next) {
                for (h = 0; h < hat->hat_num_hash; ++h) {
                        for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) {
                                if ((ht->ht_flags & HTABLE_VLP) == 0)
                                        dump_page(ht->ht_pfn);
                        }
                }
        }
}
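
/*
 * Illustrative sketch only, not part of the original file: the hat/hash/htable
 * walk in hat_dump() above is the generic pattern for whole-system pagetable
 * scans. A hypothetical debug helper counting non-VLP pagetables the same way
 * might look like this:
 *
 *      static uint_t
 *      htable_count_all(void)
 *      {
 *              hat_t *hat;
 *              htable_t *ht;
 *              uint_t h, n = 0;
 *
 *              for (hat = kas.a_hat; hat != NULL; hat = hat->hat_next)
 *                      for (h = 0; h < hat->hat_num_hash; ++h)
 *                              for (ht = hat->hat_ht_hash[h]; ht != NULL;
 *                                  ht = ht->ht_next)
 *                                      if ((ht->ht_flags & HTABLE_VLP) == 0)
 *                                              ++n;
 *              return (n);
 *      }
 */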