1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #include <sys/types.h> 28 #include <sys/sysmacros.h> 29 #include <sys/kmem.h> 30 #include <sys/atomic.h> 31 #include <sys/bitmap.h> 32 #include <sys/machparam.h> 33 #include <sys/machsystm.h> 34 #include <sys/mman.h> 35 #include <sys/systm.h> 36 #include <sys/cpuvar.h> 37 #include <sys/thread.h> 38 #include <sys/proc.h> 39 #include <sys/cpu.h> 40 #include <sys/kmem.h> 41 #include <sys/disp.h> 42 #include <sys/vmem.h> 43 #include <sys/vmsystm.h> 44 #include <sys/promif.h> 45 #include <sys/var.h> 46 #include <sys/x86_archext.h> 47 #include <sys/archsystm.h> 48 #include <sys/bootconf.h> 49 #include <sys/dumphdr.h> 50 #include <vm/seg_kmem.h> 51 #include <vm/seg_kpm.h> 52 #include <vm/hat.h> 53 #include <vm/hat_i86.h> 54 #include <sys/cmn_err.h> 55 #include <sys/panic.h> 56 57 #ifdef __xpv 58 #include <sys/hypervisor.h> 59 #include <sys/xpv_panic.h> 60 #endif 61 62 #include <sys/bootinfo.h> 63 #include <vm/kboot_mmu.h> 64 65 static void x86pte_zero(htable_t *dest, uint_t entry, uint_t count); 66 67 kmem_cache_t *htable_cache; 68 69 /* 70 * The variable htable_reserve_amount, rather than HTABLE_RESERVE_AMOUNT, 71 * is used in order to facilitate testing of the htable_steal() code. 72 * By resetting htable_reserve_amount to a lower value, we can force 73 * stealing to occur. The reserve amount is a guess to get us through boot. 74 */ 75 #define HTABLE_RESERVE_AMOUNT (200) 76 uint_t htable_reserve_amount = HTABLE_RESERVE_AMOUNT; 77 kmutex_t htable_reserve_mutex; 78 uint_t htable_reserve_cnt; 79 htable_t *htable_reserve_pool; 80 81 /* 82 * Used to hand test htable_steal(). 83 */ 84 #ifdef DEBUG 85 ulong_t force_steal = 0; 86 ulong_t ptable_cnt = 0; 87 #endif 88 89 /* 90 * This variable is so that we can tune this via /etc/system 91 * Any value works, but a power of two <= mmu.ptes_per_table is best. 
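 *
 * For example, it can be changed with a line like the following in
 * /etc/system (value shown is illustrative only):
 *
 *	set htable_steal_passes = 4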
92 */ 93 uint_t htable_steal_passes = 8; 94 95 /* 96 * mutex stuff for access to htable hash 97 */ 98 #define NUM_HTABLE_MUTEX 128 99 kmutex_t htable_mutex[NUM_HTABLE_MUTEX]; 100 #define HTABLE_MUTEX_HASH(h) ((h) & (NUM_HTABLE_MUTEX - 1)) 101 102 #define HTABLE_ENTER(h) mutex_enter(&htable_mutex[HTABLE_MUTEX_HASH(h)]); 103 #define HTABLE_EXIT(h) mutex_exit(&htable_mutex[HTABLE_MUTEX_HASH(h)]); 104 105 /* 106 * forward declarations 107 */ 108 static void link_ptp(htable_t *higher, htable_t *new, uintptr_t vaddr); 109 static void unlink_ptp(htable_t *higher, htable_t *old, uintptr_t vaddr); 110 static void htable_free(htable_t *ht); 111 static x86pte_t *x86pte_access_pagetable(htable_t *ht, uint_t index); 112 static void x86pte_release_pagetable(htable_t *ht); 113 static x86pte_t x86pte_cas(htable_t *ht, uint_t entry, x86pte_t old, 114 x86pte_t new); 115 116 /* 117 * A counter to track if we are stealing or reaping htables. When non-zero 118 * htable_free() will directly free htables (either to the reserve or kmem) 119 * instead of putting them in a hat's htable cache. 120 */ 121 uint32_t htable_dont_cache = 0; 122 123 /* 124 * Track the number of active pagetables, so we can know how many to reap 125 */ 126 static uint32_t active_ptables = 0; 127 128 #ifdef __xpv 129 /* 130 * Deal with hypervisor complications. 131 */ 132 void 133 xen_flush_va(caddr_t va) 134 { 135 struct mmuext_op t; 136 uint_t count; 137 138 if (IN_XPV_PANIC()) { 139 mmu_tlbflush_entry((caddr_t)va); 140 } else { 141 t.cmd = MMUEXT_INVLPG_LOCAL; 142 t.arg1.linear_addr = (uintptr_t)va; 143 if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0) 144 panic("HYPERVISOR_mmuext_op() failed"); 145 ASSERT(count == 1); 146 } 147 } 148 149 void 150 xen_gflush_va(caddr_t va, cpuset_t cpus) 151 { 152 struct mmuext_op t; 153 uint_t count; 154 155 if (IN_XPV_PANIC()) { 156 mmu_tlbflush_entry((caddr_t)va); 157 return; 158 } 159 160 t.cmd = MMUEXT_INVLPG_MULTI; 161 t.arg1.linear_addr = (uintptr_t)va; 162 /*LINTED: constant in conditional context*/ 163 set_xen_guest_handle(t.arg2.vcpumask, &cpus); 164 if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0) 165 panic("HYPERVISOR_mmuext_op() failed"); 166 ASSERT(count == 1); 167 } 168 169 void 170 xen_flush_tlb() 171 { 172 struct mmuext_op t; 173 uint_t count; 174 175 if (IN_XPV_PANIC()) { 176 xpv_panic_reload_cr3(); 177 } else { 178 t.cmd = MMUEXT_TLB_FLUSH_LOCAL; 179 if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0) 180 panic("HYPERVISOR_mmuext_op() failed"); 181 ASSERT(count == 1); 182 } 183 } 184 185 void 186 xen_gflush_tlb(cpuset_t cpus) 187 { 188 struct mmuext_op t; 189 uint_t count; 190 191 ASSERT(!IN_XPV_PANIC()); 192 t.cmd = MMUEXT_TLB_FLUSH_MULTI; 193 /*LINTED: constant in conditional context*/ 194 set_xen_guest_handle(t.arg2.vcpumask, &cpus); 195 if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0) 196 panic("HYPERVISOR_mmuext_op() failed"); 197 ASSERT(count == 1); 198 } 199 200 /* 201 * Install/Adjust a kpm mapping under the hypervisor. 202 * Value of "how" should be: 203 * PT_WRITABLE | PT_VALID - regular kpm mapping 204 * PT_VALID - make mapping read-only 205 * 0 - remove mapping 206 * 207 * returns 0 on success. non-zero for failure. 
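 *
 * For example, htable_alloc() below makes a freshly allocated pagetable
 * page read-only in the kpm range with a call of the form:
 *
 *	(void) xen_kpm_page(pfn, PT_VALID);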
208 */ 209 int 210 xen_kpm_page(pfn_t pfn, uint_t how) 211 { 212 paddr_t pa = mmu_ptob((paddr_t)pfn); 213 x86pte_t pte = PT_NOCONSIST | PT_REF | PT_MOD; 214 215 if (kpm_vbase == NULL) 216 return (0); 217 218 if (how) 219 pte |= pa_to_ma(pa) | how; 220 else 221 pte = 0; 222 return (HYPERVISOR_update_va_mapping((uintptr_t)kpm_vbase + pa, 223 pte, UVMF_INVLPG | UVMF_ALL)); 224 } 225 226 void 227 xen_pin(pfn_t pfn, level_t lvl) 228 { 229 struct mmuext_op t; 230 uint_t count; 231 232 t.cmd = MMUEXT_PIN_L1_TABLE + lvl; 233 t.arg1.mfn = pfn_to_mfn(pfn); 234 if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0) 235 panic("HYPERVISOR_mmuext_op() failed"); 236 ASSERT(count == 1); 237 } 238 239 void 240 xen_unpin(pfn_t pfn) 241 { 242 struct mmuext_op t; 243 uint_t count; 244 245 t.cmd = MMUEXT_UNPIN_TABLE; 246 t.arg1.mfn = pfn_to_mfn(pfn); 247 if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0) 248 panic("HYPERVISOR_mmuext_op() failed"); 249 ASSERT(count == 1); 250 } 251 252 static void 253 xen_map(uint64_t pte, caddr_t va) 254 { 255 if (HYPERVISOR_update_va_mapping((uintptr_t)va, pte, 256 UVMF_INVLPG | UVMF_LOCAL)) 257 panic("HYPERVISOR_update_va_mapping() failed"); 258 } 259 #endif /* __xpv */ 260 261 /* 262 * Allocate a memory page for a hardware page table. 263 * 264 * A wrapper around page_get_physical(), with some extra checks. 265 */ 266 static pfn_t 267 ptable_alloc(void) 268 { 269 pfn_t pfn; 270 page_t *pp; 271 272 pfn = PFN_INVALID; 273 atomic_add_32(&active_ptables, 1); 274 275 /* 276 * The first check is to see if there is memory in the system. If we 277 * drop to throttlefree, then fail the ptable_alloc() and let the 278 * stealing code kick in. Note that we have to do this test here, 279 * since the test in page_create_throttle() would let the NOSLEEP 280 * allocation go through and deplete the page reserves. 281 * 282 * The !NOMEMWAIT() lets pageout, fsflush, etc. skip this check. 283 */ 284 if (!NOMEMWAIT() && freemem <= throttlefree + 1) 285 return (PFN_INVALID); 286 287 #ifdef DEBUG 288 /* 289 * This code makes htable_steal() easier to test. By setting 290 * force_steal we force pagetable allocations to fall 291 * into the stealing code. Roughly 1 in ever "force_steal" 292 * page table allocations will fail. 293 */ 294 if (proc_pageout != NULL && force_steal > 1 && 295 ++ptable_cnt > force_steal) { 296 ptable_cnt = 0; 297 return (PFN_INVALID); 298 } 299 #endif /* DEBUG */ 300 301 pp = page_get_physical(KM_NOSLEEP); 302 if (pp == NULL) 303 return (PFN_INVALID); 304 ASSERT(PAGE_SHARED(pp)); 305 pfn = pp->p_pagenum; 306 if (pfn == PFN_INVALID) 307 panic("ptable_alloc(): Invalid PFN!!"); 308 HATSTAT_INC(hs_ptable_allocs); 309 return (pfn); 310 } 311 312 /* 313 * Free an htable's associated page table page. See the comments 314 * for ptable_alloc(). 315 */ 316 static void 317 ptable_free(pfn_t pfn) 318 { 319 page_t *pp = page_numtopp_nolock(pfn); 320 321 /* 322 * need to destroy the page used for the pagetable 323 */ 324 ASSERT(pfn != PFN_INVALID); 325 HATSTAT_INC(hs_ptable_frees); 326 atomic_add_32(&active_ptables, -1); 327 if (pp == NULL) 328 panic("ptable_free(): no page for pfn!"); 329 ASSERT(pfn == pp->p_pagenum); 330 ASSERT(!IN_XPV_PANIC()); 331 #ifdef __xpv 332 if (kpm_vbase && xen_kpm_page(pfn, PT_VALID | PT_WRITABLE) < 0) 333 panic("failure making kpm r/w pfn=0x%lx", pfn); 334 #endif 335 page_free_physical(pp); 336 } 337 338 /* 339 * Put one htable on the reserve list. 
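 * The htable must have already given up its pagetable page (ht_pfn is
 * PFN_INVALID); htable_get_reserve() below hands it back out.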
340 */ 341 static void 342 htable_put_reserve(htable_t *ht) 343 { 344 ht->ht_hat = NULL; /* no longer tied to a hat */ 345 ASSERT(ht->ht_pfn == PFN_INVALID); 346 HATSTAT_INC(hs_htable_rputs); 347 mutex_enter(&htable_reserve_mutex); 348 ht->ht_next = htable_reserve_pool; 349 htable_reserve_pool = ht; 350 ++htable_reserve_cnt; 351 mutex_exit(&htable_reserve_mutex); 352 } 353 354 /* 355 * Take one htable from the reserve. 356 */ 357 static htable_t * 358 htable_get_reserve(void) 359 { 360 htable_t *ht = NULL; 361 362 mutex_enter(&htable_reserve_mutex); 363 if (htable_reserve_cnt != 0) { 364 ht = htable_reserve_pool; 365 ASSERT(ht != NULL); 366 ASSERT(ht->ht_pfn == PFN_INVALID); 367 htable_reserve_pool = ht->ht_next; 368 --htable_reserve_cnt; 369 HATSTAT_INC(hs_htable_rgets); 370 } 371 mutex_exit(&htable_reserve_mutex); 372 return (ht); 373 } 374 375 /* 376 * Allocate initial htables and put them on the reserve list 377 */ 378 void 379 htable_initial_reserve(uint_t count) 380 { 381 htable_t *ht; 382 383 count += HTABLE_RESERVE_AMOUNT; 384 while (count > 0) { 385 ht = kmem_cache_alloc(htable_cache, KM_NOSLEEP); 386 ASSERT(ht != NULL); 387 388 ASSERT(use_boot_reserve); 389 ht->ht_pfn = PFN_INVALID; 390 htable_put_reserve(ht); 391 --count; 392 } 393 } 394 395 /* 396 * Readjust the reserves after a thread finishes using them. 397 */ 398 void 399 htable_adjust_reserve() 400 { 401 htable_t *ht; 402 403 /* 404 * Free any excess htables in the reserve list 405 */ 406 while (htable_reserve_cnt > htable_reserve_amount && 407 !USE_HAT_RESERVES()) { 408 ht = htable_get_reserve(); 409 if (ht == NULL) 410 return; 411 ASSERT(ht->ht_pfn == PFN_INVALID); 412 kmem_cache_free(htable_cache, ht); 413 } 414 } 415 416 417 /* 418 * This routine steals htables from user processes for htable_alloc() or 419 * for htable_reap(). 420 */ 421 static htable_t * 422 htable_steal(uint_t cnt) 423 { 424 hat_t *hat = kas.a_hat; /* list starts with khat */ 425 htable_t *list = NULL; 426 htable_t *ht; 427 htable_t *higher; 428 uint_t h; 429 uint_t h_start; 430 static uint_t h_seed = 0; 431 uint_t e; 432 uintptr_t va; 433 x86pte_t pte; 434 uint_t stolen = 0; 435 uint_t pass; 436 uint_t threshold; 437 438 /* 439 * Limit htable_steal_passes to something reasonable 440 */ 441 if (htable_steal_passes == 0) 442 htable_steal_passes = 1; 443 if (htable_steal_passes > mmu.ptes_per_table) 444 htable_steal_passes = mmu.ptes_per_table; 445 446 /* 447 * Loop through all user hats. The 1st pass takes cached htables that 448 * aren't in use. The later passes steal by removing mappings, too. 449 */ 450 atomic_add_32(&htable_dont_cache, 1); 451 for (pass = 0; pass <= htable_steal_passes && stolen < cnt; ++pass) { 452 threshold = pass * mmu.ptes_per_table / htable_steal_passes; 453 hat = kas.a_hat; 454 for (;;) { 455 456 /* 457 * Clear the victim flag and move to next hat 458 */ 459 mutex_enter(&hat_list_lock); 460 if (hat != kas.a_hat) { 461 hat->hat_flags &= ~HAT_VICTIM; 462 cv_broadcast(&hat_list_cv); 463 } 464 hat = hat->hat_next; 465 466 /* 467 * Skip any hat that is already being stolen from. 468 * 469 * We skip SHARED hats, as these are dummy 470 * hats that host ISM shared page tables. 471 * 472 * We also skip if HAT_FREEING because hat_pte_unmap() 473 * won't zero out the PTE's. That would lead to hitting 474 * stale PTEs either here or under hat_unload() when we 475 * steal and unload the same page table in competing 476 * threads. 
477 */ 478 while (hat != NULL && 479 (hat->hat_flags & 480 (HAT_VICTIM | HAT_SHARED | HAT_FREEING)) != 0) 481 hat = hat->hat_next; 482 483 if (hat == NULL) { 484 mutex_exit(&hat_list_lock); 485 break; 486 } 487 488 /* 489 * Are we finished? 490 */ 491 if (stolen == cnt) { 492 /* 493 * Try to spread the pain of stealing, 494 * move victim HAT to the end of the HAT list. 495 */ 496 if (pass >= 1 && cnt == 1 && 497 kas.a_hat->hat_prev != hat) { 498 499 /* unlink victim hat */ 500 if (hat->hat_prev) 501 hat->hat_prev->hat_next = 502 hat->hat_next; 503 else 504 kas.a_hat->hat_next = 505 hat->hat_next; 506 if (hat->hat_next) 507 hat->hat_next->hat_prev = 508 hat->hat_prev; 509 else 510 kas.a_hat->hat_prev = 511 hat->hat_prev; 512 513 514 /* relink at end of hat list */ 515 hat->hat_next = NULL; 516 hat->hat_prev = kas.a_hat->hat_prev; 517 if (hat->hat_prev) 518 hat->hat_prev->hat_next = hat; 519 else 520 kas.a_hat->hat_next = hat; 521 kas.a_hat->hat_prev = hat; 522 523 } 524 525 mutex_exit(&hat_list_lock); 526 break; 527 } 528 529 /* 530 * Mark the HAT as a stealing victim. 531 */ 532 hat->hat_flags |= HAT_VICTIM; 533 mutex_exit(&hat_list_lock); 534 535 /* 536 * Take any htables from the hat's cached "free" list. 537 */ 538 hat_enter(hat); 539 while ((ht = hat->hat_ht_cached) != NULL && 540 stolen < cnt) { 541 hat->hat_ht_cached = ht->ht_next; 542 ht->ht_next = list; 543 list = ht; 544 ++stolen; 545 } 546 hat_exit(hat); 547 548 /* 549 * Don't steal on first pass. 550 */ 551 if (pass == 0 || stolen == cnt) 552 continue; 553 554 /* 555 * Search the active htables for one to steal. 556 * Start at a different hash bucket every time to 557 * help spread the pain of stealing. 558 */ 559 h = h_start = h_seed++ % hat->hat_num_hash; 560 do { 561 higher = NULL; 562 HTABLE_ENTER(h); 563 for (ht = hat->hat_ht_hash[h]; ht; 564 ht = ht->ht_next) { 565 566 /* 567 * Can we rule out reaping? 568 */ 569 if (ht->ht_busy != 0 || 570 (ht->ht_flags & HTABLE_SHARED_PFN)|| 571 ht->ht_level > 0 || 572 ht->ht_valid_cnt > threshold || 573 ht->ht_lock_cnt != 0) 574 continue; 575 576 /* 577 * Increment busy so the htable can't 578 * disappear. We drop the htable mutex 579 * to avoid deadlocks with 580 * hat_pageunload() and the hment mutex 581 * while we call hat_pte_unmap() 582 */ 583 ++ht->ht_busy; 584 HTABLE_EXIT(h); 585 586 /* 587 * Try stealing. 588 * - unload and invalidate all PTEs 589 */ 590 for (e = 0, va = ht->ht_vaddr; 591 e < HTABLE_NUM_PTES(ht) && 592 ht->ht_valid_cnt > 0 && 593 ht->ht_busy == 1 && 594 ht->ht_lock_cnt == 0; 595 ++e, va += MMU_PAGESIZE) { 596 pte = x86pte_get(ht, e); 597 if (!PTE_ISVALID(pte)) 598 continue; 599 hat_pte_unmap(ht, e, 600 HAT_UNLOAD, pte, NULL); 601 } 602 603 /* 604 * Reacquire htable lock. If we didn't 605 * remove all mappings in the table, 606 * or another thread added a new mapping 607 * behind us, give up on this table. 608 */ 609 HTABLE_ENTER(h); 610 if (ht->ht_busy != 1 || 611 ht->ht_valid_cnt != 0 || 612 ht->ht_lock_cnt != 0) { 613 --ht->ht_busy; 614 continue; 615 } 616 617 /* 618 * Steal it and unlink the page table. 619 */ 620 higher = ht->ht_parent; 621 unlink_ptp(higher, ht, ht->ht_vaddr); 622 623 /* 624 * remove from the hash list 625 */ 626 if (ht->ht_next) 627 ht->ht_next->ht_prev = 628 ht->ht_prev; 629 630 if (ht->ht_prev) { 631 ht->ht_prev->ht_next = 632 ht->ht_next; 633 } else { 634 ASSERT(hat->hat_ht_hash[h] == 635 ht); 636 hat->hat_ht_hash[h] = 637 ht->ht_next; 638 } 639 640 /* 641 * Break to outer loop to release the 642 * higher (ht_parent) pagetable. 
This 643 * spreads out the pain caused by 644 * pagefaults. 645 */ 646 ht->ht_next = list; 647 list = ht; 648 ++stolen; 649 break; 650 } 651 HTABLE_EXIT(h); 652 if (higher != NULL) 653 htable_release(higher); 654 if (++h == hat->hat_num_hash) 655 h = 0; 656 } while (stolen < cnt && h != h_start); 657 } 658 } 659 atomic_add_32(&htable_dont_cache, -1); 660 return (list); 661 } 662 663 /* 664 * This is invoked from kmem when the system is low on memory. We try 665 * to free hments, htables, and ptables to improve the memory situation. 666 */ 667 /*ARGSUSED*/ 668 static void 669 htable_reap(void *handle) 670 { 671 uint_t reap_cnt; 672 htable_t *list; 673 htable_t *ht; 674 675 HATSTAT_INC(hs_reap_attempts); 676 if (!can_steal_post_boot) 677 return; 678 679 /* 680 * Try to reap 5% of the page tables bounded by a maximum of 681 * 5% of physmem and a minimum of 10. 682 */ 683 reap_cnt = MIN(MAX(physmem / 20, active_ptables / 20), 10); 684 685 /* 686 * Let htable_steal() do the work, we just call htable_free() 687 */ 688 XPV_DISALLOW_MIGRATE(); 689 list = htable_steal(reap_cnt); 690 XPV_ALLOW_MIGRATE(); 691 while ((ht = list) != NULL) { 692 list = ht->ht_next; 693 HATSTAT_INC(hs_reaped); 694 htable_free(ht); 695 } 696 697 /* 698 * Free up excess reserves 699 */ 700 htable_adjust_reserve(); 701 hment_adjust_reserve(); 702 } 703 704 /* 705 * Allocate an htable, stealing one or using the reserve if necessary 706 */ 707 static htable_t * 708 htable_alloc( 709 hat_t *hat, 710 uintptr_t vaddr, 711 level_t level, 712 htable_t *shared) 713 { 714 htable_t *ht = NULL; 715 uint_t is_vlp; 716 uint_t is_bare = 0; 717 uint_t need_to_zero = 1; 718 int kmflags = (can_steal_post_boot ? KM_NOSLEEP : KM_SLEEP); 719 720 if (level < 0 || level > TOP_LEVEL(hat)) 721 panic("htable_alloc(): level %d out of range\n", level); 722 723 is_vlp = (hat->hat_flags & HAT_VLP) && level == VLP_LEVEL; 724 if (is_vlp || shared != NULL) 725 is_bare = 1; 726 727 /* 728 * First reuse a cached htable from the hat_ht_cached field, this 729 * avoids unnecessary trips through kmem/page allocators. 730 */ 731 if (hat->hat_ht_cached != NULL && !is_bare) { 732 hat_enter(hat); 733 ht = hat->hat_ht_cached; 734 if (ht != NULL) { 735 hat->hat_ht_cached = ht->ht_next; 736 need_to_zero = 0; 737 /* XX64 ASSERT() they're all zero somehow */ 738 ASSERT(ht->ht_pfn != PFN_INVALID); 739 } 740 hat_exit(hat); 741 } 742 743 if (ht == NULL) { 744 /* 745 * Allocate an htable, possibly refilling the reserves. 746 */ 747 if (USE_HAT_RESERVES()) { 748 ht = htable_get_reserve(); 749 } else { 750 /* 751 * Donate successful htable allocations to the reserve. 752 */ 753 for (;;) { 754 ht = kmem_cache_alloc(htable_cache, kmflags); 755 if (ht == NULL) 756 break; 757 ht->ht_pfn = PFN_INVALID; 758 if (USE_HAT_RESERVES() || 759 htable_reserve_cnt >= htable_reserve_amount) 760 break; 761 htable_put_reserve(ht); 762 } 763 } 764 765 /* 766 * allocate a page for the hardware page table if needed 767 */ 768 if (ht != NULL && !is_bare) { 769 ht->ht_hat = hat; 770 ht->ht_pfn = ptable_alloc(); 771 if (ht->ht_pfn == PFN_INVALID) { 772 if (USE_HAT_RESERVES()) 773 htable_put_reserve(ht); 774 else 775 kmem_cache_free(htable_cache, ht); 776 ht = NULL; 777 } 778 } 779 } 780 781 /* 782 * If allocations failed, kick off a kmem_reap() and resort to 783 * htable steal(). We may spin here if the system is very low on 784 * memory. If the kernel itself has consumed all memory and kmem_reap() 785 * can't free up anything, then we'll really get stuck here. 
786 * That should only happen in a system where the administrator has 787 * misconfigured VM parameters via /etc/system. 788 */ 789 while (ht == NULL && can_steal_post_boot) { 790 kmem_reap(); 791 ht = htable_steal(1); 792 HATSTAT_INC(hs_steals); 793 794 /* 795 * If we stole for a bare htable, release the pagetable page. 796 */ 797 if (ht != NULL) { 798 if (is_bare) { 799 ptable_free(ht->ht_pfn); 800 ht->ht_pfn = PFN_INVALID; 801 #if defined(__xpv) && defined(__amd64) 802 /* 803 * make stolen page table writable again in kpm 804 */ 805 } else if (kpm_vbase && xen_kpm_page(ht->ht_pfn, 806 PT_VALID | PT_WRITABLE) < 0) { 807 panic("failure making kpm r/w pfn=0x%lx", 808 ht->ht_pfn); 809 #endif 810 } 811 } 812 } 813 814 /* 815 * All attempts to allocate or steal failed. This should only happen 816 * if we run out of memory during boot, due perhaps to a huge 817 * boot_archive. At this point there's no way to continue. 818 */ 819 if (ht == NULL) 820 panic("htable_alloc(): couldn't steal\n"); 821 822 #if defined(__amd64) && defined(__xpv) 823 /* 824 * Under the 64-bit hypervisor, we have 2 top level page tables. 825 * If this allocation fails, we'll resort to stealing. 826 * We use the stolen page indirectly, by freeing the 827 * stolen htable first. 828 */ 829 if (level == mmu.max_level) { 830 for (;;) { 831 htable_t *stolen; 832 833 hat->hat_user_ptable = ptable_alloc(); 834 if (hat->hat_user_ptable != PFN_INVALID) 835 break; 836 stolen = htable_steal(1); 837 if (stolen == NULL) 838 panic("2nd steal ptable failed\n"); 839 htable_free(stolen); 840 } 841 block_zero_no_xmm(kpm_vbase + pfn_to_pa(hat->hat_user_ptable), 842 MMU_PAGESIZE); 843 } 844 #endif 845 846 /* 847 * Shared page tables have all entries locked and entries may not 848 * be added or deleted. 849 */ 850 ht->ht_flags = 0; 851 if (shared != NULL) { 852 ASSERT(shared->ht_valid_cnt > 0); 853 ht->ht_flags |= HTABLE_SHARED_PFN; 854 ht->ht_pfn = shared->ht_pfn; 855 ht->ht_lock_cnt = 0; 856 ht->ht_valid_cnt = 0; /* updated in hat_share() */ 857 ht->ht_shares = shared; 858 need_to_zero = 0; 859 } else { 860 ht->ht_shares = NULL; 861 ht->ht_lock_cnt = 0; 862 ht->ht_valid_cnt = 0; 863 } 864 865 /* 866 * setup flags, etc. for VLP htables 867 */ 868 if (is_vlp) { 869 ht->ht_flags |= HTABLE_VLP; 870 ASSERT(ht->ht_pfn == PFN_INVALID); 871 need_to_zero = 0; 872 } 873 874 /* 875 * fill in the htable 876 */ 877 ht->ht_hat = hat; 878 ht->ht_parent = NULL; 879 ht->ht_vaddr = vaddr; 880 ht->ht_level = level; 881 ht->ht_busy = 1; 882 ht->ht_next = NULL; 883 ht->ht_prev = NULL; 884 885 /* 886 * Zero out any freshly allocated page table 887 */ 888 if (need_to_zero) 889 x86pte_zero(ht, 0, mmu.ptes_per_table); 890 891 #if defined(__amd64) && defined(__xpv) 892 if (!is_bare && kpm_vbase) { 893 (void) xen_kpm_page(ht->ht_pfn, PT_VALID); 894 if (level == mmu.max_level) 895 (void) xen_kpm_page(hat->hat_user_ptable, PT_VALID); 896 } 897 #endif 898 899 return (ht); 900 } 901 902 /* 903 * Free up an htable, either to a hat's cached list, the reserves or 904 * back to kmem. 905 */ 906 static void 907 htable_free(htable_t *ht) 908 { 909 hat_t *hat = ht->ht_hat; 910 911 /* 912 * If the process isn't exiting, cache the free htable in the hat 913 * structure. We always do this for the boot time reserve. We don't 914 * do this if the hat is exiting or we are stealing/reaping htables. 
915 */ 916 if (hat != NULL && 917 !(ht->ht_flags & HTABLE_SHARED_PFN) && 918 (use_boot_reserve || 919 (!(hat->hat_flags & HAT_FREEING) && !htable_dont_cache))) { 920 ASSERT((ht->ht_flags & HTABLE_VLP) == 0); 921 ASSERT(ht->ht_pfn != PFN_INVALID); 922 hat_enter(hat); 923 ht->ht_next = hat->hat_ht_cached; 924 hat->hat_ht_cached = ht; 925 hat_exit(hat); 926 return; 927 } 928 929 /* 930 * If we have a hardware page table, free it. 931 * We don't free page tables that are accessed by sharing. 932 */ 933 if (ht->ht_flags & HTABLE_SHARED_PFN) { 934 ASSERT(ht->ht_pfn != PFN_INVALID); 935 } else if (!(ht->ht_flags & HTABLE_VLP)) { 936 ptable_free(ht->ht_pfn); 937 #if defined(__amd64) && defined(__xpv) 938 if (ht->ht_level == mmu.max_level) { 939 ptable_free(hat->hat_user_ptable); 940 hat->hat_user_ptable = PFN_INVALID; 941 } 942 #endif 943 } 944 ht->ht_pfn = PFN_INVALID; 945 946 /* 947 * Free it or put into reserves. 948 */ 949 if (USE_HAT_RESERVES() || htable_reserve_cnt < htable_reserve_amount) { 950 htable_put_reserve(ht); 951 } else { 952 kmem_cache_free(htable_cache, ht); 953 htable_adjust_reserve(); 954 } 955 } 956 957 958 /* 959 * This is called when a hat is being destroyed or swapped out. We reap all 960 * the remaining htables in the hat cache. If destroying all left over 961 * htables are also destroyed. 962 * 963 * We also don't need to invalidate any of the PTPs nor do any demapping. 964 */ 965 void 966 htable_purge_hat(hat_t *hat) 967 { 968 htable_t *ht; 969 int h; 970 971 /* 972 * Purge the htable cache if just reaping. 973 */ 974 if (!(hat->hat_flags & HAT_FREEING)) { 975 atomic_add_32(&htable_dont_cache, 1); 976 for (;;) { 977 hat_enter(hat); 978 ht = hat->hat_ht_cached; 979 if (ht == NULL) { 980 hat_exit(hat); 981 break; 982 } 983 hat->hat_ht_cached = ht->ht_next; 984 hat_exit(hat); 985 htable_free(ht); 986 } 987 atomic_add_32(&htable_dont_cache, -1); 988 return; 989 } 990 991 /* 992 * if freeing, no locking is needed 993 */ 994 while ((ht = hat->hat_ht_cached) != NULL) { 995 hat->hat_ht_cached = ht->ht_next; 996 htable_free(ht); 997 } 998 999 /* 1000 * walk thru the htable hash table and free all the htables in it. 1001 */ 1002 for (h = 0; h < hat->hat_num_hash; ++h) { 1003 while ((ht = hat->hat_ht_hash[h]) != NULL) { 1004 if (ht->ht_next) 1005 ht->ht_next->ht_prev = ht->ht_prev; 1006 1007 if (ht->ht_prev) { 1008 ht->ht_prev->ht_next = ht->ht_next; 1009 } else { 1010 ASSERT(hat->hat_ht_hash[h] == ht); 1011 hat->hat_ht_hash[h] = ht->ht_next; 1012 } 1013 htable_free(ht); 1014 } 1015 } 1016 } 1017 1018 /* 1019 * Unlink an entry for a table at vaddr and level out of the existing table 1020 * one level higher. We are always holding the HASH_ENTER() when doing this. 1021 */ 1022 static void 1023 unlink_ptp(htable_t *higher, htable_t *old, uintptr_t vaddr) 1024 { 1025 uint_t entry = htable_va2entry(vaddr, higher); 1026 x86pte_t expect = MAKEPTP(old->ht_pfn, old->ht_level); 1027 x86pte_t found; 1028 hat_t *hat = old->ht_hat; 1029 1030 ASSERT(higher->ht_busy > 0); 1031 ASSERT(higher->ht_valid_cnt > 0); 1032 ASSERT(old->ht_valid_cnt == 0); 1033 found = x86pte_cas(higher, entry, expect, 0); 1034 #ifdef __xpv 1035 /* 1036 * This is weird, but Xen apparently automatically unlinks empty 1037 * pagetables from the upper page table. So allow PTP to be 0 already. 
	 */
	if (found != expect && found != 0)
#else
	if (found != expect)
#endif
		panic("Bad PTP found=" FMT_PTE ", expected=" FMT_PTE,
		    found, expect);

	/*
	 * When a top level VLP page table entry changes, we must issue
	 * a reload of cr3 on all processors.
	 *
	 * If we don't need to do that, then we still have to INVLPG against
	 * an address covered by the inner page table, as the latest processors
	 * have TLB-like caches for non-leaf page table entries.
	 */
	if (!(hat->hat_flags & HAT_FREEING)) {
		hat_tlb_inval(hat, (higher->ht_flags & HTABLE_VLP) ?
		    DEMAP_ALL_ADDR : old->ht_vaddr);
	}

	HTABLE_DEC(higher->ht_valid_cnt);
}

/*
 * Link an entry for a new table at vaddr and level into the existing table
 * one level higher. We are always holding the HASH_ENTER() when doing this.
 */
static void
link_ptp(htable_t *higher, htable_t *new, uintptr_t vaddr)
{
	uint_t entry = htable_va2entry(vaddr, higher);
	x86pte_t newptp = MAKEPTP(new->ht_pfn, new->ht_level);
	x86pte_t found;

	ASSERT(higher->ht_busy > 0);

	ASSERT(new->ht_level != mmu.max_level);

	HTABLE_INC(higher->ht_valid_cnt);

	found = x86pte_cas(higher, entry, 0, newptp);
	if ((found & ~PT_REF) != 0)
		panic("HAT: ptp not 0, found=" FMT_PTE, found);

	/*
	 * When any top level VLP page table entry changes, we must issue
	 * a reload of cr3 on all processors using it.
	 * We also need to do this for the kernel hat on PAE 32 bit kernel.
	 */
	if (
#ifdef __i386
	    (higher->ht_hat == kas.a_hat && higher->ht_level == VLP_LEVEL) ||
#endif
	    (higher->ht_flags & HTABLE_VLP))
		hat_tlb_inval(higher->ht_hat, DEMAP_ALL_ADDR);
}

/*
 * Release of hold on an htable. If this is the last use and the pagetable
 * is empty we may want to free it, then recursively look at the pagetable
 * above it. The recursion is handled by the outer while() loop.
 *
 * On the metal, during process exit, we don't bother unlinking the tables from
 * upper level pagetables. They are instead handled in bulk by hat_free_end().
 * We can't do this on the hypervisor as we need the page table to be
 * implicitly unpinned before it goes to the free page lists. This can't
 * happen unless we fully unlink it from the page table hierarchy.
 */
void
htable_release(htable_t *ht)
{
	uint_t hashval;
	htable_t *shared;
	htable_t *higher;
	hat_t *hat;
	uintptr_t va;
	level_t level;

	while (ht != NULL) {
		shared = NULL;
		for (;;) {
			hat = ht->ht_hat;
			va = ht->ht_vaddr;
			level = ht->ht_level;
			hashval = HTABLE_HASH(hat, va, level);

			/*
			 * The common case is that this isn't the last use of
			 * an htable so we don't want to free the htable.
			 */
			HTABLE_ENTER(hashval);
			ASSERT(ht->ht_valid_cnt >= 0);
			ASSERT(ht->ht_busy > 0);
			if (ht->ht_valid_cnt > 0)
				break;
			if (ht->ht_busy > 1)
				break;
			ASSERT(ht->ht_lock_cnt == 0);

#if !defined(__xpv)
			/*
			 * we always release empty shared htables
			 */
			if (!(ht->ht_flags & HTABLE_SHARED_PFN)) {

				/*
				 * don't release if in address space tear down
				 */
				if (hat->hat_flags & HAT_FREEING)
					break;

				/*
				 * At and above max_page_level, free if it's for
				 * a boot-time kernel mapping below kernelbase.
1153 */ 1154 if (level >= mmu.max_page_level && 1155 (hat != kas.a_hat || va >= kernelbase)) 1156 break; 1157 } 1158 #endif /* __xpv */ 1159 1160 /* 1161 * Remember if we destroy an htable that shares its PFN 1162 * from elsewhere. 1163 */ 1164 if (ht->ht_flags & HTABLE_SHARED_PFN) { 1165 ASSERT(shared == NULL); 1166 shared = ht->ht_shares; 1167 HATSTAT_INC(hs_htable_unshared); 1168 } 1169 1170 /* 1171 * Handle release of a table and freeing the htable_t. 1172 * Unlink it from the table higher (ie. ht_parent). 1173 */ 1174 ASSERT(ht->ht_lock_cnt == 0); 1175 higher = ht->ht_parent; 1176 ASSERT(higher != NULL); 1177 1178 /* 1179 * Unlink the pagetable. 1180 */ 1181 unlink_ptp(higher, ht, va); 1182 1183 /* 1184 * remove this htable from its hash list 1185 */ 1186 if (ht->ht_next) 1187 ht->ht_next->ht_prev = ht->ht_prev; 1188 1189 if (ht->ht_prev) { 1190 ht->ht_prev->ht_next = ht->ht_next; 1191 } else { 1192 ASSERT(hat->hat_ht_hash[hashval] == ht); 1193 hat->hat_ht_hash[hashval] = ht->ht_next; 1194 } 1195 HTABLE_EXIT(hashval); 1196 htable_free(ht); 1197 ht = higher; 1198 } 1199 1200 ASSERT(ht->ht_busy >= 1); 1201 --ht->ht_busy; 1202 HTABLE_EXIT(hashval); 1203 1204 /* 1205 * If we released a shared htable, do a release on the htable 1206 * from which it shared 1207 */ 1208 ht = shared; 1209 } 1210 } 1211 1212 /* 1213 * Find the htable for the pagetable at the given level for the given address. 1214 * If found acquires a hold that eventually needs to be htable_release()d 1215 */ 1216 htable_t * 1217 htable_lookup(hat_t *hat, uintptr_t vaddr, level_t level) 1218 { 1219 uintptr_t base; 1220 uint_t hashval; 1221 htable_t *ht = NULL; 1222 1223 ASSERT(level >= 0); 1224 ASSERT(level <= TOP_LEVEL(hat)); 1225 1226 if (level == TOP_LEVEL(hat)) { 1227 #if defined(__amd64) 1228 /* 1229 * 32 bit address spaces on 64 bit kernels need to check 1230 * for overflow of the 32 bit address space 1231 */ 1232 if ((hat->hat_flags & HAT_VLP) && vaddr >= ((uint64_t)1 << 32)) 1233 return (NULL); 1234 #endif 1235 base = 0; 1236 } else { 1237 base = vaddr & LEVEL_MASK(level + 1); 1238 } 1239 1240 hashval = HTABLE_HASH(hat, base, level); 1241 HTABLE_ENTER(hashval); 1242 for (ht = hat->hat_ht_hash[hashval]; ht; ht = ht->ht_next) { 1243 if (ht->ht_hat == hat && 1244 ht->ht_vaddr == base && 1245 ht->ht_level == level) 1246 break; 1247 } 1248 if (ht) 1249 ++ht->ht_busy; 1250 1251 HTABLE_EXIT(hashval); 1252 return (ht); 1253 } 1254 1255 /* 1256 * Acquires a hold on a known htable (from a locked hment entry). 1257 */ 1258 void 1259 htable_acquire(htable_t *ht) 1260 { 1261 hat_t *hat = ht->ht_hat; 1262 level_t level = ht->ht_level; 1263 uintptr_t base = ht->ht_vaddr; 1264 uint_t hashval = HTABLE_HASH(hat, base, level); 1265 1266 HTABLE_ENTER(hashval); 1267 #ifdef DEBUG 1268 /* 1269 * make sure the htable is there 1270 */ 1271 { 1272 htable_t *h; 1273 1274 for (h = hat->hat_ht_hash[hashval]; 1275 h && h != ht; 1276 h = h->ht_next) 1277 ; 1278 ASSERT(h == ht); 1279 } 1280 #endif /* DEBUG */ 1281 ++ht->ht_busy; 1282 HTABLE_EXIT(hashval); 1283 } 1284 1285 /* 1286 * Find the htable for the pagetable at the given level for the given address. 1287 * If found acquires a hold that eventually needs to be htable_release()d 1288 * If not found the table is created. 1289 * 1290 * Since we can't hold a hash table mutex during allocation, we have to 1291 * drop it and redo the search on a create. Then we may have to free the newly 1292 * allocated htable if another thread raced in and created it ahead of us. 
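 *
 * A rough, simplified sketch of a caller (hypothetical code; real callers in
 * the HAT also deal with races, large pages and PTE attribute bookkeeping):
 *
 *	ht = htable_create(hat, vaddr, 0, NULL);
 *	entry = htable_va2entry(vaddr, ht);
 *	old = x86pte_set(ht, entry, pte, NULL);
 *	if (!PTE_ISVALID(old))
 *		HTABLE_INC(ht->ht_valid_cnt);
 *	htable_release(ht);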
1293 */ 1294 htable_t * 1295 htable_create( 1296 hat_t *hat, 1297 uintptr_t vaddr, 1298 level_t level, 1299 htable_t *shared) 1300 { 1301 uint_t h; 1302 level_t l; 1303 uintptr_t base; 1304 htable_t *ht; 1305 htable_t *higher = NULL; 1306 htable_t *new = NULL; 1307 1308 if (level < 0 || level > TOP_LEVEL(hat)) 1309 panic("htable_create(): level %d out of range\n", level); 1310 1311 /* 1312 * Create the page tables in top down order. 1313 */ 1314 for (l = TOP_LEVEL(hat); l >= level; --l) { 1315 new = NULL; 1316 if (l == TOP_LEVEL(hat)) 1317 base = 0; 1318 else 1319 base = vaddr & LEVEL_MASK(l + 1); 1320 1321 h = HTABLE_HASH(hat, base, l); 1322 try_again: 1323 /* 1324 * look up the htable at this level 1325 */ 1326 HTABLE_ENTER(h); 1327 if (l == TOP_LEVEL(hat)) { 1328 ht = hat->hat_htable; 1329 } else { 1330 for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) { 1331 ASSERT(ht->ht_hat == hat); 1332 if (ht->ht_vaddr == base && 1333 ht->ht_level == l) 1334 break; 1335 } 1336 } 1337 1338 /* 1339 * if we found the htable, increment its busy cnt 1340 * and if we had allocated a new htable, free it. 1341 */ 1342 if (ht != NULL) { 1343 /* 1344 * If we find a pre-existing shared table, it must 1345 * share from the same place. 1346 */ 1347 if (l == level && shared && ht->ht_shares && 1348 ht->ht_shares != shared) { 1349 panic("htable shared from wrong place " 1350 "found htable=%p shared=%p", 1351 (void *)ht, (void *)shared); 1352 } 1353 ++ht->ht_busy; 1354 HTABLE_EXIT(h); 1355 if (new) 1356 htable_free(new); 1357 if (higher != NULL) 1358 htable_release(higher); 1359 higher = ht; 1360 1361 /* 1362 * if we didn't find it on the first search 1363 * allocate a new one and search again 1364 */ 1365 } else if (new == NULL) { 1366 HTABLE_EXIT(h); 1367 new = htable_alloc(hat, base, l, 1368 l == level ? shared : NULL); 1369 goto try_again; 1370 1371 /* 1372 * 2nd search and still not there, use "new" table 1373 * Link new table into higher, when not at top level. 1374 */ 1375 } else { 1376 ht = new; 1377 if (higher != NULL) { 1378 link_ptp(higher, ht, base); 1379 ht->ht_parent = higher; 1380 } 1381 ht->ht_next = hat->hat_ht_hash[h]; 1382 ASSERT(ht->ht_prev == NULL); 1383 if (hat->hat_ht_hash[h]) 1384 hat->hat_ht_hash[h]->ht_prev = ht; 1385 hat->hat_ht_hash[h] = ht; 1386 HTABLE_EXIT(h); 1387 1388 /* 1389 * Note we don't do htable_release(higher). 1390 * That happens recursively when "new" is removed by 1391 * htable_release() or htable_steal(). 1392 */ 1393 higher = ht; 1394 1395 /* 1396 * If we just created a new shared page table we 1397 * increment the shared htable's busy count, so that 1398 * it can't be the victim of a steal even if it's empty. 1399 */ 1400 if (l == level && shared) { 1401 (void) htable_lookup(shared->ht_hat, 1402 shared->ht_vaddr, shared->ht_level); 1403 HATSTAT_INC(hs_htable_shared); 1404 } 1405 } 1406 } 1407 1408 return (ht); 1409 } 1410 1411 /* 1412 * Inherit initial pagetables from the boot program. On the 64-bit 1413 * hypervisor we also temporarily mark the p_index field of page table 1414 * pages, so we know not to try making them writable in seg_kpm. 
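 * Note that htable_attach() calls itself recursively for each lower level
 * pagetable it discovers while scanning the entries of this one.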
1415 */ 1416 void 1417 htable_attach( 1418 hat_t *hat, 1419 uintptr_t base, 1420 level_t level, 1421 htable_t *parent, 1422 pfn_t pfn) 1423 { 1424 htable_t *ht; 1425 uint_t h; 1426 uint_t i; 1427 x86pte_t pte; 1428 x86pte_t *ptep; 1429 page_t *pp; 1430 extern page_t *boot_claim_page(pfn_t); 1431 1432 ht = htable_get_reserve(); 1433 if (level == mmu.max_level) 1434 kas.a_hat->hat_htable = ht; 1435 ht->ht_hat = hat; 1436 ht->ht_parent = parent; 1437 ht->ht_vaddr = base; 1438 ht->ht_level = level; 1439 ht->ht_busy = 1; 1440 ht->ht_next = NULL; 1441 ht->ht_prev = NULL; 1442 ht->ht_flags = 0; 1443 ht->ht_pfn = pfn; 1444 ht->ht_lock_cnt = 0; 1445 ht->ht_valid_cnt = 0; 1446 if (parent != NULL) 1447 ++parent->ht_busy; 1448 1449 h = HTABLE_HASH(hat, base, level); 1450 HTABLE_ENTER(h); 1451 ht->ht_next = hat->hat_ht_hash[h]; 1452 ASSERT(ht->ht_prev == NULL); 1453 if (hat->hat_ht_hash[h]) 1454 hat->hat_ht_hash[h]->ht_prev = ht; 1455 hat->hat_ht_hash[h] = ht; 1456 HTABLE_EXIT(h); 1457 1458 /* 1459 * make sure the page table physical page is not FREE 1460 */ 1461 if (page_resv(1, KM_NOSLEEP) == 0) 1462 panic("page_resv() failed in ptable alloc"); 1463 1464 pp = boot_claim_page(pfn); 1465 ASSERT(pp != NULL); 1466 page_downgrade(pp); 1467 #if defined(__xpv) && defined(__amd64) 1468 /* 1469 * Record in the page_t that is a pagetable for segkpm setup. 1470 */ 1471 if (kpm_vbase) 1472 pp->p_index = 1; 1473 #endif 1474 1475 /* 1476 * Count valid mappings and recursively attach lower level pagetables. 1477 */ 1478 ptep = kbm_remap_window(pfn_to_pa(pfn), 0); 1479 for (i = 0; i < HTABLE_NUM_PTES(ht); ++i) { 1480 if (mmu.pae_hat) 1481 pte = ptep[i]; 1482 else 1483 pte = ((x86pte32_t *)ptep)[i]; 1484 if (!IN_HYPERVISOR_VA(base) && PTE_ISVALID(pte)) { 1485 ++ht->ht_valid_cnt; 1486 if (!PTE_ISPAGE(pte, level)) { 1487 htable_attach(hat, base, level - 1, 1488 ht, PTE2PFN(pte, level)); 1489 ptep = kbm_remap_window(pfn_to_pa(pfn), 0); 1490 } 1491 } 1492 base += LEVEL_SIZE(level); 1493 if (base == mmu.hole_start) 1494 base = (mmu.hole_end + MMU_PAGEOFFSET) & MMU_PAGEMASK; 1495 } 1496 1497 /* 1498 * As long as all the mappings we had were below kernel base 1499 * we can release the htable. 1500 */ 1501 if (base < kernelbase) 1502 htable_release(ht); 1503 } 1504 1505 /* 1506 * Walk through a given htable looking for the first valid entry. This 1507 * routine takes both a starting and ending address. The starting address 1508 * is required to be within the htable provided by the caller, but there is 1509 * no such restriction on the ending address. 1510 * 1511 * If the routine finds a valid entry in the htable (at or beyond the 1512 * starting address), the PTE (and its address) will be returned. 1513 * This PTE may correspond to either a page or a pagetable - it is the 1514 * caller's responsibility to determine which. If no valid entry is 1515 * found, 0 (and invalid PTE) and the next unexamined address will be 1516 * returned. 1517 * 1518 * The loop has been carefully coded for optimization. 
1519 */ 1520 static x86pte_t 1521 htable_scan(htable_t *ht, uintptr_t *vap, uintptr_t eaddr) 1522 { 1523 uint_t e; 1524 x86pte_t found_pte = (x86pte_t)0; 1525 caddr_t pte_ptr; 1526 caddr_t end_pte_ptr; 1527 int l = ht->ht_level; 1528 uintptr_t va = *vap & LEVEL_MASK(l); 1529 size_t pgsize = LEVEL_SIZE(l); 1530 1531 ASSERT(va >= ht->ht_vaddr); 1532 ASSERT(va <= HTABLE_LAST_PAGE(ht)); 1533 1534 /* 1535 * Compute the starting index and ending virtual address 1536 */ 1537 e = htable_va2entry(va, ht); 1538 1539 /* 1540 * The following page table scan code knows that the valid 1541 * bit of a PTE is in the lowest byte AND that x86 is little endian!! 1542 */ 1543 pte_ptr = (caddr_t)x86pte_access_pagetable(ht, 0); 1544 end_pte_ptr = (caddr_t)PT_INDEX_PTR(pte_ptr, HTABLE_NUM_PTES(ht)); 1545 pte_ptr = (caddr_t)PT_INDEX_PTR((x86pte_t *)pte_ptr, e); 1546 while (!PTE_ISVALID(*pte_ptr)) { 1547 va += pgsize; 1548 if (va >= eaddr) 1549 break; 1550 pte_ptr += mmu.pte_size; 1551 ASSERT(pte_ptr <= end_pte_ptr); 1552 if (pte_ptr == end_pte_ptr) 1553 break; 1554 } 1555 1556 /* 1557 * if we found a valid PTE, load the entire PTE 1558 */ 1559 if (va < eaddr && pte_ptr != end_pte_ptr) 1560 found_pte = GET_PTE((x86pte_t *)pte_ptr); 1561 x86pte_release_pagetable(ht); 1562 1563 #if defined(__amd64) 1564 /* 1565 * deal with VA hole on amd64 1566 */ 1567 if (l == mmu.max_level && va >= mmu.hole_start && va <= mmu.hole_end) 1568 va = mmu.hole_end + va - mmu.hole_start; 1569 #endif /* __amd64 */ 1570 1571 *vap = va; 1572 return (found_pte); 1573 } 1574 1575 /* 1576 * Find the address and htable for the first populated translation at or 1577 * above the given virtual address. The caller may also specify an upper 1578 * limit to the address range to search. Uses level information to quickly 1579 * skip unpopulated sections of virtual address spaces. 1580 * 1581 * If not found returns NULL. When found, returns the htable and virt addr 1582 * and has a hold on the htable. 1583 */ 1584 x86pte_t 1585 htable_walk( 1586 struct hat *hat, 1587 htable_t **htp, 1588 uintptr_t *vaddr, 1589 uintptr_t eaddr) 1590 { 1591 uintptr_t va = *vaddr; 1592 htable_t *ht; 1593 htable_t *prev = *htp; 1594 level_t l; 1595 level_t max_mapped_level; 1596 x86pte_t pte; 1597 1598 ASSERT(eaddr > va); 1599 1600 /* 1601 * If this is a user address, then we know we need not look beyond 1602 * kernelbase. 1603 */ 1604 ASSERT(hat == kas.a_hat || eaddr <= kernelbase || 1605 eaddr == HTABLE_WALK_TO_END); 1606 if (hat != kas.a_hat && eaddr == HTABLE_WALK_TO_END) 1607 eaddr = kernelbase; 1608 1609 /* 1610 * If we're coming in with a previous page table, search it first 1611 * without doing an htable_lookup(), this should be frequent. 1612 */ 1613 if (prev) { 1614 ASSERT(prev->ht_busy > 0); 1615 ASSERT(prev->ht_vaddr <= va); 1616 l = prev->ht_level; 1617 if (va <= HTABLE_LAST_PAGE(prev)) { 1618 pte = htable_scan(prev, &va, eaddr); 1619 1620 if (PTE_ISPAGE(pte, l)) { 1621 *vaddr = va; 1622 *htp = prev; 1623 return (pte); 1624 } 1625 } 1626 1627 /* 1628 * We found nothing in the htable provided by the caller, 1629 * so fall through and do the full search 1630 */ 1631 htable_release(prev); 1632 } 1633 1634 /* 1635 * Find the level of the largest pagesize used by this HAT. 
1636 */ 1637 if (hat->hat_ism_pgcnt > 0) { 1638 max_mapped_level = mmu.umax_page_level; 1639 } else { 1640 max_mapped_level = 0; 1641 for (l = 1; l <= mmu.max_page_level; ++l) 1642 if (hat->hat_pages_mapped[l] != 0) 1643 max_mapped_level = l; 1644 } 1645 1646 while (va < eaddr && va >= *vaddr) { 1647 ASSERT(!IN_VA_HOLE(va)); 1648 1649 /* 1650 * Find lowest table with any entry for given address. 1651 */ 1652 for (l = 0; l <= TOP_LEVEL(hat); ++l) { 1653 ht = htable_lookup(hat, va, l); 1654 if (ht != NULL) { 1655 pte = htable_scan(ht, &va, eaddr); 1656 if (PTE_ISPAGE(pte, l)) { 1657 *vaddr = va; 1658 *htp = ht; 1659 return (pte); 1660 } 1661 htable_release(ht); 1662 break; 1663 } 1664 1665 /* 1666 * No htable at this level for the address. If there 1667 * is no larger page size that could cover it, we can 1668 * skip right to the start of the next page table. 1669 */ 1670 ASSERT(l < TOP_LEVEL(hat)); 1671 if (l >= max_mapped_level) { 1672 va = NEXT_ENTRY_VA(va, l + 1); 1673 if (va >= eaddr) 1674 break; 1675 } 1676 } 1677 } 1678 1679 *vaddr = 0; 1680 *htp = NULL; 1681 return (0); 1682 } 1683 1684 /* 1685 * Find the htable and page table entry index of the given virtual address 1686 * with pagesize at or below given level. 1687 * If not found returns NULL. When found, returns the htable, sets 1688 * entry, and has a hold on the htable. 1689 */ 1690 htable_t * 1691 htable_getpte( 1692 struct hat *hat, 1693 uintptr_t vaddr, 1694 uint_t *entry, 1695 x86pte_t *pte, 1696 level_t level) 1697 { 1698 htable_t *ht; 1699 level_t l; 1700 uint_t e; 1701 1702 ASSERT(level <= mmu.max_page_level); 1703 1704 for (l = 0; l <= level; ++l) { 1705 ht = htable_lookup(hat, vaddr, l); 1706 if (ht == NULL) 1707 continue; 1708 e = htable_va2entry(vaddr, ht); 1709 if (entry != NULL) 1710 *entry = e; 1711 if (pte != NULL) 1712 *pte = x86pte_get(ht, e); 1713 return (ht); 1714 } 1715 return (NULL); 1716 } 1717 1718 /* 1719 * Find the htable and page table entry index of the given virtual address. 1720 * There must be a valid page mapped at the given address. 1721 * If not found returns NULL. When found, returns the htable, sets 1722 * entry, and has a hold on the htable. 1723 */ 1724 htable_t * 1725 htable_getpage(struct hat *hat, uintptr_t vaddr, uint_t *entry) 1726 { 1727 htable_t *ht; 1728 uint_t e; 1729 x86pte_t pte; 1730 1731 ht = htable_getpte(hat, vaddr, &e, &pte, mmu.max_page_level); 1732 if (ht == NULL) 1733 return (NULL); 1734 1735 if (entry) 1736 *entry = e; 1737 1738 if (PTE_ISPAGE(pte, ht->ht_level)) 1739 return (ht); 1740 htable_release(ht); 1741 return (NULL); 1742 } 1743 1744 1745 void 1746 htable_init() 1747 { 1748 /* 1749 * To save on kernel VA usage, we avoid debug information in 32 bit 1750 * kernels. 
	 */
#if defined(__amd64)
	int kmem_flags = KMC_NOHASH;
#elif defined(__i386)
	int kmem_flags = KMC_NOHASH | KMC_NODEBUG;
#endif

	/*
	 * initialize kmem caches
	 */
	htable_cache = kmem_cache_create("htable_t",
	    sizeof (htable_t), 0, NULL, NULL,
	    htable_reap, NULL, hat_memload_arena, kmem_flags);
}

/*
 * get the pte index for the virtual address in the given htable's pagetable
 */
uint_t
htable_va2entry(uintptr_t va, htable_t *ht)
{
	level_t l = ht->ht_level;

	ASSERT(va >= ht->ht_vaddr);
	ASSERT(va <= HTABLE_LAST_PAGE(ht));
	return ((va >> LEVEL_SHIFT(l)) & (HTABLE_NUM_PTES(ht) - 1));
}

/*
 * Given an htable and the index of a pte in it, return the virtual address
 * of the page.
 */
uintptr_t
htable_e2va(htable_t *ht, uint_t entry)
{
	level_t l = ht->ht_level;
	uintptr_t va;

	ASSERT(entry < HTABLE_NUM_PTES(ht));
	va = ht->ht_vaddr + ((uintptr_t)entry << LEVEL_SHIFT(l));

	/*
	 * Need to skip over any VA hole in top level table
	 */
#if defined(__amd64)
	if (ht->ht_level == mmu.max_level && va >= mmu.hole_start)
		va += ((mmu.hole_end - mmu.hole_start) + 1);
#endif

	return (va);
}

/*
 * The code uses compare and swap instructions to read/write PTE's to
 * avoid atomicity problems, since PTEs can be 8 bytes on 32 bit systems,
 * where a plain load or store of one would not naturally be atomic.
 *
 * The combination of using kpreempt_disable()/_enable() and the hci_mutex
 * is used to ensure that an interrupt won't overwrite a temporary mapping
 * while it's in use. If an interrupt thread tries to access a PTE, it will
 * yield briefly back to the pinned thread which holds the cpu's hci_mutex.
 */
void
x86pte_cpu_init(cpu_t *cpu)
{
	struct hat_cpu_info *hci;

	hci = kmem_zalloc(sizeof (*hci), KM_SLEEP);
	mutex_init(&hci->hci_mutex, NULL, MUTEX_DEFAULT, NULL);
	cpu->cpu_hat_info = hci;
}

void
x86pte_cpu_fini(cpu_t *cpu)
{
	struct hat_cpu_info *hci = cpu->cpu_hat_info;

	kmem_free(hci, sizeof (*hci));
	cpu->cpu_hat_info = NULL;
}

#ifdef __i386
/*
 * On 32 bit kernels, loading a 64 bit PTE is a little tricky
 */
x86pte_t
get_pte64(x86pte_t *ptr)
{
	volatile uint32_t *p = (uint32_t *)ptr;
	x86pte_t t;

	ASSERT(mmu.pae_hat != 0);
	for (;;) {
		t = p[0];
		t |= (uint64_t)p[1] << 32;
		if ((t & 0xffffffff) == p[0])
			return (t);
	}
}
#endif /* __i386 */

/*
 * Disable preemption and establish a mapping to the pagetable with the
 * given pfn. This is optimized for the case where it's the same
 * pfn as we last used from this CPU.
 */
static x86pte_t *
x86pte_access_pagetable(htable_t *ht, uint_t index)
{
	/*
	 * VLP pagetables are contained in the hat_t
	 */
	if (ht->ht_flags & HTABLE_VLP)
		return (PT_INDEX_PTR(ht->ht_hat->hat_vlp_ptes, index));
	return (x86pte_mapin(ht->ht_pfn, index, ht));
}

/*
 * map the given pfn into the page table window.
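 * A typical paired use looks like (illustrative; the pointer should not be
 * used after x86pte_mapout()):
 *
 *	ptep = x86pte_mapin(pfn, entry, ht);
 *	pte = GET_PTE(ptep);
 *	x86pte_mapout();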
1870 */ 1871 /*ARGSUSED*/ 1872 x86pte_t * 1873 x86pte_mapin(pfn_t pfn, uint_t index, htable_t *ht) 1874 { 1875 x86pte_t *pteptr; 1876 x86pte_t pte = 0; 1877 x86pte_t newpte; 1878 int x; 1879 1880 ASSERT(pfn != PFN_INVALID); 1881 1882 if (!khat_running) { 1883 caddr_t va = kbm_remap_window(pfn_to_pa(pfn), 1); 1884 return (PT_INDEX_PTR(va, index)); 1885 } 1886 1887 /* 1888 * If kpm is available, use it. 1889 */ 1890 if (kpm_vbase) 1891 return (PT_INDEX_PTR(hat_kpm_pfn2va(pfn), index)); 1892 1893 /* 1894 * Disable preemption and grab the CPU's hci_mutex 1895 */ 1896 kpreempt_disable(); 1897 ASSERT(CPU->cpu_hat_info != NULL); 1898 mutex_enter(&CPU->cpu_hat_info->hci_mutex); 1899 x = PWIN_TABLE(CPU->cpu_id); 1900 pteptr = (x86pte_t *)PWIN_PTE_VA(x); 1901 #ifndef __xpv 1902 if (mmu.pae_hat) 1903 pte = *pteptr; 1904 else 1905 pte = *(x86pte32_t *)pteptr; 1906 #endif 1907 1908 newpte = MAKEPTE(pfn, 0) | mmu.pt_global | mmu.pt_nx; 1909 1910 /* 1911 * For hardware we can use a writable mapping. 1912 */ 1913 #ifdef __xpv 1914 if (IN_XPV_PANIC()) 1915 #endif 1916 newpte |= PT_WRITABLE; 1917 1918 if (!PTE_EQUIV(newpte, pte)) { 1919 1920 #ifdef __xpv 1921 if (!IN_XPV_PANIC()) { 1922 xen_map(newpte, PWIN_VA(x)); 1923 } else 1924 #endif 1925 { 1926 XPV_ALLOW_PAGETABLE_UPDATES(); 1927 if (mmu.pae_hat) 1928 *pteptr = newpte; 1929 else 1930 *(x86pte32_t *)pteptr = newpte; 1931 XPV_DISALLOW_PAGETABLE_UPDATES(); 1932 mmu_tlbflush_entry((caddr_t)(PWIN_VA(x))); 1933 } 1934 } 1935 return (PT_INDEX_PTR(PWIN_VA(x), index)); 1936 } 1937 1938 /* 1939 * Release access to a page table. 1940 */ 1941 static void 1942 x86pte_release_pagetable(htable_t *ht) 1943 { 1944 /* 1945 * nothing to do for VLP htables 1946 */ 1947 if (ht->ht_flags & HTABLE_VLP) 1948 return; 1949 1950 x86pte_mapout(); 1951 } 1952 1953 void 1954 x86pte_mapout(void) 1955 { 1956 if (kpm_vbase != NULL || !khat_running) 1957 return; 1958 1959 /* 1960 * Drop the CPU's hci_mutex and restore preemption. 1961 */ 1962 #ifdef __xpv 1963 if (!IN_XPV_PANIC()) { 1964 uintptr_t va; 1965 1966 /* 1967 * We need to always clear the mapping in case a page 1968 * that was once a page table page is ballooned out. 1969 */ 1970 va = (uintptr_t)PWIN_VA(PWIN_TABLE(CPU->cpu_id)); 1971 (void) HYPERVISOR_update_va_mapping(va, 0, 1972 UVMF_INVLPG | UVMF_LOCAL); 1973 } 1974 #endif 1975 mutex_exit(&CPU->cpu_hat_info->hci_mutex); 1976 kpreempt_enable(); 1977 } 1978 1979 /* 1980 * Atomic retrieval of a pagetable entry 1981 */ 1982 x86pte_t 1983 x86pte_get(htable_t *ht, uint_t entry) 1984 { 1985 x86pte_t pte; 1986 x86pte_t *ptep; 1987 1988 /* 1989 * Be careful that loading PAE entries in 32 bit kernel is atomic. 1990 */ 1991 ASSERT(entry < mmu.ptes_per_table); 1992 ptep = x86pte_access_pagetable(ht, entry); 1993 pte = GET_PTE(ptep); 1994 x86pte_release_pagetable(ht); 1995 return (pte); 1996 } 1997 1998 /* 1999 * Atomic unconditional set of a page table entry, it returns the previous 2000 * value. For pre-existing mappings if the PFN changes, then we don't care 2001 * about the old pte's REF / MOD bits. If the PFN remains the same, we leave 2002 * the MOD/REF bits unchanged. 2003 * 2004 * If asked to overwrite a link to a lower page table with a large page 2005 * mapping, this routine returns the special value of LPAGE_ERROR. This 2006 * allows the upper HAT layers to retry with a smaller mapping size. 
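 *
 * For example (illustrative), a caller installing a large page mapping
 * might do:
 *
 *	old = x86pte_set(ht, entry, new_pte, NULL);
 *	if (old == LPAGE_ERROR)
 *		... fall back to PAGESIZE mappings ...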
2007 */ 2008 x86pte_t 2009 x86pte_set(htable_t *ht, uint_t entry, x86pte_t new, void *ptr) 2010 { 2011 x86pte_t old; 2012 x86pte_t prev; 2013 x86pte_t *ptep; 2014 level_t l = ht->ht_level; 2015 x86pte_t pfn_mask = (l != 0) ? PT_PADDR_LGPG : PT_PADDR; 2016 x86pte_t n; 2017 uintptr_t addr = htable_e2va(ht, entry); 2018 hat_t *hat = ht->ht_hat; 2019 2020 ASSERT(new != 0); /* don't use to invalidate a PTE, see x86pte_update */ 2021 ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN)); 2022 if (ptr == NULL) 2023 ptep = x86pte_access_pagetable(ht, entry); 2024 else 2025 ptep = ptr; 2026 2027 /* 2028 * Install the new PTE. If remapping the same PFN, then 2029 * copy existing REF/MOD bits to new mapping. 2030 */ 2031 do { 2032 prev = GET_PTE(ptep); 2033 n = new; 2034 if (PTE_ISVALID(n) && (prev & pfn_mask) == (new & pfn_mask)) 2035 n |= prev & (PT_REF | PT_MOD); 2036 2037 /* 2038 * Another thread may have installed this mapping already, 2039 * flush the local TLB and be done. 2040 */ 2041 if (prev == n) { 2042 old = new; 2043 #ifdef __xpv 2044 if (!IN_XPV_PANIC()) 2045 xen_flush_va((caddr_t)addr); 2046 else 2047 #endif 2048 mmu_tlbflush_entry((caddr_t)addr); 2049 goto done; 2050 } 2051 2052 /* 2053 * Detect if we have a collision of installing a large 2054 * page mapping where there already is a lower page table. 2055 */ 2056 if (l > 0 && (prev & PT_VALID) && !(prev & PT_PAGESIZE)) { 2057 old = LPAGE_ERROR; 2058 goto done; 2059 } 2060 2061 XPV_ALLOW_PAGETABLE_UPDATES(); 2062 old = CAS_PTE(ptep, prev, n); 2063 XPV_DISALLOW_PAGETABLE_UPDATES(); 2064 } while (old != prev); 2065 2066 /* 2067 * Do a TLB demap if needed, ie. the old pte was valid. 2068 * 2069 * Note that a stale TLB writeback to the PTE here either can't happen 2070 * or doesn't matter. The PFN can only change for NOSYNC|NOCONSIST 2071 * mappings, but they were created with REF and MOD already set, so 2072 * no stale writeback will happen. 2073 * 2074 * Segmap is the only place where remaps happen on the same pfn and for 2075 * that we want to preserve the stale REF/MOD bits. 2076 */ 2077 if (old & PT_REF) 2078 hat_tlb_inval(hat, addr); 2079 2080 done: 2081 if (ptr == NULL) 2082 x86pte_release_pagetable(ht); 2083 return (old); 2084 } 2085 2086 /* 2087 * Atomic compare and swap of a page table entry. No TLB invalidates are done. 2088 * This is used for links between pagetables of different levels. 2089 * Note we always create these links with dirty/access set, so they should 2090 * never change. 2091 */ 2092 x86pte_t 2093 x86pte_cas(htable_t *ht, uint_t entry, x86pte_t old, x86pte_t new) 2094 { 2095 x86pte_t pte; 2096 x86pte_t *ptep; 2097 #ifdef __xpv 2098 /* 2099 * We can't use writable pagetables for upper level tables, so fake it. 2100 */ 2101 mmu_update_t t[2]; 2102 int cnt = 1; 2103 int count; 2104 maddr_t ma; 2105 2106 if (!IN_XPV_PANIC()) { 2107 ASSERT(!(ht->ht_flags & HTABLE_VLP)); /* no VLP yet */ 2108 ma = pa_to_ma(PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry)); 2109 t[0].ptr = ma | MMU_NORMAL_PT_UPDATE; 2110 t[0].val = new; 2111 2112 #if defined(__amd64) 2113 /* 2114 * On the 64-bit hypervisor we need to maintain the user mode 2115 * top page table too. 
		 */
		if (ht->ht_level == mmu.max_level && ht->ht_hat != kas.a_hat) {
			ma = pa_to_ma(PT_INDEX_PHYSADDR(pfn_to_pa(
			    ht->ht_hat->hat_user_ptable), entry));
			t[1].ptr = ma | MMU_NORMAL_PT_UPDATE;
			t[1].val = new;
			++cnt;
		}
#endif /* __amd64 */

		if (HYPERVISOR_mmu_update(t, cnt, &count, DOMID_SELF))
			panic("HYPERVISOR_mmu_update() failed");
		ASSERT(count == cnt);
		return (old);
	}
#endif
	ptep = x86pte_access_pagetable(ht, entry);
	XPV_ALLOW_PAGETABLE_UPDATES();
	pte = CAS_PTE(ptep, old, new);
	XPV_DISALLOW_PAGETABLE_UPDATES();
	x86pte_release_pagetable(ht);
	return (pte);
}

/*
 * Invalidate a page table entry as long as it currently maps something that
 * matches the value determined by expect.
 *
 * Also invalidates any TLB entries and returns the previous value of the PTE.
 */
x86pte_t
x86pte_inval(
	htable_t *ht,
	uint_t entry,
	x86pte_t expect,
	x86pte_t *pte_ptr)
{
	x86pte_t *ptep;
	x86pte_t oldpte;
	x86pte_t found;

	ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN));
	ASSERT(ht->ht_level <= mmu.max_page_level);

	if (pte_ptr != NULL)
		ptep = pte_ptr;
	else
		ptep = x86pte_access_pagetable(ht, entry);

#if defined(__xpv)
	/*
	 * If exit()ing just use HYPERVISOR_mmu_update(), as we can't be racing
	 * with anything else.
	 */
	if ((ht->ht_hat->hat_flags & HAT_FREEING) && !IN_XPV_PANIC()) {
		int count;
		mmu_update_t t[1];
		maddr_t ma;

		oldpte = GET_PTE(ptep);
		if (expect != 0 && (oldpte & PT_PADDR) != (expect & PT_PADDR))
			goto done;
		ma = pa_to_ma(PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry));
		t[0].ptr = ma | MMU_NORMAL_PT_UPDATE;
		t[0].val = 0;
		if (HYPERVISOR_mmu_update(t, 1, &count, DOMID_SELF))
			panic("HYPERVISOR_mmu_update() failed");
		ASSERT(count == 1);
		goto done;
	}
#endif /* __xpv */

	/*
	 * Note that the loop is needed to handle changes due to h/w updating
	 * of PT_MOD/PT_REF.
	 */
	do {
		oldpte = GET_PTE(ptep);
		if (expect != 0 && (oldpte & PT_PADDR) != (expect & PT_PADDR))
			goto done;
		XPV_ALLOW_PAGETABLE_UPDATES();
		found = CAS_PTE(ptep, oldpte, 0);
		XPV_DISALLOW_PAGETABLE_UPDATES();
	} while (found != oldpte);
	if (oldpte & (PT_REF | PT_MOD))
		hat_tlb_inval(ht->ht_hat, htable_e2va(ht, entry));

done:
	if (pte_ptr == NULL)
		x86pte_release_pagetable(ht);
	return (oldpte);
}

/*
 * Change a page table entry if it currently matches the value in expect.
 */
x86pte_t
x86pte_update(
	htable_t *ht,
	uint_t entry,
	x86pte_t expect,
	x86pte_t new)
{
	x86pte_t *ptep;
	x86pte_t found;

	ASSERT(new != 0);
	ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN));
	ASSERT(ht->ht_level <= mmu.max_page_level);

	ptep = x86pte_access_pagetable(ht, entry);
	XPV_ALLOW_PAGETABLE_UPDATES();
	found = CAS_PTE(ptep, expect, new);
	XPV_DISALLOW_PAGETABLE_UPDATES();
	if (found == expect) {
		hat_tlb_inval(ht->ht_hat, htable_e2va(ht, entry));

		/*
		 * When removing write permission *and* clearing the
		 * MOD bit, check if a write happened via a stale
		 * TLB entry before the TLB shootdown finished.
		 *
		 * If it did happen, simply re-enable write permission and
		 * act like the original CAS failed.

/*
 * Change a page table entry if it currently matches the value in expect.
 */
x86pte_t
x86pte_update(
	htable_t *ht,
	uint_t entry,
	x86pte_t expect,
	x86pte_t new)
{
	x86pte_t	*ptep;
	x86pte_t	found;

	ASSERT(new != 0);
	ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN));
	ASSERT(ht->ht_level <= mmu.max_page_level);

	ptep = x86pte_access_pagetable(ht, entry);
	XPV_ALLOW_PAGETABLE_UPDATES();
	found = CAS_PTE(ptep, expect, new);
	XPV_DISALLOW_PAGETABLE_UPDATES();
	if (found == expect) {
		hat_tlb_inval(ht->ht_hat, htable_e2va(ht, entry));

		/*
		 * When removing write permission *and* clearing the
		 * MOD bit, check if a write happened via a stale
		 * TLB entry before the TLB shootdown finished.
		 *
		 * If it did happen, simply re-enable write permission and
		 * act like the original CAS failed.
		 */
		if ((expect & (PT_WRITABLE | PT_MOD)) == PT_WRITABLE &&
		    (new & (PT_WRITABLE | PT_MOD)) == 0 &&
		    (GET_PTE(ptep) & PT_MOD) != 0) {
			do {
				found = GET_PTE(ptep);
				XPV_ALLOW_PAGETABLE_UPDATES();
				found =
				    CAS_PTE(ptep, found, found | PT_WRITABLE);
				XPV_DISALLOW_PAGETABLE_UPDATES();
			} while ((found & PT_WRITABLE) == 0);
		}
	}
	x86pte_release_pagetable(ht);
	return (found);
}

#ifndef __xpv
/*
 * Copy page tables - this is just a little more complicated than the
 * previous routines. Note that it's also not atomic! It is also never
 * used for VLP pagetables.
 */
void
x86pte_copy(htable_t *src, htable_t *dest, uint_t entry, uint_t count)
{
	caddr_t	src_va;
	caddr_t	dst_va;
	size_t	size;
	x86pte_t *pteptr;
	x86pte_t pte;

	ASSERT(khat_running);
	ASSERT(!(dest->ht_flags & HTABLE_VLP));
	ASSERT(!(src->ht_flags & HTABLE_VLP));
	ASSERT(!(src->ht_flags & HTABLE_SHARED_PFN));
	ASSERT(!(dest->ht_flags & HTABLE_SHARED_PFN));

	/*
	 * Acquire access to the CPU pagetable windows for the dest and source.
	 */
	dst_va = (caddr_t)x86pte_access_pagetable(dest, entry);
	if (kpm_vbase) {
		src_va = (caddr_t)
		    PT_INDEX_PTR(hat_kpm_pfn2va(src->ht_pfn), entry);
	} else {
		uint_t x = PWIN_SRC(CPU->cpu_id);

		/*
		 * Finish defining the src pagetable mapping
		 */
		src_va = (caddr_t)PT_INDEX_PTR(PWIN_VA(x), entry);
		pte = MAKEPTE(src->ht_pfn, 0) | mmu.pt_global | mmu.pt_nx;
		pteptr = (x86pte_t *)PWIN_PTE_VA(x);
		if (mmu.pae_hat)
			*pteptr = pte;
		else
			*(x86pte32_t *)pteptr = pte;
		mmu_tlbflush_entry((caddr_t)(PWIN_VA(x)));
	}

	/*
	 * now do the copy
	 */
	size = count << mmu.pte_size_shift;
	bcopy(src_va, dst_va, size);

	x86pte_release_pagetable(dest);
}

#else /* __xpv */

/*
 * The hypervisor only supports writable pagetables at level 0, so we have
 * to install these 1 by 1 the slow way.
 */
void
x86pte_copy(htable_t *src, htable_t *dest, uint_t entry, uint_t count)
{
	caddr_t	src_va;
	x86pte_t pte;

	ASSERT(!IN_XPV_PANIC());
	src_va = (caddr_t)x86pte_access_pagetable(src, entry);
	while (count) {
		if (mmu.pae_hat)
			pte = *(x86pte_t *)src_va;
		else
			pte = *(x86pte32_t *)src_va;
		if (pte != 0) {
			set_pteval(pfn_to_pa(dest->ht_pfn), entry,
			    dest->ht_level, pte);
#ifdef __amd64
			if (dest->ht_level == mmu.max_level &&
			    htable_e2va(dest, entry) < HYPERVISOR_VIRT_END)
				set_pteval(
				    pfn_to_pa(dest->ht_hat->hat_user_ptable),
				    entry, dest->ht_level, pte);
#endif
		}
		--count;
		++entry;
		src_va += mmu.pte_size;
	}
	x86pte_release_pagetable(src);
}
#endif /* __xpv */
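
/*
 * Sizing note (editorial illustration): x86pte_copy() above and
 * x86pte_zero() below both convert an entry count into a byte count with
 * "count << mmu.pte_size_shift".  For example, with 8-byte PTEs (PAE or
 * 64-bit pagetables) pte_size_shift is 3, so operating on all 512 entries
 * of a pagetable covers 512 << 3 == 4096 bytes, i.e. one full page.
 */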

/*
 * Zero page table entries - Note this doesn't use atomic stores!
 */
static void
x86pte_zero(htable_t *dest, uint_t entry, uint_t count)
{
	caddr_t	dst_va;
	size_t	size;
#ifdef __xpv
	int x;
	x86pte_t newpte;
#endif

	/*
	 * Map in the page table to be zeroed.
	 */
	ASSERT(!(dest->ht_flags & HTABLE_SHARED_PFN));
	ASSERT(!(dest->ht_flags & HTABLE_VLP));

	/*
	 * On the hypervisor we don't use x86pte_access_pagetable() since
	 * in this case the page is not pinned yet.
	 */
#ifdef __xpv
	if (kpm_vbase == NULL) {
		kpreempt_disable();
		ASSERT(CPU->cpu_hat_info != NULL);
		mutex_enter(&CPU->cpu_hat_info->hci_mutex);
		x = PWIN_TABLE(CPU->cpu_id);
		newpte = MAKEPTE(dest->ht_pfn, 0) | PT_WRITABLE;
		xen_map(newpte, PWIN_VA(x));
		dst_va = (caddr_t)PT_INDEX_PTR(PWIN_VA(x), entry);
	} else
#endif
		dst_va = (caddr_t)x86pte_access_pagetable(dest, entry);

	size = count << mmu.pte_size_shift;
	ASSERT(size > BLOCKZEROALIGN);
#ifdef __i386
	if ((x86_feature & X86_SSE2) == 0)
		bzero(dst_va, size);
	else
#endif
		block_zero_no_xmm(dst_va, size);

#ifdef __xpv
	if (kpm_vbase == NULL) {
		xen_map(0, PWIN_VA(x));
		mutex_exit(&CPU->cpu_hat_info->hci_mutex);
		kpreempt_enable();
	} else
#endif
		x86pte_release_pagetable(dest);
}

/*
 * Called to ensure that all pagetables are in the system dump
 */
void
hat_dump(void)
{
	hat_t *hat;
	uint_t h;
	htable_t *ht;

	/*
	 * Dump all page tables
	 */
	for (hat = kas.a_hat; hat != NULL; hat = hat->hat_next) {
		for (h = 0; h < hat->hat_num_hash; ++h) {
			for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) {
				if ((ht->ht_flags & HTABLE_VLP) == 0)
					dump_page(ht->ht_pfn);
			}
		}
	}
}