1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved. 24 * Copyright (c) 2014 by Delphix. All rights reserved. 25 */ 26 27 #include <sys/types.h> 28 #include <sys/sysmacros.h> 29 #include <sys/kmem.h> 30 #include <sys/atomic.h> 31 #include <sys/bitmap.h> 32 #include <sys/machparam.h> 33 #include <sys/machsystm.h> 34 #include <sys/mman.h> 35 #include <sys/systm.h> 36 #include <sys/cpuvar.h> 37 #include <sys/thread.h> 38 #include <sys/proc.h> 39 #include <sys/cpu.h> 40 #include <sys/kmem.h> 41 #include <sys/disp.h> 42 #include <sys/vmem.h> 43 #include <sys/vmsystm.h> 44 #include <sys/promif.h> 45 #include <sys/var.h> 46 #include <sys/x86_archext.h> 47 #include <sys/archsystm.h> 48 #include <sys/bootconf.h> 49 #include <sys/dumphdr.h> 50 #include <vm/seg_kmem.h> 51 #include <vm/seg_kpm.h> 52 #include <vm/hat.h> 53 #include <vm/hat_i86.h> 54 #include <sys/cmn_err.h> 55 #include <sys/panic.h> 56 57 #ifdef __xpv 58 #include <sys/hypervisor.h> 59 #include <sys/xpv_panic.h> 60 #endif 61 62 #include <sys/bootinfo.h> 63 #include <vm/kboot_mmu.h> 64 65 static void x86pte_zero(htable_t *dest, uint_t entry, uint_t count); 66 67 kmem_cache_t *htable_cache; 68 69 /* 70 * The variable htable_reserve_amount, rather than HTABLE_RESERVE_AMOUNT, 71 * is used in order to facilitate testing of the htable_steal() code. 72 * By resetting htable_reserve_amount to a lower value, we can force 73 * stealing to occur. The reserve amount is a guess to get us through boot. 74 */ 75 #define HTABLE_RESERVE_AMOUNT (200) 76 uint_t htable_reserve_amount = HTABLE_RESERVE_AMOUNT; 77 kmutex_t htable_reserve_mutex; 78 uint_t htable_reserve_cnt; 79 htable_t *htable_reserve_pool; 80 81 /* 82 * Used to hand test htable_steal(). 83 */ 84 #ifdef DEBUG 85 ulong_t force_steal = 0; 86 ulong_t ptable_cnt = 0; 87 #endif 88 89 /* 90 * This variable is so that we can tune this via /etc/system 91 * Any value works, but a power of two <= mmu.ptes_per_table is best. 
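 * A power of two keeps the per-pass steal threshold computed in
 * htable_steal() (pass * mmu.ptes_per_table / htable_steal_passes)
 * stepping evenly; e.g. a line such as "set htable_steal_passes = 4"
 * in /etc/system adjusts it.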
92 */ 93 uint_t htable_steal_passes = 8; 94 95 /* 96 * mutex stuff for access to htable hash 97 */ 98 #define NUM_HTABLE_MUTEX 128 99 kmutex_t htable_mutex[NUM_HTABLE_MUTEX]; 100 #define HTABLE_MUTEX_HASH(h) ((h) & (NUM_HTABLE_MUTEX - 1)) 101 102 #define HTABLE_ENTER(h) mutex_enter(&htable_mutex[HTABLE_MUTEX_HASH(h)]); 103 #define HTABLE_EXIT(h) mutex_exit(&htable_mutex[HTABLE_MUTEX_HASH(h)]); 104 105 /* 106 * forward declarations 107 */ 108 static void link_ptp(htable_t *higher, htable_t *new, uintptr_t vaddr); 109 static void unlink_ptp(htable_t *higher, htable_t *old, uintptr_t vaddr); 110 static void htable_free(htable_t *ht); 111 static x86pte_t *x86pte_access_pagetable(htable_t *ht, uint_t index); 112 static void x86pte_release_pagetable(htable_t *ht); 113 static x86pte_t x86pte_cas(htable_t *ht, uint_t entry, x86pte_t old, 114 x86pte_t new); 115 116 /* 117 * A counter to track if we are stealing or reaping htables. When non-zero 118 * htable_free() will directly free htables (either to the reserve or kmem) 119 * instead of putting them in a hat's htable cache. 120 */ 121 uint32_t htable_dont_cache = 0; 122 123 /* 124 * Track the number of active pagetables, so we can know how many to reap 125 */ 126 static uint32_t active_ptables = 0; 127 128 #ifdef __xpv 129 /* 130 * Deal with hypervisor complications. 131 */ 132 void 133 xen_flush_va(caddr_t va) 134 { 135 struct mmuext_op t; 136 uint_t count; 137 138 if (IN_XPV_PANIC()) { 139 mmu_tlbflush_entry((caddr_t)va); 140 } else { 141 t.cmd = MMUEXT_INVLPG_LOCAL; 142 t.arg1.linear_addr = (uintptr_t)va; 143 if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0) 144 panic("HYPERVISOR_mmuext_op() failed"); 145 ASSERT(count == 1); 146 } 147 } 148 149 void 150 xen_gflush_va(caddr_t va, cpuset_t cpus) 151 { 152 struct mmuext_op t; 153 uint_t count; 154 155 if (IN_XPV_PANIC()) { 156 mmu_tlbflush_entry((caddr_t)va); 157 return; 158 } 159 160 t.cmd = MMUEXT_INVLPG_MULTI; 161 t.arg1.linear_addr = (uintptr_t)va; 162 /*LINTED: constant in conditional context*/ 163 set_xen_guest_handle(t.arg2.vcpumask, &cpus); 164 if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0) 165 panic("HYPERVISOR_mmuext_op() failed"); 166 ASSERT(count == 1); 167 } 168 169 void 170 xen_flush_tlb() 171 { 172 struct mmuext_op t; 173 uint_t count; 174 175 if (IN_XPV_PANIC()) { 176 xpv_panic_reload_cr3(); 177 } else { 178 t.cmd = MMUEXT_TLB_FLUSH_LOCAL; 179 if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0) 180 panic("HYPERVISOR_mmuext_op() failed"); 181 ASSERT(count == 1); 182 } 183 } 184 185 void 186 xen_gflush_tlb(cpuset_t cpus) 187 { 188 struct mmuext_op t; 189 uint_t count; 190 191 ASSERT(!IN_XPV_PANIC()); 192 t.cmd = MMUEXT_TLB_FLUSH_MULTI; 193 /*LINTED: constant in conditional context*/ 194 set_xen_guest_handle(t.arg2.vcpumask, &cpus); 195 if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0) 196 panic("HYPERVISOR_mmuext_op() failed"); 197 ASSERT(count == 1); 198 } 199 200 /* 201 * Install/Adjust a kpm mapping under the hypervisor. 202 * Value of "how" should be: 203 * PT_WRITABLE | PT_VALID - regular kpm mapping 204 * PT_VALID - make mapping read-only 205 * 0 - remove mapping 206 * 207 * returns 0 on success. non-zero for failure. 
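 * The mapping adjusted is the kpm alias of the page, i.e. the kernel
 * virtual address at kpm_vbase + mmu_ptob(pfn).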
208 */ 209 int 210 xen_kpm_page(pfn_t pfn, uint_t how) 211 { 212 paddr_t pa = mmu_ptob((paddr_t)pfn); 213 x86pte_t pte = PT_NOCONSIST | PT_REF | PT_MOD; 214 215 if (kpm_vbase == NULL) 216 return (0); 217 218 if (how) 219 pte |= pa_to_ma(pa) | how; 220 else 221 pte = 0; 222 return (HYPERVISOR_update_va_mapping((uintptr_t)kpm_vbase + pa, 223 pte, UVMF_INVLPG | UVMF_ALL)); 224 } 225 226 void 227 xen_pin(pfn_t pfn, level_t lvl) 228 { 229 struct mmuext_op t; 230 uint_t count; 231 232 t.cmd = MMUEXT_PIN_L1_TABLE + lvl; 233 t.arg1.mfn = pfn_to_mfn(pfn); 234 if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0) 235 panic("HYPERVISOR_mmuext_op() failed"); 236 ASSERT(count == 1); 237 } 238 239 void 240 xen_unpin(pfn_t pfn) 241 { 242 struct mmuext_op t; 243 uint_t count; 244 245 t.cmd = MMUEXT_UNPIN_TABLE; 246 t.arg1.mfn = pfn_to_mfn(pfn); 247 if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0) 248 panic("HYPERVISOR_mmuext_op() failed"); 249 ASSERT(count == 1); 250 } 251 252 static void 253 xen_map(uint64_t pte, caddr_t va) 254 { 255 if (HYPERVISOR_update_va_mapping((uintptr_t)va, pte, 256 UVMF_INVLPG | UVMF_LOCAL)) 257 panic("HYPERVISOR_update_va_mapping() failed"); 258 } 259 #endif /* __xpv */ 260 261 /* 262 * Allocate a memory page for a hardware page table. 263 * 264 * A wrapper around page_get_physical(), with some extra checks. 265 */ 266 static pfn_t 267 ptable_alloc(uintptr_t seed) 268 { 269 pfn_t pfn; 270 page_t *pp; 271 272 pfn = PFN_INVALID; 273 274 /* 275 * The first check is to see if there is memory in the system. If we 276 * drop to throttlefree, then fail the ptable_alloc() and let the 277 * stealing code kick in. Note that we have to do this test here, 278 * since the test in page_create_throttle() would let the NOSLEEP 279 * allocation go through and deplete the page reserves. 280 * 281 * The !NOMEMWAIT() lets pageout, fsflush, etc. skip this check. 282 */ 283 if (!NOMEMWAIT() && freemem <= throttlefree + 1) 284 return (PFN_INVALID); 285 286 #ifdef DEBUG 287 /* 288 * This code makes htable_steal() easier to test. By setting 289 * force_steal we force pagetable allocations to fall 290 * into the stealing code. Roughly 1 in ever "force_steal" 291 * page table allocations will fail. 292 */ 293 if (proc_pageout != NULL && force_steal > 1 && 294 ++ptable_cnt > force_steal) { 295 ptable_cnt = 0; 296 return (PFN_INVALID); 297 } 298 #endif /* DEBUG */ 299 300 pp = page_get_physical(seed); 301 if (pp == NULL) 302 return (PFN_INVALID); 303 ASSERT(PAGE_SHARED(pp)); 304 pfn = pp->p_pagenum; 305 if (pfn == PFN_INVALID) 306 panic("ptable_alloc(): Invalid PFN!!"); 307 atomic_inc_32(&active_ptables); 308 HATSTAT_INC(hs_ptable_allocs); 309 return (pfn); 310 } 311 312 /* 313 * Free an htable's associated page table page. See the comments 314 * for ptable_alloc(). 315 */ 316 static void 317 ptable_free(pfn_t pfn) 318 { 319 page_t *pp = page_numtopp_nolock(pfn); 320 321 /* 322 * need to destroy the page used for the pagetable 323 */ 324 ASSERT(pfn != PFN_INVALID); 325 HATSTAT_INC(hs_ptable_frees); 326 atomic_dec_32(&active_ptables); 327 if (pp == NULL) 328 panic("ptable_free(): no page for pfn!"); 329 ASSERT(PAGE_SHARED(pp)); 330 ASSERT(pfn == pp->p_pagenum); 331 ASSERT(!IN_XPV_PANIC()); 332 333 /* 334 * Get an exclusive lock, might have to wait for a kmem reader. 
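 * The pagetable page is normally held SE_SHARED; if the upgrade fails,
 * drop the lock and look the page up again in kvp by offset with SE_EXCL
 * before hashing it out and freeing it.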
335 */ 336 if (!page_tryupgrade(pp)) { 337 u_offset_t off = pp->p_offset; 338 page_unlock(pp); 339 pp = page_lookup(&kvp, off, SE_EXCL); 340 if (pp == NULL) 341 panic("page not found"); 342 } 343 #ifdef __xpv 344 if (kpm_vbase && xen_kpm_page(pfn, PT_VALID | PT_WRITABLE) < 0) 345 panic("failure making kpm r/w pfn=0x%lx", pfn); 346 #endif 347 page_hashout(pp, NULL); 348 page_free(pp, 1); 349 page_unresv(1); 350 } 351 352 /* 353 * Put one htable on the reserve list. 354 */ 355 static void 356 htable_put_reserve(htable_t *ht) 357 { 358 ht->ht_hat = NULL; /* no longer tied to a hat */ 359 ASSERT(ht->ht_pfn == PFN_INVALID); 360 HATSTAT_INC(hs_htable_rputs); 361 mutex_enter(&htable_reserve_mutex); 362 ht->ht_next = htable_reserve_pool; 363 htable_reserve_pool = ht; 364 ++htable_reserve_cnt; 365 mutex_exit(&htable_reserve_mutex); 366 } 367 368 /* 369 * Take one htable from the reserve. 370 */ 371 static htable_t * 372 htable_get_reserve(void) 373 { 374 htable_t *ht = NULL; 375 376 mutex_enter(&htable_reserve_mutex); 377 if (htable_reserve_cnt != 0) { 378 ht = htable_reserve_pool; 379 ASSERT(ht != NULL); 380 ASSERT(ht->ht_pfn == PFN_INVALID); 381 htable_reserve_pool = ht->ht_next; 382 --htable_reserve_cnt; 383 HATSTAT_INC(hs_htable_rgets); 384 } 385 mutex_exit(&htable_reserve_mutex); 386 return (ht); 387 } 388 389 /* 390 * Allocate initial htables and put them on the reserve list 391 */ 392 void 393 htable_initial_reserve(uint_t count) 394 { 395 htable_t *ht; 396 397 count += HTABLE_RESERVE_AMOUNT; 398 while (count > 0) { 399 ht = kmem_cache_alloc(htable_cache, KM_NOSLEEP); 400 ASSERT(ht != NULL); 401 402 ASSERT(use_boot_reserve); 403 ht->ht_pfn = PFN_INVALID; 404 htable_put_reserve(ht); 405 --count; 406 } 407 } 408 409 /* 410 * Readjust the reserves after a thread finishes using them. 411 */ 412 void 413 htable_adjust_reserve() 414 { 415 htable_t *ht; 416 417 /* 418 * Free any excess htables in the reserve list 419 */ 420 while (htable_reserve_cnt > htable_reserve_amount && 421 !USE_HAT_RESERVES()) { 422 ht = htable_get_reserve(); 423 if (ht == NULL) 424 return; 425 ASSERT(ht->ht_pfn == PFN_INVALID); 426 kmem_cache_free(htable_cache, ht); 427 } 428 } 429 430 /* 431 * Search the active htables for one to steal. Start at a different hash 432 * bucket every time to help spread the pain of stealing 433 */ 434 static void 435 htable_steal_active(hat_t *hat, uint_t cnt, uint_t threshold, 436 uint_t *stolen, htable_t **list) 437 { 438 static uint_t h_seed = 0; 439 htable_t *higher, *ht; 440 uint_t h, e, h_start; 441 uintptr_t va; 442 x86pte_t pte; 443 444 h = h_start = h_seed++ % hat->hat_num_hash; 445 do { 446 higher = NULL; 447 HTABLE_ENTER(h); 448 for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) { 449 450 /* 451 * Can we rule out reaping? 452 */ 453 if (ht->ht_busy != 0 || 454 (ht->ht_flags & HTABLE_SHARED_PFN) || 455 ht->ht_level > 0 || ht->ht_valid_cnt > threshold || 456 ht->ht_lock_cnt != 0) 457 continue; 458 459 /* 460 * Increment busy so the htable can't disappear. We 461 * drop the htable mutex to avoid deadlocks with 462 * hat_pageunload() and the hment mutex while we 463 * call hat_pte_unmap() 464 */ 465 ++ht->ht_busy; 466 HTABLE_EXIT(h); 467 468 /* 469 * Try stealing. 
470 * - unload and invalidate all PTEs 471 */ 472 for (e = 0, va = ht->ht_vaddr; 473 e < HTABLE_NUM_PTES(ht) && ht->ht_valid_cnt > 0 && 474 ht->ht_busy == 1 && ht->ht_lock_cnt == 0; 475 ++e, va += MMU_PAGESIZE) { 476 pte = x86pte_get(ht, e); 477 if (!PTE_ISVALID(pte)) 478 continue; 479 hat_pte_unmap(ht, e, HAT_UNLOAD, pte, NULL, 480 B_TRUE); 481 } 482 483 /* 484 * Reacquire htable lock. If we didn't remove all 485 * mappings in the table, or another thread added a new 486 * mapping behind us, give up on this table. 487 */ 488 HTABLE_ENTER(h); 489 if (ht->ht_busy != 1 || ht->ht_valid_cnt != 0 || 490 ht->ht_lock_cnt != 0) { 491 --ht->ht_busy; 492 continue; 493 } 494 495 /* 496 * Steal it and unlink the page table. 497 */ 498 higher = ht->ht_parent; 499 unlink_ptp(higher, ht, ht->ht_vaddr); 500 501 /* 502 * remove from the hash list 503 */ 504 if (ht->ht_next) 505 ht->ht_next->ht_prev = ht->ht_prev; 506 507 if (ht->ht_prev) { 508 ht->ht_prev->ht_next = ht->ht_next; 509 } else { 510 ASSERT(hat->hat_ht_hash[h] == ht); 511 hat->hat_ht_hash[h] = ht->ht_next; 512 } 513 514 /* 515 * Break to outer loop to release the 516 * higher (ht_parent) pagetable. This 517 * spreads out the pain caused by 518 * pagefaults. 519 */ 520 ht->ht_next = *list; 521 *list = ht; 522 ++*stolen; 523 break; 524 } 525 HTABLE_EXIT(h); 526 if (higher != NULL) 527 htable_release(higher); 528 if (++h == hat->hat_num_hash) 529 h = 0; 530 } while (*stolen < cnt && h != h_start); 531 } 532 533 /* 534 * Move hat to the end of the kas list 535 */ 536 static void 537 move_victim(hat_t *hat) 538 { 539 ASSERT(MUTEX_HELD(&hat_list_lock)); 540 541 /* unlink victim hat */ 542 if (hat->hat_prev) 543 hat->hat_prev->hat_next = hat->hat_next; 544 else 545 kas.a_hat->hat_next = hat->hat_next; 546 547 if (hat->hat_next) 548 hat->hat_next->hat_prev = hat->hat_prev; 549 else 550 kas.a_hat->hat_prev = hat->hat_prev; 551 /* relink at end of hat list */ 552 hat->hat_next = NULL; 553 hat->hat_prev = kas.a_hat->hat_prev; 554 if (hat->hat_prev) 555 hat->hat_prev->hat_next = hat; 556 else 557 kas.a_hat->hat_next = hat; 558 559 kas.a_hat->hat_prev = hat; 560 } 561 562 /* 563 * This routine steals htables from user processes. Called by htable_reap 564 * (reap=TRUE) or htable_alloc (reap=FALSE). 565 */ 566 static htable_t * 567 htable_steal(uint_t cnt, boolean_t reap) 568 { 569 hat_t *hat = kas.a_hat; /* list starts with khat */ 570 htable_t *list = NULL; 571 htable_t *ht; 572 uint_t stolen = 0; 573 uint_t pass; 574 uint_t threshold; 575 576 /* 577 * Limit htable_steal_passes to something reasonable 578 */ 579 if (htable_steal_passes == 0) 580 htable_steal_passes = 1; 581 if (htable_steal_passes > mmu.ptes_per_table) 582 htable_steal_passes = mmu.ptes_per_table; 583 584 /* 585 * Loop through all user hats. The 1st pass takes cached htables that 586 * aren't in use. The later passes steal by removing mappings, too. 587 */ 588 atomic_inc_32(&htable_dont_cache); 589 for (pass = 0; pass <= htable_steal_passes && stolen < cnt; ++pass) { 590 threshold = pass * mmu.ptes_per_table / htable_steal_passes; 591 592 mutex_enter(&hat_list_lock); 593 594 /* skip the first hat (kernel) */ 595 hat = kas.a_hat->hat_next; 596 for (;;) { 597 /* 598 * Skip any hat that is already being stolen from. 599 * 600 * We skip SHARED hats, as these are dummy 601 * hats that host ISM shared page tables. 602 * 603 * We also skip if HAT_FREEING because hat_pte_unmap() 604 * won't zero out the PTE's. 
That would lead to hitting 605 * stale PTEs either here or under hat_unload() when we 606 * steal and unload the same page table in competing 607 * threads. 608 */ 609 while (hat != NULL && 610 (hat->hat_flags & 611 (HAT_VICTIM | HAT_SHARED | HAT_FREEING)) != 0) 612 hat = hat->hat_next; 613 614 if (hat == NULL) 615 break; 616 617 /* 618 * Mark the HAT as a stealing victim so that it is 619 * not freed from under us, e.g. in as_free() 620 */ 621 hat->hat_flags |= HAT_VICTIM; 622 mutex_exit(&hat_list_lock); 623 624 /* 625 * Take any htables from the hat's cached "free" list. 626 */ 627 hat_enter(hat); 628 while ((ht = hat->hat_ht_cached) != NULL && 629 stolen < cnt) { 630 hat->hat_ht_cached = ht->ht_next; 631 ht->ht_next = list; 632 list = ht; 633 ++stolen; 634 } 635 hat_exit(hat); 636 637 /* 638 * Don't steal active htables on first pass. 639 */ 640 if (pass != 0 && (stolen < cnt)) 641 htable_steal_active(hat, cnt, threshold, 642 &stolen, &list); 643 644 /* 645 * do synchronous teardown for the reap case so that 646 * we can forget hat; at this time, hat is 647 * guaranteed to be around because HAT_VICTIM is set 648 * (see htable_free() for similar code) 649 */ 650 for (ht = list; (ht) && (reap); ht = ht->ht_next) { 651 if (ht->ht_hat == NULL) 652 continue; 653 ASSERT(ht->ht_hat == hat); 654 #if defined(__xpv) && defined(__amd64) 655 if (!(ht->ht_flags & HTABLE_VLP) && 656 ht->ht_level == mmu.max_level) { 657 ptable_free(hat->hat_user_ptable); 658 hat->hat_user_ptable = PFN_INVALID; 659 } 660 #endif 661 /* 662 * forget the hat 663 */ 664 ht->ht_hat = NULL; 665 } 666 667 mutex_enter(&hat_list_lock); 668 669 /* 670 * Are we finished? 671 */ 672 if (stolen == cnt) { 673 /* 674 * Try to spread the pain of stealing, 675 * move victim HAT to the end of the HAT list. 676 */ 677 if (pass >= 1 && cnt == 1 && 678 kas.a_hat->hat_prev != hat) 679 move_victim(hat); 680 /* 681 * We are finished 682 */ 683 } 684 685 /* 686 * Clear the victim flag, hat can go away now (once 687 * the lock is dropped) 688 */ 689 if (hat->hat_flags & HAT_VICTIM) { 690 ASSERT(hat != kas.a_hat); 691 hat->hat_flags &= ~HAT_VICTIM; 692 cv_broadcast(&hat_list_cv); 693 } 694 695 /* move on to the next hat */ 696 hat = hat->hat_next; 697 } 698 699 mutex_exit(&hat_list_lock); 700 701 } 702 ASSERT(!MUTEX_HELD(&hat_list_lock)); 703 704 atomic_dec_32(&htable_dont_cache); 705 return (list); 706 } 707 708 /* 709 * This is invoked from kmem when the system is low on memory. We try 710 * to free hments, htables, and ptables to improve the memory situation. 711 */ 712 /*ARGSUSED*/ 713 static void 714 htable_reap(void *handle) 715 { 716 uint_t reap_cnt; 717 htable_t *list; 718 htable_t *ht; 719 720 HATSTAT_INC(hs_reap_attempts); 721 if (!can_steal_post_boot) 722 return; 723 724 /* 725 * Try to reap 5% of the page tables bounded by a maximum of 726 * 5% of physmem and a minimum of 10. 
727 */ 728 reap_cnt = MAX(MIN(physmem / 20, active_ptables / 20), 10); 729 730 /* 731 * Note: htable_dont_cache should be set at the time of 732 * invoking htable_free() 733 */ 734 atomic_inc_32(&htable_dont_cache); 735 /* 736 * Let htable_steal() do the work, we just call htable_free() 737 */ 738 XPV_DISALLOW_MIGRATE(); 739 list = htable_steal(reap_cnt, B_TRUE); 740 XPV_ALLOW_MIGRATE(); 741 while ((ht = list) != NULL) { 742 list = ht->ht_next; 743 HATSTAT_INC(hs_reaped); 744 htable_free(ht); 745 } 746 atomic_dec_32(&htable_dont_cache); 747 748 /* 749 * Free up excess reserves 750 */ 751 htable_adjust_reserve(); 752 hment_adjust_reserve(); 753 } 754 755 /* 756 * Allocate an htable, stealing one or using the reserve if necessary 757 */ 758 static htable_t * 759 htable_alloc( 760 hat_t *hat, 761 uintptr_t vaddr, 762 level_t level, 763 htable_t *shared) 764 { 765 htable_t *ht = NULL; 766 uint_t is_vlp; 767 uint_t is_bare = 0; 768 uint_t need_to_zero = 1; 769 int kmflags = (can_steal_post_boot ? KM_NOSLEEP : KM_SLEEP); 770 771 if (level < 0 || level > TOP_LEVEL(hat)) 772 panic("htable_alloc(): level %d out of range\n", level); 773 774 is_vlp = (hat->hat_flags & HAT_VLP) && level == VLP_LEVEL; 775 if (is_vlp || shared != NULL) 776 is_bare = 1; 777 778 /* 779 * First reuse a cached htable from the hat_ht_cached field, this 780 * avoids unnecessary trips through kmem/page allocators. 781 */ 782 if (hat->hat_ht_cached != NULL && !is_bare) { 783 hat_enter(hat); 784 ht = hat->hat_ht_cached; 785 if (ht != NULL) { 786 hat->hat_ht_cached = ht->ht_next; 787 need_to_zero = 0; 788 /* XX64 ASSERT() they're all zero somehow */ 789 ASSERT(ht->ht_pfn != PFN_INVALID); 790 } 791 hat_exit(hat); 792 } 793 794 if (ht == NULL) { 795 /* 796 * Allocate an htable, possibly refilling the reserves. 797 */ 798 if (USE_HAT_RESERVES()) { 799 ht = htable_get_reserve(); 800 } else { 801 /* 802 * Donate successful htable allocations to the reserve. 803 */ 804 for (;;) { 805 ht = kmem_cache_alloc(htable_cache, kmflags); 806 if (ht == NULL) 807 break; 808 ht->ht_pfn = PFN_INVALID; 809 if (USE_HAT_RESERVES() || 810 htable_reserve_cnt >= htable_reserve_amount) 811 break; 812 htable_put_reserve(ht); 813 } 814 } 815 816 /* 817 * allocate a page for the hardware page table if needed 818 */ 819 if (ht != NULL && !is_bare) { 820 ht->ht_hat = hat; 821 ht->ht_pfn = ptable_alloc((uintptr_t)ht); 822 if (ht->ht_pfn == PFN_INVALID) { 823 if (USE_HAT_RESERVES()) 824 htable_put_reserve(ht); 825 else 826 kmem_cache_free(htable_cache, ht); 827 ht = NULL; 828 } 829 } 830 } 831 832 /* 833 * If allocations failed, kick off a kmem_reap() and resort to 834 * htable steal(). We may spin here if the system is very low on 835 * memory. If the kernel itself has consumed all memory and kmem_reap() 836 * can't free up anything, then we'll really get stuck here. 837 * That should only happen in a system where the administrator has 838 * misconfigured VM parameters via /etc/system. 839 */ 840 while (ht == NULL && can_steal_post_boot) { 841 kmem_reap(); 842 ht = htable_steal(1, B_FALSE); 843 HATSTAT_INC(hs_steals); 844 845 /* 846 * If we stole for a bare htable, release the pagetable page. 
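 * Bare htables (VLP or shared-PFN) never own a pagetable page of
 * their own, so the stolen page isn't needed.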
847 */ 848 if (ht != NULL) { 849 if (is_bare) { 850 ptable_free(ht->ht_pfn); 851 ht->ht_pfn = PFN_INVALID; 852 #if defined(__xpv) && defined(__amd64) 853 /* 854 * make stolen page table writable again in kpm 855 */ 856 } else if (kpm_vbase && xen_kpm_page(ht->ht_pfn, 857 PT_VALID | PT_WRITABLE) < 0) { 858 panic("failure making kpm r/w pfn=0x%lx", 859 ht->ht_pfn); 860 #endif 861 } 862 } 863 } 864 865 /* 866 * All attempts to allocate or steal failed. This should only happen 867 * if we run out of memory during boot, due perhaps to a huge 868 * boot_archive. At this point there's no way to continue. 869 */ 870 if (ht == NULL) 871 panic("htable_alloc(): couldn't steal\n"); 872 873 #if defined(__amd64) && defined(__xpv) 874 /* 875 * Under the 64-bit hypervisor, we have 2 top level page tables. 876 * If this allocation fails, we'll resort to stealing. 877 * We use the stolen page indirectly, by freeing the 878 * stolen htable first. 879 */ 880 if (level == mmu.max_level) { 881 for (;;) { 882 htable_t *stolen; 883 884 hat->hat_user_ptable = ptable_alloc((uintptr_t)ht + 1); 885 if (hat->hat_user_ptable != PFN_INVALID) 886 break; 887 stolen = htable_steal(1, B_FALSE); 888 if (stolen == NULL) 889 panic("2nd steal ptable failed\n"); 890 htable_free(stolen); 891 } 892 block_zero_no_xmm(kpm_vbase + pfn_to_pa(hat->hat_user_ptable), 893 MMU_PAGESIZE); 894 } 895 #endif 896 897 /* 898 * Shared page tables have all entries locked and entries may not 899 * be added or deleted. 900 */ 901 ht->ht_flags = 0; 902 if (shared != NULL) { 903 ASSERT(shared->ht_valid_cnt > 0); 904 ht->ht_flags |= HTABLE_SHARED_PFN; 905 ht->ht_pfn = shared->ht_pfn; 906 ht->ht_lock_cnt = 0; 907 ht->ht_valid_cnt = 0; /* updated in hat_share() */ 908 ht->ht_shares = shared; 909 need_to_zero = 0; 910 } else { 911 ht->ht_shares = NULL; 912 ht->ht_lock_cnt = 0; 913 ht->ht_valid_cnt = 0; 914 } 915 916 /* 917 * setup flags, etc. for VLP htables 918 */ 919 if (is_vlp) { 920 ht->ht_flags |= HTABLE_VLP; 921 ASSERT(ht->ht_pfn == PFN_INVALID); 922 need_to_zero = 0; 923 } 924 925 /* 926 * fill in the htable 927 */ 928 ht->ht_hat = hat; 929 ht->ht_parent = NULL; 930 ht->ht_vaddr = vaddr; 931 ht->ht_level = level; 932 ht->ht_busy = 1; 933 ht->ht_next = NULL; 934 ht->ht_prev = NULL; 935 936 /* 937 * Zero out any freshly allocated page table 938 */ 939 if (need_to_zero) 940 x86pte_zero(ht, 0, mmu.ptes_per_table); 941 942 #if defined(__amd64) && defined(__xpv) 943 if (!is_bare && kpm_vbase) { 944 (void) xen_kpm_page(ht->ht_pfn, PT_VALID); 945 if (level == mmu.max_level) 946 (void) xen_kpm_page(hat->hat_user_ptable, PT_VALID); 947 } 948 #endif 949 950 return (ht); 951 } 952 953 /* 954 * Free up an htable, either to a hat's cached list, the reserves or 955 * back to kmem. 956 */ 957 static void 958 htable_free(htable_t *ht) 959 { 960 hat_t *hat = ht->ht_hat; 961 962 /* 963 * If the process isn't exiting, cache the free htable in the hat 964 * structure. We always do this for the boot time reserve. We don't 965 * do this if the hat is exiting or we are stealing/reaping htables. 966 */ 967 if (hat != NULL && 968 !(ht->ht_flags & HTABLE_SHARED_PFN) && 969 (use_boot_reserve || 970 (!(hat->hat_flags & HAT_FREEING) && !htable_dont_cache))) { 971 ASSERT((ht->ht_flags & HTABLE_VLP) == 0); 972 ASSERT(ht->ht_pfn != PFN_INVALID); 973 hat_enter(hat); 974 ht->ht_next = hat->hat_ht_cached; 975 hat->hat_ht_cached = ht; 976 hat_exit(hat); 977 return; 978 } 979 980 /* 981 * If we have a hardware page table, free it. 
	 * We don't free page tables that are accessed by sharing.
	 */
	if (ht->ht_flags & HTABLE_SHARED_PFN) {
		ASSERT(ht->ht_pfn != PFN_INVALID);
	} else if (!(ht->ht_flags & HTABLE_VLP)) {
		ptable_free(ht->ht_pfn);
#if defined(__amd64) && defined(__xpv)
		if (ht->ht_level == mmu.max_level && hat != NULL) {
			ptable_free(hat->hat_user_ptable);
			hat->hat_user_ptable = PFN_INVALID;
		}
#endif
	}
	ht->ht_pfn = PFN_INVALID;

	/*
	 * Free it or put into reserves.
	 */
	if (USE_HAT_RESERVES() || htable_reserve_cnt < htable_reserve_amount) {
		htable_put_reserve(ht);
	} else {
		kmem_cache_free(htable_cache, ht);
		htable_adjust_reserve();
	}
}


/*
 * This is called when a hat is being destroyed or swapped out. We reap all
 * the remaining htables in the hat cache. If destroying, all left over
 * htables are also destroyed.
 *
 * We also don't need to invalidate any of the PTPs nor do any demapping.
 */
void
htable_purge_hat(hat_t *hat)
{
	htable_t *ht;
	int h;

	/*
	 * Purge the htable cache if just reaping.
	 */
	if (!(hat->hat_flags & HAT_FREEING)) {
		atomic_inc_32(&htable_dont_cache);
		for (;;) {
			hat_enter(hat);
			ht = hat->hat_ht_cached;
			if (ht == NULL) {
				hat_exit(hat);
				break;
			}
			hat->hat_ht_cached = ht->ht_next;
			hat_exit(hat);
			htable_free(ht);
		}
		atomic_dec_32(&htable_dont_cache);
		return;
	}

	/*
	 * if freeing, no locking is needed
	 */
	while ((ht = hat->hat_ht_cached) != NULL) {
		hat->hat_ht_cached = ht->ht_next;
		htable_free(ht);
	}

	/*
	 * walk thru the htable hash table and free all the htables in it.
	 */
	for (h = 0; h < hat->hat_num_hash; ++h) {
		while ((ht = hat->hat_ht_hash[h]) != NULL) {
			if (ht->ht_next)
				ht->ht_next->ht_prev = ht->ht_prev;

			if (ht->ht_prev) {
				ht->ht_prev->ht_next = ht->ht_next;
			} else {
				ASSERT(hat->hat_ht_hash[h] == ht);
				hat->hat_ht_hash[h] = ht->ht_next;
			}
			htable_free(ht);
		}
	}
}

/*
 * Unlink an entry for a table at vaddr and level out of the existing table
 * one level higher. We are always holding the HASH_ENTER() when doing this.
 */
static void
unlink_ptp(htable_t *higher, htable_t *old, uintptr_t vaddr)
{
	uint_t entry = htable_va2entry(vaddr, higher);
	x86pte_t expect = MAKEPTP(old->ht_pfn, old->ht_level);
	x86pte_t found;
	hat_t *hat = old->ht_hat;

	ASSERT(higher->ht_busy > 0);
	ASSERT(higher->ht_valid_cnt > 0);
	ASSERT(old->ht_valid_cnt == 0);
	found = x86pte_cas(higher, entry, expect, 0);
#ifdef __xpv
	/*
	 * This is weird, but Xen apparently automatically unlinks empty
	 * pagetables from the upper page table. So allow PTP to be 0 already.
	 */
	if (found != expect && found != 0)
#else
	if (found != expect)
#endif
		panic("Bad PTP found=" FMT_PTE ", expected=" FMT_PTE,
		    found, expect);

	/*
	 * When a top level VLP page table entry changes, we must issue
	 * a reload of cr3 on all processors.
	 *
	 * If we don't need to do that, then we still have to INVLPG against
	 * an address covered by the inner page table, as the latest processors
	 * have TLB-like caches for non-leaf page table entries.
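	 * hat_tlb_inval() below handles both cases: DEMAP_ALL_ADDR forces
	 * the cr3 reload, otherwise only the single address is invalidated.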
	 */
	if (!(hat->hat_flags & HAT_FREEING)) {
		hat_tlb_inval(hat, (higher->ht_flags & HTABLE_VLP) ?
		    DEMAP_ALL_ADDR : old->ht_vaddr);
	}

	HTABLE_DEC(higher->ht_valid_cnt);
}

/*
 * Link an entry for a new table at vaddr and level into the existing table
 * one level higher. We are always holding the HASH_ENTER() when doing this.
 */
static void
link_ptp(htable_t *higher, htable_t *new, uintptr_t vaddr)
{
	uint_t entry = htable_va2entry(vaddr, higher);
	x86pte_t newptp = MAKEPTP(new->ht_pfn, new->ht_level);
	x86pte_t found;

	ASSERT(higher->ht_busy > 0);

	ASSERT(new->ht_level != mmu.max_level);

	HTABLE_INC(higher->ht_valid_cnt);

	found = x86pte_cas(higher, entry, 0, newptp);
	if ((found & ~PT_REF) != 0)
		panic("HAT: ptp not 0, found=" FMT_PTE, found);

	/*
	 * When any top level VLP page table entry changes, we must issue
	 * a reload of cr3 on all processors using it.
	 * We also need to do this for the kernel hat on PAE 32 bit kernel.
	 */
	if (
#ifdef __i386
	    (higher->ht_hat == kas.a_hat && higher->ht_level == VLP_LEVEL) ||
#endif
	    (higher->ht_flags & HTABLE_VLP))
		hat_tlb_inval(higher->ht_hat, DEMAP_ALL_ADDR);
}

/*
 * Release of hold on an htable. If this is the last use and the pagetable
 * is empty we may want to free it, then recursively look at the pagetable
 * above it. The recursion is handled by the outer while() loop.
 *
 * On the metal, during process exit, we don't bother unlinking the tables from
 * upper level pagetables. They are instead handled in bulk by hat_free_end().
 * We can't do this on the hypervisor as we need the page table to be
 * implicitly unpinned before it goes to the free page lists. This can't
 * happen unless we fully unlink it from the page table hierarchy.
 */
void
htable_release(htable_t *ht)
{
	uint_t hashval;
	htable_t *shared;
	htable_t *higher;
	hat_t *hat;
	uintptr_t va;
	level_t level;

	while (ht != NULL) {
		shared = NULL;
		for (;;) {
			hat = ht->ht_hat;
			va = ht->ht_vaddr;
			level = ht->ht_level;
			hashval = HTABLE_HASH(hat, va, level);

			/*
			 * The common case is that this isn't the last use of
			 * an htable so we don't want to free the htable.
			 */
			HTABLE_ENTER(hashval);
			ASSERT(ht->ht_valid_cnt >= 0);
			ASSERT(ht->ht_busy > 0);
			if (ht->ht_valid_cnt > 0)
				break;
			if (ht->ht_busy > 1)
				break;
			ASSERT(ht->ht_lock_cnt == 0);

#if !defined(__xpv)
			/*
			 * we always release empty shared htables
			 */
			if (!(ht->ht_flags & HTABLE_SHARED_PFN)) {

				/*
				 * don't release if in address space tear down
				 */
				if (hat->hat_flags & HAT_FREEING)
					break;

				/*
				 * At and above max_page_level, free if it's for
				 * a boot-time kernel mapping below kernelbase.
				 */
				if (level >= mmu.max_page_level &&
				    (hat != kas.a_hat || va >= kernelbase))
					break;
			}
#endif /* __xpv */

			/*
			 * Remember if we destroy an htable that shares its PFN
			 * from elsewhere.
			 */
			if (ht->ht_flags & HTABLE_SHARED_PFN) {
				ASSERT(shared == NULL);
				shared = ht->ht_shares;
				HATSTAT_INC(hs_htable_unshared);
			}

			/*
			 * Handle release of a table and freeing the htable_t.
			 * Unlink it from the table higher (i.e. ht_parent).
1224 */ 1225 higher = ht->ht_parent; 1226 ASSERT(higher != NULL); 1227 1228 /* 1229 * Unlink the pagetable. 1230 */ 1231 unlink_ptp(higher, ht, va); 1232 1233 /* 1234 * remove this htable from its hash list 1235 */ 1236 if (ht->ht_next) 1237 ht->ht_next->ht_prev = ht->ht_prev; 1238 1239 if (ht->ht_prev) { 1240 ht->ht_prev->ht_next = ht->ht_next; 1241 } else { 1242 ASSERT(hat->hat_ht_hash[hashval] == ht); 1243 hat->hat_ht_hash[hashval] = ht->ht_next; 1244 } 1245 HTABLE_EXIT(hashval); 1246 htable_free(ht); 1247 ht = higher; 1248 } 1249 1250 ASSERT(ht->ht_busy >= 1); 1251 --ht->ht_busy; 1252 HTABLE_EXIT(hashval); 1253 1254 /* 1255 * If we released a shared htable, do a release on the htable 1256 * from which it shared 1257 */ 1258 ht = shared; 1259 } 1260 } 1261 1262 /* 1263 * Find the htable for the pagetable at the given level for the given address. 1264 * If found acquires a hold that eventually needs to be htable_release()d 1265 */ 1266 htable_t * 1267 htable_lookup(hat_t *hat, uintptr_t vaddr, level_t level) 1268 { 1269 uintptr_t base; 1270 uint_t hashval; 1271 htable_t *ht = NULL; 1272 1273 ASSERT(level >= 0); 1274 ASSERT(level <= TOP_LEVEL(hat)); 1275 1276 if (level == TOP_LEVEL(hat)) { 1277 #if defined(__amd64) 1278 /* 1279 * 32 bit address spaces on 64 bit kernels need to check 1280 * for overflow of the 32 bit address space 1281 */ 1282 if ((hat->hat_flags & HAT_VLP) && vaddr >= ((uint64_t)1 << 32)) 1283 return (NULL); 1284 #endif 1285 base = 0; 1286 } else { 1287 base = vaddr & LEVEL_MASK(level + 1); 1288 } 1289 1290 hashval = HTABLE_HASH(hat, base, level); 1291 HTABLE_ENTER(hashval); 1292 for (ht = hat->hat_ht_hash[hashval]; ht; ht = ht->ht_next) { 1293 if (ht->ht_hat == hat && 1294 ht->ht_vaddr == base && 1295 ht->ht_level == level) 1296 break; 1297 } 1298 if (ht) 1299 ++ht->ht_busy; 1300 1301 HTABLE_EXIT(hashval); 1302 return (ht); 1303 } 1304 1305 /* 1306 * Acquires a hold on a known htable (from a locked hment entry). 1307 */ 1308 void 1309 htable_acquire(htable_t *ht) 1310 { 1311 hat_t *hat = ht->ht_hat; 1312 level_t level = ht->ht_level; 1313 uintptr_t base = ht->ht_vaddr; 1314 uint_t hashval = HTABLE_HASH(hat, base, level); 1315 1316 HTABLE_ENTER(hashval); 1317 #ifdef DEBUG 1318 /* 1319 * make sure the htable is there 1320 */ 1321 { 1322 htable_t *h; 1323 1324 for (h = hat->hat_ht_hash[hashval]; 1325 h && h != ht; 1326 h = h->ht_next) 1327 ; 1328 ASSERT(h == ht); 1329 } 1330 #endif /* DEBUG */ 1331 ++ht->ht_busy; 1332 HTABLE_EXIT(hashval); 1333 } 1334 1335 /* 1336 * Find the htable for the pagetable at the given level for the given address. 1337 * If found acquires a hold that eventually needs to be htable_release()d 1338 * If not found the table is created. 1339 * 1340 * Since we can't hold a hash table mutex during allocation, we have to 1341 * drop it and redo the search on a create. Then we may have to free the newly 1342 * allocated htable if another thread raced in and created it ahead of us. 1343 */ 1344 htable_t * 1345 htable_create( 1346 hat_t *hat, 1347 uintptr_t vaddr, 1348 level_t level, 1349 htable_t *shared) 1350 { 1351 uint_t h; 1352 level_t l; 1353 uintptr_t base; 1354 htable_t *ht; 1355 htable_t *higher = NULL; 1356 htable_t *new = NULL; 1357 1358 if (level < 0 || level > TOP_LEVEL(hat)) 1359 panic("htable_create(): level %d out of range\n", level); 1360 1361 /* 1362 * Create the page tables in top down order. 
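	 * Each new table is linked into the table one level above it, so
	 * the parent must be found or created before its child.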
1363 */ 1364 for (l = TOP_LEVEL(hat); l >= level; --l) { 1365 new = NULL; 1366 if (l == TOP_LEVEL(hat)) 1367 base = 0; 1368 else 1369 base = vaddr & LEVEL_MASK(l + 1); 1370 1371 h = HTABLE_HASH(hat, base, l); 1372 try_again: 1373 /* 1374 * look up the htable at this level 1375 */ 1376 HTABLE_ENTER(h); 1377 if (l == TOP_LEVEL(hat)) { 1378 ht = hat->hat_htable; 1379 } else { 1380 for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) { 1381 ASSERT(ht->ht_hat == hat); 1382 if (ht->ht_vaddr == base && 1383 ht->ht_level == l) 1384 break; 1385 } 1386 } 1387 1388 /* 1389 * if we found the htable, increment its busy cnt 1390 * and if we had allocated a new htable, free it. 1391 */ 1392 if (ht != NULL) { 1393 /* 1394 * If we find a pre-existing shared table, it must 1395 * share from the same place. 1396 */ 1397 if (l == level && shared && ht->ht_shares && 1398 ht->ht_shares != shared) { 1399 panic("htable shared from wrong place " 1400 "found htable=%p shared=%p", 1401 (void *)ht, (void *)shared); 1402 } 1403 ++ht->ht_busy; 1404 HTABLE_EXIT(h); 1405 if (new) 1406 htable_free(new); 1407 if (higher != NULL) 1408 htable_release(higher); 1409 higher = ht; 1410 1411 /* 1412 * if we didn't find it on the first search 1413 * allocate a new one and search again 1414 */ 1415 } else if (new == NULL) { 1416 HTABLE_EXIT(h); 1417 new = htable_alloc(hat, base, l, 1418 l == level ? shared : NULL); 1419 goto try_again; 1420 1421 /* 1422 * 2nd search and still not there, use "new" table 1423 * Link new table into higher, when not at top level. 1424 */ 1425 } else { 1426 ht = new; 1427 if (higher != NULL) { 1428 link_ptp(higher, ht, base); 1429 ht->ht_parent = higher; 1430 } 1431 ht->ht_next = hat->hat_ht_hash[h]; 1432 ASSERT(ht->ht_prev == NULL); 1433 if (hat->hat_ht_hash[h]) 1434 hat->hat_ht_hash[h]->ht_prev = ht; 1435 hat->hat_ht_hash[h] = ht; 1436 HTABLE_EXIT(h); 1437 1438 /* 1439 * Note we don't do htable_release(higher). 1440 * That happens recursively when "new" is removed by 1441 * htable_release() or htable_steal(). 1442 */ 1443 higher = ht; 1444 1445 /* 1446 * If we just created a new shared page table we 1447 * increment the shared htable's busy count, so that 1448 * it can't be the victim of a steal even if it's empty. 1449 */ 1450 if (l == level && shared) { 1451 (void) htable_lookup(shared->ht_hat, 1452 shared->ht_vaddr, shared->ht_level); 1453 HATSTAT_INC(hs_htable_shared); 1454 } 1455 } 1456 } 1457 1458 return (ht); 1459 } 1460 1461 /* 1462 * Inherit initial pagetables from the boot program. On the 64-bit 1463 * hypervisor we also temporarily mark the p_index field of page table 1464 * pages, so we know not to try making them writable in seg_kpm. 
1465 */ 1466 void 1467 htable_attach( 1468 hat_t *hat, 1469 uintptr_t base, 1470 level_t level, 1471 htable_t *parent, 1472 pfn_t pfn) 1473 { 1474 htable_t *ht; 1475 uint_t h; 1476 uint_t i; 1477 x86pte_t pte; 1478 x86pte_t *ptep; 1479 page_t *pp; 1480 extern page_t *boot_claim_page(pfn_t); 1481 1482 ht = htable_get_reserve(); 1483 if (level == mmu.max_level) 1484 kas.a_hat->hat_htable = ht; 1485 ht->ht_hat = hat; 1486 ht->ht_parent = parent; 1487 ht->ht_vaddr = base; 1488 ht->ht_level = level; 1489 ht->ht_busy = 1; 1490 ht->ht_next = NULL; 1491 ht->ht_prev = NULL; 1492 ht->ht_flags = 0; 1493 ht->ht_pfn = pfn; 1494 ht->ht_lock_cnt = 0; 1495 ht->ht_valid_cnt = 0; 1496 if (parent != NULL) 1497 ++parent->ht_busy; 1498 1499 h = HTABLE_HASH(hat, base, level); 1500 HTABLE_ENTER(h); 1501 ht->ht_next = hat->hat_ht_hash[h]; 1502 ASSERT(ht->ht_prev == NULL); 1503 if (hat->hat_ht_hash[h]) 1504 hat->hat_ht_hash[h]->ht_prev = ht; 1505 hat->hat_ht_hash[h] = ht; 1506 HTABLE_EXIT(h); 1507 1508 /* 1509 * make sure the page table physical page is not FREE 1510 */ 1511 if (page_resv(1, KM_NOSLEEP) == 0) 1512 panic("page_resv() failed in ptable alloc"); 1513 1514 pp = boot_claim_page(pfn); 1515 ASSERT(pp != NULL); 1516 1517 /* 1518 * Page table pages that were allocated by dboot or 1519 * in very early startup didn't go through boot_mapin() 1520 * and so won't have vnode/offsets. Fix that here. 1521 */ 1522 if (pp->p_vnode == NULL) { 1523 /* match offset calculation in page_get_physical() */ 1524 u_offset_t offset = (uintptr_t)ht; 1525 if (offset > kernelbase) 1526 offset -= kernelbase; 1527 offset <<= MMU_PAGESHIFT; 1528 #if defined(__amd64) 1529 offset += mmu.hole_start; /* something in VA hole */ 1530 #else 1531 offset += 1ULL << 40; /* something > 4 Gig */ 1532 #endif 1533 ASSERT(page_exists(&kvp, offset) == NULL); 1534 (void) page_hashin(pp, &kvp, offset, NULL); 1535 } 1536 page_downgrade(pp); 1537 #if defined(__xpv) && defined(__amd64) 1538 /* 1539 * Record in the page_t that is a pagetable for segkpm setup. 1540 */ 1541 if (kpm_vbase) 1542 pp->p_index = 1; 1543 #endif 1544 1545 /* 1546 * Count valid mappings and recursively attach lower level pagetables. 1547 */ 1548 ptep = kbm_remap_window(pfn_to_pa(pfn), 0); 1549 for (i = 0; i < HTABLE_NUM_PTES(ht); ++i) { 1550 if (mmu.pae_hat) 1551 pte = ptep[i]; 1552 else 1553 pte = ((x86pte32_t *)ptep)[i]; 1554 if (!IN_HYPERVISOR_VA(base) && PTE_ISVALID(pte)) { 1555 ++ht->ht_valid_cnt; 1556 if (!PTE_ISPAGE(pte, level)) { 1557 htable_attach(hat, base, level - 1, 1558 ht, PTE2PFN(pte, level)); 1559 ptep = kbm_remap_window(pfn_to_pa(pfn), 0); 1560 } 1561 } 1562 base += LEVEL_SIZE(level); 1563 if (base == mmu.hole_start) 1564 base = (mmu.hole_end + MMU_PAGEOFFSET) & MMU_PAGEMASK; 1565 } 1566 1567 /* 1568 * As long as all the mappings we had were below kernel base 1569 * we can release the htable. 1570 */ 1571 if (base < kernelbase) 1572 htable_release(ht); 1573 } 1574 1575 /* 1576 * Walk through a given htable looking for the first valid entry. This 1577 * routine takes both a starting and ending address. The starting address 1578 * is required to be within the htable provided by the caller, but there is 1579 * no such restriction on the ending address. 1580 * 1581 * If the routine finds a valid entry in the htable (at or beyond the 1582 * starting address), the PTE (and its address) will be returned. 1583 * This PTE may correspond to either a page or a pagetable - it is the 1584 * caller's responsibility to determine which. 
If no valid entry is 1585 * found, 0 (and invalid PTE) and the next unexamined address will be 1586 * returned. 1587 * 1588 * The loop has been carefully coded for optimization. 1589 */ 1590 static x86pte_t 1591 htable_scan(htable_t *ht, uintptr_t *vap, uintptr_t eaddr) 1592 { 1593 uint_t e; 1594 x86pte_t found_pte = (x86pte_t)0; 1595 caddr_t pte_ptr; 1596 caddr_t end_pte_ptr; 1597 int l = ht->ht_level; 1598 uintptr_t va = *vap & LEVEL_MASK(l); 1599 size_t pgsize = LEVEL_SIZE(l); 1600 1601 ASSERT(va >= ht->ht_vaddr); 1602 ASSERT(va <= HTABLE_LAST_PAGE(ht)); 1603 1604 /* 1605 * Compute the starting index and ending virtual address 1606 */ 1607 e = htable_va2entry(va, ht); 1608 1609 /* 1610 * The following page table scan code knows that the valid 1611 * bit of a PTE is in the lowest byte AND that x86 is little endian!! 1612 */ 1613 pte_ptr = (caddr_t)x86pte_access_pagetable(ht, 0); 1614 end_pte_ptr = (caddr_t)PT_INDEX_PTR(pte_ptr, HTABLE_NUM_PTES(ht)); 1615 pte_ptr = (caddr_t)PT_INDEX_PTR((x86pte_t *)pte_ptr, e); 1616 while (!PTE_ISVALID(*pte_ptr)) { 1617 va += pgsize; 1618 if (va >= eaddr) 1619 break; 1620 pte_ptr += mmu.pte_size; 1621 ASSERT(pte_ptr <= end_pte_ptr); 1622 if (pte_ptr == end_pte_ptr) 1623 break; 1624 } 1625 1626 /* 1627 * if we found a valid PTE, load the entire PTE 1628 */ 1629 if (va < eaddr && pte_ptr != end_pte_ptr) 1630 found_pte = GET_PTE((x86pte_t *)pte_ptr); 1631 x86pte_release_pagetable(ht); 1632 1633 #if defined(__amd64) 1634 /* 1635 * deal with VA hole on amd64 1636 */ 1637 if (l == mmu.max_level && va >= mmu.hole_start && va <= mmu.hole_end) 1638 va = mmu.hole_end + va - mmu.hole_start; 1639 #endif /* __amd64 */ 1640 1641 *vap = va; 1642 return (found_pte); 1643 } 1644 1645 /* 1646 * Find the address and htable for the first populated translation at or 1647 * above the given virtual address. The caller may also specify an upper 1648 * limit to the address range to search. Uses level information to quickly 1649 * skip unpopulated sections of virtual address spaces. 1650 * 1651 * If not found returns NULL. When found, returns the htable and virt addr 1652 * and has a hold on the htable. 1653 */ 1654 x86pte_t 1655 htable_walk( 1656 struct hat *hat, 1657 htable_t **htp, 1658 uintptr_t *vaddr, 1659 uintptr_t eaddr) 1660 { 1661 uintptr_t va = *vaddr; 1662 htable_t *ht; 1663 htable_t *prev = *htp; 1664 level_t l; 1665 level_t max_mapped_level; 1666 x86pte_t pte; 1667 1668 ASSERT(eaddr > va); 1669 1670 /* 1671 * If this is a user address, then we know we need not look beyond 1672 * kernelbase. 1673 */ 1674 ASSERT(hat == kas.a_hat || eaddr <= kernelbase || 1675 eaddr == HTABLE_WALK_TO_END); 1676 if (hat != kas.a_hat && eaddr == HTABLE_WALK_TO_END) 1677 eaddr = kernelbase; 1678 1679 /* 1680 * If we're coming in with a previous page table, search it first 1681 * without doing an htable_lookup(), this should be frequent. 1682 */ 1683 if (prev) { 1684 ASSERT(prev->ht_busy > 0); 1685 ASSERT(prev->ht_vaddr <= va); 1686 l = prev->ht_level; 1687 if (va <= HTABLE_LAST_PAGE(prev)) { 1688 pte = htable_scan(prev, &va, eaddr); 1689 1690 if (PTE_ISPAGE(pte, l)) { 1691 *vaddr = va; 1692 *htp = prev; 1693 return (pte); 1694 } 1695 } 1696 1697 /* 1698 * We found nothing in the htable provided by the caller, 1699 * so fall through and do the full search 1700 */ 1701 htable_release(prev); 1702 } 1703 1704 /* 1705 * Find the level of the largest pagesize used by this HAT. 
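 * This bounds how far we have to look before it is safe to skip an
 * entire upper level entry's worth of VA (see NEXT_ENTRY_VA below).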
1706 */ 1707 if (hat->hat_ism_pgcnt > 0) { 1708 max_mapped_level = mmu.umax_page_level; 1709 } else { 1710 max_mapped_level = 0; 1711 for (l = 1; l <= mmu.max_page_level; ++l) 1712 if (hat->hat_pages_mapped[l] != 0) 1713 max_mapped_level = l; 1714 } 1715 1716 while (va < eaddr && va >= *vaddr) { 1717 ASSERT(!IN_VA_HOLE(va)); 1718 1719 /* 1720 * Find lowest table with any entry for given address. 1721 */ 1722 for (l = 0; l <= TOP_LEVEL(hat); ++l) { 1723 ht = htable_lookup(hat, va, l); 1724 if (ht != NULL) { 1725 pte = htable_scan(ht, &va, eaddr); 1726 if (PTE_ISPAGE(pte, l)) { 1727 *vaddr = va; 1728 *htp = ht; 1729 return (pte); 1730 } 1731 htable_release(ht); 1732 break; 1733 } 1734 1735 /* 1736 * No htable at this level for the address. If there 1737 * is no larger page size that could cover it, we can 1738 * skip right to the start of the next page table. 1739 */ 1740 ASSERT(l < TOP_LEVEL(hat)); 1741 if (l >= max_mapped_level) { 1742 va = NEXT_ENTRY_VA(va, l + 1); 1743 if (va >= eaddr) 1744 break; 1745 } 1746 } 1747 } 1748 1749 *vaddr = 0; 1750 *htp = NULL; 1751 return (0); 1752 } 1753 1754 /* 1755 * Find the htable and page table entry index of the given virtual address 1756 * with pagesize at or below given level. 1757 * If not found returns NULL. When found, returns the htable, sets 1758 * entry, and has a hold on the htable. 1759 */ 1760 htable_t * 1761 htable_getpte( 1762 struct hat *hat, 1763 uintptr_t vaddr, 1764 uint_t *entry, 1765 x86pte_t *pte, 1766 level_t level) 1767 { 1768 htable_t *ht; 1769 level_t l; 1770 uint_t e; 1771 1772 ASSERT(level <= mmu.max_page_level); 1773 1774 for (l = 0; l <= level; ++l) { 1775 ht = htable_lookup(hat, vaddr, l); 1776 if (ht == NULL) 1777 continue; 1778 e = htable_va2entry(vaddr, ht); 1779 if (entry != NULL) 1780 *entry = e; 1781 if (pte != NULL) 1782 *pte = x86pte_get(ht, e); 1783 return (ht); 1784 } 1785 return (NULL); 1786 } 1787 1788 /* 1789 * Find the htable and page table entry index of the given virtual address. 1790 * There must be a valid page mapped at the given address. 1791 * If not found returns NULL. When found, returns the htable, sets 1792 * entry, and has a hold on the htable. 1793 */ 1794 htable_t * 1795 htable_getpage(struct hat *hat, uintptr_t vaddr, uint_t *entry) 1796 { 1797 htable_t *ht; 1798 uint_t e; 1799 x86pte_t pte; 1800 1801 ht = htable_getpte(hat, vaddr, &e, &pte, mmu.max_page_level); 1802 if (ht == NULL) 1803 return (NULL); 1804 1805 if (entry) 1806 *entry = e; 1807 1808 if (PTE_ISPAGE(pte, ht->ht_level)) 1809 return (ht); 1810 htable_release(ht); 1811 return (NULL); 1812 } 1813 1814 1815 void 1816 htable_init() 1817 { 1818 /* 1819 * To save on kernel VA usage, we avoid debug information in 32 bit 1820 * kernels. 
	 */
#if defined(__amd64)
	int kmem_flags = KMC_NOHASH;
#elif defined(__i386)
	int kmem_flags = KMC_NOHASH | KMC_NODEBUG;
#endif

	/*
	 * initialize kmem caches
	 */
	htable_cache = kmem_cache_create("htable_t",
	    sizeof (htable_t), 0, NULL, NULL,
	    htable_reap, NULL, hat_memload_arena, kmem_flags);
}

/*
 * get the pte index for the virtual address in the given htable's pagetable
 */
uint_t
htable_va2entry(uintptr_t va, htable_t *ht)
{
	level_t l = ht->ht_level;

	ASSERT(va >= ht->ht_vaddr);
	ASSERT(va <= HTABLE_LAST_PAGE(ht));
	return ((va >> LEVEL_SHIFT(l)) & (HTABLE_NUM_PTES(ht) - 1));
}

/*
 * Given an htable and the index of a pte in it, return the virtual address
 * of the page.
 */
uintptr_t
htable_e2va(htable_t *ht, uint_t entry)
{
	level_t l = ht->ht_level;
	uintptr_t va;

	ASSERT(entry < HTABLE_NUM_PTES(ht));
	va = ht->ht_vaddr + ((uintptr_t)entry << LEVEL_SHIFT(l));

	/*
	 * Need to skip over any VA hole in top level table
	 */
#if defined(__amd64)
	if (ht->ht_level == mmu.max_level && va >= mmu.hole_start)
		va += ((mmu.hole_end - mmu.hole_start) + 1);
#endif

	return (va);
}

/*
 * The code uses compare and swap instructions to read/write PTE's to
 * avoid atomicity problems, since PTEs can be 8 bytes on 32 bit systems;
 * on 64 bit systems the 8 byte accesses will naturally be atomic.
 *
 * The combination of using kpreempt_disable()/_enable() and the hci_mutex
 * is used to ensure that an interrupt won't overwrite a temporary mapping
 * while it's in use. If an interrupt thread tries to access a PTE, it will
 * yield briefly back to the pinned thread which holds the cpu's hci_mutex.
 */
void
x86pte_cpu_init(cpu_t *cpu)
{
	struct hat_cpu_info *hci;

	hci = kmem_zalloc(sizeof (*hci), KM_SLEEP);
	mutex_init(&hci->hci_mutex, NULL, MUTEX_DEFAULT, NULL);
	cpu->cpu_hat_info = hci;
}

void
x86pte_cpu_fini(cpu_t *cpu)
{
	struct hat_cpu_info *hci = cpu->cpu_hat_info;

	kmem_free(hci, sizeof (*hci));
	cpu->cpu_hat_info = NULL;
}

#ifdef __i386
/*
 * On 32 bit kernels, loading a 64 bit PTE is a little tricky
 */
x86pte_t
get_pte64(x86pte_t *ptr)
{
	volatile uint32_t *p = (uint32_t *)ptr;
	x86pte_t t;

	ASSERT(mmu.pae_hat != 0);
	for (;;) {
		t = p[0];
		t |= (uint64_t)p[1] << 32;
		if ((t & 0xffffffff) == p[0])
			return (t);
	}
}
#endif /* __i386 */

/*
 * Disable preemption and establish a mapping to the pagetable with the
 * given pfn. This is optimized for the case where it's the same
 * pfn we last referenced from this CPU.
 */
static x86pte_t *
x86pte_access_pagetable(htable_t *ht, uint_t index)
{
	/*
	 * VLP pagetables are contained in the hat_t
	 */
	if (ht->ht_flags & HTABLE_VLP)
		return (PT_INDEX_PTR(ht->ht_hat->hat_vlp_ptes, index));
	return (x86pte_mapin(ht->ht_pfn, index, ht));
}

/*
 * map the given pfn into the page table window.
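 * Uses the boot kbm window before khat_running, the kpm mapping when
 * available, and otherwise this CPU's private PWIN slot protected by
 * kpreempt_disable() and the CPU's hci_mutex.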
1940 */ 1941 /*ARGSUSED*/ 1942 x86pte_t * 1943 x86pte_mapin(pfn_t pfn, uint_t index, htable_t *ht) 1944 { 1945 x86pte_t *pteptr; 1946 x86pte_t pte = 0; 1947 x86pte_t newpte; 1948 int x; 1949 1950 ASSERT(pfn != PFN_INVALID); 1951 1952 if (!khat_running) { 1953 caddr_t va = kbm_remap_window(pfn_to_pa(pfn), 1); 1954 return (PT_INDEX_PTR(va, index)); 1955 } 1956 1957 /* 1958 * If kpm is available, use it. 1959 */ 1960 if (kpm_vbase) 1961 return (PT_INDEX_PTR(hat_kpm_pfn2va(pfn), index)); 1962 1963 /* 1964 * Disable preemption and grab the CPU's hci_mutex 1965 */ 1966 kpreempt_disable(); 1967 ASSERT(CPU->cpu_hat_info != NULL); 1968 mutex_enter(&CPU->cpu_hat_info->hci_mutex); 1969 x = PWIN_TABLE(CPU->cpu_id); 1970 pteptr = (x86pte_t *)PWIN_PTE_VA(x); 1971 #ifndef __xpv 1972 if (mmu.pae_hat) 1973 pte = *pteptr; 1974 else 1975 pte = *(x86pte32_t *)pteptr; 1976 #endif 1977 1978 newpte = MAKEPTE(pfn, 0) | mmu.pt_global | mmu.pt_nx; 1979 1980 /* 1981 * For hardware we can use a writable mapping. 1982 */ 1983 #ifdef __xpv 1984 if (IN_XPV_PANIC()) 1985 #endif 1986 newpte |= PT_WRITABLE; 1987 1988 if (!PTE_EQUIV(newpte, pte)) { 1989 1990 #ifdef __xpv 1991 if (!IN_XPV_PANIC()) { 1992 xen_map(newpte, PWIN_VA(x)); 1993 } else 1994 #endif 1995 { 1996 XPV_ALLOW_PAGETABLE_UPDATES(); 1997 if (mmu.pae_hat) 1998 *pteptr = newpte; 1999 else 2000 *(x86pte32_t *)pteptr = newpte; 2001 XPV_DISALLOW_PAGETABLE_UPDATES(); 2002 mmu_tlbflush_entry((caddr_t)(PWIN_VA(x))); 2003 } 2004 } 2005 return (PT_INDEX_PTR(PWIN_VA(x), index)); 2006 } 2007 2008 /* 2009 * Release access to a page table. 2010 */ 2011 static void 2012 x86pte_release_pagetable(htable_t *ht) 2013 { 2014 /* 2015 * nothing to do for VLP htables 2016 */ 2017 if (ht->ht_flags & HTABLE_VLP) 2018 return; 2019 2020 x86pte_mapout(); 2021 } 2022 2023 void 2024 x86pte_mapout(void) 2025 { 2026 if (kpm_vbase != NULL || !khat_running) 2027 return; 2028 2029 /* 2030 * Drop the CPU's hci_mutex and restore preemption. 2031 */ 2032 #ifdef __xpv 2033 if (!IN_XPV_PANIC()) { 2034 uintptr_t va; 2035 2036 /* 2037 * We need to always clear the mapping in case a page 2038 * that was once a page table page is ballooned out. 2039 */ 2040 va = (uintptr_t)PWIN_VA(PWIN_TABLE(CPU->cpu_id)); 2041 (void) HYPERVISOR_update_va_mapping(va, 0, 2042 UVMF_INVLPG | UVMF_LOCAL); 2043 } 2044 #endif 2045 mutex_exit(&CPU->cpu_hat_info->hci_mutex); 2046 kpreempt_enable(); 2047 } 2048 2049 /* 2050 * Atomic retrieval of a pagetable entry 2051 */ 2052 x86pte_t 2053 x86pte_get(htable_t *ht, uint_t entry) 2054 { 2055 x86pte_t pte; 2056 x86pte_t *ptep; 2057 2058 /* 2059 * Be careful that loading PAE entries in 32 bit kernel is atomic. 2060 */ 2061 ASSERT(entry < mmu.ptes_per_table); 2062 ptep = x86pte_access_pagetable(ht, entry); 2063 pte = GET_PTE(ptep); 2064 x86pte_release_pagetable(ht); 2065 return (pte); 2066 } 2067 2068 /* 2069 * Atomic unconditional set of a page table entry, it returns the previous 2070 * value. For pre-existing mappings if the PFN changes, then we don't care 2071 * about the old pte's REF / MOD bits. If the PFN remains the same, we leave 2072 * the MOD/REF bits unchanged. 2073 * 2074 * If asked to overwrite a link to a lower page table with a large page 2075 * mapping, this routine returns the special value of LPAGE_ERROR. This 2076 * allows the upper HAT layers to retry with a smaller mapping size. 
2077 */ 2078 x86pte_t 2079 x86pte_set(htable_t *ht, uint_t entry, x86pte_t new, void *ptr) 2080 { 2081 x86pte_t old; 2082 x86pte_t prev; 2083 x86pte_t *ptep; 2084 level_t l = ht->ht_level; 2085 x86pte_t pfn_mask = (l != 0) ? PT_PADDR_LGPG : PT_PADDR; 2086 x86pte_t n; 2087 uintptr_t addr = htable_e2va(ht, entry); 2088 hat_t *hat = ht->ht_hat; 2089 2090 ASSERT(new != 0); /* don't use to invalidate a PTE, see x86pte_update */ 2091 ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN)); 2092 if (ptr == NULL) 2093 ptep = x86pte_access_pagetable(ht, entry); 2094 else 2095 ptep = ptr; 2096 2097 /* 2098 * Install the new PTE. If remapping the same PFN, then 2099 * copy existing REF/MOD bits to new mapping. 2100 */ 2101 do { 2102 prev = GET_PTE(ptep); 2103 n = new; 2104 if (PTE_ISVALID(n) && (prev & pfn_mask) == (new & pfn_mask)) 2105 n |= prev & (PT_REF | PT_MOD); 2106 2107 /* 2108 * Another thread may have installed this mapping already, 2109 * flush the local TLB and be done. 2110 */ 2111 if (prev == n) { 2112 old = new; 2113 #ifdef __xpv 2114 if (!IN_XPV_PANIC()) 2115 xen_flush_va((caddr_t)addr); 2116 else 2117 #endif 2118 mmu_tlbflush_entry((caddr_t)addr); 2119 goto done; 2120 } 2121 2122 /* 2123 * Detect if we have a collision of installing a large 2124 * page mapping where there already is a lower page table. 2125 */ 2126 if (l > 0 && (prev & PT_VALID) && !(prev & PT_PAGESIZE)) { 2127 old = LPAGE_ERROR; 2128 goto done; 2129 } 2130 2131 XPV_ALLOW_PAGETABLE_UPDATES(); 2132 old = CAS_PTE(ptep, prev, n); 2133 XPV_DISALLOW_PAGETABLE_UPDATES(); 2134 } while (old != prev); 2135 2136 /* 2137 * Do a TLB demap if needed, ie. the old pte was valid. 2138 * 2139 * Note that a stale TLB writeback to the PTE here either can't happen 2140 * or doesn't matter. The PFN can only change for NOSYNC|NOCONSIST 2141 * mappings, but they were created with REF and MOD already set, so 2142 * no stale writeback will happen. 2143 * 2144 * Segmap is the only place where remaps happen on the same pfn and for 2145 * that we want to preserve the stale REF/MOD bits. 2146 */ 2147 if (old & PT_REF) 2148 hat_tlb_inval(hat, addr); 2149 2150 done: 2151 if (ptr == NULL) 2152 x86pte_release_pagetable(ht); 2153 return (old); 2154 } 2155 2156 /* 2157 * Atomic compare and swap of a page table entry. No TLB invalidates are done. 2158 * This is used for links between pagetables of different levels. 2159 * Note we always create these links with dirty/access set, so they should 2160 * never change. 2161 */ 2162 x86pte_t 2163 x86pte_cas(htable_t *ht, uint_t entry, x86pte_t old, x86pte_t new) 2164 { 2165 x86pte_t pte; 2166 x86pte_t *ptep; 2167 #ifdef __xpv 2168 /* 2169 * We can't use writable pagetables for upper level tables, so fake it. 2170 */ 2171 mmu_update_t t[2]; 2172 int cnt = 1; 2173 int count; 2174 maddr_t ma; 2175 2176 if (!IN_XPV_PANIC()) { 2177 ASSERT(!(ht->ht_flags & HTABLE_VLP)); /* no VLP yet */ 2178 ma = pa_to_ma(PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry)); 2179 t[0].ptr = ma | MMU_NORMAL_PT_UPDATE; 2180 t[0].val = new; 2181 2182 #if defined(__amd64) 2183 /* 2184 * On the 64-bit hypervisor we need to maintain the user mode 2185 * top page table too. 
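		 * Each such hat keeps that copy in hat_user_ptable, so the
		 * same PTP value is written into both tables.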

/*
 * Atomic compare and swap of a page table entry. No TLB invalidates are done.
 * This is used for links between pagetables of different levels.
 * Note we always create these links with dirty/access set, so they should
 * never change.
 */
x86pte_t
x86pte_cas(htable_t *ht, uint_t entry, x86pte_t old, x86pte_t new)
{
        x86pte_t pte;
        x86pte_t *ptep;
#ifdef __xpv
        /*
         * We can't use writable pagetables for upper level tables, so fake it.
         */
        mmu_update_t t[2];
        int cnt = 1;
        int count;
        maddr_t ma;

        if (!IN_XPV_PANIC()) {
                ASSERT(!(ht->ht_flags & HTABLE_VLP));   /* no VLP yet */
                ma = pa_to_ma(PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry));
                t[0].ptr = ma | MMU_NORMAL_PT_UPDATE;
                t[0].val = new;

#if defined(__amd64)
                /*
                 * On the 64-bit hypervisor we need to maintain the user mode
                 * top page table too.
                 */
                if (ht->ht_level == mmu.max_level && ht->ht_hat != kas.a_hat) {
                        ma = pa_to_ma(PT_INDEX_PHYSADDR(pfn_to_pa(
                            ht->ht_hat->hat_user_ptable), entry));
                        t[1].ptr = ma | MMU_NORMAL_PT_UPDATE;
                        t[1].val = new;
                        ++cnt;
                }
#endif  /* __amd64 */

                if (HYPERVISOR_mmu_update(t, cnt, &count, DOMID_SELF))
                        panic("HYPERVISOR_mmu_update() failed");
                ASSERT(count == cnt);
                return (old);
        }
#endif
        ptep = x86pte_access_pagetable(ht, entry);
        XPV_ALLOW_PAGETABLE_UPDATES();
        pte = CAS_PTE(ptep, old, new);
        XPV_DISALLOW_PAGETABLE_UPDATES();
        x86pte_release_pagetable(ht);
        return (pte);
}

/*
 * Invalidate a page table entry as long as it currently maps something that
 * matches the value determined by expect.
 *
 * If tlb is set, also invalidates any TLB entries.
 *
 * Returns the previous value of the PTE.
 */
x86pte_t
x86pte_inval(
        htable_t *ht,
        uint_t entry,
        x86pte_t expect,
        x86pte_t *pte_ptr,
        boolean_t tlb)
{
        x86pte_t *ptep;
        x86pte_t oldpte;
        x86pte_t found;

        ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN));
        ASSERT(ht->ht_level <= mmu.max_page_level);

        if (pte_ptr != NULL)
                ptep = pte_ptr;
        else
                ptep = x86pte_access_pagetable(ht, entry);

#if defined(__xpv)
        /*
         * If exit()ing just use HYPERVISOR_mmu_update(), as we can't be racing
         * with anything else.
         */
        if ((ht->ht_hat->hat_flags & HAT_FREEING) && !IN_XPV_PANIC()) {
                int count;
                mmu_update_t t[1];
                maddr_t ma;

                oldpte = GET_PTE(ptep);
                if (expect != 0 && (oldpte & PT_PADDR) != (expect & PT_PADDR))
                        goto done;
                ma = pa_to_ma(PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry));
                t[0].ptr = ma | MMU_NORMAL_PT_UPDATE;
                t[0].val = 0;
                if (HYPERVISOR_mmu_update(t, 1, &count, DOMID_SELF))
                        panic("HYPERVISOR_mmu_update() failed");
                ASSERT(count == 1);
                goto done;
        }
#endif /* __xpv */

        /*
         * Note that the loop is needed to handle changes due to h/w updating
         * of PT_MOD/PT_REF.
         */
        do {
                oldpte = GET_PTE(ptep);
                if (expect != 0 && (oldpte & PT_PADDR) != (expect & PT_PADDR))
                        goto done;
                XPV_ALLOW_PAGETABLE_UPDATES();
                found = CAS_PTE(ptep, oldpte, 0);
                XPV_DISALLOW_PAGETABLE_UPDATES();
        } while (found != oldpte);
        if (tlb && (oldpte & (PT_REF | PT_MOD)))
                hat_tlb_inval(ht->ht_hat, htable_e2va(ht, entry));

done:
        if (pte_ptr == NULL)
                x86pte_release_pagetable(ht);
        return (oldpte);
}
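
/*
 * Illustrative usage sketch (not part of the original source): a
 * hypothetical unmap path clears an entry only if it still refers to the
 * expected page and then examines the returned PTE to learn whether the
 * hardware had marked the mapping referenced or modified:
 *
 *	oldpte = x86pte_inval(ht, entry, expect, NULL, B_TRUE);
 *	if (oldpte & PT_MOD) {
 *		... the page was dirtied through this mapping ...
 *	}
 */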

/*
 * Change a page table entry if it currently matches the value in expect.
 */
x86pte_t
x86pte_update(
        htable_t *ht,
        uint_t entry,
        x86pte_t expect,
        x86pte_t new)
{
        x86pte_t *ptep;
        x86pte_t found;

        ASSERT(new != 0);
        ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN));
        ASSERT(ht->ht_level <= mmu.max_page_level);

        ptep = x86pte_access_pagetable(ht, entry);
        XPV_ALLOW_PAGETABLE_UPDATES();
        found = CAS_PTE(ptep, expect, new);
        XPV_DISALLOW_PAGETABLE_UPDATES();
        if (found == expect) {
                hat_tlb_inval(ht->ht_hat, htable_e2va(ht, entry));

                /*
                 * When removing write permission *and* clearing the
                 * MOD bit, check if a write happened via a stale
                 * TLB entry before the TLB shootdown finished.
                 *
                 * If it did happen, simply re-enable write permission and
                 * act like the original CAS failed.
                 */
                if ((expect & (PT_WRITABLE | PT_MOD)) == PT_WRITABLE &&
                    (new & (PT_WRITABLE | PT_MOD)) == 0 &&
                    (GET_PTE(ptep) & PT_MOD) != 0) {
                        do {
                                found = GET_PTE(ptep);
                                XPV_ALLOW_PAGETABLE_UPDATES();
                                found =
                                    CAS_PTE(ptep, found, found | PT_WRITABLE);
                                XPV_DISALLOW_PAGETABLE_UPDATES();
                        } while ((found & PT_WRITABLE) == 0);
                }
        }
        x86pte_release_pagetable(ht);
        return (found);
}

#ifndef __xpv
/*
 * Copy page tables - this is just a little more complicated than the
 * previous routines. Note that it's also not atomic! It is also never
 * used for VLP pagetables.
 */
void
x86pte_copy(htable_t *src, htable_t *dest, uint_t entry, uint_t count)
{
        caddr_t src_va;
        caddr_t dst_va;
        size_t size;
        x86pte_t *pteptr;
        x86pte_t pte;

        ASSERT(khat_running);
        ASSERT(!(dest->ht_flags & HTABLE_VLP));
        ASSERT(!(src->ht_flags & HTABLE_VLP));
        ASSERT(!(src->ht_flags & HTABLE_SHARED_PFN));
        ASSERT(!(dest->ht_flags & HTABLE_SHARED_PFN));

        /*
         * Acquire access to the CPU pagetable windows for the dest and source.
         */
        dst_va = (caddr_t)x86pte_access_pagetable(dest, entry);
        if (kpm_vbase) {
                src_va = (caddr_t)
                    PT_INDEX_PTR(hat_kpm_pfn2va(src->ht_pfn), entry);
        } else {
                uint_t x = PWIN_SRC(CPU->cpu_id);

                /*
                 * Finish defining the src pagetable mapping.
                 */
                src_va = (caddr_t)PT_INDEX_PTR(PWIN_VA(x), entry);
                pte = MAKEPTE(src->ht_pfn, 0) | mmu.pt_global | mmu.pt_nx;
                pteptr = (x86pte_t *)PWIN_PTE_VA(x);
                if (mmu.pae_hat)
                        *pteptr = pte;
                else
                        *(x86pte32_t *)pteptr = pte;
                mmu_tlbflush_entry((caddr_t)(PWIN_VA(x)));
        }

        /*
         * Now do the copy.
         */
        size = count << mmu.pte_size_shift;
        bcopy(src_va, dst_va, size);

        x86pte_release_pagetable(dest);
}

#else /* __xpv */

/*
 * The hypervisor only supports writable pagetables at level 0, so we have
 * to install these 1 by 1 the slow way.
 */
void
x86pte_copy(htable_t *src, htable_t *dest, uint_t entry, uint_t count)
{
        caddr_t src_va;
        x86pte_t pte;

        ASSERT(!IN_XPV_PANIC());
        src_va = (caddr_t)x86pte_access_pagetable(src, entry);
        while (count) {
                if (mmu.pae_hat)
                        pte = *(x86pte_t *)src_va;
                else
                        pte = *(x86pte32_t *)src_va;
                if (pte != 0) {
                        set_pteval(pfn_to_pa(dest->ht_pfn), entry,
                            dest->ht_level, pte);
#ifdef __amd64
                        if (dest->ht_level == mmu.max_level &&
                            htable_e2va(dest, entry) < HYPERVISOR_VIRT_END)
                                set_pteval(
                                    pfn_to_pa(dest->ht_hat->hat_user_ptable),
                                    entry, dest->ht_level, pte);
#endif
                }
                --count;
                ++entry;
                src_va += mmu.pte_size;
        }
        x86pte_release_pagetable(src);
}
#endif /* __xpv */
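
/*
 * Illustrative usage sketch (not part of the original source): a
 * hypothetical caller revoking write permission with x86pte_update()
 * treats any return value other than "expect" as a failed update
 * (including the stale-TLB MOD case that x86pte_update() repairs itself)
 * and re-reads the PTE before deciding what to do next:
 *
 *	found = x86pte_update(ht, entry, expect, expect & ~PT_WRITABLE);
 *	if (found != expect) {
 *		... the entry changed underneath us; re-fetch and retry ...
 *	}
 */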

/*
 * Zero page table entries - Note this doesn't use atomic stores!
 */
static void
x86pte_zero(htable_t *dest, uint_t entry, uint_t count)
{
        caddr_t dst_va;
        size_t size;
#ifdef __xpv
        int x;
        x86pte_t newpte;
#endif

        /*
         * Map in the page table to be zeroed.
         */
        ASSERT(!(dest->ht_flags & HTABLE_SHARED_PFN));
        ASSERT(!(dest->ht_flags & HTABLE_VLP));

        /*
         * On the hypervisor we don't use x86pte_access_pagetable() since
         * in this case the page is not pinned yet.
         */
#ifdef __xpv
        if (kpm_vbase == NULL) {
                kpreempt_disable();
                ASSERT(CPU->cpu_hat_info != NULL);
                mutex_enter(&CPU->cpu_hat_info->hci_mutex);
                x = PWIN_TABLE(CPU->cpu_id);
                newpte = MAKEPTE(dest->ht_pfn, 0) | PT_WRITABLE;
                xen_map(newpte, PWIN_VA(x));
                dst_va = (caddr_t)PT_INDEX_PTR(PWIN_VA(x), entry);
        } else
#endif
                dst_va = (caddr_t)x86pte_access_pagetable(dest, entry);

        size = count << mmu.pte_size_shift;
        ASSERT(size > BLOCKZEROALIGN);
#ifdef __i386
        if (!is_x86_feature(x86_featureset, X86FSET_SSE2))
                bzero(dst_va, size);
        else
#endif
                block_zero_no_xmm(dst_va, size);

#ifdef __xpv
        if (kpm_vbase == NULL) {
                xen_map(0, PWIN_VA(x));
                mutex_exit(&CPU->cpu_hat_info->hci_mutex);
                kpreempt_enable();
        } else
#endif
                x86pte_release_pagetable(dest);
}

/*
 * Called to ensure that all pagetables are in the system dump.
 */
void
hat_dump(void)
{
        hat_t *hat;
        uint_t h;
        htable_t *ht;

        /*
         * Dump all page tables.
         */
        for (hat = kas.a_hat; hat != NULL; hat = hat->hat_next) {
                for (h = 0; h < hat->hat_num_hash; ++h) {
                        for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) {
                                if ((ht->ht_flags & HTABLE_VLP) == 0)
                                        dump_page(ht->ht_pfn);
                        }
                }
        }
}
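
/*
 * Illustrative usage sketch (not part of the original source; the htable
 * pointers and entry counts below are placeholders): a hypothetical
 * caller preparing a fresh pagetable might zero the entries it owns and
 * copy any entries shared with an existing table:
 *
 *	x86pte_zero(new_ht, 0, nprivate);
 *	x86pte_copy(shared_ht, new_ht, nprivate, nshared);
 */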