1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved. 23 */ 24 /* 25 * Copyright (c) 2010, Intel Corporation. 26 * All rights reserved. 27 */ 28 29 30 /* 31 * VM - Hardware Address Translation management for i386 and amd64 32 * 33 * Implementation of the interfaces described in <common/vm/hat.h> 34 * 35 * Nearly all the details of how the hardware is managed should not be 36 * visible outside this layer except for misc. machine specific functions 37 * that work in conjunction with this code. 38 * 39 * Routines used only inside of i86pc/vm start with hati_ for HAT Internal. 40 */ 41 42 #include <sys/machparam.h> 43 #include <sys/machsystm.h> 44 #include <sys/mman.h> 45 #include <sys/types.h> 46 #include <sys/systm.h> 47 #include <sys/cpuvar.h> 48 #include <sys/thread.h> 49 #include <sys/proc.h> 50 #include <sys/cpu.h> 51 #include <sys/kmem.h> 52 #include <sys/disp.h> 53 #include <sys/shm.h> 54 #include <sys/sysmacros.h> 55 #include <sys/machparam.h> 56 #include <sys/vmem.h> 57 #include <sys/vmsystm.h> 58 #include <sys/promif.h> 59 #include <sys/var.h> 60 #include <sys/x86_archext.h> 61 #include <sys/atomic.h> 62 #include <sys/bitmap.h> 63 #include <sys/controlregs.h> 64 #include <sys/bootconf.h> 65 #include <sys/bootsvcs.h> 66 #include <sys/bootinfo.h> 67 #include <sys/archsystm.h> 68 69 #include <vm/seg_kmem.h> 70 #include <vm/hat_i86.h> 71 #include <vm/as.h> 72 #include <vm/seg.h> 73 #include <vm/page.h> 74 #include <vm/seg_kp.h> 75 #include <vm/seg_kpm.h> 76 #include <vm/vm_dep.h> 77 #ifdef __xpv 78 #include <sys/hypervisor.h> 79 #endif 80 #include <vm/kboot_mmu.h> 81 #include <vm/seg_spt.h> 82 83 #include <sys/cmn_err.h> 84 85 /* 86 * Basic parameters for hat operation. 87 */ 88 struct hat_mmu_info mmu; 89 90 /* 91 * The page that is the kernel's top level pagetable. 92 * 93 * For 32 bit PAE support on i86pc, the kernel hat will use the 1st 4 entries 94 * on this 4K page for its top level page table. The remaining groups of 95 * 4 entries are used for per processor copies of user VLP pagetables for 96 * running threads. See hat_switch() and reload_pae32() for details. 97 * 98 * vlp_page[0..3] - level==2 PTEs for kernel HAT 99 * vlp_page[4..7] - level==2 PTEs for user thread on cpu 0 100 * vlp_page[8..11] - level==2 PTE for user thread on cpu 1 101 * etc... 102 */ 103 static x86pte_t *vlp_page; 104 105 /* 106 * forward declaration of internal utility routines 107 */ 108 static x86pte_t hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected, 109 x86pte_t new); 110 111 /* 112 * The kernel address space exists in all HATs. 
To implement this the 113 * kernel reserves a fixed number of entries in the topmost level(s) of page 114 * tables. The values are setup during startup and then copied to every user 115 * hat created by hat_alloc(). This means that kernelbase must be: 116 * 117 * 4Meg aligned for 32 bit kernels 118 * 512Gig aligned for x86_64 64 bit kernel 119 * 120 * The hat_kernel_range_ts describe what needs to be copied from kernel hat 121 * to each user hat. 122 */ 123 typedef struct hat_kernel_range { 124 level_t hkr_level; 125 uintptr_t hkr_start_va; 126 uintptr_t hkr_end_va; /* zero means to end of memory */ 127 } hat_kernel_range_t; 128 #define NUM_KERNEL_RANGE 2 129 static hat_kernel_range_t kernel_ranges[NUM_KERNEL_RANGE]; 130 static int num_kernel_ranges; 131 132 uint_t use_boot_reserve = 1; /* cleared after early boot process */ 133 uint_t can_steal_post_boot = 0; /* set late in boot to enable stealing */ 134 135 /* 136 * enable_1gpg: controls 1g page support for user applications. 137 * By default, 1g pages are exported to user applications. enable_1gpg can 138 * be set to 0 to not export. 139 */ 140 int enable_1gpg = 1; 141 142 /* 143 * AMD shanghai processors provide better management of 1gb ptes in its tlb. 144 * By default, 1g page support will be disabled for pre-shanghai AMD 145 * processors that don't have optimal tlb support for the 1g page size. 146 * chk_optimal_1gtlb can be set to 0 to force 1g page support on sub-optimal 147 * processors. 148 */ 149 int chk_optimal_1gtlb = 1; 150 151 152 #ifdef DEBUG 153 uint_t map1gcnt; 154 #endif 155 156 157 /* 158 * A cpuset for all cpus. This is used for kernel address cross calls, since 159 * the kernel addresses apply to all cpus. 160 */ 161 cpuset_t khat_cpuset; 162 163 /* 164 * management stuff for hat structures 165 */ 166 kmutex_t hat_list_lock; 167 kcondvar_t hat_list_cv; 168 kmem_cache_t *hat_cache; 169 kmem_cache_t *hat_hash_cache; 170 kmem_cache_t *vlp_hash_cache; 171 172 /* 173 * Simple statistics 174 */ 175 struct hatstats hatstat; 176 177 /* 178 * Some earlier hypervisor versions do not emulate cmpxchg of PTEs 179 * correctly. For such hypervisors we must set PT_USER for kernel 180 * entries ourselves (normally the emulation would set PT_USER for 181 * kernel entries and PT_USER|PT_GLOBAL for user entries). pt_kern is 182 * thus set appropriately. Note that dboot/kbm is OK, as only the full 183 * HAT uses cmpxchg() and the other paths (hypercall etc.) were never 184 * incorrect. 185 */ 186 int pt_kern; 187 188 /* 189 * useful stuff for atomic access/clearing/setting REF/MOD/RO bits in page_t's. 
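 *
 * As a hedged illustration of how the wrappers below fit together (this
 * mirrors what hati_sync_pte_to_page() does further down when it pushes
 * hardware REF/MOD bits back into a page_t via hat_page_setattr()):
 *
 *	if (PTE_GET(pte, PT_REF))
 *		PP_SETREF(pp);		(atomic_orb() of P_REF into p_nrm)
 *	if (PTE_GET(pte, PT_MOD))
 *		PP_SETMOD(pp);
 *	...
 *	PP_CLRALL(pp);			(atomic_andb() clears REF, MOD and RO)
 *
 * The byte-wide atomics keep p_nrm updates safe without any lock beyond
 * the mapping list lock the callers already hold.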
190 */ 191 extern void atomic_orb(uchar_t *addr, uchar_t val); 192 extern void atomic_andb(uchar_t *addr, uchar_t val); 193 194 #ifndef __xpv 195 extern pfn_t memseg_get_start(struct memseg *); 196 #endif 197 198 #define PP_GETRM(pp, rmmask) (pp->p_nrm & rmmask) 199 #define PP_ISMOD(pp) PP_GETRM(pp, P_MOD) 200 #define PP_ISREF(pp) PP_GETRM(pp, P_REF) 201 #define PP_ISRO(pp) PP_GETRM(pp, P_RO) 202 203 #define PP_SETRM(pp, rm) atomic_orb(&(pp->p_nrm), rm) 204 #define PP_SETMOD(pp) PP_SETRM(pp, P_MOD) 205 #define PP_SETREF(pp) PP_SETRM(pp, P_REF) 206 #define PP_SETRO(pp) PP_SETRM(pp, P_RO) 207 208 #define PP_CLRRM(pp, rm) atomic_andb(&(pp->p_nrm), ~(rm)) 209 #define PP_CLRMOD(pp) PP_CLRRM(pp, P_MOD) 210 #define PP_CLRREF(pp) PP_CLRRM(pp, P_REF) 211 #define PP_CLRRO(pp) PP_CLRRM(pp, P_RO) 212 #define PP_CLRALL(pp) PP_CLRRM(pp, P_MOD | P_REF | P_RO) 213 214 /* 215 * kmem cache constructor for struct hat 216 */ 217 /*ARGSUSED*/ 218 static int 219 hati_constructor(void *buf, void *handle, int kmflags) 220 { 221 hat_t *hat = buf; 222 223 mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL); 224 bzero(hat->hat_pages_mapped, 225 sizeof (pgcnt_t) * (mmu.max_page_level + 1)); 226 hat->hat_ism_pgcnt = 0; 227 hat->hat_stats = 0; 228 hat->hat_flags = 0; 229 CPUSET_ZERO(hat->hat_cpus); 230 hat->hat_htable = NULL; 231 hat->hat_ht_hash = NULL; 232 return (0); 233 } 234 235 /* 236 * Allocate a hat structure for as. We also create the top level 237 * htable and initialize it to contain the kernel hat entries. 238 */ 239 hat_t * 240 hat_alloc(struct as *as) 241 { 242 hat_t *hat; 243 htable_t *ht; /* top level htable */ 244 uint_t use_vlp; 245 uint_t r; 246 hat_kernel_range_t *rp; 247 uintptr_t va; 248 uintptr_t eva; 249 uint_t start; 250 uint_t cnt; 251 htable_t *src; 252 253 /* 254 * Once we start creating user process HATs we can enable 255 * the htable_steal() code. 256 */ 257 if (can_steal_post_boot == 0) 258 can_steal_post_boot = 1; 259 260 ASSERT(AS_WRITE_HELD(as, &as->a_lock)); 261 hat = kmem_cache_alloc(hat_cache, KM_SLEEP); 262 hat->hat_as = as; 263 mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL); 264 ASSERT(hat->hat_flags == 0); 265 266 #if defined(__xpv) 267 /* 268 * No VLP stuff on the hypervisor due to the 64-bit split top level 269 * page tables. On 32-bit it's not needed as the hypervisor takes 270 * care of copying the top level PTEs to a below 4Gig page. 271 */ 272 use_vlp = 0; 273 #else /* __xpv */ 274 /* 32 bit processes uses a VLP style hat when running with PAE */ 275 #if defined(__amd64) 276 use_vlp = (ttoproc(curthread)->p_model == DATAMODEL_ILP32); 277 #elif defined(__i386) 278 use_vlp = mmu.pae_hat; 279 #endif 280 #endif /* __xpv */ 281 if (use_vlp) { 282 hat->hat_flags = HAT_VLP; 283 bzero(hat->hat_vlp_ptes, VLP_SIZE); 284 } 285 286 /* 287 * Allocate the htable hash 288 */ 289 if ((hat->hat_flags & HAT_VLP)) { 290 hat->hat_num_hash = mmu.vlp_hash_cnt; 291 hat->hat_ht_hash = kmem_cache_alloc(vlp_hash_cache, KM_SLEEP); 292 } else { 293 hat->hat_num_hash = mmu.hash_cnt; 294 hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_SLEEP); 295 } 296 bzero(hat->hat_ht_hash, hat->hat_num_hash * sizeof (htable_t *)); 297 298 /* 299 * Initialize Kernel HAT entries at the top of the top level page 300 * tables for the new hat. 
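 *
 * A condensed sketch of the copy loop below for the common 64 bit,
 * non-hypervisor case, where the single kernel range sits in the top
 * level table ("top" here stands for the new hat's top level htable):
 *
 *	start = htable_va2entry(kernelbase, top);
 *	cnt   = HTABLE_NUM_PTES(top) - start;
 *	src   = htable_lookup(kas.a_hat, kernelbase, TOP_LEVEL(hat));
 *	x86pte_copy(src, top, start, cnt);
 *
 * so every user hat shares the kernel's lower level pagetables instead
 * of duplicating them.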
301 */ 302 hat->hat_htable = NULL; 303 hat->hat_ht_cached = NULL; 304 XPV_DISALLOW_MIGRATE(); 305 ht = htable_create(hat, (uintptr_t)0, TOP_LEVEL(hat), NULL); 306 hat->hat_htable = ht; 307 308 #if defined(__amd64) 309 if (hat->hat_flags & HAT_VLP) 310 goto init_done; 311 #endif 312 313 for (r = 0; r < num_kernel_ranges; ++r) { 314 rp = &kernel_ranges[r]; 315 for (va = rp->hkr_start_va; va != rp->hkr_end_va; 316 va += cnt * LEVEL_SIZE(rp->hkr_level)) { 317 318 if (rp->hkr_level == TOP_LEVEL(hat)) 319 ht = hat->hat_htable; 320 else 321 ht = htable_create(hat, va, rp->hkr_level, 322 NULL); 323 324 start = htable_va2entry(va, ht); 325 cnt = HTABLE_NUM_PTES(ht) - start; 326 eva = va + 327 ((uintptr_t)cnt << LEVEL_SHIFT(rp->hkr_level)); 328 if (rp->hkr_end_va != 0 && 329 (eva > rp->hkr_end_va || eva == 0)) 330 cnt = htable_va2entry(rp->hkr_end_va, ht) - 331 start; 332 333 #if defined(__i386) && !defined(__xpv) 334 if (ht->ht_flags & HTABLE_VLP) { 335 bcopy(&vlp_page[start], 336 &hat->hat_vlp_ptes[start], 337 cnt * sizeof (x86pte_t)); 338 continue; 339 } 340 #endif 341 src = htable_lookup(kas.a_hat, va, rp->hkr_level); 342 ASSERT(src != NULL); 343 x86pte_copy(src, ht, start, cnt); 344 htable_release(src); 345 } 346 } 347 348 init_done: 349 350 #if defined(__xpv) 351 /* 352 * Pin top level page tables after initializing them 353 */ 354 xen_pin(hat->hat_htable->ht_pfn, mmu.max_level); 355 #if defined(__amd64) 356 xen_pin(hat->hat_user_ptable, mmu.max_level); 357 #endif 358 #endif 359 XPV_ALLOW_MIGRATE(); 360 361 /* 362 * Put it at the start of the global list of all hats (used by stealing) 363 * 364 * kas.a_hat is not in the list but is instead used to find the 365 * first and last items in the list. 366 * 367 * - kas.a_hat->hat_next points to the start of the user hats. 368 * The list ends where hat->hat_next == NULL 369 * 370 * - kas.a_hat->hat_prev points to the last of the user hats. 371 * The list begins where hat->hat_prev == NULL 372 */ 373 mutex_enter(&hat_list_lock); 374 hat->hat_prev = NULL; 375 hat->hat_next = kas.a_hat->hat_next; 376 if (hat->hat_next) 377 hat->hat_next->hat_prev = hat; 378 else 379 kas.a_hat->hat_prev = hat; 380 kas.a_hat->hat_next = hat; 381 mutex_exit(&hat_list_lock); 382 383 return (hat); 384 } 385 386 /* 387 * process has finished executing but as has not been cleaned up yet. 388 */ 389 /*ARGSUSED*/ 390 void 391 hat_free_start(hat_t *hat) 392 { 393 ASSERT(AS_WRITE_HELD(hat->hat_as, &hat->hat_as->a_lock)); 394 395 /* 396 * If the hat is currently a stealing victim, wait for the stealing 397 * to finish. Once we mark it as HAT_FREEING, htable_steal() 398 * won't look at its pagetables anymore. 399 */ 400 mutex_enter(&hat_list_lock); 401 while (hat->hat_flags & HAT_VICTIM) 402 cv_wait(&hat_list_cv, &hat_list_lock); 403 hat->hat_flags |= HAT_FREEING; 404 mutex_exit(&hat_list_lock); 405 } 406 407 /* 408 * An address space is being destroyed, so we destroy the associated hat. 
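 *
 * For context, the HAT_VICTIM handshake that hat_free_start() above
 * waits on is driven from the stealing side.  A hedged sketch of that
 * side (the real logic lives in htable_steal() and may differ in
 * detail):
 *
 *	mutex_enter(&hat_list_lock);
 *	hat->hat_flags |= HAT_VICTIM;		(keep hat_free_start() out)
 *	mutex_exit(&hat_list_lock);
 *	... steal pagetables from this hat ...
 *	mutex_enter(&hat_list_lock);
 *	hat->hat_flags &= ~HAT_VICTIM;
 *	cv_broadcast(&hat_list_cv);		(wake any waiting destroyer)
 *	mutex_exit(&hat_list_lock);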
409 */ 410 void 411 hat_free_end(hat_t *hat) 412 { 413 kmem_cache_t *cache; 414 415 ASSERT(hat->hat_flags & HAT_FREEING); 416 417 /* 418 * must not be running on the given hat 419 */ 420 ASSERT(CPU->cpu_current_hat != hat); 421 422 /* 423 * Remove it from the list of HATs 424 */ 425 mutex_enter(&hat_list_lock); 426 if (hat->hat_prev) 427 hat->hat_prev->hat_next = hat->hat_next; 428 else 429 kas.a_hat->hat_next = hat->hat_next; 430 if (hat->hat_next) 431 hat->hat_next->hat_prev = hat->hat_prev; 432 else 433 kas.a_hat->hat_prev = hat->hat_prev; 434 mutex_exit(&hat_list_lock); 435 hat->hat_next = hat->hat_prev = NULL; 436 437 #if defined(__xpv) 438 /* 439 * On the hypervisor, unpin top level page table(s) 440 */ 441 xen_unpin(hat->hat_htable->ht_pfn); 442 #if defined(__amd64) 443 xen_unpin(hat->hat_user_ptable); 444 #endif 445 #endif 446 447 /* 448 * Make a pass through the htables freeing them all up. 449 */ 450 htable_purge_hat(hat); 451 452 /* 453 * Decide which kmem cache the hash table came from, then free it. 454 */ 455 if (hat->hat_flags & HAT_VLP) 456 cache = vlp_hash_cache; 457 else 458 cache = hat_hash_cache; 459 kmem_cache_free(cache, hat->hat_ht_hash); 460 hat->hat_ht_hash = NULL; 461 462 hat->hat_flags = 0; 463 kmem_cache_free(hat_cache, hat); 464 } 465 466 /* 467 * round kernelbase down to a supported value to use for _userlimit 468 * 469 * userlimit must be aligned down to an entry in the top level htable. 470 * The one exception is for 32 bit HAT's running PAE. 471 */ 472 uintptr_t 473 hat_kernelbase(uintptr_t va) 474 { 475 #if defined(__i386) 476 va &= LEVEL_MASK(1); 477 #endif 478 if (IN_VA_HOLE(va)) 479 panic("_userlimit %p will fall in VA hole\n", (void *)va); 480 return (va); 481 } 482 483 /* 484 * 485 */ 486 static void 487 set_max_page_level() 488 { 489 level_t lvl; 490 491 if (!kbm_largepage_support) { 492 lvl = 0; 493 } else { 494 if (x86_feature & X86_1GPG) { 495 lvl = 2; 496 if (chk_optimal_1gtlb && 497 cpuid_opteron_erratum(CPU, 6671130)) { 498 lvl = 1; 499 } 500 if (plat_mnode_xcheck(LEVEL_SIZE(2) >> 501 LEVEL_SHIFT(0))) { 502 lvl = 1; 503 } 504 } else { 505 lvl = 1; 506 } 507 } 508 mmu.max_page_level = lvl; 509 510 if ((lvl == 2) && (enable_1gpg == 0)) 511 mmu.umax_page_level = 1; 512 else 513 mmu.umax_page_level = lvl; 514 } 515 516 /* 517 * Initialize hat data structures based on processor MMU information. 518 */ 519 void 520 mmu_init(void) 521 { 522 uint_t max_htables; 523 uint_t pa_bits; 524 uint_t va_bits; 525 int i; 526 527 /* 528 * If CPU enabled the page table global bit, use it for the kernel 529 * This is bit 7 in CR4 (PGE - Page Global Enable). 530 */ 531 if ((x86_feature & X86_PGE) != 0 && (getcr4() & CR4_PGE) != 0) 532 mmu.pt_global = PT_GLOBAL; 533 534 /* 535 * Detect NX and PAE usage. 536 */ 537 mmu.pae_hat = kbm_pae_support; 538 if (kbm_nx_support) 539 mmu.pt_nx = PT_NX; 540 else 541 mmu.pt_nx = 0; 542 543 /* 544 * Use CPU info to set various MMU parameters 545 */ 546 cpuid_get_addrsize(CPU, &pa_bits, &va_bits); 547 548 if (va_bits < sizeof (void *) * NBBY) { 549 mmu.hole_start = (1ul << (va_bits - 1)); 550 mmu.hole_end = 0ul - mmu.hole_start - 1; 551 } else { 552 mmu.hole_end = 0; 553 mmu.hole_start = mmu.hole_end - 1; 554 } 555 #if defined(OPTERON_ERRATUM_121) 556 /* 557 * If erratum 121 has already been detected at this time, hole_start 558 * contains the value to be subtracted from mmu.hole_start. 
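 *
 * For reference, on a processor reporting 48 implemented VA bits the
 * unadjusted values computed above work out to (illustrative only):
 *
 *	mmu.hole_start = 1ul << 47;			0x0000800000000000
 *	mmu.hole_end   = 0ul - mmu.hole_start - 1;	0xffff7fffffffffff
 *
 * i.e. the non-canonical gap between the bottom and top halves of the
 * 64 bit address space that IN_VA_HOLE() tests against.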
559 */ 560 ASSERT(hole_start == 0 || opteron_erratum_121 != 0); 561 hole_start = mmu.hole_start - hole_start; 562 #else 563 hole_start = mmu.hole_start; 564 #endif 565 hole_end = mmu.hole_end; 566 567 mmu.highest_pfn = mmu_btop((1ull << pa_bits) - 1); 568 if (mmu.pae_hat == 0 && pa_bits > 32) 569 mmu.highest_pfn = PFN_4G - 1; 570 571 if (mmu.pae_hat) { 572 mmu.pte_size = 8; /* 8 byte PTEs */ 573 mmu.pte_size_shift = 3; 574 } else { 575 mmu.pte_size = 4; /* 4 byte PTEs */ 576 mmu.pte_size_shift = 2; 577 } 578 579 if (mmu.pae_hat && (x86_feature & X86_PAE) == 0) 580 panic("Processor does not support PAE"); 581 582 if ((x86_feature & X86_CX8) == 0) 583 panic("Processor does not support cmpxchg8b instruction"); 584 585 #if defined(__amd64) 586 587 mmu.num_level = 4; 588 mmu.max_level = 3; 589 mmu.ptes_per_table = 512; 590 mmu.top_level_count = 512; 591 592 mmu.level_shift[0] = 12; 593 mmu.level_shift[1] = 21; 594 mmu.level_shift[2] = 30; 595 mmu.level_shift[3] = 39; 596 597 #elif defined(__i386) 598 599 if (mmu.pae_hat) { 600 mmu.num_level = 3; 601 mmu.max_level = 2; 602 mmu.ptes_per_table = 512; 603 mmu.top_level_count = 4; 604 605 mmu.level_shift[0] = 12; 606 mmu.level_shift[1] = 21; 607 mmu.level_shift[2] = 30; 608 609 } else { 610 mmu.num_level = 2; 611 mmu.max_level = 1; 612 mmu.ptes_per_table = 1024; 613 mmu.top_level_count = 1024; 614 615 mmu.level_shift[0] = 12; 616 mmu.level_shift[1] = 22; 617 } 618 619 #endif /* __i386 */ 620 621 for (i = 0; i < mmu.num_level; ++i) { 622 mmu.level_size[i] = 1UL << mmu.level_shift[i]; 623 mmu.level_offset[i] = mmu.level_size[i] - 1; 624 mmu.level_mask[i] = ~mmu.level_offset[i]; 625 } 626 627 set_max_page_level(); 628 629 mmu_page_sizes = mmu.max_page_level + 1; 630 mmu_exported_page_sizes = mmu.umax_page_level + 1; 631 632 /* restrict legacy applications from using pagesizes 1g and above */ 633 mmu_legacy_page_sizes = 634 (mmu_exported_page_sizes > 2) ? 2 : mmu_exported_page_sizes; 635 636 637 for (i = 0; i <= mmu.max_page_level; ++i) { 638 mmu.pte_bits[i] = PT_VALID | pt_kern; 639 if (i > 0) 640 mmu.pte_bits[i] |= PT_PAGESIZE; 641 } 642 643 /* 644 * NOTE Legacy 32 bit PAE mode only has the P_VALID bit at top level. 645 */ 646 for (i = 1; i < mmu.num_level; ++i) 647 mmu.ptp_bits[i] = PT_PTPBITS; 648 649 #if defined(__i386) 650 mmu.ptp_bits[2] = PT_VALID; 651 #endif 652 653 /* 654 * Compute how many hash table entries to have per process for htables. 655 * We start with 1 page's worth of entries. 656 * 657 * If physical memory is small, reduce the amount need to cover it. 658 */ 659 max_htables = physmax / mmu.ptes_per_table; 660 mmu.hash_cnt = MMU_PAGESIZE / sizeof (htable_t *); 661 while (mmu.hash_cnt > 16 && mmu.hash_cnt >= max_htables) 662 mmu.hash_cnt >>= 1; 663 mmu.vlp_hash_cnt = mmu.hash_cnt; 664 665 #if defined(__amd64) 666 /* 667 * If running in 64 bits and physical memory is large, 668 * increase the size of the cache to cover all of memory for 669 * a 64 bit process. 
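 *
 * A worked example of the sizing, assuming amd64, 4K pages, 8 byte
 * pointers and roughly 16 Gig of physical memory (physmax ~ 4M pages):
 *
 *	max_htables  = 4M / 512 = 8192
 *	mmu.hash_cnt = MMU_PAGESIZE / sizeof (htable_t *) = 512
 *	grow loop:   512 -> 1024 -> 2048  (stops once 2048 * 4 >= 8192)
 *
 * which keeps the average hash chain at or below HASH_MAX_LENGTH even
 * when every possible pagetable exists.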
 */
#define	HASH_MAX_LENGTH 4
	while (mmu.hash_cnt * HASH_MAX_LENGTH < max_htables)
		mmu.hash_cnt <<= 1;
#endif
}


/*
 * initialize hat data structures
 */
void
hat_init()
{
#if defined(__i386)
	/*
	 * _userlimit must be aligned correctly
	 */
	if ((_userlimit & LEVEL_MASK(1)) != _userlimit) {
		prom_printf("hat_init(): _userlimit=%p, not aligned at %p\n",
		    (void *)_userlimit, (void *)LEVEL_SIZE(1));
		halt("hat_init(): Unable to continue");
	}
#endif

	cv_init(&hat_list_cv, NULL, CV_DEFAULT, NULL);

	/*
	 * initialize kmem caches
	 */
	htable_init();
	hment_init();

	hat_cache = kmem_cache_create("hat_t",
	    sizeof (hat_t), 0, hati_constructor, NULL, NULL,
	    NULL, 0, 0);

	hat_hash_cache = kmem_cache_create("HatHash",
	    mmu.hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
	    NULL, 0, 0);

	/*
	 * VLP hats can use a smaller hash table size on large memory machines
	 */
	if (mmu.hash_cnt == mmu.vlp_hash_cnt) {
		vlp_hash_cache = hat_hash_cache;
	} else {
		vlp_hash_cache = kmem_cache_create("HatVlpHash",
		    mmu.vlp_hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
		    NULL, 0, 0);
	}

	/*
	 * Set up the kernel's hat
	 */
	AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER);
	kas.a_hat = kmem_cache_alloc(hat_cache, KM_NOSLEEP);
	mutex_init(&kas.a_hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
	kas.a_hat->hat_as = &kas;
	kas.a_hat->hat_flags = 0;
	AS_LOCK_EXIT(&kas, &kas.a_lock);

	CPUSET_ZERO(khat_cpuset);
	CPUSET_ADD(khat_cpuset, CPU->cpu_id);

	/*
	 * The kernel hat's next pointer serves as the head of the hat list.
	 * The kernel hat's prev pointer tracks the last hat on the list for
	 * htable_steal() to use.
	 */
	kas.a_hat->hat_next = NULL;
	kas.a_hat->hat_prev = NULL;

	/*
	 * Allocate an htable hash bucket for the kernel
	 * XX64 - tune for 64 bit procs
	 */
	kas.a_hat->hat_num_hash = mmu.hash_cnt;
	kas.a_hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_NOSLEEP);
	bzero(kas.a_hat->hat_ht_hash, mmu.hash_cnt * sizeof (htable_t *));

	/*
	 * zero out the top level and cached htable pointers
	 */
	kas.a_hat->hat_ht_cached = NULL;
	kas.a_hat->hat_htable = NULL;

	/*
	 * Pre-allocate hrm_hashtab before enabling the collection of
	 * refmod statistics.  Allocating it on the fly would mean we
	 * run the risk of suffering recursive mutex enters or deadlocks.
	 */
	hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *),
	    KM_SLEEP);
}

/*
 * Prepare CPU specific pagetables for VLP processes on 64 bit kernels.
 *
 * Each CPU has a set of 2 pagetables that are reused for any 32 bit
 * process it runs.  They are the top level pagetable, hci_vlp_l3ptes, and
 * the next to top level table for the bottom 512 Gig, hci_vlp_l2ptes.
 */
/*ARGSUSED*/
static void
hat_vlp_setup(struct cpu *cpu)
{
#if defined(__amd64) && !defined(__xpv)
	struct hat_cpu_info *hci = cpu->cpu_hat_info;
	pfn_t pfn;

	/*
	 * allocate the level==2 page table for the bottom most
	 * 512Gig of address space (this is where 32 bit apps live)
	 */
	ASSERT(hci != NULL);
	hci->hci_vlp_l2ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);

	/*
	 * Allocate a top level pagetable and copy the kernel's
	 * entries into it.  Then link in hci_vlp_l2ptes in the 1st entry.
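 *
 * The per-CPU result, roughly (illustrative layout, see the code that
 * follows):
 *
 *	hci_vlp_l3ptes[0]	= MAKEPTP(pfn of hci_vlp_l2ptes, 2)
 *	hci_vlp_l3ptes[1..511]	= copies of the kernel's top level PTEs
 *	hci_vlp_l2ptes[0..3]	= refreshed from hat_vlp_ptes at hat_switch()
 *	hci_vlp_l2ptes[4..511]	= unused; a 32 bit process fits in 4 Gig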
792 */ 793 hci->hci_vlp_l3ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP); 794 hci->hci_vlp_pfn = 795 hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l3ptes); 796 ASSERT(hci->hci_vlp_pfn != PFN_INVALID); 797 bcopy(vlp_page, hci->hci_vlp_l3ptes, MMU_PAGESIZE); 798 799 pfn = hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l2ptes); 800 ASSERT(pfn != PFN_INVALID); 801 hci->hci_vlp_l3ptes[0] = MAKEPTP(pfn, 2); 802 #endif /* __amd64 && !__xpv */ 803 } 804 805 /*ARGSUSED*/ 806 static void 807 hat_vlp_teardown(cpu_t *cpu) 808 { 809 #if defined(__amd64) && !defined(__xpv) 810 struct hat_cpu_info *hci; 811 812 if ((hci = cpu->cpu_hat_info) == NULL) 813 return; 814 if (hci->hci_vlp_l2ptes) 815 kmem_free(hci->hci_vlp_l2ptes, MMU_PAGESIZE); 816 if (hci->hci_vlp_l3ptes) 817 kmem_free(hci->hci_vlp_l3ptes, MMU_PAGESIZE); 818 #endif 819 } 820 821 #define NEXT_HKR(r, l, s, e) { \ 822 kernel_ranges[r].hkr_level = l; \ 823 kernel_ranges[r].hkr_start_va = s; \ 824 kernel_ranges[r].hkr_end_va = e; \ 825 ++r; \ 826 } 827 828 /* 829 * Finish filling in the kernel hat. 830 * Pre fill in all top level kernel page table entries for the kernel's 831 * part of the address range. From this point on we can't use any new 832 * kernel large pages if they need PTE's at max_level 833 * 834 * create the kmap mappings. 835 */ 836 void 837 hat_init_finish(void) 838 { 839 size_t size; 840 uint_t r = 0; 841 uintptr_t va; 842 hat_kernel_range_t *rp; 843 844 845 /* 846 * We are now effectively running on the kernel hat. 847 * Clearing use_boot_reserve shuts off using the pre-allocated boot 848 * reserve for all HAT allocations. From here on, the reserves are 849 * only used when avoiding recursion in kmem_alloc(). 850 */ 851 use_boot_reserve = 0; 852 htable_adjust_reserve(); 853 854 /* 855 * User HATs are initialized with copies of all kernel mappings in 856 * higher level page tables. Ensure that those entries exist. 857 */ 858 #if defined(__amd64) 859 860 NEXT_HKR(r, 3, kernelbase, 0); 861 #if defined(__xpv) 862 NEXT_HKR(r, 3, HYPERVISOR_VIRT_START, HYPERVISOR_VIRT_END); 863 #endif 864 865 #elif defined(__i386) 866 867 #if !defined(__xpv) 868 if (mmu.pae_hat) { 869 va = kernelbase; 870 if ((va & LEVEL_MASK(2)) != va) { 871 va = P2ROUNDUP(va, LEVEL_SIZE(2)); 872 NEXT_HKR(r, 1, kernelbase, va); 873 } 874 if (va != 0) 875 NEXT_HKR(r, 2, va, 0); 876 } else 877 #endif /* __xpv */ 878 NEXT_HKR(r, 1, kernelbase, 0); 879 880 #endif /* __i386 */ 881 882 num_kernel_ranges = r; 883 884 /* 885 * Create all the kernel pagetables that will have entries 886 * shared to user HATs. 887 */ 888 for (r = 0; r < num_kernel_ranges; ++r) { 889 rp = &kernel_ranges[r]; 890 for (va = rp->hkr_start_va; va != rp->hkr_end_va; 891 va += LEVEL_SIZE(rp->hkr_level)) { 892 htable_t *ht; 893 894 if (IN_HYPERVISOR_VA(va)) 895 continue; 896 897 /* can/must skip if a page mapping already exists */ 898 if (rp->hkr_level <= mmu.max_page_level && 899 (ht = htable_getpage(kas.a_hat, va, NULL)) != 900 NULL) { 901 htable_release(ht); 902 continue; 903 } 904 905 (void) htable_create(kas.a_hat, va, rp->hkr_level - 1, 906 NULL); 907 } 908 } 909 910 /* 911 * 32 bit PAE metal kernels use only 4 of the 512 entries in the 912 * page holding the top level pagetable. We use the remainder for 913 * the "per CPU" page tables for VLP processes. 914 * Map the top level kernel pagetable into the kernel to make 915 * it easy to use bcopy access these tables. 
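 *
 * A hedged sketch of how a CPU then finds its slot in that page (this
 * is what reload_pae32() and the 32 bit branch of hat_switch() below
 * do):
 *
 *	dest = vlp_page + (cpu->cpu_id + 1) * VLP_NUM_PTES;
 *	cr3  = MAKECR3(kas.a_hat->hat_htable->ht_pfn) +
 *		(cpu->cpu_id + 1) * VLP_SIZE;
 *
 * leaving slot 0 for the kernel's own four top level PTEs.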
916 */ 917 if (mmu.pae_hat) { 918 vlp_page = vmem_alloc(heap_arena, MMU_PAGESIZE, VM_SLEEP); 919 hat_devload(kas.a_hat, (caddr_t)vlp_page, MMU_PAGESIZE, 920 kas.a_hat->hat_htable->ht_pfn, 921 #if !defined(__xpv) 922 PROT_WRITE | 923 #endif 924 PROT_READ | HAT_NOSYNC | HAT_UNORDERED_OK, 925 HAT_LOAD | HAT_LOAD_NOCONSIST); 926 } 927 hat_vlp_setup(CPU); 928 929 /* 930 * Create kmap (cached mappings of kernel PTEs) 931 * for 32 bit we map from segmap_start .. ekernelheap 932 * for 64 bit we map from segmap_start .. segmap_start + segmapsize; 933 */ 934 #if defined(__i386) 935 size = (uintptr_t)ekernelheap - segmap_start; 936 #elif defined(__amd64) 937 size = segmapsize; 938 #endif 939 hat_kmap_init((uintptr_t)segmap_start, size); 940 } 941 942 /* 943 * On 32 bit PAE mode, PTE's are 64 bits, but ordinary atomic memory references 944 * are 32 bit, so for safety we must use cas64() to install these. 945 */ 946 #ifdef __i386 947 static void 948 reload_pae32(hat_t *hat, cpu_t *cpu) 949 { 950 x86pte_t *src; 951 x86pte_t *dest; 952 x86pte_t pte; 953 int i; 954 955 /* 956 * Load the 4 entries of the level 2 page table into this 957 * cpu's range of the vlp_page and point cr3 at them. 958 */ 959 ASSERT(mmu.pae_hat); 960 src = hat->hat_vlp_ptes; 961 dest = vlp_page + (cpu->cpu_id + 1) * VLP_NUM_PTES; 962 for (i = 0; i < VLP_NUM_PTES; ++i) { 963 for (;;) { 964 pte = dest[i]; 965 if (pte == src[i]) 966 break; 967 if (cas64(dest + i, pte, src[i]) != src[i]) 968 break; 969 } 970 } 971 } 972 #endif 973 974 /* 975 * Switch to a new active hat, maintaining bit masks to track active CPUs. 976 * 977 * On the 32-bit PAE hypervisor, %cr3 is a 64-bit value, on metal it 978 * remains a 32-bit value. 979 */ 980 void 981 hat_switch(hat_t *hat) 982 { 983 uint64_t newcr3; 984 cpu_t *cpu = CPU; 985 hat_t *old = cpu->cpu_current_hat; 986 987 /* 988 * set up this information first, so we don't miss any cross calls 989 */ 990 if (old != NULL) { 991 if (old == hat) 992 return; 993 if (old != kas.a_hat) 994 CPUSET_ATOMIC_DEL(old->hat_cpus, cpu->cpu_id); 995 } 996 997 /* 998 * Add this CPU to the active set for this HAT. 999 */ 1000 if (hat != kas.a_hat) { 1001 CPUSET_ATOMIC_ADD(hat->hat_cpus, cpu->cpu_id); 1002 } 1003 cpu->cpu_current_hat = hat; 1004 1005 /* 1006 * now go ahead and load cr3 1007 */ 1008 if (hat->hat_flags & HAT_VLP) { 1009 #if defined(__amd64) 1010 x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes; 1011 1012 VLP_COPY(hat->hat_vlp_ptes, vlpptep); 1013 newcr3 = MAKECR3(cpu->cpu_hat_info->hci_vlp_pfn); 1014 #elif defined(__i386) 1015 reload_pae32(hat, cpu); 1016 newcr3 = MAKECR3(kas.a_hat->hat_htable->ht_pfn) + 1017 (cpu->cpu_id + 1) * VLP_SIZE; 1018 #endif 1019 } else { 1020 newcr3 = MAKECR3((uint64_t)hat->hat_htable->ht_pfn); 1021 } 1022 #ifdef __xpv 1023 { 1024 struct mmuext_op t[2]; 1025 uint_t retcnt; 1026 uint_t opcnt = 1; 1027 1028 t[0].cmd = MMUEXT_NEW_BASEPTR; 1029 t[0].arg1.mfn = mmu_btop(pa_to_ma(newcr3)); 1030 #if defined(__amd64) 1031 /* 1032 * There's an interesting problem here, as to what to 1033 * actually specify when switching to the kernel hat. 1034 * For now we'll reuse the kernel hat again. 
1035 */ 1036 t[1].cmd = MMUEXT_NEW_USER_BASEPTR; 1037 if (hat == kas.a_hat) 1038 t[1].arg1.mfn = mmu_btop(pa_to_ma(newcr3)); 1039 else 1040 t[1].arg1.mfn = pfn_to_mfn(hat->hat_user_ptable); 1041 ++opcnt; 1042 #endif /* __amd64 */ 1043 if (HYPERVISOR_mmuext_op(t, opcnt, &retcnt, DOMID_SELF) < 0) 1044 panic("HYPERVISOR_mmu_update() failed"); 1045 ASSERT(retcnt == opcnt); 1046 1047 } 1048 #else 1049 setcr3(newcr3); 1050 #endif 1051 ASSERT(cpu == CPU); 1052 } 1053 1054 /* 1055 * Utility to return a valid x86pte_t from protections, pfn, and level number 1056 */ 1057 static x86pte_t 1058 hati_mkpte(pfn_t pfn, uint_t attr, level_t level, uint_t flags) 1059 { 1060 x86pte_t pte; 1061 uint_t cache_attr = attr & HAT_ORDER_MASK; 1062 1063 pte = MAKEPTE(pfn, level); 1064 1065 if (attr & PROT_WRITE) 1066 PTE_SET(pte, PT_WRITABLE); 1067 1068 if (attr & PROT_USER) 1069 PTE_SET(pte, PT_USER); 1070 1071 if (!(attr & PROT_EXEC)) 1072 PTE_SET(pte, mmu.pt_nx); 1073 1074 /* 1075 * Set the software bits used track ref/mod sync's and hments. 1076 * If not using REF/MOD, set them to avoid h/w rewriting PTEs. 1077 */ 1078 if (flags & HAT_LOAD_NOCONSIST) 1079 PTE_SET(pte, PT_NOCONSIST | PT_REF | PT_MOD); 1080 else if (attr & HAT_NOSYNC) 1081 PTE_SET(pte, PT_NOSYNC | PT_REF | PT_MOD); 1082 1083 /* 1084 * Set the caching attributes in the PTE. The combination 1085 * of attributes are poorly defined, so we pay attention 1086 * to them in the given order. 1087 * 1088 * The test for HAT_STRICTORDER is different because it's defined 1089 * as "0" - which was a stupid thing to do, but is too late to change! 1090 */ 1091 if (cache_attr == HAT_STRICTORDER) { 1092 PTE_SET(pte, PT_NOCACHE); 1093 /*LINTED [Lint hates empty ifs, but it's the obvious way to do this] */ 1094 } else if (cache_attr & (HAT_UNORDERED_OK | HAT_STORECACHING_OK)) { 1095 /* nothing to set */; 1096 } else if (cache_attr & (HAT_MERGING_OK | HAT_LOADCACHING_OK)) { 1097 PTE_SET(pte, PT_NOCACHE); 1098 if (x86_feature & X86_PAT) 1099 PTE_SET(pte, (level == 0) ? PT_PAT_4K : PT_PAT_LARGE); 1100 else 1101 PTE_SET(pte, PT_WRITETHRU); 1102 } else { 1103 panic("hati_mkpte(): bad caching attributes: %x\n", cache_attr); 1104 } 1105 1106 return (pte); 1107 } 1108 1109 /* 1110 * Duplicate address translations of the parent to the child. 1111 * This function really isn't used anymore. 1112 */ 1113 /*ARGSUSED*/ 1114 int 1115 hat_dup(hat_t *old, hat_t *new, caddr_t addr, size_t len, uint_t flag) 1116 { 1117 ASSERT((uintptr_t)addr < kernelbase); 1118 ASSERT(new != kas.a_hat); 1119 ASSERT(old != kas.a_hat); 1120 return (0); 1121 } 1122 1123 /* 1124 * Allocate any hat resources required for a process being swapped in. 1125 */ 1126 /*ARGSUSED*/ 1127 void 1128 hat_swapin(hat_t *hat) 1129 { 1130 /* do nothing - we let everything fault back in */ 1131 } 1132 1133 /* 1134 * Unload all translations associated with an address space of a process 1135 * that is being swapped out. 1136 */ 1137 void 1138 hat_swapout(hat_t *hat) 1139 { 1140 uintptr_t vaddr = (uintptr_t)0; 1141 uintptr_t eaddr = _userlimit; 1142 htable_t *ht = NULL; 1143 level_t l; 1144 1145 XPV_DISALLOW_MIGRATE(); 1146 /* 1147 * We can't just call hat_unload(hat, 0, _userlimit...) here, because 1148 * seg_spt and shared pagetables can't be swapped out. 1149 * Take a look at segspt_shmswapout() - it's a big no-op. 1150 * 1151 * Instead we'll walk through all the address space and unload 1152 * any mappings which we are sure are not shared, not locked. 
1153 */ 1154 ASSERT(IS_PAGEALIGNED(vaddr)); 1155 ASSERT(IS_PAGEALIGNED(eaddr)); 1156 ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock)); 1157 if ((uintptr_t)hat->hat_as->a_userlimit < eaddr) 1158 eaddr = (uintptr_t)hat->hat_as->a_userlimit; 1159 1160 while (vaddr < eaddr) { 1161 (void) htable_walk(hat, &ht, &vaddr, eaddr); 1162 if (ht == NULL) 1163 break; 1164 1165 ASSERT(!IN_VA_HOLE(vaddr)); 1166 1167 /* 1168 * If the page table is shared skip its entire range. 1169 */ 1170 l = ht->ht_level; 1171 if (ht->ht_flags & HTABLE_SHARED_PFN) { 1172 vaddr = ht->ht_vaddr + LEVEL_SIZE(l + 1); 1173 htable_release(ht); 1174 ht = NULL; 1175 continue; 1176 } 1177 1178 /* 1179 * If the page table has no locked entries, unload this one. 1180 */ 1181 if (ht->ht_lock_cnt == 0) 1182 hat_unload(hat, (caddr_t)vaddr, LEVEL_SIZE(l), 1183 HAT_UNLOAD_UNMAP); 1184 1185 /* 1186 * If we have a level 0 page table with locked entries, 1187 * skip the entire page table, otherwise skip just one entry. 1188 */ 1189 if (ht->ht_lock_cnt > 0 && l == 0) 1190 vaddr = ht->ht_vaddr + LEVEL_SIZE(1); 1191 else 1192 vaddr += LEVEL_SIZE(l); 1193 } 1194 if (ht) 1195 htable_release(ht); 1196 1197 /* 1198 * We're in swapout because the system is low on memory, so 1199 * go back and flush all the htables off the cached list. 1200 */ 1201 htable_purge_hat(hat); 1202 XPV_ALLOW_MIGRATE(); 1203 } 1204 1205 /* 1206 * returns number of bytes that have valid mappings in hat. 1207 */ 1208 size_t 1209 hat_get_mapped_size(hat_t *hat) 1210 { 1211 size_t total = 0; 1212 int l; 1213 1214 for (l = 0; l <= mmu.max_page_level; l++) 1215 total += (hat->hat_pages_mapped[l] << LEVEL_SHIFT(l)); 1216 total += hat->hat_ism_pgcnt; 1217 1218 return (total); 1219 } 1220 1221 /* 1222 * enable/disable collection of stats for hat. 1223 */ 1224 int 1225 hat_stats_enable(hat_t *hat) 1226 { 1227 atomic_add_32(&hat->hat_stats, 1); 1228 return (1); 1229 } 1230 1231 void 1232 hat_stats_disable(hat_t *hat) 1233 { 1234 atomic_add_32(&hat->hat_stats, -1); 1235 } 1236 1237 /* 1238 * Utility to sync the ref/mod bits from a page table entry to the page_t 1239 * We must be holding the mapping list lock when this is called. 1240 */ 1241 static void 1242 hati_sync_pte_to_page(page_t *pp, x86pte_t pte, level_t level) 1243 { 1244 uint_t rm = 0; 1245 pgcnt_t pgcnt; 1246 1247 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC) 1248 return; 1249 1250 if (PTE_GET(pte, PT_REF)) 1251 rm |= P_REF; 1252 1253 if (PTE_GET(pte, PT_MOD)) 1254 rm |= P_MOD; 1255 1256 if (rm == 0) 1257 return; 1258 1259 /* 1260 * sync to all constituent pages of a large page 1261 */ 1262 ASSERT(x86_hm_held(pp)); 1263 pgcnt = page_get_pagecnt(level); 1264 ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt)); 1265 for (; pgcnt > 0; --pgcnt) { 1266 /* 1267 * hat_page_demote() can't decrease 1268 * pszc below this mapping size 1269 * since this large mapping existed after we 1270 * took mlist lock. 1271 */ 1272 ASSERT(pp->p_szc >= level); 1273 hat_page_setattr(pp, rm); 1274 ++pp; 1275 } 1276 } 1277 1278 /* 1279 * This the set of PTE bits for PFN, permissions and caching 1280 * that are allowed to change on a HAT_LOAD_REMAP 1281 */ 1282 #define PT_REMAP_BITS \ 1283 (PT_PADDR | PT_NX | PT_WRITABLE | PT_WRITETHRU | \ 1284 PT_NOCACHE | PT_PAT_4K | PT_PAT_LARGE | PT_IGNORE | PT_REF | PT_MOD) 1285 1286 #define REMAPASSERT(EX) if (!(EX)) panic("hati_pte_map: " #EX) 1287 /* 1288 * Do the low-level work to get a mapping entered into a HAT's pagetables 1289 * and in the mapping list of the associated page_t. 
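 *
 * A hedged sketch of the caller's side of this contract (roughly what
 * hati_load_common() and hat_kmap_load() below do):
 *
 *	ht = htable_lookup(hat, va, level);	(or htable_create())
 *	entry = htable_va2entry(va, ht);
 *	pte = hati_mkpte(pfn, attr, level, flags);
 *	rv = hati_pte_map(ht, entry, pp, pte, flags, NULL);
 *	htable_release(ht);
 *
 * rv is non-zero only when a large page collides with an existing
 * pagetable at this address.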
1290 */ 1291 static int 1292 hati_pte_map( 1293 htable_t *ht, 1294 uint_t entry, 1295 page_t *pp, 1296 x86pte_t pte, 1297 int flags, 1298 void *pte_ptr) 1299 { 1300 hat_t *hat = ht->ht_hat; 1301 x86pte_t old_pte; 1302 level_t l = ht->ht_level; 1303 hment_t *hm; 1304 uint_t is_consist; 1305 uint_t is_locked; 1306 int rv = 0; 1307 1308 /* 1309 * Is this a consistent (ie. need mapping list lock) mapping? 1310 */ 1311 is_consist = (pp != NULL && (flags & HAT_LOAD_NOCONSIST) == 0); 1312 1313 /* 1314 * Track locked mapping count in the htable. Do this first, 1315 * as we track locking even if there already is a mapping present. 1316 */ 1317 is_locked = (flags & HAT_LOAD_LOCK) != 0 && hat != kas.a_hat; 1318 if (is_locked) 1319 HTABLE_LOCK_INC(ht); 1320 1321 /* 1322 * Acquire the page's mapping list lock and get an hment to use. 1323 * Note that hment_prepare() might return NULL. 1324 */ 1325 if (is_consist) { 1326 x86_hm_enter(pp); 1327 hm = hment_prepare(ht, entry, pp); 1328 } 1329 1330 /* 1331 * Set the new pte, retrieving the old one at the same time. 1332 */ 1333 old_pte = x86pte_set(ht, entry, pte, pte_ptr); 1334 1335 /* 1336 * Did we get a large page / page table collision? 1337 */ 1338 if (old_pte == LPAGE_ERROR) { 1339 if (is_locked) 1340 HTABLE_LOCK_DEC(ht); 1341 rv = -1; 1342 goto done; 1343 } 1344 1345 /* 1346 * If the mapping didn't change there is nothing more to do. 1347 */ 1348 if (PTE_EQUIV(pte, old_pte)) 1349 goto done; 1350 1351 /* 1352 * Install a new mapping in the page's mapping list 1353 */ 1354 if (!PTE_ISVALID(old_pte)) { 1355 if (is_consist) { 1356 hment_assign(ht, entry, pp, hm); 1357 x86_hm_exit(pp); 1358 } else { 1359 ASSERT(flags & HAT_LOAD_NOCONSIST); 1360 } 1361 #if defined(__amd64) 1362 if (ht->ht_flags & HTABLE_VLP) { 1363 cpu_t *cpu = CPU; 1364 x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes; 1365 VLP_COPY(hat->hat_vlp_ptes, vlpptep); 1366 } 1367 #endif 1368 HTABLE_INC(ht->ht_valid_cnt); 1369 PGCNT_INC(hat, l); 1370 return (rv); 1371 } 1372 1373 /* 1374 * Remap's are more complicated: 1375 * - HAT_LOAD_REMAP must be specified if changing the pfn. 1376 * We also require that NOCONSIST be specified. 1377 * - Otherwise only permission or caching bits may change. 1378 */ 1379 if (!PTE_ISPAGE(old_pte, l)) 1380 panic("non-null/page mapping pte=" FMT_PTE, old_pte); 1381 1382 if (PTE2PFN(old_pte, l) != PTE2PFN(pte, l)) { 1383 REMAPASSERT(flags & HAT_LOAD_REMAP); 1384 REMAPASSERT(flags & HAT_LOAD_NOCONSIST); 1385 REMAPASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST); 1386 REMAPASSERT(pf_is_memory(PTE2PFN(old_pte, l)) == 1387 pf_is_memory(PTE2PFN(pte, l))); 1388 REMAPASSERT(!is_consist); 1389 } 1390 1391 /* 1392 * We only let remaps change the certain bits in the PTE. 1393 */ 1394 if (PTE_GET(old_pte, ~PT_REMAP_BITS) != PTE_GET(pte, ~PT_REMAP_BITS)) 1395 panic("remap bits changed: old_pte="FMT_PTE", pte="FMT_PTE"\n", 1396 old_pte, pte); 1397 1398 /* 1399 * We don't create any mapping list entries on a remap, so release 1400 * any allocated hment after we drop the mapping list lock. 1401 */ 1402 done: 1403 if (is_consist) { 1404 x86_hm_exit(pp); 1405 if (hm != NULL) 1406 hment_free(hm); 1407 } 1408 return (rv); 1409 } 1410 1411 /* 1412 * Internal routine to load a single page table entry. This only fails if 1413 * we attempt to overwrite a page table link with a large page. 
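 *
 * Callers that try large pages handle that failure by dropping to the
 * next smaller pagesize, e.g. (condensed from hat_memload_array() and
 * hat_devload() below):
 *
 *	while (hati_load_common(hat, va, pp, attr, flags, level, pfn) != 0) {
 *		if (level == 0)
 *			panic("unexpected hati_load_common() failure");
 *		--level;
 *		pgsize = LEVEL_SIZE(level);
 *	}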
1414 */ 1415 static int 1416 hati_load_common( 1417 hat_t *hat, 1418 uintptr_t va, 1419 page_t *pp, 1420 uint_t attr, 1421 uint_t flags, 1422 level_t level, 1423 pfn_t pfn) 1424 { 1425 htable_t *ht; 1426 uint_t entry; 1427 x86pte_t pte; 1428 int rv = 0; 1429 1430 /* 1431 * The number 16 is arbitrary and here to catch a recursion problem 1432 * early before we blow out the kernel stack. 1433 */ 1434 ++curthread->t_hatdepth; 1435 ASSERT(curthread->t_hatdepth < 16); 1436 1437 ASSERT(hat == kas.a_hat || 1438 AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock)); 1439 1440 if (flags & HAT_LOAD_SHARE) 1441 hat->hat_flags |= HAT_SHARED; 1442 1443 /* 1444 * Find the page table that maps this page if it already exists. 1445 */ 1446 ht = htable_lookup(hat, va, level); 1447 1448 /* 1449 * We must have HAT_LOAD_NOCONSIST if page_t is NULL. 1450 */ 1451 if (pp == NULL) 1452 flags |= HAT_LOAD_NOCONSIST; 1453 1454 if (ht == NULL) { 1455 ht = htable_create(hat, va, level, NULL); 1456 ASSERT(ht != NULL); 1457 } 1458 entry = htable_va2entry(va, ht); 1459 1460 /* 1461 * a bunch of paranoid error checking 1462 */ 1463 ASSERT(ht->ht_busy > 0); 1464 if (ht->ht_vaddr > va || va > HTABLE_LAST_PAGE(ht)) 1465 panic("hati_load_common: bad htable %p, va %p", 1466 (void *)ht, (void *)va); 1467 ASSERT(ht->ht_level == level); 1468 1469 /* 1470 * construct the new PTE 1471 */ 1472 if (hat == kas.a_hat) 1473 attr &= ~PROT_USER; 1474 pte = hati_mkpte(pfn, attr, level, flags); 1475 if (hat == kas.a_hat && va >= kernelbase) 1476 PTE_SET(pte, mmu.pt_global); 1477 1478 /* 1479 * establish the mapping 1480 */ 1481 rv = hati_pte_map(ht, entry, pp, pte, flags, NULL); 1482 1483 /* 1484 * release the htable and any reserves 1485 */ 1486 htable_release(ht); 1487 --curthread->t_hatdepth; 1488 return (rv); 1489 } 1490 1491 /* 1492 * special case of hat_memload to deal with some kernel addrs for performance 1493 */ 1494 static void 1495 hat_kmap_load( 1496 caddr_t addr, 1497 page_t *pp, 1498 uint_t attr, 1499 uint_t flags) 1500 { 1501 uintptr_t va = (uintptr_t)addr; 1502 x86pte_t pte; 1503 pfn_t pfn = page_pptonum(pp); 1504 pgcnt_t pg_off = mmu_btop(va - mmu.kmap_addr); 1505 htable_t *ht; 1506 uint_t entry; 1507 void *pte_ptr; 1508 1509 /* 1510 * construct the requested PTE 1511 */ 1512 attr &= ~PROT_USER; 1513 attr |= HAT_STORECACHING_OK; 1514 pte = hati_mkpte(pfn, attr, 0, flags); 1515 PTE_SET(pte, mmu.pt_global); 1516 1517 /* 1518 * Figure out the pte_ptr and htable and use common code to finish up 1519 */ 1520 if (mmu.pae_hat) 1521 pte_ptr = mmu.kmap_ptes + pg_off; 1522 else 1523 pte_ptr = (x86pte32_t *)mmu.kmap_ptes + pg_off; 1524 ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr) >> 1525 LEVEL_SHIFT(1)]; 1526 entry = htable_va2entry(va, ht); 1527 ++curthread->t_hatdepth; 1528 ASSERT(curthread->t_hatdepth < 16); 1529 (void) hati_pte_map(ht, entry, pp, pte, flags, pte_ptr); 1530 --curthread->t_hatdepth; 1531 } 1532 1533 /* 1534 * hat_memload() - load a translation to the given page struct 1535 * 1536 * Flags for hat_memload/hat_devload/hat_*attr. 1537 * 1538 * HAT_LOAD Default flags to load a translation to the page. 1539 * 1540 * HAT_LOAD_LOCK Lock down mapping resources; hat_map(), hat_memload(), 1541 * and hat_devload(). 1542 * 1543 * HAT_LOAD_NOCONSIST Do not add mapping to page_t mapping list. 1544 * sets PT_NOCONSIST 1545 * 1546 * HAT_LOAD_SHARE A flag to hat_memload() to indicate h/w page tables 1547 * that map some user pages (not kas) is shared by more 1548 * than one process (eg. ISM). 
1549 * 1550 * HAT_LOAD_REMAP Reload a valid pte with a different page frame. 1551 * 1552 * HAT_NO_KALLOC Do not kmem_alloc while creating the mapping; at this 1553 * point, it's setting up mapping to allocate internal 1554 * hat layer data structures. This flag forces hat layer 1555 * to tap its reserves in order to prevent infinite 1556 * recursion. 1557 * 1558 * The following is a protection attribute (like PROT_READ, etc.) 1559 * 1560 * HAT_NOSYNC set PT_NOSYNC - this mapping's ref/mod bits 1561 * are never cleared. 1562 * 1563 * Installing new valid PTE's and creation of the mapping list 1564 * entry are controlled under the same lock. It's derived from the 1565 * page_t being mapped. 1566 */ 1567 static uint_t supported_memload_flags = 1568 HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_ADV | HAT_LOAD_NOCONSIST | 1569 HAT_LOAD_SHARE | HAT_NO_KALLOC | HAT_LOAD_REMAP | HAT_LOAD_TEXT; 1570 1571 void 1572 hat_memload( 1573 hat_t *hat, 1574 caddr_t addr, 1575 page_t *pp, 1576 uint_t attr, 1577 uint_t flags) 1578 { 1579 uintptr_t va = (uintptr_t)addr; 1580 level_t level = 0; 1581 pfn_t pfn = page_pptonum(pp); 1582 1583 XPV_DISALLOW_MIGRATE(); 1584 ASSERT(IS_PAGEALIGNED(va)); 1585 ASSERT(hat == kas.a_hat || va < _userlimit); 1586 ASSERT(hat == kas.a_hat || 1587 AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock)); 1588 ASSERT((flags & supported_memload_flags) == flags); 1589 1590 ASSERT(!IN_VA_HOLE(va)); 1591 ASSERT(!PP_ISFREE(pp)); 1592 1593 /* 1594 * kernel address special case for performance. 1595 */ 1596 if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) { 1597 ASSERT(hat == kas.a_hat); 1598 hat_kmap_load(addr, pp, attr, flags); 1599 XPV_ALLOW_MIGRATE(); 1600 return; 1601 } 1602 1603 /* 1604 * This is used for memory with normal caching enabled, so 1605 * always set HAT_STORECACHING_OK. 1606 */ 1607 attr |= HAT_STORECACHING_OK; 1608 if (hati_load_common(hat, va, pp, attr, flags, level, pfn) != 0) 1609 panic("unexpected hati_load_common() failure"); 1610 XPV_ALLOW_MIGRATE(); 1611 } 1612 1613 /* ARGSUSED */ 1614 void 1615 hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp, 1616 uint_t attr, uint_t flags, hat_region_cookie_t rcookie) 1617 { 1618 hat_memload(hat, addr, pp, attr, flags); 1619 } 1620 1621 /* 1622 * Load the given array of page structs using large pages when possible 1623 */ 1624 void 1625 hat_memload_array( 1626 hat_t *hat, 1627 caddr_t addr, 1628 size_t len, 1629 page_t **pages, 1630 uint_t attr, 1631 uint_t flags) 1632 { 1633 uintptr_t va = (uintptr_t)addr; 1634 uintptr_t eaddr = va + len; 1635 level_t level; 1636 size_t pgsize; 1637 pgcnt_t pgindx = 0; 1638 pfn_t pfn; 1639 pgcnt_t i; 1640 1641 XPV_DISALLOW_MIGRATE(); 1642 ASSERT(IS_PAGEALIGNED(va)); 1643 ASSERT(hat == kas.a_hat || va + len <= _userlimit); 1644 ASSERT(hat == kas.a_hat || 1645 AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock)); 1646 ASSERT((flags & supported_memload_flags) == flags); 1647 1648 /* 1649 * memload is used for memory with full caching enabled, so 1650 * set HAT_STORECACHING_OK. 1651 */ 1652 attr |= HAT_STORECACHING_OK; 1653 1654 /* 1655 * handle all pages using largest possible pagesize 1656 */ 1657 while (va < eaddr) { 1658 /* 1659 * decide what level mapping to use (ie. 
pagesize) 1660 */ 1661 pfn = page_pptonum(pages[pgindx]); 1662 for (level = mmu.max_page_level; ; --level) { 1663 pgsize = LEVEL_SIZE(level); 1664 if (level == 0) 1665 break; 1666 1667 if (!IS_P2ALIGNED(va, pgsize) || 1668 (eaddr - va) < pgsize || 1669 !IS_P2ALIGNED(pfn_to_pa(pfn), pgsize)) 1670 continue; 1671 1672 /* 1673 * To use a large mapping of this size, all the 1674 * pages we are passed must be sequential subpages 1675 * of the large page. 1676 * hat_page_demote() can't change p_szc because 1677 * all pages are locked. 1678 */ 1679 if (pages[pgindx]->p_szc >= level) { 1680 for (i = 0; i < mmu_btop(pgsize); ++i) { 1681 if (pfn + i != 1682 page_pptonum(pages[pgindx + i])) 1683 break; 1684 ASSERT(pages[pgindx + i]->p_szc >= 1685 level); 1686 ASSERT(pages[pgindx] + i == 1687 pages[pgindx + i]); 1688 } 1689 if (i == mmu_btop(pgsize)) { 1690 #ifdef DEBUG 1691 if (level == 2) 1692 map1gcnt++; 1693 #endif 1694 break; 1695 } 1696 } 1697 } 1698 1699 /* 1700 * Load this page mapping. If the load fails, try a smaller 1701 * pagesize. 1702 */ 1703 ASSERT(!IN_VA_HOLE(va)); 1704 while (hati_load_common(hat, va, pages[pgindx], attr, 1705 flags, level, pfn) != 0) { 1706 if (level == 0) 1707 panic("unexpected hati_load_common() failure"); 1708 --level; 1709 pgsize = LEVEL_SIZE(level); 1710 } 1711 1712 /* 1713 * move to next page 1714 */ 1715 va += pgsize; 1716 pgindx += mmu_btop(pgsize); 1717 } 1718 XPV_ALLOW_MIGRATE(); 1719 } 1720 1721 /* ARGSUSED */ 1722 void 1723 hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len, 1724 struct page **pps, uint_t attr, uint_t flags, 1725 hat_region_cookie_t rcookie) 1726 { 1727 hat_memload_array(hat, addr, len, pps, attr, flags); 1728 } 1729 1730 /* 1731 * void hat_devload(hat, addr, len, pf, attr, flags) 1732 * load/lock the given page frame number 1733 * 1734 * Advisory ordering attributes. Apply only to device mappings. 1735 * 1736 * HAT_STRICTORDER: the CPU must issue the references in order, as the 1737 * programmer specified. This is the default. 1738 * HAT_UNORDERED_OK: the CPU may reorder the references (this is all kinds 1739 * of reordering; store or load with store or load). 1740 * HAT_MERGING_OK: merging and batching: the CPU may merge individual stores 1741 * to consecutive locations (for example, turn two consecutive byte 1742 * stores into one halfword store), and it may batch individual loads 1743 * (for example, turn two consecutive byte loads into one halfword load). 1744 * This also implies re-ordering. 1745 * HAT_LOADCACHING_OK: the CPU may cache the data it fetches and reuse it 1746 * until another store occurs. The default is to fetch new data 1747 * on every load. This also implies merging. 1748 * HAT_STORECACHING_OK: the CPU may keep the data in the cache and push it to 1749 * the device (perhaps with other data) at a later time. The default is 1750 * to push the data right away. This also implies load caching. 1751 * 1752 * Equivalent of hat_memload(), but can be used for device memory where 1753 * there are no page_t's and we support additional flags (write merging, etc). 1754 * Note that we can have large page mappings with this interface. 
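 *
 * A worked example of the pagesize selection below (assuming amd64 with
 * 2M pages enabled): mapping 8M of device space whose va and pfn are
 * both 2M aligned settles on level 1 because
 *
 *	IS_P2ALIGNED(va, 2M)			va is 2M aligned
 *	(eva - va) = 8M >= 2M			enough length remains
 *	IS_P2ALIGNED(pfn, mmu_btop(2M))		pfn is a multiple of 512
 *
 * so the range needs four level 1 PTEs instead of 2048 level 0 PTEs.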
1755 */ 1756 int supported_devload_flags = HAT_LOAD | HAT_LOAD_LOCK | 1757 HAT_LOAD_NOCONSIST | HAT_STRICTORDER | HAT_UNORDERED_OK | 1758 HAT_MERGING_OK | HAT_LOADCACHING_OK | HAT_STORECACHING_OK; 1759 1760 void 1761 hat_devload( 1762 hat_t *hat, 1763 caddr_t addr, 1764 size_t len, 1765 pfn_t pfn, 1766 uint_t attr, 1767 int flags) 1768 { 1769 uintptr_t va = ALIGN2PAGE(addr); 1770 uintptr_t eva = va + len; 1771 level_t level; 1772 size_t pgsize; 1773 page_t *pp; 1774 int f; /* per PTE copy of flags - maybe modified */ 1775 uint_t a; /* per PTE copy of attr */ 1776 1777 XPV_DISALLOW_MIGRATE(); 1778 ASSERT(IS_PAGEALIGNED(va)); 1779 ASSERT(hat == kas.a_hat || eva <= _userlimit); 1780 ASSERT(hat == kas.a_hat || 1781 AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock)); 1782 ASSERT((flags & supported_devload_flags) == flags); 1783 1784 /* 1785 * handle all pages 1786 */ 1787 while (va < eva) { 1788 1789 /* 1790 * decide what level mapping to use (ie. pagesize) 1791 */ 1792 for (level = mmu.max_page_level; ; --level) { 1793 pgsize = LEVEL_SIZE(level); 1794 if (level == 0) 1795 break; 1796 if (IS_P2ALIGNED(va, pgsize) && 1797 (eva - va) >= pgsize && 1798 IS_P2ALIGNED(pfn, mmu_btop(pgsize))) { 1799 #ifdef DEBUG 1800 if (level == 2) 1801 map1gcnt++; 1802 #endif 1803 break; 1804 } 1805 } 1806 1807 /* 1808 * If this is just memory then allow caching (this happens 1809 * for the nucleus pages) - though HAT_PLAT_NOCACHE can be used 1810 * to override that. If we don't have a page_t then make sure 1811 * NOCONSIST is set. 1812 */ 1813 a = attr; 1814 f = flags; 1815 if (!pf_is_memory(pfn)) 1816 f |= HAT_LOAD_NOCONSIST; 1817 else if (!(a & HAT_PLAT_NOCACHE)) 1818 a |= HAT_STORECACHING_OK; 1819 1820 if (f & HAT_LOAD_NOCONSIST) 1821 pp = NULL; 1822 else 1823 pp = page_numtopp_nolock(pfn); 1824 1825 /* 1826 * Check to make sure we are really trying to map a valid 1827 * memory page. The caller wishing to intentionally map 1828 * free memory pages will have passed the HAT_LOAD_NOCONSIST 1829 * flag, then pp will be NULL. 1830 */ 1831 if (pp != NULL) { 1832 if (PP_ISFREE(pp)) { 1833 panic("hat_devload: loading " 1834 "a mapping to free page %p", (void *)pp); 1835 } 1836 1837 if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) { 1838 panic("hat_devload: loading a mapping " 1839 "to an unlocked page %p", 1840 (void *)pp); 1841 } 1842 } 1843 1844 /* 1845 * load this page mapping 1846 */ 1847 ASSERT(!IN_VA_HOLE(va)); 1848 while (hati_load_common(hat, va, pp, a, f, level, pfn) != 0) { 1849 if (level == 0) 1850 panic("unexpected hati_load_common() failure"); 1851 --level; 1852 pgsize = LEVEL_SIZE(level); 1853 } 1854 1855 /* 1856 * move to next page 1857 */ 1858 va += pgsize; 1859 pfn += mmu_btop(pgsize); 1860 } 1861 XPV_ALLOW_MIGRATE(); 1862 } 1863 1864 /* 1865 * void hat_unlock(hat, addr, len) 1866 * unlock the mappings to a given range of addresses 1867 * 1868 * Locks are tracked by ht_lock_cnt in the htable. 
1869 */ 1870 void 1871 hat_unlock(hat_t *hat, caddr_t addr, size_t len) 1872 { 1873 uintptr_t vaddr = (uintptr_t)addr; 1874 uintptr_t eaddr = vaddr + len; 1875 htable_t *ht = NULL; 1876 1877 /* 1878 * kernel entries are always locked, we don't track lock counts 1879 */ 1880 ASSERT(hat == kas.a_hat || eaddr <= _userlimit); 1881 ASSERT(IS_PAGEALIGNED(vaddr)); 1882 ASSERT(IS_PAGEALIGNED(eaddr)); 1883 if (hat == kas.a_hat) 1884 return; 1885 if (eaddr > _userlimit) 1886 panic("hat_unlock() address out of range - above _userlimit"); 1887 1888 XPV_DISALLOW_MIGRATE(); 1889 ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock)); 1890 while (vaddr < eaddr) { 1891 (void) htable_walk(hat, &ht, &vaddr, eaddr); 1892 if (ht == NULL) 1893 break; 1894 1895 ASSERT(!IN_VA_HOLE(vaddr)); 1896 1897 if (ht->ht_lock_cnt < 1) 1898 panic("hat_unlock(): lock_cnt < 1, " 1899 "htable=%p, vaddr=%p\n", (void *)ht, (void *)vaddr); 1900 HTABLE_LOCK_DEC(ht); 1901 1902 vaddr += LEVEL_SIZE(ht->ht_level); 1903 } 1904 if (ht) 1905 htable_release(ht); 1906 XPV_ALLOW_MIGRATE(); 1907 } 1908 1909 /* ARGSUSED */ 1910 void 1911 hat_unlock_region(struct hat *hat, caddr_t addr, size_t len, 1912 hat_region_cookie_t rcookie) 1913 { 1914 panic("No shared region support on x86"); 1915 } 1916 1917 #if !defined(__xpv) 1918 /* 1919 * Cross call service routine to demap a virtual page on 1920 * the current CPU or flush all mappings in TLB. 1921 */ 1922 /*ARGSUSED*/ 1923 static int 1924 hati_demap_func(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3) 1925 { 1926 hat_t *hat = (hat_t *)a1; 1927 caddr_t addr = (caddr_t)a2; 1928 1929 /* 1930 * If the target hat isn't the kernel and this CPU isn't operating 1931 * in the target hat, we can ignore the cross call. 1932 */ 1933 if (hat != kas.a_hat && hat != CPU->cpu_current_hat) 1934 return (0); 1935 1936 /* 1937 * For a normal address, we just flush one page mapping 1938 */ 1939 if ((uintptr_t)addr != DEMAP_ALL_ADDR) { 1940 mmu_tlbflush_entry(addr); 1941 return (0); 1942 } 1943 1944 /* 1945 * Otherwise we reload cr3 to effect a complete TLB flush. 1946 * 1947 * A reload of cr3 on a VLP process also means we must also recopy in 1948 * the pte values from the struct hat 1949 */ 1950 if (hat->hat_flags & HAT_VLP) { 1951 #if defined(__amd64) 1952 x86pte_t *vlpptep = CPU->cpu_hat_info->hci_vlp_l2ptes; 1953 1954 VLP_COPY(hat->hat_vlp_ptes, vlpptep); 1955 #elif defined(__i386) 1956 reload_pae32(hat, CPU); 1957 #endif 1958 } 1959 reload_cr3(); 1960 return (0); 1961 } 1962 1963 /* 1964 * Flush all TLB entries, including global (ie. kernel) ones. 1965 */ 1966 static void 1967 flush_all_tlb_entries(void) 1968 { 1969 ulong_t cr4 = getcr4(); 1970 1971 if (cr4 & CR4_PGE) { 1972 setcr4(cr4 & ~(ulong_t)CR4_PGE); 1973 setcr4(cr4); 1974 1975 /* 1976 * 32 bit PAE also needs to always reload_cr3() 1977 */ 1978 if (mmu.max_level == 2) 1979 reload_cr3(); 1980 } else { 1981 reload_cr3(); 1982 } 1983 } 1984 1985 #define TLB_CPU_HALTED (01ul) 1986 #define TLB_INVAL_ALL (02ul) 1987 #define CAS_TLB_INFO(cpu, old, new) \ 1988 caslong((ulong_t *)&(cpu)->cpu_m.mcpu_tlb_info, (old), (new)) 1989 1990 /* 1991 * Record that a CPU is going idle 1992 */ 1993 void 1994 tlb_going_idle(void) 1995 { 1996 atomic_or_long((ulong_t *)&CPU->cpu_m.mcpu_tlb_info, TLB_CPU_HALTED); 1997 } 1998 1999 /* 2000 * Service a delayed TLB flush if coming out of being idle. 2001 * It will be called from cpu idle notification with interrupt disabled. 
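 *
 * The other half of this protocol sits in hat_tlb_inval() below: rather
 * than cross call a halted CPU it marks it with, roughly,
 *
 *	while (tlb_info == TLB_CPU_HALTED)
 *		CAS_TLB_INFO(cpup, TLB_CPU_HALTED,
 *		    TLB_CPU_HALTED | TLB_INVAL_ALL);
 *
 * and drops that CPU from the shootdown set; the actual flush happens
 * here once the CPU wakes up and sees TLB_INVAL_ALL.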
2002 */ 2003 void 2004 tlb_service(void) 2005 { 2006 ulong_t tlb_info; 2007 ulong_t found; 2008 2009 /* 2010 * We only have to do something if coming out of being idle. 2011 */ 2012 tlb_info = CPU->cpu_m.mcpu_tlb_info; 2013 if (tlb_info & TLB_CPU_HALTED) { 2014 ASSERT(CPU->cpu_current_hat == kas.a_hat); 2015 2016 /* 2017 * Atomic clear and fetch of old state. 2018 */ 2019 while ((found = CAS_TLB_INFO(CPU, tlb_info, 0)) != tlb_info) { 2020 ASSERT(found & TLB_CPU_HALTED); 2021 tlb_info = found; 2022 SMT_PAUSE(); 2023 } 2024 if (tlb_info & TLB_INVAL_ALL) 2025 flush_all_tlb_entries(); 2026 } 2027 } 2028 #endif /* !__xpv */ 2029 2030 /* 2031 * Internal routine to do cross calls to invalidate a range of pages on 2032 * all CPUs using a given hat. 2033 */ 2034 void 2035 hat_tlb_inval(hat_t *hat, uintptr_t va) 2036 { 2037 extern int flushes_require_xcalls; /* from mp_startup.c */ 2038 cpuset_t justme; 2039 cpuset_t cpus_to_shootdown; 2040 #ifndef __xpv 2041 cpuset_t check_cpus; 2042 cpu_t *cpup; 2043 int c; 2044 #endif 2045 2046 /* 2047 * If the hat is being destroyed, there are no more users, so 2048 * demap need not do anything. 2049 */ 2050 if (hat->hat_flags & HAT_FREEING) 2051 return; 2052 2053 /* 2054 * If demapping from a shared pagetable, we best demap the 2055 * entire set of user TLBs, since we don't know what addresses 2056 * these were shared at. 2057 */ 2058 if (hat->hat_flags & HAT_SHARED) { 2059 hat = kas.a_hat; 2060 va = DEMAP_ALL_ADDR; 2061 } 2062 2063 /* 2064 * if not running with multiple CPUs, don't use cross calls 2065 */ 2066 if (panicstr || !flushes_require_xcalls) { 2067 #ifdef __xpv 2068 if (va == DEMAP_ALL_ADDR) 2069 xen_flush_tlb(); 2070 else 2071 xen_flush_va((caddr_t)va); 2072 #else 2073 (void) hati_demap_func((xc_arg_t)hat, (xc_arg_t)va, NULL); 2074 #endif 2075 return; 2076 } 2077 2078 2079 /* 2080 * Determine CPUs to shootdown. Kernel changes always do all CPUs. 2081 * Otherwise it's just CPUs currently executing in this hat. 2082 */ 2083 kpreempt_disable(); 2084 CPUSET_ONLY(justme, CPU->cpu_id); 2085 if (hat == kas.a_hat) 2086 cpus_to_shootdown = khat_cpuset; 2087 else 2088 cpus_to_shootdown = hat->hat_cpus; 2089 2090 #ifndef __xpv 2091 /* 2092 * If any CPUs in the set are idle, just request a delayed flush 2093 * and avoid waking them up. 
2094 */ 2095 check_cpus = cpus_to_shootdown; 2096 for (c = 0; c < NCPU && !CPUSET_ISNULL(check_cpus); ++c) { 2097 ulong_t tlb_info; 2098 2099 if (!CPU_IN_SET(check_cpus, c)) 2100 continue; 2101 CPUSET_DEL(check_cpus, c); 2102 cpup = cpu[c]; 2103 if (cpup == NULL) 2104 continue; 2105 2106 tlb_info = cpup->cpu_m.mcpu_tlb_info; 2107 while (tlb_info == TLB_CPU_HALTED) { 2108 (void) CAS_TLB_INFO(cpup, TLB_CPU_HALTED, 2109 TLB_CPU_HALTED | TLB_INVAL_ALL); 2110 SMT_PAUSE(); 2111 tlb_info = cpup->cpu_m.mcpu_tlb_info; 2112 } 2113 if (tlb_info == (TLB_CPU_HALTED | TLB_INVAL_ALL)) { 2114 HATSTAT_INC(hs_tlb_inval_delayed); 2115 CPUSET_DEL(cpus_to_shootdown, c); 2116 } 2117 } 2118 #endif 2119 2120 if (CPUSET_ISNULL(cpus_to_shootdown) || 2121 CPUSET_ISEQUAL(cpus_to_shootdown, justme)) { 2122 2123 #ifdef __xpv 2124 if (va == DEMAP_ALL_ADDR) 2125 xen_flush_tlb(); 2126 else 2127 xen_flush_va((caddr_t)va); 2128 #else 2129 (void) hati_demap_func((xc_arg_t)hat, (xc_arg_t)va, NULL); 2130 #endif 2131 2132 } else { 2133 2134 CPUSET_ADD(cpus_to_shootdown, CPU->cpu_id); 2135 #ifdef __xpv 2136 if (va == DEMAP_ALL_ADDR) 2137 xen_gflush_tlb(cpus_to_shootdown); 2138 else 2139 xen_gflush_va((caddr_t)va, cpus_to_shootdown); 2140 #else 2141 xc_call((xc_arg_t)hat, (xc_arg_t)va, NULL, 2142 CPUSET2BV(cpus_to_shootdown), hati_demap_func); 2143 #endif 2144 2145 } 2146 kpreempt_enable(); 2147 } 2148 2149 /* 2150 * Interior routine for HAT_UNLOADs from hat_unload_callback(), 2151 * hat_kmap_unload() OR from hat_steal() code. This routine doesn't 2152 * handle releasing of the htables. 2153 */ 2154 void 2155 hat_pte_unmap( 2156 htable_t *ht, 2157 uint_t entry, 2158 uint_t flags, 2159 x86pte_t old_pte, 2160 void *pte_ptr) 2161 { 2162 hat_t *hat = ht->ht_hat; 2163 hment_t *hm = NULL; 2164 page_t *pp = NULL; 2165 level_t l = ht->ht_level; 2166 pfn_t pfn; 2167 2168 /* 2169 * We always track the locking counts, even if nothing is unmapped 2170 */ 2171 if ((flags & HAT_UNLOAD_UNLOCK) != 0 && hat != kas.a_hat) { 2172 ASSERT(ht->ht_lock_cnt > 0); 2173 HTABLE_LOCK_DEC(ht); 2174 } 2175 2176 /* 2177 * Figure out which page's mapping list lock to acquire using the PFN 2178 * passed in "old" PTE. We then attempt to invalidate the PTE. 2179 * If another thread, probably a hat_pageunload, has asynchronously 2180 * unmapped/remapped this address we'll loop here. 2181 */ 2182 ASSERT(ht->ht_busy > 0); 2183 while (PTE_ISVALID(old_pte)) { 2184 pfn = PTE2PFN(old_pte, l); 2185 if (PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST) { 2186 pp = NULL; 2187 } else { 2188 #ifdef __xpv 2189 if (pfn == PFN_INVALID) 2190 panic("Invalid PFN, but not PT_NOCONSIST"); 2191 #endif 2192 pp = page_numtopp_nolock(pfn); 2193 if (pp == NULL) { 2194 panic("no page_t, not NOCONSIST: old_pte=" 2195 FMT_PTE " ht=%lx entry=0x%x pte_ptr=%lx", 2196 old_pte, (uintptr_t)ht, entry, 2197 (uintptr_t)pte_ptr); 2198 } 2199 x86_hm_enter(pp); 2200 } 2201 2202 old_pte = x86pte_inval(ht, entry, old_pte, pte_ptr); 2203 2204 /* 2205 * If the page hadn't changed we've unmapped it and can proceed 2206 */ 2207 if (PTE_ISVALID(old_pte) && PTE2PFN(old_pte, l) == pfn) 2208 break; 2209 2210 /* 2211 * Otherwise, we'll have to retry with the current old_pte. 2212 * Drop the hment lock, since the pfn may have changed. 
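* (That is, x86pte_inval() returned a still-valid PTE whose pfn differs
* from the one whose mapping list we locked, so we loop and lock the
* mapping list for the new pfn before trying the invalidate again.)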
2213 */ 2214 if (pp != NULL) { 2215 x86_hm_exit(pp); 2216 pp = NULL; 2217 } else { 2218 ASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST); 2219 } 2220 } 2221 2222 /* 2223 * If the old mapping wasn't valid, there's nothing more to do 2224 */ 2225 if (!PTE_ISVALID(old_pte)) { 2226 if (pp != NULL) 2227 x86_hm_exit(pp); 2228 return; 2229 } 2230 2231 /* 2232 * Take care of syncing any MOD/REF bits and removing the hment. 2233 */ 2234 if (pp != NULL) { 2235 if (!(flags & HAT_UNLOAD_NOSYNC)) 2236 hati_sync_pte_to_page(pp, old_pte, l); 2237 hm = hment_remove(pp, ht, entry); 2238 x86_hm_exit(pp); 2239 if (hm != NULL) 2240 hment_free(hm); 2241 } 2242 2243 /* 2244 * Handle book keeping in the htable and hat 2245 */ 2246 ASSERT(ht->ht_valid_cnt > 0); 2247 HTABLE_DEC(ht->ht_valid_cnt); 2248 PGCNT_DEC(hat, l); 2249 } 2250 2251 /* 2252 * very cheap unload implementation to special case some kernel addresses 2253 */ 2254 static void 2255 hat_kmap_unload(caddr_t addr, size_t len, uint_t flags) 2256 { 2257 uintptr_t va = (uintptr_t)addr; 2258 uintptr_t eva = va + len; 2259 pgcnt_t pg_index; 2260 htable_t *ht; 2261 uint_t entry; 2262 x86pte_t *pte_ptr; 2263 x86pte_t old_pte; 2264 2265 for (; va < eva; va += MMU_PAGESIZE) { 2266 /* 2267 * Get the PTE 2268 */ 2269 pg_index = mmu_btop(va - mmu.kmap_addr); 2270 pte_ptr = PT_INDEX_PTR(mmu.kmap_ptes, pg_index); 2271 old_pte = GET_PTE(pte_ptr); 2272 2273 /* 2274 * get the htable / entry 2275 */ 2276 ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr) 2277 >> LEVEL_SHIFT(1)]; 2278 entry = htable_va2entry(va, ht); 2279 2280 /* 2281 * use mostly common code to unmap it. 2282 */ 2283 hat_pte_unmap(ht, entry, flags, old_pte, pte_ptr); 2284 } 2285 } 2286 2287 2288 /* 2289 * unload a range of virtual address space (no callback) 2290 */ 2291 void 2292 hat_unload(hat_t *hat, caddr_t addr, size_t len, uint_t flags) 2293 { 2294 uintptr_t va = (uintptr_t)addr; 2295 2296 XPV_DISALLOW_MIGRATE(); 2297 ASSERT(hat == kas.a_hat || va + len <= _userlimit); 2298 2299 /* 2300 * special case for performance. 2301 */ 2302 if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) { 2303 ASSERT(hat == kas.a_hat); 2304 hat_kmap_unload(addr, len, flags); 2305 } else { 2306 hat_unload_callback(hat, addr, len, flags, NULL); 2307 } 2308 XPV_ALLOW_MIGRATE(); 2309 } 2310 2311 /* 2312 * Do the callbacks for ranges being unloaded. 
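* (Each range_info_t below records a start va, a count of unloaded
* mappings and their level; handle_ranges() converts that into the
* [hcb_start_addr, hcb_end_addr) window handed to each callback.)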
2313 */
2314 typedef struct range_info {
2315 uintptr_t rng_va;
2316 ulong_t rng_cnt;
2317 level_t rng_level;
2318 } range_info_t;
2319
2320 static void
2321 handle_ranges(hat_callback_t *cb, uint_t cnt, range_info_t *range)
2322 {
2323 /*
2324 * do callbacks to upper level VM system
2325 */
2326 while (cb != NULL && cnt > 0) {
2327 --cnt;
2328 cb->hcb_start_addr = (caddr_t)range[cnt].rng_va;
2329 cb->hcb_end_addr = cb->hcb_start_addr;
2330 cb->hcb_end_addr +=
2331 range[cnt].rng_cnt << LEVEL_SHIFT(range[cnt].rng_level);
2332 cb->hcb_function(cb);
2333 }
2334 }
2335
2336 /*
2337 * Unload a given range of addresses (has optional callback)
2338 *
2339 * Flags:
2340 * define HAT_UNLOAD 0x00
2341 * define HAT_UNLOAD_NOSYNC 0x02
2342 * define HAT_UNLOAD_UNLOCK 0x04
2343 * define HAT_UNLOAD_OTHER 0x08 - not used
2344 * define HAT_UNLOAD_UNMAP 0x10 - same as HAT_UNLOAD
2345 */
2346 #define MAX_UNLOAD_CNT (8)
2347 void
2348 hat_unload_callback(
2349 hat_t *hat,
2350 caddr_t addr,
2351 size_t len,
2352 uint_t flags,
2353 hat_callback_t *cb)
2354 {
2355 uintptr_t vaddr = (uintptr_t)addr;
2356 uintptr_t eaddr = vaddr + len;
2357 htable_t *ht = NULL;
2358 uint_t entry;
2359 uintptr_t contig_va = (uintptr_t)-1L;
2360 range_info_t r[MAX_UNLOAD_CNT];
2361 uint_t r_cnt = 0;
2362 x86pte_t old_pte;
2363
2364 XPV_DISALLOW_MIGRATE();
2365 ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
2366 ASSERT(IS_PAGEALIGNED(vaddr));
2367 ASSERT(IS_PAGEALIGNED(eaddr));
2368
2369 /*
2370 * Special case a single page being unloaded for speed. This happens
2371 * quite frequently, COW faults after a fork() for example.
2372 */
2373 if (cb == NULL && len == MMU_PAGESIZE) {
2374 ht = htable_getpte(hat, vaddr, &entry, &old_pte, 0);
2375 if (ht != NULL) {
2376 if (PTE_ISVALID(old_pte))
2377 hat_pte_unmap(ht, entry, flags, old_pte, NULL);
2378 htable_release(ht);
2379 }
2380 XPV_ALLOW_MIGRATE();
2381 return;
2382 }
2383
2384 while (vaddr < eaddr) {
2385 old_pte = htable_walk(hat, &ht, &vaddr, eaddr);
2386 if (ht == NULL)
2387 break;
2388
2389 ASSERT(!IN_VA_HOLE(vaddr));
2390
2391 if (vaddr < (uintptr_t)addr)
2392 panic("hat_unload_callback(): unmap inside large page");
2393
2394 /*
2395 * We'll do the callbacks for contiguous ranges
2396 */
2397 if (vaddr != contig_va ||
2398 (r_cnt > 0 && r[r_cnt - 1].rng_level != ht->ht_level)) {
2399 if (r_cnt == MAX_UNLOAD_CNT) {
2400 handle_ranges(cb, r_cnt, r);
2401 r_cnt = 0;
2402 }
2403 r[r_cnt].rng_va = vaddr;
2404 r[r_cnt].rng_cnt = 0;
2405 r[r_cnt].rng_level = ht->ht_level;
2406 ++r_cnt;
2407 }
2408
2409 /*
2410 * Unload one mapping from the page tables.
2411 */
2412 entry = htable_va2entry(vaddr, ht);
2413 hat_pte_unmap(ht, entry, flags, old_pte, NULL);
2414 ASSERT(ht->ht_level <= mmu.max_page_level);
2415 vaddr += LEVEL_SIZE(ht->ht_level);
2416 contig_va = vaddr;
2417 ++r[r_cnt - 1].rng_cnt;
2418 }
2419 if (ht)
2420 htable_release(ht);
2421
2422 /*
2423 * handle last range for callbacks
2424 */
2425 if (r_cnt > 0)
2426 handle_ranges(cb, r_cnt, r);
2427 XPV_ALLOW_MIGRATE();
2428 }
2429
2430 /*
2431 * Invalidate a virtual address translation on a slave CPU during
2432 * panic() dumps.
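* (We step through the range by whatever pagesize maps each address; if
* hat_getpagesize() reports no mapping, we fall back to flushing the
* entire TLB and stop.)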
2433 */ 2434 void 2435 hat_flush_range(hat_t *hat, caddr_t va, size_t size) 2436 { 2437 ssize_t sz; 2438 caddr_t endva = va + size; 2439 2440 while (va < endva) { 2441 sz = hat_getpagesize(hat, va); 2442 if (sz < 0) { 2443 #ifdef __xpv 2444 xen_flush_tlb(); 2445 #else 2446 flush_all_tlb_entries(); 2447 #endif 2448 break; 2449 } 2450 #ifdef __xpv 2451 xen_flush_va(va); 2452 #else 2453 mmu_tlbflush_entry(va); 2454 #endif 2455 va += sz; 2456 } 2457 } 2458 2459 /* 2460 * synchronize mapping with software data structures 2461 * 2462 * This interface is currently only used by the working set monitor 2463 * driver. 2464 */ 2465 /*ARGSUSED*/ 2466 void 2467 hat_sync(hat_t *hat, caddr_t addr, size_t len, uint_t flags) 2468 { 2469 uintptr_t vaddr = (uintptr_t)addr; 2470 uintptr_t eaddr = vaddr + len; 2471 htable_t *ht = NULL; 2472 uint_t entry; 2473 x86pte_t pte; 2474 x86pte_t save_pte; 2475 x86pte_t new; 2476 page_t *pp; 2477 2478 ASSERT(!IN_VA_HOLE(vaddr)); 2479 ASSERT(IS_PAGEALIGNED(vaddr)); 2480 ASSERT(IS_PAGEALIGNED(eaddr)); 2481 ASSERT(hat == kas.a_hat || eaddr <= _userlimit); 2482 2483 XPV_DISALLOW_MIGRATE(); 2484 for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) { 2485 try_again: 2486 pte = htable_walk(hat, &ht, &vaddr, eaddr); 2487 if (ht == NULL) 2488 break; 2489 entry = htable_va2entry(vaddr, ht); 2490 2491 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC || 2492 PTE_GET(pte, PT_REF | PT_MOD) == 0) 2493 continue; 2494 2495 /* 2496 * We need to acquire the mapping list lock to protect 2497 * against hat_pageunload(), hat_unload(), etc. 2498 */ 2499 pp = page_numtopp_nolock(PTE2PFN(pte, ht->ht_level)); 2500 if (pp == NULL) 2501 break; 2502 x86_hm_enter(pp); 2503 save_pte = pte; 2504 pte = x86pte_get(ht, entry); 2505 if (pte != save_pte) { 2506 x86_hm_exit(pp); 2507 goto try_again; 2508 } 2509 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC || 2510 PTE_GET(pte, PT_REF | PT_MOD) == 0) { 2511 x86_hm_exit(pp); 2512 continue; 2513 } 2514 2515 /* 2516 * Need to clear ref or mod bits. We may compete with 2517 * hardware updating the R/M bits and have to try again. 2518 */ 2519 if (flags == HAT_SYNC_ZERORM) { 2520 new = pte; 2521 PTE_CLR(new, PT_REF | PT_MOD); 2522 pte = hati_update_pte(ht, entry, pte, new); 2523 if (pte != 0) { 2524 x86_hm_exit(pp); 2525 goto try_again; 2526 } 2527 } else { 2528 /* 2529 * sync the PTE to the page_t 2530 */ 2531 hati_sync_pte_to_page(pp, save_pte, ht->ht_level); 2532 } 2533 x86_hm_exit(pp); 2534 } 2535 if (ht) 2536 htable_release(ht); 2537 XPV_ALLOW_MIGRATE(); 2538 } 2539 2540 /* 2541 * void hat_map(hat, addr, len, flags) 2542 */ 2543 /*ARGSUSED*/ 2544 void 2545 hat_map(hat_t *hat, caddr_t addr, size_t len, uint_t flags) 2546 { 2547 /* does nothing */ 2548 } 2549 2550 /* 2551 * uint_t hat_getattr(hat, addr, *attr) 2552 * returns attr for <hat,addr> in *attr. returns 0 if there was a 2553 * mapping and *attr is valid, nonzero if there was no mapping and 2554 * *attr is not valid. 
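* (The returned attr is built up from PROT_READ plus PROT_WRITE,
* PROT_USER, PROT_EXEC and HAT_NOSYNC, as derived from the PTE bits
* examined below.)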
2555 */ 2556 uint_t 2557 hat_getattr(hat_t *hat, caddr_t addr, uint_t *attr) 2558 { 2559 uintptr_t vaddr = ALIGN2PAGE(addr); 2560 htable_t *ht = NULL; 2561 x86pte_t pte; 2562 2563 ASSERT(hat == kas.a_hat || vaddr <= _userlimit); 2564 2565 if (IN_VA_HOLE(vaddr)) 2566 return ((uint_t)-1); 2567 2568 ht = htable_getpte(hat, vaddr, NULL, &pte, mmu.max_page_level); 2569 if (ht == NULL) 2570 return ((uint_t)-1); 2571 2572 if (!PTE_ISVALID(pte) || !PTE_ISPAGE(pte, ht->ht_level)) { 2573 htable_release(ht); 2574 return ((uint_t)-1); 2575 } 2576 2577 *attr = PROT_READ; 2578 if (PTE_GET(pte, PT_WRITABLE)) 2579 *attr |= PROT_WRITE; 2580 if (PTE_GET(pte, PT_USER)) 2581 *attr |= PROT_USER; 2582 if (!PTE_GET(pte, mmu.pt_nx)) 2583 *attr |= PROT_EXEC; 2584 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC) 2585 *attr |= HAT_NOSYNC; 2586 htable_release(ht); 2587 return (0); 2588 } 2589 2590 /* 2591 * hat_updateattr() applies the given attribute change to an existing mapping 2592 */ 2593 #define HAT_LOAD_ATTR 1 2594 #define HAT_SET_ATTR 2 2595 #define HAT_CLR_ATTR 3 2596 2597 static void 2598 hat_updateattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr, int what) 2599 { 2600 uintptr_t vaddr = (uintptr_t)addr; 2601 uintptr_t eaddr = (uintptr_t)addr + len; 2602 htable_t *ht = NULL; 2603 uint_t entry; 2604 x86pte_t oldpte, newpte; 2605 page_t *pp; 2606 2607 XPV_DISALLOW_MIGRATE(); 2608 ASSERT(IS_PAGEALIGNED(vaddr)); 2609 ASSERT(IS_PAGEALIGNED(eaddr)); 2610 ASSERT(hat == kas.a_hat || 2611 AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock)); 2612 for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) { 2613 try_again: 2614 oldpte = htable_walk(hat, &ht, &vaddr, eaddr); 2615 if (ht == NULL) 2616 break; 2617 if (PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOCONSIST) 2618 continue; 2619 2620 pp = page_numtopp_nolock(PTE2PFN(oldpte, ht->ht_level)); 2621 if (pp == NULL) 2622 continue; 2623 x86_hm_enter(pp); 2624 2625 newpte = oldpte; 2626 /* 2627 * We found a page table entry in the desired range, 2628 * figure out the new attributes. 2629 */ 2630 if (what == HAT_SET_ATTR || what == HAT_LOAD_ATTR) { 2631 if ((attr & PROT_WRITE) && 2632 !PTE_GET(oldpte, PT_WRITABLE)) 2633 newpte |= PT_WRITABLE; 2634 2635 if ((attr & HAT_NOSYNC) && 2636 PTE_GET(oldpte, PT_SOFTWARE) < PT_NOSYNC) 2637 newpte |= PT_NOSYNC; 2638 2639 if ((attr & PROT_EXEC) && PTE_GET(oldpte, mmu.pt_nx)) 2640 newpte &= ~mmu.pt_nx; 2641 } 2642 2643 if (what == HAT_LOAD_ATTR) { 2644 if (!(attr & PROT_WRITE) && 2645 PTE_GET(oldpte, PT_WRITABLE)) 2646 newpte &= ~PT_WRITABLE; 2647 2648 if (!(attr & HAT_NOSYNC) && 2649 PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC) 2650 newpte &= ~PT_SOFTWARE; 2651 2652 if (!(attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx)) 2653 newpte |= mmu.pt_nx; 2654 } 2655 2656 if (what == HAT_CLR_ATTR) { 2657 if ((attr & PROT_WRITE) && PTE_GET(oldpte, PT_WRITABLE)) 2658 newpte &= ~PT_WRITABLE; 2659 2660 if ((attr & HAT_NOSYNC) && 2661 PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC) 2662 newpte &= ~PT_SOFTWARE; 2663 2664 if ((attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx)) 2665 newpte |= mmu.pt_nx; 2666 } 2667 2668 /* 2669 * Ensure NOSYNC/NOCONSIST mappings have REF and MOD set. 2670 * x86pte_set() depends on this. 2671 */ 2672 if (PTE_GET(newpte, PT_SOFTWARE) >= PT_NOSYNC) 2673 newpte |= PT_REF | PT_MOD; 2674 2675 /* 2676 * what about PROT_READ or others? this code only handles: 2677 * EXEC, WRITE, NOSYNC 2678 */ 2679 2680 /* 2681 * If new PTE really changed, update the table. 
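* (hati_update_pte() returns 0 on success; a nonzero return is the PTE
* it actually found, meaning we raced with another update and must
* re-walk via try_again.)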
2682 */
2683 if (newpte != oldpte) {
2684 entry = htable_va2entry(vaddr, ht);
2685 oldpte = hati_update_pte(ht, entry, oldpte, newpte);
2686 if (oldpte != 0) {
2687 x86_hm_exit(pp);
2688 goto try_again;
2689 }
2690 }
2691 x86_hm_exit(pp);
2692 }
2693 if (ht)
2694 htable_release(ht);
2695 XPV_ALLOW_MIGRATE();
2696 }
2697
2698 /*
2699 * Various wrappers for hat_updateattr()
2700 */
2701 void
2702 hat_setattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2703 {
2704 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2705 hat_updateattr(hat, addr, len, attr, HAT_SET_ATTR);
2706 }
2707
2708 void
2709 hat_clrattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2710 {
2711 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2712 hat_updateattr(hat, addr, len, attr, HAT_CLR_ATTR);
2713 }
2714
2715 void
2716 hat_chgattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2717 {
2718 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2719 hat_updateattr(hat, addr, len, attr, HAT_LOAD_ATTR);
2720 }
2721
2722 void
2723 hat_chgprot(hat_t *hat, caddr_t addr, size_t len, uint_t vprot)
2724 {
2725 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2726 hat_updateattr(hat, addr, len, vprot & HAT_PROT_MASK, HAT_LOAD_ATTR);
2727 }
2728
2729 /*
2730 * size_t hat_getpagesize(hat, addr)
2731 * returns pagesize in bytes for <hat, addr>. returns -1 if there is
2732 * no mapping. This is an advisory call.
2733 */
2734 ssize_t
2735 hat_getpagesize(hat_t *hat, caddr_t addr)
2736 {
2737 uintptr_t vaddr = ALIGN2PAGE(addr);
2738 htable_t *ht;
2739 size_t pagesize;
2740
2741 ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2742 if (IN_VA_HOLE(vaddr))
2743 return (-1);
2744 ht = htable_getpage(hat, vaddr, NULL);
2745 if (ht == NULL)
2746 return (-1);
2747 pagesize = LEVEL_SIZE(ht->ht_level);
2748 htable_release(ht);
2749 return (pagesize);
2750 }
2751
2752
2753
2754 /*
2755 * pfn_t hat_getpfnum(hat, addr)
2756 * returns pfn for <hat, addr> or PFN_INVALID if mapping is invalid.
2757 */
2758 pfn_t
2759 hat_getpfnum(hat_t *hat, caddr_t addr)
2760 {
2761 uintptr_t vaddr = ALIGN2PAGE(addr);
2762 htable_t *ht;
2763 uint_t entry;
2764 pfn_t pfn = PFN_INVALID;
2765
2766 ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2767 if (khat_running == 0)
2768 return (PFN_INVALID);
2769
2770 if (IN_VA_HOLE(vaddr))
2771 return (PFN_INVALID);
2772
2773 XPV_DISALLOW_MIGRATE();
2774 /*
2775 * A very common use of hat_getpfnum() is from the DDI for kernel pages.
2776 * Use the kmap_ptes (which also covers the 32 bit heap) to speed
2777 * this up.
2778 */
2779 if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
2780 x86pte_t pte;
2781 pgcnt_t pg_index;
2782
2783 pg_index = mmu_btop(vaddr - mmu.kmap_addr);
2784 pte = GET_PTE(PT_INDEX_PTR(mmu.kmap_ptes, pg_index));
2785 if (PTE_ISVALID(pte))
2786 /*LINTED [use of constant 0 causes a lint warning] */
2787 pfn = PTE2PFN(pte, 0);
2788 XPV_ALLOW_MIGRATE();
2789 return (pfn);
2790 }
2791
2792 ht = htable_getpage(hat, vaddr, &entry);
2793 if (ht == NULL) {
2794 XPV_ALLOW_MIGRATE();
2795 return (PFN_INVALID);
2796 }
2797 ASSERT(vaddr >= ht->ht_vaddr);
2798 ASSERT(vaddr <= HTABLE_LAST_PAGE(ht));
2799 pfn = PTE2PFN(x86pte_get(ht, entry), ht->ht_level);
2800 if (ht->ht_level > 0)
2801 pfn += mmu_btop(vaddr & LEVEL_OFFSET(ht->ht_level));
2802 htable_release(ht);
2803 XPV_ALLOW_MIGRATE();
2804 return (pfn);
2805 }
2806
2807 /*
2808 * hat_getkpfnum() is an obsolete DDI routine, and its use is discouraged.
2809 * Use hat_getpfnum(kas.a_hat, ...)
instead. 2810 * 2811 * We'd like to return PFN_INVALID if the mappings have underlying page_t's 2812 * but can't right now due to the fact that some software has grown to use 2813 * this interface incorrectly. So for now when the interface is misused, 2814 * return a warning to the user that in the future it won't work in the 2815 * way they're abusing it, and carry on. 2816 * 2817 * Note that hat_getkpfnum() is never supported on amd64. 2818 */ 2819 #if !defined(__amd64) 2820 pfn_t 2821 hat_getkpfnum(caddr_t addr) 2822 { 2823 pfn_t pfn; 2824 int badcaller = 0; 2825 2826 if (khat_running == 0) 2827 panic("hat_getkpfnum(): called too early\n"); 2828 if ((uintptr_t)addr < kernelbase) 2829 return (PFN_INVALID); 2830 2831 XPV_DISALLOW_MIGRATE(); 2832 if (segkpm && IS_KPM_ADDR(addr)) { 2833 badcaller = 1; 2834 pfn = hat_kpm_va2pfn(addr); 2835 } else { 2836 pfn = hat_getpfnum(kas.a_hat, addr); 2837 badcaller = pf_is_memory(pfn); 2838 } 2839 2840 if (badcaller) 2841 hat_getkpfnum_badcall(caller()); 2842 XPV_ALLOW_MIGRATE(); 2843 return (pfn); 2844 } 2845 #endif /* __amd64 */ 2846 2847 /* 2848 * int hat_probe(hat, addr) 2849 * return 0 if no valid mapping is present. Faster version 2850 * of hat_getattr in certain architectures. 2851 */ 2852 int 2853 hat_probe(hat_t *hat, caddr_t addr) 2854 { 2855 uintptr_t vaddr = ALIGN2PAGE(addr); 2856 uint_t entry; 2857 htable_t *ht; 2858 pgcnt_t pg_off; 2859 2860 ASSERT(hat == kas.a_hat || vaddr <= _userlimit); 2861 ASSERT(hat == kas.a_hat || 2862 AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock)); 2863 if (IN_VA_HOLE(vaddr)) 2864 return (0); 2865 2866 /* 2867 * Most common use of hat_probe is from segmap. We special case it 2868 * for performance. 2869 */ 2870 if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) { 2871 pg_off = mmu_btop(vaddr - mmu.kmap_addr); 2872 if (mmu.pae_hat) 2873 return (PTE_ISVALID(mmu.kmap_ptes[pg_off])); 2874 else 2875 return (PTE_ISVALID( 2876 ((x86pte32_t *)mmu.kmap_ptes)[pg_off])); 2877 } 2878 2879 ht = htable_getpage(hat, vaddr, &entry); 2880 htable_release(ht); 2881 return (ht != NULL); 2882 } 2883 2884 /* 2885 * Find out if the segment for hat_share()/hat_unshare() is DISM or locked ISM. 2886 */ 2887 static int 2888 is_it_dism(hat_t *hat, caddr_t va) 2889 { 2890 struct seg *seg; 2891 struct shm_data *shmd; 2892 struct spt_data *sptd; 2893 2894 seg = as_findseg(hat->hat_as, va, 0); 2895 ASSERT(seg != NULL); 2896 ASSERT(seg->s_base <= va); 2897 shmd = (struct shm_data *)seg->s_data; 2898 ASSERT(shmd != NULL); 2899 sptd = (struct spt_data *)shmd->shm_sptseg->s_data; 2900 ASSERT(sptd != NULL); 2901 if (sptd->spt_flags & SHM_PAGEABLE) 2902 return (1); 2903 return (0); 2904 } 2905 2906 /* 2907 * Simple implementation of ISM. hat_share() is similar to hat_memload_array(), 2908 * except that we use the ism_hat's existing mappings to determine the pages 2909 * and protections to use for this hat. If we find a full properly aligned 2910 * and sized pagetable, we will attempt to share the pagetable itself. 2911 */ 2912 /*ARGSUSED*/ 2913 int 2914 hat_share( 2915 hat_t *hat, 2916 caddr_t addr, 2917 hat_t *ism_hat, 2918 caddr_t src_addr, 2919 size_t len, /* almost useless value, see below.. 
*/ 2920 uint_t ismszc) 2921 { 2922 uintptr_t vaddr_start = (uintptr_t)addr; 2923 uintptr_t vaddr; 2924 uintptr_t eaddr = vaddr_start + len; 2925 uintptr_t ism_addr_start = (uintptr_t)src_addr; 2926 uintptr_t ism_addr = ism_addr_start; 2927 uintptr_t e_ism_addr = ism_addr + len; 2928 htable_t *ism_ht = NULL; 2929 htable_t *ht; 2930 x86pte_t pte; 2931 page_t *pp; 2932 pfn_t pfn; 2933 level_t l; 2934 pgcnt_t pgcnt; 2935 uint_t prot; 2936 int is_dism; 2937 int flags; 2938 2939 /* 2940 * We might be asked to share an empty DISM hat by as_dup() 2941 */ 2942 ASSERT(hat != kas.a_hat); 2943 ASSERT(eaddr <= _userlimit); 2944 if (!(ism_hat->hat_flags & HAT_SHARED)) { 2945 ASSERT(hat_get_mapped_size(ism_hat) == 0); 2946 return (0); 2947 } 2948 XPV_DISALLOW_MIGRATE(); 2949 2950 /* 2951 * The SPT segment driver often passes us a size larger than there are 2952 * valid mappings. That's because it rounds the segment size up to a 2953 * large pagesize, even if the actual memory mapped by ism_hat is less. 2954 */ 2955 ASSERT(IS_PAGEALIGNED(vaddr_start)); 2956 ASSERT(IS_PAGEALIGNED(ism_addr_start)); 2957 ASSERT(ism_hat->hat_flags & HAT_SHARED); 2958 is_dism = is_it_dism(hat, addr); 2959 while (ism_addr < e_ism_addr) { 2960 /* 2961 * use htable_walk to get the next valid ISM mapping 2962 */ 2963 pte = htable_walk(ism_hat, &ism_ht, &ism_addr, e_ism_addr); 2964 if (ism_ht == NULL) 2965 break; 2966 2967 /* 2968 * First check to see if we already share the page table. 2969 */ 2970 l = ism_ht->ht_level; 2971 vaddr = vaddr_start + (ism_addr - ism_addr_start); 2972 ht = htable_lookup(hat, vaddr, l); 2973 if (ht != NULL) { 2974 if (ht->ht_flags & HTABLE_SHARED_PFN) 2975 goto shared; 2976 htable_release(ht); 2977 goto not_shared; 2978 } 2979 2980 /* 2981 * Can't ever share top table. 2982 */ 2983 if (l == mmu.max_level) 2984 goto not_shared; 2985 2986 /* 2987 * Avoid level mismatches later due to DISM faults. 2988 */ 2989 if (is_dism && l > 0) 2990 goto not_shared; 2991 2992 /* 2993 * addresses and lengths must align 2994 * table must be fully populated 2995 * no lower level page tables 2996 */ 2997 if (ism_addr != ism_ht->ht_vaddr || 2998 (vaddr & LEVEL_OFFSET(l + 1)) != 0) 2999 goto not_shared; 3000 3001 /* 3002 * The range of address space must cover a full table. 3003 */ 3004 if (e_ism_addr - ism_addr < LEVEL_SIZE(l + 1)) 3005 goto not_shared; 3006 3007 /* 3008 * All entries in the ISM page table must be leaf PTEs. 3009 */ 3010 if (l > 0) { 3011 int e; 3012 3013 /* 3014 * We know the 0th is from htable_walk() above. 3015 */ 3016 for (e = 1; e < HTABLE_NUM_PTES(ism_ht); ++e) { 3017 x86pte_t pte; 3018 pte = x86pte_get(ism_ht, e); 3019 if (!PTE_ISPAGE(pte, l)) 3020 goto not_shared; 3021 } 3022 } 3023 3024 /* 3025 * share the page table 3026 */ 3027 ht = htable_create(hat, vaddr, l, ism_ht); 3028 shared: 3029 ASSERT(ht->ht_flags & HTABLE_SHARED_PFN); 3030 ASSERT(ht->ht_shares == ism_ht); 3031 hat->hat_ism_pgcnt += 3032 (ism_ht->ht_valid_cnt - ht->ht_valid_cnt) << 3033 (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT); 3034 ht->ht_valid_cnt = ism_ht->ht_valid_cnt; 3035 htable_release(ht); 3036 ism_addr = ism_ht->ht_vaddr + LEVEL_SIZE(l + 1); 3037 htable_release(ism_ht); 3038 ism_ht = NULL; 3039 continue; 3040 3041 not_shared: 3042 /* 3043 * Unable to share the page table. Instead we will 3044 * create new mappings from the values in the ISM mappings. 
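* (This can happen, for example, when the target vaddr isn't aligned to
* the span of the ISM pagetable, when the range doesn't cover a full
* table, or for DISM where only level 0 tables may be shared.)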
3045 * Figure out what level size mappings to use; 3046 */ 3047 for (l = ism_ht->ht_level; l > 0; --l) { 3048 if (LEVEL_SIZE(l) <= eaddr - vaddr && 3049 (vaddr & LEVEL_OFFSET(l)) == 0) 3050 break; 3051 } 3052 3053 /* 3054 * The ISM mapping might be larger than the share area, 3055 * be careful to truncate it if needed. 3056 */ 3057 if (eaddr - vaddr >= LEVEL_SIZE(ism_ht->ht_level)) { 3058 pgcnt = mmu_btop(LEVEL_SIZE(ism_ht->ht_level)); 3059 } else { 3060 pgcnt = mmu_btop(eaddr - vaddr); 3061 l = 0; 3062 } 3063 3064 pfn = PTE2PFN(pte, ism_ht->ht_level); 3065 ASSERT(pfn != PFN_INVALID); 3066 while (pgcnt > 0) { 3067 /* 3068 * Make a new pte for the PFN for this level. 3069 * Copy protections for the pte from the ISM pte. 3070 */ 3071 pp = page_numtopp_nolock(pfn); 3072 ASSERT(pp != NULL); 3073 3074 prot = PROT_USER | PROT_READ | HAT_UNORDERED_OK; 3075 if (PTE_GET(pte, PT_WRITABLE)) 3076 prot |= PROT_WRITE; 3077 if (!PTE_GET(pte, PT_NX)) 3078 prot |= PROT_EXEC; 3079 3080 flags = HAT_LOAD; 3081 if (!is_dism) 3082 flags |= HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST; 3083 while (hati_load_common(hat, vaddr, pp, prot, flags, 3084 l, pfn) != 0) { 3085 if (l == 0) 3086 panic("hati_load_common() failure"); 3087 --l; 3088 } 3089 3090 vaddr += LEVEL_SIZE(l); 3091 ism_addr += LEVEL_SIZE(l); 3092 pfn += mmu_btop(LEVEL_SIZE(l)); 3093 pgcnt -= mmu_btop(LEVEL_SIZE(l)); 3094 } 3095 } 3096 if (ism_ht != NULL) 3097 htable_release(ism_ht); 3098 XPV_ALLOW_MIGRATE(); 3099 return (0); 3100 } 3101 3102 3103 /* 3104 * hat_unshare() is similar to hat_unload_callback(), but 3105 * we have to look for empty shared pagetables. Note that 3106 * hat_unshare() is always invoked against an entire segment. 3107 */ 3108 /*ARGSUSED*/ 3109 void 3110 hat_unshare(hat_t *hat, caddr_t addr, size_t len, uint_t ismszc) 3111 { 3112 uint64_t vaddr = (uintptr_t)addr; 3113 uintptr_t eaddr = vaddr + len; 3114 htable_t *ht = NULL; 3115 uint_t need_demaps = 0; 3116 int flags = HAT_UNLOAD_UNMAP; 3117 level_t l; 3118 3119 ASSERT(hat != kas.a_hat); 3120 ASSERT(eaddr <= _userlimit); 3121 ASSERT(IS_PAGEALIGNED(vaddr)); 3122 ASSERT(IS_PAGEALIGNED(eaddr)); 3123 XPV_DISALLOW_MIGRATE(); 3124 3125 /* 3126 * First go through and remove any shared pagetables. 3127 * 3128 * Note that it's ok to delay the TLB shootdown till the entire range is 3129 * finished, because if hat_pageunload() were to unload a shared 3130 * pagetable page, its hat_tlb_inval() will do a global TLB invalidate. 3131 */ 3132 l = mmu.max_page_level; 3133 if (l == mmu.max_level) 3134 --l; 3135 for (; l >= 0; --l) { 3136 for (vaddr = (uintptr_t)addr; vaddr < eaddr; 3137 vaddr = (vaddr & LEVEL_MASK(l + 1)) + LEVEL_SIZE(l + 1)) { 3138 ASSERT(!IN_VA_HOLE(vaddr)); 3139 /* 3140 * find a pagetable that maps the current address 3141 */ 3142 ht = htable_lookup(hat, vaddr, l); 3143 if (ht == NULL) 3144 continue; 3145 if (ht->ht_flags & HTABLE_SHARED_PFN) { 3146 /* 3147 * clear page count, set valid_cnt to 0, 3148 * let htable_release() finish the job 3149 */ 3150 hat->hat_ism_pgcnt -= ht->ht_valid_cnt << 3151 (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT); 3152 ht->ht_valid_cnt = 0; 3153 need_demaps = 1; 3154 } 3155 htable_release(ht); 3156 } 3157 } 3158 3159 /* 3160 * flush the TLBs - since we're probably dealing with MANY mappings 3161 * we do just one CR3 reload. 3162 */ 3163 if (!(hat->hat_flags & HAT_FREEING) && need_demaps) 3164 hat_tlb_inval(hat, DEMAP_ALL_ADDR); 3165 3166 /* 3167 * Now go back and clean up any unaligned mappings that 3168 * couldn't share pagetables. 
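* (These mappings were established by hat_share() via hati_load_common();
* for non-DISM (locked) ISM they were loaded with HAT_LOAD_LOCK, which is
* why HAT_UNLOAD_UNLOCK is added to the flags below.)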
3169 */ 3170 if (!is_it_dism(hat, addr)) 3171 flags |= HAT_UNLOAD_UNLOCK; 3172 hat_unload(hat, addr, len, flags); 3173 XPV_ALLOW_MIGRATE(); 3174 } 3175 3176 3177 /* 3178 * hat_reserve() does nothing 3179 */ 3180 /*ARGSUSED*/ 3181 void 3182 hat_reserve(struct as *as, caddr_t addr, size_t len) 3183 { 3184 } 3185 3186 3187 /* 3188 * Called when all mappings to a page should have write permission removed. 3189 * Mostly stolen from hat_pagesync() 3190 */ 3191 static void 3192 hati_page_clrwrt(struct page *pp) 3193 { 3194 hment_t *hm = NULL; 3195 htable_t *ht; 3196 uint_t entry; 3197 x86pte_t old; 3198 x86pte_t new; 3199 uint_t pszc = 0; 3200 3201 XPV_DISALLOW_MIGRATE(); 3202 next_size: 3203 /* 3204 * walk thru the mapping list clearing write permission 3205 */ 3206 x86_hm_enter(pp); 3207 while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) { 3208 if (ht->ht_level < pszc) 3209 continue; 3210 old = x86pte_get(ht, entry); 3211 3212 for (;;) { 3213 /* 3214 * Is this mapping of interest? 3215 */ 3216 if (PTE2PFN(old, ht->ht_level) != pp->p_pagenum || 3217 PTE_GET(old, PT_WRITABLE) == 0) 3218 break; 3219 3220 /* 3221 * Clear ref/mod writable bits. This requires cross 3222 * calls to ensure any executing TLBs see cleared bits. 3223 */ 3224 new = old; 3225 PTE_CLR(new, PT_REF | PT_MOD | PT_WRITABLE); 3226 old = hati_update_pte(ht, entry, old, new); 3227 if (old != 0) 3228 continue; 3229 3230 break; 3231 } 3232 } 3233 x86_hm_exit(pp); 3234 while (pszc < pp->p_szc) { 3235 page_t *tpp; 3236 pszc++; 3237 tpp = PP_GROUPLEADER(pp, pszc); 3238 if (pp != tpp) { 3239 pp = tpp; 3240 goto next_size; 3241 } 3242 } 3243 XPV_ALLOW_MIGRATE(); 3244 } 3245 3246 /* 3247 * void hat_page_setattr(pp, flag) 3248 * void hat_page_clrattr(pp, flag) 3249 * used to set/clr ref/mod bits. 3250 */ 3251 void 3252 hat_page_setattr(struct page *pp, uint_t flag) 3253 { 3254 vnode_t *vp = pp->p_vnode; 3255 kmutex_t *vphm = NULL; 3256 page_t **listp; 3257 int noshuffle; 3258 3259 noshuffle = flag & P_NSH; 3260 flag &= ~P_NSH; 3261 3262 if (PP_GETRM(pp, flag) == flag) 3263 return; 3264 3265 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) && 3266 !noshuffle) { 3267 vphm = page_vnode_mutex(vp); 3268 mutex_enter(vphm); 3269 } 3270 3271 PP_SETRM(pp, flag); 3272 3273 if (vphm != NULL) { 3274 3275 /* 3276 * Some File Systems examine v_pages for NULL w/o 3277 * grabbing the vphm mutex. Must not let it become NULL when 3278 * pp is the only page on the list. 3279 */ 3280 if (pp->p_vpnext != pp) { 3281 page_vpsub(&vp->v_pages, pp); 3282 if (vp->v_pages != NULL) 3283 listp = &vp->v_pages->p_vpprev->p_vpnext; 3284 else 3285 listp = &vp->v_pages; 3286 page_vpadd(listp, pp); 3287 } 3288 mutex_exit(vphm); 3289 } 3290 } 3291 3292 void 3293 hat_page_clrattr(struct page *pp, uint_t flag) 3294 { 3295 vnode_t *vp = pp->p_vnode; 3296 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 3297 3298 /* 3299 * Caller is expected to hold page's io lock for VMODSORT to work 3300 * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod 3301 * bit is cleared. 3302 * We don't have assert to avoid tripping some existing third party 3303 * code. The dirty page is moved back to top of the v_page list 3304 * after IO is done in pvn_write_done(). 3305 */ 3306 PP_CLRRM(pp, flag); 3307 3308 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) { 3309 3310 /* 3311 * VMODSORT works by removing write permissions and getting 3312 * a fault when a page is made dirty. At this point 3313 * we need to remove write permission from all mappings 3314 * to this page. 
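* (hati_page_clrwrt() walks the hment list at every pagesize and clears
* PT_WRITABLE, along with PT_REF and PT_MOD, from each mapping.)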
3315 */
3316 hati_page_clrwrt(pp);
3317 }
3318 }
3319
3320 /*
3321 * If flag is specified, returns 0 if the attribute is disabled
3322 * and nonzero if enabled. If flag specifies multiple attributes,
3323 * returns 0 only if ALL attributes are disabled. This is an advisory
3324 * call.
3325 */
3326 uint_t
3327 hat_page_getattr(struct page *pp, uint_t flag)
3328 {
3329 return (PP_GETRM(pp, flag));
3330 }
3331
3332
3333 /*
3334 * common code used by hat_pageunload() and hment_steal()
3335 */
3336 hment_t *
3337 hati_page_unmap(page_t *pp, htable_t *ht, uint_t entry)
3338 {
3339 x86pte_t old_pte;
3340 pfn_t pfn = pp->p_pagenum;
3341 hment_t *hm;
3342
3343 /*
3344 * We need to acquire a hold on the htable in order to
3345 * do the invalidate. We know the htable must exist, since
3346 * unmaps don't release the htable until after removing any
3347 * hment. Having x86_hm_enter() keeps that from proceeding.
3348 */
3349 htable_acquire(ht);
3350
3351 /*
3352 * Invalidate the PTE and remove the hment.
3353 */
3354 old_pte = x86pte_inval(ht, entry, 0, NULL);
3355 if (PTE2PFN(old_pte, ht->ht_level) != pfn) {
3356 panic("x86pte_inval() failure found PTE = " FMT_PTE
3357 " pfn being unmapped is %lx ht=0x%lx entry=0x%x",
3358 old_pte, pfn, (uintptr_t)ht, entry);
3359 }
3360
3361 /*
3362 * Clean up all the htable information for this mapping
3363 */
3364 ASSERT(ht->ht_valid_cnt > 0);
3365 HTABLE_DEC(ht->ht_valid_cnt);
3366 PGCNT_DEC(ht->ht_hat, ht->ht_level);
3367
3368 /*
3369 * sync ref/mod bits to the page_t
3370 */
3371 if (PTE_GET(old_pte, PT_SOFTWARE) < PT_NOSYNC)
3372 hati_sync_pte_to_page(pp, old_pte, ht->ht_level);
3373
3374 /*
3375 * Remove the mapping list entry for this page.
3376 */
3377 hm = hment_remove(pp, ht, entry);
3378
3379 /*
3380 * drop the mapping list lock so that we might free the
3381 * hment and htable.
3382 */
3383 x86_hm_exit(pp);
3384 htable_release(ht);
3385 return (hm);
3386 }
3387
3388 extern int vpm_enable;
3389 /*
3390 * Unload all translations to a page. If the page is a subpage of a large
3391 * page, the large page mappings are also removed.
3392 *
3393 * The forceflags are unused.
3394 */
3395
3396 /*ARGSUSED*/
3397 static int
3398 hati_pageunload(struct page *pp, uint_t pg_szcd, uint_t forceflag)
3399 {
3400 page_t *cur_pp = pp;
3401 hment_t *hm;
3402 hment_t *prev;
3403 htable_t *ht;
3404 uint_t entry;
3405 level_t level;
3406
3407 XPV_DISALLOW_MIGRATE();
3408
3409 /*
3410 * prevent recursion due to kmem_free()
3411 */
3412 ++curthread->t_hatdepth;
3413 ASSERT(curthread->t_hatdepth < 16);
3414
3415 #if defined(__amd64)
3416 /*
3417 * clear the vpm ref.
3418 */
3419 if (vpm_enable) {
3420 pp->p_vpmref = 0;
3421 }
3422 #endif
3423 /*
3424 * The loop with next_size handles pages with multiple pagesize mappings
3425 */
3426 next_size:
3427 for (;;) {
3428
3429 /*
3430 * Get a mapping list entry
3431 */
3432 x86_hm_enter(cur_pp);
3433 for (prev = NULL; ; prev = hm) {
3434 hm = hment_walk(cur_pp, &ht, &entry, prev);
3435 if (hm == NULL) {
3436 x86_hm_exit(cur_pp);
3437
3438 /*
3439 * If not part of a larger page, we're done.
3440 */
3441 if (cur_pp->p_szc <= pg_szcd) {
3442 ASSERT(curthread->t_hatdepth > 0);
3443 --curthread->t_hatdepth;
3444 XPV_ALLOW_MIGRATE();
3445 return (0);
3446 }
3447
3448 /*
3449 * Else check the next larger page size.
3450 * hat_page_demote() may decrease p_szc
3451 * but that's ok; we'll just take an extra
3452 * trip, discover there are no larger mappings,
3453 * and return.
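* (We simply move cur_pp to the group leader at the next pg_szcd and
* rescan its, possibly now empty, mapping list.)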
3454 */ 3455 ++pg_szcd; 3456 cur_pp = PP_GROUPLEADER(cur_pp, pg_szcd); 3457 goto next_size; 3458 } 3459 3460 /* 3461 * If this mapping size matches, remove it. 3462 */ 3463 level = ht->ht_level; 3464 if (level == pg_szcd) 3465 break; 3466 } 3467 3468 /* 3469 * Remove the mapping list entry for this page. 3470 * Note this does the x86_hm_exit() for us. 3471 */ 3472 hm = hati_page_unmap(cur_pp, ht, entry); 3473 if (hm != NULL) 3474 hment_free(hm); 3475 } 3476 } 3477 3478 int 3479 hat_pageunload(struct page *pp, uint_t forceflag) 3480 { 3481 ASSERT(PAGE_EXCL(pp)); 3482 return (hati_pageunload(pp, 0, forceflag)); 3483 } 3484 3485 /* 3486 * Unload all large mappings to pp and reduce by 1 p_szc field of every large 3487 * page level that included pp. 3488 * 3489 * pp must be locked EXCL. Even though no other constituent pages are locked 3490 * it's legal to unload large mappings to pp because all constituent pages of 3491 * large locked mappings have to be locked SHARED. therefore if we have EXCL 3492 * lock on one of constituent pages none of the large mappings to pp are 3493 * locked. 3494 * 3495 * Change (always decrease) p_szc field starting from the last constituent 3496 * page and ending with root constituent page so that root's pszc always shows 3497 * the area where hat_page_demote() may be active. 3498 * 3499 * This mechanism is only used for file system pages where it's not always 3500 * possible to get EXCL locks on all constituent pages to demote the size code 3501 * (as is done for anonymous or kernel large pages). 3502 */ 3503 void 3504 hat_page_demote(page_t *pp) 3505 { 3506 uint_t pszc; 3507 uint_t rszc; 3508 uint_t szc; 3509 page_t *rootpp; 3510 page_t *firstpp; 3511 page_t *lastpp; 3512 pgcnt_t pgcnt; 3513 3514 ASSERT(PAGE_EXCL(pp)); 3515 ASSERT(!PP_ISFREE(pp)); 3516 ASSERT(page_szc_lock_assert(pp)); 3517 3518 if (pp->p_szc == 0) 3519 return; 3520 3521 rootpp = PP_GROUPLEADER(pp, 1); 3522 (void) hati_pageunload(rootpp, 1, HAT_FORCE_PGUNLOAD); 3523 3524 /* 3525 * all large mappings to pp are gone 3526 * and no new can be setup since pp is locked exclusively. 3527 * 3528 * Lock the root to make sure there's only one hat_page_demote() 3529 * outstanding within the area of this root's pszc. 3530 * 3531 * Second potential hat_page_demote() is already eliminated by upper 3532 * VM layer via page_szc_lock() but we don't rely on it and use our 3533 * own locking (so that upper layer locking can be changed without 3534 * assumptions that hat depends on upper layer VM to prevent multiple 3535 * hat_page_demote() to be issued simultaneously to the same large 3536 * page). 3537 */ 3538 again: 3539 pszc = pp->p_szc; 3540 if (pszc == 0) 3541 return; 3542 rootpp = PP_GROUPLEADER(pp, pszc); 3543 x86_hm_enter(rootpp); 3544 /* 3545 * If root's p_szc is different from pszc we raced with another 3546 * hat_page_demote(). Drop the lock and try to find the root again. 3547 * If root's p_szc is greater than pszc previous hat_page_demote() is 3548 * not done yet. Take and release mlist lock of root's root to wait 3549 * for previous hat_page_demote() to complete. 
3550 */
3551 if ((rszc = rootpp->p_szc) != pszc) {
3552 x86_hm_exit(rootpp);
3553 if (rszc > pszc) {
3554 /* p_szc of a locked non-free page can't increase */
3555 ASSERT(pp != rootpp);
3556
3557 rootpp = PP_GROUPLEADER(rootpp, rszc);
3558 x86_hm_enter(rootpp);
3559 x86_hm_exit(rootpp);
3560 }
3561 goto again;
3562 }
3563 ASSERT(pp->p_szc == pszc);
3564
3565 /*
3566 * Decrement by 1 the p_szc of every constituent page of a region that
3567 * covered pp. For example if the original szc is 3 it gets changed to 2
3568 * everywhere except in region 2 that covered pp. Region 2 that
3569 * covered pp gets demoted to 1 everywhere except in region 1 that
3570 * covered pp. The region 1 that covered pp is demoted to region
3571 * 0. It's done this way because from region 3 we removed level 3
3572 * mappings, from region 2 that covered pp we removed level 2 mappings
3573 * and from region 1 that covered pp we removed level 1 mappings. All
3574 * changes are done from high pfn's to low pfn's so that roots
3575 * are changed last, allowing one to know the largest region where
3576 * hat_page_demote() is still active by only looking at the root page.
3577 *
3578 * This algorithm is implemented in 2 while loops. The first loop changes
3579 * p_szc of pages to the right of pp's level 1 region and the second
3580 * loop changes p_szc of pages of the level 1 region that covers pp
3581 * and all pages to the left of the level 1 region that covers pp.
3582 * In the first loop p_szc keeps dropping with every iteration
3583 * and in the second loop it keeps increasing with every iteration.
3584 *
3585 * First loop description: Demote pages to the right of pp outside of
3586 * the level 1 region that covers pp. In every iteration of the while
3587 * loop below find the last page of the szc region and the first page of
3588 * the (szc - 1) region that is immediately to the right of the (szc - 1)
3589 * region that covers pp. From the last such page to the first such page
3590 * change every page's szc to szc - 1. Decrement szc and continue
3591 * looping until szc is 1. If pp belongs to the last (szc - 1) region
3592 * of the szc region, skip to the next iteration.
3593 */
3594 szc = pszc;
3595 while (szc > 1) {
3596 lastpp = PP_GROUPLEADER(pp, szc);
3597 pgcnt = page_get_pagecnt(szc);
3598 lastpp += pgcnt - 1;
3599 firstpp = PP_GROUPLEADER(pp, (szc - 1));
3600 pgcnt = page_get_pagecnt(szc - 1);
3601 if (lastpp - firstpp < pgcnt) {
3602 szc--;
3603 continue;
3604 }
3605 firstpp += pgcnt;
3606 while (lastpp != firstpp) {
3607 ASSERT(lastpp->p_szc == pszc);
3608 lastpp->p_szc = szc - 1;
3609 lastpp--;
3610 }
3611 firstpp->p_szc = szc - 1;
3612 szc--;
3613 }
3614
3615 /*
3616 * Second loop description:
3617 * The first iteration changes p_szc to 0 of every
3618 * page of the level 1 region that covers pp.
3619 * Subsequent iterations find the last page of the szc region
3620 * immediately to the left of the szc region that covered pp
3621 * and the first page of the (szc + 1) region that covers pp.
3622 * From the last to the first page change p_szc of every page to szc.
3623 * Increment szc and continue looping until szc is pszc.
3624 * If pp belongs to the first szc region of the (szc + 1) region,
3625 * skip to the next iteration.
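* (Worked example, pszc == 2: every constituent page of pp's level 2
* region ends up with p_szc == 1, except the pages of the level 1 region
* covering pp, which end up with p_szc == 0; rootpp itself is written
* last.)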
3626 * 3627 */ 3628 szc = 0; 3629 while (szc < pszc) { 3630 firstpp = PP_GROUPLEADER(pp, (szc + 1)); 3631 if (szc == 0) { 3632 pgcnt = page_get_pagecnt(1); 3633 lastpp = firstpp + (pgcnt - 1); 3634 } else { 3635 lastpp = PP_GROUPLEADER(pp, szc); 3636 if (firstpp == lastpp) { 3637 szc++; 3638 continue; 3639 } 3640 lastpp--; 3641 pgcnt = page_get_pagecnt(szc); 3642 } 3643 while (lastpp != firstpp) { 3644 ASSERT(lastpp->p_szc == pszc); 3645 lastpp->p_szc = szc; 3646 lastpp--; 3647 } 3648 firstpp->p_szc = szc; 3649 if (firstpp == rootpp) 3650 break; 3651 szc++; 3652 } 3653 x86_hm_exit(rootpp); 3654 } 3655 3656 /* 3657 * get hw stats from hardware into page struct and reset hw stats 3658 * returns attributes of page 3659 * Flags for hat_pagesync, hat_getstat, hat_sync 3660 * 3661 * define HAT_SYNC_ZERORM 0x01 3662 * 3663 * Additional flags for hat_pagesync 3664 * 3665 * define HAT_SYNC_STOPON_REF 0x02 3666 * define HAT_SYNC_STOPON_MOD 0x04 3667 * define HAT_SYNC_STOPON_RM 0x06 3668 * define HAT_SYNC_STOPON_SHARED 0x08 3669 */ 3670 uint_t 3671 hat_pagesync(struct page *pp, uint_t flags) 3672 { 3673 hment_t *hm = NULL; 3674 htable_t *ht; 3675 uint_t entry; 3676 x86pte_t old, save_old; 3677 x86pte_t new; 3678 uchar_t nrmbits = P_REF|P_MOD|P_RO; 3679 extern ulong_t po_share; 3680 page_t *save_pp = pp; 3681 uint_t pszc = 0; 3682 3683 ASSERT(PAGE_LOCKED(pp) || panicstr); 3684 3685 if (PP_ISRO(pp) && (flags & HAT_SYNC_STOPON_MOD)) 3686 return (pp->p_nrm & nrmbits); 3687 3688 if ((flags & HAT_SYNC_ZERORM) == 0) { 3689 3690 if ((flags & HAT_SYNC_STOPON_REF) != 0 && PP_ISREF(pp)) 3691 return (pp->p_nrm & nrmbits); 3692 3693 if ((flags & HAT_SYNC_STOPON_MOD) != 0 && PP_ISMOD(pp)) 3694 return (pp->p_nrm & nrmbits); 3695 3696 if ((flags & HAT_SYNC_STOPON_SHARED) != 0 && 3697 hat_page_getshare(pp) > po_share) { 3698 if (PP_ISRO(pp)) 3699 PP_SETREF(pp); 3700 return (pp->p_nrm & nrmbits); 3701 } 3702 } 3703 3704 XPV_DISALLOW_MIGRATE(); 3705 next_size: 3706 /* 3707 * walk thru the mapping list syncing (and clearing) ref/mod bits. 3708 */ 3709 x86_hm_enter(pp); 3710 while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) { 3711 if (ht->ht_level < pszc) 3712 continue; 3713 old = x86pte_get(ht, entry); 3714 try_again: 3715 3716 ASSERT(PTE2PFN(old, ht->ht_level) == pp->p_pagenum); 3717 3718 if (PTE_GET(old, PT_REF | PT_MOD) == 0) 3719 continue; 3720 3721 save_old = old; 3722 if ((flags & HAT_SYNC_ZERORM) != 0) { 3723 3724 /* 3725 * Need to clear ref or mod bits. Need to demap 3726 * to make sure any executing TLBs see cleared bits. 3727 */ 3728 new = old; 3729 PTE_CLR(new, PT_REF | PT_MOD); 3730 old = hati_update_pte(ht, entry, old, new); 3731 if (old != 0) 3732 goto try_again; 3733 3734 old = save_old; 3735 } 3736 3737 /* 3738 * Sync the PTE 3739 */ 3740 if (!(flags & HAT_SYNC_ZERORM) && 3741 PTE_GET(old, PT_SOFTWARE) <= PT_NOSYNC) 3742 hati_sync_pte_to_page(pp, old, ht->ht_level); 3743 3744 /* 3745 * can stop short if we found a ref'd or mod'd page 3746 */ 3747 if ((flags & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp) || 3748 (flags & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)) { 3749 x86_hm_exit(pp); 3750 goto done; 3751 } 3752 } 3753 x86_hm_exit(pp); 3754 while (pszc < pp->p_szc) { 3755 page_t *tpp; 3756 pszc++; 3757 tpp = PP_GROUPLEADER(pp, pszc); 3758 if (pp != tpp) { 3759 pp = tpp; 3760 goto next_size; 3761 } 3762 } 3763 done: 3764 XPV_ALLOW_MIGRATE(); 3765 return (save_pp->p_nrm & nrmbits); 3766 } 3767 3768 /* 3769 * returns approx number of mappings to this pp. 
A return of 0 implies
3770 * there are no mappings to the page.
3771 */
3772 ulong_t
3773 hat_page_getshare(page_t *pp)
3774 {
3775 uint_t cnt;
3776 cnt = hment_mapcnt(pp);
3777 #if defined(__amd64)
3778 if (vpm_enable && pp->p_vpmref) {
3779 cnt += 1;
3780 }
3781 #endif
3782 return (cnt);
3783 }
3784
3785 /*
3786 * Return 1 if the number of mappings exceeds sh_thresh. Return 0
3787 * otherwise.
3788 */
3789 int
3790 hat_page_checkshare(page_t *pp, ulong_t sh_thresh)
3791 {
3792 return (hat_page_getshare(pp) > sh_thresh);
3793 }
3794
3795 /*
3796 * hat_softlock isn't supported anymore
3797 */
3798 /*ARGSUSED*/
3799 faultcode_t
3800 hat_softlock(
3801 hat_t *hat,
3802 caddr_t addr,
3803 size_t *len,
3804 struct page **page_array,
3805 uint_t flags)
3806 {
3807 return (FC_NOSUPPORT);
3808 }
3809
3810
3811
3812 /*
3813 * Routine to expose supported HAT features to platform independent code.
3814 */
3815 /*ARGSUSED*/
3816 int
3817 hat_supported(enum hat_features feature, void *arg)
3818 {
3819 switch (feature) {
3820
3821 case HAT_SHARED_PT: /* this is really ISM */
3822 return (1);
3823
3824 case HAT_DYNAMIC_ISM_UNMAP:
3825 return (0);
3826
3827 case HAT_VMODSORT:
3828 return (1);
3829
3830 case HAT_SHARED_REGIONS:
3831 return (0);
3832
3833 default:
3834 panic("hat_supported() - unknown feature");
3835 }
3836 return (0);
3837 }
3838
3839 /*
3840 * Called when a thread is exiting and has been switched to the kernel AS
3841 */
3842 void
3843 hat_thread_exit(kthread_t *thd)
3844 {
3845 ASSERT(thd->t_procp->p_as == &kas);
3846 XPV_DISALLOW_MIGRATE();
3847 hat_switch(thd->t_procp->p_as->a_hat);
3848 XPV_ALLOW_MIGRATE();
3849 }
3850
3851 /*
3852 * Set up the given brand new hat structure as the new HAT on this cpu's mmu.
3853 */
3854 /*ARGSUSED*/
3855 void
3856 hat_setup(hat_t *hat, int flags)
3857 {
3858 XPV_DISALLOW_MIGRATE();
3859 kpreempt_disable();
3860
3861 hat_switch(hat);
3862
3863 kpreempt_enable();
3864 XPV_ALLOW_MIGRATE();
3865 }
3866
3867 /*
3868 * Prepare for a CPU private mapping for the given address.
3869 *
3870 * The address can only be used from a single CPU and can be remapped
3871 * using hat_mempte_remap(). Return the physical address of the PTE.
3872 *
3873 * We do the htable_create() if necessary and increment the valid count so
3874 * the htable can't disappear. We also hat_devload() the page table into
3875 * the kernel so that the PTE is quickly accessed.
3876 */
3877 hat_mempte_t
3878 hat_mempte_setup(caddr_t addr)
3879 {
3880 uintptr_t va = (uintptr_t)addr;
3881 htable_t *ht;
3882 uint_t entry;
3883 x86pte_t oldpte;
3884 hat_mempte_t p;
3885
3886 ASSERT(IS_PAGEALIGNED(va));
3887 ASSERT(!IN_VA_HOLE(va));
3888 ++curthread->t_hatdepth;
3889 XPV_DISALLOW_MIGRATE();
3890 ht = htable_getpte(kas.a_hat, va, &entry, &oldpte, 0);
3891 if (ht == NULL) {
3892 ht = htable_create(kas.a_hat, va, 0, NULL);
3893 entry = htable_va2entry(va, ht);
3894 ASSERT(ht->ht_level == 0);
3895 oldpte = x86pte_get(ht, entry);
3896 }
3897 if (PTE_ISVALID(oldpte))
3898 panic("hat_mempte_setup(): address already mapped "
3899 "ht=%p, entry=%d, pte=" FMT_PTE, (void *)ht, entry, oldpte);
3900
3901 /*
3902 * increment ht_valid_cnt so that the pagetable can't disappear
3903 */
3904 HTABLE_INC(ht->ht_valid_cnt);
3905
3906 /*
3907 * return the PTE physical address to the caller.
3908 */
3909 htable_release(ht);
3910 XPV_ALLOW_MIGRATE();
3911 p = PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry);
3912 --curthread->t_hatdepth;
3913 return (p);
3914 }
3915
3916 /*
3917 * Release a CPU private mapping for the given address.
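* (pte_pa is the value hat_mempte_setup() returned, i.e. the physical
* address of the level 0 PTE for this address.)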
3918 * We decrement the htable valid count so it might be destroyed. 3919 */ 3920 /*ARGSUSED1*/ 3921 void 3922 hat_mempte_release(caddr_t addr, hat_mempte_t pte_pa) 3923 { 3924 htable_t *ht; 3925 3926 XPV_DISALLOW_MIGRATE(); 3927 /* 3928 * invalidate any left over mapping and decrement the htable valid count 3929 */ 3930 #ifdef __xpv 3931 if (HYPERVISOR_update_va_mapping((uintptr_t)addr, 0, 3932 UVMF_INVLPG | UVMF_LOCAL)) 3933 panic("HYPERVISOR_update_va_mapping() failed"); 3934 #else 3935 { 3936 x86pte_t *pteptr; 3937 3938 pteptr = x86pte_mapin(mmu_btop(pte_pa), 3939 (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL); 3940 if (mmu.pae_hat) 3941 *pteptr = 0; 3942 else 3943 *(x86pte32_t *)pteptr = 0; 3944 mmu_tlbflush_entry(addr); 3945 x86pte_mapout(); 3946 } 3947 #endif 3948 3949 ht = htable_getpte(kas.a_hat, ALIGN2PAGE(addr), NULL, NULL, 0); 3950 if (ht == NULL) 3951 panic("hat_mempte_release(): invalid address"); 3952 ASSERT(ht->ht_level == 0); 3953 HTABLE_DEC(ht->ht_valid_cnt); 3954 htable_release(ht); 3955 XPV_ALLOW_MIGRATE(); 3956 } 3957 3958 /* 3959 * Apply a temporary CPU private mapping to a page. We flush the TLB only 3960 * on this CPU, so this ought to have been called with preemption disabled. 3961 */ 3962 void 3963 hat_mempte_remap( 3964 pfn_t pfn, 3965 caddr_t addr, 3966 hat_mempte_t pte_pa, 3967 uint_t attr, 3968 uint_t flags) 3969 { 3970 uintptr_t va = (uintptr_t)addr; 3971 x86pte_t pte; 3972 3973 /* 3974 * Remap the given PTE to the new page's PFN. Invalidate only 3975 * on this CPU. 3976 */ 3977 #ifdef DEBUG 3978 htable_t *ht; 3979 uint_t entry; 3980 3981 ASSERT(IS_PAGEALIGNED(va)); 3982 ASSERT(!IN_VA_HOLE(va)); 3983 ht = htable_getpte(kas.a_hat, va, &entry, NULL, 0); 3984 ASSERT(ht != NULL); 3985 ASSERT(ht->ht_level == 0); 3986 ASSERT(ht->ht_valid_cnt > 0); 3987 ASSERT(ht->ht_pfn == mmu_btop(pte_pa)); 3988 htable_release(ht); 3989 #endif 3990 XPV_DISALLOW_MIGRATE(); 3991 pte = hati_mkpte(pfn, attr, 0, flags); 3992 #ifdef __xpv 3993 if (HYPERVISOR_update_va_mapping(va, pte, UVMF_INVLPG | UVMF_LOCAL)) 3994 panic("HYPERVISOR_update_va_mapping() failed"); 3995 #else 3996 { 3997 x86pte_t *pteptr; 3998 3999 pteptr = x86pte_mapin(mmu_btop(pte_pa), 4000 (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL); 4001 if (mmu.pae_hat) 4002 *(x86pte_t *)pteptr = pte; 4003 else 4004 *(x86pte32_t *)pteptr = (x86pte32_t)pte; 4005 mmu_tlbflush_entry(addr); 4006 x86pte_mapout(); 4007 } 4008 #endif 4009 XPV_ALLOW_MIGRATE(); 4010 } 4011 4012 4013 4014 /* 4015 * Hat locking functions 4016 * XXX - these two functions are currently being used by hatstats 4017 * they can be removed by using a per-as mutex for hatstats. 4018 */ 4019 void 4020 hat_enter(hat_t *hat) 4021 { 4022 mutex_enter(&hat->hat_mutex); 4023 } 4024 4025 void 4026 hat_exit(hat_t *hat) 4027 { 4028 mutex_exit(&hat->hat_mutex); 4029 } 4030 4031 /* 4032 * HAT part of cpu initialization. 4033 */ 4034 void 4035 hat_cpu_online(struct cpu *cpup) 4036 { 4037 if (cpup != CPU) { 4038 x86pte_cpu_init(cpup); 4039 hat_vlp_setup(cpup); 4040 } 4041 CPUSET_ATOMIC_ADD(khat_cpuset, cpup->cpu_id); 4042 } 4043 4044 /* 4045 * HAT part of cpu deletion. 4046 * (currently, we only call this after the cpu is safely passivated.) 4047 */ 4048 void 4049 hat_cpu_offline(struct cpu *cpup) 4050 { 4051 ASSERT(cpup != CPU); 4052 4053 CPUSET_ATOMIC_DEL(khat_cpuset, cpup->cpu_id); 4054 hat_vlp_teardown(cpup); 4055 x86pte_cpu_fini(cpup); 4056 } 4057 4058 /* 4059 * Function called after all CPUs are brought online. 
4060 * Used to remove low address boot mappings. 4061 */ 4062 void 4063 clear_boot_mappings(uintptr_t low, uintptr_t high) 4064 { 4065 uintptr_t vaddr = low; 4066 htable_t *ht = NULL; 4067 level_t level; 4068 uint_t entry; 4069 x86pte_t pte; 4070 4071 /* 4072 * On 1st CPU we can unload the prom mappings, basically we blow away 4073 * all virtual mappings under _userlimit. 4074 */ 4075 while (vaddr < high) { 4076 pte = htable_walk(kas.a_hat, &ht, &vaddr, high); 4077 if (ht == NULL) 4078 break; 4079 4080 level = ht->ht_level; 4081 entry = htable_va2entry(vaddr, ht); 4082 ASSERT(level <= mmu.max_page_level); 4083 ASSERT(PTE_ISPAGE(pte, level)); 4084 4085 /* 4086 * Unload the mapping from the page tables. 4087 */ 4088 (void) x86pte_inval(ht, entry, 0, NULL); 4089 ASSERT(ht->ht_valid_cnt > 0); 4090 HTABLE_DEC(ht->ht_valid_cnt); 4091 PGCNT_DEC(ht->ht_hat, ht->ht_level); 4092 4093 vaddr += LEVEL_SIZE(ht->ht_level); 4094 } 4095 if (ht) 4096 htable_release(ht); 4097 } 4098 4099 /* 4100 * Atomically update a new translation for a single page. If the 4101 * currently installed PTE doesn't match the value we expect to find, 4102 * it's not updated and we return the PTE we found. 4103 * 4104 * If activating nosync or NOWRITE and the page was modified we need to sync 4105 * with the page_t. Also sync with page_t if clearing ref/mod bits. 4106 */ 4107 static x86pte_t 4108 hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected, x86pte_t new) 4109 { 4110 page_t *pp; 4111 uint_t rm = 0; 4112 x86pte_t replaced; 4113 4114 if (PTE_GET(expected, PT_SOFTWARE) < PT_NOSYNC && 4115 PTE_GET(expected, PT_MOD | PT_REF) && 4116 (PTE_GET(new, PT_NOSYNC) || !PTE_GET(new, PT_WRITABLE) || 4117 !PTE_GET(new, PT_MOD | PT_REF))) { 4118 4119 ASSERT(!pfn_is_foreign(PTE2PFN(expected, ht->ht_level))); 4120 pp = page_numtopp_nolock(PTE2PFN(expected, ht->ht_level)); 4121 ASSERT(pp != NULL); 4122 if (PTE_GET(expected, PT_MOD)) 4123 rm |= P_MOD; 4124 if (PTE_GET(expected, PT_REF)) 4125 rm |= P_REF; 4126 PTE_CLR(new, PT_MOD | PT_REF); 4127 } 4128 4129 replaced = x86pte_update(ht, entry, expected, new); 4130 if (replaced != expected) 4131 return (replaced); 4132 4133 if (rm) { 4134 /* 4135 * sync to all constituent pages of a large page 4136 */ 4137 pgcnt_t pgcnt = page_get_pagecnt(ht->ht_level); 4138 ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt)); 4139 while (pgcnt-- > 0) { 4140 /* 4141 * hat_page_demote() can't decrease 4142 * pszc below this mapping size 4143 * since large mapping existed after we 4144 * took mlist lock. 
4145 */ 4146 ASSERT(pp->p_szc >= ht->ht_level); 4147 hat_page_setattr(pp, rm); 4148 ++pp; 4149 } 4150 } 4151 4152 return (0); 4153 } 4154 4155 /* ARGSUSED */ 4156 void 4157 hat_join_srd(struct hat *hat, vnode_t *evp) 4158 { 4159 } 4160 4161 /* ARGSUSED */ 4162 hat_region_cookie_t 4163 hat_join_region(struct hat *hat, 4164 caddr_t r_saddr, 4165 size_t r_size, 4166 void *r_obj, 4167 u_offset_t r_objoff, 4168 uchar_t r_perm, 4169 uchar_t r_pgszc, 4170 hat_rgn_cb_func_t r_cb_function, 4171 uint_t flags) 4172 { 4173 panic("No shared region support on x86"); 4174 return (HAT_INVALID_REGION_COOKIE); 4175 } 4176 4177 /* ARGSUSED */ 4178 void 4179 hat_leave_region(struct hat *hat, hat_region_cookie_t rcookie, uint_t flags) 4180 { 4181 panic("No shared region support on x86"); 4182 } 4183 4184 /* ARGSUSED */ 4185 void 4186 hat_dup_region(struct hat *hat, hat_region_cookie_t rcookie) 4187 { 4188 panic("No shared region support on x86"); 4189 } 4190 4191 4192 /* 4193 * Kernel Physical Mapping (kpm) facility 4194 * 4195 * Most of the routines needed to support segkpm are almost no-ops on the 4196 * x86 platform. We map in the entire segment when it is created and leave 4197 * it mapped in, so there is no additional work required to set up and tear 4198 * down individual mappings. All of these routines were created to support 4199 * SPARC platforms that have to avoid aliasing in their virtually indexed 4200 * caches. 4201 * 4202 * Most of the routines have sanity checks in them (e.g. verifying that the 4203 * passed-in page is locked). We don't actually care about most of these 4204 * checks on x86, but we leave them in place to identify problems in the 4205 * upper levels. 4206 */ 4207 4208 /* 4209 * Map in a locked page and return the vaddr. 4210 */ 4211 /*ARGSUSED*/ 4212 caddr_t 4213 hat_kpm_mapin(struct page *pp, struct kpme *kpme) 4214 { 4215 caddr_t vaddr; 4216 4217 #ifdef DEBUG 4218 if (kpm_enable == 0) { 4219 cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set\n"); 4220 return ((caddr_t)NULL); 4221 } 4222 4223 if (pp == NULL || PAGE_LOCKED(pp) == 0) { 4224 cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked\n"); 4225 return ((caddr_t)NULL); 4226 } 4227 #endif 4228 4229 vaddr = hat_kpm_page2va(pp, 1); 4230 4231 return (vaddr); 4232 } 4233 4234 /* 4235 * Mapout a locked page. 4236 */ 4237 /*ARGSUSED*/ 4238 void 4239 hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr) 4240 { 4241 #ifdef DEBUG 4242 if (kpm_enable == 0) { 4243 cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set\n"); 4244 return; 4245 } 4246 4247 if (IS_KPM_ADDR(vaddr) == 0) { 4248 cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address\n"); 4249 return; 4250 } 4251 4252 if (pp == NULL || PAGE_LOCKED(pp) == 0) { 4253 cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked\n"); 4254 return; 4255 } 4256 #endif 4257 } 4258 4259 /* 4260 * hat_kpm_mapin_pfn is used to obtain a kpm mapping for physical 4261 * memory addresses that are not described by a page_t. It can 4262 * also be used for normal pages that are not locked, but beware 4263 * this is dangerous - no locking is performed, so the identity of 4264 * the page could change. hat_kpm_mapin_pfn is not supported when 4265 * vac_colors > 1, because the chosen va depends on the page identity, 4266 * which could change. 4267 * The caller must only pass pfn's for valid physical addresses; violation 4268 * of this rule will cause panic. 
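* (On x86 the mapping is position independent: the returned va is simply
* kpm_vbase + ptob(pfn), no per-mapping state is created, and
* hat_kpm_mapout_pfn() therefore has nothing to undo.)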

/*
 * Map in a locked page and return the vaddr.
 */
/*ARGSUSED*/
caddr_t
hat_kpm_mapin(struct page *pp, struct kpme *kpme)
{
	caddr_t		vaddr;

#ifdef DEBUG
	if (kpm_enable == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set\n");
		return ((caddr_t)NULL);
	}

	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked\n");
		return ((caddr_t)NULL);
	}
#endif

	vaddr = hat_kpm_page2va(pp, 1);

	return (vaddr);
}

/*
 * Mapout a locked page.
 */
/*ARGSUSED*/
void
hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr)
{
#ifdef DEBUG
	if (kpm_enable == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set\n");
		return;
	}

	if (IS_KPM_ADDR(vaddr) == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address\n");
		return;
	}

	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked\n");
		return;
	}
#endif
}

/*
 * hat_kpm_mapin_pfn is used to obtain a kpm mapping for physical
 * memory addresses that are not described by a page_t.  It can
 * also be used for normal pages that are not locked, but beware
 * this is dangerous - no locking is performed, so the identity of
 * the page could change.  hat_kpm_mapin_pfn is not supported when
 * vac_colors > 1, because the chosen va depends on the page identity,
 * which could change.
 * The caller must only pass pfn's for valid physical addresses; violation
 * of this rule will cause a panic.
 */
caddr_t
hat_kpm_mapin_pfn(pfn_t pfn)
{
	caddr_t paddr, vaddr;

	if (kpm_enable == 0)
		return ((caddr_t)NULL);

	paddr = (caddr_t)ptob(pfn);
	vaddr = (uintptr_t)kpm_vbase + paddr;

	return ((caddr_t)vaddr);
}

/*ARGSUSED*/
void
hat_kpm_mapout_pfn(pfn_t pfn)
{
	/* empty */
}

/*
 * Return the kpm virtual address for a specific pfn.
 */
caddr_t
hat_kpm_pfn2va(pfn_t pfn)
{
	uintptr_t vaddr = (uintptr_t)kpm_vbase + mmu_ptob(pfn);

	ASSERT(!pfn_is_foreign(pfn));
	return ((caddr_t)vaddr);
}

/*
 * Return the kpm virtual address for the page at pp.
 */
/*ARGSUSED*/
caddr_t
hat_kpm_page2va(struct page *pp, int checkswap)
{
	return (hat_kpm_pfn2va(pp->p_pagenum));
}

/*
 * Return the page frame number for the kpm virtual address vaddr.
 */
pfn_t
hat_kpm_va2pfn(caddr_t vaddr)
{
	pfn_t		pfn;

	ASSERT(IS_KPM_ADDR(vaddr));

	pfn = (pfn_t)btop(vaddr - kpm_vbase);

	return (pfn);
}


/*
 * Return the page for the kpm virtual address vaddr.
 */
page_t *
hat_kpm_vaddr2page(caddr_t vaddr)
{
	pfn_t		pfn;

	ASSERT(IS_KPM_ADDR(vaddr));

	pfn = hat_kpm_va2pfn(vaddr);

	return (page_numtopp_nolock(pfn));
}

/*
 * hat_kpm_fault is called from segkpm_fault when we take a page fault on a
 * KPM page.  This should never happen on x86.
 */
int
hat_kpm_fault(hat_t *hat, caddr_t vaddr)
{
	panic("pagefault in seg_kpm.  hat: 0x%p  vaddr: 0x%p",
	    (void *)hat, (void *)vaddr);

	return (0);
}

/*ARGSUSED*/
void
hat_kpm_mseghash_clear(int nentries)
{}

/*ARGSUSED*/
void
hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp)
{}

#ifndef	__xpv
void
hat_kpm_addmem_mseg_update(struct memseg *msp, pgcnt_t nkpmpgs,
	offset_t kpm_pages_off)
{
	_NOTE(ARGUNUSED(nkpmpgs, kpm_pages_off));
	pfn_t base, end;

	/*
	 * kphysm_add_memory_dynamic() does not set nkpmpgs
	 * when page_t memory is externally allocated.  That
	 * code must properly calculate nkpmpgs in all cases
	 * if nkpmpgs needs to be used at some point.
	 */

	/*
	 * The meta (page_t) pages for dynamically added memory are allocated
	 * either from the incoming memory itself or from existing memory.
	 * In the former case the base of the incoming pages will be different
	 * than the base of the dynamic segment, so call memseg_get_start() to
	 * get the actual base of the incoming memory for each case.
	 */

	base = memseg_get_start(msp);
	end = msp->pages_end;

	hat_devload(kas.a_hat, kpm_vbase + mmu_ptob(base),
	    mmu_ptob(end - base), base, PROT_READ | PROT_WRITE,
	    HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);
}

void
hat_kpm_addmem_mseg_insert(struct memseg *msp)
{
	_NOTE(ARGUNUSED(msp));
}

void
hat_kpm_addmem_memsegs_update(struct memseg *msp)
{
	_NOTE(ARGUNUSED(msp));
}

/*
 * Return end of metadata for an already setup memseg.
 * X86 platforms don't need per-page metadata to support kpm.
 */
caddr_t
hat_kpm_mseg_reuse(struct memseg *msp)
{
	return ((caddr_t)msp->epages);
}

void
hat_kpm_delmem_mseg_update(struct memseg *msp, struct memseg **mspp)
{
	_NOTE(ARGUNUSED(msp, mspp));
	ASSERT(0);
}

void
hat_kpm_split_mseg_update(struct memseg *msp, struct memseg **mspp,
	struct memseg *lo, struct memseg *mid, struct memseg *hi)
{
	_NOTE(ARGUNUSED(msp, mspp, lo, mid, hi));
	ASSERT(0);
}

/*
 * Walk the memsegs chain, applying func to each memseg span.
 */
void
hat_kpm_walk(void (*func)(void *, void *, size_t), void *arg)
{
	pfn_t	pbase, pend;
	void	*base;
	size_t	size;
	struct memseg *msp;

	for (msp = memsegs; msp; msp = msp->next) {
		pbase = msp->pages_base;
		pend = msp->pages_end;
		base = ptob(pbase) + kpm_vbase;
		size = ptob(pend - pbase);
		func(arg, base, size);
	}
}
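
/*
 * Illustrative sketch only (hypothetical callback, not part of this file):
 * hat_kpm_walk() hands each callback the kpm virtual base and size of one
 * memseg span, so totalling the kpm-mapped bytes could look like:
 *
 *	static void
 *	kpm_span_sum(void *arg, void *base, size_t size)
 *	{
 *		*(size_t *)arg += size;
 *	}
 *
 *	size_t total = 0;
 *	hat_kpm_walk(kpm_span_sum, &total);
 */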

#else /* __xpv */

/*
 * There are specific Hypervisor calls to establish and remove mappings
 * to grant table references and the privcmd driver.  We have to ensure
 * that a page table actually exists.
 */
void
hat_prepare_mapping(hat_t *hat, caddr_t addr, uint64_t *pte_ma)
{
	maddr_t base_ma;
	htable_t *ht;
	uint_t entry;

	ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE));
	XPV_DISALLOW_MIGRATE();
	ht = htable_create(hat, (uintptr_t)addr, 0, NULL);

	/*
	 * if an address for pte_ma is passed in, return the MA of the pte
	 * for this specific address.  This address is only valid as long
	 * as the htable stays locked.
	 */
	if (pte_ma != NULL) {
		entry = htable_va2entry((uintptr_t)addr, ht);
		base_ma = pa_to_ma(ptob(ht->ht_pfn));
		*pte_ma = base_ma + (entry << mmu.pte_size_shift);
	}
	XPV_ALLOW_MIGRATE();
}

void
hat_release_mapping(hat_t *hat, caddr_t addr)
{
	htable_t	*ht;

	ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE));
	XPV_DISALLOW_MIGRATE();
	ht = htable_lookup(hat, (uintptr_t)addr, 0);
	ASSERT(ht != NULL);
	ASSERT(ht->ht_busy >= 2);
	htable_release(ht);
	htable_release(ht);
	XPV_ALLOW_MIGRATE();
}
#endif	/* __xpv */