1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 /* 29 * VM - Hardware Address Translation management for i386 and amd64 30 * 31 * Implementation of the interfaces described in <common/vm/hat.h> 32 * 33 * Nearly all the details of how the hardware is managed should not be 34 * visible outside this layer except for misc. machine specific functions 35 * that work in conjunction with this code. 36 * 37 * Routines used only inside of i86pc/vm start with hati_ for HAT Internal. 38 */ 39 40 #include <sys/machparam.h> 41 #include <sys/machsystm.h> 42 #include <sys/mman.h> 43 #include <sys/types.h> 44 #include <sys/systm.h> 45 #include <sys/cpuvar.h> 46 #include <sys/thread.h> 47 #include <sys/proc.h> 48 #include <sys/cpu.h> 49 #include <sys/kmem.h> 50 #include <sys/disp.h> 51 #include <sys/shm.h> 52 #include <sys/sysmacros.h> 53 #include <sys/machparam.h> 54 #include <sys/vmem.h> 55 #include <sys/vmsystm.h> 56 #include <sys/promif.h> 57 #include <sys/var.h> 58 #include <sys/x86_archext.h> 59 #include <sys/atomic.h> 60 #include <sys/bitmap.h> 61 #include <sys/controlregs.h> 62 #include <sys/bootconf.h> 63 #include <sys/bootsvcs.h> 64 #include <sys/bootinfo.h> 65 #include <sys/archsystm.h> 66 67 #include <vm/seg_kmem.h> 68 #include <vm/hat_i86.h> 69 #include <vm/as.h> 70 #include <vm/seg.h> 71 #include <vm/page.h> 72 #include <vm/seg_kp.h> 73 #include <vm/seg_kpm.h> 74 #include <vm/vm_dep.h> 75 #ifdef __xpv 76 #include <sys/hypervisor.h> 77 #endif 78 #include <vm/kboot_mmu.h> 79 #include <vm/seg_spt.h> 80 81 #include <sys/cmn_err.h> 82 83 /* 84 * Basic parameters for hat operation. 85 */ 86 struct hat_mmu_info mmu; 87 88 /* 89 * The page that is the kernel's top level pagetable. 90 * 91 * For 32 bit PAE support on i86pc, the kernel hat will use the 1st 4 entries 92 * on this 4K page for its top level page table. The remaining groups of 93 * 4 entries are used for per processor copies of user VLP pagetables for 94 * running threads. See hat_switch() and reload_pae32() for details. 95 * 96 * vlp_page[0..3] - level==2 PTEs for kernel HAT 97 * vlp_page[4..7] - level==2 PTEs for user thread on cpu 0 98 * vlp_page[8..11] - level==2 PTE for user thread on cpu 1 99 * etc... 100 */ 101 static x86pte_t *vlp_page; 102 103 /* 104 * forward declaration of internal utility routines 105 */ 106 static x86pte_t hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected, 107 x86pte_t new); 108 109 /* 110 * The kernel address space exists in all HATs. To implement this the 111 * kernel reserves a fixed number of entries in the topmost level(s) of page 112 * tables. 
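 *
 * For example, on amd64 each top level (level == 3) entry maps a 512 Gig
 * region (2^39 bytes, since the level 3 shift is 39), and on a 32 bit
 * non-PAE kernel each top level entry maps 4 Meg (2^22 bytes); sharing
 * whole top level entries is what drives the kernelbase alignment
 * requirements described below.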
The values are set up during startup and then copied to every user
 * hat created by hat_alloc(). This means that kernelbase must be:
 *
 *	  4Meg aligned for 32 bit kernels
 *	512Gig aligned for x86_64 64 bit kernel
 *
 * The hat_kernel_range_ts describe what needs to be copied from the kernel
 * hat to each user hat.
 */
typedef struct hat_kernel_range {
	level_t		hkr_level;
	uintptr_t	hkr_start_va;
	uintptr_t	hkr_end_va;	/* zero means to end of memory */
} hat_kernel_range_t;
#define	NUM_KERNEL_RANGE	2
static hat_kernel_range_t kernel_ranges[NUM_KERNEL_RANGE];
static int num_kernel_ranges;

uint_t use_boot_reserve = 1;	/* cleared after early boot process */
uint_t can_steal_post_boot = 0;	/* set late in boot to enable stealing */

/*
 * enable_1gpg: controls 1g page support for user applications.
 * By default, 1g pages are exported to user applications. enable_1gpg can
 * be set to 0 to not export.
 */
int	enable_1gpg = 1;

/*
 * AMD Shanghai processors provide better management of 1gb PTEs in their
 * TLBs. By default, 1g page support will be disabled for pre-Shanghai AMD
 * processors that don't have optimal TLB support for the 1g page size.
 * chk_optimal_1gtlb can be set to 0 to force 1g page support on sub-optimal
 * processors.
 */
int	chk_optimal_1gtlb = 1;


#ifdef DEBUG
uint_t	map1gcnt;
#endif


/*
 * A cpuset for all cpus. This is used for kernel address cross calls, since
 * the kernel addresses apply to all cpus.
 */
cpuset_t khat_cpuset;

/*
 * management stuff for hat structures
 */
kmutex_t	hat_list_lock;
kcondvar_t	hat_list_cv;
kmem_cache_t	*hat_cache;
kmem_cache_t	*hat_hash_cache;
kmem_cache_t	*vlp_hash_cache;

/*
 * Simple statistics
 */
struct hatstats hatstat;

/*
 * Some earlier hypervisor versions do not emulate cmpxchg of PTEs
 * correctly. For such hypervisors we must set PT_USER for kernel
 * entries ourselves (normally the emulation would set PT_USER for
 * kernel entries and PT_USER|PT_GLOBAL for user entries). pt_kern is
 * thus set appropriately. Note that dboot/kbm is OK, as only the full
 * HAT uses cmpxchg() and the other paths (hypercall etc.) were never
 * incorrect.
 */
int pt_kern;

/*
 * useful stuff for atomic access/clearing/setting REF/MOD/RO bits in page_t's.
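 *
 * (The REF/MOD/RO state lives in the byte-sized p_nrm field of the page_t,
 * hence the byte-wide atomic_orb()/atomic_andb() operations used by the
 * macros below; they update p_nrm locklessly without disturbing adjacent
 * fields.)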
188 */ 189 extern void atomic_orb(uchar_t *addr, uchar_t val); 190 extern void atomic_andb(uchar_t *addr, uchar_t val); 191 192 #define PP_GETRM(pp, rmmask) (pp->p_nrm & rmmask) 193 #define PP_ISMOD(pp) PP_GETRM(pp, P_MOD) 194 #define PP_ISREF(pp) PP_GETRM(pp, P_REF) 195 #define PP_ISRO(pp) PP_GETRM(pp, P_RO) 196 197 #define PP_SETRM(pp, rm) atomic_orb(&(pp->p_nrm), rm) 198 #define PP_SETMOD(pp) PP_SETRM(pp, P_MOD) 199 #define PP_SETREF(pp) PP_SETRM(pp, P_REF) 200 #define PP_SETRO(pp) PP_SETRM(pp, P_RO) 201 202 #define PP_CLRRM(pp, rm) atomic_andb(&(pp->p_nrm), ~(rm)) 203 #define PP_CLRMOD(pp) PP_CLRRM(pp, P_MOD) 204 #define PP_CLRREF(pp) PP_CLRRM(pp, P_REF) 205 #define PP_CLRRO(pp) PP_CLRRM(pp, P_RO) 206 #define PP_CLRALL(pp) PP_CLRRM(pp, P_MOD | P_REF | P_RO) 207 208 /* 209 * kmem cache constructor for struct hat 210 */ 211 /*ARGSUSED*/ 212 static int 213 hati_constructor(void *buf, void *handle, int kmflags) 214 { 215 hat_t *hat = buf; 216 217 mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL); 218 bzero(hat->hat_pages_mapped, 219 sizeof (pgcnt_t) * (mmu.max_page_level + 1)); 220 hat->hat_ism_pgcnt = 0; 221 hat->hat_stats = 0; 222 hat->hat_flags = 0; 223 CPUSET_ZERO(hat->hat_cpus); 224 hat->hat_htable = NULL; 225 hat->hat_ht_hash = NULL; 226 return (0); 227 } 228 229 /* 230 * Allocate a hat structure for as. We also create the top level 231 * htable and initialize it to contain the kernel hat entries. 232 */ 233 hat_t * 234 hat_alloc(struct as *as) 235 { 236 hat_t *hat; 237 htable_t *ht; /* top level htable */ 238 uint_t use_vlp; 239 uint_t r; 240 hat_kernel_range_t *rp; 241 uintptr_t va; 242 uintptr_t eva; 243 uint_t start; 244 uint_t cnt; 245 htable_t *src; 246 247 /* 248 * Once we start creating user process HATs we can enable 249 * the htable_steal() code. 250 */ 251 if (can_steal_post_boot == 0) 252 can_steal_post_boot = 1; 253 254 ASSERT(AS_WRITE_HELD(as, &as->a_lock)); 255 hat = kmem_cache_alloc(hat_cache, KM_SLEEP); 256 hat->hat_as = as; 257 mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL); 258 ASSERT(hat->hat_flags == 0); 259 260 #if defined(__xpv) 261 /* 262 * No VLP stuff on the hypervisor due to the 64-bit split top level 263 * page tables. On 32-bit it's not needed as the hypervisor takes 264 * care of copying the top level PTEs to a below 4Gig page. 265 */ 266 use_vlp = 0; 267 #else /* __xpv */ 268 /* 32 bit processes uses a VLP style hat when running with PAE */ 269 #if defined(__amd64) 270 use_vlp = (ttoproc(curthread)->p_model == DATAMODEL_ILP32); 271 #elif defined(__i386) 272 use_vlp = mmu.pae_hat; 273 #endif 274 #endif /* __xpv */ 275 if (use_vlp) { 276 hat->hat_flags = HAT_VLP; 277 bzero(hat->hat_vlp_ptes, VLP_SIZE); 278 } 279 280 /* 281 * Allocate the htable hash 282 */ 283 if ((hat->hat_flags & HAT_VLP)) { 284 hat->hat_num_hash = mmu.vlp_hash_cnt; 285 hat->hat_ht_hash = kmem_cache_alloc(vlp_hash_cache, KM_SLEEP); 286 } else { 287 hat->hat_num_hash = mmu.hash_cnt; 288 hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_SLEEP); 289 } 290 bzero(hat->hat_ht_hash, hat->hat_num_hash * sizeof (htable_t *)); 291 292 /* 293 * Initialize Kernel HAT entries at the top of the top level page 294 * tables for the new hat. 
295 */ 296 hat->hat_htable = NULL; 297 hat->hat_ht_cached = NULL; 298 XPV_DISALLOW_MIGRATE(); 299 ht = htable_create(hat, (uintptr_t)0, TOP_LEVEL(hat), NULL); 300 hat->hat_htable = ht; 301 302 #if defined(__amd64) 303 if (hat->hat_flags & HAT_VLP) 304 goto init_done; 305 #endif 306 307 for (r = 0; r < num_kernel_ranges; ++r) { 308 rp = &kernel_ranges[r]; 309 for (va = rp->hkr_start_va; va != rp->hkr_end_va; 310 va += cnt * LEVEL_SIZE(rp->hkr_level)) { 311 312 if (rp->hkr_level == TOP_LEVEL(hat)) 313 ht = hat->hat_htable; 314 else 315 ht = htable_create(hat, va, rp->hkr_level, 316 NULL); 317 318 start = htable_va2entry(va, ht); 319 cnt = HTABLE_NUM_PTES(ht) - start; 320 eva = va + 321 ((uintptr_t)cnt << LEVEL_SHIFT(rp->hkr_level)); 322 if (rp->hkr_end_va != 0 && 323 (eva > rp->hkr_end_va || eva == 0)) 324 cnt = htable_va2entry(rp->hkr_end_va, ht) - 325 start; 326 327 #if defined(__i386) && !defined(__xpv) 328 if (ht->ht_flags & HTABLE_VLP) { 329 bcopy(&vlp_page[start], 330 &hat->hat_vlp_ptes[start], 331 cnt * sizeof (x86pte_t)); 332 continue; 333 } 334 #endif 335 src = htable_lookup(kas.a_hat, va, rp->hkr_level); 336 ASSERT(src != NULL); 337 x86pte_copy(src, ht, start, cnt); 338 htable_release(src); 339 } 340 } 341 342 init_done: 343 344 #if defined(__xpv) 345 /* 346 * Pin top level page tables after initializing them 347 */ 348 xen_pin(hat->hat_htable->ht_pfn, mmu.max_level); 349 #if defined(__amd64) 350 xen_pin(hat->hat_user_ptable, mmu.max_level); 351 #endif 352 #endif 353 XPV_ALLOW_MIGRATE(); 354 355 /* 356 * Put it at the start of the global list of all hats (used by stealing) 357 * 358 * kas.a_hat is not in the list but is instead used to find the 359 * first and last items in the list. 360 * 361 * - kas.a_hat->hat_next points to the start of the user hats. 362 * The list ends where hat->hat_next == NULL 363 * 364 * - kas.a_hat->hat_prev points to the last of the user hats. 365 * The list begins where hat->hat_prev == NULL 366 */ 367 mutex_enter(&hat_list_lock); 368 hat->hat_prev = NULL; 369 hat->hat_next = kas.a_hat->hat_next; 370 if (hat->hat_next) 371 hat->hat_next->hat_prev = hat; 372 else 373 kas.a_hat->hat_prev = hat; 374 kas.a_hat->hat_next = hat; 375 mutex_exit(&hat_list_lock); 376 377 return (hat); 378 } 379 380 /* 381 * process has finished executing but as has not been cleaned up yet. 382 */ 383 /*ARGSUSED*/ 384 void 385 hat_free_start(hat_t *hat) 386 { 387 ASSERT(AS_WRITE_HELD(hat->hat_as, &hat->hat_as->a_lock)); 388 389 /* 390 * If the hat is currently a stealing victim, wait for the stealing 391 * to finish. Once we mark it as HAT_FREEING, htable_steal() 392 * won't look at its pagetables anymore. 393 */ 394 mutex_enter(&hat_list_lock); 395 while (hat->hat_flags & HAT_VICTIM) 396 cv_wait(&hat_list_cv, &hat_list_lock); 397 hat->hat_flags |= HAT_FREEING; 398 mutex_exit(&hat_list_lock); 399 } 400 401 /* 402 * An address space is being destroyed, so we destroy the associated hat. 
 */
void
hat_free_end(hat_t *hat)
{
	kmem_cache_t *cache;

	ASSERT(hat->hat_flags & HAT_FREEING);

	/*
	 * must not be running on the given hat
	 */
	ASSERT(CPU->cpu_current_hat != hat);

	/*
	 * Remove it from the list of HATs
	 */
	mutex_enter(&hat_list_lock);
	if (hat->hat_prev)
		hat->hat_prev->hat_next = hat->hat_next;
	else
		kas.a_hat->hat_next = hat->hat_next;
	if (hat->hat_next)
		hat->hat_next->hat_prev = hat->hat_prev;
	else
		kas.a_hat->hat_prev = hat->hat_prev;
	mutex_exit(&hat_list_lock);
	hat->hat_next = hat->hat_prev = NULL;

#if defined(__xpv)
	/*
	 * On the hypervisor, unpin top level page table(s)
	 */
	xen_unpin(hat->hat_htable->ht_pfn);
#if defined(__amd64)
	xen_unpin(hat->hat_user_ptable);
#endif
#endif

	/*
	 * Make a pass through the htables freeing them all up.
	 */
	htable_purge_hat(hat);

	/*
	 * Decide which kmem cache the hash table came from, then free it.
	 */
	if (hat->hat_flags & HAT_VLP)
		cache = vlp_hash_cache;
	else
		cache = hat_hash_cache;
	kmem_cache_free(cache, hat->hat_ht_hash);
	hat->hat_ht_hash = NULL;

	hat->hat_flags = 0;
	kmem_cache_free(hat_cache, hat);
}

/*
 * round kernelbase down to a supported value to use for _userlimit
 *
 * userlimit must be aligned down to an entry in the top level htable.
 * The one exception is for 32 bit HAT's running PAE.
 */
uintptr_t
hat_kernelbase(uintptr_t va)
{
#if defined(__i386)
	va &= LEVEL_MASK(1);
#endif
	if (IN_VA_HOLE(va))
		panic("_userlimit %p will fall in VA hole\n", (void *)va);
	return (va);
}

/*
 * Determine the largest page size (pagetable level) usable for mappings,
 * based on boot-time large page support and CPU/platform capabilities.
 */
static void
set_max_page_level()
{
	level_t lvl;

	if (!kbm_largepage_support) {
		lvl = 0;
	} else if (x86_feature & X86_1GPG) {
		lvl = 2;
		if (chk_optimal_1gtlb && cpuid_opteron_erratum(CPU, 6671130)) {
			lvl = 1;
		}
		if (plat_mnode_xcheck(LEVEL_SIZE(2) >> LEVEL_SHIFT(0))) {
			lvl = 1;
		}
	} else {
		lvl = 1;
	}
	mmu.max_page_level = lvl;

	if ((lvl == 2) && (enable_1gpg == 0))
		mmu.umax_page_level = 1;
	else
		mmu.umax_page_level = lvl;
}

/*
 * Initialize hat data structures based on processor MMU information.
 */
void
mmu_init(void)
{
	uint_t max_htables;
	uint_t pa_bits;
	uint_t va_bits;
	int i;

	/*
	 * If the CPU enabled the page table global bit, use it for the kernel.
	 * This is bit 7 in CR4 (PGE - Page Global Enable).
	 */
	if ((x86_feature & X86_PGE) != 0 && (getcr4() & CR4_PGE) != 0)
		mmu.pt_global = PT_GLOBAL;

	/*
	 * Detect NX and PAE usage.
	 */
	mmu.pae_hat = kbm_pae_support;
	if (kbm_nx_support)
		mmu.pt_nx = PT_NX;
	else
		mmu.pt_nx = 0;

	/*
	 * Use CPU info to set various MMU parameters
	 */
	cpuid_get_addrsize(CPU, &pa_bits, &va_bits);

	if (va_bits < sizeof (void *) * NBBY) {
		mmu.hole_start = (1ul << (va_bits - 1));
		mmu.hole_end = 0ul - mmu.hole_start - 1;
	} else {
		mmu.hole_end = 0;
		mmu.hole_start = mmu.hole_end - 1;
	}
#if defined(OPTERON_ERRATUM_121)
	/*
	 * If erratum 121 has already been detected at this time, hole_start
	 * contains the value to be subtracted from mmu.hole_start.
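	 *
	 * For example, with 48 implemented VA bits the computation above
	 * yields hole_start == 0x0000800000000000 and
	 * hole_end == 0xffff7fffffffffff; the erratum workaround simply
	 * moves the published hole_start down by the pre-detected amount.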
550 */ 551 ASSERT(hole_start == 0 || opteron_erratum_121 != 0); 552 hole_start = mmu.hole_start - hole_start; 553 #else 554 hole_start = mmu.hole_start; 555 #endif 556 hole_end = mmu.hole_end; 557 558 mmu.highest_pfn = mmu_btop((1ull << pa_bits) - 1); 559 if (mmu.pae_hat == 0 && pa_bits > 32) 560 mmu.highest_pfn = PFN_4G - 1; 561 562 if (mmu.pae_hat) { 563 mmu.pte_size = 8; /* 8 byte PTEs */ 564 mmu.pte_size_shift = 3; 565 } else { 566 mmu.pte_size = 4; /* 4 byte PTEs */ 567 mmu.pte_size_shift = 2; 568 } 569 570 if (mmu.pae_hat && (x86_feature & X86_PAE) == 0) 571 panic("Processor does not support PAE"); 572 573 if ((x86_feature & X86_CX8) == 0) 574 panic("Processor does not support cmpxchg8b instruction"); 575 576 #if defined(__amd64) 577 578 mmu.num_level = 4; 579 mmu.max_level = 3; 580 mmu.ptes_per_table = 512; 581 mmu.top_level_count = 512; 582 583 mmu.level_shift[0] = 12; 584 mmu.level_shift[1] = 21; 585 mmu.level_shift[2] = 30; 586 mmu.level_shift[3] = 39; 587 588 #elif defined(__i386) 589 590 if (mmu.pae_hat) { 591 mmu.num_level = 3; 592 mmu.max_level = 2; 593 mmu.ptes_per_table = 512; 594 mmu.top_level_count = 4; 595 596 mmu.level_shift[0] = 12; 597 mmu.level_shift[1] = 21; 598 mmu.level_shift[2] = 30; 599 600 } else { 601 mmu.num_level = 2; 602 mmu.max_level = 1; 603 mmu.ptes_per_table = 1024; 604 mmu.top_level_count = 1024; 605 606 mmu.level_shift[0] = 12; 607 mmu.level_shift[1] = 22; 608 } 609 610 #endif /* __i386 */ 611 612 for (i = 0; i < mmu.num_level; ++i) { 613 mmu.level_size[i] = 1UL << mmu.level_shift[i]; 614 mmu.level_offset[i] = mmu.level_size[i] - 1; 615 mmu.level_mask[i] = ~mmu.level_offset[i]; 616 } 617 618 set_max_page_level(); 619 620 mmu_page_sizes = mmu.max_page_level + 1; 621 mmu_exported_page_sizes = mmu.umax_page_level + 1; 622 623 /* restrict legacy applications from using pagesizes 1g and above */ 624 mmu_legacy_page_sizes = 625 (mmu_exported_page_sizes > 2) ? 2 : mmu_exported_page_sizes; 626 627 628 for (i = 0; i <= mmu.max_page_level; ++i) { 629 mmu.pte_bits[i] = PT_VALID | pt_kern; 630 if (i > 0) 631 mmu.pte_bits[i] |= PT_PAGESIZE; 632 } 633 634 /* 635 * NOTE Legacy 32 bit PAE mode only has the P_VALID bit at top level. 636 */ 637 for (i = 1; i < mmu.num_level; ++i) 638 mmu.ptp_bits[i] = PT_PTPBITS; 639 640 #if defined(__i386) 641 mmu.ptp_bits[2] = PT_VALID; 642 #endif 643 644 /* 645 * Compute how many hash table entries to have per process for htables. 646 * We start with 1 page's worth of entries. 647 * 648 * If physical memory is small, reduce the amount need to cover it. 649 */ 650 max_htables = physmax / mmu.ptes_per_table; 651 mmu.hash_cnt = MMU_PAGESIZE / sizeof (htable_t *); 652 while (mmu.hash_cnt > 16 && mmu.hash_cnt >= max_htables) 653 mmu.hash_cnt >>= 1; 654 mmu.vlp_hash_cnt = mmu.hash_cnt; 655 656 #if defined(__amd64) 657 /* 658 * If running in 64 bits and physical memory is large, 659 * increase the size of the cache to cover all of memory for 660 * a 64 bit process. 
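 *
 * For example, with 4K pages and 8 byte pointers the initial hash_cnt is
 * MMU_PAGESIZE / sizeof (htable_t *) == 512 buckets; the loop below keeps
 * doubling that until a fully populated system would average no more than
 * about HASH_MAX_LENGTH (4) htables per bucket.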
	 */
#define	HASH_MAX_LENGTH 4
	while (mmu.hash_cnt * HASH_MAX_LENGTH < max_htables)
		mmu.hash_cnt <<= 1;
#endif
}


/*
 * initialize hat data structures
 */
void
hat_init()
{
#if defined(__i386)
	/*
	 * _userlimit must be aligned correctly
	 */
	if ((_userlimit & LEVEL_MASK(1)) != _userlimit) {
		prom_printf("hat_init(): _userlimit=%p, not aligned at %p\n",
		    (void *)_userlimit, (void *)LEVEL_SIZE(1));
		halt("hat_init(): Unable to continue");
	}
#endif

	cv_init(&hat_list_cv, NULL, CV_DEFAULT, NULL);

	/*
	 * initialize kmem caches
	 */
	htable_init();
	hment_init();

	hat_cache = kmem_cache_create("hat_t",
	    sizeof (hat_t), 0, hati_constructor, NULL, NULL,
	    NULL, 0, 0);

	hat_hash_cache = kmem_cache_create("HatHash",
	    mmu.hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
	    NULL, 0, 0);

	/*
	 * VLP hats can use a smaller hash table size on large memory machines
	 */
	if (mmu.hash_cnt == mmu.vlp_hash_cnt) {
		vlp_hash_cache = hat_hash_cache;
	} else {
		vlp_hash_cache = kmem_cache_create("HatVlpHash",
		    mmu.vlp_hash_cnt * sizeof (htable_t *), 0, NULL, NULL,
		    NULL, NULL, 0, 0);
	}

	/*
	 * Set up the kernel's hat
	 */
	AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER);
	kas.a_hat = kmem_cache_alloc(hat_cache, KM_NOSLEEP);
	mutex_init(&kas.a_hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
	kas.a_hat->hat_as = &kas;
	kas.a_hat->hat_flags = 0;
	AS_LOCK_EXIT(&kas, &kas.a_lock);

	CPUSET_ZERO(khat_cpuset);
	CPUSET_ADD(khat_cpuset, CPU->cpu_id);

	/*
	 * The kernel hat's next pointer serves as the head of the hat list.
	 * The kernel hat's prev pointer tracks the last hat on the list for
	 * htable_steal() to use.
	 */
	kas.a_hat->hat_next = NULL;
	kas.a_hat->hat_prev = NULL;

	/*
	 * Allocate an htable hash bucket for the kernel
	 * XX64 - tune for 64 bit procs
	 */
	kas.a_hat->hat_num_hash = mmu.hash_cnt;
	kas.a_hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_NOSLEEP);
	bzero(kas.a_hat->hat_ht_hash, mmu.hash_cnt * sizeof (htable_t *));

	/*
	 * zero out the top level and cached htable pointers
	 */
	kas.a_hat->hat_ht_cached = NULL;
	kas.a_hat->hat_htable = NULL;

	/*
	 * Pre-allocate hrm_hashtab before enabling the collection of
	 * refmod statistics. Allocating on the fly would mean we run
	 * the risk of suffering recursive mutex enters or deadlocks.
	 */
	hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *),
	    KM_SLEEP);
}

/*
 * Prepare CPU specific pagetables for VLP processes on 64 bit kernels.
 *
 * Each CPU has a set of 2 pagetables that are reused for any 32 bit
 * process it runs. They are the top level pagetable, hci_vlp_l3ptes, and
 * the next to top level table for the bottom 512 Gig, hci_vlp_l2ptes.
 */
/*ARGSUSED*/
static void
hat_vlp_setup(struct cpu *cpu)
{
#if defined(__amd64) && !defined(__xpv)
	struct hat_cpu_info *hci = cpu->cpu_hat_info;
	pfn_t pfn;

	/*
	 * allocate the level==2 page table for the bottom most
	 * 512Gig of address space (this is where 32 bit apps live)
	 */
	ASSERT(hci != NULL);
	hci->hci_vlp_l2ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);

	/*
	 * Allocate a top level pagetable and copy the kernel's
	 * entries into it. Then link in hci_vlp_l2ptes in the 1st entry.
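	 *
	 * The result is a private top level table whose entry 0 points at
	 * this CPU's hci_vlp_l2ptes (covering the low 512 Gig where 32 bit
	 * processes live), while the rest of the table is a copy of the
	 * kernel's top level entries taken from vlp_page.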
783 */ 784 hci->hci_vlp_l3ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP); 785 hci->hci_vlp_pfn = 786 hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l3ptes); 787 ASSERT(hci->hci_vlp_pfn != PFN_INVALID); 788 bcopy(vlp_page, hci->hci_vlp_l3ptes, MMU_PAGESIZE); 789 790 pfn = hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l2ptes); 791 ASSERT(pfn != PFN_INVALID); 792 hci->hci_vlp_l3ptes[0] = MAKEPTP(pfn, 2); 793 #endif /* __amd64 && !__xpv */ 794 } 795 796 /*ARGSUSED*/ 797 static void 798 hat_vlp_teardown(cpu_t *cpu) 799 { 800 #if defined(__amd64) && !defined(__xpv) 801 struct hat_cpu_info *hci; 802 803 if ((hci = cpu->cpu_hat_info) == NULL) 804 return; 805 if (hci->hci_vlp_l2ptes) 806 kmem_free(hci->hci_vlp_l2ptes, MMU_PAGESIZE); 807 if (hci->hci_vlp_l3ptes) 808 kmem_free(hci->hci_vlp_l3ptes, MMU_PAGESIZE); 809 #endif 810 } 811 812 #define NEXT_HKR(r, l, s, e) { \ 813 kernel_ranges[r].hkr_level = l; \ 814 kernel_ranges[r].hkr_start_va = s; \ 815 kernel_ranges[r].hkr_end_va = e; \ 816 ++r; \ 817 } 818 819 /* 820 * Finish filling in the kernel hat. 821 * Pre fill in all top level kernel page table entries for the kernel's 822 * part of the address range. From this point on we can't use any new 823 * kernel large pages if they need PTE's at max_level 824 * 825 * create the kmap mappings. 826 */ 827 void 828 hat_init_finish(void) 829 { 830 size_t size; 831 uint_t r = 0; 832 uintptr_t va; 833 hat_kernel_range_t *rp; 834 835 836 /* 837 * We are now effectively running on the kernel hat. 838 * Clearing use_boot_reserve shuts off using the pre-allocated boot 839 * reserve for all HAT allocations. From here on, the reserves are 840 * only used when avoiding recursion in kmem_alloc(). 841 */ 842 use_boot_reserve = 0; 843 htable_adjust_reserve(); 844 845 /* 846 * User HATs are initialized with copies of all kernel mappings in 847 * higher level page tables. Ensure that those entries exist. 848 */ 849 #if defined(__amd64) 850 851 NEXT_HKR(r, 3, kernelbase, 0); 852 #if defined(__xpv) 853 NEXT_HKR(r, 3, HYPERVISOR_VIRT_START, HYPERVISOR_VIRT_END); 854 #endif 855 856 #elif defined(__i386) 857 858 #if !defined(__xpv) 859 if (mmu.pae_hat) { 860 va = kernelbase; 861 if ((va & LEVEL_MASK(2)) != va) { 862 va = P2ROUNDUP(va, LEVEL_SIZE(2)); 863 NEXT_HKR(r, 1, kernelbase, va); 864 } 865 if (va != 0) 866 NEXT_HKR(r, 2, va, 0); 867 } else 868 #endif /* __xpv */ 869 NEXT_HKR(r, 1, kernelbase, 0); 870 871 #endif /* __i386 */ 872 873 num_kernel_ranges = r; 874 875 /* 876 * Create all the kernel pagetables that will have entries 877 * shared to user HATs. 878 */ 879 for (r = 0; r < num_kernel_ranges; ++r) { 880 rp = &kernel_ranges[r]; 881 for (va = rp->hkr_start_va; va != rp->hkr_end_va; 882 va += LEVEL_SIZE(rp->hkr_level)) { 883 htable_t *ht; 884 885 if (IN_HYPERVISOR_VA(va)) 886 continue; 887 888 /* can/must skip if a page mapping already exists */ 889 if (rp->hkr_level <= mmu.max_page_level && 890 (ht = htable_getpage(kas.a_hat, va, NULL)) != 891 NULL) { 892 htable_release(ht); 893 continue; 894 } 895 896 (void) htable_create(kas.a_hat, va, rp->hkr_level - 1, 897 NULL); 898 } 899 } 900 901 /* 902 * 32 bit PAE metal kernels use only 4 of the 512 entries in the 903 * page holding the top level pagetable. We use the remainder for 904 * the "per CPU" page tables for VLP processes. 905 * Map the top level kernel pagetable into the kernel to make 906 * it easy to use bcopy access these tables. 
907 */ 908 if (mmu.pae_hat) { 909 vlp_page = vmem_alloc(heap_arena, MMU_PAGESIZE, VM_SLEEP); 910 hat_devload(kas.a_hat, (caddr_t)vlp_page, MMU_PAGESIZE, 911 kas.a_hat->hat_htable->ht_pfn, 912 #if !defined(__xpv) 913 PROT_WRITE | 914 #endif 915 PROT_READ | HAT_NOSYNC | HAT_UNORDERED_OK, 916 HAT_LOAD | HAT_LOAD_NOCONSIST); 917 } 918 hat_vlp_setup(CPU); 919 920 /* 921 * Create kmap (cached mappings of kernel PTEs) 922 * for 32 bit we map from segmap_start .. ekernelheap 923 * for 64 bit we map from segmap_start .. segmap_start + segmapsize; 924 */ 925 #if defined(__i386) 926 size = (uintptr_t)ekernelheap - segmap_start; 927 #elif defined(__amd64) 928 size = segmapsize; 929 #endif 930 hat_kmap_init((uintptr_t)segmap_start, size); 931 } 932 933 /* 934 * On 32 bit PAE mode, PTE's are 64 bits, but ordinary atomic memory references 935 * are 32 bit, so for safety we must use cas64() to install these. 936 */ 937 #ifdef __i386 938 static void 939 reload_pae32(hat_t *hat, cpu_t *cpu) 940 { 941 x86pte_t *src; 942 x86pte_t *dest; 943 x86pte_t pte; 944 int i; 945 946 /* 947 * Load the 4 entries of the level 2 page table into this 948 * cpu's range of the vlp_page and point cr3 at them. 949 */ 950 ASSERT(mmu.pae_hat); 951 src = hat->hat_vlp_ptes; 952 dest = vlp_page + (cpu->cpu_id + 1) * VLP_NUM_PTES; 953 for (i = 0; i < VLP_NUM_PTES; ++i) { 954 for (;;) { 955 pte = dest[i]; 956 if (pte == src[i]) 957 break; 958 if (cas64(dest + i, pte, src[i]) != src[i]) 959 break; 960 } 961 } 962 } 963 #endif 964 965 /* 966 * Switch to a new active hat, maintaining bit masks to track active CPUs. 967 * 968 * On the 32-bit PAE hypervisor, %cr3 is a 64-bit value, on metal it 969 * remains a 32-bit value. 970 */ 971 void 972 hat_switch(hat_t *hat) 973 { 974 uint64_t newcr3; 975 cpu_t *cpu = CPU; 976 hat_t *old = cpu->cpu_current_hat; 977 978 /* 979 * set up this information first, so we don't miss any cross calls 980 */ 981 if (old != NULL) { 982 if (old == hat) 983 return; 984 if (old != kas.a_hat) 985 CPUSET_ATOMIC_DEL(old->hat_cpus, cpu->cpu_id); 986 } 987 988 /* 989 * Add this CPU to the active set for this HAT. 990 */ 991 if (hat != kas.a_hat) { 992 CPUSET_ATOMIC_ADD(hat->hat_cpus, cpu->cpu_id); 993 } 994 cpu->cpu_current_hat = hat; 995 996 /* 997 * now go ahead and load cr3 998 */ 999 if (hat->hat_flags & HAT_VLP) { 1000 #if defined(__amd64) 1001 x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes; 1002 1003 VLP_COPY(hat->hat_vlp_ptes, vlpptep); 1004 newcr3 = MAKECR3(cpu->cpu_hat_info->hci_vlp_pfn); 1005 #elif defined(__i386) 1006 reload_pae32(hat, cpu); 1007 newcr3 = MAKECR3(kas.a_hat->hat_htable->ht_pfn) + 1008 (cpu->cpu_id + 1) * VLP_SIZE; 1009 #endif 1010 } else { 1011 newcr3 = MAKECR3((uint64_t)hat->hat_htable->ht_pfn); 1012 } 1013 #ifdef __xpv 1014 { 1015 struct mmuext_op t[2]; 1016 uint_t retcnt; 1017 uint_t opcnt = 1; 1018 1019 t[0].cmd = MMUEXT_NEW_BASEPTR; 1020 t[0].arg1.mfn = mmu_btop(pa_to_ma(newcr3)); 1021 #if defined(__amd64) 1022 /* 1023 * There's an interesting problem here, as to what to 1024 * actually specify when switching to the kernel hat. 1025 * For now we'll reuse the kernel hat again. 
		 */
		t[1].cmd = MMUEXT_NEW_USER_BASEPTR;
		if (hat == kas.a_hat)
			t[1].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
		else
			t[1].arg1.mfn = pfn_to_mfn(hat->hat_user_ptable);
		++opcnt;
#endif	/* __amd64 */
		if (HYPERVISOR_mmuext_op(t, opcnt, &retcnt, DOMID_SELF) < 0)
			panic("HYPERVISOR_mmuext_op() failed");
		ASSERT(retcnt == opcnt);

	}
#else
	setcr3(newcr3);
#endif
	ASSERT(cpu == CPU);
}

/*
 * Utility to return a valid x86pte_t from protections, pfn, and level number
 */
static x86pte_t
hati_mkpte(pfn_t pfn, uint_t attr, level_t level, uint_t flags)
{
	x86pte_t	pte;
	uint_t		cache_attr = attr & HAT_ORDER_MASK;

	pte = MAKEPTE(pfn, level);

	if (attr & PROT_WRITE)
		PTE_SET(pte, PT_WRITABLE);

	if (attr & PROT_USER)
		PTE_SET(pte, PT_USER);

	if (!(attr & PROT_EXEC))
		PTE_SET(pte, mmu.pt_nx);

	/*
	 * Set the software bits used to track ref/mod sync's and hments.
	 * If not using REF/MOD, set them to avoid h/w rewriting PTEs.
	 */
	if (flags & HAT_LOAD_NOCONSIST)
		PTE_SET(pte, PT_NOCONSIST | PT_REF | PT_MOD);
	else if (attr & HAT_NOSYNC)
		PTE_SET(pte, PT_NOSYNC | PT_REF | PT_MOD);

	/*
	 * Set the caching attributes in the PTE. The combination
	 * of attributes is poorly defined, so we pay attention
	 * to them in the given order.
	 *
	 * The test for HAT_STRICTORDER is different because it's defined
	 * as "0" - which was a stupid thing to do, but is too late to change!
	 */
	if (cache_attr == HAT_STRICTORDER) {
		PTE_SET(pte, PT_NOCACHE);
	/*LINTED [Lint hates empty ifs, but it's the obvious way to do this] */
	} else if (cache_attr & (HAT_UNORDERED_OK | HAT_STORECACHING_OK)) {
		/* nothing to set */;
	} else if (cache_attr & (HAT_MERGING_OK | HAT_LOADCACHING_OK)) {
		PTE_SET(pte, PT_NOCACHE);
		if (x86_feature & X86_PAT)
			PTE_SET(pte, (level == 0) ? PT_PAT_4K : PT_PAT_LARGE);
		else
			PTE_SET(pte, PT_WRITETHRU);
	} else {
		panic("hati_mkpte(): bad caching attributes: %x\n",
		    cache_attr);
	}

	return (pte);
}

/*
 * Duplicate address translations of the parent to the child.
 * This function really isn't used anymore.
 */
/*ARGSUSED*/
int
hat_dup(hat_t *old, hat_t *new, caddr_t addr, size_t len, uint_t flag)
{
	ASSERT((uintptr_t)addr < kernelbase);
	ASSERT(new != kas.a_hat);
	ASSERT(old != kas.a_hat);
	return (0);
}

/*
 * Allocate any hat resources required for a process being swapped in.
 */
/*ARGSUSED*/
void
hat_swapin(hat_t *hat)
{
	/* do nothing - we let everything fault back in */
}

/*
 * Unload all translations associated with an address space of a process
 * that is being swapped out.
 */
void
hat_swapout(hat_t *hat)
{
	uintptr_t	vaddr = (uintptr_t)0;
	uintptr_t	eaddr = _userlimit;
	htable_t	*ht = NULL;
	level_t		l;

	XPV_DISALLOW_MIGRATE();
	/*
	 * We can't just call hat_unload(hat, 0, _userlimit...) here, because
	 * seg_spt and shared pagetables can't be swapped out.
	 * Take a look at segspt_shmswapout() - it's a big no-op.
	 *
	 * Instead we'll walk through all the address space and unload
	 * any mappings which we are sure are not shared, not locked.
1144 */ 1145 ASSERT(IS_PAGEALIGNED(vaddr)); 1146 ASSERT(IS_PAGEALIGNED(eaddr)); 1147 ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock)); 1148 if ((uintptr_t)hat->hat_as->a_userlimit < eaddr) 1149 eaddr = (uintptr_t)hat->hat_as->a_userlimit; 1150 1151 while (vaddr < eaddr) { 1152 (void) htable_walk(hat, &ht, &vaddr, eaddr); 1153 if (ht == NULL) 1154 break; 1155 1156 ASSERT(!IN_VA_HOLE(vaddr)); 1157 1158 /* 1159 * If the page table is shared skip its entire range. 1160 */ 1161 l = ht->ht_level; 1162 if (ht->ht_flags & HTABLE_SHARED_PFN) { 1163 vaddr = ht->ht_vaddr + LEVEL_SIZE(l + 1); 1164 htable_release(ht); 1165 ht = NULL; 1166 continue; 1167 } 1168 1169 /* 1170 * If the page table has no locked entries, unload this one. 1171 */ 1172 if (ht->ht_lock_cnt == 0) 1173 hat_unload(hat, (caddr_t)vaddr, LEVEL_SIZE(l), 1174 HAT_UNLOAD_UNMAP); 1175 1176 /* 1177 * If we have a level 0 page table with locked entries, 1178 * skip the entire page table, otherwise skip just one entry. 1179 */ 1180 if (ht->ht_lock_cnt > 0 && l == 0) 1181 vaddr = ht->ht_vaddr + LEVEL_SIZE(1); 1182 else 1183 vaddr += LEVEL_SIZE(l); 1184 } 1185 if (ht) 1186 htable_release(ht); 1187 1188 /* 1189 * We're in swapout because the system is low on memory, so 1190 * go back and flush all the htables off the cached list. 1191 */ 1192 htable_purge_hat(hat); 1193 XPV_ALLOW_MIGRATE(); 1194 } 1195 1196 /* 1197 * returns number of bytes that have valid mappings in hat. 1198 */ 1199 size_t 1200 hat_get_mapped_size(hat_t *hat) 1201 { 1202 size_t total = 0; 1203 int l; 1204 1205 for (l = 0; l <= mmu.max_page_level; l++) 1206 total += (hat->hat_pages_mapped[l] << LEVEL_SHIFT(l)); 1207 total += hat->hat_ism_pgcnt; 1208 1209 return (total); 1210 } 1211 1212 /* 1213 * enable/disable collection of stats for hat. 1214 */ 1215 int 1216 hat_stats_enable(hat_t *hat) 1217 { 1218 atomic_add_32(&hat->hat_stats, 1); 1219 return (1); 1220 } 1221 1222 void 1223 hat_stats_disable(hat_t *hat) 1224 { 1225 atomic_add_32(&hat->hat_stats, -1); 1226 } 1227 1228 /* 1229 * Utility to sync the ref/mod bits from a page table entry to the page_t 1230 * We must be holding the mapping list lock when this is called. 1231 */ 1232 static void 1233 hati_sync_pte_to_page(page_t *pp, x86pte_t pte, level_t level) 1234 { 1235 uint_t rm = 0; 1236 pgcnt_t pgcnt; 1237 1238 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC) 1239 return; 1240 1241 if (PTE_GET(pte, PT_REF)) 1242 rm |= P_REF; 1243 1244 if (PTE_GET(pte, PT_MOD)) 1245 rm |= P_MOD; 1246 1247 if (rm == 0) 1248 return; 1249 1250 /* 1251 * sync to all constituent pages of a large page 1252 */ 1253 ASSERT(x86_hm_held(pp)); 1254 pgcnt = page_get_pagecnt(level); 1255 ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt)); 1256 for (; pgcnt > 0; --pgcnt) { 1257 /* 1258 * hat_page_demote() can't decrease 1259 * pszc below this mapping size 1260 * since this large mapping existed after we 1261 * took mlist lock. 1262 */ 1263 ASSERT(pp->p_szc >= level); 1264 hat_page_setattr(pp, rm); 1265 ++pp; 1266 } 1267 } 1268 1269 /* 1270 * This the set of PTE bits for PFN, permissions and caching 1271 * that are allowed to change on a HAT_LOAD_REMAP 1272 */ 1273 #define PT_REMAP_BITS \ 1274 (PT_PADDR | PT_NX | PT_WRITABLE | PT_WRITETHRU | \ 1275 PT_NOCACHE | PT_PAT_4K | PT_PAT_LARGE | PT_IGNORE | PT_REF | PT_MOD) 1276 1277 #define REMAPASSERT(EX) if (!(EX)) panic("hati_pte_map: " #EX) 1278 /* 1279 * Do the low-level work to get a mapping entered into a HAT's pagetables 1280 * and in the mapping list of the associated page_t. 
 */
static int
hati_pte_map(
	htable_t	*ht,
	uint_t		entry,
	page_t		*pp,
	x86pte_t	pte,
	int		flags,
	void		*pte_ptr)
{
	hat_t		*hat = ht->ht_hat;
	x86pte_t	old_pte;
	level_t		l = ht->ht_level;
	hment_t		*hm;
	uint_t		is_consist;
	int		rv = 0;

	/*
	 * Is this a consistent (ie. needs the mapping list lock) mapping?
	 */
	is_consist = (pp != NULL && (flags & HAT_LOAD_NOCONSIST) == 0);

	/*
	 * Track locked mapping count in the htable. Do this first,
	 * as we track locking even if there already is a mapping present.
	 */
	if ((flags & HAT_LOAD_LOCK) != 0 && hat != kas.a_hat)
		HTABLE_LOCK_INC(ht);

	/*
	 * Acquire the page's mapping list lock and get an hment to use.
	 * Note that hment_prepare() might return NULL.
	 */
	if (is_consist) {
		x86_hm_enter(pp);
		hm = hment_prepare(ht, entry, pp);
	}

	/*
	 * Set the new pte, retrieving the old one at the same time.
	 */
	old_pte = x86pte_set(ht, entry, pte, pte_ptr);

	/*
	 * did we get a large page / page table collision?
	 */
	if (old_pte == LPAGE_ERROR) {
		rv = -1;
		goto done;
	}

	/*
	 * If the mapping didn't change there is nothing more to do.
	 */
	if (PTE_EQUIV(pte, old_pte))
		goto done;

	/*
	 * Install a new mapping in the page's mapping list
	 */
	if (!PTE_ISVALID(old_pte)) {
		if (is_consist) {
			hment_assign(ht, entry, pp, hm);
			x86_hm_exit(pp);
		} else {
			ASSERT(flags & HAT_LOAD_NOCONSIST);
		}
#if defined(__amd64)
		if (ht->ht_flags & HTABLE_VLP) {
			cpu_t *cpu = CPU;
			x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes;
			VLP_COPY(hat->hat_vlp_ptes, vlpptep);
		}
#endif
		HTABLE_INC(ht->ht_valid_cnt);
		PGCNT_INC(hat, l);
		return (rv);
	}

	/*
	 * Remaps are more complicated:
	 * - HAT_LOAD_REMAP must be specified if changing the pfn.
	 *   We also require that NOCONSIST be specified.
	 * - Otherwise only permission or caching bits may change.
	 */
	if (!PTE_ISPAGE(old_pte, l))
		panic("non-null/page mapping pte=" FMT_PTE, old_pte);

	if (PTE2PFN(old_pte, l) != PTE2PFN(pte, l)) {
		REMAPASSERT(flags & HAT_LOAD_REMAP);
		REMAPASSERT(flags & HAT_LOAD_NOCONSIST);
		REMAPASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
		REMAPASSERT(pf_is_memory(PTE2PFN(old_pte, l)) ==
		    pf_is_memory(PTE2PFN(pte, l)));
		REMAPASSERT(!is_consist);
	}

	/*
	 * We only let remaps change certain bits in the PTE.
	 */
	if (PTE_GET(old_pte, ~PT_REMAP_BITS) != PTE_GET(pte, ~PT_REMAP_BITS))
		panic("remap bits changed: old_pte="FMT_PTE", pte="FMT_PTE"\n",
		    old_pte, pte);

	/*
	 * We don't create any mapping list entries on a remap, so release
	 * any allocated hment after we drop the mapping list lock.
	 */
done:
	if (is_consist) {
		x86_hm_exit(pp);
		if (hm != NULL)
			hment_free(hm);
	}
	return (rv);
}

/*
 * Internal routine to load a single page table entry. This only fails if
 * we attempt to overwrite a page table link with a large page.
1401 */ 1402 static int 1403 hati_load_common( 1404 hat_t *hat, 1405 uintptr_t va, 1406 page_t *pp, 1407 uint_t attr, 1408 uint_t flags, 1409 level_t level, 1410 pfn_t pfn) 1411 { 1412 htable_t *ht; 1413 uint_t entry; 1414 x86pte_t pte; 1415 int rv = 0; 1416 1417 /* 1418 * The number 16 is arbitrary and here to catch a recursion problem 1419 * early before we blow out the kernel stack. 1420 */ 1421 ++curthread->t_hatdepth; 1422 ASSERT(curthread->t_hatdepth < 16); 1423 1424 ASSERT(hat == kas.a_hat || 1425 AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock)); 1426 1427 if (flags & HAT_LOAD_SHARE) 1428 hat->hat_flags |= HAT_SHARED; 1429 1430 /* 1431 * Find the page table that maps this page if it already exists. 1432 */ 1433 ht = htable_lookup(hat, va, level); 1434 1435 /* 1436 * We must have HAT_LOAD_NOCONSIST if page_t is NULL. 1437 */ 1438 if (pp == NULL) 1439 flags |= HAT_LOAD_NOCONSIST; 1440 1441 if (ht == NULL) { 1442 ht = htable_create(hat, va, level, NULL); 1443 ASSERT(ht != NULL); 1444 } 1445 entry = htable_va2entry(va, ht); 1446 1447 /* 1448 * a bunch of paranoid error checking 1449 */ 1450 ASSERT(ht->ht_busy > 0); 1451 if (ht->ht_vaddr > va || va > HTABLE_LAST_PAGE(ht)) 1452 panic("hati_load_common: bad htable %p, va %p", ht, (void *)va); 1453 ASSERT(ht->ht_level == level); 1454 1455 /* 1456 * construct the new PTE 1457 */ 1458 if (hat == kas.a_hat) 1459 attr &= ~PROT_USER; 1460 pte = hati_mkpte(pfn, attr, level, flags); 1461 if (hat == kas.a_hat && va >= kernelbase) 1462 PTE_SET(pte, mmu.pt_global); 1463 1464 /* 1465 * establish the mapping 1466 */ 1467 rv = hati_pte_map(ht, entry, pp, pte, flags, NULL); 1468 1469 /* 1470 * release the htable and any reserves 1471 */ 1472 htable_release(ht); 1473 --curthread->t_hatdepth; 1474 return (rv); 1475 } 1476 1477 /* 1478 * special case of hat_memload to deal with some kernel addrs for performance 1479 */ 1480 static void 1481 hat_kmap_load( 1482 caddr_t addr, 1483 page_t *pp, 1484 uint_t attr, 1485 uint_t flags) 1486 { 1487 uintptr_t va = (uintptr_t)addr; 1488 x86pte_t pte; 1489 pfn_t pfn = page_pptonum(pp); 1490 pgcnt_t pg_off = mmu_btop(va - mmu.kmap_addr); 1491 htable_t *ht; 1492 uint_t entry; 1493 void *pte_ptr; 1494 1495 /* 1496 * construct the requested PTE 1497 */ 1498 attr &= ~PROT_USER; 1499 attr |= HAT_STORECACHING_OK; 1500 pte = hati_mkpte(pfn, attr, 0, flags); 1501 PTE_SET(pte, mmu.pt_global); 1502 1503 /* 1504 * Figure out the pte_ptr and htable and use common code to finish up 1505 */ 1506 if (mmu.pae_hat) 1507 pte_ptr = mmu.kmap_ptes + pg_off; 1508 else 1509 pte_ptr = (x86pte32_t *)mmu.kmap_ptes + pg_off; 1510 ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr) >> 1511 LEVEL_SHIFT(1)]; 1512 entry = htable_va2entry(va, ht); 1513 ++curthread->t_hatdepth; 1514 ASSERT(curthread->t_hatdepth < 16); 1515 (void) hati_pte_map(ht, entry, pp, pte, flags, pte_ptr); 1516 --curthread->t_hatdepth; 1517 } 1518 1519 /* 1520 * hat_memload() - load a translation to the given page struct 1521 * 1522 * Flags for hat_memload/hat_devload/hat_*attr. 1523 * 1524 * HAT_LOAD Default flags to load a translation to the page. 1525 * 1526 * HAT_LOAD_LOCK Lock down mapping resources; hat_map(), hat_memload(), 1527 * and hat_devload(). 1528 * 1529 * HAT_LOAD_NOCONSIST Do not add mapping to page_t mapping list. 1530 * sets PT_NOCONSIST 1531 * 1532 * HAT_LOAD_SHARE A flag to hat_memload() to indicate h/w page tables 1533 * that map some user pages (not kas) is shared by more 1534 * than one process (eg. ISM). 
1535 * 1536 * HAT_LOAD_REMAP Reload a valid pte with a different page frame. 1537 * 1538 * HAT_NO_KALLOC Do not kmem_alloc while creating the mapping; at this 1539 * point, it's setting up mapping to allocate internal 1540 * hat layer data structures. This flag forces hat layer 1541 * to tap its reserves in order to prevent infinite 1542 * recursion. 1543 * 1544 * The following is a protection attribute (like PROT_READ, etc.) 1545 * 1546 * HAT_NOSYNC set PT_NOSYNC - this mapping's ref/mod bits 1547 * are never cleared. 1548 * 1549 * Installing new valid PTE's and creation of the mapping list 1550 * entry are controlled under the same lock. It's derived from the 1551 * page_t being mapped. 1552 */ 1553 static uint_t supported_memload_flags = 1554 HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_ADV | HAT_LOAD_NOCONSIST | 1555 HAT_LOAD_SHARE | HAT_NO_KALLOC | HAT_LOAD_REMAP | HAT_LOAD_TEXT; 1556 1557 void 1558 hat_memload( 1559 hat_t *hat, 1560 caddr_t addr, 1561 page_t *pp, 1562 uint_t attr, 1563 uint_t flags) 1564 { 1565 uintptr_t va = (uintptr_t)addr; 1566 level_t level = 0; 1567 pfn_t pfn = page_pptonum(pp); 1568 1569 XPV_DISALLOW_MIGRATE(); 1570 ASSERT(IS_PAGEALIGNED(va)); 1571 ASSERT(hat == kas.a_hat || va < _userlimit); 1572 ASSERT(hat == kas.a_hat || 1573 AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock)); 1574 ASSERT((flags & supported_memload_flags) == flags); 1575 1576 ASSERT(!IN_VA_HOLE(va)); 1577 ASSERT(!PP_ISFREE(pp)); 1578 1579 /* 1580 * kernel address special case for performance. 1581 */ 1582 if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) { 1583 ASSERT(hat == kas.a_hat); 1584 hat_kmap_load(addr, pp, attr, flags); 1585 XPV_ALLOW_MIGRATE(); 1586 return; 1587 } 1588 1589 /* 1590 * This is used for memory with normal caching enabled, so 1591 * always set HAT_STORECACHING_OK. 1592 */ 1593 attr |= HAT_STORECACHING_OK; 1594 if (hati_load_common(hat, va, pp, attr, flags, level, pfn) != 0) 1595 panic("unexpected hati_load_common() failure"); 1596 XPV_ALLOW_MIGRATE(); 1597 } 1598 1599 /* ARGSUSED */ 1600 void 1601 hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp, 1602 uint_t attr, uint_t flags, hat_region_cookie_t rcookie) 1603 { 1604 hat_memload(hat, addr, pp, attr, flags); 1605 } 1606 1607 /* 1608 * Load the given array of page structs using large pages when possible 1609 */ 1610 void 1611 hat_memload_array( 1612 hat_t *hat, 1613 caddr_t addr, 1614 size_t len, 1615 page_t **pages, 1616 uint_t attr, 1617 uint_t flags) 1618 { 1619 uintptr_t va = (uintptr_t)addr; 1620 uintptr_t eaddr = va + len; 1621 level_t level; 1622 size_t pgsize; 1623 pgcnt_t pgindx = 0; 1624 pfn_t pfn; 1625 pgcnt_t i; 1626 1627 XPV_DISALLOW_MIGRATE(); 1628 ASSERT(IS_PAGEALIGNED(va)); 1629 ASSERT(hat == kas.a_hat || va + len <= _userlimit); 1630 ASSERT(hat == kas.a_hat || 1631 AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock)); 1632 ASSERT((flags & supported_memload_flags) == flags); 1633 1634 /* 1635 * memload is used for memory with full caching enabled, so 1636 * set HAT_STORECACHING_OK. 1637 */ 1638 attr |= HAT_STORECACHING_OK; 1639 1640 /* 1641 * handle all pages using largest possible pagesize 1642 */ 1643 while (va < eaddr) { 1644 /* 1645 * decide what level mapping to use (ie. 
pagesize) 1646 */ 1647 pfn = page_pptonum(pages[pgindx]); 1648 for (level = mmu.max_page_level; ; --level) { 1649 pgsize = LEVEL_SIZE(level); 1650 if (level == 0) 1651 break; 1652 1653 if (!IS_P2ALIGNED(va, pgsize) || 1654 (eaddr - va) < pgsize || 1655 !IS_P2ALIGNED(pfn_to_pa(pfn), pgsize)) 1656 continue; 1657 1658 /* 1659 * To use a large mapping of this size, all the 1660 * pages we are passed must be sequential subpages 1661 * of the large page. 1662 * hat_page_demote() can't change p_szc because 1663 * all pages are locked. 1664 */ 1665 if (pages[pgindx]->p_szc >= level) { 1666 for (i = 0; i < mmu_btop(pgsize); ++i) { 1667 if (pfn + i != 1668 page_pptonum(pages[pgindx + i])) 1669 break; 1670 ASSERT(pages[pgindx + i]->p_szc >= 1671 level); 1672 ASSERT(pages[pgindx] + i == 1673 pages[pgindx + i]); 1674 } 1675 if (i == mmu_btop(pgsize)) { 1676 #ifdef DEBUG 1677 if (level == 2) 1678 map1gcnt++; 1679 #endif 1680 break; 1681 } 1682 } 1683 } 1684 1685 /* 1686 * Load this page mapping. If the load fails, try a smaller 1687 * pagesize. 1688 */ 1689 ASSERT(!IN_VA_HOLE(va)); 1690 while (hati_load_common(hat, va, pages[pgindx], attr, 1691 flags, level, pfn) != 0) { 1692 if (level == 0) 1693 panic("unexpected hati_load_common() failure"); 1694 --level; 1695 pgsize = LEVEL_SIZE(level); 1696 } 1697 1698 /* 1699 * move to next page 1700 */ 1701 va += pgsize; 1702 pgindx += mmu_btop(pgsize); 1703 } 1704 XPV_ALLOW_MIGRATE(); 1705 } 1706 1707 /* ARGSUSED */ 1708 void 1709 hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len, 1710 struct page **pps, uint_t attr, uint_t flags, 1711 hat_region_cookie_t rcookie) 1712 { 1713 hat_memload_array(hat, addr, len, pps, attr, flags); 1714 } 1715 1716 /* 1717 * void hat_devload(hat, addr, len, pf, attr, flags) 1718 * load/lock the given page frame number 1719 * 1720 * Advisory ordering attributes. Apply only to device mappings. 1721 * 1722 * HAT_STRICTORDER: the CPU must issue the references in order, as the 1723 * programmer specified. This is the default. 1724 * HAT_UNORDERED_OK: the CPU may reorder the references (this is all kinds 1725 * of reordering; store or load with store or load). 1726 * HAT_MERGING_OK: merging and batching: the CPU may merge individual stores 1727 * to consecutive locations (for example, turn two consecutive byte 1728 * stores into one halfword store), and it may batch individual loads 1729 * (for example, turn two consecutive byte loads into one halfword load). 1730 * This also implies re-ordering. 1731 * HAT_LOADCACHING_OK: the CPU may cache the data it fetches and reuse it 1732 * until another store occurs. The default is to fetch new data 1733 * on every load. This also implies merging. 1734 * HAT_STORECACHING_OK: the CPU may keep the data in the cache and push it to 1735 * the device (perhaps with other data) at a later time. The default is 1736 * to push the data right away. This also implies load caching. 1737 * 1738 * Equivalent of hat_memload(), but can be used for device memory where 1739 * there are no page_t's and we support additional flags (write merging, etc). 1740 * Note that we can have large page mappings with this interface. 
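 *
 * An illustrative (hypothetical) use by a driver mapping one uncached,
 * strictly ordered device register page into the kernel might look like:
 *
 *	hat_devload(kas.a_hat, va, MMU_PAGESIZE, pfn,
 *	    PROT_READ | PROT_WRITE | HAT_STRICTORDER,
 *	    HAT_LOAD_LOCK);
 *
 * where va and pfn are the caller's virtual address and page frame number.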
1741 */ 1742 int supported_devload_flags = HAT_LOAD | HAT_LOAD_LOCK | 1743 HAT_LOAD_NOCONSIST | HAT_STRICTORDER | HAT_UNORDERED_OK | 1744 HAT_MERGING_OK | HAT_LOADCACHING_OK | HAT_STORECACHING_OK; 1745 1746 void 1747 hat_devload( 1748 hat_t *hat, 1749 caddr_t addr, 1750 size_t len, 1751 pfn_t pfn, 1752 uint_t attr, 1753 int flags) 1754 { 1755 uintptr_t va = ALIGN2PAGE(addr); 1756 uintptr_t eva = va + len; 1757 level_t level; 1758 size_t pgsize; 1759 page_t *pp; 1760 int f; /* per PTE copy of flags - maybe modified */ 1761 uint_t a; /* per PTE copy of attr */ 1762 1763 XPV_DISALLOW_MIGRATE(); 1764 ASSERT(IS_PAGEALIGNED(va)); 1765 ASSERT(hat == kas.a_hat || eva <= _userlimit); 1766 ASSERT(hat == kas.a_hat || 1767 AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock)); 1768 ASSERT((flags & supported_devload_flags) == flags); 1769 1770 /* 1771 * handle all pages 1772 */ 1773 while (va < eva) { 1774 1775 /* 1776 * decide what level mapping to use (ie. pagesize) 1777 */ 1778 for (level = mmu.max_page_level; ; --level) { 1779 pgsize = LEVEL_SIZE(level); 1780 if (level == 0) 1781 break; 1782 if (IS_P2ALIGNED(va, pgsize) && 1783 (eva - va) >= pgsize && 1784 IS_P2ALIGNED(pfn, mmu_btop(pgsize))) { 1785 #ifdef DEBUG 1786 if (level == 2) 1787 map1gcnt++; 1788 #endif 1789 break; 1790 } 1791 } 1792 1793 /* 1794 * If this is just memory then allow caching (this happens 1795 * for the nucleus pages) - though HAT_PLAT_NOCACHE can be used 1796 * to override that. If we don't have a page_t then make sure 1797 * NOCONSIST is set. 1798 */ 1799 a = attr; 1800 f = flags; 1801 if (!pf_is_memory(pfn)) 1802 f |= HAT_LOAD_NOCONSIST; 1803 else if (!(a & HAT_PLAT_NOCACHE)) 1804 a |= HAT_STORECACHING_OK; 1805 1806 if (f & HAT_LOAD_NOCONSIST) 1807 pp = NULL; 1808 else 1809 pp = page_numtopp_nolock(pfn); 1810 1811 /* 1812 * load this page mapping 1813 */ 1814 ASSERT(!IN_VA_HOLE(va)); 1815 while (hati_load_common(hat, va, pp, a, f, level, pfn) != 0) { 1816 if (level == 0) 1817 panic("unexpected hati_load_common() failure"); 1818 --level; 1819 pgsize = LEVEL_SIZE(level); 1820 } 1821 1822 /* 1823 * move to next page 1824 */ 1825 va += pgsize; 1826 pfn += mmu_btop(pgsize); 1827 } 1828 XPV_ALLOW_MIGRATE(); 1829 } 1830 1831 /* 1832 * void hat_unlock(hat, addr, len) 1833 * unlock the mappings to a given range of addresses 1834 * 1835 * Locks are tracked by ht_lock_cnt in the htable. 
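 *
 * This is the inverse of loading with HAT_LOAD_LOCK: any range locked down
 * via hat_memload()/hat_devload() with HAT_LOAD_LOCK should eventually be
 * unlocked here over the same range.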
1836 */ 1837 void 1838 hat_unlock(hat_t *hat, caddr_t addr, size_t len) 1839 { 1840 uintptr_t vaddr = (uintptr_t)addr; 1841 uintptr_t eaddr = vaddr + len; 1842 htable_t *ht = NULL; 1843 1844 /* 1845 * kernel entries are always locked, we don't track lock counts 1846 */ 1847 ASSERT(hat == kas.a_hat || eaddr <= _userlimit); 1848 ASSERT(IS_PAGEALIGNED(vaddr)); 1849 ASSERT(IS_PAGEALIGNED(eaddr)); 1850 if (hat == kas.a_hat) 1851 return; 1852 if (eaddr > _userlimit) 1853 panic("hat_unlock() address out of range - above _userlimit"); 1854 1855 XPV_DISALLOW_MIGRATE(); 1856 ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock)); 1857 while (vaddr < eaddr) { 1858 (void) htable_walk(hat, &ht, &vaddr, eaddr); 1859 if (ht == NULL) 1860 break; 1861 1862 ASSERT(!IN_VA_HOLE(vaddr)); 1863 1864 if (ht->ht_lock_cnt < 1) 1865 panic("hat_unlock(): lock_cnt < 1, " 1866 "htable=%p, vaddr=%p\n", ht, (caddr_t)vaddr); 1867 HTABLE_LOCK_DEC(ht); 1868 1869 vaddr += LEVEL_SIZE(ht->ht_level); 1870 } 1871 if (ht) 1872 htable_release(ht); 1873 XPV_ALLOW_MIGRATE(); 1874 } 1875 1876 /* ARGSUSED */ 1877 void 1878 hat_unlock_region(struct hat *hat, caddr_t addr, size_t len, 1879 hat_region_cookie_t rcookie) 1880 { 1881 panic("No shared region support on x86"); 1882 } 1883 1884 #if !defined(__xpv) 1885 /* 1886 * Cross call service routine to demap a virtual page on 1887 * the current CPU or flush all mappings in TLB. 1888 */ 1889 /*ARGSUSED*/ 1890 static int 1891 hati_demap_func(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3) 1892 { 1893 hat_t *hat = (hat_t *)a1; 1894 caddr_t addr = (caddr_t)a2; 1895 1896 /* 1897 * If the target hat isn't the kernel and this CPU isn't operating 1898 * in the target hat, we can ignore the cross call. 1899 */ 1900 if (hat != kas.a_hat && hat != CPU->cpu_current_hat) 1901 return (0); 1902 1903 /* 1904 * For a normal address, we just flush one page mapping 1905 */ 1906 if ((uintptr_t)addr != DEMAP_ALL_ADDR) { 1907 mmu_tlbflush_entry(addr); 1908 return (0); 1909 } 1910 1911 /* 1912 * Otherwise we reload cr3 to effect a complete TLB flush. 1913 * 1914 * A reload of cr3 on a VLP process also means we must also recopy in 1915 * the pte values from the struct hat 1916 */ 1917 if (hat->hat_flags & HAT_VLP) { 1918 #if defined(__amd64) 1919 x86pte_t *vlpptep = CPU->cpu_hat_info->hci_vlp_l2ptes; 1920 1921 VLP_COPY(hat->hat_vlp_ptes, vlpptep); 1922 #elif defined(__i386) 1923 reload_pae32(hat, CPU); 1924 #endif 1925 } 1926 reload_cr3(); 1927 return (0); 1928 } 1929 1930 /* 1931 * Flush all TLB entries, including global (ie. kernel) ones. 1932 */ 1933 static void 1934 flush_all_tlb_entries(void) 1935 { 1936 ulong_t cr4 = getcr4(); 1937 1938 if (cr4 & CR4_PGE) { 1939 setcr4(cr4 & ~(ulong_t)CR4_PGE); 1940 setcr4(cr4); 1941 1942 /* 1943 * 32 bit PAE also needs to always reload_cr3() 1944 */ 1945 if (mmu.max_level == 2) 1946 reload_cr3(); 1947 } else { 1948 reload_cr3(); 1949 } 1950 } 1951 1952 #define TLB_CPU_HALTED (01ul) 1953 #define TLB_INVAL_ALL (02ul) 1954 #define CAS_TLB_INFO(cpu, old, new) \ 1955 caslong((ulong_t *)&(cpu)->cpu_m.mcpu_tlb_info, (old), (new)) 1956 1957 /* 1958 * Record that a CPU is going idle 1959 */ 1960 void 1961 tlb_going_idle(void) 1962 { 1963 atomic_or_long((ulong_t *)&CPU->cpu_m.mcpu_tlb_info, TLB_CPU_HALTED); 1964 } 1965 1966 /* 1967 * Service a delayed TLB flush if coming out of being idle. 
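 *
 * The protocol: an idling CPU marks itself TLB_CPU_HALTED (tlb_going_idle()).
 * hat_tlb_inval() may then set TLB_CPU_HALTED | TLB_INVAL_ALL on it instead
 * of sending a cross call. On wakeup this routine atomically clears the
 * state and, if TLB_INVAL_ALL was requested, flushes the whole TLB.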
1968 */ 1969 void 1970 tlb_service(void) 1971 { 1972 ulong_t flags = getflags(); 1973 ulong_t tlb_info; 1974 ulong_t found; 1975 1976 /* 1977 * Be sure interrupts are off while doing this so that 1978 * higher level interrupts correctly wait for flushes to finish. 1979 */ 1980 if (flags & PS_IE) 1981 flags = intr_clear(); 1982 1983 /* 1984 * We only have to do something if coming out of being idle. 1985 */ 1986 tlb_info = CPU->cpu_m.mcpu_tlb_info; 1987 if (tlb_info & TLB_CPU_HALTED) { 1988 ASSERT(CPU->cpu_current_hat == kas.a_hat); 1989 1990 /* 1991 * Atomic clear and fetch of old state. 1992 */ 1993 while ((found = CAS_TLB_INFO(CPU, tlb_info, 0)) != tlb_info) { 1994 ASSERT(found & TLB_CPU_HALTED); 1995 tlb_info = found; 1996 SMT_PAUSE(); 1997 } 1998 if (tlb_info & TLB_INVAL_ALL) 1999 flush_all_tlb_entries(); 2000 } 2001 2002 /* 2003 * Restore interrupt enable control bit. 2004 */ 2005 if (flags & PS_IE) 2006 sti(); 2007 } 2008 #endif /* !__xpv */ 2009 2010 /* 2011 * Internal routine to do cross calls to invalidate a range of pages on 2012 * all CPUs using a given hat. 2013 */ 2014 void 2015 hat_tlb_inval(hat_t *hat, uintptr_t va) 2016 { 2017 extern int flushes_require_xcalls; /* from mp_startup.c */ 2018 cpuset_t justme; 2019 cpuset_t cpus_to_shootdown; 2020 #ifndef __xpv 2021 cpuset_t check_cpus; 2022 cpu_t *cpup; 2023 int c; 2024 #endif 2025 2026 /* 2027 * If the hat is being destroyed, there are no more users, so 2028 * demap need not do anything. 2029 */ 2030 if (hat->hat_flags & HAT_FREEING) 2031 return; 2032 2033 /* 2034 * If demapping from a shared pagetable, we best demap the 2035 * entire set of user TLBs, since we don't know what addresses 2036 * these were shared at. 2037 */ 2038 if (hat->hat_flags & HAT_SHARED) { 2039 hat = kas.a_hat; 2040 va = DEMAP_ALL_ADDR; 2041 } 2042 2043 /* 2044 * if not running with multiple CPUs, don't use cross calls 2045 */ 2046 if (panicstr || !flushes_require_xcalls) { 2047 #ifdef __xpv 2048 if (va == DEMAP_ALL_ADDR) 2049 xen_flush_tlb(); 2050 else 2051 xen_flush_va((caddr_t)va); 2052 #else 2053 (void) hati_demap_func((xc_arg_t)hat, (xc_arg_t)va, NULL); 2054 #endif 2055 return; 2056 } 2057 2058 2059 /* 2060 * Determine CPUs to shootdown. Kernel changes always do all CPUs. 2061 * Otherwise it's just CPUs currently executing in this hat. 2062 */ 2063 kpreempt_disable(); 2064 CPUSET_ONLY(justme, CPU->cpu_id); 2065 if (hat == kas.a_hat) 2066 cpus_to_shootdown = khat_cpuset; 2067 else 2068 cpus_to_shootdown = hat->hat_cpus; 2069 2070 #ifndef __xpv 2071 /* 2072 * If any CPUs in the set are idle, just request a delayed flush 2073 * and avoid waking them up. 
2074 */ 2075 check_cpus = cpus_to_shootdown; 2076 for (c = 0; c < NCPU && !CPUSET_ISNULL(check_cpus); ++c) { 2077 ulong_t tlb_info; 2078 2079 if (!CPU_IN_SET(check_cpus, c)) 2080 continue; 2081 CPUSET_DEL(check_cpus, c); 2082 cpup = cpu[c]; 2083 if (cpup == NULL) 2084 continue; 2085 2086 tlb_info = cpup->cpu_m.mcpu_tlb_info; 2087 while (tlb_info == TLB_CPU_HALTED) { 2088 (void) CAS_TLB_INFO(cpup, TLB_CPU_HALTED, 2089 TLB_CPU_HALTED | TLB_INVAL_ALL); 2090 SMT_PAUSE(); 2091 tlb_info = cpup->cpu_m.mcpu_tlb_info; 2092 } 2093 if (tlb_info == (TLB_CPU_HALTED | TLB_INVAL_ALL)) { 2094 HATSTAT_INC(hs_tlb_inval_delayed); 2095 CPUSET_DEL(cpus_to_shootdown, c); 2096 } 2097 } 2098 #endif 2099 2100 if (CPUSET_ISNULL(cpus_to_shootdown) || 2101 CPUSET_ISEQUAL(cpus_to_shootdown, justme)) { 2102 2103 #ifdef __xpv 2104 if (va == DEMAP_ALL_ADDR) 2105 xen_flush_tlb(); 2106 else 2107 xen_flush_va((caddr_t)va); 2108 #else 2109 (void) hati_demap_func((xc_arg_t)hat, (xc_arg_t)va, NULL); 2110 #endif 2111 2112 } else { 2113 2114 CPUSET_ADD(cpus_to_shootdown, CPU->cpu_id); 2115 #ifdef __xpv 2116 if (va == DEMAP_ALL_ADDR) 2117 xen_gflush_tlb(cpus_to_shootdown); 2118 else 2119 xen_gflush_va((caddr_t)va, cpus_to_shootdown); 2120 #else 2121 xc_call((xc_arg_t)hat, (xc_arg_t)va, NULL, X_CALL_HIPRI, 2122 cpus_to_shootdown, hati_demap_func); 2123 #endif 2124 2125 } 2126 kpreempt_enable(); 2127 } 2128 2129 /* 2130 * Interior routine for HAT_UNLOADs from hat_unload_callback(), 2131 * hat_kmap_unload() OR from hat_steal() code. This routine doesn't 2132 * handle releasing of the htables. 2133 */ 2134 void 2135 hat_pte_unmap( 2136 htable_t *ht, 2137 uint_t entry, 2138 uint_t flags, 2139 x86pte_t old_pte, 2140 void *pte_ptr) 2141 { 2142 hat_t *hat = ht->ht_hat; 2143 hment_t *hm = NULL; 2144 page_t *pp = NULL; 2145 level_t l = ht->ht_level; 2146 pfn_t pfn; 2147 2148 /* 2149 * We always track the locking counts, even if nothing is unmapped 2150 */ 2151 if ((flags & HAT_UNLOAD_UNLOCK) != 0 && hat != kas.a_hat) { 2152 ASSERT(ht->ht_lock_cnt > 0); 2153 HTABLE_LOCK_DEC(ht); 2154 } 2155 2156 /* 2157 * Figure out which page's mapping list lock to acquire using the PFN 2158 * passed in "old" PTE. We then attempt to invalidate the PTE. 2159 * If another thread, probably a hat_pageunload, has asynchronously 2160 * unmapped/remapped this address we'll loop here. 2161 */ 2162 ASSERT(ht->ht_busy > 0); 2163 while (PTE_ISVALID(old_pte)) { 2164 pfn = PTE2PFN(old_pte, l); 2165 if (PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST) { 2166 pp = NULL; 2167 } else { 2168 #ifdef __xpv 2169 if (pfn == PFN_INVALID) 2170 panic("Invalid PFN, but not PT_NOCONSIST"); 2171 #endif 2172 pp = page_numtopp_nolock(pfn); 2173 if (pp == NULL) { 2174 panic("no page_t, not NOCONSIST: old_pte=" 2175 FMT_PTE " ht=%lx entry=0x%x pte_ptr=%lx", 2176 old_pte, (uintptr_t)ht, entry, 2177 (uintptr_t)pte_ptr); 2178 } 2179 x86_hm_enter(pp); 2180 } 2181 2182 /* 2183 * If freeing the address space, check that the PTE 2184 * hasn't changed, as the mappings are no longer in use by 2185 * any thread, invalidation is unnecessary. 2186 * If not freeing, do a full invalidate. 2187 * 2188 * On the hypervisor we must always remove mappings, as a 2189 * writable mapping left behind could cause a page table 2190 * allocation to fail. 
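 * Hence the HAT_FREEING shortcut below (re-reading the PTE with
 * x86pte_get() rather than invalidating it) is compiled out under
 * __xpv, and the hypervisor build always goes through x86pte_inval().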
2191 */ 2192 #if !defined(__xpv) 2193 if (hat->hat_flags & HAT_FREEING) 2194 old_pte = x86pte_get(ht, entry); 2195 else 2196 #endif 2197 old_pte = x86pte_inval(ht, entry, old_pte, pte_ptr); 2198 2199 /* 2200 * If the page hadn't changed we've unmapped it and can proceed 2201 */ 2202 if (PTE_ISVALID(old_pte) && PTE2PFN(old_pte, l) == pfn) 2203 break; 2204 2205 /* 2206 * Otherwise, we'll have to retry with the current old_pte. 2207 * Drop the hment lock, since the pfn may have changed. 2208 */ 2209 if (pp != NULL) { 2210 x86_hm_exit(pp); 2211 pp = NULL; 2212 } else { 2213 ASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST); 2214 } 2215 } 2216 2217 /* 2218 * If the old mapping wasn't valid, there's nothing more to do 2219 */ 2220 if (!PTE_ISVALID(old_pte)) { 2221 if (pp != NULL) 2222 x86_hm_exit(pp); 2223 return; 2224 } 2225 2226 /* 2227 * Take care of syncing any MOD/REF bits and removing the hment. 2228 */ 2229 if (pp != NULL) { 2230 if (!(flags & HAT_UNLOAD_NOSYNC)) 2231 hati_sync_pte_to_page(pp, old_pte, l); 2232 hm = hment_remove(pp, ht, entry); 2233 x86_hm_exit(pp); 2234 if (hm != NULL) 2235 hment_free(hm); 2236 } 2237 2238 /* 2239 * Handle book keeping in the htable and hat 2240 */ 2241 ASSERT(ht->ht_valid_cnt > 0); 2242 HTABLE_DEC(ht->ht_valid_cnt); 2243 PGCNT_DEC(hat, l); 2244 } 2245 2246 /* 2247 * very cheap unload implementation to special case some kernel addresses 2248 */ 2249 static void 2250 hat_kmap_unload(caddr_t addr, size_t len, uint_t flags) 2251 { 2252 uintptr_t va = (uintptr_t)addr; 2253 uintptr_t eva = va + len; 2254 pgcnt_t pg_index; 2255 htable_t *ht; 2256 uint_t entry; 2257 x86pte_t *pte_ptr; 2258 x86pte_t old_pte; 2259 2260 for (; va < eva; va += MMU_PAGESIZE) { 2261 /* 2262 * Get the PTE 2263 */ 2264 pg_index = mmu_btop(va - mmu.kmap_addr); 2265 pte_ptr = PT_INDEX_PTR(mmu.kmap_ptes, pg_index); 2266 old_pte = GET_PTE(pte_ptr); 2267 2268 /* 2269 * get the htable / entry 2270 */ 2271 ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr) 2272 >> LEVEL_SHIFT(1)]; 2273 entry = htable_va2entry(va, ht); 2274 2275 /* 2276 * use mostly common code to unmap it. 2277 */ 2278 hat_pte_unmap(ht, entry, flags, old_pte, pte_ptr); 2279 } 2280 } 2281 2282 2283 /* 2284 * unload a range of virtual address space (no callback) 2285 */ 2286 void 2287 hat_unload(hat_t *hat, caddr_t addr, size_t len, uint_t flags) 2288 { 2289 uintptr_t va = (uintptr_t)addr; 2290 2291 XPV_DISALLOW_MIGRATE(); 2292 ASSERT(hat == kas.a_hat || va + len <= _userlimit); 2293 2294 /* 2295 * special case for performance. 2296 */ 2297 if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) { 2298 ASSERT(hat == kas.a_hat); 2299 hat_kmap_unload(addr, len, flags); 2300 } else { 2301 hat_unload_callback(hat, addr, len, flags, NULL); 2302 } 2303 XPV_ALLOW_MIGRATE(); 2304 } 2305 2306 /* 2307 * Do the callbacks for ranges being unloaded. 
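 *
 * Each range_info_t below describes one contiguous run of unloaded
 * mappings: rng_cnt mappings of LEVEL_SIZE(rng_level) bytes starting
 * at rng_va. For illustration, a caller of hat_unload_callback() that
 * supplies a hat_callback_t sees hcb_function invoked once per such
 * run, with hcb_start_addr/hcb_end_addr bracketing the run.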
2308 */ 2309 typedef struct range_info { 2310 uintptr_t rng_va; 2311 ulong_t rng_cnt; 2312 level_t rng_level; 2313 } range_info_t; 2314 2315 static void 2316 handle_ranges(hat_callback_t *cb, uint_t cnt, range_info_t *range) 2317 { 2318 /* 2319 * do callbacks to upper level VM system 2320 */ 2321 while (cb != NULL && cnt > 0) { 2322 --cnt; 2323 cb->hcb_start_addr = (caddr_t)range[cnt].rng_va; 2324 cb->hcb_end_addr = cb->hcb_start_addr; 2325 cb->hcb_end_addr += 2326 range[cnt].rng_cnt << LEVEL_SIZE(range[cnt].rng_level); 2327 cb->hcb_function(cb); 2328 } 2329 } 2330 2331 /* 2332 * Unload a given range of addresses (has optional callback) 2333 * 2334 * Flags: 2335 * define HAT_UNLOAD 0x00 2336 * define HAT_UNLOAD_NOSYNC 0x02 2337 * define HAT_UNLOAD_UNLOCK 0x04 2338 * define HAT_UNLOAD_OTHER 0x08 - not used 2339 * define HAT_UNLOAD_UNMAP 0x10 - same as HAT_UNLOAD 2340 */ 2341 #define MAX_UNLOAD_CNT (8) 2342 void 2343 hat_unload_callback( 2344 hat_t *hat, 2345 caddr_t addr, 2346 size_t len, 2347 uint_t flags, 2348 hat_callback_t *cb) 2349 { 2350 uintptr_t vaddr = (uintptr_t)addr; 2351 uintptr_t eaddr = vaddr + len; 2352 htable_t *ht = NULL; 2353 uint_t entry; 2354 uintptr_t contig_va = (uintptr_t)-1L; 2355 range_info_t r[MAX_UNLOAD_CNT]; 2356 uint_t r_cnt = 0; 2357 x86pte_t old_pte; 2358 2359 XPV_DISALLOW_MIGRATE(); 2360 ASSERT(hat == kas.a_hat || eaddr <= _userlimit); 2361 ASSERT(IS_PAGEALIGNED(vaddr)); 2362 ASSERT(IS_PAGEALIGNED(eaddr)); 2363 2364 /* 2365 * Special case a single page being unloaded for speed. This happens 2366 * quite frequently, COW faults after a fork() for example. 2367 */ 2368 if (cb == NULL && len == MMU_PAGESIZE) { 2369 ht = htable_getpte(hat, vaddr, &entry, &old_pte, 0); 2370 if (ht != NULL) { 2371 if (PTE_ISVALID(old_pte)) 2372 hat_pte_unmap(ht, entry, flags, old_pte, NULL); 2373 htable_release(ht); 2374 } 2375 XPV_ALLOW_MIGRATE(); 2376 return; 2377 } 2378 2379 while (vaddr < eaddr) { 2380 old_pte = htable_walk(hat, &ht, &vaddr, eaddr); 2381 if (ht == NULL) 2382 break; 2383 2384 ASSERT(!IN_VA_HOLE(vaddr)); 2385 2386 if (vaddr < (uintptr_t)addr) 2387 panic("hat_unload_callback(): unmap inside large page"); 2388 2389 /* 2390 * We'll do the call backs for contiguous ranges 2391 */ 2392 if (vaddr != contig_va || 2393 (r_cnt > 0 && r[r_cnt - 1].rng_level != ht->ht_level)) { 2394 if (r_cnt == MAX_UNLOAD_CNT) { 2395 handle_ranges(cb, r_cnt, r); 2396 r_cnt = 0; 2397 } 2398 r[r_cnt].rng_va = vaddr; 2399 r[r_cnt].rng_cnt = 0; 2400 r[r_cnt].rng_level = ht->ht_level; 2401 ++r_cnt; 2402 } 2403 2404 /* 2405 * Unload one mapping from the page tables. 2406 */ 2407 entry = htable_va2entry(vaddr, ht); 2408 hat_pte_unmap(ht, entry, flags, old_pte, NULL); 2409 ASSERT(ht->ht_level <= mmu.max_page_level); 2410 vaddr += LEVEL_SIZE(ht->ht_level); 2411 contig_va = vaddr; 2412 ++r[r_cnt - 1].rng_cnt; 2413 } 2414 if (ht) 2415 htable_release(ht); 2416 2417 /* 2418 * handle last range for callbacks 2419 */ 2420 if (r_cnt > 0) 2421 handle_ranges(cb, r_cnt, r); 2422 XPV_ALLOW_MIGRATE(); 2423 } 2424 2425 /* 2426 * synchronize mapping with software data structures 2427 * 2428 * This interface is currently only used by the working set monitor 2429 * driver. 
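 *
 * As a purely illustrative (hypothetical) example, a caller wanting to
 * harvest and then clear the hardware REF/MOD bits for a page aligned
 * region could use:
 *
 *	hat_sync(as->a_hat, seg_addr, seg_len, HAT_SYNC_ZERORM);
 *
 * where seg_addr/seg_len are placeholders; afterwards the accumulated
 * bits are visible through hat_page_getattr() on the page_t's.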
2430 */ 2431 /*ARGSUSED*/ 2432 void 2433 hat_sync(hat_t *hat, caddr_t addr, size_t len, uint_t flags) 2434 { 2435 uintptr_t vaddr = (uintptr_t)addr; 2436 uintptr_t eaddr = vaddr + len; 2437 htable_t *ht = NULL; 2438 uint_t entry; 2439 x86pte_t pte; 2440 x86pte_t save_pte; 2441 x86pte_t new; 2442 page_t *pp; 2443 2444 ASSERT(!IN_VA_HOLE(vaddr)); 2445 ASSERT(IS_PAGEALIGNED(vaddr)); 2446 ASSERT(IS_PAGEALIGNED(eaddr)); 2447 ASSERT(hat == kas.a_hat || eaddr <= _userlimit); 2448 2449 XPV_DISALLOW_MIGRATE(); 2450 for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) { 2451 try_again: 2452 pte = htable_walk(hat, &ht, &vaddr, eaddr); 2453 if (ht == NULL) 2454 break; 2455 entry = htable_va2entry(vaddr, ht); 2456 2457 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC || 2458 PTE_GET(pte, PT_REF | PT_MOD) == 0) 2459 continue; 2460 2461 /* 2462 * We need to acquire the mapping list lock to protect 2463 * against hat_pageunload(), hat_unload(), etc. 2464 */ 2465 pp = page_numtopp_nolock(PTE2PFN(pte, ht->ht_level)); 2466 if (pp == NULL) 2467 break; 2468 x86_hm_enter(pp); 2469 save_pte = pte; 2470 pte = x86pte_get(ht, entry); 2471 if (pte != save_pte) { 2472 x86_hm_exit(pp); 2473 goto try_again; 2474 } 2475 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC || 2476 PTE_GET(pte, PT_REF | PT_MOD) == 0) { 2477 x86_hm_exit(pp); 2478 continue; 2479 } 2480 2481 /* 2482 * Need to clear ref or mod bits. We may compete with 2483 * hardware updating the R/M bits and have to try again. 2484 */ 2485 if (flags == HAT_SYNC_ZERORM) { 2486 new = pte; 2487 PTE_CLR(new, PT_REF | PT_MOD); 2488 pte = hati_update_pte(ht, entry, pte, new); 2489 if (pte != 0) { 2490 x86_hm_exit(pp); 2491 goto try_again; 2492 } 2493 } else { 2494 /* 2495 * sync the PTE to the page_t 2496 */ 2497 hati_sync_pte_to_page(pp, save_pte, ht->ht_level); 2498 } 2499 x86_hm_exit(pp); 2500 } 2501 if (ht) 2502 htable_release(ht); 2503 XPV_ALLOW_MIGRATE(); 2504 } 2505 2506 /* 2507 * void hat_map(hat, addr, len, flags) 2508 */ 2509 /*ARGSUSED*/ 2510 void 2511 hat_map(hat_t *hat, caddr_t addr, size_t len, uint_t flags) 2512 { 2513 /* does nothing */ 2514 } 2515 2516 /* 2517 * uint_t hat_getattr(hat, addr, *attr) 2518 * returns attr for <hat,addr> in *attr. returns 0 if there was a 2519 * mapping and *attr is valid, nonzero if there was no mapping and 2520 * *attr is not valid. 
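 *
 * For illustration (hypothetical caller), testing whether an address
 * is currently mapped writable might look like:
 *
 *	uint_t attr;
 *
 *	if (hat_getattr(hat, addr, &attr) == 0 && (attr & PROT_WRITE))
 *		... mapping exists and is writable ...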
2521 */ 2522 uint_t 2523 hat_getattr(hat_t *hat, caddr_t addr, uint_t *attr) 2524 { 2525 uintptr_t vaddr = ALIGN2PAGE(addr); 2526 htable_t *ht = NULL; 2527 x86pte_t pte; 2528 2529 ASSERT(hat == kas.a_hat || vaddr <= _userlimit); 2530 2531 if (IN_VA_HOLE(vaddr)) 2532 return ((uint_t)-1); 2533 2534 ht = htable_getpte(hat, vaddr, NULL, &pte, mmu.max_page_level); 2535 if (ht == NULL) 2536 return ((uint_t)-1); 2537 2538 if (!PTE_ISVALID(pte) || !PTE_ISPAGE(pte, ht->ht_level)) { 2539 htable_release(ht); 2540 return ((uint_t)-1); 2541 } 2542 2543 *attr = PROT_READ; 2544 if (PTE_GET(pte, PT_WRITABLE)) 2545 *attr |= PROT_WRITE; 2546 if (PTE_GET(pte, PT_USER)) 2547 *attr |= PROT_USER; 2548 if (!PTE_GET(pte, mmu.pt_nx)) 2549 *attr |= PROT_EXEC; 2550 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC) 2551 *attr |= HAT_NOSYNC; 2552 htable_release(ht); 2553 return (0); 2554 } 2555 2556 /* 2557 * hat_updateattr() applies the given attribute change to an existing mapping 2558 */ 2559 #define HAT_LOAD_ATTR 1 2560 #define HAT_SET_ATTR 2 2561 #define HAT_CLR_ATTR 3 2562 2563 static void 2564 hat_updateattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr, int what) 2565 { 2566 uintptr_t vaddr = (uintptr_t)addr; 2567 uintptr_t eaddr = (uintptr_t)addr + len; 2568 htable_t *ht = NULL; 2569 uint_t entry; 2570 x86pte_t oldpte, newpte; 2571 page_t *pp; 2572 2573 XPV_DISALLOW_MIGRATE(); 2574 ASSERT(IS_PAGEALIGNED(vaddr)); 2575 ASSERT(IS_PAGEALIGNED(eaddr)); 2576 ASSERT(hat == kas.a_hat || 2577 AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock)); 2578 for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) { 2579 try_again: 2580 oldpte = htable_walk(hat, &ht, &vaddr, eaddr); 2581 if (ht == NULL) 2582 break; 2583 if (PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOCONSIST) 2584 continue; 2585 2586 pp = page_numtopp_nolock(PTE2PFN(oldpte, ht->ht_level)); 2587 if (pp == NULL) 2588 continue; 2589 x86_hm_enter(pp); 2590 2591 newpte = oldpte; 2592 /* 2593 * We found a page table entry in the desired range, 2594 * figure out the new attributes. 2595 */ 2596 if (what == HAT_SET_ATTR || what == HAT_LOAD_ATTR) { 2597 if ((attr & PROT_WRITE) && 2598 !PTE_GET(oldpte, PT_WRITABLE)) 2599 newpte |= PT_WRITABLE; 2600 2601 if ((attr & HAT_NOSYNC) && 2602 PTE_GET(oldpte, PT_SOFTWARE) < PT_NOSYNC) 2603 newpte |= PT_NOSYNC; 2604 2605 if ((attr & PROT_EXEC) && PTE_GET(oldpte, mmu.pt_nx)) 2606 newpte &= ~mmu.pt_nx; 2607 } 2608 2609 if (what == HAT_LOAD_ATTR) { 2610 if (!(attr & PROT_WRITE) && 2611 PTE_GET(oldpte, PT_WRITABLE)) 2612 newpte &= ~PT_WRITABLE; 2613 2614 if (!(attr & HAT_NOSYNC) && 2615 PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC) 2616 newpte &= ~PT_SOFTWARE; 2617 2618 if (!(attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx)) 2619 newpte |= mmu.pt_nx; 2620 } 2621 2622 if (what == HAT_CLR_ATTR) { 2623 if ((attr & PROT_WRITE) && PTE_GET(oldpte, PT_WRITABLE)) 2624 newpte &= ~PT_WRITABLE; 2625 2626 if ((attr & HAT_NOSYNC) && 2627 PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC) 2628 newpte &= ~PT_SOFTWARE; 2629 2630 if ((attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx)) 2631 newpte |= mmu.pt_nx; 2632 } 2633 2634 /* 2635 * Ensure NOSYNC/NOCONSIST mappings have REF and MOD set. 2636 * x86pte_set() depends on this. 2637 */ 2638 if (PTE_GET(newpte, PT_SOFTWARE) >= PT_NOSYNC) 2639 newpte |= PT_REF | PT_MOD; 2640 2641 /* 2642 * what about PROT_READ or others? this code only handles: 2643 * EXEC, WRITE, NOSYNC 2644 */ 2645 2646 /* 2647 * If new PTE really changed, update the table. 
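 * hati_update_pte() returns zero on success; a non-zero return means
 * the PTE changed underneath us (a hardware REF/MOD update or a
 * competing unmap), so we drop the mapping list lock and retry this
 * address via try_again.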
2648 */ 2649 if (newpte != oldpte) { 2650 entry = htable_va2entry(vaddr, ht); 2651 oldpte = hati_update_pte(ht, entry, oldpte, newpte); 2652 if (oldpte != 0) { 2653 x86_hm_exit(pp); 2654 goto try_again; 2655 } 2656 } 2657 x86_hm_exit(pp); 2658 } 2659 if (ht) 2660 htable_release(ht); 2661 XPV_ALLOW_MIGRATE(); 2662 } 2663 2664 /* 2665 * Various wrappers for hat_updateattr() 2666 */ 2667 void 2668 hat_setattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr) 2669 { 2670 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit); 2671 hat_updateattr(hat, addr, len, attr, HAT_SET_ATTR); 2672 } 2673 2674 void 2675 hat_clrattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr) 2676 { 2677 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit); 2678 hat_updateattr(hat, addr, len, attr, HAT_CLR_ATTR); 2679 } 2680 2681 void 2682 hat_chgattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr) 2683 { 2684 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit); 2685 hat_updateattr(hat, addr, len, attr, HAT_LOAD_ATTR); 2686 } 2687 2688 void 2689 hat_chgprot(hat_t *hat, caddr_t addr, size_t len, uint_t vprot) 2690 { 2691 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit); 2692 hat_updateattr(hat, addr, len, vprot & HAT_PROT_MASK, HAT_LOAD_ATTR); 2693 } 2694 2695 /* 2696 * size_t hat_getpagesize(hat, addr) 2697 * returns pagesize in bytes for <hat, addr>. returns -1 of there is 2698 * no mapping. This is an advisory call. 2699 */ 2700 ssize_t 2701 hat_getpagesize(hat_t *hat, caddr_t addr) 2702 { 2703 uintptr_t vaddr = ALIGN2PAGE(addr); 2704 htable_t *ht; 2705 size_t pagesize; 2706 2707 ASSERT(hat == kas.a_hat || vaddr <= _userlimit); 2708 if (IN_VA_HOLE(vaddr)) 2709 return (-1); 2710 ht = htable_getpage(hat, vaddr, NULL); 2711 if (ht == NULL) 2712 return (-1); 2713 pagesize = LEVEL_SIZE(ht->ht_level); 2714 htable_release(ht); 2715 return (pagesize); 2716 } 2717 2718 2719 2720 /* 2721 * pfn_t hat_getpfnum(hat, addr) 2722 * returns pfn for <hat, addr> or PFN_INVALID if mapping is invalid. 2723 */ 2724 pfn_t 2725 hat_getpfnum(hat_t *hat, caddr_t addr) 2726 { 2727 uintptr_t vaddr = ALIGN2PAGE(addr); 2728 htable_t *ht; 2729 uint_t entry; 2730 pfn_t pfn = PFN_INVALID; 2731 2732 ASSERT(hat == kas.a_hat || vaddr <= _userlimit); 2733 if (khat_running == 0) 2734 return (PFN_INVALID); 2735 2736 if (IN_VA_HOLE(vaddr)) 2737 return (PFN_INVALID); 2738 2739 XPV_DISALLOW_MIGRATE(); 2740 /* 2741 * A very common use of hat_getpfnum() is from the DDI for kernel pages. 2742 * Use the kmap_ptes (which also covers the 32 bit heap) to speed 2743 * this up. 2744 */ 2745 if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) { 2746 x86pte_t pte; 2747 pgcnt_t pg_index; 2748 2749 pg_index = mmu_btop(vaddr - mmu.kmap_addr); 2750 pte = GET_PTE(PT_INDEX_PTR(mmu.kmap_ptes, pg_index)); 2751 if (PTE_ISVALID(pte)) 2752 /*LINTED [use of constant 0 causes a lint warning] */ 2753 pfn = PTE2PFN(pte, 0); 2754 XPV_ALLOW_MIGRATE(); 2755 return (pfn); 2756 } 2757 2758 ht = htable_getpage(hat, vaddr, &entry); 2759 if (ht == NULL) { 2760 XPV_ALLOW_MIGRATE(); 2761 return (PFN_INVALID); 2762 } 2763 ASSERT(vaddr >= ht->ht_vaddr); 2764 ASSERT(vaddr <= HTABLE_LAST_PAGE(ht)); 2765 pfn = PTE2PFN(x86pte_get(ht, entry), ht->ht_level); 2766 if (ht->ht_level > 0) 2767 pfn += mmu_btop(vaddr & LEVEL_OFFSET(ht->ht_level)); 2768 htable_release(ht); 2769 XPV_ALLOW_MIGRATE(); 2770 return (pfn); 2771 } 2772 2773 /* 2774 * hat_getkpfnum() is an obsolete DDI routine, and its use is discouraged. 2775 * Use hat_getpfnum(kas.a_hat, ...) 
instead. 2776 * 2777 * We'd like to return PFN_INVALID if the mappings have underlying page_t's 2778 * but can't right now due to the fact that some software has grown to use 2779 * this interface incorrectly. So for now when the interface is misused, 2780 * return a warning to the user that in the future it won't work in the 2781 * way they're abusing it, and carry on. 2782 * 2783 * Note that hat_getkpfnum() is never supported on amd64. 2784 */ 2785 #if !defined(__amd64) 2786 pfn_t 2787 hat_getkpfnum(caddr_t addr) 2788 { 2789 pfn_t pfn; 2790 int badcaller = 0; 2791 2792 if (khat_running == 0) 2793 panic("hat_getkpfnum(): called too early\n"); 2794 if ((uintptr_t)addr < kernelbase) 2795 return (PFN_INVALID); 2796 2797 XPV_DISALLOW_MIGRATE(); 2798 if (segkpm && IS_KPM_ADDR(addr)) { 2799 badcaller = 1; 2800 pfn = hat_kpm_va2pfn(addr); 2801 } else { 2802 pfn = hat_getpfnum(kas.a_hat, addr); 2803 badcaller = pf_is_memory(pfn); 2804 } 2805 2806 if (badcaller) 2807 hat_getkpfnum_badcall(caller()); 2808 XPV_ALLOW_MIGRATE(); 2809 return (pfn); 2810 } 2811 #endif /* __amd64 */ 2812 2813 /* 2814 * int hat_probe(hat, addr) 2815 * return 0 if no valid mapping is present. Faster version 2816 * of hat_getattr in certain architectures. 2817 */ 2818 int 2819 hat_probe(hat_t *hat, caddr_t addr) 2820 { 2821 uintptr_t vaddr = ALIGN2PAGE(addr); 2822 uint_t entry; 2823 htable_t *ht; 2824 pgcnt_t pg_off; 2825 2826 ASSERT(hat == kas.a_hat || vaddr <= _userlimit); 2827 ASSERT(hat == kas.a_hat || 2828 AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock)); 2829 if (IN_VA_HOLE(vaddr)) 2830 return (0); 2831 2832 /* 2833 * Most common use of hat_probe is from segmap. We special case it 2834 * for performance. 2835 */ 2836 if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) { 2837 pg_off = mmu_btop(vaddr - mmu.kmap_addr); 2838 if (mmu.pae_hat) 2839 return (PTE_ISVALID(mmu.kmap_ptes[pg_off])); 2840 else 2841 return (PTE_ISVALID( 2842 ((x86pte32_t *)mmu.kmap_ptes)[pg_off])); 2843 } 2844 2845 ht = htable_getpage(hat, vaddr, &entry); 2846 htable_release(ht); 2847 return (ht != NULL); 2848 } 2849 2850 /* 2851 * Find out if the segment for hat_share()/hat_unshare() is DISM or locked ISM. 2852 */ 2853 static int 2854 is_it_dism(hat_t *hat, caddr_t va) 2855 { 2856 struct seg *seg; 2857 struct shm_data *shmd; 2858 struct spt_data *sptd; 2859 2860 seg = as_findseg(hat->hat_as, va, 0); 2861 ASSERT(seg != NULL); 2862 ASSERT(seg->s_base <= va); 2863 shmd = (struct shm_data *)seg->s_data; 2864 ASSERT(shmd != NULL); 2865 sptd = (struct spt_data *)shmd->shm_sptseg->s_data; 2866 ASSERT(sptd != NULL); 2867 if (sptd->spt_flags & SHM_PAGEABLE) 2868 return (1); 2869 return (0); 2870 } 2871 2872 /* 2873 * Simple implementation of ISM. hat_share() is similar to hat_memload_array(), 2874 * except that we use the ism_hat's existing mappings to determine the pages 2875 * and protections to use for this hat. If we find a full properly aligned 2876 * and sized pagetable, we will attempt to share the pagetable itself. 2877 */ 2878 /*ARGSUSED*/ 2879 int 2880 hat_share( 2881 hat_t *hat, 2882 caddr_t addr, 2883 hat_t *ism_hat, 2884 caddr_t src_addr, 2885 size_t len, /* almost useless value, see below.. 
*/ 2886 uint_t ismszc) 2887 { 2888 uintptr_t vaddr_start = (uintptr_t)addr; 2889 uintptr_t vaddr; 2890 uintptr_t eaddr = vaddr_start + len; 2891 uintptr_t ism_addr_start = (uintptr_t)src_addr; 2892 uintptr_t ism_addr = ism_addr_start; 2893 uintptr_t e_ism_addr = ism_addr + len; 2894 htable_t *ism_ht = NULL; 2895 htable_t *ht; 2896 x86pte_t pte; 2897 page_t *pp; 2898 pfn_t pfn; 2899 level_t l; 2900 pgcnt_t pgcnt; 2901 uint_t prot; 2902 int is_dism; 2903 int flags; 2904 2905 /* 2906 * We might be asked to share an empty DISM hat by as_dup() 2907 */ 2908 ASSERT(hat != kas.a_hat); 2909 ASSERT(eaddr <= _userlimit); 2910 if (!(ism_hat->hat_flags & HAT_SHARED)) { 2911 ASSERT(hat_get_mapped_size(ism_hat) == 0); 2912 return (0); 2913 } 2914 XPV_DISALLOW_MIGRATE(); 2915 2916 /* 2917 * The SPT segment driver often passes us a size larger than there are 2918 * valid mappings. That's because it rounds the segment size up to a 2919 * large pagesize, even if the actual memory mapped by ism_hat is less. 2920 */ 2921 ASSERT(IS_PAGEALIGNED(vaddr_start)); 2922 ASSERT(IS_PAGEALIGNED(ism_addr_start)); 2923 ASSERT(ism_hat->hat_flags & HAT_SHARED); 2924 is_dism = is_it_dism(hat, addr); 2925 while (ism_addr < e_ism_addr) { 2926 /* 2927 * use htable_walk to get the next valid ISM mapping 2928 */ 2929 pte = htable_walk(ism_hat, &ism_ht, &ism_addr, e_ism_addr); 2930 if (ism_ht == NULL) 2931 break; 2932 2933 /* 2934 * First check to see if we already share the page table. 2935 */ 2936 l = ism_ht->ht_level; 2937 vaddr = vaddr_start + (ism_addr - ism_addr_start); 2938 ht = htable_lookup(hat, vaddr, l); 2939 if (ht != NULL) { 2940 if (ht->ht_flags & HTABLE_SHARED_PFN) 2941 goto shared; 2942 htable_release(ht); 2943 goto not_shared; 2944 } 2945 2946 /* 2947 * Can't ever share top table. 2948 */ 2949 if (l == mmu.max_level) 2950 goto not_shared; 2951 2952 /* 2953 * Avoid level mismatches later due to DISM faults. 2954 */ 2955 if (is_dism && l > 0) 2956 goto not_shared; 2957 2958 /* 2959 * addresses and lengths must align 2960 * table must be fully populated 2961 * no lower level page tables 2962 */ 2963 if (ism_addr != ism_ht->ht_vaddr || 2964 (vaddr & LEVEL_OFFSET(l + 1)) != 0) 2965 goto not_shared; 2966 2967 /* 2968 * The range of address space must cover a full table. 2969 */ 2970 if (e_ism_addr - ism_addr < LEVEL_SIZE(l + 1)) 2971 goto not_shared; 2972 2973 /* 2974 * All entries in the ISM page table must be leaf PTEs. 2975 */ 2976 if (l > 0) { 2977 int e; 2978 2979 /* 2980 * We know the 0th is from htable_walk() above. 2981 */ 2982 for (e = 1; e < HTABLE_NUM_PTES(ism_ht); ++e) { 2983 x86pte_t pte; 2984 pte = x86pte_get(ism_ht, e); 2985 if (!PTE_ISPAGE(pte, l)) 2986 goto not_shared; 2987 } 2988 } 2989 2990 /* 2991 * share the page table 2992 */ 2993 ht = htable_create(hat, vaddr, l, ism_ht); 2994 shared: 2995 ASSERT(ht->ht_flags & HTABLE_SHARED_PFN); 2996 ASSERT(ht->ht_shares == ism_ht); 2997 hat->hat_ism_pgcnt += 2998 (ism_ht->ht_valid_cnt - ht->ht_valid_cnt) << 2999 (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT); 3000 ht->ht_valid_cnt = ism_ht->ht_valid_cnt; 3001 htable_release(ht); 3002 ism_addr = ism_ht->ht_vaddr + LEVEL_SIZE(l + 1); 3003 htable_release(ism_ht); 3004 ism_ht = NULL; 3005 continue; 3006 3007 not_shared: 3008 /* 3009 * Unable to share the page table. Instead we will 3010 * create new mappings from the values in the ISM mappings. 
3011 * Figure out what level size mappings to use; 3012 */ 3013 for (l = ism_ht->ht_level; l > 0; --l) { 3014 if (LEVEL_SIZE(l) <= eaddr - vaddr && 3015 (vaddr & LEVEL_OFFSET(l)) == 0) 3016 break; 3017 } 3018 3019 /* 3020 * The ISM mapping might be larger than the share area, 3021 * be careful to truncate it if needed. 3022 */ 3023 if (eaddr - vaddr >= LEVEL_SIZE(ism_ht->ht_level)) { 3024 pgcnt = mmu_btop(LEVEL_SIZE(ism_ht->ht_level)); 3025 } else { 3026 pgcnt = mmu_btop(eaddr - vaddr); 3027 l = 0; 3028 } 3029 3030 pfn = PTE2PFN(pte, ism_ht->ht_level); 3031 ASSERT(pfn != PFN_INVALID); 3032 while (pgcnt > 0) { 3033 /* 3034 * Make a new pte for the PFN for this level. 3035 * Copy protections for the pte from the ISM pte. 3036 */ 3037 pp = page_numtopp_nolock(pfn); 3038 ASSERT(pp != NULL); 3039 3040 prot = PROT_USER | PROT_READ | HAT_UNORDERED_OK; 3041 if (PTE_GET(pte, PT_WRITABLE)) 3042 prot |= PROT_WRITE; 3043 if (!PTE_GET(pte, PT_NX)) 3044 prot |= PROT_EXEC; 3045 3046 flags = HAT_LOAD; 3047 if (!is_dism) 3048 flags |= HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST; 3049 while (hati_load_common(hat, vaddr, pp, prot, flags, 3050 l, pfn) != 0) { 3051 if (l == 0) 3052 panic("hati_load_common() failure"); 3053 --l; 3054 } 3055 3056 vaddr += LEVEL_SIZE(l); 3057 ism_addr += LEVEL_SIZE(l); 3058 pfn += mmu_btop(LEVEL_SIZE(l)); 3059 pgcnt -= mmu_btop(LEVEL_SIZE(l)); 3060 } 3061 } 3062 if (ism_ht != NULL) 3063 htable_release(ism_ht); 3064 XPV_ALLOW_MIGRATE(); 3065 return (0); 3066 } 3067 3068 3069 /* 3070 * hat_unshare() is similar to hat_unload_callback(), but 3071 * we have to look for empty shared pagetables. Note that 3072 * hat_unshare() is always invoked against an entire segment. 3073 */ 3074 /*ARGSUSED*/ 3075 void 3076 hat_unshare(hat_t *hat, caddr_t addr, size_t len, uint_t ismszc) 3077 { 3078 uint64_t vaddr = (uintptr_t)addr; 3079 uintptr_t eaddr = vaddr + len; 3080 htable_t *ht = NULL; 3081 uint_t need_demaps = 0; 3082 int flags = HAT_UNLOAD_UNMAP; 3083 level_t l; 3084 3085 ASSERT(hat != kas.a_hat); 3086 ASSERT(eaddr <= _userlimit); 3087 ASSERT(IS_PAGEALIGNED(vaddr)); 3088 ASSERT(IS_PAGEALIGNED(eaddr)); 3089 XPV_DISALLOW_MIGRATE(); 3090 3091 /* 3092 * First go through and remove any shared pagetables. 3093 * 3094 * Note that it's ok to delay the TLB shootdown till the entire range is 3095 * finished, because if hat_pageunload() were to unload a shared 3096 * pagetable page, its hat_tlb_inval() will do a global TLB invalidate. 3097 */ 3098 l = mmu.max_page_level; 3099 if (l == mmu.max_level) 3100 --l; 3101 for (; l >= 0; --l) { 3102 for (vaddr = (uintptr_t)addr; vaddr < eaddr; 3103 vaddr = (vaddr & LEVEL_MASK(l + 1)) + LEVEL_SIZE(l + 1)) { 3104 ASSERT(!IN_VA_HOLE(vaddr)); 3105 /* 3106 * find a pagetable that maps the current address 3107 */ 3108 ht = htable_lookup(hat, vaddr, l); 3109 if (ht == NULL) 3110 continue; 3111 if (ht->ht_flags & HTABLE_SHARED_PFN) { 3112 /* 3113 * clear page count, set valid_cnt to 0, 3114 * let htable_release() finish the job 3115 */ 3116 hat->hat_ism_pgcnt -= ht->ht_valid_cnt << 3117 (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT); 3118 ht->ht_valid_cnt = 0; 3119 need_demaps = 1; 3120 } 3121 htable_release(ht); 3122 } 3123 } 3124 3125 /* 3126 * flush the TLBs - since we're probably dealing with MANY mappings 3127 * we do just one CR3 reload. 3128 */ 3129 if (!(hat->hat_flags & HAT_FREEING) && need_demaps) 3130 hat_tlb_inval(hat, DEMAP_ALL_ADDR); 3131 3132 /* 3133 * Now go back and clean up any unaligned mappings that 3134 * couldn't share pagetables. 
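 * For locked (non-DISM) ISM those mappings were established with
 * HAT_LOAD_LOCK in hat_share(), so the unload below must also pass
 * HAT_UNLOAD_UNLOCK to drop the lock counts.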
3135 */ 3136 if (!is_it_dism(hat, addr)) 3137 flags |= HAT_UNLOAD_UNLOCK; 3138 hat_unload(hat, addr, len, flags); 3139 XPV_ALLOW_MIGRATE(); 3140 } 3141 3142 3143 /* 3144 * hat_reserve() does nothing 3145 */ 3146 /*ARGSUSED*/ 3147 void 3148 hat_reserve(struct as *as, caddr_t addr, size_t len) 3149 { 3150 } 3151 3152 3153 /* 3154 * Called when all mappings to a page should have write permission removed. 3155 * Mostly stolem from hat_pagesync() 3156 */ 3157 static void 3158 hati_page_clrwrt(struct page *pp) 3159 { 3160 hment_t *hm = NULL; 3161 htable_t *ht; 3162 uint_t entry; 3163 x86pte_t old; 3164 x86pte_t new; 3165 uint_t pszc = 0; 3166 3167 XPV_DISALLOW_MIGRATE(); 3168 next_size: 3169 /* 3170 * walk thru the mapping list clearing write permission 3171 */ 3172 x86_hm_enter(pp); 3173 while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) { 3174 if (ht->ht_level < pszc) 3175 continue; 3176 old = x86pte_get(ht, entry); 3177 3178 for (;;) { 3179 /* 3180 * Is this mapping of interest? 3181 */ 3182 if (PTE2PFN(old, ht->ht_level) != pp->p_pagenum || 3183 PTE_GET(old, PT_WRITABLE) == 0) 3184 break; 3185 3186 /* 3187 * Clear ref/mod writable bits. This requires cross 3188 * calls to ensure any executing TLBs see cleared bits. 3189 */ 3190 new = old; 3191 PTE_CLR(new, PT_REF | PT_MOD | PT_WRITABLE); 3192 old = hati_update_pte(ht, entry, old, new); 3193 if (old != 0) 3194 continue; 3195 3196 break; 3197 } 3198 } 3199 x86_hm_exit(pp); 3200 while (pszc < pp->p_szc) { 3201 page_t *tpp; 3202 pszc++; 3203 tpp = PP_GROUPLEADER(pp, pszc); 3204 if (pp != tpp) { 3205 pp = tpp; 3206 goto next_size; 3207 } 3208 } 3209 XPV_ALLOW_MIGRATE(); 3210 } 3211 3212 /* 3213 * void hat_page_setattr(pp, flag) 3214 * void hat_page_clrattr(pp, flag) 3215 * used to set/clr ref/mod bits. 3216 */ 3217 void 3218 hat_page_setattr(struct page *pp, uint_t flag) 3219 { 3220 vnode_t *vp = pp->p_vnode; 3221 kmutex_t *vphm = NULL; 3222 page_t **listp; 3223 int noshuffle; 3224 3225 noshuffle = flag & P_NSH; 3226 flag &= ~P_NSH; 3227 3228 if (PP_GETRM(pp, flag) == flag) 3229 return; 3230 3231 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) && 3232 !noshuffle) { 3233 vphm = page_vnode_mutex(vp); 3234 mutex_enter(vphm); 3235 } 3236 3237 PP_SETRM(pp, flag); 3238 3239 if (vphm != NULL) { 3240 3241 /* 3242 * Some File Systems examine v_pages for NULL w/o 3243 * grabbing the vphm mutex. Must not let it become NULL when 3244 * pp is the only page on the list. 3245 */ 3246 if (pp->p_vpnext != pp) { 3247 page_vpsub(&vp->v_pages, pp); 3248 if (vp->v_pages != NULL) 3249 listp = &vp->v_pages->p_vpprev->p_vpnext; 3250 else 3251 listp = &vp->v_pages; 3252 page_vpadd(listp, pp); 3253 } 3254 mutex_exit(vphm); 3255 } 3256 } 3257 3258 void 3259 hat_page_clrattr(struct page *pp, uint_t flag) 3260 { 3261 vnode_t *vp = pp->p_vnode; 3262 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 3263 3264 /* 3265 * Caller is expected to hold page's io lock for VMODSORT to work 3266 * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod 3267 * bit is cleared. 3268 * We don't have assert to avoid tripping some existing third party 3269 * code. The dirty page is moved back to top of the v_page list 3270 * after IO is done in pvn_write_done(). 3271 */ 3272 PP_CLRRM(pp, flag); 3273 3274 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) { 3275 3276 /* 3277 * VMODSORT works by removing write permissions and getting 3278 * a fault when a page is made dirty. At this point 3279 * we need to remove write permission from all mappings 3280 * to this page. 
3281 */ 3282 hati_page_clrwrt(pp); 3283 } 3284 } 3285 3286 /* 3287 * If flag is specified, returns 0 if attribute is disabled 3288 * and non zero if enabled. If flag specifes multiple attributs 3289 * then returns 0 if ALL atriibutes are disabled. This is an advisory 3290 * call. 3291 */ 3292 uint_t 3293 hat_page_getattr(struct page *pp, uint_t flag) 3294 { 3295 return (PP_GETRM(pp, flag)); 3296 } 3297 3298 3299 /* 3300 * common code used by hat_pageunload() and hment_steal() 3301 */ 3302 hment_t * 3303 hati_page_unmap(page_t *pp, htable_t *ht, uint_t entry) 3304 { 3305 x86pte_t old_pte; 3306 pfn_t pfn = pp->p_pagenum; 3307 hment_t *hm; 3308 3309 /* 3310 * We need to acquire a hold on the htable in order to 3311 * do the invalidate. We know the htable must exist, since 3312 * unmap's don't release the htable until after removing any 3313 * hment. Having x86_hm_enter() keeps that from proceeding. 3314 */ 3315 htable_acquire(ht); 3316 3317 /* 3318 * Invalidate the PTE and remove the hment. 3319 */ 3320 old_pte = x86pte_inval(ht, entry, 0, NULL); 3321 if (PTE2PFN(old_pte, ht->ht_level) != pfn) { 3322 panic("x86pte_inval() failure found PTE = " FMT_PTE 3323 " pfn being unmapped is %lx ht=0x%lx entry=0x%x", 3324 old_pte, pfn, (uintptr_t)ht, entry); 3325 } 3326 3327 /* 3328 * Clean up all the htable information for this mapping 3329 */ 3330 ASSERT(ht->ht_valid_cnt > 0); 3331 HTABLE_DEC(ht->ht_valid_cnt); 3332 PGCNT_DEC(ht->ht_hat, ht->ht_level); 3333 3334 /* 3335 * sync ref/mod bits to the page_t 3336 */ 3337 if (PTE_GET(old_pte, PT_SOFTWARE) < PT_NOSYNC) 3338 hati_sync_pte_to_page(pp, old_pte, ht->ht_level); 3339 3340 /* 3341 * Remove the mapping list entry for this page. 3342 */ 3343 hm = hment_remove(pp, ht, entry); 3344 3345 /* 3346 * drop the mapping list lock so that we might free the 3347 * hment and htable. 3348 */ 3349 x86_hm_exit(pp); 3350 htable_release(ht); 3351 return (hm); 3352 } 3353 3354 extern int vpm_enable; 3355 /* 3356 * Unload all translations to a page. If the page is a subpage of a large 3357 * page, the large page mappings are also removed. 3358 * 3359 * The forceflags are unused. 3360 */ 3361 3362 /*ARGSUSED*/ 3363 static int 3364 hati_pageunload(struct page *pp, uint_t pg_szcd, uint_t forceflag) 3365 { 3366 page_t *cur_pp = pp; 3367 hment_t *hm; 3368 hment_t *prev; 3369 htable_t *ht; 3370 uint_t entry; 3371 level_t level; 3372 3373 XPV_DISALLOW_MIGRATE(); 3374 #if defined(__amd64) 3375 /* 3376 * clear the vpm ref. 3377 */ 3378 if (vpm_enable) { 3379 pp->p_vpmref = 0; 3380 } 3381 #endif 3382 /* 3383 * The loop with next_size handles pages with multiple pagesize mappings 3384 */ 3385 next_size: 3386 for (;;) { 3387 3388 /* 3389 * Get a mapping list entry 3390 */ 3391 x86_hm_enter(cur_pp); 3392 for (prev = NULL; ; prev = hm) { 3393 hm = hment_walk(cur_pp, &ht, &entry, prev); 3394 if (hm == NULL) { 3395 x86_hm_exit(cur_pp); 3396 3397 /* 3398 * If not part of a larger page, we're done. 3399 */ 3400 if (cur_pp->p_szc <= pg_szcd) { 3401 XPV_ALLOW_MIGRATE(); 3402 return (0); 3403 } 3404 3405 /* 3406 * Else check the next larger page size. 3407 * hat_page_demote() may decrease p_szc 3408 * but that's ok we'll just take an extra 3409 * trip discover there're no larger mappings 3410 * and return. 3411 */ 3412 ++pg_szcd; 3413 cur_pp = PP_GROUPLEADER(cur_pp, pg_szcd); 3414 goto next_size; 3415 } 3416 3417 /* 3418 * If this mapping size matches, remove it. 
3419 */ 3420 level = ht->ht_level; 3421 if (level == pg_szcd) 3422 break; 3423 } 3424 3425 /* 3426 * Remove the mapping list entry for this page. 3427 * Note this does the x86_hm_exit() for us. 3428 */ 3429 hm = hati_page_unmap(cur_pp, ht, entry); 3430 if (hm != NULL) 3431 hment_free(hm); 3432 } 3433 } 3434 3435 int 3436 hat_pageunload(struct page *pp, uint_t forceflag) 3437 { 3438 ASSERT(PAGE_EXCL(pp)); 3439 return (hati_pageunload(pp, 0, forceflag)); 3440 } 3441 3442 /* 3443 * Unload all large mappings to pp and reduce by 1 p_szc field of every large 3444 * page level that included pp. 3445 * 3446 * pp must be locked EXCL. Even though no other constituent pages are locked 3447 * it's legal to unload large mappings to pp because all constituent pages of 3448 * large locked mappings have to be locked SHARED. therefore if we have EXCL 3449 * lock on one of constituent pages none of the large mappings to pp are 3450 * locked. 3451 * 3452 * Change (always decrease) p_szc field starting from the last constituent 3453 * page and ending with root constituent page so that root's pszc always shows 3454 * the area where hat_page_demote() may be active. 3455 * 3456 * This mechanism is only used for file system pages where it's not always 3457 * possible to get EXCL locks on all constituent pages to demote the size code 3458 * (as is done for anonymous or kernel large pages). 3459 */ 3460 void 3461 hat_page_demote(page_t *pp) 3462 { 3463 uint_t pszc; 3464 uint_t rszc; 3465 uint_t szc; 3466 page_t *rootpp; 3467 page_t *firstpp; 3468 page_t *lastpp; 3469 pgcnt_t pgcnt; 3470 3471 ASSERT(PAGE_EXCL(pp)); 3472 ASSERT(!PP_ISFREE(pp)); 3473 ASSERT(page_szc_lock_assert(pp)); 3474 3475 if (pp->p_szc == 0) 3476 return; 3477 3478 rootpp = PP_GROUPLEADER(pp, 1); 3479 (void) hati_pageunload(rootpp, 1, HAT_FORCE_PGUNLOAD); 3480 3481 /* 3482 * all large mappings to pp are gone 3483 * and no new can be setup since pp is locked exclusively. 3484 * 3485 * Lock the root to make sure there's only one hat_page_demote() 3486 * outstanding within the area of this root's pszc. 3487 * 3488 * Second potential hat_page_demote() is already eliminated by upper 3489 * VM layer via page_szc_lock() but we don't rely on it and use our 3490 * own locking (so that upper layer locking can be changed without 3491 * assumptions that hat depends on upper layer VM to prevent multiple 3492 * hat_page_demote() to be issued simultaneously to the same large 3493 * page). 3494 */ 3495 again: 3496 pszc = pp->p_szc; 3497 if (pszc == 0) 3498 return; 3499 rootpp = PP_GROUPLEADER(pp, pszc); 3500 x86_hm_enter(rootpp); 3501 /* 3502 * If root's p_szc is different from pszc we raced with another 3503 * hat_page_demote(). Drop the lock and try to find the root again. 3504 * If root's p_szc is greater than pszc previous hat_page_demote() is 3505 * not done yet. Take and release mlist lock of root's root to wait 3506 * for previous hat_page_demote() to complete. 3507 */ 3508 if ((rszc = rootpp->p_szc) != pszc) { 3509 x86_hm_exit(rootpp); 3510 if (rszc > pszc) { 3511 /* p_szc of a locked non free page can't increase */ 3512 ASSERT(pp != rootpp); 3513 3514 rootpp = PP_GROUPLEADER(rootpp, rszc); 3515 x86_hm_enter(rootpp); 3516 x86_hm_exit(rootpp); 3517 } 3518 goto again; 3519 } 3520 ASSERT(pp->p_szc == pszc); 3521 3522 /* 3523 * Decrement by 1 p_szc of every constituent page of a region that 3524 * covered pp. For example if original szc is 3 it gets changed to 2 3525 * everywhere except in region 2 that covered pp. 
Region 2 that 3526 * covered pp gets demoted to 1 everywhere except in region 1 that 3527 * covered pp. The region 1 that covered pp is demoted to region 3528 * 0. It's done this way because from region 3 we removed level 3 3529 * mappings, from region 2 that covered pp we removed level 2 mappings 3530 * and from region 1 that covered pp we removed level 1 mappings. All 3531 * changes are done from high pfn's to low pfn's so that roots 3532 * are changed last allowing one to know the largest region where 3533 * hat_page_demote() is still active by only looking at the root page. 3534 * 3535 * This algorithm is implemented in 2 while loops. First loop changes 3536 * p_szc of pages to the right of pp's level 1 region and second 3537 * loop changes p_szc of pages of level 1 region that covers pp 3538 * and all pages to the left of level 1 region that covers pp. 3539 * In the first loop p_szc keeps dropping with every iteration 3540 * and in the second loop it keeps increasing with every iteration. 3541 * 3542 * First loop description: Demote pages to the right of pp outside of 3543 * level 1 region that covers pp. In every iteration of the while 3544 * loop below find the last page of szc region and the first page of 3545 * (szc - 1) region that is immediately to the right of (szc - 1) 3546 * region that covers pp. From last such page to first such page 3547 * change every page's szc to szc - 1. Decrement szc and continue 3548 * looping until szc is 1. If pp belongs to the last (szc - 1) region 3549 * of szc region skip to the next iteration. 3550 */ 3551 szc = pszc; 3552 while (szc > 1) { 3553 lastpp = PP_GROUPLEADER(pp, szc); 3554 pgcnt = page_get_pagecnt(szc); 3555 lastpp += pgcnt - 1; 3556 firstpp = PP_GROUPLEADER(pp, (szc - 1)); 3557 pgcnt = page_get_pagecnt(szc - 1); 3558 if (lastpp - firstpp < pgcnt) { 3559 szc--; 3560 continue; 3561 } 3562 firstpp += pgcnt; 3563 while (lastpp != firstpp) { 3564 ASSERT(lastpp->p_szc == pszc); 3565 lastpp->p_szc = szc - 1; 3566 lastpp--; 3567 } 3568 firstpp->p_szc = szc - 1; 3569 szc--; 3570 } 3571 3572 /* 3573 * Second loop description: 3574 * First iteration changes p_szc to 0 of every 3575 * page of level 1 region that covers pp. 3576 * Subsequent iterations find last page of szc region 3577 * immediately to the left of szc region that covered pp 3578 * and first page of (szc + 1) region that covers pp. 3579 * From last to first page change p_szc of every page to szc. 3580 * Increment szc and continue looping until szc is pszc. 3581 * If pp belongs to the first szc region of (szc + 1) region 3582 * skip to the next iteration.
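 *
 * As a concrete illustration with pszc == 2: once the large mappings
 * are unloaded, every constituent page of the szc 2 region ends up
 * with p_szc == 1, except the pages of the szc 1 region containing pp,
 * which end up with p_szc == 0 (pp included); as described above, the
 * root page of each region is always the last one written.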
3583 * 3584 */ 3585 szc = 0; 3586 while (szc < pszc) { 3587 firstpp = PP_GROUPLEADER(pp, (szc + 1)); 3588 if (szc == 0) { 3589 pgcnt = page_get_pagecnt(1); 3590 lastpp = firstpp + (pgcnt - 1); 3591 } else { 3592 lastpp = PP_GROUPLEADER(pp, szc); 3593 if (firstpp == lastpp) { 3594 szc++; 3595 continue; 3596 } 3597 lastpp--; 3598 pgcnt = page_get_pagecnt(szc); 3599 } 3600 while (lastpp != firstpp) { 3601 ASSERT(lastpp->p_szc == pszc); 3602 lastpp->p_szc = szc; 3603 lastpp--; 3604 } 3605 firstpp->p_szc = szc; 3606 if (firstpp == rootpp) 3607 break; 3608 szc++; 3609 } 3610 x86_hm_exit(rootpp); 3611 } 3612 3613 /* 3614 * get hw stats from hardware into page struct and reset hw stats 3615 * returns attributes of page 3616 * Flags for hat_pagesync, hat_getstat, hat_sync 3617 * 3618 * define HAT_SYNC_ZERORM 0x01 3619 * 3620 * Additional flags for hat_pagesync 3621 * 3622 * define HAT_SYNC_STOPON_REF 0x02 3623 * define HAT_SYNC_STOPON_MOD 0x04 3624 * define HAT_SYNC_STOPON_RM 0x06 3625 * define HAT_SYNC_STOPON_SHARED 0x08 3626 */ 3627 uint_t 3628 hat_pagesync(struct page *pp, uint_t flags) 3629 { 3630 hment_t *hm = NULL; 3631 htable_t *ht; 3632 uint_t entry; 3633 x86pte_t old, save_old; 3634 x86pte_t new; 3635 uchar_t nrmbits = P_REF|P_MOD|P_RO; 3636 extern ulong_t po_share; 3637 page_t *save_pp = pp; 3638 uint_t pszc = 0; 3639 3640 ASSERT(PAGE_LOCKED(pp) || panicstr); 3641 3642 if (PP_ISRO(pp) && (flags & HAT_SYNC_STOPON_MOD)) 3643 return (pp->p_nrm & nrmbits); 3644 3645 if ((flags & HAT_SYNC_ZERORM) == 0) { 3646 3647 if ((flags & HAT_SYNC_STOPON_REF) != 0 && PP_ISREF(pp)) 3648 return (pp->p_nrm & nrmbits); 3649 3650 if ((flags & HAT_SYNC_STOPON_MOD) != 0 && PP_ISMOD(pp)) 3651 return (pp->p_nrm & nrmbits); 3652 3653 if ((flags & HAT_SYNC_STOPON_SHARED) != 0 && 3654 hat_page_getshare(pp) > po_share) { 3655 if (PP_ISRO(pp)) 3656 PP_SETREF(pp); 3657 return (pp->p_nrm & nrmbits); 3658 } 3659 } 3660 3661 XPV_DISALLOW_MIGRATE(); 3662 next_size: 3663 /* 3664 * walk thru the mapping list syncing (and clearing) ref/mod bits. 3665 */ 3666 x86_hm_enter(pp); 3667 while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) { 3668 if (ht->ht_level < pszc) 3669 continue; 3670 old = x86pte_get(ht, entry); 3671 try_again: 3672 3673 ASSERT(PTE2PFN(old, ht->ht_level) == pp->p_pagenum); 3674 3675 if (PTE_GET(old, PT_REF | PT_MOD) == 0) 3676 continue; 3677 3678 save_old = old; 3679 if ((flags & HAT_SYNC_ZERORM) != 0) { 3680 3681 /* 3682 * Need to clear ref or mod bits. Need to demap 3683 * to make sure any executing TLBs see cleared bits. 3684 */ 3685 new = old; 3686 PTE_CLR(new, PT_REF | PT_MOD); 3687 old = hati_update_pte(ht, entry, old, new); 3688 if (old != 0) 3689 goto try_again; 3690 3691 old = save_old; 3692 } 3693 3694 /* 3695 * Sync the PTE 3696 */ 3697 if (!(flags & HAT_SYNC_ZERORM) && 3698 PTE_GET(old, PT_SOFTWARE) <= PT_NOSYNC) 3699 hati_sync_pte_to_page(pp, old, ht->ht_level); 3700 3701 /* 3702 * can stop short if we found a ref'd or mod'd page 3703 */ 3704 if ((flags & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp) || 3705 (flags & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)) { 3706 x86_hm_exit(pp); 3707 goto done; 3708 } 3709 } 3710 x86_hm_exit(pp); 3711 while (pszc < pp->p_szc) { 3712 page_t *tpp; 3713 pszc++; 3714 tpp = PP_GROUPLEADER(pp, pszc); 3715 if (pp != tpp) { 3716 pp = tpp; 3717 goto next_size; 3718 } 3719 } 3720 done: 3721 XPV_ALLOW_MIGRATE(); 3722 return (save_pp->p_nrm & nrmbits); 3723 } 3724 3725 /* 3726 * returns approx number of mappings to this pp. 
A return of 0 implies 3727 * there are no mappings to the page. 3728 */ 3729 ulong_t 3730 hat_page_getshare(page_t *pp) 3731 { 3732 uint_t cnt; 3733 cnt = hment_mapcnt(pp); 3734 #if defined(__amd64) 3735 if (vpm_enable && pp->p_vpmref) { 3736 cnt += 1; 3737 } 3738 #endif 3739 return (cnt); 3740 } 3741 3742 /* 3743 * Return 1 the number of mappings exceeds sh_thresh. Return 0 3744 * otherwise. 3745 */ 3746 int 3747 hat_page_checkshare(page_t *pp, ulong_t sh_thresh) 3748 { 3749 return (hat_page_getshare(pp) > sh_thresh); 3750 } 3751 3752 /* 3753 * hat_softlock isn't supported anymore 3754 */ 3755 /*ARGSUSED*/ 3756 faultcode_t 3757 hat_softlock( 3758 hat_t *hat, 3759 caddr_t addr, 3760 size_t *len, 3761 struct page **page_array, 3762 uint_t flags) 3763 { 3764 return (FC_NOSUPPORT); 3765 } 3766 3767 3768 3769 /* 3770 * Routine to expose supported HAT features to platform independent code. 3771 */ 3772 /*ARGSUSED*/ 3773 int 3774 hat_supported(enum hat_features feature, void *arg) 3775 { 3776 switch (feature) { 3777 3778 case HAT_SHARED_PT: /* this is really ISM */ 3779 return (1); 3780 3781 case HAT_DYNAMIC_ISM_UNMAP: 3782 return (0); 3783 3784 case HAT_VMODSORT: 3785 return (1); 3786 3787 case HAT_SHARED_REGIONS: 3788 return (0); 3789 3790 default: 3791 panic("hat_supported() - unknown feature"); 3792 } 3793 return (0); 3794 } 3795 3796 /* 3797 * Called when a thread is exiting and has been switched to the kernel AS 3798 */ 3799 void 3800 hat_thread_exit(kthread_t *thd) 3801 { 3802 ASSERT(thd->t_procp->p_as == &kas); 3803 XPV_DISALLOW_MIGRATE(); 3804 hat_switch(thd->t_procp->p_as->a_hat); 3805 XPV_ALLOW_MIGRATE(); 3806 } 3807 3808 /* 3809 * Setup the given brand new hat structure as the new HAT on this cpu's mmu. 3810 */ 3811 /*ARGSUSED*/ 3812 void 3813 hat_setup(hat_t *hat, int flags) 3814 { 3815 XPV_DISALLOW_MIGRATE(); 3816 kpreempt_disable(); 3817 3818 hat_switch(hat); 3819 3820 kpreempt_enable(); 3821 XPV_ALLOW_MIGRATE(); 3822 } 3823 3824 /* 3825 * Prepare for a CPU private mapping for the given address. 3826 * 3827 * The address can only be used from a single CPU and can be remapped 3828 * using hat_mempte_remap(). Return the address of the PTE. 3829 * 3830 * We do the htable_create() if necessary and increment the valid count so 3831 * the htable can't disappear. We also hat_devload() the page table into 3832 * kernel so that the PTE is quickly accessed. 3833 */ 3834 hat_mempte_t 3835 hat_mempte_setup(caddr_t addr) 3836 { 3837 uintptr_t va = (uintptr_t)addr; 3838 htable_t *ht; 3839 uint_t entry; 3840 x86pte_t oldpte; 3841 hat_mempte_t p; 3842 3843 ASSERT(IS_PAGEALIGNED(va)); 3844 ASSERT(!IN_VA_HOLE(va)); 3845 ++curthread->t_hatdepth; 3846 XPV_DISALLOW_MIGRATE(); 3847 ht = htable_getpte(kas.a_hat, va, &entry, &oldpte, 0); 3848 if (ht == NULL) { 3849 ht = htable_create(kas.a_hat, va, 0, NULL); 3850 entry = htable_va2entry(va, ht); 3851 ASSERT(ht->ht_level == 0); 3852 oldpte = x86pte_get(ht, entry); 3853 } 3854 if (PTE_ISVALID(oldpte)) 3855 panic("hat_mempte_setup(): address already mapped" 3856 "ht=%p, entry=%d, pte=" FMT_PTE, ht, entry, oldpte); 3857 3858 /* 3859 * increment ht_valid_cnt so that the pagetable can't disappear 3860 */ 3861 HTABLE_INC(ht->ht_valid_cnt); 3862 3863 /* 3864 * return the PTE physical address to the caller. 3865 */ 3866 htable_release(ht); 3867 XPV_ALLOW_MIGRATE(); 3868 p = PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry); 3869 --curthread->t_hatdepth; 3870 return (p); 3871 } 3872 3873 /* 3874 * Release a CPU private mapping for the given address. 
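 * As an illustrative (hypothetical) lifecycle of this facility on one
 * CPU, where attr and flags stand in for whatever protection/caching
 * the caller needs:
 *
 *	hat_mempte_t pte_pa = hat_mempte_setup(va);
 *	...
 *	kpreempt_disable();
 *	hat_mempte_remap(pfn, va, pte_pa, attr, flags);
 *	... access the page through va ...
 *	kpreempt_enable();
 *	...
 *	hat_mempte_release(va, pte_pa);
 *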
3875 * We decrement the htable valid count so it might be destroyed. 3876 */ 3877 /*ARGSUSED1*/ 3878 void 3879 hat_mempte_release(caddr_t addr, hat_mempte_t pte_pa) 3880 { 3881 htable_t *ht; 3882 3883 XPV_DISALLOW_MIGRATE(); 3884 /* 3885 * invalidate any left over mapping and decrement the htable valid count 3886 */ 3887 #ifdef __xpv 3888 if (HYPERVISOR_update_va_mapping((uintptr_t)addr, 0, 3889 UVMF_INVLPG | UVMF_LOCAL)) 3890 panic("HYPERVISOR_update_va_mapping() failed"); 3891 #else 3892 { 3893 x86pte_t *pteptr; 3894 3895 pteptr = x86pte_mapin(mmu_btop(pte_pa), 3896 (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL); 3897 if (mmu.pae_hat) 3898 *pteptr = 0; 3899 else 3900 *(x86pte32_t *)pteptr = 0; 3901 mmu_tlbflush_entry(addr); 3902 x86pte_mapout(); 3903 } 3904 #endif 3905 3906 ht = htable_getpte(kas.a_hat, ALIGN2PAGE(addr), NULL, NULL, 0); 3907 if (ht == NULL) 3908 panic("hat_mempte_release(): invalid address"); 3909 ASSERT(ht->ht_level == 0); 3910 HTABLE_DEC(ht->ht_valid_cnt); 3911 htable_release(ht); 3912 XPV_ALLOW_MIGRATE(); 3913 } 3914 3915 /* 3916 * Apply a temporary CPU private mapping to a page. We flush the TLB only 3917 * on this CPU, so this ought to have been called with preemption disabled. 3918 */ 3919 void 3920 hat_mempte_remap( 3921 pfn_t pfn, 3922 caddr_t addr, 3923 hat_mempte_t pte_pa, 3924 uint_t attr, 3925 uint_t flags) 3926 { 3927 uintptr_t va = (uintptr_t)addr; 3928 x86pte_t pte; 3929 3930 /* 3931 * Remap the given PTE to the new page's PFN. Invalidate only 3932 * on this CPU. 3933 */ 3934 #ifdef DEBUG 3935 htable_t *ht; 3936 uint_t entry; 3937 3938 ASSERT(IS_PAGEALIGNED(va)); 3939 ASSERT(!IN_VA_HOLE(va)); 3940 ht = htable_getpte(kas.a_hat, va, &entry, NULL, 0); 3941 ASSERT(ht != NULL); 3942 ASSERT(ht->ht_level == 0); 3943 ASSERT(ht->ht_valid_cnt > 0); 3944 ASSERT(ht->ht_pfn == mmu_btop(pte_pa)); 3945 htable_release(ht); 3946 #endif 3947 XPV_DISALLOW_MIGRATE(); 3948 pte = hati_mkpte(pfn, attr, 0, flags); 3949 #ifdef __xpv 3950 if (HYPERVISOR_update_va_mapping(va, pte, UVMF_INVLPG | UVMF_LOCAL)) 3951 panic("HYPERVISOR_update_va_mapping() failed"); 3952 #else 3953 { 3954 x86pte_t *pteptr; 3955 3956 pteptr = x86pte_mapin(mmu_btop(pte_pa), 3957 (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL); 3958 if (mmu.pae_hat) 3959 *(x86pte_t *)pteptr = pte; 3960 else 3961 *(x86pte32_t *)pteptr = (x86pte32_t)pte; 3962 mmu_tlbflush_entry(addr); 3963 x86pte_mapout(); 3964 } 3965 #endif 3966 XPV_ALLOW_MIGRATE(); 3967 } 3968 3969 3970 3971 /* 3972 * Hat locking functions 3973 * XXX - these two functions are currently being used by hatstats 3974 * they can be removed by using a per-as mutex for hatstats. 3975 */ 3976 void 3977 hat_enter(hat_t *hat) 3978 { 3979 mutex_enter(&hat->hat_mutex); 3980 } 3981 3982 void 3983 hat_exit(hat_t *hat) 3984 { 3985 mutex_exit(&hat->hat_mutex); 3986 } 3987 3988 /* 3989 * HAT part of cpu initialization. 3990 */ 3991 void 3992 hat_cpu_online(struct cpu *cpup) 3993 { 3994 if (cpup != CPU) { 3995 x86pte_cpu_init(cpup); 3996 hat_vlp_setup(cpup); 3997 } 3998 CPUSET_ATOMIC_ADD(khat_cpuset, cpup->cpu_id); 3999 } 4000 4001 /* 4002 * HAT part of cpu deletion. 4003 * (currently, we only call this after the cpu is safely passivated.) 4004 */ 4005 void 4006 hat_cpu_offline(struct cpu *cpup) 4007 { 4008 ASSERT(cpup != CPU); 4009 4010 CPUSET_ATOMIC_DEL(khat_cpuset, cpup->cpu_id); 4011 x86pte_cpu_fini(cpup); 4012 hat_vlp_teardown(cpup); 4013 } 4014 4015 /* 4016 * Function called after all CPUs are brought online. 
4017 * Used to remove low address boot mappings. 4018 */ 4019 void 4020 clear_boot_mappings(uintptr_t low, uintptr_t high) 4021 { 4022 uintptr_t vaddr = low; 4023 htable_t *ht = NULL; 4024 level_t level; 4025 uint_t entry; 4026 x86pte_t pte; 4027 4028 /* 4029 * On 1st CPU we can unload the prom mappings, basically we blow away 4030 * all virtual mappings under _userlimit. 4031 */ 4032 while (vaddr < high) { 4033 pte = htable_walk(kas.a_hat, &ht, &vaddr, high); 4034 if (ht == NULL) 4035 break; 4036 4037 level = ht->ht_level; 4038 entry = htable_va2entry(vaddr, ht); 4039 ASSERT(level <= mmu.max_page_level); 4040 ASSERT(PTE_ISPAGE(pte, level)); 4041 4042 /* 4043 * Unload the mapping from the page tables. 4044 */ 4045 (void) x86pte_inval(ht, entry, 0, NULL); 4046 ASSERT(ht->ht_valid_cnt > 0); 4047 HTABLE_DEC(ht->ht_valid_cnt); 4048 PGCNT_DEC(ht->ht_hat, ht->ht_level); 4049 4050 vaddr += LEVEL_SIZE(ht->ht_level); 4051 } 4052 if (ht) 4053 htable_release(ht); 4054 } 4055 4056 /* 4057 * Atomically update a new translation for a single page. If the 4058 * currently installed PTE doesn't match the value we expect to find, 4059 * it's not updated and we return the PTE we found. 4060 * 4061 * If activating nosync or NOWRITE and the page was modified we need to sync 4062 * with the page_t. Also sync with page_t if clearing ref/mod bits. 4063 */ 4064 static x86pte_t 4065 hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected, x86pte_t new) 4066 { 4067 page_t *pp; 4068 uint_t rm = 0; 4069 x86pte_t replaced; 4070 4071 if (PTE_GET(expected, PT_SOFTWARE) < PT_NOSYNC && 4072 PTE_GET(expected, PT_MOD | PT_REF) && 4073 (PTE_GET(new, PT_NOSYNC) || !PTE_GET(new, PT_WRITABLE) || 4074 !PTE_GET(new, PT_MOD | PT_REF))) { 4075 4076 ASSERT(!pfn_is_foreign(PTE2PFN(expected, ht->ht_level))); 4077 pp = page_numtopp_nolock(PTE2PFN(expected, ht->ht_level)); 4078 ASSERT(pp != NULL); 4079 if (PTE_GET(expected, PT_MOD)) 4080 rm |= P_MOD; 4081 if (PTE_GET(expected, PT_REF)) 4082 rm |= P_REF; 4083 PTE_CLR(new, PT_MOD | PT_REF); 4084 } 4085 4086 replaced = x86pte_update(ht, entry, expected, new); 4087 if (replaced != expected) 4088 return (replaced); 4089 4090 if (rm) { 4091 /* 4092 * sync to all constituent pages of a large page 4093 */ 4094 pgcnt_t pgcnt = page_get_pagecnt(ht->ht_level); 4095 ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt)); 4096 while (pgcnt-- > 0) { 4097 /* 4098 * hat_page_demote() can't decrease 4099 * pszc below this mapping size 4100 * since large mapping existed after we 4101 * took mlist lock. 
4102 */ 4103 ASSERT(pp->p_szc >= ht->ht_level); 4104 hat_page_setattr(pp, rm); 4105 ++pp; 4106 } 4107 } 4108 4109 return (0); 4110 } 4111 4112 /* ARGSUSED */ 4113 void 4114 hat_join_srd(struct hat *hat, vnode_t *evp) 4115 { 4116 } 4117 4118 /* ARGSUSED */ 4119 hat_region_cookie_t 4120 hat_join_region(struct hat *hat, 4121 caddr_t r_saddr, 4122 size_t r_size, 4123 void *r_obj, 4124 u_offset_t r_objoff, 4125 uchar_t r_perm, 4126 uchar_t r_pgszc, 4127 hat_rgn_cb_func_t r_cb_function, 4128 uint_t flags) 4129 { 4130 panic("No shared region support on x86"); 4131 return (HAT_INVALID_REGION_COOKIE); 4132 } 4133 4134 /* ARGSUSED */ 4135 void 4136 hat_leave_region(struct hat *hat, hat_region_cookie_t rcookie, uint_t flags) 4137 { 4138 panic("No shared region support on x86"); 4139 } 4140 4141 /* ARGSUSED */ 4142 void 4143 hat_dup_region(struct hat *hat, hat_region_cookie_t rcookie) 4144 { 4145 panic("No shared region support on x86"); 4146 } 4147 4148 4149 /* 4150 * Kernel Physical Mapping (kpm) facility 4151 * 4152 * Most of the routines needed to support segkpm are almost no-ops on the 4153 * x86 platform. We map in the entire segment when it is created and leave 4154 * it mapped in, so there is no additional work required to set up and tear 4155 * down individual mappings. All of these routines were created to support 4156 * SPARC platforms that have to avoid aliasing in their virtually indexed 4157 * caches. 4158 * 4159 * Most of the routines have sanity checks in them (e.g. verifying that the 4160 * passed-in page is locked). We don't actually care about most of these 4161 * checks on x86, but we leave them in place to identify problems in the 4162 * upper levels. 4163 */ 4164 4165 /* 4166 * Map in a locked page and return the vaddr. 4167 */ 4168 /*ARGSUSED*/ 4169 caddr_t 4170 hat_kpm_mapin(struct page *pp, struct kpme *kpme) 4171 { 4172 caddr_t vaddr; 4173 4174 #ifdef DEBUG 4175 if (kpm_enable == 0) { 4176 cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set\n"); 4177 return ((caddr_t)NULL); 4178 } 4179 4180 if (pp == NULL || PAGE_LOCKED(pp) == 0) { 4181 cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked\n"); 4182 return ((caddr_t)NULL); 4183 } 4184 #endif 4185 4186 vaddr = hat_kpm_page2va(pp, 1); 4187 4188 return (vaddr); 4189 } 4190 4191 /* 4192 * Mapout a locked page. 4193 */ 4194 /*ARGSUSED*/ 4195 void 4196 hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr) 4197 { 4198 #ifdef DEBUG 4199 if (kpm_enable == 0) { 4200 cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set\n"); 4201 return; 4202 } 4203 4204 if (IS_KPM_ADDR(vaddr) == 0) { 4205 cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address\n"); 4206 return; 4207 } 4208 4209 if (pp == NULL || PAGE_LOCKED(pp) == 0) { 4210 cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked\n"); 4211 return; 4212 } 4213 #endif 4214 } 4215 4216 /* 4217 * Return the kpm virtual address for a specific pfn 4218 */ 4219 caddr_t 4220 hat_kpm_pfn2va(pfn_t pfn) 4221 { 4222 uintptr_t vaddr = (uintptr_t)kpm_vbase + mmu_ptob(pfn); 4223 4224 ASSERT(!pfn_is_foreign(pfn)); 4225 return ((caddr_t)vaddr); 4226 } 4227 4228 /* 4229 * Return the kpm virtual address for the page at pp. 4230 */ 4231 /*ARGSUSED*/ 4232 caddr_t 4233 hat_kpm_page2va(struct page *pp, int checkswap) 4234 { 4235 return (hat_kpm_pfn2va(pp->p_pagenum)); 4236 } 4237 4238 /* 4239 * Return the page frame number for the kpm virtual address vaddr. 
4240 */ 4241 pfn_t 4242 hat_kpm_va2pfn(caddr_t vaddr) 4243 { 4244 pfn_t pfn; 4245 4246 ASSERT(IS_KPM_ADDR(vaddr)); 4247 4248 pfn = (pfn_t)btop(vaddr - kpm_vbase); 4249 4250 return (pfn); 4251 } 4252 4253 4254 /* 4255 * Return the page for the kpm virtual address vaddr. 4256 */ 4257 page_t * 4258 hat_kpm_vaddr2page(caddr_t vaddr) 4259 { 4260 pfn_t pfn; 4261 4262 ASSERT(IS_KPM_ADDR(vaddr)); 4263 4264 pfn = hat_kpm_va2pfn(vaddr); 4265 4266 return (page_numtopp_nolock(pfn)); 4267 } 4268 4269 /* 4270 * hat_kpm_fault is called from segkpm_fault when we take a page fault on a 4271 * KPM page. This should never happen on x86 4272 */ 4273 int 4274 hat_kpm_fault(hat_t *hat, caddr_t vaddr) 4275 { 4276 panic("pagefault in seg_kpm. hat: 0x%p vaddr: 0x%p", hat, vaddr); 4277 4278 return (0); 4279 } 4280 4281 /*ARGSUSED*/ 4282 void 4283 hat_kpm_mseghash_clear(int nentries) 4284 {} 4285 4286 /*ARGSUSED*/ 4287 void 4288 hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp) 4289 {} 4290 4291 #ifdef __xpv 4292 /* 4293 * There are specific Hypervisor calls to establish and remove mappings 4294 * to grant table references and the privcmd driver. We have to ensure 4295 * that a page table actually exists. 4296 */ 4297 void 4298 hat_prepare_mapping(hat_t *hat, caddr_t addr) 4299 { 4300 ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE)); 4301 XPV_DISALLOW_MIGRATE(); 4302 (void) htable_create(hat, (uintptr_t)addr, 0, NULL); 4303 XPV_ALLOW_MIGRATE(); 4304 } 4305 4306 void 4307 hat_release_mapping(hat_t *hat, caddr_t addr) 4308 { 4309 htable_t *ht; 4310 4311 ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE)); 4312 XPV_DISALLOW_MIGRATE(); 4313 ht = htable_lookup(hat, (uintptr_t)addr, 0); 4314 ASSERT(ht != NULL); 4315 ASSERT(ht->ht_busy >= 2); 4316 htable_release(ht); 4317 htable_release(ht); 4318 XPV_ALLOW_MIGRATE(); 4319 } 4320 #endif 4321