1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 /* 29 * VM - Hardware Address Translation management for Spitfire MMU. 30 * 31 * This file implements the machine specific hardware translation 32 * needed by the VM system. The machine independent interface is 33 * described in <vm/hat.h> while the machine dependent interface 34 * and data structures are described in <vm/hat_sfmmu.h>. 35 * 36 * The hat layer manages the address translation hardware as a cache 37 * driven by calls from the higher levels in the VM system. 38 */ 39 40 #include <sys/types.h> 41 #include <sys/kstat.h> 42 #include <vm/hat.h> 43 #include <vm/hat_sfmmu.h> 44 #include <vm/page.h> 45 #include <sys/pte.h> 46 #include <sys/systm.h> 47 #include <sys/mman.h> 48 #include <sys/sysmacros.h> 49 #include <sys/machparam.h> 50 #include <sys/vtrace.h> 51 #include <sys/kmem.h> 52 #include <sys/mmu.h> 53 #include <sys/cmn_err.h> 54 #include <sys/cpu.h> 55 #include <sys/cpuvar.h> 56 #include <sys/debug.h> 57 #include <sys/lgrp.h> 58 #include <sys/archsystm.h> 59 #include <sys/machsystm.h> 60 #include <sys/vmsystm.h> 61 #include <vm/as.h> 62 #include <vm/seg.h> 63 #include <vm/seg_kp.h> 64 #include <vm/seg_kmem.h> 65 #include <vm/seg_kpm.h> 66 #include <vm/rm.h> 67 #include <sys/t_lock.h> 68 #include <sys/obpdefs.h> 69 #include <sys/vm_machparam.h> 70 #include <sys/var.h> 71 #include <sys/trap.h> 72 #include <sys/machtrap.h> 73 #include <sys/scb.h> 74 #include <sys/bitmap.h> 75 #include <sys/machlock.h> 76 #include <sys/membar.h> 77 #include <sys/atomic.h> 78 #include <sys/cpu_module.h> 79 #include <sys/prom_debug.h> 80 #include <sys/ksynch.h> 81 #include <sys/mem_config.h> 82 #include <sys/mem_cage.h> 83 #include <vm/vm_dep.h> 84 #include <vm/xhat_sfmmu.h> 85 #include <sys/fpu/fpusystm.h> 86 #include <vm/mach_kpm.h> 87 88 #if defined(SF_ERRATA_57) 89 extern caddr_t errata57_limit; 90 #endif 91 92 #define HME8BLK_SZ_RND ((roundup(HME8BLK_SZ, sizeof (int64_t))) / \ 93 (sizeof (int64_t))) 94 #define HBLK_RESERVE ((struct hme_blk *)hblk_reserve) 95 96 #define HBLK_RESERVE_CNT 128 97 #define HBLK_RESERVE_MIN 20 98 99 static struct hme_blk *freehblkp; 100 static kmutex_t freehblkp_lock; 101 static int freehblkcnt; 102 103 static int64_t hblk_reserve[HME8BLK_SZ_RND]; 104 static kmutex_t hblk_reserve_lock; 105 static kthread_t *hblk_reserve_thread; 106 107 static nucleus_hblk8_info_t nucleus_hblk8; 108 static nucleus_hblk1_info_t nucleus_hblk1; 109 110 /* 111 * SFMMU specific hat functions 112 */ 113 void hat_pagecachectl(struct page *, int); 114 115 /* flags for hat_pagecachectl */ 116 #define 
HAT_CACHE 0x1 117 #define HAT_UNCACHE 0x2 118 #define HAT_TMPNC 0x4 119 120 /* 121 * Flag to allow the creation of non-cacheable translations 122 * to system memory. It is off by default. At the moment this 123 * flag is used by the ecache error injector. The error injector 124 * will turn it on when creating such a translation then shut it 125 * off when it's finished. 126 */ 127 128 int sfmmu_allow_nc_trans = 0; 129 130 /* 131 * Flag to disable large page support. 132 * value of 1 => disable all large pages. 133 * bits 1, 2, and 3 are to disable 64K, 512K and 4M pages respectively. 134 * 135 * For example, use the value 0x4 to disable 512K pages. 136 * 137 */ 138 #define LARGE_PAGES_OFF 0x1 139 140 /* 141 * The disable_large_pages and disable_ism_large_pages variables control 142 * hat_memload_array and the page sizes to be used by ISM and the kernel. 143 * 144 * The disable_auto_data_large_pages and disable_auto_text_large_pages variables 145 * are only used to control which OOB pages to use at upper VM segment creation 146 * time, and are set in hat_init_pagesizes and used in the map_pgsz* routines. 147 * Their values may come from platform or CPU specific code to disable page 148 * sizes that should not be used. 149 * 150 * WARNING: 512K pages are currently not supported for ISM/DISM. 151 */ 152 uint_t disable_large_pages = 0; 153 uint_t disable_ism_large_pages = (1 << TTE512K); 154 uint_t disable_auto_data_large_pages = 0; 155 uint_t disable_auto_text_large_pages = 0; 156 157 /* 158 * Private sfmmu data structures for hat management 159 */ 160 static struct kmem_cache *sfmmuid_cache; 161 static struct kmem_cache *mmuctxdom_cache; 162 163 /* 164 * Private sfmmu data structures for tsb management 165 */ 166 static struct kmem_cache *sfmmu_tsbinfo_cache; 167 static struct kmem_cache *sfmmu_tsb8k_cache; 168 static struct kmem_cache *sfmmu_tsb_cache[NLGRPS_MAX]; 169 static vmem_t *kmem_tsb_arena; 170 171 /* 172 * sfmmu static variables for hmeblk resource management. 173 */ 174 static vmem_t *hat_memload1_arena; /* HAT translation arena for sfmmu1_cache */ 175 static struct kmem_cache *sfmmu8_cache; 176 static struct kmem_cache *sfmmu1_cache; 177 static struct kmem_cache *pa_hment_cache; 178 179 static kmutex_t ism_mlist_lock; /* mutex for ism mapping list */ 180 /* 181 * private data for ism 182 */ 183 static struct kmem_cache *ism_blk_cache; 184 static struct kmem_cache *ism_ment_cache; 185 #define ISMID_STARTADDR NULL 186 187 /* 188 * Whether to delay TLB flushes and use Cheetah's flush-all support 189 * when removing contexts from the dirty list. 190 */ 191 int delay_tlb_flush; 192 int disable_delay_tlb_flush; 193 194 /* 195 * ``hat_lock'' is a hashed mutex lock for protecting sfmmu TSB lists, 196 * HAT flags, synchronizing TLB/TSB coherency, and context management. 197 * The lock is hashed on the sfmmup since the case where we need to lock 198 * all processes is rare but does occur (e.g. we need to unload a shared 199 * mapping from all processes using the mapping). We have a lot of buckets, 200 * and each slab of sfmmu_t's can use about a quarter of them, giving us 201 * a fairly good distribution without wasting too much space and overhead 202 * when we have to grab them all. 203 */ 204 #define SFMMU_NUM_LOCK 128 /* must be power of two */ 205 hatlock_t hat_lock[SFMMU_NUM_LOCK]; 206 207 /* 208 * Hash algorithm optimized for a small number of slabs. 
209 * 7 is (highbit((sizeof sfmmu_t)) - 1) 210 * This hash algorithm is based upon the knowledge that sfmmu_t's come from a 211 * kmem_cache, and thus they will be sequential within that cache. In 212 * addition, each new slab will have a different "color" up to cache_maxcolor 213 * which will skew the hashing for each successive slab which is allocated. 214 * If the size of sfmmu_t changed to a larger size, this algorithm may need 215 * to be revisited. 216 */ 217 #define TSB_HASH_SHIFT_BITS (7) 218 #define PTR_HASH(x) ((uintptr_t)x >> TSB_HASH_SHIFT_BITS) 219 220 #ifdef DEBUG 221 int tsb_hash_debug = 0; 222 #define TSB_HASH(sfmmup) \ 223 (tsb_hash_debug ? &hat_lock[0] : \ 224 &hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)]) 225 #else /* DEBUG */ 226 #define TSB_HASH(sfmmup) &hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)] 227 #endif /* DEBUG */ 228 229 230 /* sfmmu_replace_tsb() return codes. */ 231 typedef enum tsb_replace_rc { 232 TSB_SUCCESS, 233 TSB_ALLOCFAIL, 234 TSB_LOSTRACE, 235 TSB_ALREADY_SWAPPED, 236 TSB_CANTGROW 237 } tsb_replace_rc_t; 238 239 /* 240 * Flags for TSB allocation routines. 241 */ 242 #define TSB_ALLOC 0x01 243 #define TSB_FORCEALLOC 0x02 244 #define TSB_GROW 0x04 245 #define TSB_SHRINK 0x08 246 #define TSB_SWAPIN 0x10 247 248 /* 249 * Support for HAT callbacks. 250 */ 251 #define SFMMU_MAX_RELOC_CALLBACKS 10 252 int sfmmu_max_cb_id = SFMMU_MAX_RELOC_CALLBACKS; 253 static id_t sfmmu_cb_nextid = 0; 254 static id_t sfmmu_tsb_cb_id; 255 struct sfmmu_callback *sfmmu_cb_table; 256 257 /* 258 * Kernel page relocation is enabled by default for non-caged 259 * kernel pages. This has little effect unless segkmem_reloc is 260 * set, since by default kernel memory comes from inside the 261 * kernel cage. 262 */ 263 int hat_kpr_enabled = 1; 264 265 kmutex_t kpr_mutex; 266 kmutex_t kpr_suspendlock; 267 kthread_t *kreloc_thread; 268 269 /* 270 * Enable VA->PA translation sanity checking on DEBUG kernels. 271 * Disabled by default. This is incompatible with some 272 * drivers (error injector, RSM) so if it breaks you get 273 * to keep both pieces. 
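 *
 * (Editor's aside, unrelated to the sanity checking above: the hat_lock
 * hashing described earlier via TSB_HASH()/PTR_HASH() amounts to shifting
 * the low bits off an sfmmu_t pointer and masking the result into one of
 * the SFMMU_NUM_LOCK buckets. A minimal standalone sketch, with the
 * constants hard-coded here purely for illustration:
 *
 *	enum { NUM_LOCK = 128, SHIFT_BITS = 7 };
 *
 *	static int
 *	lock_bucket(const void *sfmmup)
 *	{
 *		return (int)(((uintptr_t)sfmmup >> SHIFT_BITS) &
 *		    (NUM_LOCK - 1));
 *	}
 *
 * Because consecutive sfmmu_t's in a kmem slab are at least
 * 1 << SHIFT_BITS bytes apart, neighbours normally fall into different
 * buckets, which is the distribution property relied on above.)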
274 */ 275 int hat_check_vtop = 0; 276 277 /* 278 * Private sfmmu routines (prototypes) 279 */ 280 static struct hme_blk *sfmmu_shadow_hcreate(sfmmu_t *, caddr_t, int, uint_t); 281 static struct hme_blk *sfmmu_hblk_alloc(sfmmu_t *, caddr_t, 282 struct hmehash_bucket *, uint_t, hmeblk_tag, uint_t); 283 static caddr_t sfmmu_hblk_unload(struct hat *, struct hme_blk *, caddr_t, 284 caddr_t, demap_range_t *, uint_t); 285 static caddr_t sfmmu_hblk_sync(struct hat *, struct hme_blk *, caddr_t, 286 caddr_t, int); 287 static void sfmmu_hblk_free(struct hmehash_bucket *, struct hme_blk *, 288 uint64_t, struct hme_blk **); 289 static void sfmmu_hblks_list_purge(struct hme_blk **); 290 static uint_t sfmmu_get_free_hblk(struct hme_blk **, uint_t); 291 static uint_t sfmmu_put_free_hblk(struct hme_blk *, uint_t); 292 static struct hme_blk *sfmmu_hblk_steal(int); 293 static int sfmmu_steal_this_hblk(struct hmehash_bucket *, 294 struct hme_blk *, uint64_t, uint64_t, 295 struct hme_blk *); 296 static caddr_t sfmmu_hblk_unlock(struct hme_blk *, caddr_t, caddr_t); 297 298 static void sfmmu_memload_batchsmall(struct hat *, caddr_t, page_t **, 299 uint_t, uint_t, pgcnt_t); 300 void sfmmu_tteload(struct hat *, tte_t *, caddr_t, page_t *, 301 uint_t); 302 static int sfmmu_tteload_array(sfmmu_t *, tte_t *, caddr_t, page_t **, 303 uint_t); 304 static struct hmehash_bucket *sfmmu_tteload_acquire_hashbucket(sfmmu_t *, 305 caddr_t, int); 306 static struct hme_blk *sfmmu_tteload_find_hmeblk(sfmmu_t *, 307 struct hmehash_bucket *, caddr_t, uint_t, uint_t); 308 static int sfmmu_tteload_addentry(sfmmu_t *, struct hme_blk *, tte_t *, 309 caddr_t, page_t **, uint_t); 310 static void sfmmu_tteload_release_hashbucket(struct hmehash_bucket *); 311 312 static int sfmmu_pagearray_setup(caddr_t, page_t **, tte_t *, int); 313 pfn_t sfmmu_uvatopfn(caddr_t, sfmmu_t *); 314 void sfmmu_memtte(tte_t *, pfn_t, uint_t, int); 315 #ifdef VAC 316 static void sfmmu_vac_conflict(struct hat *, caddr_t, page_t *); 317 static int sfmmu_vacconflict_array(caddr_t, page_t *, int *); 318 int tst_tnc(page_t *pp, pgcnt_t); 319 void conv_tnc(page_t *pp, int); 320 #endif 321 322 static void sfmmu_get_ctx(sfmmu_t *); 323 static void sfmmu_free_sfmmu(sfmmu_t *); 324 325 static void sfmmu_gettte(struct hat *, caddr_t, tte_t *); 326 static void sfmmu_ttesync(struct hat *, caddr_t, tte_t *, page_t *); 327 static void sfmmu_chgattr(struct hat *, caddr_t, size_t, uint_t, int); 328 329 cpuset_t sfmmu_pageunload(page_t *, struct sf_hment *, int); 330 static void hat_pagereload(struct page *, struct page *); 331 static cpuset_t sfmmu_pagesync(page_t *, struct sf_hment *, uint_t); 332 #ifdef VAC 333 void sfmmu_page_cache_array(page_t *, int, int, pgcnt_t); 334 static void sfmmu_page_cache(page_t *, int, int, int); 335 #endif 336 337 static void sfmmu_tlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *, 338 pfn_t, int, int, int, int); 339 static void sfmmu_ismtlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *, 340 pfn_t, int); 341 static void sfmmu_tlb_demap(caddr_t, sfmmu_t *, struct hme_blk *, int, int); 342 static void sfmmu_tlb_range_demap(demap_range_t *); 343 static void sfmmu_invalidate_ctx(sfmmu_t *); 344 static void sfmmu_sync_mmustate(sfmmu_t *); 345 346 static void sfmmu_tsbinfo_setup_phys(struct tsb_info *, pfn_t); 347 static int sfmmu_tsbinfo_alloc(struct tsb_info **, int, int, uint_t, 348 sfmmu_t *); 349 static void sfmmu_tsb_free(struct tsb_info *); 350 static void sfmmu_tsbinfo_free(struct tsb_info *); 351 static int sfmmu_init_tsbinfo(struct 
tsb_info *, int, int, uint_t, 352 sfmmu_t *); 353 354 static void sfmmu_tsb_swapin(sfmmu_t *, hatlock_t *); 355 static int sfmmu_select_tsb_szc(pgcnt_t); 356 static void sfmmu_mod_tsb(sfmmu_t *, caddr_t, tte_t *, int); 357 #define sfmmu_load_tsb(sfmmup, vaddr, tte, szc) \ 358 sfmmu_mod_tsb(sfmmup, vaddr, tte, szc) 359 #define sfmmu_unload_tsb(sfmmup, vaddr, szc) \ 360 sfmmu_mod_tsb(sfmmup, vaddr, NULL, szc) 361 static void sfmmu_copy_tsb(struct tsb_info *, struct tsb_info *); 362 static tsb_replace_rc_t sfmmu_replace_tsb(sfmmu_t *, struct tsb_info *, uint_t, 363 hatlock_t *, uint_t); 364 static void sfmmu_size_tsb(sfmmu_t *, int, uint64_t, uint64_t, int); 365 366 #ifdef VAC 367 void sfmmu_cache_flush(pfn_t, int); 368 void sfmmu_cache_flushcolor(int, pfn_t); 369 #endif 370 static caddr_t sfmmu_hblk_chgattr(sfmmu_t *, struct hme_blk *, caddr_t, 371 caddr_t, demap_range_t *, uint_t, int); 372 373 static uint64_t sfmmu_vtop_attr(uint_t, int mode, tte_t *); 374 static uint_t sfmmu_ptov_attr(tte_t *); 375 static caddr_t sfmmu_hblk_chgprot(sfmmu_t *, struct hme_blk *, caddr_t, 376 caddr_t, demap_range_t *, uint_t); 377 static uint_t sfmmu_vtop_prot(uint_t, uint_t *); 378 static int sfmmu_idcache_constructor(void *, void *, int); 379 static void sfmmu_idcache_destructor(void *, void *); 380 static int sfmmu_hblkcache_constructor(void *, void *, int); 381 static void sfmmu_hblkcache_destructor(void *, void *); 382 static void sfmmu_hblkcache_reclaim(void *); 383 static void sfmmu_shadow_hcleanup(sfmmu_t *, struct hme_blk *, 384 struct hmehash_bucket *); 385 static void sfmmu_free_hblks(sfmmu_t *, caddr_t, caddr_t, int); 386 static void sfmmu_rm_large_mappings(page_t *, int); 387 388 static void hat_lock_init(void); 389 static void hat_kstat_init(void); 390 static int sfmmu_kstat_percpu_update(kstat_t *ksp, int rw); 391 static void sfmmu_check_page_sizes(sfmmu_t *, int); 392 int fnd_mapping_sz(page_t *); 393 static void iment_add(struct ism_ment *, struct hat *); 394 static void iment_sub(struct ism_ment *, struct hat *); 395 static pgcnt_t ism_tsb_entries(sfmmu_t *, int szc); 396 extern void sfmmu_setup_tsbinfo(sfmmu_t *); 397 #ifdef sun4v 398 extern void sfmmu_invalidate_tsbinfo(sfmmu_t *); 399 #endif /* sun4v */ 400 extern void sfmmu_clear_utsbinfo(void); 401 402 static void sfmmu_ctx_wrap_around(mmu_ctx_t *); 403 404 /* kpm globals */ 405 #ifdef DEBUG 406 /* 407 * Enable trap level tsbmiss handling 408 */ 409 int kpm_tsbmtl = 1; 410 411 /* 412 * Flush the TLB on kpm mapout. Note: Xcalls are used (again) for the 413 * required TLB shootdowns in this case, so handle w/ care. Off by default. 414 */ 415 int kpm_tlb_flush; 416 #endif /* DEBUG */ 417 418 static void *sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *, size_t, int); 419 420 #ifdef DEBUG 421 static void sfmmu_check_hblk_flist(); 422 #endif 423 424 /* 425 * Semi-private sfmmu data structures. Some of them are initialize in 426 * startup or in hat_init. 
Some of them are private but accessed by 427 * assembly code or mach_sfmmu.c 428 */ 429 struct hmehash_bucket *uhme_hash; /* user hmeblk hash table */ 430 struct hmehash_bucket *khme_hash; /* kernel hmeblk hash table */ 431 uint64_t uhme_hash_pa; /* PA of uhme_hash */ 432 uint64_t khme_hash_pa; /* PA of khme_hash */ 433 int uhmehash_num; /* # of buckets in user hash table */ 434 int khmehash_num; /* # of buckets in kernel hash table */ 435 436 uint_t max_mmu_ctxdoms = 0; /* max context domains in the system */ 437 mmu_ctx_t **mmu_ctxs_tbl; /* global array of context domains */ 438 uint64_t mmu_saved_gnum = 0; /* to init incoming MMUs' gnums */ 439 440 #define DEFAULT_NUM_CTXS_PER_MMU 8192 441 static uint_t nctxs = DEFAULT_NUM_CTXS_PER_MMU; 442 443 int cache; /* describes system cache */ 444 445 caddr_t ktsb_base; /* kernel 8k-indexed tsb base address */ 446 uint64_t ktsb_pbase; /* kernel 8k-indexed tsb phys address */ 447 int ktsb_szcode; /* kernel 8k-indexed tsb size code */ 448 int ktsb_sz; /* kernel 8k-indexed tsb size */ 449 450 caddr_t ktsb4m_base; /* kernel 4m-indexed tsb base address */ 451 uint64_t ktsb4m_pbase; /* kernel 4m-indexed tsb phys address */ 452 int ktsb4m_szcode; /* kernel 4m-indexed tsb size code */ 453 int ktsb4m_sz; /* kernel 4m-indexed tsb size */ 454 455 uint64_t kpm_tsbbase; /* kernel seg_kpm 4M TSB base address */ 456 int kpm_tsbsz; /* kernel seg_kpm 4M TSB size code */ 457 uint64_t kpmsm_tsbbase; /* kernel seg_kpm 8K TSB base address */ 458 int kpmsm_tsbsz; /* kernel seg_kpm 8K TSB size code */ 459 460 #ifndef sun4v 461 int utsb_dtlb_ttenum = -1; /* index in TLB for utsb locked TTE */ 462 int utsb4m_dtlb_ttenum = -1; /* index in TLB for 4M TSB TTE */ 463 int dtlb_resv_ttenum; /* index in TLB of first reserved TTE */ 464 caddr_t utsb_vabase; /* reserved kernel virtual memory */ 465 caddr_t utsb4m_vabase; /* for trap handler TSB accesses */ 466 #endif /* sun4v */ 467 uint64_t tsb_alloc_bytes = 0; /* bytes allocated to TSBs */ 468 vmem_t *kmem_tsb_default_arena[NLGRPS_MAX]; /* For dynamic TSBs */ 469 470 /* 471 * Size to use for TSB slabs. Future platforms that support page sizes 472 * larger than 4M may wish to change these values, and provide their own 473 * assembly macros for building and decoding the TSB base register contents. 474 * Note disable_large_pages will override the value set here. 475 */ 476 uint_t tsb_slab_ttesz = TTE4M; 477 uint_t tsb_slab_size; 478 uint_t tsb_slab_shift; 479 uint_t tsb_slab_mask; /* PFN mask for TTE */ 480 481 /* largest TSB size to grow to, will be smaller on smaller memory systems */ 482 int tsb_max_growsize = UTSB_MAX_SZCODE; 483 484 /* 485 * Tunable parameters dealing with TSB policies. 486 */ 487 488 /* 489 * This undocumented tunable forces all 8K TSBs to be allocated from 490 * the kernel heap rather than from the kmem_tsb_default_arena arenas. 491 */ 492 #ifdef DEBUG 493 int tsb_forceheap = 0; 494 #endif /* DEBUG */ 495 496 /* 497 * Decide whether to use per-lgroup arenas, or one global set of 498 * TSB arenas. The default is not to break up per-lgroup, since 499 * most platforms don't recognize any tangible benefit from it. 500 */ 501 int tsb_lgrp_affinity = 0; 502 503 /* 504 * Used for growing the TSB based on the process RSS. 505 * tsb_rss_factor is based on the smallest TSB, and is 506 * shifted by the TSB size to determine if we need to grow. 
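 *
 * For instance (editor's illustration; it assumes the smallest TSB holds
 * 512 sixteen-byte entries, i.e. TSB_ENTRIES(TSB_MIN_SZCODE) == 512):
 *
 *	tsb_rss_factor       = 512 * 0.75 = 384
 *	SFMMU_RSS_TSBSIZE(0) = 384 << 0   = 384   (512-entry TSB)
 *	SFMMU_RSS_TSBSIZE(2) = 384 << 2   = 1536  (2048-entry TSB)
 *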
507 * The default will grow the TSB if the number of TTEs for 508 * this page size exceeds 75% of the number of TSB entries, 509 * which should _almost_ eliminate all conflict misses 510 * (at the expense of using up lots and lots of memory). 511 */ 512 #define TSB_RSS_FACTOR (TSB_ENTRIES(TSB_MIN_SZCODE) * 0.75) 513 #define SFMMU_RSS_TSBSIZE(tsbszc) (tsb_rss_factor << tsbszc) 514 #define SELECT_TSB_SIZECODE(pgcnt) ( \ 515 (enable_tsb_rss_sizing)? sfmmu_select_tsb_szc(pgcnt) : \ 516 default_tsb_size) 517 #define TSB_OK_SHRINK() \ 518 (tsb_alloc_bytes > tsb_alloc_hiwater || freemem < desfree) 519 #define TSB_OK_GROW() \ 520 (tsb_alloc_bytes < tsb_alloc_hiwater && freemem > desfree) 521 522 int enable_tsb_rss_sizing = 1; 523 int tsb_rss_factor = (int)TSB_RSS_FACTOR; 524 525 /* which TSB size code to use for new address spaces or if rss sizing off */ 526 int default_tsb_size = TSB_8K_SZCODE; 527 528 static uint64_t tsb_alloc_hiwater; /* limit TSB reserved memory */ 529 uint64_t tsb_alloc_hiwater_factor; /* tsb_alloc_hiwater = physmem / this */ 530 #define TSB_ALLOC_HIWATER_FACTOR_DEFAULT 32 531 532 #ifdef DEBUG 533 static int tsb_random_size = 0; /* set to 1 to test random tsb sizes on alloc */ 534 static int tsb_grow_stress = 0; /* if set to 1, keep replacing TSB w/ random */ 535 static int tsb_alloc_mtbf = 0; /* fail allocation every n attempts */ 536 static int tsb_alloc_fail_mtbf = 0; 537 static int tsb_alloc_count = 0; 538 #endif /* DEBUG */ 539 540 /* if set to 1, will remap valid TTEs when growing TSB. */ 541 int tsb_remap_ttes = 1; 542 543 /* 544 * If we have more than this many mappings, allocate a second TSB. 545 * This default is chosen because the I/D fully associative TLBs are 546 * assumed to have at least 8 available entries. Platforms with a 547 * larger fully-associative TLB could probably override the default. 548 */ 549 int tsb_sectsb_threshold = 8; 550 551 /* 552 * kstat data 553 */ 554 struct sfmmu_global_stat sfmmu_global_stat; 555 struct sfmmu_tsbsize_stat sfmmu_tsbsize_stat; 556 557 /* 558 * Global data 559 */ 560 sfmmu_t *ksfmmup; /* kernel's hat id */ 561 562 #ifdef DEBUG 563 static void chk_tte(tte_t *, tte_t *, tte_t *, struct hme_blk *); 564 #endif 565 566 /* sfmmu locking operations */ 567 static kmutex_t *sfmmu_mlspl_enter(struct page *, int); 568 static int sfmmu_mlspl_held(struct page *, int); 569 570 kmutex_t *sfmmu_page_enter(page_t *); 571 void sfmmu_page_exit(kmutex_t *); 572 int sfmmu_page_spl_held(struct page *); 573 574 /* sfmmu internal locking operations - accessed directly */ 575 static void sfmmu_mlist_reloc_enter(page_t *, page_t *, 576 kmutex_t **, kmutex_t **); 577 static void sfmmu_mlist_reloc_exit(kmutex_t *, kmutex_t *); 578 static hatlock_t * 579 sfmmu_hat_enter(sfmmu_t *); 580 static hatlock_t * 581 sfmmu_hat_tryenter(sfmmu_t *); 582 static void sfmmu_hat_exit(hatlock_t *); 583 static void sfmmu_hat_lock_all(void); 584 static void sfmmu_hat_unlock_all(void); 585 static void sfmmu_ismhat_enter(sfmmu_t *, int); 586 static void sfmmu_ismhat_exit(sfmmu_t *, int); 587 588 /* 589 * Array of mutexes protecting a page's mapping list and p_nrm field. 
 *
 * The hash function looks complicated, but is made up so that:
 *
 * "pp" not shifted, so adjacent pp values will hash to different cache lines
 * (8 byte alignment * 8 bytes/mutex == 64 byte coherency subblock)
 *
 * "pp" >> mml_shift, incorporates more source bits into the hash result
 *
 * "& (mml_table_sz - 1)" should be faster than using remainder "%"
 *
 * Hopefully, mml_table, mml_table_sz and mml_shift are all in the same
 * cacheline, since they get declared next to each other below. We'll trust
 * ld not to do something random.
 */
#ifdef DEBUG
int mlist_hash_debug = 0;
#define	MLIST_HASH(pp)	(mlist_hash_debug ? &mml_table[0] : \
	&mml_table[((uintptr_t)(pp) + \
	((uintptr_t)(pp) >> mml_shift)) & (mml_table_sz - 1)])
#else	/* !DEBUG */
#define	MLIST_HASH(pp)	&mml_table[ \
	((uintptr_t)(pp) + ((uintptr_t)(pp) >> mml_shift)) & (mml_table_sz - 1)]
#endif	/* !DEBUG */

kmutex_t		*mml_table;
uint_t			mml_table_sz;	/* must be a power of 2 */
uint_t			mml_shift;	/* log2(mml_table_sz) + 3 for align */

kpm_hlk_t	*kpmp_table;
uint_t		kpmp_table_sz;	/* must be a power of 2 */
uchar_t		kpmp_shift;

kpm_shlk_t	*kpmp_stable;
uint_t		kpmp_stable_sz;	/* must be a power of 2 */

/*
 * SPL_HASH was improved to avoid false cache line sharing
 */
#define	SPL_TABLE_SIZE	128
#define	SPL_MASK	(SPL_TABLE_SIZE - 1)
#define	SPL_SHIFT	7		/* log2(SPL_TABLE_SIZE) */

#define	SPL_INDEX(pp) \
	((((uintptr_t)(pp) >> SPL_SHIFT) ^ \
	((uintptr_t)(pp) >> (SPL_SHIFT << 1))) & \
	(SPL_TABLE_SIZE - 1))

#define	SPL_HASH(pp) \
	(&sfmmu_page_lock[SPL_INDEX(pp) & SPL_MASK].pad_mutex)

static	pad_mutex_t	sfmmu_page_lock[SPL_TABLE_SIZE];


/*
 * hat_unload_callback() will group together callbacks in order
 * to avoid xt_sync() calls. This is the maximum size of the group.
 */
#define	MAX_CB_ADDR	32

tte_t	hw_tte;
static ulong_t sfmmu_dmr_maxbit = DMR_MAXBIT;

static char	*mmu_ctx_kstat_names[] = {
	"mmu_ctx_tsb_exceptions",
	"mmu_ctx_tsb_raise_exception",
	"mmu_ctx_wrap_around",
};

/*
 * Wrapper for vmem_xalloc since vmem_create only allows limited
 * parameters for vm_source_alloc functions. This function allows us
 * to specify alignment consistent with the size of the object being
 * allocated.
 */
static void *
sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *vmp, size_t size, int vmflag)
{
	return (vmem_xalloc(vmp, size, size, 0, 0, NULL, NULL, vmflag));
}

/* Common code for setting tsb_alloc_hiwater. */
#define	SFMMU_SET_TSB_ALLOC_HIWATER(pages)	tsb_alloc_hiwater = \
	ptob(pages) / tsb_alloc_hiwater_factor

/*
 * Set tsb_max_growsize to allow at most all of physical memory to be mapped by
 * a single TSB. physmem is the number of physical pages so we need physmem 8K
 * TTEs to represent all those physical pages. We round this up by using
 * 1<<highbit(). To figure out which size code to use, remember that the size
 * code is just an amount to shift the smallest TSB size to get the size of
 * this TSB. So we subtract that size, TSB_START_SIZE, from highbit() (or
 * highbit() - 1) to get the size code for the smallest TSB that can represent
 * all of physical memory, while erring on the side of too much.
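 *
 * A worked example (editor's addition; TSB_START_SIZE is assumed here to
 * be 9, i.e. the smallest size code corresponds to a 512-entry TSB):
 *
 *	physmem = 0x200000	(2M 8K-pages, i.e. 16GB of memory)
 *	i = highbit(physmem) = 22
 *	(1 << 21) == physmem, so the 2^n case applies and i becomes 21
 *	szc = 21 - TSB_START_SIZE = 12
 *
 * and a size-code-12 TSB holds 1 << (12 + 9) = 2M entries, exactly one
 * 8K TTE per physical page.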
683 * 684 * If the computed size code is less than the current tsb_max_growsize, we set 685 * tsb_max_growsize to the computed size code. In the case where the computed 686 * size code is greater than tsb_max_growsize, we have these restrictions that 687 * apply to increasing tsb_max_growsize: 688 * 1) TSBs can't grow larger than the TSB slab size 689 * 2) TSBs can't grow larger than UTSB_MAX_SZCODE. 690 */ 691 #define SFMMU_SET_TSB_MAX_GROWSIZE(pages) { \ 692 int i, szc; \ 693 \ 694 i = highbit(pages); \ 695 if ((1 << (i - 1)) == (pages)) \ 696 i--; /* 2^n case, round down */ \ 697 szc = i - TSB_START_SIZE; \ 698 if (szc < tsb_max_growsize) \ 699 tsb_max_growsize = szc; \ 700 else if ((szc > tsb_max_growsize) && \ 701 (szc <= tsb_slab_shift - (TSB_START_SIZE + TSB_ENTRY_SHIFT))) \ 702 tsb_max_growsize = MIN(szc, UTSB_MAX_SZCODE); \ 703 } 704 705 /* 706 * Given a pointer to an sfmmu and a TTE size code, return a pointer to the 707 * tsb_info which handles that TTE size. 708 */ 709 #define SFMMU_GET_TSBINFO(tsbinfop, sfmmup, tte_szc) \ 710 (tsbinfop) = (sfmmup)->sfmmu_tsb; \ 711 ASSERT(sfmmu_hat_lock_held(sfmmup)); \ 712 if ((tte_szc) >= TTE4M) \ 713 (tsbinfop) = (tsbinfop)->tsb_next; 714 715 /* 716 * Return the number of mappings present in the HAT 717 * for a particular process and page size. 718 */ 719 #define SFMMU_TTE_CNT(sfmmup, szc) \ 720 (sfmmup)->sfmmu_iblk? \ 721 (sfmmup)->sfmmu_ismttecnt[(szc)] + \ 722 (sfmmup)->sfmmu_ttecnt[(szc)] : \ 723 (sfmmup)->sfmmu_ttecnt[(szc)]; 724 725 /* 726 * Macro to use to unload entries from the TSB. 727 * It has knowledge of which page sizes get replicated in the TSB 728 * and will call the appropriate unload routine for the appropriate size. 729 */ 730 #define SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp) \ 731 { \ 732 int ttesz = get_hblk_ttesz(hmeblkp); \ 733 if (ttesz == TTE8K || ttesz == TTE4M) { \ 734 sfmmu_unload_tsb(sfmmup, addr, ttesz); \ 735 } else { \ 736 caddr_t sva = (caddr_t)get_hblk_base(hmeblkp); \ 737 caddr_t eva = sva + get_hblk_span(hmeblkp); \ 738 ASSERT(addr >= sva && addr < eva); \ 739 sfmmu_unload_tsb_range(sfmmup, sva, eva, ttesz); \ 740 } \ 741 } 742 743 744 /* Update tsb_alloc_hiwater after memory is configured. */ 745 /*ARGSUSED*/ 746 static void 747 sfmmu_update_tsb_post_add(void *arg, pgcnt_t delta_pages) 748 { 749 /* Assumes physmem has already been updated. */ 750 SFMMU_SET_TSB_ALLOC_HIWATER(physmem); 751 SFMMU_SET_TSB_MAX_GROWSIZE(physmem); 752 } 753 754 /* 755 * Update tsb_alloc_hiwater before memory is deleted. We'll do nothing here 756 * and update tsb_alloc_hiwater and tsb_max_growsize after the memory is 757 * deleted. 758 */ 759 /*ARGSUSED*/ 760 static int 761 sfmmu_update_tsb_pre_del(void *arg, pgcnt_t delta_pages) 762 { 763 return (0); 764 } 765 766 /* Update tsb_alloc_hiwater after memory fails to be unconfigured. */ 767 /*ARGSUSED*/ 768 static void 769 sfmmu_update_tsb_post_del(void *arg, pgcnt_t delta_pages, int cancelled) 770 { 771 /* 772 * Whether the delete was cancelled or not, just go ahead and update 773 * tsb_alloc_hiwater and tsb_max_growsize. 774 */ 775 SFMMU_SET_TSB_ALLOC_HIWATER(physmem); 776 SFMMU_SET_TSB_MAX_GROWSIZE(physmem); 777 } 778 779 static kphysm_setup_vector_t sfmmu_update_tsb_vec = { 780 KPHYSM_SETUP_VECTOR_VERSION, /* version */ 781 sfmmu_update_tsb_post_add, /* post_add */ 782 sfmmu_update_tsb_pre_del, /* pre_del */ 783 sfmmu_update_tsb_post_del /* post_del */ 784 }; 785 786 787 /* 788 * HME_BLK HASH PRIMITIVES 789 */ 790 791 /* 792 * Enter a hme on the mapping list for page pp. 
 * When large pages are more prevalent in the system we might want to
 * keep the mapping list in ascending order by the hment size. For now,
 * small pages are more frequent, so don't slow it down.
 */
#define	HME_ADD(hme, pp)					\
{								\
	ASSERT(sfmmu_mlist_held(pp));				\
								\
	hme->hme_prev = NULL;					\
	hme->hme_next = pp->p_mapping;				\
	hme->hme_page = pp;					\
	if (pp->p_mapping) {					\
		((struct sf_hment *)(pp->p_mapping))->hme_prev = hme;\
		ASSERT(pp->p_share > 0);			\
	} else	{						\
		/* EMPTY */					\
		ASSERT(pp->p_share == 0);			\
	}							\
	pp->p_mapping = hme;					\
	pp->p_share++;						\
}

/*
 * Remove a hme from the mapping list for page pp.
 * If we are unmapping a large translation, we need to make sure that the
 * change is reflected in the corresponding bit of the p_index field.
 */
#define	HME_SUB(hme, pp)					\
{								\
	ASSERT(sfmmu_mlist_held(pp));				\
	ASSERT(hme->hme_page == pp || IS_PAHME(hme));		\
								\
	if (pp->p_mapping == NULL) {				\
		panic("hme_remove - no mappings");		\
	}							\
								\
	membar_stst();	/* ensure previous stores finish */	\
								\
	ASSERT(pp->p_share > 0);				\
	pp->p_share--;						\
								\
	if (hme->hme_prev) {					\
		ASSERT(pp->p_mapping != hme);			\
		ASSERT(hme->hme_prev->hme_page == pp ||		\
			IS_PAHME(hme->hme_prev));		\
		hme->hme_prev->hme_next = hme->hme_next;	\
	} else {						\
		ASSERT(pp->p_mapping == hme);			\
		pp->p_mapping = hme->hme_next;			\
		ASSERT((pp->p_mapping == NULL) ?		\
			(pp->p_share == 0) : 1);		\
	}							\
								\
	if (hme->hme_next) {					\
		ASSERT(hme->hme_next->hme_page == pp ||		\
			IS_PAHME(hme->hme_next));		\
		hme->hme_next->hme_prev = hme->hme_prev;	\
	}							\
								\
	/* zero out the entry */				\
	hme->hme_next = NULL;					\
	hme->hme_prev = NULL;					\
	hme->hme_page = NULL;					\
								\
	if (hme_size(hme) > TTE8K) {				\
		/* remove mappings for remainder of large pg */	\
		sfmmu_rm_large_mappings(pp, hme_size(hme));	\
	}							\
}

/*
 * This function returns the hment given the hme_blk and a vaddr.
 * It assumes addr has already been checked to belong to hme_blk's
 * range.
 */
#define	HBLKTOHME(hment, hmeblkp, addr)					\
{									\
	int index;							\
	HBLKTOHME_IDX(hment, hmeblkp, addr, index)			\
}

/*
 * Version of HBLKTOHME that also returns the index in hmeblkp
 * of the hment.
 */
#define	HBLKTOHME_IDX(hment, hmeblkp, addr, idx)			\
{									\
	ASSERT(in_hblk_range((hmeblkp), (addr)));			\
									\
	if (get_hblk_ttesz(hmeblkp) == TTE8K) {				\
		idx = (((uintptr_t)(addr) >> MMU_PAGESHIFT) & (NHMENTS-1)); \
	} else								\
		idx = 0;						\
									\
	(hment) = &(hmeblkp)->hblk_hme[idx];				\
}

/*
 * Disable any page sizes not supported by the CPU
 */
void
hat_init_pagesizes()
{
	int i;

	mmu_exported_page_sizes = 0;
	for (i = TTE8K; i < max_mmu_page_sizes; i++) {

		szc_2_userszc[i] = (uint_t)-1;
		userszc_2_szc[i] = (uint_t)-1;

		if ((mmu_exported_pagesize_mask & (1 << i)) == 0) {
			disable_large_pages |= (1 << i);
		} else {
			szc_2_userszc[i] = mmu_exported_page_sizes;
			userszc_2_szc[mmu_exported_page_sizes] = i;
			mmu_exported_page_sizes++;
		}
	}

	disable_ism_large_pages |= disable_large_pages;
	disable_auto_data_large_pages = disable_large_pages;
	disable_auto_text_large_pages = disable_large_pages;

	/*
	 * Initialize mmu-specific large page sizes.
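	 *
	 * (Editor's illustration of the loop above, for a hypothetical CPU
	 * whose mmu_exported_pagesize_mask covers only 8K, 64K and 4M:
	 * the 512K bit, along with the bits of any larger unexported sizes,
	 * ends up set in disable_large_pages, while the exported sizes are
	 * renumbered densely for userland, e.g.
	 *
	 *	szc_2_userszc[TTE4M] == 2, userszc_2_szc[2] == TTE4M,
	 *	mmu_exported_page_sizes == 3
	 *
	 * and disable_large_pages is then folded into the ism/auto
	 * variants just above.)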
	 */
	if (&mmu_large_pages_disabled) {
		disable_large_pages |= mmu_large_pages_disabled(HAT_LOAD);
		disable_ism_large_pages |=
		    mmu_large_pages_disabled(HAT_LOAD_SHARE);
		disable_auto_data_large_pages |=
		    mmu_large_pages_disabled(HAT_AUTO_DATA);
		disable_auto_text_large_pages |=
		    mmu_large_pages_disabled(HAT_AUTO_TEXT);
	}
}

/*
 * Initialize the hardware address translation structures.
 */
void
hat_init(void)
{
	int		i;
	uint_t		sz;
	uint_t		maxtsb;
	size_t		size;

	hat_lock_init();
	hat_kstat_init();

	/*
	 * Hardware-only bits in a TTE
	 */
	MAKE_TTE_MASK(&hw_tte);

	hat_init_pagesizes();

	/* Initialize the hash locks */
	for (i = 0; i < khmehash_num; i++) {
		mutex_init(&khme_hash[i].hmehash_mutex, NULL,
		    MUTEX_DEFAULT, NULL);
	}
	for (i = 0; i < uhmehash_num; i++) {
		mutex_init(&uhme_hash[i].hmehash_mutex, NULL,
		    MUTEX_DEFAULT, NULL);
	}
	khmehash_num--;		/* make sure counter starts from 0 */
	uhmehash_num--;		/* make sure counter starts from 0 */

	/*
	 * Allocate context domain structures.
	 *
	 * A platform may choose to modify max_mmu_ctxdoms in
	 * set_platform_defaults(). If a platform does not define
	 * a set_platform_defaults() or does not choose to modify
	 * max_mmu_ctxdoms, it gets one MMU context domain for every CPU.
	 *
	 * For sun4v, there will be one global context domain; this is to
	 * avoid the ldom cpu substitution problem.
	 *
	 * For all platforms that have CPUs sharing MMUs, this
	 * value must be defined.
	 */
	if (max_mmu_ctxdoms == 0) {
#ifndef sun4v
		max_mmu_ctxdoms = max_ncpus;
#else /* sun4v */
		max_mmu_ctxdoms = 1;
#endif /* sun4v */
	}

	size = max_mmu_ctxdoms * sizeof (mmu_ctx_t *);
	mmu_ctxs_tbl = kmem_zalloc(size, KM_SLEEP);

	/* mmu_ctx_t is 64 bytes aligned */
	mmuctxdom_cache = kmem_cache_create("mmuctxdom_cache",
	    sizeof (mmu_ctx_t), 64, NULL, NULL, NULL, NULL, NULL, 0);
	/*
	 * MMU context domain initialization for the Boot CPU.
	 * This needs the context domains array allocated above.
	 */
	mutex_enter(&cpu_lock);
	sfmmu_cpu_init(CPU);
	mutex_exit(&cpu_lock);

	/*
	 * Initialize ism mapping list lock.
	 */

	mutex_init(&ism_mlist_lock, NULL, MUTEX_DEFAULT, NULL);

	/*
	 * Each sfmmu structure carries an array of MMU context info
	 * structures, one per context domain. The size of this array depends
	 * on the maximum number of context domains. So, the size of the
	 * sfmmu structure varies per platform.
	 *
	 * sfmmu is allocated from static arena, because trap
	 * handler at TL > 0 is not allowed to touch kernel relocatable
	 * memory. sfmmu's alignment is changed to 64 bytes from
	 * default 8 bytes, as the lower 6 bits will be used to pass
	 * pgcnt to vtag_flush_pgcnt_tl1.
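	 *
	 * (Editor's sketch of that convention, with hypothetical variable
	 * names; a 64-byte aligned pointer has six zero low-order bits,
	 * so a small page count can travel in them:
	 *
	 *	uint64_t arg = (uint64_t)(uintptr_t)sfmmup | (pgcnt & 0x3f);
	 *	sfmmu_t *hatp = (sfmmu_t *)(uintptr_t)(arg & ~(uint64_t)0x3f);
	 *	uint_t cnt = (uint_t)(arg & 0x3f);
	 *
	 * which is the reason the sfmmuid kmem cache below is created with
	 * 64-byte alignment.)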
1017 */ 1018 size = sizeof (sfmmu_t) + sizeof (sfmmu_ctx_t) * (max_mmu_ctxdoms - 1); 1019 1020 sfmmuid_cache = kmem_cache_create("sfmmuid_cache", size, 1021 64, sfmmu_idcache_constructor, sfmmu_idcache_destructor, 1022 NULL, NULL, static_arena, 0); 1023 1024 sfmmu_tsbinfo_cache = kmem_cache_create("sfmmu_tsbinfo_cache", 1025 sizeof (struct tsb_info), 0, NULL, NULL, NULL, NULL, NULL, 0); 1026 1027 /* 1028 * Since we only use the tsb8k cache to "borrow" pages for TSBs 1029 * from the heap when low on memory or when TSB_FORCEALLOC is 1030 * specified, don't use magazines to cache them--we want to return 1031 * them to the system as quickly as possible. 1032 */ 1033 sfmmu_tsb8k_cache = kmem_cache_create("sfmmu_tsb8k_cache", 1034 MMU_PAGESIZE, MMU_PAGESIZE, NULL, NULL, NULL, NULL, 1035 static_arena, KMC_NOMAGAZINE); 1036 1037 /* 1038 * Set tsb_alloc_hiwater to 1/tsb_alloc_hiwater_factor of physical 1039 * memory, which corresponds to the old static reserve for TSBs. 1040 * tsb_alloc_hiwater_factor defaults to 32. This caps the amount of 1041 * memory we'll allocate for TSB slabs; beyond this point TSB 1042 * allocations will be taken from the kernel heap (via 1043 * sfmmu_tsb8k_cache) and will be throttled as would any other kmem 1044 * consumer. 1045 */ 1046 if (tsb_alloc_hiwater_factor == 0) { 1047 tsb_alloc_hiwater_factor = TSB_ALLOC_HIWATER_FACTOR_DEFAULT; 1048 } 1049 SFMMU_SET_TSB_ALLOC_HIWATER(physmem); 1050 1051 /* Set tsb_max_growsize. */ 1052 SFMMU_SET_TSB_MAX_GROWSIZE(physmem); 1053 1054 /* 1055 * On smaller memory systems, allocate TSB memory in smaller chunks 1056 * than the default 4M slab size. We also honor disable_large_pages 1057 * here. 1058 * 1059 * The trap handlers need to be patched with the final slab shift, 1060 * since they need to be able to construct the TSB pointer at runtime. 1061 */ 1062 if (tsb_max_growsize <= TSB_512K_SZCODE) 1063 tsb_slab_ttesz = TTE512K; 1064 1065 for (sz = tsb_slab_ttesz; sz > 0; sz--) { 1066 if (!(disable_large_pages & (1 << sz))) 1067 break; 1068 } 1069 1070 tsb_slab_ttesz = sz; 1071 tsb_slab_shift = MMU_PAGESHIFT + (sz << 1) + sz; 1072 tsb_slab_size = 1 << tsb_slab_shift; 1073 tsb_slab_mask = (1 << (tsb_slab_shift - MMU_PAGESHIFT)) - 1; 1074 1075 maxtsb = tsb_slab_shift - (TSB_START_SIZE + TSB_ENTRY_SHIFT); 1076 if (tsb_max_growsize > maxtsb) 1077 tsb_max_growsize = maxtsb; 1078 1079 /* 1080 * Set up memory callback to update tsb_alloc_hiwater and 1081 * tsb_max_growsize. 1082 */ 1083 i = kphysm_setup_func_register(&sfmmu_update_tsb_vec, (void *) 0); 1084 ASSERT(i == 0); 1085 1086 /* 1087 * kmem_tsb_arena is the source from which large TSB slabs are 1088 * drawn. The quantum of this arena corresponds to the largest 1089 * TSB size we can dynamically allocate for user processes. 1090 * Currently it must also be a supported page size since we 1091 * use exactly one translation entry to map each slab page. 1092 * 1093 * The per-lgroup kmem_tsb_default_arena arenas are the arenas from 1094 * which most TSBs are allocated. Since most TSB allocations are 1095 * typically 8K we have a kmem cache we stack on top of each 1096 * kmem_tsb_default_arena to speed up those allocations. 1097 * 1098 * Note the two-level scheme of arenas is required only 1099 * because vmem_create doesn't allow us to specify alignment 1100 * requirements. If this ever changes the code could be 1101 * simplified to use only one level of arenas. 
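	 *
	 * (Editor's note on the slab sizing a few lines up: tsb_slab_shift
	 * works out to MMU_PAGESHIFT + 3 * tsb_slab_ttesz, so with 8K base
	 * pages:
	 *
	 *	TTE512K (2) -> shift 19 -> 512K slabs
	 *	TTE4M   (3) -> shift 22 -> 4M slabs
	 *
	 * i.e. each slab is exactly one page of the TTE size chosen to map
	 * it, consistent with the one-translation-entry-per-slab rule
	 * described above.)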
1102 */ 1103 kmem_tsb_arena = vmem_create("kmem_tsb", NULL, 0, tsb_slab_size, 1104 sfmmu_vmem_xalloc_aligned_wrapper, vmem_xfree, heap_arena, 1105 0, VM_SLEEP); 1106 1107 if (tsb_lgrp_affinity) { 1108 char s[50]; 1109 for (i = 0; i < NLGRPS_MAX; i++) { 1110 (void) sprintf(s, "kmem_tsb_lgrp%d", i); 1111 kmem_tsb_default_arena[i] = 1112 vmem_create(s, NULL, 0, PAGESIZE, 1113 sfmmu_tsb_segkmem_alloc, sfmmu_tsb_segkmem_free, 1114 kmem_tsb_arena, 0, VM_SLEEP | VM_BESTFIT); 1115 (void) sprintf(s, "sfmmu_tsb_lgrp%d_cache", i); 1116 sfmmu_tsb_cache[i] = kmem_cache_create(s, PAGESIZE, 1117 PAGESIZE, NULL, NULL, NULL, NULL, 1118 kmem_tsb_default_arena[i], 0); 1119 } 1120 } else { 1121 kmem_tsb_default_arena[0] = vmem_create("kmem_tsb_default", 1122 NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc, 1123 sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0, 1124 VM_SLEEP | VM_BESTFIT); 1125 1126 sfmmu_tsb_cache[0] = kmem_cache_create("sfmmu_tsb_cache", 1127 PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL, 1128 kmem_tsb_default_arena[0], 0); 1129 } 1130 1131 sfmmu8_cache = kmem_cache_create("sfmmu8_cache", HME8BLK_SZ, 1132 HMEBLK_ALIGN, sfmmu_hblkcache_constructor, 1133 sfmmu_hblkcache_destructor, 1134 sfmmu_hblkcache_reclaim, (void *)HME8BLK_SZ, 1135 hat_memload_arena, KMC_NOHASH); 1136 1137 hat_memload1_arena = vmem_create("hat_memload1", NULL, 0, PAGESIZE, 1138 segkmem_alloc_permanent, segkmem_free, heap_arena, 0, VM_SLEEP); 1139 1140 sfmmu1_cache = kmem_cache_create("sfmmu1_cache", HME1BLK_SZ, 1141 HMEBLK_ALIGN, sfmmu_hblkcache_constructor, 1142 sfmmu_hblkcache_destructor, 1143 NULL, (void *)HME1BLK_SZ, 1144 hat_memload1_arena, KMC_NOHASH); 1145 1146 pa_hment_cache = kmem_cache_create("pa_hment_cache", PAHME_SZ, 1147 0, NULL, NULL, NULL, NULL, static_arena, KMC_NOHASH); 1148 1149 ism_blk_cache = kmem_cache_create("ism_blk_cache", 1150 sizeof (ism_blk_t), ecache_alignsize, NULL, NULL, 1151 NULL, NULL, static_arena, KMC_NOHASH); 1152 1153 ism_ment_cache = kmem_cache_create("ism_ment_cache", 1154 sizeof (ism_ment_t), 0, NULL, NULL, 1155 NULL, NULL, NULL, 0); 1156 1157 /* 1158 * We grab the first hat for the kernel, 1159 */ 1160 AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER); 1161 kas.a_hat = hat_alloc(&kas); 1162 AS_LOCK_EXIT(&kas, &kas.a_lock); 1163 1164 /* 1165 * Initialize hblk_reserve. 1166 */ 1167 ((struct hme_blk *)hblk_reserve)->hblk_nextpa = 1168 va_to_pa((caddr_t)hblk_reserve); 1169 1170 #ifndef UTSB_PHYS 1171 /* 1172 * Reserve some kernel virtual address space for the locked TTEs 1173 * that allow us to probe the TSB from TL>0. 1174 */ 1175 utsb_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size, 1176 0, 0, NULL, NULL, VM_SLEEP); 1177 utsb4m_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size, 1178 0, 0, NULL, NULL, VM_SLEEP); 1179 #endif 1180 1181 #ifdef VAC 1182 /* 1183 * The big page VAC handling code assumes VAC 1184 * will not be bigger than the smallest big 1185 * page- which is 64K. 1186 */ 1187 if (TTEPAGES(TTE64K) < CACHE_NUM_COLOR) { 1188 cmn_err(CE_PANIC, "VAC too big!"); 1189 } 1190 #endif 1191 1192 (void) xhat_init(); 1193 1194 uhme_hash_pa = va_to_pa(uhme_hash); 1195 khme_hash_pa = va_to_pa(khme_hash); 1196 1197 /* 1198 * Initialize relocation locks. kpr_suspendlock is held 1199 * at PIL_MAX to prevent interrupts from pinning the holder 1200 * of a suspended TTE which may access it leading to a 1201 * deadlock condition. 
1202 */ 1203 mutex_init(&kpr_mutex, NULL, MUTEX_DEFAULT, NULL); 1204 mutex_init(&kpr_suspendlock, NULL, MUTEX_SPIN, (void *)PIL_MAX); 1205 1206 /* 1207 * Pre-allocate hrm_hashtab before enabling the collection of 1208 * refmod statistics. Allocating on the fly would mean us 1209 * running the risk of suffering recursive mutex enters or 1210 * deadlocks. 1211 */ 1212 hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *), 1213 KM_SLEEP); 1214 } 1215 1216 /* 1217 * Initialize locking for the hat layer, called early during boot. 1218 */ 1219 static void 1220 hat_lock_init() 1221 { 1222 int i; 1223 1224 /* 1225 * initialize the array of mutexes protecting a page's mapping 1226 * list and p_nrm field. 1227 */ 1228 for (i = 0; i < mml_table_sz; i++) 1229 mutex_init(&mml_table[i], NULL, MUTEX_DEFAULT, NULL); 1230 1231 if (kpm_enable) { 1232 for (i = 0; i < kpmp_table_sz; i++) { 1233 mutex_init(&kpmp_table[i].khl_mutex, NULL, 1234 MUTEX_DEFAULT, NULL); 1235 } 1236 } 1237 1238 /* 1239 * Initialize array of mutex locks that protects sfmmu fields and 1240 * TSB lists. 1241 */ 1242 for (i = 0; i < SFMMU_NUM_LOCK; i++) 1243 mutex_init(HATLOCK_MUTEXP(&hat_lock[i]), NULL, MUTEX_DEFAULT, 1244 NULL); 1245 } 1246 1247 #define SFMMU_KERNEL_MAXVA \ 1248 (kmem64_base ? (uintptr_t)kmem64_end : (SYSLIMIT)) 1249 1250 /* 1251 * Allocate a hat structure. 1252 * Called when an address space first uses a hat. 1253 */ 1254 struct hat * 1255 hat_alloc(struct as *as) 1256 { 1257 sfmmu_t *sfmmup; 1258 int i; 1259 uint64_t cnum; 1260 extern uint_t get_color_start(struct as *); 1261 1262 ASSERT(AS_WRITE_HELD(as, &as->a_lock)); 1263 sfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP); 1264 sfmmup->sfmmu_as = as; 1265 sfmmup->sfmmu_flags = 0; 1266 LOCK_INIT_CLEAR(&sfmmup->sfmmu_ctx_lock); 1267 1268 if (as == &kas) { 1269 ksfmmup = sfmmup; 1270 sfmmup->sfmmu_cext = 0; 1271 cnum = KCONTEXT; 1272 1273 sfmmup->sfmmu_clrstart = 0; 1274 sfmmup->sfmmu_tsb = NULL; 1275 /* 1276 * hat_kern_setup() will call sfmmu_init_ktsbinfo() 1277 * to setup tsb_info for ksfmmup. 1278 */ 1279 } else { 1280 1281 /* 1282 * Just set to invalid ctx. When it faults, it will 1283 * get a valid ctx. This would avoid the situation 1284 * where we get a ctx, but it gets stolen and then 1285 * we fault when we try to run and so have to get 1286 * another ctx. 
1287 */ 1288 sfmmup->sfmmu_cext = 0; 1289 cnum = INVALID_CONTEXT; 1290 1291 /* initialize original physical page coloring bin */ 1292 sfmmup->sfmmu_clrstart = get_color_start(as); 1293 #ifdef DEBUG 1294 if (tsb_random_size) { 1295 uint32_t randval = (uint32_t)gettick() >> 4; 1296 int size = randval % (tsb_max_growsize + 1); 1297 1298 /* chose a random tsb size for stress testing */ 1299 (void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb, size, 1300 TSB8K|TSB64K|TSB512K, 0, sfmmup); 1301 } else 1302 #endif /* DEBUG */ 1303 (void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb, 1304 default_tsb_size, 1305 TSB8K|TSB64K|TSB512K, 0, sfmmup); 1306 sfmmup->sfmmu_flags = HAT_SWAPPED; 1307 ASSERT(sfmmup->sfmmu_tsb != NULL); 1308 } 1309 1310 ASSERT(max_mmu_ctxdoms > 0); 1311 for (i = 0; i < max_mmu_ctxdoms; i++) { 1312 sfmmup->sfmmu_ctxs[i].cnum = cnum; 1313 sfmmup->sfmmu_ctxs[i].gnum = 0; 1314 } 1315 1316 sfmmu_setup_tsbinfo(sfmmup); 1317 for (i = 0; i < max_mmu_page_sizes; i++) { 1318 sfmmup->sfmmu_ttecnt[i] = 0; 1319 sfmmup->sfmmu_ismttecnt[i] = 0; 1320 sfmmup->sfmmu_pgsz[i] = TTE8K; 1321 } 1322 1323 sfmmup->sfmmu_iblk = NULL; 1324 sfmmup->sfmmu_ismhat = 0; 1325 sfmmup->sfmmu_ismblkpa = (uint64_t)-1; 1326 if (sfmmup == ksfmmup) { 1327 CPUSET_ALL(sfmmup->sfmmu_cpusran); 1328 } else { 1329 CPUSET_ZERO(sfmmup->sfmmu_cpusran); 1330 } 1331 sfmmup->sfmmu_free = 0; 1332 sfmmup->sfmmu_rmstat = 0; 1333 sfmmup->sfmmu_clrbin = sfmmup->sfmmu_clrstart; 1334 sfmmup->sfmmu_xhat_provider = NULL; 1335 cv_init(&sfmmup->sfmmu_tsb_cv, NULL, CV_DEFAULT, NULL); 1336 return (sfmmup); 1337 } 1338 1339 /* 1340 * Create per-MMU context domain kstats for a given MMU ctx. 1341 */ 1342 static void 1343 sfmmu_mmu_kstat_create(mmu_ctx_t *mmu_ctxp) 1344 { 1345 mmu_ctx_stat_t stat; 1346 kstat_t *mmu_kstat; 1347 1348 ASSERT(MUTEX_HELD(&cpu_lock)); 1349 ASSERT(mmu_ctxp->mmu_kstat == NULL); 1350 1351 mmu_kstat = kstat_create("unix", mmu_ctxp->mmu_idx, "mmu_ctx", 1352 "hat", KSTAT_TYPE_NAMED, MMU_CTX_NUM_STATS, KSTAT_FLAG_VIRTUAL); 1353 1354 if (mmu_kstat == NULL) { 1355 cmn_err(CE_WARN, "kstat_create for MMU %d failed", 1356 mmu_ctxp->mmu_idx); 1357 } else { 1358 mmu_kstat->ks_data = mmu_ctxp->mmu_kstat_data; 1359 for (stat = 0; stat < MMU_CTX_NUM_STATS; stat++) 1360 kstat_named_init(&mmu_ctxp->mmu_kstat_data[stat], 1361 mmu_ctx_kstat_names[stat], KSTAT_DATA_INT64); 1362 mmu_ctxp->mmu_kstat = mmu_kstat; 1363 kstat_install(mmu_kstat); 1364 } 1365 } 1366 1367 /* 1368 * plat_cpuid_to_mmu_ctx_info() is a platform interface that returns MMU 1369 * context domain information for a given CPU. If a platform does not 1370 * specify that interface, then the function below is used instead to return 1371 * default information. The defaults are as follows: 1372 * 1373 * - For sun4u systems there's one MMU context domain per CPU. 1374 * This default is used by all sun4u systems except OPL. OPL systems 1375 * provide platform specific interface to map CPU ids to MMU ids 1376 * because on OPL more than 1 CPU shares a single MMU. 1377 * Note that on sun4v, there is one global context domain for 1378 * the entire system. This is to avoid running into potential problem 1379 * with ldom physical cpu substitution feature. 1380 * - The number of MMU context IDs supported on any CPU in the 1381 * system is 8K. 
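 *
 * As a concrete (editor-invented, hypothetical) illustration, a platform
 * on which pairs of CPUs share an MMU could override the default with
 * something along these lines:
 *
 *	void
 *	plat_cpuid_to_mmu_ctx_info(processorid_t cpuid, mmu_ctx_info_t *infop)
 *	{
 *		infop->mmu_idx = cpuid / 2;	(assumed: 2 CPUs per MMU)
 *		infop->mmu_nctxs = 8192;
 *	}
 *
 * The fallback below fills in the same two fields with the defaults
 * described above.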
1382 */ 1383 /*ARGSUSED*/ 1384 static void 1385 sfmmu_cpuid_to_mmu_ctx_info(processorid_t cpuid, mmu_ctx_info_t *infop) 1386 { 1387 infop->mmu_nctxs = nctxs; 1388 #ifndef sun4v 1389 infop->mmu_idx = cpu[cpuid]->cpu_seqid; 1390 #else /* sun4v */ 1391 infop->mmu_idx = 0; 1392 #endif /* sun4v */ 1393 } 1394 1395 /* 1396 * Called during CPU initialization to set the MMU context-related information 1397 * for a CPU. 1398 * 1399 * cpu_lock serializes accesses to mmu_ctxs and mmu_saved_gnum. 1400 */ 1401 void 1402 sfmmu_cpu_init(cpu_t *cp) 1403 { 1404 mmu_ctx_info_t info; 1405 mmu_ctx_t *mmu_ctxp; 1406 1407 ASSERT(MUTEX_HELD(&cpu_lock)); 1408 1409 if (&plat_cpuid_to_mmu_ctx_info == NULL) 1410 sfmmu_cpuid_to_mmu_ctx_info(cp->cpu_id, &info); 1411 else 1412 plat_cpuid_to_mmu_ctx_info(cp->cpu_id, &info); 1413 1414 ASSERT(info.mmu_idx < max_mmu_ctxdoms); 1415 1416 if ((mmu_ctxp = mmu_ctxs_tbl[info.mmu_idx]) == NULL) { 1417 /* Each mmu_ctx is cacheline aligned. */ 1418 mmu_ctxp = kmem_cache_alloc(mmuctxdom_cache, KM_SLEEP); 1419 bzero(mmu_ctxp, sizeof (mmu_ctx_t)); 1420 1421 mutex_init(&mmu_ctxp->mmu_lock, NULL, MUTEX_SPIN, 1422 (void *)ipltospl(DISP_LEVEL)); 1423 mmu_ctxp->mmu_idx = info.mmu_idx; 1424 mmu_ctxp->mmu_nctxs = info.mmu_nctxs; 1425 /* 1426 * Globally for lifetime of a system, 1427 * gnum must always increase. 1428 * mmu_saved_gnum is protected by the cpu_lock. 1429 */ 1430 mmu_ctxp->mmu_gnum = mmu_saved_gnum + 1; 1431 mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS; 1432 1433 sfmmu_mmu_kstat_create(mmu_ctxp); 1434 1435 mmu_ctxs_tbl[info.mmu_idx] = mmu_ctxp; 1436 } else { 1437 ASSERT(mmu_ctxp->mmu_idx == info.mmu_idx); 1438 } 1439 1440 /* 1441 * The mmu_lock is acquired here to prevent races with 1442 * the wrap-around code. 1443 */ 1444 mutex_enter(&mmu_ctxp->mmu_lock); 1445 1446 1447 mmu_ctxp->mmu_ncpus++; 1448 CPUSET_ADD(mmu_ctxp->mmu_cpuset, cp->cpu_id); 1449 CPU_MMU_IDX(cp) = info.mmu_idx; 1450 CPU_MMU_CTXP(cp) = mmu_ctxp; 1451 1452 mutex_exit(&mmu_ctxp->mmu_lock); 1453 } 1454 1455 /* 1456 * Called to perform MMU context-related cleanup for a CPU. 1457 */ 1458 void 1459 sfmmu_cpu_cleanup(cpu_t *cp) 1460 { 1461 mmu_ctx_t *mmu_ctxp; 1462 1463 ASSERT(MUTEX_HELD(&cpu_lock)); 1464 1465 mmu_ctxp = CPU_MMU_CTXP(cp); 1466 ASSERT(mmu_ctxp != NULL); 1467 1468 /* 1469 * The mmu_lock is acquired here to prevent races with 1470 * the wrap-around code. 1471 */ 1472 mutex_enter(&mmu_ctxp->mmu_lock); 1473 1474 CPU_MMU_CTXP(cp) = NULL; 1475 1476 CPUSET_DEL(mmu_ctxp->mmu_cpuset, cp->cpu_id); 1477 if (--mmu_ctxp->mmu_ncpus == 0) { 1478 mmu_ctxs_tbl[mmu_ctxp->mmu_idx] = NULL; 1479 mutex_exit(&mmu_ctxp->mmu_lock); 1480 mutex_destroy(&mmu_ctxp->mmu_lock); 1481 1482 if (mmu_ctxp->mmu_kstat) 1483 kstat_delete(mmu_ctxp->mmu_kstat); 1484 1485 /* mmu_saved_gnum is protected by the cpu_lock. */ 1486 if (mmu_saved_gnum < mmu_ctxp->mmu_gnum) 1487 mmu_saved_gnum = mmu_ctxp->mmu_gnum; 1488 1489 kmem_cache_free(mmuctxdom_cache, mmu_ctxp); 1490 1491 return; 1492 } 1493 1494 mutex_exit(&mmu_ctxp->mmu_lock); 1495 } 1496 1497 /* 1498 * Hat_setup, makes an address space context the current active one. 1499 * In sfmmu this translates to setting the secondary context with the 1500 * corresponding context. 1501 */ 1502 void 1503 hat_setup(struct hat *sfmmup, int allocflag) 1504 { 1505 hatlock_t *hatlockp; 1506 1507 /* Init needs some special treatment. */ 1508 if (allocflag == HAT_INIT) { 1509 /* 1510 * Make sure that we have 1511 * 1. a TSB 1512 * 2. a valid ctx that doesn't get stolen after this point. 
1513 */ 1514 hatlockp = sfmmu_hat_enter(sfmmup); 1515 1516 /* 1517 * Swap in the TSB. hat_init() allocates tsbinfos without 1518 * TSBs, but we need one for init, since the kernel does some 1519 * special things to set up its stack and needs the TSB to 1520 * resolve page faults. 1521 */ 1522 sfmmu_tsb_swapin(sfmmup, hatlockp); 1523 1524 sfmmu_get_ctx(sfmmup); 1525 1526 sfmmu_hat_exit(hatlockp); 1527 } else { 1528 ASSERT(allocflag == HAT_ALLOC); 1529 1530 hatlockp = sfmmu_hat_enter(sfmmup); 1531 kpreempt_disable(); 1532 1533 CPUSET_ADD(sfmmup->sfmmu_cpusran, CPU->cpu_id); 1534 1535 /* 1536 * sfmmu_setctx_sec takes <pgsz|cnum> as a parameter, 1537 * pagesize bits don't matter in this case since we are passing 1538 * INVALID_CONTEXT to it. 1539 */ 1540 sfmmu_setctx_sec(INVALID_CONTEXT); 1541 sfmmu_clear_utsbinfo(); 1542 1543 kpreempt_enable(); 1544 sfmmu_hat_exit(hatlockp); 1545 } 1546 } 1547 1548 /* 1549 * Free all the translation resources for the specified address space. 1550 * Called from as_free when an address space is being destroyed. 1551 */ 1552 void 1553 hat_free_start(struct hat *sfmmup) 1554 { 1555 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 1556 ASSERT(sfmmup != ksfmmup); 1557 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 1558 1559 sfmmup->sfmmu_free = 1; 1560 } 1561 1562 void 1563 hat_free_end(struct hat *sfmmup) 1564 { 1565 int i; 1566 1567 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 1568 if (sfmmup->sfmmu_ismhat) { 1569 for (i = 0; i < mmu_page_sizes; i++) { 1570 sfmmup->sfmmu_ttecnt[i] = 0; 1571 sfmmup->sfmmu_ismttecnt[i] = 0; 1572 } 1573 } else { 1574 /* EMPTY */ 1575 ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0); 1576 ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0); 1577 ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0); 1578 ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0); 1579 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0); 1580 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0); 1581 } 1582 1583 if (sfmmup->sfmmu_rmstat) { 1584 hat_freestat(sfmmup->sfmmu_as, NULL); 1585 } 1586 1587 while (sfmmup->sfmmu_tsb != NULL) { 1588 struct tsb_info *next = sfmmup->sfmmu_tsb->tsb_next; 1589 sfmmu_tsbinfo_free(sfmmup->sfmmu_tsb); 1590 sfmmup->sfmmu_tsb = next; 1591 } 1592 sfmmu_free_sfmmu(sfmmup); 1593 1594 kmem_cache_free(sfmmuid_cache, sfmmup); 1595 } 1596 1597 /* 1598 * Set up any translation structures, for the specified address space, 1599 * that are needed or preferred when the process is being swapped in. 1600 */ 1601 /* ARGSUSED */ 1602 void 1603 hat_swapin(struct hat *hat) 1604 { 1605 ASSERT(hat->sfmmu_xhat_provider == NULL); 1606 } 1607 1608 /* 1609 * Free all of the translation resources, for the specified address space, 1610 * that can be freed while the process is swapped out. Called from as_swapout. 1611 * Also, free up the ctx that this process was using. 1612 */ 1613 void 1614 hat_swapout(struct hat *sfmmup) 1615 { 1616 struct hmehash_bucket *hmebp; 1617 struct hme_blk *hmeblkp; 1618 struct hme_blk *pr_hblk = NULL; 1619 struct hme_blk *nx_hblk; 1620 int i; 1621 uint64_t hblkpa, prevpa, nx_pa; 1622 struct hme_blk *list = NULL; 1623 hatlock_t *hatlockp; 1624 struct tsb_info *tsbinfop; 1625 struct free_tsb { 1626 struct free_tsb *next; 1627 struct tsb_info *tsbinfop; 1628 }; /* free list of TSBs */ 1629 struct free_tsb *freelist, *last, *next; 1630 1631 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 1632 SFMMU_STAT(sf_swapout); 1633 1634 /* 1635 * There is no way to go from an as to all its translations in sfmmu. 
1636 * Here is one of the times when we take the big hit and traverse 1637 * the hash looking for hme_blks to free up. Not only do we free up 1638 * this as hme_blks but all those that are free. We are obviously 1639 * swapping because we need memory so let's free up as much 1640 * as we can. 1641 * 1642 * Note that we don't flush TLB/TSB here -- it's not necessary 1643 * because: 1644 * 1) we free the ctx we're using and throw away the TSB(s); 1645 * 2) processes aren't runnable while being swapped out. 1646 */ 1647 ASSERT(sfmmup != KHATID); 1648 for (i = 0; i <= UHMEHASH_SZ; i++) { 1649 hmebp = &uhme_hash[i]; 1650 SFMMU_HASH_LOCK(hmebp); 1651 hmeblkp = hmebp->hmeblkp; 1652 hblkpa = hmebp->hmeh_nextpa; 1653 prevpa = 0; 1654 pr_hblk = NULL; 1655 while (hmeblkp) { 1656 1657 ASSERT(!hmeblkp->hblk_xhat_bit); 1658 1659 if ((hmeblkp->hblk_tag.htag_id == sfmmup) && 1660 !hmeblkp->hblk_shw_bit && !hmeblkp->hblk_lckcnt) { 1661 (void) sfmmu_hblk_unload(sfmmup, hmeblkp, 1662 (caddr_t)get_hblk_base(hmeblkp), 1663 get_hblk_endaddr(hmeblkp), 1664 NULL, HAT_UNLOAD); 1665 } 1666 nx_hblk = hmeblkp->hblk_next; 1667 nx_pa = hmeblkp->hblk_nextpa; 1668 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 1669 ASSERT(!hmeblkp->hblk_lckcnt); 1670 sfmmu_hblk_hash_rm(hmebp, hmeblkp, 1671 prevpa, pr_hblk); 1672 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 1673 } else { 1674 pr_hblk = hmeblkp; 1675 prevpa = hblkpa; 1676 } 1677 hmeblkp = nx_hblk; 1678 hblkpa = nx_pa; 1679 } 1680 SFMMU_HASH_UNLOCK(hmebp); 1681 } 1682 1683 sfmmu_hblks_list_purge(&list); 1684 1685 /* 1686 * Now free up the ctx so that others can reuse it. 1687 */ 1688 hatlockp = sfmmu_hat_enter(sfmmup); 1689 1690 sfmmu_invalidate_ctx(sfmmup); 1691 1692 /* 1693 * Free TSBs, but not tsbinfos, and set SWAPPED flag. 1694 * If TSBs were never swapped in, just return. 1695 * This implies that we don't support partial swapping 1696 * of TSBs -- either all are swapped out, or none are. 1697 * 1698 * We must hold the HAT lock here to prevent racing with another 1699 * thread trying to unmap TTEs from the TSB or running the post- 1700 * relocator after relocating the TSB's memory. Unfortunately, we 1701 * can't free memory while holding the HAT lock or we could 1702 * deadlock, so we build a list of TSBs to be freed after marking 1703 * the tsbinfos as swapped out and free them after dropping the 1704 * lock. 1705 */ 1706 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 1707 sfmmu_hat_exit(hatlockp); 1708 return; 1709 } 1710 1711 SFMMU_FLAGS_SET(sfmmup, HAT_SWAPPED); 1712 last = freelist = NULL; 1713 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; 1714 tsbinfop = tsbinfop->tsb_next) { 1715 ASSERT((tsbinfop->tsb_flags & TSB_SWAPPED) == 0); 1716 1717 /* 1718 * Cast the TSB into a struct free_tsb and put it on the free 1719 * list. 1720 */ 1721 if (freelist == NULL) { 1722 last = freelist = (struct free_tsb *)tsbinfop->tsb_va; 1723 } else { 1724 last->next = (struct free_tsb *)tsbinfop->tsb_va; 1725 last = last->next; 1726 } 1727 last->next = NULL; 1728 last->tsbinfop = tsbinfop; 1729 tsbinfop->tsb_flags |= TSB_SWAPPED; 1730 /* 1731 * Zero out the TTE to clear the valid bit. 1732 * Note we can't use a value like 0xbad because we want to 1733 * ensure diagnostic bits are NEVER set on TTEs that might 1734 * be loaded. The intent is to catch any invalid access 1735 * to the swapped TSB, such as a thread running with a valid 1736 * context without first calling sfmmu_tsb_swapin() to 1737 * allocate TSB memory. 
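		 *
		 * (Editor's aside: the cast a few lines up is the classic
		 * trick of using the buffer being freed as its own list
		 * node, roughly
		 *
		 *	node = (struct free_tsb *)tsbinfop->tsb_va;
		 *	node->tsbinfop = tsbinfop;
		 *	node->next = NULL;
		 *
		 * so the list can be built without allocating memory while
		 * the HAT lock is held.)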
1738 */ 1739 tsbinfop->tsb_tte.ll = 0; 1740 } 1741 1742 #ifdef sun4v 1743 if (freelist) 1744 sfmmu_invalidate_tsbinfo(sfmmup); 1745 #endif /* sun4v */ 1746 1747 /* Now we can drop the lock and free the TSB memory. */ 1748 sfmmu_hat_exit(hatlockp); 1749 for (; freelist != NULL; freelist = next) { 1750 next = freelist->next; 1751 sfmmu_tsb_free(freelist->tsbinfop); 1752 } 1753 } 1754 1755 /* 1756 * Duplicate the translations of an as into another newas 1757 */ 1758 /* ARGSUSED */ 1759 int 1760 hat_dup(struct hat *hat, struct hat *newhat, caddr_t addr, size_t len, 1761 uint_t flag) 1762 { 1763 extern uint_t get_color_start(struct as *); 1764 1765 ASSERT(hat->sfmmu_xhat_provider == NULL); 1766 ASSERT((flag == 0) || (flag == HAT_DUP_ALL) || (flag == HAT_DUP_COW)); 1767 1768 if (flag == HAT_DUP_COW) { 1769 panic("hat_dup: HAT_DUP_COW not supported"); 1770 } 1771 if (flag == HAT_DUP_ALL && consistent_coloring == 0 && 1772 update_proc_pgcolorbase_after_fork != 0) { 1773 hat->sfmmu_clrbin = get_color_start(hat->sfmmu_as); 1774 } 1775 return (0); 1776 } 1777 1778 /* 1779 * Set up addr to map to page pp with protection prot. 1780 * As an optimization we also load the TSB with the 1781 * corresponding tte but it is no big deal if the tte gets kicked out. 1782 */ 1783 void 1784 hat_memload(struct hat *hat, caddr_t addr, struct page *pp, 1785 uint_t attr, uint_t flags) 1786 { 1787 tte_t tte; 1788 1789 1790 ASSERT(hat != NULL); 1791 ASSERT(PAGE_LOCKED(pp)); 1792 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 1793 ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG)); 1794 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 1795 1796 if (PP_ISFREE(pp)) { 1797 panic("hat_memload: loading a mapping to free page %p", 1798 (void *)pp); 1799 } 1800 1801 if (hat->sfmmu_xhat_provider) { 1802 XHAT_MEMLOAD(hat, addr, pp, attr, flags); 1803 return; 1804 } 1805 1806 ASSERT((hat == ksfmmup) || 1807 AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock)); 1808 1809 if (flags & ~SFMMU_LOAD_ALLFLAG) 1810 cmn_err(CE_NOTE, "hat_memload: unsupported flags %d", 1811 flags & ~SFMMU_LOAD_ALLFLAG); 1812 1813 if (hat->sfmmu_rmstat) 1814 hat_resvstat(MMU_PAGESIZE, hat->sfmmu_as, addr); 1815 1816 #if defined(SF_ERRATA_57) 1817 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) && 1818 (addr < errata57_limit) && (attr & PROT_EXEC) && 1819 !(flags & HAT_LOAD_SHARE)) { 1820 cmn_err(CE_WARN, "hat_memload: illegal attempt to make user " 1821 " page executable"); 1822 attr &= ~PROT_EXEC; 1823 } 1824 #endif 1825 1826 sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K); 1827 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, flags); 1828 1829 /* 1830 * Check TSB and TLB page sizes. 1831 */ 1832 if ((flags & HAT_LOAD_SHARE) == 0) { 1833 sfmmu_check_page_sizes(hat, 1); 1834 } 1835 } 1836 1837 /* 1838 * hat_devload can be called to map real memory (e.g. 1839 * /dev/kmem) and even though hat_devload will determine pf is 1840 * for memory, it will be unable to get a shared lock on the 1841 * page (because someone else has it exclusively) and will 1842 * pass dp = NULL. If tteload doesn't get a non-NULL 1843 * page pointer it can't cache memory. 
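 *
 * As a purely illustrative sketch of a typical call (kvaddr and dev_pa
 * are hypothetical names, not from this file), a driver mapping one
 * page of device registers uncached might do roughly:
 *
 *	hat_devload(kas.a_hat, kvaddr, MMU_PAGESIZE, mmu_btop(dev_pa),
 *	    PROT_READ | PROT_WRITE | HAT_STRICTORDER,
 *	    HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);
 *
 * Since such a pfn is not memory, the code below makes the mapping
 * uncacheable and, for HAT_STRICTORDER, sets the side effect bit.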
1844 */ 1845 void 1846 hat_devload(struct hat *hat, caddr_t addr, size_t len, pfn_t pfn, 1847 uint_t attr, int flags) 1848 { 1849 tte_t tte; 1850 struct page *pp = NULL; 1851 int use_lgpg = 0; 1852 1853 ASSERT(hat != NULL); 1854 1855 if (hat->sfmmu_xhat_provider) { 1856 XHAT_DEVLOAD(hat, addr, len, pfn, attr, flags); 1857 return; 1858 } 1859 1860 ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG)); 1861 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 1862 ASSERT((hat == ksfmmup) || 1863 AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock)); 1864 if (len == 0) 1865 panic("hat_devload: zero len"); 1866 if (flags & ~SFMMU_LOAD_ALLFLAG) 1867 cmn_err(CE_NOTE, "hat_devload: unsupported flags %d", 1868 flags & ~SFMMU_LOAD_ALLFLAG); 1869 1870 #if defined(SF_ERRATA_57) 1871 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) && 1872 (addr < errata57_limit) && (attr & PROT_EXEC) && 1873 !(flags & HAT_LOAD_SHARE)) { 1874 cmn_err(CE_WARN, "hat_devload: illegal attempt to make user " 1875 " page executable"); 1876 attr &= ~PROT_EXEC; 1877 } 1878 #endif 1879 1880 /* 1881 * If it's a memory page find its pp 1882 */ 1883 if (!(flags & HAT_LOAD_NOCONSIST) && pf_is_memory(pfn)) { 1884 pp = page_numtopp_nolock(pfn); 1885 if (pp == NULL) { 1886 flags |= HAT_LOAD_NOCONSIST; 1887 } else { 1888 if (PP_ISFREE(pp)) { 1889 panic("hat_memload: loading " 1890 "a mapping to free page %p", 1891 (void *)pp); 1892 } 1893 if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) { 1894 panic("hat_memload: loading a mapping " 1895 "to unlocked relocatable page %p", 1896 (void *)pp); 1897 } 1898 ASSERT(len == MMU_PAGESIZE); 1899 } 1900 } 1901 1902 if (hat->sfmmu_rmstat) 1903 hat_resvstat(len, hat->sfmmu_as, addr); 1904 1905 if (flags & HAT_LOAD_NOCONSIST) { 1906 attr |= SFMMU_UNCACHEVTTE; 1907 use_lgpg = 1; 1908 } 1909 if (!pf_is_memory(pfn)) { 1910 attr |= SFMMU_UNCACHEPTTE | HAT_NOSYNC; 1911 use_lgpg = 1; 1912 switch (attr & HAT_ORDER_MASK) { 1913 case HAT_STRICTORDER: 1914 case HAT_UNORDERED_OK: 1915 /* 1916 * we set the side effect bit for all non 1917 * memory mappings unless merging is ok 1918 */ 1919 attr |= SFMMU_SIDEFFECT; 1920 break; 1921 case HAT_MERGING_OK: 1922 case HAT_LOADCACHING_OK: 1923 case HAT_STORECACHING_OK: 1924 break; 1925 default: 1926 panic("hat_devload: bad attr"); 1927 break; 1928 } 1929 } 1930 while (len) { 1931 if (!use_lgpg) { 1932 sfmmu_memtte(&tte, pfn, attr, TTE8K); 1933 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 1934 flags); 1935 len -= MMU_PAGESIZE; 1936 addr += MMU_PAGESIZE; 1937 pfn++; 1938 continue; 1939 } 1940 /* 1941 * try to use large pages, check va/pa alignments 1942 * Note that 32M/256M page sizes are not (yet) supported. 
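 *
 * For example, the 4M case below is taken only while at least
 * MMU_PAGESIZE4M bytes remain, TTE4M is not set in disable_large_pages,
 * and both the virtual address and the physical address
 * (mmu_ptob(pfn)) are 4M aligned; otherwise we fall through to the
 * 512K, 64K and finally 8K cases.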
1943 */ 1944 if ((len >= MMU_PAGESIZE4M) && 1945 !((uintptr_t)addr & MMU_PAGEOFFSET4M) && 1946 !(disable_large_pages & (1 << TTE4M)) && 1947 !(mmu_ptob(pfn) & MMU_PAGEOFFSET4M)) { 1948 sfmmu_memtte(&tte, pfn, attr, TTE4M); 1949 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 1950 flags); 1951 len -= MMU_PAGESIZE4M; 1952 addr += MMU_PAGESIZE4M; 1953 pfn += MMU_PAGESIZE4M / MMU_PAGESIZE; 1954 } else if ((len >= MMU_PAGESIZE512K) && 1955 !((uintptr_t)addr & MMU_PAGEOFFSET512K) && 1956 !(disable_large_pages & (1 << TTE512K)) && 1957 !(mmu_ptob(pfn) & MMU_PAGEOFFSET512K)) { 1958 sfmmu_memtte(&tte, pfn, attr, TTE512K); 1959 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 1960 flags); 1961 len -= MMU_PAGESIZE512K; 1962 addr += MMU_PAGESIZE512K; 1963 pfn += MMU_PAGESIZE512K / MMU_PAGESIZE; 1964 } else if ((len >= MMU_PAGESIZE64K) && 1965 !((uintptr_t)addr & MMU_PAGEOFFSET64K) && 1966 !(disable_large_pages & (1 << TTE64K)) && 1967 !(mmu_ptob(pfn) & MMU_PAGEOFFSET64K)) { 1968 sfmmu_memtte(&tte, pfn, attr, TTE64K); 1969 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 1970 flags); 1971 len -= MMU_PAGESIZE64K; 1972 addr += MMU_PAGESIZE64K; 1973 pfn += MMU_PAGESIZE64K / MMU_PAGESIZE; 1974 } else { 1975 sfmmu_memtte(&tte, pfn, attr, TTE8K); 1976 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 1977 flags); 1978 len -= MMU_PAGESIZE; 1979 addr += MMU_PAGESIZE; 1980 pfn++; 1981 } 1982 } 1983 1984 /* 1985 * Check TSB and TLB page sizes. 1986 */ 1987 if ((flags & HAT_LOAD_SHARE) == 0) { 1988 sfmmu_check_page_sizes(hat, 1); 1989 } 1990 } 1991 1992 /* 1993 * Map the largest extend possible out of the page array. The array may NOT 1994 * be in order. The largest possible mapping a page can have 1995 * is specified in the p_szc field. The p_szc field 1996 * cannot change as long as there any mappings (large or small) 1997 * to any of the pages that make up the large page. (ie. any 1998 * promotion/demotion of page size is not up to the hat but up to 1999 * the page free list manager). The array 2000 * should consist of properly aligned contigous pages that are 2001 * part of a big page for a large mapping to be created. 2002 */ 2003 void 2004 hat_memload_array(struct hat *hat, caddr_t addr, size_t len, 2005 struct page **pps, uint_t attr, uint_t flags) 2006 { 2007 int ttesz; 2008 size_t mapsz; 2009 pgcnt_t numpg, npgs; 2010 tte_t tte; 2011 page_t *pp; 2012 uint_t large_pages_disable; 2013 2014 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 2015 2016 if (hat->sfmmu_xhat_provider) { 2017 XHAT_MEMLOAD_ARRAY(hat, addr, len, pps, attr, flags); 2018 return; 2019 } 2020 2021 if (hat->sfmmu_rmstat) 2022 hat_resvstat(len, hat->sfmmu_as, addr); 2023 2024 #if defined(SF_ERRATA_57) 2025 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) && 2026 (addr < errata57_limit) && (attr & PROT_EXEC) && 2027 !(flags & HAT_LOAD_SHARE)) { 2028 cmn_err(CE_WARN, "hat_memload_array: illegal attempt to make " 2029 "user page executable"); 2030 attr &= ~PROT_EXEC; 2031 } 2032 #endif 2033 2034 /* Get number of pages */ 2035 npgs = len >> MMU_PAGESHIFT; 2036 2037 if (flags & HAT_LOAD_SHARE) { 2038 large_pages_disable = disable_ism_large_pages; 2039 } else { 2040 large_pages_disable = disable_large_pages; 2041 } 2042 2043 if (npgs < NHMENTS || large_pages_disable == LARGE_PAGES_OFF) { 2044 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs); 2045 return; 2046 } 2047 2048 while (npgs >= NHMENTS) { 2049 pp = *pps; 2050 for (ttesz = pp->p_szc; ttesz != TTE8K; ttesz--) { 2051 /* 2052 * Check if this page size is disabled. 
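 * (large_pages_disable is a bitmask indexed by TTE size code, so the
 * (1 << ttesz) test below skips any size that has been turned off.)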
2053 */ 2054 if (large_pages_disable & (1 << ttesz)) 2055 continue; 2056 2057 numpg = TTEPAGES(ttesz); 2058 mapsz = numpg << MMU_PAGESHIFT; 2059 if ((npgs >= numpg) && 2060 IS_P2ALIGNED(addr, mapsz) && 2061 IS_P2ALIGNED(pp->p_pagenum, numpg)) { 2062 /* 2063 * At this point we have enough pages and 2064 * we know the virtual address and the pfn 2065 * are properly aligned. We still need 2066 * to check for physical contiguity but since 2067 * it is very likely that this is the case 2068 * we will assume they are so and undo 2069 * the request if necessary. It would 2070 * be great if we could get a hint flag 2071 * like HAT_CONTIG which would tell us 2072 * the pages are contigous for sure. 2073 */ 2074 sfmmu_memtte(&tte, (*pps)->p_pagenum, 2075 attr, ttesz); 2076 if (!sfmmu_tteload_array(hat, &tte, addr, 2077 pps, flags)) { 2078 break; 2079 } 2080 } 2081 } 2082 if (ttesz == TTE8K) { 2083 /* 2084 * We were not able to map array using a large page 2085 * batch a hmeblk or fraction at a time. 2086 */ 2087 numpg = ((uintptr_t)addr >> MMU_PAGESHIFT) 2088 & (NHMENTS-1); 2089 numpg = NHMENTS - numpg; 2090 ASSERT(numpg <= npgs); 2091 mapsz = numpg * MMU_PAGESIZE; 2092 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, 2093 numpg); 2094 } 2095 addr += mapsz; 2096 npgs -= numpg; 2097 pps += numpg; 2098 } 2099 2100 if (npgs) { 2101 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs); 2102 } 2103 2104 /* 2105 * Check TSB and TLB page sizes. 2106 */ 2107 if ((flags & HAT_LOAD_SHARE) == 0) { 2108 sfmmu_check_page_sizes(hat, 1); 2109 } 2110 } 2111 2112 /* 2113 * Function tries to batch 8K pages into the same hme blk. 2114 */ 2115 static void 2116 sfmmu_memload_batchsmall(struct hat *hat, caddr_t vaddr, page_t **pps, 2117 uint_t attr, uint_t flags, pgcnt_t npgs) 2118 { 2119 tte_t tte; 2120 page_t *pp; 2121 struct hmehash_bucket *hmebp; 2122 struct hme_blk *hmeblkp; 2123 int index; 2124 2125 while (npgs) { 2126 /* 2127 * Acquire the hash bucket. 2128 */ 2129 hmebp = sfmmu_tteload_acquire_hashbucket(hat, vaddr, TTE8K); 2130 ASSERT(hmebp); 2131 2132 /* 2133 * Find the hment block. 2134 */ 2135 hmeblkp = sfmmu_tteload_find_hmeblk(hat, hmebp, vaddr, 2136 TTE8K, flags); 2137 ASSERT(hmeblkp); 2138 2139 do { 2140 /* 2141 * Make the tte. 2142 */ 2143 pp = *pps; 2144 sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K); 2145 2146 /* 2147 * Add the translation. 2148 */ 2149 (void) sfmmu_tteload_addentry(hat, hmeblkp, &tte, 2150 vaddr, pps, flags); 2151 2152 /* 2153 * Goto next page. 2154 */ 2155 pps++; 2156 npgs--; 2157 2158 /* 2159 * Goto next address. 2160 */ 2161 vaddr += MMU_PAGESIZE; 2162 2163 /* 2164 * Don't crossover into a different hmentblk. 2165 */ 2166 index = (int)(((uintptr_t)vaddr >> MMU_PAGESHIFT) & 2167 (NHMENTS-1)); 2168 2169 } while (index != 0 && npgs != 0); 2170 2171 /* 2172 * Release the hash bucket. 
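 *
 * The do/while above terminates when index wraps to zero (the next
 * NHMENTS boundary) or the pages run out, so every tte added in one
 * pass belongs to the hmeblk found at the top of the loop; the bucket
 * can now be released before moving on to the next block.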
2173 */ 2174 2175 sfmmu_tteload_release_hashbucket(hmebp); 2176 } 2177 } 2178 2179 /* 2180 * Construct a tte for a page: 2181 * 2182 * tte_valid = 1 2183 * tte_size2 = size & TTE_SZ2_BITS (Panther and Olympus-C only) 2184 * tte_size = size 2185 * tte_nfo = attr & HAT_NOFAULT 2186 * tte_ie = attr & HAT_STRUCTURE_LE 2187 * tte_hmenum = hmenum 2188 * tte_pahi = pp->p_pagenum >> TTE_PASHIFT; 2189 * tte_palo = pp->p_pagenum & TTE_PALOMASK; 2190 * tte_ref = 1 (optimization) 2191 * tte_wr_perm = attr & PROT_WRITE; 2192 * tte_no_sync = attr & HAT_NOSYNC 2193 * tte_lock = attr & SFMMU_LOCKTTE 2194 * tte_cp = !(attr & SFMMU_UNCACHEPTTE) 2195 * tte_cv = !(attr & SFMMU_UNCACHEVTTE) 2196 * tte_e = attr & SFMMU_SIDEFFECT 2197 * tte_priv = !(attr & PROT_USER) 2198 * tte_hwwr = if nosync is set and it is writable we set the mod bit (opt) 2199 * tte_glb = 0 2200 */ 2201 void 2202 sfmmu_memtte(tte_t *ttep, pfn_t pfn, uint_t attr, int tte_sz) 2203 { 2204 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 2205 2206 ttep->tte_inthi = MAKE_TTE_INTHI(pfn, attr, tte_sz, 0 /* hmenum */); 2207 ttep->tte_intlo = MAKE_TTE_INTLO(pfn, attr, tte_sz, 0 /* hmenum */); 2208 2209 if (TTE_IS_NOSYNC(ttep)) { 2210 TTE_SET_REF(ttep); 2211 if (TTE_IS_WRITABLE(ttep)) { 2212 TTE_SET_MOD(ttep); 2213 } 2214 } 2215 if (TTE_IS_NFO(ttep) && TTE_IS_EXECUTABLE(ttep)) { 2216 panic("sfmmu_memtte: can't set both NFO and EXEC bits"); 2217 } 2218 } 2219 2220 /* 2221 * This function will add a translation to the hme_blk and allocate the 2222 * hme_blk if one does not exist. 2223 * If a page structure is specified then it will add the 2224 * corresponding hment to the mapping list. 2225 * It will also update the hmenum field for the tte. 2226 */ 2227 void 2228 sfmmu_tteload(struct hat *sfmmup, tte_t *ttep, caddr_t vaddr, page_t *pp, 2229 uint_t flags) 2230 { 2231 (void) sfmmu_tteload_array(sfmmup, ttep, vaddr, &pp, flags); 2232 } 2233 2234 /* 2235 * Load (ttep != NULL) or unload (ttep == NULL) one entry in the TSB. 2236 * Assumes that a particular page size may only be resident in one TSB. 2237 */ 2238 static void 2239 sfmmu_mod_tsb(sfmmu_t *sfmmup, caddr_t vaddr, tte_t *ttep, int ttesz) 2240 { 2241 struct tsb_info *tsbinfop = NULL; 2242 uint64_t tag; 2243 struct tsbe *tsbe_addr; 2244 uint64_t tsb_base; 2245 uint_t tsb_size; 2246 int vpshift = MMU_PAGESHIFT; 2247 int phys = 0; 2248 2249 if (sfmmup == ksfmmup) { /* No support for 32/256M ksfmmu pages */ 2250 phys = ktsb_phys; 2251 if (ttesz >= TTE4M) { 2252 #ifndef sun4v 2253 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M)); 2254 #endif 2255 tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base; 2256 tsb_size = ktsb4m_szcode; 2257 } else { 2258 tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base; 2259 tsb_size = ktsb_szcode; 2260 } 2261 } else { 2262 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz); 2263 2264 /* 2265 * If there isn't a TSB for this page size, or the TSB is 2266 * swapped out, there is nothing to do. Note that the latter 2267 * case seems impossible but can occur if hat_pageunload() 2268 * is called on an ISM mapping while the process is swapped 2269 * out. 2270 */ 2271 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED)) 2272 return; 2273 2274 /* 2275 * If another thread is in the middle of relocating a TSB 2276 * we can't unload the entry so set a flag so that the 2277 * TSB will be flushed before it can be accessed by the 2278 * process. 
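 *
 * (Only the unload case, ttep == NULL, needs the TSB_FLUSH_NEEDED
 * hint; a skipped load is harmless since the TSB is only a cache and
 * the entry will simply be faulted in later.)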
2279 */ 2280 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) { 2281 if (ttep == NULL) 2282 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED; 2283 return; 2284 } 2285 #if defined(UTSB_PHYS) 2286 phys = 1; 2287 tsb_base = (uint64_t)tsbinfop->tsb_pa; 2288 #else 2289 tsb_base = (uint64_t)tsbinfop->tsb_va; 2290 #endif 2291 tsb_size = tsbinfop->tsb_szc; 2292 } 2293 if (ttesz >= TTE4M) 2294 vpshift = MMU_PAGESHIFT4M; 2295 2296 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size); 2297 tag = sfmmu_make_tsbtag(vaddr); 2298 2299 if (ttep == NULL) { 2300 sfmmu_unload_tsbe(tsbe_addr, tag, phys); 2301 } else { 2302 if (ttesz >= TTE4M) { 2303 SFMMU_STAT(sf_tsb_load4m); 2304 } else { 2305 SFMMU_STAT(sf_tsb_load8k); 2306 } 2307 2308 sfmmu_load_tsbe(tsbe_addr, tag, ttep, phys); 2309 } 2310 } 2311 2312 /* 2313 * Unmap all entries from [start, end) matching the given page size. 2314 * 2315 * This function is used primarily to unmap replicated 64K or 512K entries 2316 * from the TSB that are inserted using the base page size TSB pointer, but 2317 * it may also be called to unmap a range of addresses from the TSB. 2318 */ 2319 void 2320 sfmmu_unload_tsb_range(sfmmu_t *sfmmup, caddr_t start, caddr_t end, int ttesz) 2321 { 2322 struct tsb_info *tsbinfop; 2323 uint64_t tag; 2324 struct tsbe *tsbe_addr; 2325 caddr_t vaddr; 2326 uint64_t tsb_base; 2327 int vpshift, vpgsz; 2328 uint_t tsb_size; 2329 int phys = 0; 2330 2331 /* 2332 * Assumptions: 2333 * If ttesz == 8K, 64K or 512K, we walk through the range 8K 2334 * at a time shooting down any valid entries we encounter. 2335 * 2336 * If ttesz >= 4M we walk the range 4M at a time shooting 2337 * down any valid mappings we find. 2338 */ 2339 if (sfmmup == ksfmmup) { 2340 phys = ktsb_phys; 2341 if (ttesz >= TTE4M) { 2342 #ifndef sun4v 2343 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M)); 2344 #endif 2345 tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base; 2346 tsb_size = ktsb4m_szcode; 2347 } else { 2348 tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base; 2349 tsb_size = ktsb_szcode; 2350 } 2351 } else { 2352 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz); 2353 2354 /* 2355 * If there isn't a TSB for this page size, or the TSB is 2356 * swapped out, there is nothing to do. Note that the latter 2357 * case seems impossible but can occur if hat_pageunload() 2358 * is called on an ISM mapping while the process is swapped 2359 * out. 2360 */ 2361 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED)) 2362 return; 2363 2364 /* 2365 * If another thread is in the middle of relocating a TSB 2366 * we can't unload the entry so set a flag so that the 2367 * TSB will be flushed before it can be accessed by the 2368 * process. 2369 */ 2370 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) { 2371 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED; 2372 return; 2373 } 2374 #if defined(UTSB_PHYS) 2375 phys = 1; 2376 tsb_base = (uint64_t)tsbinfop->tsb_pa; 2377 #else 2378 tsb_base = (uint64_t)tsbinfop->tsb_va; 2379 #endif 2380 tsb_size = tsbinfop->tsb_szc; 2381 } 2382 if (ttesz >= TTE4M) { 2383 vpshift = MMU_PAGESHIFT4M; 2384 vpgsz = MMU_PAGESIZE4M; 2385 } else { 2386 vpshift = MMU_PAGESHIFT; 2387 vpgsz = MMU_PAGESIZE; 2388 } 2389 2390 for (vaddr = start; vaddr < end; vaddr += vpgsz) { 2391 tag = sfmmu_make_tsbtag(vaddr); 2392 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size); 2393 sfmmu_unload_tsbe(tsbe_addr, tag, phys); 2394 } 2395 } 2396 2397 /* 2398 * Select the optimum TSB size given the number of mappings 2399 * that need to be cached. 
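 *
 * In effect the result is the smallest size code, capped at
 * tsb_max_growsize, whose SFMMU_RSS_TSBSIZE() threshold covers pgcnt.
 * A hypothetical caller resizing after an RSS check might use it
 * roughly as (illustrative only):
 *
 *	new_szc = sfmmu_select_tsb_szc(sfmmup->sfmmu_ttecnt[TTE8K]);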
2400 */ 2401 static int 2402 sfmmu_select_tsb_szc(pgcnt_t pgcnt) 2403 { 2404 int szc = 0; 2405 2406 #ifdef DEBUG 2407 if (tsb_grow_stress) { 2408 uint32_t randval = (uint32_t)gettick() >> 4; 2409 return (randval % (tsb_max_growsize + 1)); 2410 } 2411 #endif /* DEBUG */ 2412 2413 while ((szc < tsb_max_growsize) && (pgcnt > SFMMU_RSS_TSBSIZE(szc))) 2414 szc++; 2415 return (szc); 2416 } 2417 2418 /* 2419 * This function will add a translation to the hme_blk and allocate the 2420 * hme_blk if one does not exist. 2421 * If a page structure is specified then it will add the 2422 * corresponding hment to the mapping list. 2423 * It will also update the hmenum field for the tte. 2424 * Furthermore, it attempts to create a large page translation 2425 * for <addr,hat> at page array pps. It assumes addr and first 2426 * pp is correctly aligned. It returns 0 if successful and 1 otherwise. 2427 */ 2428 static int 2429 sfmmu_tteload_array(sfmmu_t *sfmmup, tte_t *ttep, caddr_t vaddr, 2430 page_t **pps, uint_t flags) 2431 { 2432 struct hmehash_bucket *hmebp; 2433 struct hme_blk *hmeblkp; 2434 int ret; 2435 uint_t size; 2436 2437 /* 2438 * Get mapping size. 2439 */ 2440 size = TTE_CSZ(ttep); 2441 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size))); 2442 2443 /* 2444 * Acquire the hash bucket. 2445 */ 2446 hmebp = sfmmu_tteload_acquire_hashbucket(sfmmup, vaddr, size); 2447 ASSERT(hmebp); 2448 2449 /* 2450 * Find the hment block. 2451 */ 2452 hmeblkp = sfmmu_tteload_find_hmeblk(sfmmup, hmebp, vaddr, size, flags); 2453 ASSERT(hmeblkp); 2454 2455 /* 2456 * Add the translation. 2457 */ 2458 ret = sfmmu_tteload_addentry(sfmmup, hmeblkp, ttep, vaddr, pps, flags); 2459 2460 /* 2461 * Release the hash bucket. 2462 */ 2463 sfmmu_tteload_release_hashbucket(hmebp); 2464 2465 return (ret); 2466 } 2467 2468 /* 2469 * Function locks and returns a pointer to the hash bucket for vaddr and size. 2470 */ 2471 static struct hmehash_bucket * 2472 sfmmu_tteload_acquire_hashbucket(sfmmu_t *sfmmup, caddr_t vaddr, int size) 2473 { 2474 struct hmehash_bucket *hmebp; 2475 int hmeshift; 2476 2477 hmeshift = HME_HASH_SHIFT(size); 2478 2479 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift); 2480 2481 SFMMU_HASH_LOCK(hmebp); 2482 2483 return (hmebp); 2484 } 2485 2486 /* 2487 * Function returns a pointer to an hmeblk in the hash bucket, hmebp. If the 2488 * hmeblk doesn't exists for the [sfmmup, vaddr & size] signature, a hmeblk is 2489 * allocated. 2490 */ 2491 static struct hme_blk * 2492 sfmmu_tteload_find_hmeblk(sfmmu_t *sfmmup, struct hmehash_bucket *hmebp, 2493 caddr_t vaddr, uint_t size, uint_t flags) 2494 { 2495 hmeblk_tag hblktag; 2496 int hmeshift; 2497 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL; 2498 uint64_t hblkpa, prevpa; 2499 struct kmem_cache *sfmmu_cache; 2500 uint_t forcefree; 2501 2502 hblktag.htag_id = sfmmup; 2503 hmeshift = HME_HASH_SHIFT(size); 2504 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift); 2505 hblktag.htag_rehash = HME_HASH_REHASH(size); 2506 2507 ttearray_realloc: 2508 2509 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, hblkpa, 2510 pr_hblk, prevpa, &list); 2511 2512 /* 2513 * We block until hblk_reserve_lock is released; it's held by 2514 * the thread, temporarily using hblk_reserve, until hblk_reserve is 2515 * replaced by a hblk from sfmmu8_cache. 
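 *
 * Note that the mutex_enter()/mutex_exit() pair below is used purely
 * as a wait point: we do not need to own hblk_reserve_lock, we only
 * need to block until the reserving thread has dropped it, after
 * which the hash is searched again.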
2516 */ 2517 if (hmeblkp == (struct hme_blk *)hblk_reserve && 2518 hblk_reserve_thread != curthread) { 2519 SFMMU_HASH_UNLOCK(hmebp); 2520 mutex_enter(&hblk_reserve_lock); 2521 mutex_exit(&hblk_reserve_lock); 2522 SFMMU_STAT(sf_hblk_reserve_hit); 2523 SFMMU_HASH_LOCK(hmebp); 2524 goto ttearray_realloc; 2525 } 2526 2527 if (hmeblkp == NULL) { 2528 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size, 2529 hblktag, flags); 2530 } else { 2531 /* 2532 * It is possible for 8k and 64k hblks to collide since they 2533 * have the same rehash value. This is because we 2534 * lazily free hblks and 8K/64K blks could be lingering. 2535 * If we find size mismatch we free the block and & try again. 2536 */ 2537 if (get_hblk_ttesz(hmeblkp) != size) { 2538 ASSERT(!hmeblkp->hblk_vcnt); 2539 ASSERT(!hmeblkp->hblk_hmecnt); 2540 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, pr_hblk); 2541 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 2542 goto ttearray_realloc; 2543 } 2544 if (hmeblkp->hblk_shw_bit) { 2545 /* 2546 * if the hblk was previously used as a shadow hblk then 2547 * we will change it to a normal hblk 2548 */ 2549 if (hmeblkp->hblk_shw_mask) { 2550 sfmmu_shadow_hcleanup(sfmmup, hmeblkp, hmebp); 2551 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 2552 goto ttearray_realloc; 2553 } else { 2554 hmeblkp->hblk_shw_bit = 0; 2555 } 2556 } 2557 SFMMU_STAT(sf_hblk_hit); 2558 } 2559 2560 /* 2561 * hat_memload() should never call kmem_cache_free(); see block 2562 * comment showing the stacktrace in sfmmu_hblk_alloc(); 2563 * enqueue each hblk in the list to reserve list if it's created 2564 * from sfmmu8_cache *and* sfmmup == KHATID. 2565 */ 2566 forcefree = (sfmmup == KHATID) ? 1 : 0; 2567 while ((pr_hblk = list) != NULL) { 2568 list = pr_hblk->hblk_next; 2569 sfmmu_cache = get_hblk_cache(pr_hblk); 2570 if ((sfmmu_cache == sfmmu8_cache) && 2571 sfmmu_put_free_hblk(pr_hblk, forcefree)) 2572 continue; 2573 2574 ASSERT(sfmmup != KHATID); 2575 kmem_cache_free(sfmmu_cache, pr_hblk); 2576 } 2577 2578 ASSERT(get_hblk_ttesz(hmeblkp) == size); 2579 ASSERT(!hmeblkp->hblk_shw_bit); 2580 2581 return (hmeblkp); 2582 } 2583 2584 /* 2585 * Function adds a tte entry into the hmeblk. It returns 0 if successful and 1 2586 * otherwise. 2587 */ 2588 static int 2589 sfmmu_tteload_addentry(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, tte_t *ttep, 2590 caddr_t vaddr, page_t **pps, uint_t flags) 2591 { 2592 page_t *pp = *pps; 2593 int hmenum, size, remap; 2594 tte_t tteold, flush_tte; 2595 #ifdef DEBUG 2596 tte_t orig_old; 2597 #endif /* DEBUG */ 2598 struct sf_hment *sfhme; 2599 kmutex_t *pml, *pmtx; 2600 hatlock_t *hatlockp; 2601 2602 /* 2603 * remove this panic when we decide to let user virtual address 2604 * space be >= USERLIMIT. 2605 */ 2606 if (!TTE_IS_PRIVILEGED(ttep) && vaddr >= (caddr_t)USERLIMIT) 2607 panic("user addr %p in kernel space", vaddr); 2608 #if defined(TTE_IS_GLOBAL) 2609 if (TTE_IS_GLOBAL(ttep)) 2610 panic("sfmmu_tteload: creating global tte"); 2611 #endif 2612 2613 #ifdef DEBUG 2614 if (pf_is_memory(sfmmu_ttetopfn(ttep, vaddr)) && 2615 !TTE_IS_PCACHEABLE(ttep) && !sfmmu_allow_nc_trans) 2616 panic("sfmmu_tteload: non cacheable memory tte"); 2617 #endif /* DEBUG */ 2618 2619 if ((flags & HAT_LOAD_SHARE) || !TTE_IS_REF(ttep) || 2620 !TTE_IS_MOD(ttep)) { 2621 /* 2622 * Don't load TSB for dummy as in ISM. Also don't preload 2623 * the TSB if the TTE isn't writable since we're likely to 2624 * fault on it again -- preloading can be fairly expensive. 
2625 */ 2626 flags |= SFMMU_NO_TSBLOAD; 2627 } 2628 2629 size = TTE_CSZ(ttep); 2630 switch (size) { 2631 case TTE8K: 2632 SFMMU_STAT(sf_tteload8k); 2633 break; 2634 case TTE64K: 2635 SFMMU_STAT(sf_tteload64k); 2636 break; 2637 case TTE512K: 2638 SFMMU_STAT(sf_tteload512k); 2639 break; 2640 case TTE4M: 2641 SFMMU_STAT(sf_tteload4m); 2642 break; 2643 case (TTE32M): 2644 SFMMU_STAT(sf_tteload32m); 2645 ASSERT(mmu_page_sizes == max_mmu_page_sizes); 2646 break; 2647 case (TTE256M): 2648 SFMMU_STAT(sf_tteload256m); 2649 ASSERT(mmu_page_sizes == max_mmu_page_sizes); 2650 break; 2651 } 2652 2653 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size))); 2654 2655 HBLKTOHME_IDX(sfhme, hmeblkp, vaddr, hmenum); 2656 2657 /* 2658 * Need to grab mlist lock here so that pageunload 2659 * will not change tte behind us. 2660 */ 2661 if (pp) { 2662 pml = sfmmu_mlist_enter(pp); 2663 } 2664 2665 sfmmu_copytte(&sfhme->hme_tte, &tteold); 2666 /* 2667 * Look for corresponding hment and if valid verify 2668 * pfns are equal. 2669 */ 2670 remap = TTE_IS_VALID(&tteold); 2671 if (remap) { 2672 pfn_t new_pfn, old_pfn; 2673 2674 old_pfn = TTE_TO_PFN(vaddr, &tteold); 2675 new_pfn = TTE_TO_PFN(vaddr, ttep); 2676 2677 if (flags & HAT_LOAD_REMAP) { 2678 /* make sure we are remapping same type of pages */ 2679 if (pf_is_memory(old_pfn) != pf_is_memory(new_pfn)) { 2680 panic("sfmmu_tteload - tte remap io<->memory"); 2681 } 2682 if (old_pfn != new_pfn && 2683 (pp != NULL || sfhme->hme_page != NULL)) { 2684 panic("sfmmu_tteload - tte remap pp != NULL"); 2685 } 2686 } else if (old_pfn != new_pfn) { 2687 panic("sfmmu_tteload - tte remap, hmeblkp 0x%p", 2688 (void *)hmeblkp); 2689 } 2690 ASSERT(TTE_CSZ(&tteold) == TTE_CSZ(ttep)); 2691 } 2692 2693 if (pp) { 2694 if (size == TTE8K) { 2695 #ifdef VAC 2696 /* 2697 * Handle VAC consistency 2698 */ 2699 if (!remap && (cache & CACHE_VAC) && !PP_ISNC(pp)) { 2700 sfmmu_vac_conflict(sfmmup, vaddr, pp); 2701 } 2702 #endif 2703 2704 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) { 2705 pmtx = sfmmu_page_enter(pp); 2706 PP_CLRRO(pp); 2707 sfmmu_page_exit(pmtx); 2708 } else if (!PP_ISMAPPED(pp) && 2709 (!TTE_IS_WRITABLE(ttep)) && !(PP_ISMOD(pp))) { 2710 pmtx = sfmmu_page_enter(pp); 2711 if (!(PP_ISMOD(pp))) { 2712 PP_SETRO(pp); 2713 } 2714 sfmmu_page_exit(pmtx); 2715 } 2716 2717 } else if (sfmmu_pagearray_setup(vaddr, pps, ttep, remap)) { 2718 /* 2719 * sfmmu_pagearray_setup failed so return 2720 */ 2721 sfmmu_mlist_exit(pml); 2722 return (1); 2723 } 2724 } 2725 2726 /* 2727 * Make sure hment is not on a mapping list. 2728 */ 2729 ASSERT(remap || (sfhme->hme_page == NULL)); 2730 2731 /* if it is not a remap then hme->next better be NULL */ 2732 ASSERT((!remap) ? sfhme->hme_next == NULL : 1); 2733 2734 if (flags & HAT_LOAD_LOCK) { 2735 if (((int)hmeblkp->hblk_lckcnt + 1) >= MAX_HBLK_LCKCNT) { 2736 panic("too high lckcnt-hmeblk %p", 2737 (void *)hmeblkp); 2738 } 2739 atomic_add_16(&hmeblkp->hblk_lckcnt, 1); 2740 2741 HBLK_STACK_TRACE(hmeblkp, HBLK_LOCK); 2742 } 2743 2744 #ifdef VAC 2745 if (pp && PP_ISNC(pp)) { 2746 /* 2747 * If the physical page is marked to be uncacheable, like 2748 * by a vac conflict, make sure the new mapping is also 2749 * uncacheable. 
2750 */ 2751 TTE_CLR_VCACHEABLE(ttep); 2752 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR); 2753 } 2754 #endif 2755 ttep->tte_hmenum = hmenum; 2756 2757 #ifdef DEBUG 2758 orig_old = tteold; 2759 #endif /* DEBUG */ 2760 2761 while (sfmmu_modifytte_try(&tteold, ttep, &sfhme->hme_tte) < 0) { 2762 if ((sfmmup == KHATID) && 2763 (flags & (HAT_LOAD_LOCK | HAT_LOAD_REMAP))) { 2764 sfmmu_copytte(&sfhme->hme_tte, &tteold); 2765 } 2766 #ifdef DEBUG 2767 chk_tte(&orig_old, &tteold, ttep, hmeblkp); 2768 #endif /* DEBUG */ 2769 } 2770 2771 if (!TTE_IS_VALID(&tteold)) { 2772 2773 atomic_add_16(&hmeblkp->hblk_vcnt, 1); 2774 atomic_add_long(&sfmmup->sfmmu_ttecnt[size], 1); 2775 2776 /* 2777 * HAT_RELOAD_SHARE has been deprecated with lpg DISM. 2778 */ 2779 2780 if (size > TTE8K && (flags & HAT_LOAD_SHARE) == 0 && 2781 sfmmup != ksfmmup) { 2782 /* 2783 * If this is the first large mapping for the process 2784 * we must force any CPUs running this process to TL=0 2785 * where they will reload the HAT flags from the 2786 * tsbmiss area. This is necessary to make the large 2787 * mappings we are about to load visible to those CPUs; 2788 * otherwise they'll loop forever calling pagefault() 2789 * since we don't search large hash chains by default. 2790 */ 2791 hatlockp = sfmmu_hat_enter(sfmmup); 2792 if (size == TTE512K && 2793 !SFMMU_FLAGS_ISSET(sfmmup, HAT_512K_FLAG)) { 2794 SFMMU_FLAGS_SET(sfmmup, HAT_512K_FLAG); 2795 sfmmu_sync_mmustate(sfmmup); 2796 } else if (size == TTE4M && 2797 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4M_FLAG)) { 2798 SFMMU_FLAGS_SET(sfmmup, HAT_4M_FLAG); 2799 sfmmu_sync_mmustate(sfmmup); 2800 } else if (size == TTE64K && 2801 !SFMMU_FLAGS_ISSET(sfmmup, HAT_64K_FLAG)) { 2802 SFMMU_FLAGS_SET(sfmmup, HAT_64K_FLAG); 2803 /* no sync mmustate; 64K shares 8K hashes */ 2804 } else if (mmu_page_sizes == max_mmu_page_sizes) { 2805 if (size == TTE32M && 2806 !SFMMU_FLAGS_ISSET(sfmmup, HAT_32M_FLAG)) { 2807 SFMMU_FLAGS_SET(sfmmup, HAT_32M_FLAG); 2808 sfmmu_sync_mmustate(sfmmup); 2809 } else if (size == TTE256M && 2810 !SFMMU_FLAGS_ISSET(sfmmup, HAT_256M_FLAG)) { 2811 SFMMU_FLAGS_SET(sfmmup, HAT_256M_FLAG); 2812 sfmmu_sync_mmustate(sfmmup); 2813 } 2814 } 2815 if (size >= TTE4M && (flags & HAT_LOAD_TEXT) && 2816 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) { 2817 SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG); 2818 } 2819 sfmmu_hat_exit(hatlockp); 2820 } 2821 } 2822 ASSERT(TTE_IS_VALID(&sfhme->hme_tte)); 2823 2824 flush_tte.tte_intlo = (tteold.tte_intlo ^ ttep->tte_intlo) & 2825 hw_tte.tte_intlo; 2826 flush_tte.tte_inthi = (tteold.tte_inthi ^ ttep->tte_inthi) & 2827 hw_tte.tte_inthi; 2828 2829 if (remap && (flush_tte.tte_inthi || flush_tte.tte_intlo)) { 2830 /* 2831 * If remap and new tte differs from old tte we need 2832 * to sync the mod bit and flush TLB/TSB. We don't 2833 * need to sync ref bit because we currently always set 2834 * ref bit in tteload. 2835 */ 2836 ASSERT(TTE_IS_REF(ttep)); 2837 if (TTE_IS_MOD(&tteold)) { 2838 sfmmu_ttesync(sfmmup, vaddr, &tteold, pp); 2839 } 2840 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 0); 2841 xt_sync(sfmmup->sfmmu_cpusran); 2842 } 2843 2844 if ((flags & SFMMU_NO_TSBLOAD) == 0) { 2845 /* 2846 * We only preload 8K and 4M mappings into the TSB, since 2847 * 64K and 512K mappings are replicated and hence don't 2848 * have a single, unique TSB entry. Ditto for 32M/256M. 
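 *
 * (A 64K or 512K mapping may end up with a TSB entry for each 8K
 * virtual page it spans, since its entries are inserted through the
 * base page size TSB pointer; there is no single entry that stands
 * for the whole mapping. See sfmmu_unload_tsb_range() above.)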
2849 */ 2850 if (size == TTE8K || size == TTE4M) { 2851 hatlockp = sfmmu_hat_enter(sfmmup); 2852 sfmmu_load_tsb(sfmmup, vaddr, &sfhme->hme_tte, size); 2853 sfmmu_hat_exit(hatlockp); 2854 } 2855 } 2856 if (pp) { 2857 if (!remap) { 2858 HME_ADD(sfhme, pp); 2859 atomic_add_16(&hmeblkp->hblk_hmecnt, 1); 2860 ASSERT(hmeblkp->hblk_hmecnt > 0); 2861 2862 /* 2863 * Cannot ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS) 2864 * see pageunload() for comment. 2865 */ 2866 } 2867 sfmmu_mlist_exit(pml); 2868 } 2869 2870 return (0); 2871 } 2872 /* 2873 * Function unlocks hash bucket. 2874 */ 2875 static void 2876 sfmmu_tteload_release_hashbucket(struct hmehash_bucket *hmebp) 2877 { 2878 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 2879 SFMMU_HASH_UNLOCK(hmebp); 2880 } 2881 2882 /* 2883 * function which checks and sets up page array for a large 2884 * translation. Will set p_vcolor, p_index, p_ro fields. 2885 * Assumes addr and pfnum of first page are properly aligned. 2886 * Will check for physical contiguity. If check fails it return 2887 * non null. 2888 */ 2889 static int 2890 sfmmu_pagearray_setup(caddr_t addr, page_t **pps, tte_t *ttep, int remap) 2891 { 2892 int i, index, ttesz; 2893 pfn_t pfnum; 2894 pgcnt_t npgs; 2895 page_t *pp, *pp1; 2896 kmutex_t *pmtx; 2897 #ifdef VAC 2898 int osz; 2899 int cflags = 0; 2900 int vac_err = 0; 2901 #endif 2902 int newidx = 0; 2903 2904 ttesz = TTE_CSZ(ttep); 2905 2906 ASSERT(ttesz > TTE8K); 2907 2908 npgs = TTEPAGES(ttesz); 2909 index = PAGESZ_TO_INDEX(ttesz); 2910 2911 pfnum = (*pps)->p_pagenum; 2912 ASSERT(IS_P2ALIGNED(pfnum, npgs)); 2913 2914 /* 2915 * Save the first pp so we can do HAT_TMPNC at the end. 2916 */ 2917 pp1 = *pps; 2918 #ifdef VAC 2919 osz = fnd_mapping_sz(pp1); 2920 #endif 2921 2922 for (i = 0; i < npgs; i++, pps++) { 2923 pp = *pps; 2924 ASSERT(PAGE_LOCKED(pp)); 2925 ASSERT(pp->p_szc >= ttesz); 2926 ASSERT(pp->p_szc == pp1->p_szc); 2927 ASSERT(sfmmu_mlist_held(pp)); 2928 2929 /* 2930 * XXX is it possible to maintain P_RO on the root only? 2931 */ 2932 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) { 2933 pmtx = sfmmu_page_enter(pp); 2934 PP_CLRRO(pp); 2935 sfmmu_page_exit(pmtx); 2936 } else if (!PP_ISMAPPED(pp) && !TTE_IS_WRITABLE(ttep) && 2937 !PP_ISMOD(pp)) { 2938 pmtx = sfmmu_page_enter(pp); 2939 if (!(PP_ISMOD(pp))) { 2940 PP_SETRO(pp); 2941 } 2942 sfmmu_page_exit(pmtx); 2943 } 2944 2945 /* 2946 * If this is a remap we skip vac & contiguity checks. 2947 */ 2948 if (remap) 2949 continue; 2950 2951 /* 2952 * set p_vcolor and detect any vac conflicts. 2953 */ 2954 #ifdef VAC 2955 if (vac_err == 0) { 2956 vac_err = sfmmu_vacconflict_array(addr, pp, &cflags); 2957 2958 } 2959 #endif 2960 2961 /* 2962 * Save current index in case we need to undo it. 2963 * Note: "PAGESZ_TO_INDEX(sz) (1 << (sz))" 2964 * "SFMMU_INDEX_SHIFT 6" 2965 * "SFMMU_INDEX_MASK ((1 << SFMMU_INDEX_SHIFT) - 1)" 2966 * "PP_MAPINDEX(p_index) (p_index & SFMMU_INDEX_MASK)" 2967 * 2968 * So: index = PAGESZ_TO_INDEX(ttesz); 2969 * if ttesz == 1 then index = 0x2 2970 * 2 then index = 0x4 2971 * 3 then index = 0x8 2972 * 4 then index = 0x10 2973 * 5 then index = 0x20 2974 * The code below checks if it's a new pagesize (ie, newidx) 2975 * in case we need to take it back out of p_index, 2976 * and then or's the new index into the existing index. 
2977 */ 2978 if ((PP_MAPINDEX(pp) & index) == 0) 2979 newidx = 1; 2980 pp->p_index = (PP_MAPINDEX(pp) | index); 2981 2982 /* 2983 * contiguity check 2984 */ 2985 if (pp->p_pagenum != pfnum) { 2986 /* 2987 * If we fail the contiguity test then 2988 * the only thing we need to fix is the p_index field. 2989 * We might get a few extra flushes but since this 2990 * path is rare that is ok. The p_ro field will 2991 * get automatically fixed on the next tteload to 2992 * the page. NO TNC bit is set yet. 2993 */ 2994 while (i >= 0) { 2995 pp = *pps; 2996 if (newidx) 2997 pp->p_index = (PP_MAPINDEX(pp) & 2998 ~index); 2999 pps--; 3000 i--; 3001 } 3002 return (1); 3003 } 3004 pfnum++; 3005 addr += MMU_PAGESIZE; 3006 } 3007 3008 #ifdef VAC 3009 if (vac_err) { 3010 if (ttesz > osz) { 3011 /* 3012 * There are some smaller mappings that causes vac 3013 * conflicts. Convert all existing small mappings to 3014 * TNC. 3015 */ 3016 SFMMU_STAT_ADD(sf_uncache_conflict, npgs); 3017 sfmmu_page_cache_array(pp1, HAT_TMPNC, CACHE_FLUSH, 3018 npgs); 3019 } else { 3020 /* EMPTY */ 3021 /* 3022 * If there exists an big page mapping, 3023 * that means the whole existing big page 3024 * has TNC setting already. No need to covert to 3025 * TNC again. 3026 */ 3027 ASSERT(PP_ISTNC(pp1)); 3028 } 3029 } 3030 #endif /* VAC */ 3031 3032 return (0); 3033 } 3034 3035 #ifdef VAC 3036 /* 3037 * Routine that detects vac consistency for a large page. It also 3038 * sets virtual color for all pp's for this big mapping. 3039 */ 3040 static int 3041 sfmmu_vacconflict_array(caddr_t addr, page_t *pp, int *cflags) 3042 { 3043 int vcolor, ocolor; 3044 3045 ASSERT(sfmmu_mlist_held(pp)); 3046 3047 if (PP_ISNC(pp)) { 3048 return (HAT_TMPNC); 3049 } 3050 3051 vcolor = addr_to_vcolor(addr); 3052 if (PP_NEWPAGE(pp)) { 3053 PP_SET_VCOLOR(pp, vcolor); 3054 return (0); 3055 } 3056 3057 ocolor = PP_GET_VCOLOR(pp); 3058 if (ocolor == vcolor) { 3059 return (0); 3060 } 3061 3062 if (!PP_ISMAPPED(pp)) { 3063 /* 3064 * Previous user of page had a differnet color 3065 * but since there are no current users 3066 * we just flush the cache and change the color. 3067 * As an optimization for large pages we flush the 3068 * entire cache of that color and set a flag. 3069 */ 3070 SFMMU_STAT(sf_pgcolor_conflict); 3071 if (!CacheColor_IsFlushed(*cflags, ocolor)) { 3072 CacheColor_SetFlushed(*cflags, ocolor); 3073 sfmmu_cache_flushcolor(ocolor, pp->p_pagenum); 3074 } 3075 PP_SET_VCOLOR(pp, vcolor); 3076 return (0); 3077 } 3078 3079 /* 3080 * We got a real conflict with a current mapping. 3081 * set flags to start unencaching all mappings 3082 * and return failure so we restart looping 3083 * the pp array from the beginning. 3084 */ 3085 return (HAT_TMPNC); 3086 } 3087 #endif /* VAC */ 3088 3089 /* 3090 * creates a large page shadow hmeblk for a tte. 3091 * The purpose of this routine is to allow us to do quick unloads because 3092 * the vm layer can easily pass a very large but sparsely populated range. 
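 *
 * Roughly, the shadow hblk is created one hash level above the tte
 * being loaded (an 8K tte goes straight to a 512K shadow, since 8K
 * and 64K hblks share a rehash level), and each bit set in
 * hblk_shw_mask records a slice of the shadow's range under which
 * smaller hblks have been created, so an unload can skip the slices
 * that were never populated.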
3093 */ 3094 static struct hme_blk * 3095 sfmmu_shadow_hcreate(sfmmu_t *sfmmup, caddr_t vaddr, int ttesz, uint_t flags) 3096 { 3097 struct hmehash_bucket *hmebp; 3098 hmeblk_tag hblktag; 3099 int hmeshift, size, vshift; 3100 uint_t shw_mask, newshw_mask; 3101 struct hme_blk *hmeblkp; 3102 3103 ASSERT(sfmmup != KHATID); 3104 if (mmu_page_sizes == max_mmu_page_sizes) { 3105 ASSERT(ttesz < TTE256M); 3106 } else { 3107 ASSERT(ttesz < TTE4M); 3108 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0); 3109 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0); 3110 } 3111 3112 if (ttesz == TTE8K) { 3113 size = TTE512K; 3114 } else { 3115 size = ++ttesz; 3116 } 3117 3118 hblktag.htag_id = sfmmup; 3119 hmeshift = HME_HASH_SHIFT(size); 3120 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift); 3121 hblktag.htag_rehash = HME_HASH_REHASH(size); 3122 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift); 3123 3124 SFMMU_HASH_LOCK(hmebp); 3125 3126 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 3127 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve); 3128 if (hmeblkp == NULL) { 3129 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size, 3130 hblktag, flags); 3131 } 3132 ASSERT(hmeblkp); 3133 if (!hmeblkp->hblk_shw_mask) { 3134 /* 3135 * if this is a unused hblk it was just allocated or could 3136 * potentially be a previous large page hblk so we need to 3137 * set the shadow bit. 3138 */ 3139 ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt); 3140 hmeblkp->hblk_shw_bit = 1; 3141 } else if (hmeblkp->hblk_shw_bit == 0) { 3142 panic("sfmmu_shadow_hcreate: shw bit not set in hmeblkp 0x%p", 3143 (void *)hmeblkp); 3144 } 3145 3146 vshift = vaddr_to_vshift(hblktag, vaddr, size); 3147 ASSERT(vshift < 8); 3148 /* 3149 * Atomically set shw mask bit 3150 */ 3151 do { 3152 shw_mask = hmeblkp->hblk_shw_mask; 3153 newshw_mask = shw_mask | (1 << vshift); 3154 newshw_mask = cas32(&hmeblkp->hblk_shw_mask, shw_mask, 3155 newshw_mask); 3156 } while (newshw_mask != shw_mask); 3157 3158 SFMMU_HASH_UNLOCK(hmebp); 3159 3160 return (hmeblkp); 3161 } 3162 3163 /* 3164 * This routine cleanup a previous shadow hmeblk and changes it to 3165 * a regular hblk. This happens rarely but it is possible 3166 * when a process wants to use large pages and there are hblks still 3167 * lying around from the previous as that used these hmeblks. 3168 * The alternative was to cleanup the shadow hblks at unload time 3169 * but since so few user processes actually use large pages, it is 3170 * better to be lazy and cleanup at this time. 
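 *
 * Note that the hash bucket lock is dropped across sfmmu_free_hblks()
 * below, which is why callers such as sfmmu_tteload_find_hmeblk()
 * go back and search the hash again after this routine returns.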
3171 */ 3172 static void 3173 sfmmu_shadow_hcleanup(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, 3174 struct hmehash_bucket *hmebp) 3175 { 3176 caddr_t addr, endaddr; 3177 int hashno, size; 3178 3179 ASSERT(hmeblkp->hblk_shw_bit); 3180 3181 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 3182 3183 if (!hmeblkp->hblk_shw_mask) { 3184 hmeblkp->hblk_shw_bit = 0; 3185 return; 3186 } 3187 addr = (caddr_t)get_hblk_base(hmeblkp); 3188 endaddr = get_hblk_endaddr(hmeblkp); 3189 size = get_hblk_ttesz(hmeblkp); 3190 hashno = size - 1; 3191 ASSERT(hashno > 0); 3192 SFMMU_HASH_UNLOCK(hmebp); 3193 3194 sfmmu_free_hblks(sfmmup, addr, endaddr, hashno); 3195 3196 SFMMU_HASH_LOCK(hmebp); 3197 } 3198 3199 static void 3200 sfmmu_free_hblks(sfmmu_t *sfmmup, caddr_t addr, caddr_t endaddr, 3201 int hashno) 3202 { 3203 int hmeshift, shadow = 0; 3204 hmeblk_tag hblktag; 3205 struct hmehash_bucket *hmebp; 3206 struct hme_blk *hmeblkp; 3207 struct hme_blk *nx_hblk, *pr_hblk, *list = NULL; 3208 uint64_t hblkpa, prevpa, nx_pa; 3209 3210 ASSERT(hashno > 0); 3211 hblktag.htag_id = sfmmup; 3212 hblktag.htag_rehash = hashno; 3213 3214 hmeshift = HME_HASH_SHIFT(hashno); 3215 3216 while (addr < endaddr) { 3217 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3218 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 3219 SFMMU_HASH_LOCK(hmebp); 3220 /* inline HME_HASH_SEARCH */ 3221 hmeblkp = hmebp->hmeblkp; 3222 hblkpa = hmebp->hmeh_nextpa; 3223 prevpa = 0; 3224 pr_hblk = NULL; 3225 while (hmeblkp) { 3226 ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp)); 3227 if (HTAGS_EQ(hmeblkp->hblk_tag, hblktag)) { 3228 /* found hme_blk */ 3229 if (hmeblkp->hblk_shw_bit) { 3230 if (hmeblkp->hblk_shw_mask) { 3231 shadow = 1; 3232 sfmmu_shadow_hcleanup(sfmmup, 3233 hmeblkp, hmebp); 3234 break; 3235 } else { 3236 hmeblkp->hblk_shw_bit = 0; 3237 } 3238 } 3239 3240 /* 3241 * Hblk_hmecnt and hblk_vcnt could be non zero 3242 * since hblk_unload() does not gurantee that. 3243 * 3244 * XXX - this could cause tteload() to spin 3245 * where sfmmu_shadow_hcleanup() is called. 3246 */ 3247 } 3248 3249 nx_hblk = hmeblkp->hblk_next; 3250 nx_pa = hmeblkp->hblk_nextpa; 3251 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 3252 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, 3253 pr_hblk); 3254 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 3255 } else { 3256 pr_hblk = hmeblkp; 3257 prevpa = hblkpa; 3258 } 3259 hmeblkp = nx_hblk; 3260 hblkpa = nx_pa; 3261 } 3262 3263 SFMMU_HASH_UNLOCK(hmebp); 3264 3265 if (shadow) { 3266 /* 3267 * We found another shadow hblk so cleaned its 3268 * children. We need to go back and cleanup 3269 * the original hblk so we don't change the 3270 * addr. 3271 */ 3272 shadow = 0; 3273 } else { 3274 addr = (caddr_t)roundup((uintptr_t)addr + 1, 3275 (1 << hmeshift)); 3276 } 3277 } 3278 sfmmu_hblks_list_purge(&list); 3279 } 3280 3281 /* 3282 * Release one hardware address translation lock on the given address range. 3283 */ 3284 void 3285 hat_unlock(struct hat *sfmmup, caddr_t addr, size_t len) 3286 { 3287 struct hmehash_bucket *hmebp; 3288 hmeblk_tag hblktag; 3289 int hmeshift, hashno = 1; 3290 struct hme_blk *hmeblkp, *list = NULL; 3291 caddr_t endaddr; 3292 3293 ASSERT(sfmmup != NULL); 3294 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 3295 3296 ASSERT((sfmmup == ksfmmup) || 3297 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 3298 ASSERT((len & MMU_PAGEOFFSET) == 0); 3299 endaddr = addr + len; 3300 hblktag.htag_id = sfmmup; 3301 3302 /* 3303 * Spitfire supports 4 page sizes. 
3304 * Most pages are expected to be of the smallest page size (8K) and 3305 * these will not need to be rehashed. 64K pages also don't need to be 3306 * rehashed because an hmeblk spans 64K of address space. 512K pages 3307 * might need 1 rehash and and 4M pages might need 2 rehashes. 3308 */ 3309 while (addr < endaddr) { 3310 hmeshift = HME_HASH_SHIFT(hashno); 3311 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3312 hblktag.htag_rehash = hashno; 3313 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 3314 3315 SFMMU_HASH_LOCK(hmebp); 3316 3317 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 3318 if (hmeblkp != NULL) { 3319 /* 3320 * If we encounter a shadow hmeblk then 3321 * we know there are no valid hmeblks mapping 3322 * this address at this size or larger. 3323 * Just increment address by the smallest 3324 * page size. 3325 */ 3326 if (hmeblkp->hblk_shw_bit) { 3327 addr += MMU_PAGESIZE; 3328 } else { 3329 addr = sfmmu_hblk_unlock(hmeblkp, addr, 3330 endaddr); 3331 } 3332 SFMMU_HASH_UNLOCK(hmebp); 3333 hashno = 1; 3334 continue; 3335 } 3336 SFMMU_HASH_UNLOCK(hmebp); 3337 3338 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 3339 /* 3340 * We have traversed the whole list and rehashed 3341 * if necessary without finding the address to unlock 3342 * which should never happen. 3343 */ 3344 panic("sfmmu_unlock: addr not found. " 3345 "addr %p hat %p", (void *)addr, (void *)sfmmup); 3346 } else { 3347 hashno++; 3348 } 3349 } 3350 3351 sfmmu_hblks_list_purge(&list); 3352 } 3353 3354 /* 3355 * Function to unlock a range of addresses in an hmeblk. It returns the 3356 * next address that needs to be unlocked. 3357 * Should be called with the hash lock held. 3358 */ 3359 static caddr_t 3360 sfmmu_hblk_unlock(struct hme_blk *hmeblkp, caddr_t addr, caddr_t endaddr) 3361 { 3362 struct sf_hment *sfhme; 3363 tte_t tteold, ttemod; 3364 int ttesz, ret; 3365 3366 ASSERT(in_hblk_range(hmeblkp, addr)); 3367 ASSERT(hmeblkp->hblk_shw_bit == 0); 3368 3369 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 3370 ttesz = get_hblk_ttesz(hmeblkp); 3371 3372 HBLKTOHME(sfhme, hmeblkp, addr); 3373 while (addr < endaddr) { 3374 readtte: 3375 sfmmu_copytte(&sfhme->hme_tte, &tteold); 3376 if (TTE_IS_VALID(&tteold)) { 3377 3378 ttemod = tteold; 3379 3380 ret = sfmmu_modifytte_try(&tteold, &ttemod, 3381 &sfhme->hme_tte); 3382 3383 if (ret < 0) 3384 goto readtte; 3385 3386 if (hmeblkp->hblk_lckcnt == 0) 3387 panic("zero hblk lckcnt"); 3388 3389 if (((uintptr_t)addr + TTEBYTES(ttesz)) > 3390 (uintptr_t)endaddr) 3391 panic("can't unlock large tte"); 3392 3393 ASSERT(hmeblkp->hblk_lckcnt > 0); 3394 atomic_add_16(&hmeblkp->hblk_lckcnt, -1); 3395 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK); 3396 } else { 3397 panic("sfmmu_hblk_unlock: invalid tte"); 3398 } 3399 addr += TTEBYTES(ttesz); 3400 sfhme++; 3401 } 3402 return (addr); 3403 } 3404 3405 /* 3406 * Physical Address Mapping Framework 3407 * 3408 * General rules: 3409 * 3410 * (1) Applies only to seg_kmem memory pages. To make things easier, 3411 * seg_kpm addresses are also accepted by the routines, but nothing 3412 * is done with them since by definition their PA mappings are static. 3413 * (2) hat_add_callback() may only be called while holding the page lock 3414 * SE_SHARED or SE_EXCL of the underlying page (e.g., as_pagelock()), 3415 * or passing HAC_PAGELOCK flag. 3416 * (3) prehandler() and posthandler() may not call hat_add_callback() or 3417 * hat_delete_callback(), nor should they allocate memory. 
Post quiesce 3418 * callbacks may not sleep or acquire adaptive mutex locks. 3419 * (4) Either prehandler() or posthandler() (but not both) may be specified 3420 * as being NULL. Specifying an errhandler() is optional. 3421 * 3422 * Details of using the framework: 3423 * 3424 * registering a callback (hat_register_callback()) 3425 * 3426 * Pass prehandler, posthandler, errhandler addresses 3427 * as described below. If capture_cpus argument is nonzero, 3428 * suspend callback to the prehandler will occur with CPUs 3429 * captured and executing xc_loop() and CPUs will remain 3430 * captured until after the posthandler suspend callback 3431 * occurs. 3432 * 3433 * adding a callback (hat_add_callback()) 3434 * 3435 * as_pagelock(); 3436 * hat_add_callback(); 3437 * save returned pfn in private data structures or program registers; 3438 * as_pageunlock(); 3439 * 3440 * prehandler() 3441 * 3442 * Stop all accesses by physical address to this memory page. 3443 * Called twice: the first, PRESUSPEND, is a context safe to acquire 3444 * adaptive locks. The second, SUSPEND, is called at high PIL with 3445 * CPUs captured so adaptive locks may NOT be acquired (and all spin 3446 * locks must be XCALL_PIL or higher locks). 3447 * 3448 * May return the following errors: 3449 * EIO: A fatal error has occurred. This will result in panic. 3450 * EAGAIN: The page cannot be suspended. This will fail the 3451 * relocation. 3452 * 0: Success. 3453 * 3454 * posthandler() 3455 * 3456 * Save new pfn in private data structures or program registers; 3457 * not allowed to fail (non-zero return values will result in panic). 3458 * 3459 * errhandler() 3460 * 3461 * called when an error occurs related to the callback. Currently 3462 * the only such error is HAT_CB_ERR_LEAKED which indicates that 3463 * a page is being freed, but there are still outstanding callback(s) 3464 * registered on the page. 3465 * 3466 * removing a callback (hat_delete_callback(); e.g., prior to freeing memory) 3467 * 3468 * stop using physical address 3469 * hat_delete_callback(); 3470 * 3471 */ 3472 3473 /* 3474 * Register a callback class. Each subsystem should do this once and 3475 * cache the id_t returned for use in setting up and tearing down callbacks. 3476 * 3477 * There is no facility for removing callback IDs once they are created; 3478 * the "key" should be unique for each module, so in case a module is unloaded 3479 * and subsequently re-loaded, we can recycle the module's previous entry. 3480 */ 3481 id_t 3482 hat_register_callback(int key, 3483 int (*prehandler)(caddr_t, uint_t, uint_t, void *), 3484 int (*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t), 3485 int (*errhandler)(caddr_t, uint_t, uint_t, void *), 3486 int capture_cpus) 3487 { 3488 id_t id; 3489 3490 /* 3491 * Search the table for a pre-existing callback associated with 3492 * the identifier "key". If one exists, we re-use that entry in 3493 * the table for this instance, otherwise we assign the next 3494 * available table slot. 
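 *
 * A hypothetical client (all mydrv_* names and MYDRV_CB_KEY are
 * illustrative, not from this file) would typically register once at
 * module load time, cache the id, and then pair each add with a
 * delete using the returned cookie:
 *
 *	static id_t mydrv_cb_id;
 *
 *	mydrv_cb_id = hat_register_callback(MYDRV_CB_KEY,
 *	    mydrv_presuspend, mydrv_postsuspend, NULL, 0);
 *	...
 *	int err;
 *	pfn_t pfn;
 *	void *cookie;
 *
 *	err = hat_add_callback(mydrv_cb_id, kvaddr, len, HAC_PAGELOCK,
 *	    pvt, &pfn, &cookie);
 *	...
 *	hat_delete_callback(kvaddr, len, pvt, HAC_PAGELOCK, cookie);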
3495 */ 3496 for (id = 0; id < sfmmu_max_cb_id; id++) { 3497 if (sfmmu_cb_table[id].key == key) 3498 break; 3499 } 3500 3501 if (id == sfmmu_max_cb_id) { 3502 id = sfmmu_cb_nextid++; 3503 if (id >= sfmmu_max_cb_id) 3504 panic("hat_register_callback: out of callback IDs"); 3505 } 3506 3507 ASSERT(prehandler != NULL || posthandler != NULL); 3508 3509 sfmmu_cb_table[id].key = key; 3510 sfmmu_cb_table[id].prehandler = prehandler; 3511 sfmmu_cb_table[id].posthandler = posthandler; 3512 sfmmu_cb_table[id].errhandler = errhandler; 3513 sfmmu_cb_table[id].capture_cpus = capture_cpus; 3514 3515 return (id); 3516 } 3517 3518 #define HAC_COOKIE_NONE (void *)-1 3519 3520 /* 3521 * Add relocation callbacks to the specified addr/len which will be called 3522 * when relocating the associated page. See the description of pre and 3523 * posthandler above for more details. 3524 * 3525 * If HAC_PAGELOCK is included in flags, the underlying memory page is 3526 * locked internally so the caller must be able to deal with the callback 3527 * running even before this function has returned. If HAC_PAGELOCK is not 3528 * set, it is assumed that the underlying memory pages are locked. 3529 * 3530 * Since the caller must track the individual page boundaries anyway, 3531 * we only allow a callback to be added to a single page (large 3532 * or small). Thus [addr, addr + len) MUST be contained within a single 3533 * page. 3534 * 3535 * Registering multiple callbacks on the same [addr, addr+len) is supported, 3536 * _provided_that_ a unique parameter is specified for each callback. 3537 * If multiple callbacks are registered on the same range the callback will 3538 * be invoked with each unique parameter. Registering the same callback with 3539 * the same argument more than once will result in corrupted kernel state. 3540 * 3541 * Returns the pfn of the underlying kernel page in *rpfn 3542 * on success, or PFN_INVALID on failure. 3543 * 3544 * cookiep (if passed) provides storage space for an opaque cookie 3545 * to return later to hat_delete_callback(). This cookie makes the callback 3546 * deletion significantly quicker by avoiding a potentially lengthy hash 3547 * search. 3548 * 3549 * Returns values: 3550 * 0: success 3551 * ENOMEM: memory allocation failure (e.g. flags was passed as HAC_NOSLEEP) 3552 * EINVAL: callback ID is not valid 3553 * ENXIO: ["vaddr", "vaddr" + len) is not mapped in the kernel's address 3554 * space 3555 * ERANGE: ["vaddr", "vaddr" + len) crosses a page boundary 3556 */ 3557 int 3558 hat_add_callback(id_t callback_id, caddr_t vaddr, uint_t len, uint_t flags, 3559 void *pvt, pfn_t *rpfn, void **cookiep) 3560 { 3561 struct hmehash_bucket *hmebp; 3562 hmeblk_tag hblktag; 3563 struct hme_blk *hmeblkp; 3564 int hmeshift, hashno; 3565 caddr_t saddr, eaddr, baseaddr; 3566 struct pa_hment *pahmep; 3567 struct sf_hment *sfhmep, *osfhmep; 3568 kmutex_t *pml; 3569 tte_t tte; 3570 page_t *pp; 3571 vnode_t *vp; 3572 u_offset_t off; 3573 pfn_t pfn; 3574 int kmflags = (flags & HAC_SLEEP)? KM_SLEEP : KM_NOSLEEP; 3575 int locked = 0; 3576 3577 /* 3578 * For KPM mappings, just return the physical address since we 3579 * don't need to register any callbacks. 
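 *
 * (For kpm, SFMMU_KPM_VTOP() below recovers the physical address
 * directly from the virtual address, and btop() of that is the pfn
 * handed back to the caller.)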
3580 */ 3581 if (IS_KPM_ADDR(vaddr)) { 3582 uint64_t paddr; 3583 SFMMU_KPM_VTOP(vaddr, paddr); 3584 *rpfn = btop(paddr); 3585 if (cookiep != NULL) 3586 *cookiep = HAC_COOKIE_NONE; 3587 return (0); 3588 } 3589 3590 if (callback_id < (id_t)0 || callback_id >= sfmmu_cb_nextid) { 3591 *rpfn = PFN_INVALID; 3592 return (EINVAL); 3593 } 3594 3595 if ((pahmep = kmem_cache_alloc(pa_hment_cache, kmflags)) == NULL) { 3596 *rpfn = PFN_INVALID; 3597 return (ENOMEM); 3598 } 3599 3600 sfhmep = &pahmep->sfment; 3601 3602 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK); 3603 eaddr = saddr + len; 3604 3605 rehash: 3606 /* Find the mapping(s) for this page */ 3607 for (hashno = TTE64K, hmeblkp = NULL; 3608 hmeblkp == NULL && hashno <= mmu_hashcnt; 3609 hashno++) { 3610 hmeshift = HME_HASH_SHIFT(hashno); 3611 hblktag.htag_id = ksfmmup; 3612 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift); 3613 hblktag.htag_rehash = hashno; 3614 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift); 3615 3616 SFMMU_HASH_LOCK(hmebp); 3617 3618 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 3619 3620 if (hmeblkp == NULL) 3621 SFMMU_HASH_UNLOCK(hmebp); 3622 } 3623 3624 if (hmeblkp == NULL) { 3625 kmem_cache_free(pa_hment_cache, pahmep); 3626 *rpfn = PFN_INVALID; 3627 return (ENXIO); 3628 } 3629 3630 HBLKTOHME(osfhmep, hmeblkp, saddr); 3631 sfmmu_copytte(&osfhmep->hme_tte, &tte); 3632 3633 if (!TTE_IS_VALID(&tte)) { 3634 SFMMU_HASH_UNLOCK(hmebp); 3635 kmem_cache_free(pa_hment_cache, pahmep); 3636 *rpfn = PFN_INVALID; 3637 return (ENXIO); 3638 } 3639 3640 /* 3641 * Make sure the boundaries for the callback fall within this 3642 * single mapping. 3643 */ 3644 baseaddr = (caddr_t)get_hblk_base(hmeblkp); 3645 ASSERT(saddr >= baseaddr); 3646 if (eaddr > saddr + TTEBYTES(TTE_CSZ(&tte))) { 3647 SFMMU_HASH_UNLOCK(hmebp); 3648 kmem_cache_free(pa_hment_cache, pahmep); 3649 *rpfn = PFN_INVALID; 3650 return (ERANGE); 3651 } 3652 3653 pfn = sfmmu_ttetopfn(&tte, vaddr); 3654 3655 /* 3656 * The pfn may not have a page_t underneath in which case we 3657 * just return it. This can happen if we are doing I/O to a 3658 * static portion of the kernel's address space, for instance. 3659 */ 3660 pp = osfhmep->hme_page; 3661 if (pp == NULL) { 3662 SFMMU_HASH_UNLOCK(hmebp); 3663 kmem_cache_free(pa_hment_cache, pahmep); 3664 *rpfn = pfn; 3665 if (cookiep) 3666 *cookiep = HAC_COOKIE_NONE; 3667 return (0); 3668 } 3669 ASSERT(pp == PP_PAGEROOT(pp)); 3670 3671 vp = pp->p_vnode; 3672 off = pp->p_offset; 3673 3674 pml = sfmmu_mlist_enter(pp); 3675 3676 if (flags & HAC_PAGELOCK) { 3677 if (!page_trylock(pp, SE_SHARED)) { 3678 /* 3679 * Somebody is holding SE_EXCL lock. Might 3680 * even be hat_page_relocate(). Drop all 3681 * our locks, lookup the page in &kvp, and 3682 * retry. If it doesn't exist in &kvp and &zvp, 3683 * then we must be dealing with a kernel mapped 3684 * page which doesn't actually belong to 3685 * segkmem so we punt. 
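 *
 * (The page_lookup() below takes its own SE_SHARED lock on whatever
 * page it finds; that lock is dropped again before the rehash, so the
 * retry starts over from the hash search with no locks held.)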
3686 */ 3687 sfmmu_mlist_exit(pml); 3688 SFMMU_HASH_UNLOCK(hmebp); 3689 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED); 3690 3691 /* check zvp before giving up */ 3692 if (pp == NULL) 3693 pp = page_lookup(&zvp, (u_offset_t)saddr, 3694 SE_SHARED); 3695 3696 /* Okay, we didn't find it, give up */ 3697 if (pp == NULL) { 3698 kmem_cache_free(pa_hment_cache, pahmep); 3699 *rpfn = pfn; 3700 if (cookiep) 3701 *cookiep = HAC_COOKIE_NONE; 3702 return (0); 3703 } 3704 page_unlock(pp); 3705 goto rehash; 3706 } 3707 locked = 1; 3708 } 3709 3710 if (!PAGE_LOCKED(pp) && !panicstr) 3711 panic("hat_add_callback: page 0x%p not locked", pp); 3712 3713 if (osfhmep->hme_page != pp || pp->p_vnode != vp || 3714 pp->p_offset != off) { 3715 /* 3716 * The page moved before we got our hands on it. Drop 3717 * all the locks and try again. 3718 */ 3719 ASSERT((flags & HAC_PAGELOCK) != 0); 3720 sfmmu_mlist_exit(pml); 3721 SFMMU_HASH_UNLOCK(hmebp); 3722 page_unlock(pp); 3723 locked = 0; 3724 goto rehash; 3725 } 3726 3727 if (!VN_ISKAS(vp)) { 3728 /* 3729 * This is not a segkmem page but another page which 3730 * has been kernel mapped. It had better have at least 3731 * a share lock on it. Return the pfn. 3732 */ 3733 sfmmu_mlist_exit(pml); 3734 SFMMU_HASH_UNLOCK(hmebp); 3735 if (locked) 3736 page_unlock(pp); 3737 kmem_cache_free(pa_hment_cache, pahmep); 3738 ASSERT(PAGE_LOCKED(pp)); 3739 *rpfn = pfn; 3740 if (cookiep) 3741 *cookiep = HAC_COOKIE_NONE; 3742 return (0); 3743 } 3744 3745 /* 3746 * Setup this pa_hment and link its embedded dummy sf_hment into 3747 * the mapping list. 3748 */ 3749 pp->p_share++; 3750 pahmep->cb_id = callback_id; 3751 pahmep->addr = vaddr; 3752 pahmep->len = len; 3753 pahmep->refcnt = 1; 3754 pahmep->flags = 0; 3755 pahmep->pvt = pvt; 3756 3757 sfhmep->hme_tte.ll = 0; 3758 sfhmep->hme_data = pahmep; 3759 sfhmep->hme_prev = osfhmep; 3760 sfhmep->hme_next = osfhmep->hme_next; 3761 3762 if (osfhmep->hme_next) 3763 osfhmep->hme_next->hme_prev = sfhmep; 3764 3765 osfhmep->hme_next = sfhmep; 3766 3767 sfmmu_mlist_exit(pml); 3768 SFMMU_HASH_UNLOCK(hmebp); 3769 3770 if (locked) 3771 page_unlock(pp); 3772 3773 *rpfn = pfn; 3774 if (cookiep) 3775 *cookiep = (void *)pahmep; 3776 3777 return (0); 3778 } 3779 3780 /* 3781 * Remove the relocation callbacks from the specified addr/len. 3782 */ 3783 void 3784 hat_delete_callback(caddr_t vaddr, uint_t len, void *pvt, uint_t flags, 3785 void *cookie) 3786 { 3787 struct hmehash_bucket *hmebp; 3788 hmeblk_tag hblktag; 3789 struct hme_blk *hmeblkp; 3790 int hmeshift, hashno; 3791 caddr_t saddr; 3792 struct pa_hment *pahmep; 3793 struct sf_hment *sfhmep, *osfhmep; 3794 kmutex_t *pml; 3795 tte_t tte; 3796 page_t *pp; 3797 vnode_t *vp; 3798 u_offset_t off; 3799 int locked = 0; 3800 3801 /* 3802 * If the cookie is HAC_COOKIE_NONE then there is no pa_hment to 3803 * remove so just return. 
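*
* For reference, a hedged sketch of the expected pairing with
* hat_add_callback() (illustrative only; "my_id", "my_vaddr", "my_len"
* and "my_arg" are made-up names, and my_id is assumed to have been
* obtained earlier from hat_register_callback()):
*
*     void *cookie;
*     pfn_t pfn;
*
*     if (hat_add_callback(my_id, my_vaddr, my_len,
*         HAC_PAGELOCK | HAC_SLEEP, my_arg, &pfn, &cookie) == 0) {
*             ... use the mapping, then tear it down ...
*             hat_delete_callback(my_vaddr, my_len, my_arg,
*                 HAC_PAGELOCK, cookie);
*     }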
3804 */ 3805 if (cookie == HAC_COOKIE_NONE || IS_KPM_ADDR(vaddr)) 3806 return; 3807 3808 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK); 3809 3810 rehash: 3811 /* Find the mapping(s) for this page */ 3812 for (hashno = TTE64K, hmeblkp = NULL; 3813 hmeblkp == NULL && hashno <= mmu_hashcnt; 3814 hashno++) { 3815 hmeshift = HME_HASH_SHIFT(hashno); 3816 hblktag.htag_id = ksfmmup; 3817 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift); 3818 hblktag.htag_rehash = hashno; 3819 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift); 3820 3821 SFMMU_HASH_LOCK(hmebp); 3822 3823 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 3824 3825 if (hmeblkp == NULL) 3826 SFMMU_HASH_UNLOCK(hmebp); 3827 } 3828 3829 if (hmeblkp == NULL) 3830 return; 3831 3832 HBLKTOHME(osfhmep, hmeblkp, saddr); 3833 3834 sfmmu_copytte(&osfhmep->hme_tte, &tte); 3835 if (!TTE_IS_VALID(&tte)) { 3836 SFMMU_HASH_UNLOCK(hmebp); 3837 return; 3838 } 3839 3840 pp = osfhmep->hme_page; 3841 if (pp == NULL) { 3842 SFMMU_HASH_UNLOCK(hmebp); 3843 ASSERT(cookie == NULL); 3844 return; 3845 } 3846 3847 vp = pp->p_vnode; 3848 off = pp->p_offset; 3849 3850 pml = sfmmu_mlist_enter(pp); 3851 3852 if (flags & HAC_PAGELOCK) { 3853 if (!page_trylock(pp, SE_SHARED)) { 3854 /* 3855 * Somebody is holding SE_EXCL lock. Might 3856 * even be hat_page_relocate(). Drop all 3857 * our locks, lookup the page in &kvp, and 3858 * retry. If it doesn't exist in &kvp and &zvp, 3859 * then we must be dealing with a kernel mapped 3860 * page which doesn't actually belong to 3861 * segkmem so we punt. 3862 */ 3863 sfmmu_mlist_exit(pml); 3864 SFMMU_HASH_UNLOCK(hmebp); 3865 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED); 3866 /* check zvp before giving up */ 3867 if (pp == NULL) 3868 pp = page_lookup(&zvp, (u_offset_t)saddr, 3869 SE_SHARED); 3870 3871 if (pp == NULL) { 3872 ASSERT(cookie == NULL); 3873 return; 3874 } 3875 page_unlock(pp); 3876 goto rehash; 3877 } 3878 locked = 1; 3879 } 3880 3881 ASSERT(PAGE_LOCKED(pp)); 3882 3883 if (osfhmep->hme_page != pp || pp->p_vnode != vp || 3884 pp->p_offset != off) { 3885 /* 3886 * The page moved before we got our hands on it. Drop 3887 * all the locks and try again. 3888 */ 3889 ASSERT((flags & HAC_PAGELOCK) != 0); 3890 sfmmu_mlist_exit(pml); 3891 SFMMU_HASH_UNLOCK(hmebp); 3892 page_unlock(pp); 3893 locked = 0; 3894 goto rehash; 3895 } 3896 3897 if (!VN_ISKAS(vp)) { 3898 /* 3899 * This is not a segkmem page but another page which 3900 * has been kernel mapped. 3901 */ 3902 sfmmu_mlist_exit(pml); 3903 SFMMU_HASH_UNLOCK(hmebp); 3904 if (locked) 3905 page_unlock(pp); 3906 ASSERT(cookie == NULL); 3907 return; 3908 } 3909 3910 if (cookie != NULL) { 3911 pahmep = (struct pa_hment *)cookie; 3912 sfhmep = &pahmep->sfment; 3913 } else { 3914 for (sfhmep = pp->p_mapping; sfhmep != NULL; 3915 sfhmep = sfhmep->hme_next) { 3916 3917 /* 3918 * skip va<->pa mappings 3919 */ 3920 if (!IS_PAHME(sfhmep)) 3921 continue; 3922 3923 pahmep = sfhmep->hme_data; 3924 ASSERT(pahmep != NULL); 3925 3926 /* 3927 * if pa_hment matches, remove it 3928 */ 3929 if ((pahmep->pvt == pvt) && 3930 (pahmep->addr == vaddr) && 3931 (pahmep->len == len)) { 3932 break; 3933 } 3934 } 3935 } 3936 3937 if (sfhmep == NULL) { 3938 if (!panicstr) { 3939 panic("hat_delete_callback: pa_hment not found, pp %p", 3940 (void *)pp); 3941 } 3942 return; 3943 } 3944 3945 /* 3946 * Note: at this point a valid kernel mapping must still be 3947 * present on this page. 
3948 */
3949 pp->p_share--;
3950 if (pp->p_share <= 0)
3951 panic("hat_delete_callback: zero p_share");
3952
3953 if (--pahmep->refcnt == 0) {
3954 if (pahmep->flags != 0)
3955 panic("hat_delete_callback: pa_hment is busy");
3956
3957 /*
3958 * Remove sfhmep from the mapping list for the page.
3959 */
3960 if (sfhmep->hme_prev) {
3961 sfhmep->hme_prev->hme_next = sfhmep->hme_next;
3962 } else {
3963 pp->p_mapping = sfhmep->hme_next;
3964 }
3965
3966 if (sfhmep->hme_next)
3967 sfhmep->hme_next->hme_prev = sfhmep->hme_prev;
3968
3969 sfmmu_mlist_exit(pml);
3970 SFMMU_HASH_UNLOCK(hmebp);
3971
3972 if (locked)
3973 page_unlock(pp);
3974
3975 kmem_cache_free(pa_hment_cache, pahmep);
3976 return;
3977 }
3978
3979 sfmmu_mlist_exit(pml);
3980 SFMMU_HASH_UNLOCK(hmebp);
3981 if (locked)
3982 page_unlock(pp);
3983 }
3984
3985 /*
3986 * hat_probe returns 1 if the translation for the address 'addr' is
3987 * loaded, zero otherwise.
3988 *
3989 * hat_probe should be used only for advisory purposes because it may
3990 * occasionally return the wrong value. The implementation must guarantee that
3991 * returning the wrong value is a very rare event. hat_probe is used
3992 * to implement optimizations in the segment drivers.
3993 *
3994 */
3995 int
3996 hat_probe(struct hat *sfmmup, caddr_t addr)
3997 {
3998 pfn_t pfn;
3999 tte_t tte;
4000
4001 ASSERT(sfmmup != NULL);
4002 ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
4003
4004 ASSERT((sfmmup == ksfmmup) ||
4005 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
4006
4007 if (sfmmup == ksfmmup) {
4008 while ((pfn = sfmmu_vatopfn(addr, sfmmup, &tte))
4009 == PFN_SUSPENDED) {
4010 sfmmu_vatopfn_suspended(addr, sfmmup, &tte);
4011 }
4012 } else {
4013 pfn = sfmmu_uvatopfn(addr, sfmmup);
4014 }
4015
4016 if (pfn != PFN_INVALID)
4017 return (1);
4018 else
4019 return (0);
4020 }
4021
4022 ssize_t
4023 hat_getpagesize(struct hat *sfmmup, caddr_t addr)
4024 {
4025 tte_t tte;
4026
4027 ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
4028
4029 sfmmu_gettte(sfmmup, addr, &tte);
4030 if (TTE_IS_VALID(&tte)) {
4031 return (TTEBYTES(TTE_CSZ(&tte)));
4032 }
4033 return (-1);
4034 }
4035
4036 static void
4037 sfmmu_gettte(struct hat *sfmmup, caddr_t addr, tte_t *ttep)
4038 {
4039 struct hmehash_bucket *hmebp;
4040 hmeblk_tag hblktag;
4041 int hmeshift, hashno = 1;
4042 struct hme_blk *hmeblkp, *list = NULL;
4043 struct sf_hment *sfhmep;
4044
4045 /* support for ISM */
4046 ism_map_t *ism_map;
4047 ism_blk_t *ism_blkp;
4048 int i;
4049 sfmmu_t *ism_hatid = NULL;
4050 sfmmu_t *locked_hatid = NULL;
4051
4052 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
4053
4054 ism_blkp = sfmmup->sfmmu_iblk;
4055 if (ism_blkp) {
4056 sfmmu_ismhat_enter(sfmmup, 0);
4057 locked_hatid = sfmmup;
4058 }
4059 while (ism_blkp && ism_hatid == NULL) {
4060 ism_map = ism_blkp->iblk_maps;
4061 for (i = 0; ism_map[i].imap_ismhat && i < ISM_MAP_SLOTS; i++) {
4062 if (addr >= ism_start(ism_map[i]) &&
4063 addr < ism_end(ism_map[i])) {
4064 sfmmup = ism_hatid = ism_map[i].imap_ismhat;
4065 addr = (caddr_t)(addr -
4066 ism_start(ism_map[i]));
4067 break;
4068 }
4069 }
4070 ism_blkp = ism_blkp->iblk_next;
4071 }
4072 if (locked_hatid) {
4073 sfmmu_ismhat_exit(locked_hatid, 0);
4074 }
4075
4076 hblktag.htag_id = sfmmup;
4077 ttep->ll = 0;
4078
4079 do {
4080 hmeshift = HME_HASH_SHIFT(hashno);
4081 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
4082 hblktag.htag_rehash = hashno;
4083 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
4084
4085 SFMMU_HASH_LOCK(hmebp);
4086
4087 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
4088 if (hmeblkp != NULL) {
4089 HBLKTOHME(sfhmep, hmeblkp, addr);
4090 sfmmu_copytte(&sfhmep->hme_tte, ttep);
4091 SFMMU_HASH_UNLOCK(hmebp);
4092 break;
4093 }
4094 SFMMU_HASH_UNLOCK(hmebp);
4095 hashno++;
4096 } while (HME_REHASH(sfmmup) && (hashno <= mmu_hashcnt));
4097
4098 sfmmu_hblks_list_purge(&list);
4099 }
4100
4101 uint_t
4102 hat_getattr(struct hat *sfmmup, caddr_t addr, uint_t *attr)
4103 {
4104 tte_t tte;
4105
4106 ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
4107
4108 sfmmu_gettte(sfmmup, addr, &tte);
4109 if (TTE_IS_VALID(&tte)) {
4110 *attr = sfmmu_ptov_attr(&tte);
4111 return (0);
4112 }
4113 *attr = 0;
4114 return ((uint_t)0xffffffff);
4115 }
4116
4117 /*
4118 * Enables more attributes on the specified address range (i.e. logical OR)
4119 */
4120 void
4121 hat_setattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4122 {
4123 if (hat->sfmmu_xhat_provider) {
4124 XHAT_SETATTR(hat, addr, len, attr);
4125 return;
4126 } else {
4127 /*
4128 * This must be a CPU HAT. If the address space has
4129 * XHATs attached, change attributes for all of them,
4130 * just in case
4131 */
4132 ASSERT(hat->sfmmu_as != NULL);
4133 if (hat->sfmmu_as->a_xhat != NULL)
4134 xhat_setattr_all(hat->sfmmu_as, addr, len, attr);
4135 }
4136
4137 sfmmu_chgattr(hat, addr, len, attr, SFMMU_SETATTR);
4138 }
4139
4140 /*
4141 * Assigns attributes to the specified address range. All the attributes
4142 * are specified.
4143 */
4144 void
4145 hat_chgattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4146 {
4147 if (hat->sfmmu_xhat_provider) {
4148 XHAT_CHGATTR(hat, addr, len, attr);
4149 return;
4150 } else {
4151 /*
4152 * This must be a CPU HAT. If the address space has
4153 * XHATs attached, change attributes for all of them,
4154 * just in case
4155 */
4156 ASSERT(hat->sfmmu_as != NULL);
4157 if (hat->sfmmu_as->a_xhat != NULL)
4158 xhat_chgattr_all(hat->sfmmu_as, addr, len, attr);
4159 }
4160
4161 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CHGATTR);
4162 }
4163
4164 /*
4165 * Remove attributes on the specified address range (i.e. logical NAND)
4166 */
4167 void
4168 hat_clrattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4169 {
4170 if (hat->sfmmu_xhat_provider) {
4171 XHAT_CLRATTR(hat, addr, len, attr);
4172 return;
4173 } else {
4174 /*
4175 * This must be a CPU HAT. If the address space has
4176 * XHATs attached, change attributes for all of them,
4177 * just in case
4178 */
4179 ASSERT(hat->sfmmu_as != NULL);
4180 if (hat->sfmmu_as->a_xhat != NULL)
4181 xhat_clrattr_all(hat->sfmmu_as, addr, len, attr);
4182 }
4183
4184 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CLRATTR);
4185 }
4186
4187 /*
4188 * Change attributes on an address range to that specified by attr and mode.
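*
* For orientation, the public wrappers above all funnel into this one
* routine with a different mode (a summary of the calls above, not new
* behavior):
*
*     hat_setattr()  -> SFMMU_SETATTR  (logical OR of attr)
*     hat_chgattr()  -> SFMMU_CHGATTR  (replace all attributes)
*     hat_clrattr()  -> SFMMU_CLRATTR  (logical NAND of attr)
*
* So, for example, hat_clrattr(hat, addr, MMU_PAGESIZE, PROT_WRITE)
* removes write permission (and the hardware modify bit) from a single
* page while leaving the remaining attributes untouched.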
4189 */
4190 static void
4191 sfmmu_chgattr(struct hat *sfmmup, caddr_t addr, size_t len, uint_t attr,
4192 int mode)
4193 {
4194 struct hmehash_bucket *hmebp;
4195 hmeblk_tag hblktag;
4196 int hmeshift, hashno = 1;
4197 struct hme_blk *hmeblkp, *list = NULL;
4198 caddr_t endaddr;
4199 cpuset_t cpuset;
4200 demap_range_t dmr;
4201
4202 CPUSET_ZERO(cpuset);
4203
4204 ASSERT((sfmmup == ksfmmup) ||
4205 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
4206 ASSERT((len & MMU_PAGEOFFSET) == 0);
4207 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0);
4208
4209 if ((attr & PROT_USER) && (mode != SFMMU_CLRATTR) &&
4210 ((addr + len) > (caddr_t)USERLIMIT)) {
4211 panic("user addr %p in kernel space",
4212 (void *)addr);
4213 }
4214
4215 endaddr = addr + len;
4216 hblktag.htag_id = sfmmup;
4217 DEMAP_RANGE_INIT(sfmmup, &dmr);
4218
4219 while (addr < endaddr) {
4220 hmeshift = HME_HASH_SHIFT(hashno);
4221 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
4222 hblktag.htag_rehash = hashno;
4223 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
4224
4225 SFMMU_HASH_LOCK(hmebp);
4226
4227 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
4228 if (hmeblkp != NULL) {
4229 /*
4230 * We've encountered a shadow hmeblk so skip the range
4231 * of the next smaller mapping size.
4232 */
4233 if (hmeblkp->hblk_shw_bit) {
4234 ASSERT(sfmmup != ksfmmup);
4235 ASSERT(hashno > 1);
4236 addr = (caddr_t)P2END((uintptr_t)addr,
4237 TTEBYTES(hashno - 1));
4238 } else {
4239 addr = sfmmu_hblk_chgattr(sfmmup,
4240 hmeblkp, addr, endaddr, &dmr, attr, mode);
4241 }
4242 SFMMU_HASH_UNLOCK(hmebp);
4243 hashno = 1;
4244 continue;
4245 }
4246 SFMMU_HASH_UNLOCK(hmebp);
4247
4248 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
4249 /*
4250 * We have traversed the whole list and rehashed
4251 * if necessary without finding the address to chgattr.
4252 * This is ok, so we increment the address by the
4253 * smallest hmeblk range for kernel mappings or for
4254 * user mappings with no large pages, and the largest
4255 * hmeblk range, to account for shadow hmeblks, for
4256 * user mappings with large pages and continue.
4257 */
4258 if (sfmmup == ksfmmup)
4259 addr = (caddr_t)P2END((uintptr_t)addr,
4260 TTEBYTES(1));
4261 else
4262 addr = (caddr_t)P2END((uintptr_t)addr,
4263 TTEBYTES(hashno));
4264 hashno = 1;
4265 } else {
4266 hashno++;
4267 }
4268 }
4269
4270 sfmmu_hblks_list_purge(&list);
4271 DEMAP_RANGE_FLUSH(&dmr);
4272 cpuset = sfmmup->sfmmu_cpusran;
4273 xt_sync(cpuset);
4274 }
4275
4276 /*
4277 * This function changes attributes on a range of addresses in an hmeblk. It
4278 * returns the next address that needs its attributes changed.
4279 * It should be called with the hash lock held.
4280 * XXX It should be possible to optimize chgattr by not flushing every time but
4281 * on the other hand:
4282 * 1. do one flush crosscall.
4283 * 2.
only flush if we are increasing permissions (make sure this will work) 4284 */ 4285 static caddr_t 4286 sfmmu_hblk_chgattr(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 4287 caddr_t endaddr, demap_range_t *dmrp, uint_t attr, int mode) 4288 { 4289 tte_t tte, tteattr, tteflags, ttemod; 4290 struct sf_hment *sfhmep; 4291 int ttesz; 4292 struct page *pp = NULL; 4293 kmutex_t *pml, *pmtx; 4294 int ret; 4295 int use_demap_range; 4296 #if defined(SF_ERRATA_57) 4297 int check_exec; 4298 #endif 4299 4300 ASSERT(in_hblk_range(hmeblkp, addr)); 4301 ASSERT(hmeblkp->hblk_shw_bit == 0); 4302 4303 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 4304 ttesz = get_hblk_ttesz(hmeblkp); 4305 4306 /* 4307 * Flush the current demap region if addresses have been 4308 * skipped or the page size doesn't match. 4309 */ 4310 use_demap_range = (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp)); 4311 if (use_demap_range) { 4312 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 4313 } else { 4314 DEMAP_RANGE_FLUSH(dmrp); 4315 } 4316 4317 tteattr.ll = sfmmu_vtop_attr(attr, mode, &tteflags); 4318 #if defined(SF_ERRATA_57) 4319 check_exec = (sfmmup != ksfmmup) && 4320 AS_TYPE_64BIT(sfmmup->sfmmu_as) && 4321 TTE_IS_EXECUTABLE(&tteattr); 4322 #endif 4323 HBLKTOHME(sfhmep, hmeblkp, addr); 4324 while (addr < endaddr) { 4325 sfmmu_copytte(&sfhmep->hme_tte, &tte); 4326 if (TTE_IS_VALID(&tte)) { 4327 if ((tte.ll & tteflags.ll) == tteattr.ll) { 4328 /* 4329 * if the new attr is the same as old 4330 * continue 4331 */ 4332 goto next_addr; 4333 } 4334 if (!TTE_IS_WRITABLE(&tteattr)) { 4335 /* 4336 * make sure we clear hw modify bit if we 4337 * removing write protections 4338 */ 4339 tteflags.tte_intlo |= TTE_HWWR_INT; 4340 } 4341 4342 pml = NULL; 4343 pp = sfhmep->hme_page; 4344 if (pp) { 4345 pml = sfmmu_mlist_enter(pp); 4346 } 4347 4348 if (pp != sfhmep->hme_page) { 4349 /* 4350 * tte must have been unloaded. 4351 */ 4352 ASSERT(pml); 4353 sfmmu_mlist_exit(pml); 4354 continue; 4355 } 4356 4357 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 4358 4359 ttemod = tte; 4360 ttemod.ll = (ttemod.ll & ~tteflags.ll) | tteattr.ll; 4361 ASSERT(TTE_TO_TTEPFN(&ttemod) == TTE_TO_TTEPFN(&tte)); 4362 4363 #if defined(SF_ERRATA_57) 4364 if (check_exec && addr < errata57_limit) 4365 ttemod.tte_exec_perm = 0; 4366 #endif 4367 ret = sfmmu_modifytte_try(&tte, &ttemod, 4368 &sfhmep->hme_tte); 4369 4370 if (ret < 0) { 4371 /* tte changed underneath us */ 4372 if (pml) { 4373 sfmmu_mlist_exit(pml); 4374 } 4375 continue; 4376 } 4377 4378 if (tteflags.tte_intlo & TTE_HWWR_INT) { 4379 /* 4380 * need to sync if we are clearing modify bit. 4381 */ 4382 sfmmu_ttesync(sfmmup, addr, &tte, pp); 4383 } 4384 4385 if (pp && PP_ISRO(pp)) { 4386 if (tteattr.tte_intlo & TTE_WRPRM_INT) { 4387 pmtx = sfmmu_page_enter(pp); 4388 PP_CLRRO(pp); 4389 sfmmu_page_exit(pmtx); 4390 } 4391 } 4392 4393 if (ret > 0 && use_demap_range) { 4394 DEMAP_RANGE_MARKPG(dmrp, addr); 4395 } else if (ret > 0) { 4396 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 4397 } 4398 4399 if (pml) { 4400 sfmmu_mlist_exit(pml); 4401 } 4402 } 4403 next_addr: 4404 addr += TTEBYTES(ttesz); 4405 sfhmep++; 4406 DEMAP_RANGE_NEXTPG(dmrp); 4407 } 4408 return (addr); 4409 } 4410 4411 /* 4412 * This routine converts virtual attributes to physical ones. It will 4413 * update the tteflags field with the tte mask corresponding to the attributes 4414 * affected and it returns the new attributes. It will also clear the modify 4415 * bit if we are taking away write permission. 
This is necessary since the 4416 * modify bit is the hardware permission bit and we need to clear it in order 4417 * to detect write faults. 4418 */ 4419 static uint64_t 4420 sfmmu_vtop_attr(uint_t attr, int mode, tte_t *ttemaskp) 4421 { 4422 tte_t ttevalue; 4423 4424 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 4425 4426 switch (mode) { 4427 case SFMMU_CHGATTR: 4428 /* all attributes specified */ 4429 ttevalue.tte_inthi = MAKE_TTEATTR_INTHI(attr); 4430 ttevalue.tte_intlo = MAKE_TTEATTR_INTLO(attr); 4431 ttemaskp->tte_inthi = TTEINTHI_ATTR; 4432 ttemaskp->tte_intlo = TTEINTLO_ATTR; 4433 break; 4434 case SFMMU_SETATTR: 4435 ASSERT(!(attr & ~HAT_PROT_MASK)); 4436 ttemaskp->ll = 0; 4437 ttevalue.ll = 0; 4438 /* 4439 * a valid tte implies exec and read for sfmmu 4440 * so no need to do anything about them. 4441 * since priviledged access implies user access 4442 * PROT_USER doesn't make sense either. 4443 */ 4444 if (attr & PROT_WRITE) { 4445 ttemaskp->tte_intlo |= TTE_WRPRM_INT; 4446 ttevalue.tte_intlo |= TTE_WRPRM_INT; 4447 } 4448 break; 4449 case SFMMU_CLRATTR: 4450 /* attributes will be nand with current ones */ 4451 if (attr & ~(PROT_WRITE | PROT_USER)) { 4452 panic("sfmmu: attr %x not supported", attr); 4453 } 4454 ttemaskp->ll = 0; 4455 ttevalue.ll = 0; 4456 if (attr & PROT_WRITE) { 4457 /* clear both writable and modify bit */ 4458 ttemaskp->tte_intlo |= TTE_WRPRM_INT | TTE_HWWR_INT; 4459 } 4460 if (attr & PROT_USER) { 4461 ttemaskp->tte_intlo |= TTE_PRIV_INT; 4462 ttevalue.tte_intlo |= TTE_PRIV_INT; 4463 } 4464 break; 4465 default: 4466 panic("sfmmu_vtop_attr: bad mode %x", mode); 4467 } 4468 ASSERT(TTE_TO_TTEPFN(&ttevalue) == 0); 4469 return (ttevalue.ll); 4470 } 4471 4472 static uint_t 4473 sfmmu_ptov_attr(tte_t *ttep) 4474 { 4475 uint_t attr; 4476 4477 ASSERT(TTE_IS_VALID(ttep)); 4478 4479 attr = PROT_READ; 4480 4481 if (TTE_IS_WRITABLE(ttep)) { 4482 attr |= PROT_WRITE; 4483 } 4484 if (TTE_IS_EXECUTABLE(ttep)) { 4485 attr |= PROT_EXEC; 4486 } 4487 if (!TTE_IS_PRIVILEGED(ttep)) { 4488 attr |= PROT_USER; 4489 } 4490 if (TTE_IS_NFO(ttep)) { 4491 attr |= HAT_NOFAULT; 4492 } 4493 if (TTE_IS_NOSYNC(ttep)) { 4494 attr |= HAT_NOSYNC; 4495 } 4496 if (TTE_IS_SIDEFFECT(ttep)) { 4497 attr |= SFMMU_SIDEFFECT; 4498 } 4499 if (!TTE_IS_VCACHEABLE(ttep)) { 4500 attr |= SFMMU_UNCACHEVTTE; 4501 } 4502 if (!TTE_IS_PCACHEABLE(ttep)) { 4503 attr |= SFMMU_UNCACHEPTTE; 4504 } 4505 return (attr); 4506 } 4507 4508 /* 4509 * hat_chgprot is a deprecated hat call. New segment drivers 4510 * should store all attributes and use hat_*attr calls. 4511 * 4512 * Change the protections in the virtual address range 4513 * given to the specified virtual protection. If vprot is ~PROT_WRITE, 4514 * then remove write permission, leaving the other 4515 * permissions unchanged. If vprot is ~PROT_USER, remove user permissions. 4516 * 4517 */ 4518 void 4519 hat_chgprot(struct hat *sfmmup, caddr_t addr, size_t len, uint_t vprot) 4520 { 4521 struct hmehash_bucket *hmebp; 4522 hmeblk_tag hblktag; 4523 int hmeshift, hashno = 1; 4524 struct hme_blk *hmeblkp, *list = NULL; 4525 caddr_t endaddr; 4526 cpuset_t cpuset; 4527 demap_range_t dmr; 4528 4529 ASSERT((len & MMU_PAGEOFFSET) == 0); 4530 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0); 4531 4532 if (sfmmup->sfmmu_xhat_provider) { 4533 XHAT_CHGPROT(sfmmup, addr, len, vprot); 4534 return; 4535 } else { 4536 /* 4537 * This must be a CPU HAT. 
If the address space has
4538 * XHATs attached, change protections for all of them,
4539 * just in case
4540 */
4541 ASSERT(sfmmup->sfmmu_as != NULL);
4542 if (sfmmup->sfmmu_as->a_xhat != NULL)
4543 xhat_chgprot_all(sfmmup->sfmmu_as, addr, len, vprot);
4544 }
4545
4546 CPUSET_ZERO(cpuset);
4547
4548 if ((vprot != (uint_t)~PROT_WRITE) && (vprot & PROT_USER) &&
4549 ((addr + len) > (caddr_t)USERLIMIT)) {
4550 panic("user addr %p vprot %x in kernel space",
4551 (void *)addr, vprot);
4552 }
4553 endaddr = addr + len;
4554 hblktag.htag_id = sfmmup;
4555 DEMAP_RANGE_INIT(sfmmup, &dmr);
4556
4557 while (addr < endaddr) {
4558 hmeshift = HME_HASH_SHIFT(hashno);
4559 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
4560 hblktag.htag_rehash = hashno;
4561 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
4562
4563 SFMMU_HASH_LOCK(hmebp);
4564
4565 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
4566 if (hmeblkp != NULL) {
4567 /*
4568 * We've encountered a shadow hmeblk so skip the range
4569 * of the next smaller mapping size.
4570 */
4571 if (hmeblkp->hblk_shw_bit) {
4572 ASSERT(sfmmup != ksfmmup);
4573 ASSERT(hashno > 1);
4574 addr = (caddr_t)P2END((uintptr_t)addr,
4575 TTEBYTES(hashno - 1));
4576 } else {
4577 addr = sfmmu_hblk_chgprot(sfmmup, hmeblkp,
4578 addr, endaddr, &dmr, vprot);
4579 }
4580 SFMMU_HASH_UNLOCK(hmebp);
4581 hashno = 1;
4582 continue;
4583 }
4584 SFMMU_HASH_UNLOCK(hmebp);
4585
4586 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
4587 /*
4588 * We have traversed the whole list and rehashed
4589 * if necessary without finding the address to chgprot.
4590 * This is ok so we increment the address by the
4591 * smallest hmeblk range for kernel mappings and the
4592 * largest hmeblk range, to account for shadow hmeblks,
4593 * for user mappings and continue.
4594 */
4595 if (sfmmup == ksfmmup)
4596 addr = (caddr_t)P2END((uintptr_t)addr,
4597 TTEBYTES(1));
4598 else
4599 addr = (caddr_t)P2END((uintptr_t)addr,
4600 TTEBYTES(hashno));
4601 hashno = 1;
4602 } else {
4603 hashno++;
4604 }
4605 }
4606
4607 sfmmu_hblks_list_purge(&list);
4608 DEMAP_RANGE_FLUSH(&dmr);
4609 cpuset = sfmmup->sfmmu_cpusran;
4610 xt_sync(cpuset);
4611 }
4612
4613 /*
4614 * This function changes protections on a range of addresses in an hmeblk. It
4615 * returns the next address that needs its protections changed.
4616 * It should be called with the hash lock held.
4617 * XXX It should be possible to optimize chgprot by not flushing every time but
4618 * on the other hand:
4619 * 1. do one flush crosscall.
4620 * 2. only flush if we are increasing permissions (make sure this will work)
4621 */
4622 static caddr_t
4623 sfmmu_hblk_chgprot(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
4624 caddr_t endaddr, demap_range_t *dmrp, uint_t vprot)
4625 {
4626 uint_t pprot;
4627 tte_t tte, ttemod;
4628 struct sf_hment *sfhmep;
4629 uint_t tteflags;
4630 int ttesz;
4631 struct page *pp = NULL;
4632 kmutex_t *pml, *pmtx;
4633 int ret;
4634 int use_demap_range;
4635 #if defined(SF_ERRATA_57)
4636 int check_exec;
4637 #endif
4638
4639 ASSERT(in_hblk_range(hmeblkp, addr));
4640 ASSERT(hmeblkp->hblk_shw_bit == 0);
4641
4642 #ifdef DEBUG
4643 if (get_hblk_ttesz(hmeblkp) != TTE8K &&
4644 (endaddr < get_hblk_endaddr(hmeblkp))) {
4645 panic("sfmmu_hblk_chgprot: partial chgprot of large page");
4646 }
4647 #endif /* DEBUG */
4648
4649 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
4650 ttesz = get_hblk_ttesz(hmeblkp);
4651
4652 pprot = sfmmu_vtop_prot(vprot, &tteflags);
4653 #if defined(SF_ERRATA_57)
4654 check_exec = (sfmmup != ksfmmup) &&
4655 AS_TYPE_64BIT(sfmmup->sfmmu_as) &&
4656 ((vprot & PROT_EXEC) == PROT_EXEC);
4657 #endif
4658 HBLKTOHME(sfhmep, hmeblkp, addr);
4659
4660 /*
4661 * Flush the current demap region if addresses have been
4662 * skipped or the page size doesn't match.
4663 */
4664 use_demap_range = (TTEBYTES(ttesz) == MMU_PAGESIZE);
4665 if (use_demap_range) {
4666 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr);
4667 } else {
4668 DEMAP_RANGE_FLUSH(dmrp);
4669 }
4670
4671 while (addr < endaddr) {
4672 sfmmu_copytte(&sfhmep->hme_tte, &tte);
4673 if (TTE_IS_VALID(&tte)) {
4674 if (TTE_GET_LOFLAGS(&tte, tteflags) == pprot) {
4675 /*
4676 * if the new protection is the same as old
4677 * continue
4678 */
4679 goto next_addr;
4680 }
4681 pml = NULL;
4682 pp = sfhmep->hme_page;
4683 if (pp) {
4684 pml = sfmmu_mlist_enter(pp);
4685 }
4686 if (pp != sfhmep->hme_page) {
4687 /*
4688 * tte must have been unloaded
4689 * underneath us. Recheck
4690 */
4691 ASSERT(pml);
4692 sfmmu_mlist_exit(pml);
4693 continue;
4694 }
4695
4696 ASSERT(pp == NULL || sfmmu_mlist_held(pp));
4697
4698 ttemod = tte;
4699 TTE_SET_LOFLAGS(&ttemod, tteflags, pprot);
4700 #if defined(SF_ERRATA_57)
4701 if (check_exec && addr < errata57_limit)
4702 ttemod.tte_exec_perm = 0;
4703 #endif
4704 ret = sfmmu_modifytte_try(&tte, &ttemod,
4705 &sfhmep->hme_tte);
4706
4707 if (ret < 0) {
4708 /* tte changed underneath us */
4709 if (pml) {
4710 sfmmu_mlist_exit(pml);
4711 }
4712 continue;
4713 }
4714
4715 if (tteflags & TTE_HWWR_INT) {
4716 /*
4717 * need to sync if we are clearing modify bit.
4718 */
4719 sfmmu_ttesync(sfmmup, addr, &tte, pp);
4720 }
4721
4722 if (pp && PP_ISRO(pp)) {
4723 if (pprot & TTE_WRPRM_INT) {
4724 pmtx = sfmmu_page_enter(pp);
4725 PP_CLRRO(pp);
4726 sfmmu_page_exit(pmtx);
4727 }
4728 }
4729
4730 if (ret > 0 && use_demap_range) {
4731 DEMAP_RANGE_MARKPG(dmrp, addr);
4732 } else if (ret > 0) {
4733 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
4734 }
4735
4736 if (pml) {
4737 sfmmu_mlist_exit(pml);
4738 }
4739 }
4740 next_addr:
4741 addr += TTEBYTES(ttesz);
4742 sfhmep++;
4743 DEMAP_RANGE_NEXTPG(dmrp);
4744 }
4745 return (addr);
4746 }
4747
4748 /*
4749 * This routine is deprecated and should only be used by hat_chgprot.
4750 * The correct routine is sfmmu_vtop_attr.
4751 * This routine converts virtual page protections to physical ones. It will
4752 * update the tteflags field with the tte mask corresponding to the protections
4753 * affected and it returns the new protections.
It will also clear the modify 4754 * bit if we are taking away write permission. This is necessary since the 4755 * modify bit is the hardware permission bit and we need to clear it in order 4756 * to detect write faults. 4757 * It accepts the following special protections: 4758 * ~PROT_WRITE = remove write permissions. 4759 * ~PROT_USER = remove user permissions. 4760 */ 4761 static uint_t 4762 sfmmu_vtop_prot(uint_t vprot, uint_t *tteflagsp) 4763 { 4764 if (vprot == (uint_t)~PROT_WRITE) { 4765 *tteflagsp = TTE_WRPRM_INT | TTE_HWWR_INT; 4766 return (0); /* will cause wrprm to be cleared */ 4767 } 4768 if (vprot == (uint_t)~PROT_USER) { 4769 *tteflagsp = TTE_PRIV_INT; 4770 return (0); /* will cause privprm to be cleared */ 4771 } 4772 if ((vprot == 0) || (vprot == PROT_USER) || 4773 ((vprot & PROT_ALL) != vprot)) { 4774 panic("sfmmu_vtop_prot -- bad prot %x", vprot); 4775 } 4776 4777 switch (vprot) { 4778 case (PROT_READ): 4779 case (PROT_EXEC): 4780 case (PROT_EXEC | PROT_READ): 4781 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT; 4782 return (TTE_PRIV_INT); /* set prv and clr wrt */ 4783 case (PROT_WRITE): 4784 case (PROT_WRITE | PROT_READ): 4785 case (PROT_EXEC | PROT_WRITE): 4786 case (PROT_EXEC | PROT_WRITE | PROT_READ): 4787 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT; 4788 return (TTE_PRIV_INT | TTE_WRPRM_INT); /* set prv and wrt */ 4789 case (PROT_USER | PROT_READ): 4790 case (PROT_USER | PROT_EXEC): 4791 case (PROT_USER | PROT_EXEC | PROT_READ): 4792 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT; 4793 return (0); /* clr prv and wrt */ 4794 case (PROT_USER | PROT_WRITE): 4795 case (PROT_USER | PROT_WRITE | PROT_READ): 4796 case (PROT_USER | PROT_EXEC | PROT_WRITE): 4797 case (PROT_USER | PROT_EXEC | PROT_WRITE | PROT_READ): 4798 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT; 4799 return (TTE_WRPRM_INT); /* clr prv and set wrt */ 4800 default: 4801 panic("sfmmu_vtop_prot -- bad prot %x", vprot); 4802 } 4803 return (0); 4804 } 4805 4806 /* 4807 * Alternate unload for very large virtual ranges. With a true 64 bit VA, 4808 * the normal algorithm would take too long for a very large VA range with 4809 * few real mappings. This routine just walks thru all HMEs in the global 4810 * hash table to find and remove mappings. 4811 */ 4812 static void 4813 hat_unload_large_virtual( 4814 struct hat *sfmmup, 4815 caddr_t startaddr, 4816 size_t len, 4817 uint_t flags, 4818 hat_callback_t *callback) 4819 { 4820 struct hmehash_bucket *hmebp; 4821 struct hme_blk *hmeblkp; 4822 struct hme_blk *pr_hblk = NULL; 4823 struct hme_blk *nx_hblk; 4824 struct hme_blk *list = NULL; 4825 int i; 4826 uint64_t hblkpa, prevpa, nx_pa; 4827 demap_range_t dmr, *dmrp; 4828 cpuset_t cpuset; 4829 caddr_t endaddr = startaddr + len; 4830 caddr_t sa; 4831 caddr_t ea; 4832 caddr_t cb_sa[MAX_CB_ADDR]; 4833 caddr_t cb_ea[MAX_CB_ADDR]; 4834 int addr_cnt = 0; 4835 int a = 0; 4836 4837 if (sfmmup->sfmmu_free) { 4838 dmrp = NULL; 4839 } else { 4840 dmrp = &dmr; 4841 DEMAP_RANGE_INIT(sfmmup, dmrp); 4842 } 4843 4844 /* 4845 * Loop through all the hash buckets of HME blocks looking for matches. 
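*
* A worked example of the tradeoff: unloading a sparsely mapped 1TB
* range with the normal algorithm would take 1TB / 4MB = 262,144 probe
* steps, whereas the walk below touches each hash bucket exactly once
* (indices 0 through UHMEHASH_SZ), independent of the size of the
* virtual range.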
4846 */ 4847 for (i = 0; i <= UHMEHASH_SZ; i++) { 4848 hmebp = &uhme_hash[i]; 4849 SFMMU_HASH_LOCK(hmebp); 4850 hmeblkp = hmebp->hmeblkp; 4851 hblkpa = hmebp->hmeh_nextpa; 4852 prevpa = 0; 4853 pr_hblk = NULL; 4854 while (hmeblkp) { 4855 nx_hblk = hmeblkp->hblk_next; 4856 nx_pa = hmeblkp->hblk_nextpa; 4857 4858 /* 4859 * skip if not this context, if a shadow block or 4860 * if the mapping is not in the requested range 4861 */ 4862 if (hmeblkp->hblk_tag.htag_id != sfmmup || 4863 hmeblkp->hblk_shw_bit || 4864 (sa = (caddr_t)get_hblk_base(hmeblkp)) >= endaddr || 4865 (ea = get_hblk_endaddr(hmeblkp)) <= startaddr) { 4866 pr_hblk = hmeblkp; 4867 prevpa = hblkpa; 4868 goto next_block; 4869 } 4870 4871 /* 4872 * unload if there are any current valid mappings 4873 */ 4874 if (hmeblkp->hblk_vcnt != 0 || 4875 hmeblkp->hblk_hmecnt != 0) 4876 (void) sfmmu_hblk_unload(sfmmup, hmeblkp, 4877 sa, ea, dmrp, flags); 4878 4879 /* 4880 * on unmap we also release the HME block itself, once 4881 * all mappings are gone. 4882 */ 4883 if ((flags & HAT_UNLOAD_UNMAP) != 0 && 4884 !hmeblkp->hblk_vcnt && 4885 !hmeblkp->hblk_hmecnt) { 4886 ASSERT(!hmeblkp->hblk_lckcnt); 4887 sfmmu_hblk_hash_rm(hmebp, hmeblkp, 4888 prevpa, pr_hblk); 4889 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 4890 } else { 4891 pr_hblk = hmeblkp; 4892 prevpa = hblkpa; 4893 } 4894 4895 if (callback == NULL) 4896 goto next_block; 4897 4898 /* 4899 * HME blocks may span more than one page, but we may be 4900 * unmapping only one page, so check for a smaller range 4901 * for the callback 4902 */ 4903 if (sa < startaddr) 4904 sa = startaddr; 4905 if (--ea > endaddr) 4906 ea = endaddr - 1; 4907 4908 cb_sa[addr_cnt] = sa; 4909 cb_ea[addr_cnt] = ea; 4910 if (++addr_cnt == MAX_CB_ADDR) { 4911 if (dmrp != NULL) { 4912 DEMAP_RANGE_FLUSH(dmrp); 4913 cpuset = sfmmup->sfmmu_cpusran; 4914 xt_sync(cpuset); 4915 } 4916 4917 for (a = 0; a < MAX_CB_ADDR; ++a) { 4918 callback->hcb_start_addr = cb_sa[a]; 4919 callback->hcb_end_addr = cb_ea[a]; 4920 callback->hcb_function(callback); 4921 } 4922 addr_cnt = 0; 4923 } 4924 4925 next_block: 4926 hmeblkp = nx_hblk; 4927 hblkpa = nx_pa; 4928 } 4929 SFMMU_HASH_UNLOCK(hmebp); 4930 } 4931 4932 sfmmu_hblks_list_purge(&list); 4933 if (dmrp != NULL) { 4934 DEMAP_RANGE_FLUSH(dmrp); 4935 cpuset = sfmmup->sfmmu_cpusran; 4936 xt_sync(cpuset); 4937 } 4938 4939 for (a = 0; a < addr_cnt; ++a) { 4940 callback->hcb_start_addr = cb_sa[a]; 4941 callback->hcb_end_addr = cb_ea[a]; 4942 callback->hcb_function(callback); 4943 } 4944 4945 /* 4946 * Check TSB and TLB page sizes if the process isn't exiting. 4947 */ 4948 if (!sfmmup->sfmmu_free) 4949 sfmmu_check_page_sizes(sfmmup, 0); 4950 } 4951 4952 /* 4953 * Unload all the mappings in the range [addr..addr+len). addr and len must 4954 * be MMU_PAGESIZE aligned. 
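*
* A hedged sketch of driving the callback variant below (illustrative
* only; "my_unload_done" and "note_unloaded_range" are made-up names,
* and the handler is assumed to return void):
*
*     static void
*     my_unload_done(hat_callback_t *cb)
*     {
*             note_unloaded_range(cb->hcb_start_addr, cb->hcb_end_addr);
*     }
*
*     hat_callback_t cb;
*
*     bzero(&cb, sizeof (cb));
*     cb.hcb_function = my_unload_done;
*     hat_unload_callback(hat, addr, len, HAT_UNLOAD_UNMAP, &cb);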
4955 */ 4956 4957 extern struct seg *segkmap; 4958 #define ISSEGKMAP(sfmmup, addr) (sfmmup == ksfmmup && \ 4959 segkmap->s_base <= (addr) && (addr) < (segkmap->s_base + segkmap->s_size)) 4960 4961 4962 void 4963 hat_unload_callback( 4964 struct hat *sfmmup, 4965 caddr_t addr, 4966 size_t len, 4967 uint_t flags, 4968 hat_callback_t *callback) 4969 { 4970 struct hmehash_bucket *hmebp; 4971 hmeblk_tag hblktag; 4972 int hmeshift, hashno, iskernel; 4973 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL; 4974 caddr_t endaddr; 4975 cpuset_t cpuset; 4976 uint64_t hblkpa, prevpa; 4977 int addr_count = 0; 4978 int a; 4979 caddr_t cb_start_addr[MAX_CB_ADDR]; 4980 caddr_t cb_end_addr[MAX_CB_ADDR]; 4981 int issegkmap = ISSEGKMAP(sfmmup, addr); 4982 demap_range_t dmr, *dmrp; 4983 4984 if (sfmmup->sfmmu_xhat_provider) { 4985 XHAT_UNLOAD_CALLBACK(sfmmup, addr, len, flags, callback); 4986 return; 4987 } else { 4988 /* 4989 * This must be a CPU HAT. If the address space has 4990 * XHATs attached, unload the mappings for all of them, 4991 * just in case 4992 */ 4993 ASSERT(sfmmup->sfmmu_as != NULL); 4994 if (sfmmup->sfmmu_as->a_xhat != NULL) 4995 xhat_unload_callback_all(sfmmup->sfmmu_as, addr, 4996 len, flags, callback); 4997 } 4998 4999 ASSERT((sfmmup == ksfmmup) || (flags & HAT_UNLOAD_OTHER) || \ 5000 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 5001 5002 ASSERT(sfmmup != NULL); 5003 ASSERT((len & MMU_PAGEOFFSET) == 0); 5004 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 5005 5006 /* 5007 * Probing through a large VA range (say 63 bits) will be slow, even 5008 * at 4 Meg steps between the probes. So, when the virtual address range 5009 * is very large, search the HME entries for what to unload. 5010 * 5011 * len >> TTE_PAGE_SHIFT(TTE4M) is the # of 4Meg probes we'd need 5012 * 5013 * UHMEHASH_SZ is number of hash buckets to examine 5014 * 5015 */ 5016 if (sfmmup != KHATID && (len >> TTE_PAGE_SHIFT(TTE4M)) > UHMEHASH_SZ) { 5017 hat_unload_large_virtual(sfmmup, addr, len, flags, callback); 5018 return; 5019 } 5020 5021 CPUSET_ZERO(cpuset); 5022 5023 /* 5024 * If the process is exiting, we can save a lot of fuss since 5025 * we'll flush the TLB when we free the ctx anyway. 5026 */ 5027 if (sfmmup->sfmmu_free) 5028 dmrp = NULL; 5029 else 5030 dmrp = &dmr; 5031 5032 DEMAP_RANGE_INIT(sfmmup, dmrp); 5033 endaddr = addr + len; 5034 hblktag.htag_id = sfmmup; 5035 5036 /* 5037 * It is likely for the vm to call unload over a wide range of 5038 * addresses that are actually very sparsely populated by 5039 * translations. In order to speed this up the sfmmu hat supports 5040 * the concept of shadow hmeblks. Dummy large page hmeblks that 5041 * correspond to actual small translations are allocated at tteload 5042 * time and are referred to as shadow hmeblks. Now, during unload 5043 * time, we first check if we have a shadow hmeblk for that 5044 * translation. The absence of one means the corresponding address 5045 * range is empty and can be skipped. 5046 * 5047 * The kernel is an exception to above statement and that is why 5048 * we don't use shadow hmeblks and hash starting from the smallest 5049 * page size. 
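*
* As an illustration of the saving (assuming the usual 8K base page and
* the 64K span of an 8K-level hmeblk): a single absent 4M-level probe
* lets the loop below skip the whole 4M region in one step, instead of
* probing each of the 64 possible 8K-level hmeblks covering that range.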
5050 */
5051 if (sfmmup == KHATID) {
5052 iskernel = 1;
5053 hashno = TTE64K;
5054 } else {
5055 iskernel = 0;
5056 if (mmu_page_sizes == max_mmu_page_sizes) {
5057 hashno = TTE256M;
5058 } else {
5059 hashno = TTE4M;
5060 }
5061 }
5062 while (addr < endaddr) {
5063 hmeshift = HME_HASH_SHIFT(hashno);
5064 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
5065 hblktag.htag_rehash = hashno;
5066 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
5067
5068 SFMMU_HASH_LOCK(hmebp);
5069
5070 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, hblkpa, pr_hblk,
5071 prevpa, &list);
5072 if (hmeblkp == NULL) {
5073 /*
5074 * Didn't find an hmeblk. Skip the appropriate
5075 * address range.
5076 */
5077 SFMMU_HASH_UNLOCK(hmebp);
5078 if (iskernel) {
5079 if (hashno < mmu_hashcnt) {
5080 hashno++;
5081 continue;
5082 } else {
5083 hashno = TTE64K;
5084 addr = (caddr_t)roundup((uintptr_t)addr
5085 + 1, MMU_PAGESIZE64K);
5086 continue;
5087 }
5088 }
5089 addr = (caddr_t)roundup((uintptr_t)addr + 1,
5090 (1 << hmeshift));
5091 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) {
5092 ASSERT(hashno == TTE64K);
5093 continue;
5094 }
5095 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) {
5096 hashno = TTE512K;
5097 continue;
5098 }
5099 if (mmu_page_sizes == max_mmu_page_sizes) {
5100 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) {
5101 hashno = TTE4M;
5102 continue;
5103 }
5104 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) {
5105 hashno = TTE32M;
5106 continue;
5107 }
5108 hashno = TTE256M;
5109 continue;
5110 } else {
5111 hashno = TTE4M;
5112 continue;
5113 }
5114 }
5115 ASSERT(hmeblkp);
5116 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
5117 /*
5118 * If the valid count is zero we can skip the range
5119 * mapped by this hmeblk.
5120 * We free hblks in the case of HAT_UNMAP. HAT_UNMAP
5121 * is used by segment drivers as a hint
5122 * that the mapping resource won't be used any longer.
5123 * The best example of this is during exit().
5124 */
5125 addr = (caddr_t)roundup((uintptr_t)addr + 1,
5126 get_hblk_span(hmeblkp));
5127 if ((flags & HAT_UNLOAD_UNMAP) ||
5128 (iskernel && !issegkmap)) {
5129 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa,
5130 pr_hblk);
5131 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list);
5132 }
5133 SFMMU_HASH_UNLOCK(hmebp);
5134
5135 if (iskernel) {
5136 hashno = TTE64K;
5137 continue;
5138 }
5139 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) {
5140 ASSERT(hashno == TTE64K);
5141 continue;
5142 }
5143 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) {
5144 hashno = TTE512K;
5145 continue;
5146 }
5147 if (mmu_page_sizes == max_mmu_page_sizes) {
5148 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) {
5149 hashno = TTE4M;
5150 continue;
5151 }
5152 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) {
5153 hashno = TTE32M;
5154 continue;
5155 }
5156 hashno = TTE256M;
5157 continue;
5158 } else {
5159 hashno = TTE4M;
5160 continue;
5161 }
5162 }
5163 if (hmeblkp->hblk_shw_bit) {
5164 /*
5165 * If we encounter a shadow hmeblk we know there are
5166 * smaller sized hmeblks mapping the same address space.
5167 * Decrement the hash size and rehash.
5168 */
5169 ASSERT(sfmmup != KHATID);
5170 hashno--;
5171 SFMMU_HASH_UNLOCK(hmebp);
5172 continue;
5173 }
5174
5175 /*
5176 * track callback address ranges.
5177 * only start a new range when it's not contiguous 5178 */ 5179 if (callback != NULL) { 5180 if (addr_count > 0 && 5181 addr == cb_end_addr[addr_count - 1]) 5182 --addr_count; 5183 else 5184 cb_start_addr[addr_count] = addr; 5185 } 5186 5187 addr = sfmmu_hblk_unload(sfmmup, hmeblkp, addr, endaddr, 5188 dmrp, flags); 5189 5190 if (callback != NULL) 5191 cb_end_addr[addr_count++] = addr; 5192 5193 if (((flags & HAT_UNLOAD_UNMAP) || (iskernel && !issegkmap)) && 5194 !hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 5195 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, 5196 pr_hblk); 5197 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 5198 } 5199 SFMMU_HASH_UNLOCK(hmebp); 5200 5201 /* 5202 * Notify our caller as to exactly which pages 5203 * have been unloaded. We do these in clumps, 5204 * to minimize the number of xt_sync()s that need to occur. 5205 */ 5206 if (callback != NULL && addr_count == MAX_CB_ADDR) { 5207 DEMAP_RANGE_FLUSH(dmrp); 5208 if (dmrp != NULL) { 5209 cpuset = sfmmup->sfmmu_cpusran; 5210 xt_sync(cpuset); 5211 } 5212 5213 for (a = 0; a < MAX_CB_ADDR; ++a) { 5214 callback->hcb_start_addr = cb_start_addr[a]; 5215 callback->hcb_end_addr = cb_end_addr[a]; 5216 callback->hcb_function(callback); 5217 } 5218 addr_count = 0; 5219 } 5220 if (iskernel) { 5221 hashno = TTE64K; 5222 continue; 5223 } 5224 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 5225 ASSERT(hashno == TTE64K); 5226 continue; 5227 } 5228 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 5229 hashno = TTE512K; 5230 continue; 5231 } 5232 if (mmu_page_sizes == max_mmu_page_sizes) { 5233 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 5234 hashno = TTE4M; 5235 continue; 5236 } 5237 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 5238 hashno = TTE32M; 5239 continue; 5240 } 5241 hashno = TTE256M; 5242 } else { 5243 hashno = TTE4M; 5244 } 5245 } 5246 5247 sfmmu_hblks_list_purge(&list); 5248 DEMAP_RANGE_FLUSH(dmrp); 5249 if (dmrp != NULL) { 5250 cpuset = sfmmup->sfmmu_cpusran; 5251 xt_sync(cpuset); 5252 } 5253 if (callback && addr_count != 0) { 5254 for (a = 0; a < addr_count; ++a) { 5255 callback->hcb_start_addr = cb_start_addr[a]; 5256 callback->hcb_end_addr = cb_end_addr[a]; 5257 callback->hcb_function(callback); 5258 } 5259 } 5260 5261 /* 5262 * Check TSB and TLB page sizes if the process isn't exiting. 5263 */ 5264 if (!sfmmup->sfmmu_free) 5265 sfmmu_check_page_sizes(sfmmup, 0); 5266 } 5267 5268 /* 5269 * Unload all the mappings in the range [addr..addr+len). addr and len must 5270 * be MMU_PAGESIZE aligned. 5271 */ 5272 void 5273 hat_unload(struct hat *sfmmup, caddr_t addr, size_t len, uint_t flags) 5274 { 5275 if (sfmmup->sfmmu_xhat_provider) { 5276 XHAT_UNLOAD(sfmmup, addr, len, flags); 5277 return; 5278 } 5279 hat_unload_callback(sfmmup, addr, len, flags, NULL); 5280 } 5281 5282 5283 /* 5284 * Find the largest mapping size for this page. 5285 */ 5286 int 5287 fnd_mapping_sz(page_t *pp) 5288 { 5289 int sz; 5290 int p_index; 5291 5292 p_index = PP_MAPINDEX(pp); 5293 5294 sz = 0; 5295 p_index >>= 1; /* don't care about 8K bit */ 5296 for (; p_index; p_index >>= 1) { 5297 sz++; 5298 } 5299 5300 return (sz); 5301 } 5302 5303 /* 5304 * This function unloads a range of addresses for an hmeblk. 5305 * It returns the next address to be unloaded. 5306 * It should be called with the hash lock held. 
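*
* Callers advance their scan with the return value, as in
* hat_unload_callback() above:
*
*     addr = sfmmu_hblk_unload(sfmmup, hmeblkp, addr, endaddr,
*         dmrp, flags);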
5307 */ 5308 static caddr_t 5309 sfmmu_hblk_unload(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 5310 caddr_t endaddr, demap_range_t *dmrp, uint_t flags) 5311 { 5312 tte_t tte, ttemod; 5313 struct sf_hment *sfhmep; 5314 int ttesz; 5315 long ttecnt; 5316 page_t *pp; 5317 kmutex_t *pml; 5318 int ret; 5319 int use_demap_range; 5320 5321 ASSERT(in_hblk_range(hmeblkp, addr)); 5322 ASSERT(!hmeblkp->hblk_shw_bit); 5323 #ifdef DEBUG 5324 if (get_hblk_ttesz(hmeblkp) != TTE8K && 5325 (endaddr < get_hblk_endaddr(hmeblkp))) { 5326 panic("sfmmu_hblk_unload: partial unload of large page"); 5327 } 5328 #endif /* DEBUG */ 5329 5330 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 5331 ttesz = get_hblk_ttesz(hmeblkp); 5332 5333 use_demap_range = (do_virtual_coloring && 5334 ((dmrp == NULL) || TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp))); 5335 if (use_demap_range) { 5336 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 5337 } else { 5338 DEMAP_RANGE_FLUSH(dmrp); 5339 } 5340 ttecnt = 0; 5341 HBLKTOHME(sfhmep, hmeblkp, addr); 5342 5343 while (addr < endaddr) { 5344 pml = NULL; 5345 sfmmu_copytte(&sfhmep->hme_tte, &tte); 5346 if (TTE_IS_VALID(&tte)) { 5347 pp = sfhmep->hme_page; 5348 if (pp != NULL) { 5349 pml = sfmmu_mlist_enter(pp); 5350 } 5351 5352 /* 5353 * Verify if hme still points to 'pp' now that 5354 * we have p_mapping lock. 5355 */ 5356 if (sfhmep->hme_page != pp) { 5357 if (pp != NULL && sfhmep->hme_page != NULL) { 5358 ASSERT(pml != NULL); 5359 sfmmu_mlist_exit(pml); 5360 /* Re-start this iteration. */ 5361 continue; 5362 } 5363 ASSERT((pp != NULL) && 5364 (sfhmep->hme_page == NULL)); 5365 goto tte_unloaded; 5366 } 5367 5368 /* 5369 * This point on we have both HASH and p_mapping 5370 * lock. 5371 */ 5372 ASSERT(pp == sfhmep->hme_page); 5373 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 5374 5375 /* 5376 * We need to loop on modify tte because it is 5377 * possible for pagesync to come along and 5378 * change the software bits beneath us. 5379 * 5380 * Page_unload can also invalidate the tte after 5381 * we read tte outside of p_mapping lock. 5382 */ 5383 again: 5384 ttemod = tte; 5385 5386 TTE_SET_INVALID(&ttemod); 5387 ret = sfmmu_modifytte_try(&tte, &ttemod, 5388 &sfhmep->hme_tte); 5389 5390 if (ret <= 0) { 5391 if (TTE_IS_VALID(&tte)) { 5392 ASSERT(ret < 0); 5393 goto again; 5394 } 5395 if (pp != NULL) { 5396 panic("sfmmu_hblk_unload: pp = 0x%p " 5397 "tte became invalid under mlist" 5398 " lock = 0x%p", pp, pml); 5399 } 5400 continue; 5401 } 5402 5403 if (!(flags & HAT_UNLOAD_NOSYNC)) { 5404 sfmmu_ttesync(sfmmup, addr, &tte, pp); 5405 } 5406 5407 /* 5408 * Ok- we invalidated the tte. Do the rest of the job. 5409 */ 5410 ttecnt++; 5411 5412 if (flags & HAT_UNLOAD_UNLOCK) { 5413 ASSERT(hmeblkp->hblk_lckcnt > 0); 5414 atomic_add_16(&hmeblkp->hblk_lckcnt, -1); 5415 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK); 5416 } 5417 5418 /* 5419 * Normally we would need to flush the page 5420 * from the virtual cache at this point in 5421 * order to prevent a potential cache alias 5422 * inconsistency. 5423 * The particular scenario we need to worry 5424 * about is: 5425 * Given: va1 and va2 are two virtual address 5426 * that alias and map the same physical 5427 * address. 5428 * 1. mapping exists from va1 to pa and data 5429 * has been read into the cache. 5430 * 2. unload va1. 5431 * 3. load va2 and modify data using va2. 5432 * 4 unload va2. 5433 * 5. load va1 and reference data. Unless we 5434 * flush the data cache when we unload we will 5435 * get stale data. 
5436 * Fortunately, page coloring eliminates the 5437 * above scenario by remembering the color a 5438 * physical page was last or is currently 5439 * mapped to. Now, we delay the flush until 5440 * the loading of translations. Only when the 5441 * new translation is of a different color 5442 * are we forced to flush. 5443 */ 5444 if (use_demap_range) { 5445 /* 5446 * Mark this page as needing a demap. 5447 */ 5448 DEMAP_RANGE_MARKPG(dmrp, addr); 5449 } else { 5450 if (do_virtual_coloring) { 5451 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 5452 sfmmup->sfmmu_free, 0); 5453 } else { 5454 pfn_t pfnum; 5455 5456 pfnum = TTE_TO_PFN(addr, &tte); 5457 sfmmu_tlbcache_demap(addr, sfmmup, 5458 hmeblkp, pfnum, sfmmup->sfmmu_free, 5459 FLUSH_NECESSARY_CPUS, 5460 CACHE_FLUSH, 0); 5461 } 5462 } 5463 5464 if (pp) { 5465 /* 5466 * Remove the hment from the mapping list 5467 */ 5468 ASSERT(hmeblkp->hblk_hmecnt > 0); 5469 5470 /* 5471 * Again, we cannot 5472 * ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS); 5473 */ 5474 HME_SUB(sfhmep, pp); 5475 membar_stst(); 5476 atomic_add_16(&hmeblkp->hblk_hmecnt, -1); 5477 } 5478 5479 ASSERT(hmeblkp->hblk_vcnt > 0); 5480 atomic_add_16(&hmeblkp->hblk_vcnt, -1); 5481 5482 ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt || 5483 !hmeblkp->hblk_lckcnt); 5484 5485 #ifdef VAC 5486 if (pp && (pp->p_nrm & (P_KPMC | P_KPMS | P_TNC))) { 5487 if (PP_ISTNC(pp)) { 5488 /* 5489 * If page was temporary 5490 * uncached, try to recache 5491 * it. Note that HME_SUB() was 5492 * called above so p_index and 5493 * mlist had been updated. 5494 */ 5495 conv_tnc(pp, ttesz); 5496 } else if (pp->p_mapping == NULL) { 5497 ASSERT(kpm_enable); 5498 /* 5499 * Page is marked to be in VAC conflict 5500 * to an existing kpm mapping and/or is 5501 * kpm mapped using only the regular 5502 * pagesize. 5503 */ 5504 sfmmu_kpm_hme_unload(pp); 5505 } 5506 } 5507 #endif /* VAC */ 5508 } else if ((pp = sfhmep->hme_page) != NULL) { 5509 /* 5510 * TTE is invalid but the hme 5511 * still exists. let pageunload 5512 * complete its job. 5513 */ 5514 ASSERT(pml == NULL); 5515 pml = sfmmu_mlist_enter(pp); 5516 if (sfhmep->hme_page != NULL) { 5517 sfmmu_mlist_exit(pml); 5518 continue; 5519 } 5520 ASSERT(sfhmep->hme_page == NULL); 5521 } else if (hmeblkp->hblk_hmecnt != 0) { 5522 /* 5523 * pageunload may have not finished decrementing 5524 * hblk_vcnt and hblk_hmecnt. Find page_t if any and 5525 * wait for pageunload to finish. Rely on pageunload 5526 * to decrement hblk_hmecnt after hblk_vcnt. 5527 */ 5528 pfn_t pfn = TTE_TO_TTEPFN(&tte); 5529 ASSERT(pml == NULL); 5530 if (pf_is_memory(pfn)) { 5531 pp = page_numtopp_nolock(pfn); 5532 if (pp != NULL) { 5533 pml = sfmmu_mlist_enter(pp); 5534 sfmmu_mlist_exit(pml); 5535 pml = NULL; 5536 } 5537 } 5538 } 5539 5540 tte_unloaded: 5541 /* 5542 * At this point, the tte we are looking at 5543 * should be unloaded, and hme has been unlinked 5544 * from page too. This is important because in 5545 * pageunload, it does ttesync() then HME_SUB. 5546 * We need to make sure HME_SUB has been completed 5547 * so we know ttesync() has been completed. Otherwise, 5548 * at exit time, after return from hat layer, VM will 5549 * release as structure which hat_setstat() (called 5550 * by ttesync()) needs. 
5551 */
5552 #ifdef DEBUG
5553 {
5554 tte_t dtte;
5555
5556 ASSERT(sfhmep->hme_page == NULL);
5557
5558 sfmmu_copytte(&sfhmep->hme_tte, &dtte);
5559 ASSERT(!TTE_IS_VALID(&dtte));
5560 }
5561 #endif
5562
5563 if (pml) {
5564 sfmmu_mlist_exit(pml);
5565 }
5566
5567 addr += TTEBYTES(ttesz);
5568 sfhmep++;
5569 DEMAP_RANGE_NEXTPG(dmrp);
5570 }
5571 if (ttecnt > 0)
5572 atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -ttecnt);
5573 return (addr);
5574 }
5575
5576 /*
5577 * Synchronize all the mappings in the range [addr..addr+len).
5578 * Can be called with clearflag having two states:
5579 * HAT_SYNC_DONTZERO means just return the rm stats
5580 * HAT_SYNC_ZERORM means zero rm bits in the tte and return the stats
5581 */
5582 void
5583 hat_sync(struct hat *sfmmup, caddr_t addr, size_t len, uint_t clearflag)
5584 {
5585 struct hmehash_bucket *hmebp;
5586 hmeblk_tag hblktag;
5587 int hmeshift, hashno = 1;
5588 struct hme_blk *hmeblkp, *list = NULL;
5589 caddr_t endaddr;
5590 cpuset_t cpuset;
5591
5592 ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
5593 ASSERT((sfmmup == ksfmmup) ||
5594 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
5595 ASSERT((len & MMU_PAGEOFFSET) == 0);
5596 ASSERT((clearflag == HAT_SYNC_DONTZERO) ||
5597 (clearflag == HAT_SYNC_ZERORM));
5598
5599 CPUSET_ZERO(cpuset);
5600
5601 endaddr = addr + len;
5602 hblktag.htag_id = sfmmup;
5603 /*
5604 * Spitfire supports 4 page sizes.
5605 * Most pages are expected to be of the smallest page
5606 * size (8K) and these will not need to be rehashed. 64K
5607 * pages also don't need to be rehashed because an hmeblk
5608 * spans 64K of address space. 512K pages might need 1 rehash
5609 * and 4M pages 2 rehashes.
5610 */
5611 while (addr < endaddr) {
5612 hmeshift = HME_HASH_SHIFT(hashno);
5613 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
5614 hblktag.htag_rehash = hashno;
5615 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
5616
5617 SFMMU_HASH_LOCK(hmebp);
5618
5619 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
5620 if (hmeblkp != NULL) {
5621 /*
5622 * We've encountered a shadow hmeblk so skip the range
5623 * of the next smaller mapping size.
5624 */
5625 if (hmeblkp->hblk_shw_bit) {
5626 ASSERT(sfmmup != ksfmmup);
5627 ASSERT(hashno > 1);
5628 addr = (caddr_t)P2END((uintptr_t)addr,
5629 TTEBYTES(hashno - 1));
5630 } else {
5631 addr = sfmmu_hblk_sync(sfmmup, hmeblkp,
5632 addr, endaddr, clearflag);
5633 }
5634 SFMMU_HASH_UNLOCK(hmebp);
5635 hashno = 1;
5636 continue;
5637 }
5638 SFMMU_HASH_UNLOCK(hmebp);
5639
5640 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
5641 /*
5642 * We have traversed the whole list and rehashed
5643 * if necessary without finding the address to sync.
5644 * This is ok so we increment the address by the
5645 * smallest hmeblk range for kernel mappings and the
5646 * largest hmeblk range, to account for shadow hmeblks,
5647 * for user mappings and continue.
5648 */
5649 if (sfmmup == ksfmmup)
5650 addr = (caddr_t)P2END((uintptr_t)addr,
5651 TTEBYTES(1));
5652 else
5653 addr = (caddr_t)P2END((uintptr_t)addr,
5654 TTEBYTES(hashno));
5655 hashno = 1;
5656 } else {
5657 hashno++;
5658 }
5659 }
5660 sfmmu_hblks_list_purge(&list);
5661 cpuset = sfmmup->sfmmu_cpusran;
5662 xt_sync(cpuset);
5663 }
5664
5665 static caddr_t
5666 sfmmu_hblk_sync(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
5667 caddr_t endaddr, int clearflag)
5668 {
5669 tte_t tte, ttemod;
5670 struct sf_hment *sfhmep;
5671 int ttesz;
5672 struct page *pp;
5673 kmutex_t *pml;
5674 int ret;
5675
5676 ASSERT(hmeblkp->hblk_shw_bit == 0);
5677
5678 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
5679
5680 ttesz = get_hblk_ttesz(hmeblkp);
5681 HBLKTOHME(sfhmep, hmeblkp, addr);
5682
5683 while (addr < endaddr) {
5684 sfmmu_copytte(&sfhmep->hme_tte, &tte);
5685 if (TTE_IS_VALID(&tte)) {
5686 pml = NULL;
5687 pp = sfhmep->hme_page;
5688 if (pp) {
5689 pml = sfmmu_mlist_enter(pp);
5690 }
5691 if (pp != sfhmep->hme_page) {
5692 /*
5693 * tte must have been unloaded
5694 * underneath us. Recheck
5695 */
5696 ASSERT(pml);
5697 sfmmu_mlist_exit(pml);
5698 continue;
5699 }
5700
5701 ASSERT(pp == NULL || sfmmu_mlist_held(pp));
5702
5703 if (clearflag == HAT_SYNC_ZERORM) {
5704 ttemod = tte;
5705 TTE_CLR_RM(&ttemod);
5706 ret = sfmmu_modifytte_try(&tte, &ttemod,
5707 &sfhmep->hme_tte);
5708 if (ret < 0) {
5709 if (pml) {
5710 sfmmu_mlist_exit(pml);
5711 }
5712 continue;
5713 }
5714
5715 if (ret > 0) {
5716 sfmmu_tlb_demap(addr, sfmmup,
5717 hmeblkp, 0, 0);
5718 }
5719 }
5720 sfmmu_ttesync(sfmmup, addr, &tte, pp);
5721 if (pml) {
5722 sfmmu_mlist_exit(pml);
5723 }
5724 }
5725 addr += TTEBYTES(ttesz);
5726 sfhmep++;
5727 }
5728 return (addr);
5729 }
5730
5731 /*
5732 * This function will sync a tte to the page struct and it will
5733 * update the hat stats. Currently it allows us to pass a NULL pp
5734 * and we will simply update the stats. We may want to change this
5735 * so we only keep stats for pages backed by pp's.
5736 */
5737 static void
5738 sfmmu_ttesync(struct hat *sfmmup, caddr_t addr, tte_t *ttep, page_t *pp)
5739 {
5740 uint_t rm = 0;
5741 int sz;
5742 pgcnt_t npgs;
5743
5744 ASSERT(TTE_IS_VALID(ttep));
5745
5746 if (TTE_IS_NOSYNC(ttep)) {
5747 return;
5748 }
5749
5750 if (TTE_IS_REF(ttep)) {
5751 rm = P_REF;
5752 }
5753 if (TTE_IS_MOD(ttep)) {
5754 rm |= P_MOD;
5755 }
5756
5757 if (rm == 0) {
5758 return;
5759 }
5760
5761 sz = TTE_CSZ(ttep);
5762 if (sfmmup->sfmmu_rmstat) {
5763 int i;
5764 caddr_t vaddr = addr;
5765
5766 for (i = 0; i < TTEPAGES(sz); i++, vaddr += MMU_PAGESIZE) {
5767 hat_setstat(sfmmup->sfmmu_as, vaddr, MMU_PAGESIZE, rm);
5768 }
5769
5770 }
5771
5772 /*
5773 * XXX I want to use cas to update nrm bits but they
5774 * currently belong in common/vm and not in hat where
5775 * they should be.
5776 * The nrm bits are protected by the same mutex as
5777 * the one that protects the page's mapping list.
5778 */
5779 if (!pp)
5780 return;
5781 ASSERT(sfmmu_mlist_held(pp));
5782 /*
5783 * If the tte is for a large page, we need to sync all the
5784 * pages covered by the tte.
5785 */
5786 if (sz != TTE8K) {
5787 ASSERT(pp->p_szc != 0);
5788 pp = PP_GROUPLEADER(pp, sz);
5789 ASSERT(sfmmu_mlist_held(pp));
5790 }
5791
5792 /* Get number of pages from tte size.
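* For example, with an 8K base page, TTEPAGES(TTE4M) is 512.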
*/ 5793 npgs = TTEPAGES(sz); 5794 5795 do { 5796 ASSERT(pp); 5797 ASSERT(sfmmu_mlist_held(pp)); 5798 if (((rm & P_REF) != 0 && !PP_ISREF(pp)) || 5799 ((rm & P_MOD) != 0 && !PP_ISMOD(pp))) 5800 hat_page_setattr(pp, rm); 5801 5802 /* 5803 * Are we done? If not, we must have a large mapping. 5804 * For large mappings we need to sync the rest of the pages 5805 * covered by this tte; goto the next page. 5806 */ 5807 } while (--npgs > 0 && (pp = PP_PAGENEXT(pp))); 5808 } 5809 5810 /* 5811 * Execute pre-callback handler of each pa_hment linked to pp 5812 * 5813 * Inputs: 5814 * flag: either HAT_PRESUSPEND or HAT_SUSPEND. 5815 * capture_cpus: pointer to return value (below) 5816 * 5817 * Returns: 5818 * Propagates the subsystem callback return values back to the caller; 5819 * returns 0 on success. If capture_cpus is non-NULL, the value returned 5820 * is zero if all of the pa_hments are of a type that do not require 5821 * capturing CPUs prior to suspending the mapping, else it is 1. 5822 */ 5823 static int 5824 hat_pageprocess_precallbacks(struct page *pp, uint_t flag, int *capture_cpus) 5825 { 5826 struct sf_hment *sfhmep; 5827 struct pa_hment *pahmep; 5828 int (*f)(caddr_t, uint_t, uint_t, void *); 5829 int ret; 5830 id_t id; 5831 int locked = 0; 5832 kmutex_t *pml; 5833 5834 ASSERT(PAGE_EXCL(pp)); 5835 if (!sfmmu_mlist_held(pp)) { 5836 pml = sfmmu_mlist_enter(pp); 5837 locked = 1; 5838 } 5839 5840 if (capture_cpus) 5841 *capture_cpus = 0; 5842 5843 top: 5844 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 5845 /* 5846 * skip sf_hments corresponding to VA<->PA mappings; 5847 * for pa_hment's, hme_tte.ll is zero 5848 */ 5849 if (!IS_PAHME(sfhmep)) 5850 continue; 5851 5852 pahmep = sfhmep->hme_data; 5853 ASSERT(pahmep != NULL); 5854 5855 /* 5856 * skip if pre-handler has been called earlier in this loop 5857 */ 5858 if (pahmep->flags & flag) 5859 continue; 5860 5861 id = pahmep->cb_id; 5862 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid); 5863 if (capture_cpus && sfmmu_cb_table[id].capture_cpus != 0) 5864 *capture_cpus = 1; 5865 if ((f = sfmmu_cb_table[id].prehandler) == NULL) { 5866 pahmep->flags |= flag; 5867 continue; 5868 } 5869 5870 /* 5871 * Drop the mapping list lock to avoid locking order issues. 5872 */ 5873 if (locked) 5874 sfmmu_mlist_exit(pml); 5875 5876 ret = f(pahmep->addr, pahmep->len, flag, pahmep->pvt); 5877 if (ret != 0) 5878 return (ret); /* caller must do the cleanup */ 5879 5880 if (locked) { 5881 pml = sfmmu_mlist_enter(pp); 5882 pahmep->flags |= flag; 5883 goto top; 5884 } 5885 5886 pahmep->flags |= flag; 5887 } 5888 5889 if (locked) 5890 sfmmu_mlist_exit(pml); 5891 5892 return (0); 5893 } 5894 5895 /* 5896 * Execute post-callback handler of each pa_hment linked to pp 5897 * 5898 * Same overall assumptions and restrictions apply as for 5899 * hat_pageprocess_precallbacks(). 
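 * As an illustration of the intended ordering (this is how
 * hat_page_relocate() below drives it on the success path):
 *
 *	hat_pageprocess_precallbacks(targ, HAT_PRESUSPEND, &cap_cpus);
 *	hat_pageprocess_precallbacks(targ, HAT_SUSPEND, NULL);
 *	hat_pagesuspend(targ);  ... copy ...  hat_pagereload(targ, repl);
 *	hat_pageprocess_postcallbacks(repl, HAT_UNSUSPEND);
 *	hat_pageprocess_postcallbacks(repl, HAT_POSTUNSUSPEND);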
5900 */ 5901 static void 5902 hat_pageprocess_postcallbacks(struct page *pp, uint_t flag) 5903 { 5904 pfn_t pgpfn = pp->p_pagenum; 5905 pfn_t pgmask = btop(page_get_pagesize(pp->p_szc)) - 1; 5906 pfn_t newpfn; 5907 struct sf_hment *sfhmep; 5908 struct pa_hment *pahmep; 5909 int (*f)(caddr_t, uint_t, uint_t, void *, pfn_t); 5910 id_t id; 5911 int locked = 0; 5912 kmutex_t *pml; 5913 5914 ASSERT(PAGE_EXCL(pp)); 5915 if (!sfmmu_mlist_held(pp)) { 5916 pml = sfmmu_mlist_enter(pp); 5917 locked = 1; 5918 } 5919 5920 top: 5921 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 5922 /* 5923 * skip sf_hments corresponding to VA<->PA mappings; 5924 * for pa_hment's, hme_tte.ll is zero 5925 */ 5926 if (!IS_PAHME(sfhmep)) 5927 continue; 5928 5929 pahmep = sfhmep->hme_data; 5930 ASSERT(pahmep != NULL); 5931 5932 if ((pahmep->flags & flag) == 0) 5933 continue; 5934 5935 pahmep->flags &= ~flag; 5936 5937 id = pahmep->cb_id; 5938 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid); 5939 if ((f = sfmmu_cb_table[id].posthandler) == NULL) 5940 continue; 5941 5942 /* 5943 * Convert the base page PFN into the constituent PFN 5944 * which is needed by the callback handler. 5945 */ 5946 newpfn = pgpfn | (btop((uintptr_t)pahmep->addr) & pgmask); 5947 5948 /* 5949 * Drop the mapping list lock to avoid locking order issues. 5950 */ 5951 if (locked) 5952 sfmmu_mlist_exit(pml); 5953 5954 if (f(pahmep->addr, pahmep->len, flag, pahmep->pvt, newpfn) 5955 != 0) 5956 panic("sfmmu: posthandler failed"); 5957 5958 if (locked) { 5959 pml = sfmmu_mlist_enter(pp); 5960 goto top; 5961 } 5962 } 5963 5964 if (locked) 5965 sfmmu_mlist_exit(pml); 5966 } 5967 5968 /* 5969 * Suspend locked kernel mapping 5970 */ 5971 void 5972 hat_pagesuspend(struct page *pp) 5973 { 5974 struct sf_hment *sfhmep; 5975 sfmmu_t *sfmmup; 5976 tte_t tte, ttemod; 5977 struct hme_blk *hmeblkp; 5978 caddr_t addr; 5979 int index, cons; 5980 cpuset_t cpuset; 5981 5982 ASSERT(PAGE_EXCL(pp)); 5983 ASSERT(sfmmu_mlist_held(pp)); 5984 5985 mutex_enter(&kpr_suspendlock); 5986 5987 /* 5988 * We're about to suspend a kernel mapping so mark this thread as 5989 * non-traceable by DTrace. This prevents us from running into issues 5990 * with probe context trying to touch a suspended page 5991 * in the relocation codepath itself. 5992 */ 5993 curthread->t_flag |= T_DONTDTRACE; 5994 5995 index = PP_MAPINDEX(pp); 5996 cons = TTE8K; 5997 5998 retry: 5999 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 6000 6001 if (IS_PAHME(sfhmep)) 6002 continue; 6003 6004 if (get_hblk_ttesz(sfmmu_hmetohblk(sfhmep)) != cons) 6005 continue; 6006 6007 /* 6008 * Loop until we successfully set the suspend bit in 6009 * the TTE. 6010 */ 6011 again: 6012 sfmmu_copytte(&sfhmep->hme_tte, &tte); 6013 ASSERT(TTE_IS_VALID(&tte)); 6014 6015 ttemod = tte; 6016 TTE_SET_SUSPEND(&ttemod); 6017 if (sfmmu_modifytte_try(&tte, &ttemod, 6018 &sfhmep->hme_tte) < 0) 6019 goto again; 6020 6021 /* 6022 * Invalidate TSB entry 6023 */ 6024 hmeblkp = sfmmu_hmetohblk(sfhmep); 6025 6026 sfmmup = hblktosfmmu(hmeblkp); 6027 ASSERT(sfmmup == ksfmmup); 6028 6029 addr = tte_to_vaddr(hmeblkp, tte); 6030 6031 /* 6032 * No need to make sure that the TSB for this sfmmu is 6033 * not being relocated since it is ksfmmup and thus it 6034 * will never be relocated. 
6035 */ 6036 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp); 6037 6038 /* 6039 * Update xcall stats 6040 */ 6041 cpuset = cpu_ready_set; 6042 CPUSET_DEL(cpuset, CPU->cpu_id); 6043 6044 /* LINTED: constant in conditional context */ 6045 SFMMU_XCALL_STATS(ksfmmup); 6046 6047 /* 6048 * Flush TLB entry on remote CPU's 6049 */ 6050 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, 6051 (uint64_t)ksfmmup); 6052 xt_sync(cpuset); 6053 6054 /* 6055 * Flush TLB entry on local CPU 6056 */ 6057 vtag_flushpage(addr, (uint64_t)ksfmmup); 6058 } 6059 6060 while (index != 0) { 6061 index = index >> 1; 6062 if (index != 0) 6063 cons++; 6064 if (index & 0x1) { 6065 pp = PP_GROUPLEADER(pp, cons); 6066 goto retry; 6067 } 6068 } 6069 } 6070 6071 #ifdef DEBUG 6072 6073 #define N_PRLE 1024 6074 struct prle { 6075 page_t *targ; 6076 page_t *repl; 6077 int status; 6078 int pausecpus; 6079 hrtime_t whence; 6080 }; 6081 6082 static struct prle page_relocate_log[N_PRLE]; 6083 static int prl_entry; 6084 static kmutex_t prl_mutex; 6085 6086 #define PAGE_RELOCATE_LOG(t, r, s, p) \ 6087 mutex_enter(&prl_mutex); \ 6088 page_relocate_log[prl_entry].targ = *(t); \ 6089 page_relocate_log[prl_entry].repl = *(r); \ 6090 page_relocate_log[prl_entry].status = (s); \ 6091 page_relocate_log[prl_entry].pausecpus = (p); \ 6092 page_relocate_log[prl_entry].whence = gethrtime(); \ 6093 prl_entry = (prl_entry == (N_PRLE - 1))? 0 : prl_entry + 1; \ 6094 mutex_exit(&prl_mutex); 6095 6096 #else /* !DEBUG */ 6097 #define PAGE_RELOCATE_LOG(t, r, s, p) 6098 #endif 6099 6100 /* 6101 * Core Kernel Page Relocation Algorithm 6102 * 6103 * Input: 6104 * 6105 * target : constituent pages are SE_EXCL locked. 6106 * replacement: constituent pages are SE_EXCL locked. 6107 * 6108 * Output: 6109 * 6110 * nrelocp: number of pages relocated 6111 */ 6112 int 6113 hat_page_relocate(page_t **target, page_t **replacement, spgcnt_t *nrelocp) 6114 { 6115 page_t *targ, *repl; 6116 page_t *tpp, *rpp; 6117 kmutex_t *low, *high; 6118 spgcnt_t npages, i; 6119 page_t *pl = NULL; 6120 int old_pil; 6121 cpuset_t cpuset; 6122 int cap_cpus; 6123 int ret; 6124 6125 if (hat_kpr_enabled == 0 || !kcage_on || PP_ISNORELOC(*target)) { 6126 PAGE_RELOCATE_LOG(target, replacement, EAGAIN, -1); 6127 return (EAGAIN); 6128 } 6129 6130 mutex_enter(&kpr_mutex); 6131 kreloc_thread = curthread; 6132 6133 targ = *target; 6134 repl = *replacement; 6135 ASSERT(repl != NULL); 6136 ASSERT(targ->p_szc == repl->p_szc); 6137 6138 npages = page_get_pagecnt(targ->p_szc); 6139 6140 /* 6141 * unload VA<->PA mappings that are not locked 6142 */ 6143 tpp = targ; 6144 for (i = 0; i < npages; i++) { 6145 (void) hat_pageunload(tpp, SFMMU_KERNEL_RELOC); 6146 tpp++; 6147 } 6148 6149 /* 6150 * Do "presuspend" callbacks, in a context from which we can still 6151 * block as needed. Note that we don't hold the mapping list lock 6152 * of "targ" at this point due to potential locking order issues; 6153 * we assume that between the hat_pageunload() above and holding 6154 * the SE_EXCL lock that the mapping list *cannot* change at this 6155 * point. 6156 */ 6157 ret = hat_pageprocess_precallbacks(targ, HAT_PRESUSPEND, &cap_cpus); 6158 if (ret != 0) { 6159 /* 6160 * EIO translates to fatal error, for all others cleanup 6161 * and return EAGAIN. 
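 * (A pre-handler that merely cannot tolerate a relocation at the moment
 * would therefore be expected to return something other than EIO, for
 * example EAGAIN; the exact convention is up to the registered
 * subsystem.)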
6162 */ 6163 ASSERT(ret != EIO); 6164 hat_pageprocess_postcallbacks(targ, HAT_POSTUNSUSPEND); 6165 PAGE_RELOCATE_LOG(target, replacement, ret, -1); 6166 kreloc_thread = NULL; 6167 mutex_exit(&kpr_mutex); 6168 return (EAGAIN); 6169 } 6170 6171 /* 6172 * acquire p_mapping list lock for both the target and replacement 6173 * root pages. 6174 * 6175 * low and high refer to the need to grab the mlist locks in a 6176 * specific order in order to prevent race conditions. Thus the 6177 * lower lock must be grabbed before the higher lock. 6178 * 6179 * This will block hat_unload's accessing p_mapping list. Since 6180 * we have SE_EXCL lock, hat_memload and hat_pageunload will be 6181 * blocked. Thus, no one else will be accessing the p_mapping list 6182 * while we suspend and reload the locked mapping below. 6183 */ 6184 tpp = targ; 6185 rpp = repl; 6186 sfmmu_mlist_reloc_enter(tpp, rpp, &low, &high); 6187 6188 kpreempt_disable(); 6189 6190 #ifdef VAC 6191 /* 6192 * If the replacement page is of a different virtual color 6193 * than the page it is replacing, we need to handle the VAC 6194 * consistency for it just as we would if we were setting up 6195 * a new mapping to a page. 6196 */ 6197 if ((tpp->p_szc == 0) && (PP_GET_VCOLOR(rpp) != NO_VCOLOR)) { 6198 if (tpp->p_vcolor != rpp->p_vcolor) { 6199 sfmmu_cache_flushcolor(PP_GET_VCOLOR(rpp), 6200 rpp->p_pagenum); 6201 } 6202 } 6203 #endif 6204 6205 /* 6206 * We raise our PIL to 13 so that we don't get captured by 6207 * another CPU or pinned by an interrupt thread. We can't go to 6208 * PIL 14 since the nexus driver(s) may need to interrupt at 6209 * that level in the case of IOMMU pseudo mappings. 6210 */ 6211 cpuset = cpu_ready_set; 6212 CPUSET_DEL(cpuset, CPU->cpu_id); 6213 if (!cap_cpus || CPUSET_ISNULL(cpuset)) { 6214 old_pil = splr(XCALL_PIL); 6215 } else { 6216 old_pil = -1; 6217 xc_attention(cpuset); 6218 } 6219 ASSERT(getpil() == XCALL_PIL); 6220 6221 /* 6222 * Now do suspend callbacks. In the case of an IOMMU mapping 6223 * this will suspend all DMA activity to the page while it is 6224 * being relocated. Since we are well above LOCK_LEVEL and CPUs 6225 * may be captured at this point we should have acquired any needed 6226 * locks in the presuspend callback. 6227 */ 6228 ret = hat_pageprocess_precallbacks(targ, HAT_SUSPEND, NULL); 6229 if (ret != 0) { 6230 repl = targ; 6231 goto suspend_fail; 6232 } 6233 6234 /* 6235 * Raise the PIL yet again, this time to block all high-level 6236 * interrupts on this CPU. This is necessary to prevent an 6237 * interrupt routine from pinning the thread which holds the 6238 * mapping suspended and then touching the suspended page. 6239 * 6240 * Once the page is suspended we also need to be careful to 6241 * avoid calling any functions which touch any seg_kmem memory 6242 * since that memory may be backed by the very page we are 6243 * relocating in here! 6244 */ 6245 hat_pagesuspend(targ); 6246 6247 /* 6248 * Now that we are confident everybody has stopped using this page, 6249 * copy the page contents. Note we use a physical copy to prevent 6250 * locking issues and to avoid fpRAS because we can't handle it in 6251 * this context. 6252 */ 6253 for (i = 0; i < npages; i++, tpp++, rpp++) { 6254 /* 6255 * Copy the contents of the page. 6256 */ 6257 ppcopy_kernel(tpp, rpp); 6258 } 6259 6260 tpp = targ; 6261 rpp = repl; 6262 for (i = 0; i < npages; i++, tpp++, rpp++) { 6263 /* 6264 * Copy attributes. VAC consistency was handled above, 6265 * if required. 
6266 */ 6267 rpp->p_nrm = tpp->p_nrm; 6268 tpp->p_nrm = 0; 6269 rpp->p_index = tpp->p_index; 6270 tpp->p_index = 0; 6271 #ifdef VAC 6272 rpp->p_vcolor = tpp->p_vcolor; 6273 #endif 6274 } 6275 6276 /* 6277 * First, unsuspend the page, if we set the suspend bit, and transfer 6278 * the mapping list from the target page to the replacement page. 6279 * Next process postcallbacks; since pa_hment's are linked only to the 6280 * p_mapping list of root page, we don't iterate over the constituent 6281 * pages. 6282 */ 6283 hat_pagereload(targ, repl); 6284 6285 suspend_fail: 6286 hat_pageprocess_postcallbacks(repl, HAT_UNSUSPEND); 6287 6288 /* 6289 * Now lower our PIL and release any captured CPUs since we 6290 * are out of the "danger zone". After this it will again be 6291 * safe to acquire adaptive mutex locks, or to drop them... 6292 */ 6293 if (old_pil != -1) { 6294 splx(old_pil); 6295 } else { 6296 xc_dismissed(cpuset); 6297 } 6298 6299 kpreempt_enable(); 6300 6301 sfmmu_mlist_reloc_exit(low, high); 6302 6303 /* 6304 * Postsuspend callbacks should drop any locks held across 6305 * the suspend callbacks. As before, we don't hold the mapping 6306 * list lock at this point.. our assumption is that the mapping 6307 * list still can't change due to our holding SE_EXCL lock and 6308 * there being no unlocked mappings left. Hence the restriction 6309 * on calling context to hat_delete_callback() 6310 */ 6311 hat_pageprocess_postcallbacks(repl, HAT_POSTUNSUSPEND); 6312 if (ret != 0) { 6313 /* 6314 * The second presuspend call failed: we got here through 6315 * the suspend_fail label above. 6316 */ 6317 ASSERT(ret != EIO); 6318 PAGE_RELOCATE_LOG(target, replacement, ret, cap_cpus); 6319 kreloc_thread = NULL; 6320 mutex_exit(&kpr_mutex); 6321 return (EAGAIN); 6322 } 6323 6324 /* 6325 * Now that we're out of the performance critical section we can 6326 * take care of updating the hash table, since we still 6327 * hold all the pages locked SE_EXCL at this point we 6328 * needn't worry about things changing out from under us. 6329 */ 6330 tpp = targ; 6331 rpp = repl; 6332 for (i = 0; i < npages; i++, tpp++, rpp++) { 6333 6334 /* 6335 * replace targ with replacement in page_hash table 6336 */ 6337 targ = tpp; 6338 page_relocate_hash(rpp, targ); 6339 6340 /* 6341 * concatenate target; caller of platform_page_relocate() 6342 * expects target to be concatenated after returning. 6343 */ 6344 ASSERT(targ->p_next == targ); 6345 ASSERT(targ->p_prev == targ); 6346 page_list_concat(&pl, &targ); 6347 } 6348 6349 ASSERT(*target == pl); 6350 *nrelocp = npages; 6351 PAGE_RELOCATE_LOG(target, replacement, 0, cap_cpus); 6352 kreloc_thread = NULL; 6353 mutex_exit(&kpr_mutex); 6354 return (0); 6355 } 6356 6357 /* 6358 * Called when stray pa_hments are found attached to a page which is 6359 * being freed. Notify the subsystem which attached the pa_hment of 6360 * the error if it registered a suitable handler, else panic. 6361 */ 6362 static void 6363 sfmmu_pahment_leaked(struct pa_hment *pahmep) 6364 { 6365 id_t cb_id = pahmep->cb_id; 6366 6367 ASSERT(cb_id >= (id_t)0 && cb_id < sfmmu_cb_nextid); 6368 if (sfmmu_cb_table[cb_id].errhandler != NULL) { 6369 if (sfmmu_cb_table[cb_id].errhandler(pahmep->addr, pahmep->len, 6370 HAT_CB_ERR_LEAKED, pahmep->pvt) == 0) 6371 return; /* non-fatal */ 6372 } 6373 panic("pa_hment leaked: 0x%p", pahmep); 6374 } 6375 6376 /* 6377 * Remove all mappings to page 'pp'. 
6378 */ 6379 int 6380 hat_pageunload(struct page *pp, uint_t forceflag) 6381 { 6382 struct page *origpp = pp; 6383 struct sf_hment *sfhme, *tmphme; 6384 struct hme_blk *hmeblkp; 6385 kmutex_t *pml; 6386 #ifdef VAC 6387 kmutex_t *pmtx; 6388 #endif 6389 cpuset_t cpuset, tset; 6390 int index, cons; 6391 int xhme_blks; 6392 int pa_hments; 6393 6394 ASSERT(PAGE_EXCL(pp)); 6395 6396 retry_xhat: 6397 tmphme = NULL; 6398 xhme_blks = 0; 6399 pa_hments = 0; 6400 CPUSET_ZERO(cpuset); 6401 6402 pml = sfmmu_mlist_enter(pp); 6403 6404 #ifdef VAC 6405 if (pp->p_kpmref) 6406 sfmmu_kpm_pageunload(pp); 6407 ASSERT(!PP_ISMAPPED_KPM(pp)); 6408 #endif 6409 6410 index = PP_MAPINDEX(pp); 6411 cons = TTE8K; 6412 retry: 6413 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 6414 tmphme = sfhme->hme_next; 6415 6416 if (IS_PAHME(sfhme)) { 6417 ASSERT(sfhme->hme_data != NULL); 6418 pa_hments++; 6419 continue; 6420 } 6421 6422 hmeblkp = sfmmu_hmetohblk(sfhme); 6423 if (hmeblkp->hblk_xhat_bit) { 6424 struct xhat_hme_blk *xblk = 6425 (struct xhat_hme_blk *)hmeblkp; 6426 6427 (void) XHAT_PAGEUNLOAD(xblk->xhat_hme_blk_hat, 6428 pp, forceflag, XBLK2PROVBLK(xblk)); 6429 6430 xhme_blks = 1; 6431 continue; 6432 } 6433 6434 /* 6435 * If there are kernel mappings don't unload them, they will 6436 * be suspended. 6437 */ 6438 if (forceflag == SFMMU_KERNEL_RELOC && hmeblkp->hblk_lckcnt && 6439 hmeblkp->hblk_tag.htag_id == ksfmmup) 6440 continue; 6441 6442 tset = sfmmu_pageunload(pp, sfhme, cons); 6443 CPUSET_OR(cpuset, tset); 6444 } 6445 6446 while (index != 0) { 6447 index = index >> 1; 6448 if (index != 0) 6449 cons++; 6450 if (index & 0x1) { 6451 /* Go to leading page */ 6452 pp = PP_GROUPLEADER(pp, cons); 6453 ASSERT(sfmmu_mlist_held(pp)); 6454 goto retry; 6455 } 6456 } 6457 6458 /* 6459 * cpuset may be empty if the page was only mapped by segkpm, 6460 * in which case we won't actually cross-trap. 6461 */ 6462 xt_sync(cpuset); 6463 6464 /* 6465 * The page should have no mappings at this point, unless 6466 * we were called from hat_page_relocate() in which case we 6467 * leave the locked mappings which will be suspended later. 6468 */ 6469 ASSERT(!PP_ISMAPPED(origpp) || xhme_blks || pa_hments || 6470 (forceflag == SFMMU_KERNEL_RELOC)); 6471 6472 #ifdef VAC 6473 if (PP_ISTNC(pp)) { 6474 if (cons == TTE8K) { 6475 pmtx = sfmmu_page_enter(pp); 6476 PP_CLRTNC(pp); 6477 sfmmu_page_exit(pmtx); 6478 } else { 6479 conv_tnc(pp, cons); 6480 } 6481 } 6482 #endif /* VAC */ 6483 6484 if (pa_hments && forceflag != SFMMU_KERNEL_RELOC) { 6485 /* 6486 * Unlink any pa_hments and free them, calling back 6487 * the responsible subsystem to notify it of the error. 6488 * This can occur in situations such as drivers leaking 6489 * DMA handles: naughty, but common enough that we'd like 6490 * to keep the system running rather than bringing it 6491 * down with an obscure error like "pa_hment leaked" 6492 * which doesn't aid the user in debugging their driver. 6493 */ 6494 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 6495 tmphme = sfhme->hme_next; 6496 if (IS_PAHME(sfhme)) { 6497 struct pa_hment *pahmep = sfhme->hme_data; 6498 sfmmu_pahment_leaked(pahmep); 6499 HME_SUB(sfhme, pp); 6500 kmem_cache_free(pa_hment_cache, pahmep); 6501 } 6502 } 6503 6504 ASSERT(!PP_ISMAPPED(origpp) || xhme_blks); 6505 } 6506 6507 sfmmu_mlist_exit(pml); 6508 6509 /* 6510 * XHAT may not have finished unloading pages 6511 * because some other thread was waiting for 6512 * mlist lock and XHAT_PAGEUNLOAD let it do 6513 * the job. 
6514 */ 6515 if (xhme_blks) { 6516 pp = origpp; 6517 goto retry_xhat; 6518 } 6519 6520 return (0); 6521 } 6522 6523 cpuset_t 6524 sfmmu_pageunload(page_t *pp, struct sf_hment *sfhme, int cons) 6525 { 6526 struct hme_blk *hmeblkp; 6527 sfmmu_t *sfmmup; 6528 tte_t tte, ttemod; 6529 #ifdef DEBUG 6530 tte_t orig_old; 6531 #endif /* DEBUG */ 6532 caddr_t addr; 6533 int ttesz; 6534 int ret; 6535 cpuset_t cpuset; 6536 6537 ASSERT(pp != NULL); 6538 ASSERT(sfmmu_mlist_held(pp)); 6539 ASSERT(!PP_ISKAS(pp)); 6540 6541 CPUSET_ZERO(cpuset); 6542 6543 hmeblkp = sfmmu_hmetohblk(sfhme); 6544 6545 readtte: 6546 sfmmu_copytte(&sfhme->hme_tte, &tte); 6547 if (TTE_IS_VALID(&tte)) { 6548 sfmmup = hblktosfmmu(hmeblkp); 6549 ttesz = get_hblk_ttesz(hmeblkp); 6550 /* 6551 * Only unload mappings of 'cons' size. 6552 */ 6553 if (ttesz != cons) 6554 return (cpuset); 6555 6556 /* 6557 * Note that we have p_mapping lock, but no hash lock here. 6558 * hblk_unload() has to have both hash lock AND p_mapping 6559 * lock before it tries to modify tte. So, the tte could 6560 * not become invalid in the sfmmu_modifytte_try() below. 6561 */ 6562 ttemod = tte; 6563 #ifdef DEBUG 6564 orig_old = tte; 6565 #endif /* DEBUG */ 6566 6567 TTE_SET_INVALID(&ttemod); 6568 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 6569 if (ret < 0) { 6570 #ifdef DEBUG 6571 /* only R/M bits can change. */ 6572 chk_tte(&orig_old, &tte, &ttemod, hmeblkp); 6573 #endif /* DEBUG */ 6574 goto readtte; 6575 } 6576 6577 if (ret == 0) { 6578 panic("pageunload: cas failed?"); 6579 } 6580 6581 addr = tte_to_vaddr(hmeblkp, tte); 6582 6583 sfmmu_ttesync(sfmmup, addr, &tte, pp); 6584 6585 atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -1); 6586 6587 /* 6588 * We need to flush the page from the virtual cache 6589 * in order to prevent a virtual cache alias 6590 * inconsistency. The particular scenario we need 6591 * to worry about is: 6592 * Given: va1 and va2 are two virtual address that 6593 * alias and will map the same physical address. 6594 * 1. mapping exists from va1 to pa and data has 6595 * been read into the cache. 6596 * 2. unload va1. 6597 * 3. load va2 and modify data using va2. 6598 * 4 unload va2. 6599 * 5. load va1 and reference data. Unless we flush 6600 * the data cache when we unload we will get 6601 * stale data. 6602 * This scenario is taken care of by using virtual 6603 * page coloring. 6604 */ 6605 if (sfmmup->sfmmu_ismhat) { 6606 /* 6607 * Flush TSBs, TLBs and caches 6608 * of every process 6609 * sharing this ism segment. 6610 */ 6611 sfmmu_hat_lock_all(); 6612 mutex_enter(&ism_mlist_lock); 6613 kpreempt_disable(); 6614 if (do_virtual_coloring) 6615 sfmmu_ismtlbcache_demap(addr, sfmmup, hmeblkp, 6616 pp->p_pagenum, CACHE_NO_FLUSH); 6617 else 6618 sfmmu_ismtlbcache_demap(addr, sfmmup, hmeblkp, 6619 pp->p_pagenum, CACHE_FLUSH); 6620 kpreempt_enable(); 6621 mutex_exit(&ism_mlist_lock); 6622 sfmmu_hat_unlock_all(); 6623 cpuset = cpu_ready_set; 6624 } else if (do_virtual_coloring) { 6625 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 6626 cpuset = sfmmup->sfmmu_cpusran; 6627 } else { 6628 sfmmu_tlbcache_demap(addr, sfmmup, hmeblkp, 6629 pp->p_pagenum, 0, FLUSH_NECESSARY_CPUS, 6630 CACHE_FLUSH, 0); 6631 cpuset = sfmmup->sfmmu_cpusran; 6632 } 6633 6634 /* 6635 * Hme_sub has to run after ttesync() and a_rss update. 6636 * See hblk_unload(). 
6637 */ 6638 HME_SUB(sfhme, pp); 6639 membar_stst(); 6640 6641 /* 6642 * We can not make ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS) 6643 * since pteload may have done a HME_ADD() right after 6644 * we did the HME_SUB() above. Hmecnt is now maintained 6645 * by cas only. no lock guranteed its value. The only 6646 * gurantee we have is the hmecnt should not be less than 6647 * what it should be so the hblk will not be taken away. 6648 * It's also important that we decremented the hmecnt after 6649 * we are done with hmeblkp so that this hmeblk won't be 6650 * stolen. 6651 */ 6652 ASSERT(hmeblkp->hblk_hmecnt > 0); 6653 ASSERT(hmeblkp->hblk_vcnt > 0); 6654 atomic_add_16(&hmeblkp->hblk_vcnt, -1); 6655 atomic_add_16(&hmeblkp->hblk_hmecnt, -1); 6656 /* 6657 * This is bug 4063182. 6658 * XXX: fixme 6659 * ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt || 6660 * !hmeblkp->hblk_lckcnt); 6661 */ 6662 } else { 6663 panic("invalid tte? pp %p &tte %p", 6664 (void *)pp, (void *)&tte); 6665 } 6666 6667 return (cpuset); 6668 } 6669 6670 /* 6671 * While relocating a kernel page, this function will move the mappings 6672 * from tpp to dpp and modify any associated data with these mappings. 6673 * It also unsuspends the suspended kernel mapping. 6674 */ 6675 static void 6676 hat_pagereload(struct page *tpp, struct page *dpp) 6677 { 6678 struct sf_hment *sfhme; 6679 tte_t tte, ttemod; 6680 int index, cons; 6681 6682 ASSERT(getpil() == PIL_MAX); 6683 ASSERT(sfmmu_mlist_held(tpp)); 6684 ASSERT(sfmmu_mlist_held(dpp)); 6685 6686 index = PP_MAPINDEX(tpp); 6687 cons = TTE8K; 6688 6689 /* Update real mappings to the page */ 6690 retry: 6691 for (sfhme = tpp->p_mapping; sfhme != NULL; sfhme = sfhme->hme_next) { 6692 if (IS_PAHME(sfhme)) 6693 continue; 6694 sfmmu_copytte(&sfhme->hme_tte, &tte); 6695 ttemod = tte; 6696 6697 /* 6698 * replace old pfn with new pfn in TTE 6699 */ 6700 PFN_TO_TTE(ttemod, dpp->p_pagenum); 6701 6702 /* 6703 * clear suspend bit 6704 */ 6705 ASSERT(TTE_IS_SUSPEND(&ttemod)); 6706 TTE_CLR_SUSPEND(&ttemod); 6707 6708 if (sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte) < 0) 6709 panic("hat_pagereload(): sfmmu_modifytte_try() failed"); 6710 6711 /* 6712 * set hme_page point to new page 6713 */ 6714 sfhme->hme_page = dpp; 6715 } 6716 6717 /* 6718 * move p_mapping list from old page to new page 6719 */ 6720 dpp->p_mapping = tpp->p_mapping; 6721 tpp->p_mapping = NULL; 6722 dpp->p_share = tpp->p_share; 6723 tpp->p_share = 0; 6724 6725 while (index != 0) { 6726 index = index >> 1; 6727 if (index != 0) 6728 cons++; 6729 if (index & 0x1) { 6730 tpp = PP_GROUPLEADER(tpp, cons); 6731 dpp = PP_GROUPLEADER(dpp, cons); 6732 goto retry; 6733 } 6734 } 6735 6736 curthread->t_flag &= ~T_DONTDTRACE; 6737 mutex_exit(&kpr_suspendlock); 6738 } 6739 6740 uint_t 6741 hat_pagesync(struct page *pp, uint_t clearflag) 6742 { 6743 struct sf_hment *sfhme, *tmphme = NULL; 6744 struct hme_blk *hmeblkp; 6745 kmutex_t *pml; 6746 cpuset_t cpuset, tset; 6747 int index, cons; 6748 extern ulong_t po_share; 6749 page_t *save_pp = pp; 6750 6751 CPUSET_ZERO(cpuset); 6752 6753 if (PP_ISRO(pp) && (clearflag & HAT_SYNC_STOPON_MOD)) { 6754 return (PP_GENERIC_ATTR(pp)); 6755 } 6756 6757 if ((clearflag == (HAT_SYNC_STOPON_REF | HAT_SYNC_DONTZERO)) && 6758 PP_ISREF(pp)) { 6759 return (PP_GENERIC_ATTR(pp)); 6760 } 6761 6762 if ((clearflag == (HAT_SYNC_STOPON_MOD | HAT_SYNC_DONTZERO)) && 6763 PP_ISMOD(pp)) { 6764 return (PP_GENERIC_ATTR(pp)); 6765 } 6766 6767 if ((clearflag & HAT_SYNC_STOPON_SHARED) != 0 && 6768 (pp->p_share > po_share) && 
!(clearflag & HAT_SYNC_ZERORM)) { 6770 if (PP_ISRO(pp)) 6771 hat_page_setattr(pp, P_REF); 6772 return (PP_GENERIC_ATTR(pp)); 6773 } 6774 6775 clearflag &= ~HAT_SYNC_STOPON_SHARED; 6776 pml = sfmmu_mlist_enter(pp); 6777 index = PP_MAPINDEX(pp); 6778 cons = TTE8K; 6779 retry: 6780 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 6781 /* 6782 * We need to save the next hment on the list since 6783 * it is possible for pagesync to remove an invalid hment 6784 * from the list. 6785 */ 6786 tmphme = sfhme->hme_next; 6787 /* 6788 * If we are looking for large mappings and this hme doesn't 6789 * reach the range we are seeking, just ignore it. 6790 */ 6791 hmeblkp = sfmmu_hmetohblk(sfhme); 6792 if (hmeblkp->hblk_xhat_bit) 6793 continue; 6794 6795 if (hme_size(sfhme) < cons) 6796 continue; 6797 tset = sfmmu_pagesync(pp, sfhme, 6798 clearflag & ~HAT_SYNC_STOPON_RM); 6799 CPUSET_OR(cpuset, tset); 6800 /* 6801 * If clearflag is HAT_SYNC_DONTZERO, break out as soon 6802 * as the "ref" or "mod" is set. 6803 */ 6804 if ((clearflag & ~HAT_SYNC_STOPON_RM) == HAT_SYNC_DONTZERO && 6805 ((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp)) || 6806 ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp))) { 6807 index = 0; 6808 break; 6809 } 6810 } 6811 6812 while (index) { 6813 index = index >> 1; 6814 cons++; 6815 if (index & 0x1) { 6816 /* Go to leading page */ 6817 pp = PP_GROUPLEADER(pp, cons); 6818 goto retry; 6819 } 6820 } 6821 6822 xt_sync(cpuset); 6823 sfmmu_mlist_exit(pml); 6824 return (PP_GENERIC_ATTR(save_pp)); 6825 } 6826 6827 /* 6828 * Get all the hardware dependent attributes for a page struct 6829 */ 6830 static cpuset_t 6831 sfmmu_pagesync(struct page *pp, struct sf_hment *sfhme, 6832 uint_t clearflag) 6833 { 6834 caddr_t addr; 6835 tte_t tte, ttemod; 6836 struct hme_blk *hmeblkp; 6837 int ret; 6838 sfmmu_t *sfmmup; 6839 cpuset_t cpuset; 6840 6841 ASSERT(pp != NULL); 6842 ASSERT(sfmmu_mlist_held(pp)); 6843 ASSERT((clearflag == HAT_SYNC_DONTZERO) || 6844 (clearflag == HAT_SYNC_ZERORM)); 6845 6846 SFMMU_STAT(sf_pagesync); 6847 6848 CPUSET_ZERO(cpuset); 6849 6850 sfmmu_pagesync_retry: 6851 6852 sfmmu_copytte(&sfhme->hme_tte, &tte); 6853 if (TTE_IS_VALID(&tte)) { 6854 hmeblkp = sfmmu_hmetohblk(sfhme); 6855 sfmmup = hblktosfmmu(hmeblkp); 6856 addr = tte_to_vaddr(hmeblkp, tte); 6857 if (clearflag == HAT_SYNC_ZERORM) { 6858 ttemod = tte; 6859 TTE_CLR_RM(&ttemod); 6860 ret = sfmmu_modifytte_try(&tte, &ttemod, 6861 &sfhme->hme_tte); 6862 if (ret < 0) { 6863 /* 6864 * cas failed and the new value is not what 6865 * we want. 6866 */ 6867 goto sfmmu_pagesync_retry; 6868 } 6869 6870 if (ret > 0) { 6871 /* we win the cas */ 6872 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 6873 cpuset = sfmmup->sfmmu_cpusran; 6874 } 6875 } 6876 6877 sfmmu_ttesync(sfmmup, addr, &tte, pp); 6878 } 6879 return (cpuset); 6880 } 6881 6882 /* 6883 * Remove write permission from a mapping to a page, so that 6884 * we can detect the next modification of it. This requires modifying 6885 * the TTE then invalidating (demap) any TLB entry using that TTE.
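 * Informal note on ordering: the WRT and MOD bits are cleared in the
 * TTE before the demap, and the caller (hat_page_clrwrt() below)
 * cross-calls via xt_sync() once all mappings have been processed, so
 * that after the flush the next store to the page takes a fault rather
 * than silently re-dirtying it.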
6887 */ 6888 static cpuset_t 6889 sfmmu_pageclrwrt(struct page *pp, struct sf_hment *sfhme) 6890 { 6891 caddr_t addr; 6892 tte_t tte; 6893 tte_t ttemod; 6894 struct hme_blk *hmeblkp; 6895 int ret; 6896 sfmmu_t *sfmmup; 6897 cpuset_t cpuset; 6898 6899 ASSERT(pp != NULL); 6900 ASSERT(sfmmu_mlist_held(pp)); 6901 6902 CPUSET_ZERO(cpuset); 6903 SFMMU_STAT(sf_clrwrt); 6904 6905 retry: 6906 6907 sfmmu_copytte(&sfhme->hme_tte, &tte); 6908 if (TTE_IS_VALID(&tte) && TTE_IS_WRITABLE(&tte)) { 6909 hmeblkp = sfmmu_hmetohblk(sfhme); 6910 6911 /* 6912 * xhat mappings should never be to a VMODSORT page. 6913 */ 6914 ASSERT(hmeblkp->hblk_xhat_bit == 0); 6915 6916 sfmmup = hblktosfmmu(hmeblkp); 6917 addr = tte_to_vaddr(hmeblkp, tte); 6918 6919 ttemod = tte; 6920 TTE_CLR_WRT(&ttemod); 6921 TTE_CLR_MOD(&ttemod); 6922 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 6923 6924 /* 6925 * if cas failed and the new value is not what 6926 * we want retry 6927 */ 6928 if (ret < 0) 6929 goto retry; 6930 6931 /* we win the cas */ 6932 if (ret > 0) { 6933 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 6934 cpuset = sfmmup->sfmmu_cpusran; 6935 } 6936 } 6937 6938 return (cpuset); 6939 } 6940 6941 /* 6942 * Walk all mappings of a page, removing write permission and clearing the 6943 * ref/mod bits. This code is similar to hat_pagesync() 6944 */ 6945 static void 6946 hat_page_clrwrt(page_t *pp) 6947 { 6948 struct sf_hment *sfhme; 6949 struct sf_hment *tmphme = NULL; 6950 kmutex_t *pml; 6951 cpuset_t cpuset; 6952 cpuset_t tset; 6953 int index; 6954 int cons; 6955 6956 CPUSET_ZERO(cpuset); 6957 6958 pml = sfmmu_mlist_enter(pp); 6959 index = PP_MAPINDEX(pp); 6960 cons = TTE8K; 6961 retry: 6962 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 6963 tmphme = sfhme->hme_next; 6964 6965 /* 6966 * If we are looking for large mappings and this hme doesn't 6967 * reach the range we are seeking, just ignore its. 6968 */ 6969 6970 if (hme_size(sfhme) < cons) 6971 continue; 6972 6973 tset = sfmmu_pageclrwrt(pp, sfhme); 6974 CPUSET_OR(cpuset, tset); 6975 } 6976 6977 while (index) { 6978 index = index >> 1; 6979 cons++; 6980 if (index & 0x1) { 6981 /* Go to leading page */ 6982 pp = PP_GROUPLEADER(pp, cons); 6983 goto retry; 6984 } 6985 } 6986 6987 xt_sync(cpuset); 6988 sfmmu_mlist_exit(pml); 6989 } 6990 6991 /* 6992 * Set the given REF/MOD/RO bits for the given page. 6993 * For a vnode with a sorted v_pages list, we need to change 6994 * the attributes and the v_pages list together under page_vnode_mutex. 6995 */ 6996 void 6997 hat_page_setattr(page_t *pp, uint_t flag) 6998 { 6999 vnode_t *vp = pp->p_vnode; 7000 page_t **listp; 7001 kmutex_t *pmtx; 7002 kmutex_t *vphm = NULL; 7003 int noshuffle; 7004 7005 noshuffle = flag & P_NSH; 7006 flag &= ~P_NSH; 7007 7008 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 7009 7010 /* 7011 * nothing to do if attribute already set 7012 */ 7013 if ((pp->p_nrm & flag) == flag) 7014 return; 7015 7016 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) && 7017 !noshuffle) { 7018 vphm = page_vnode_mutex(vp); 7019 mutex_enter(vphm); 7020 } 7021 7022 pmtx = sfmmu_page_enter(pp); 7023 pp->p_nrm |= flag; 7024 sfmmu_page_exit(pmtx); 7025 7026 if (vphm != NULL) { 7027 /* 7028 * Some File Systems examine v_pages for NULL w/o 7029 * grabbing the vphm mutex. Must not let it become NULL when 7030 * pp is the only page on the list. 
7031 */ 7032 if (pp->p_vpnext != pp) { 7033 page_vpsub(&vp->v_pages, pp); 7034 if (vp->v_pages != NULL) 7035 listp = &vp->v_pages->p_vpprev->p_vpnext; 7036 else 7037 listp = &vp->v_pages; 7038 page_vpadd(listp, pp); 7039 } 7040 mutex_exit(vphm); 7041 } 7042 } 7043 7044 void 7045 hat_page_clrattr(page_t *pp, uint_t flag) 7046 { 7047 vnode_t *vp = pp->p_vnode; 7048 kmutex_t *pmtx; 7049 7050 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 7051 7052 pmtx = sfmmu_page_enter(pp); 7053 7054 /* 7055 * Caller is expected to hold page's io lock for VMODSORT to work 7056 * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod 7057 * bit is cleared. 7058 * We don't have assert to avoid tripping some existing third party 7059 * code. The dirty page is moved back to top of the v_page list 7060 * after IO is done in pvn_write_done(). 7061 */ 7062 pp->p_nrm &= ~flag; 7063 sfmmu_page_exit(pmtx); 7064 7065 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) { 7066 7067 /* 7068 * VMODSORT works by removing write permissions and getting 7069 * a fault when a page is made dirty. At this point 7070 * we need to remove write permission from all mappings 7071 * to this page. 7072 */ 7073 hat_page_clrwrt(pp); 7074 } 7075 } 7076 7077 uint_t 7078 hat_page_getattr(page_t *pp, uint_t flag) 7079 { 7080 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 7081 return ((uint_t)(pp->p_nrm & flag)); 7082 } 7083 7084 /* 7085 * DEBUG kernels: verify that a kernel va<->pa translation 7086 * is safe by checking the underlying page_t is in a page 7087 * relocation-safe state. 7088 */ 7089 #ifdef DEBUG 7090 void 7091 sfmmu_check_kpfn(pfn_t pfn) 7092 { 7093 page_t *pp; 7094 int index, cons; 7095 7096 if (hat_check_vtop == 0) 7097 return; 7098 7099 if (hat_kpr_enabled == 0 || kvseg.s_base == NULL || panicstr) 7100 return; 7101 7102 pp = page_numtopp_nolock(pfn); 7103 if (!pp) 7104 return; 7105 7106 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp)) 7107 return; 7108 7109 /* 7110 * Handed a large kernel page, we dig up the root page since we 7111 * know the root page might have the lock also. 7112 */ 7113 if (pp->p_szc != 0) { 7114 index = PP_MAPINDEX(pp); 7115 cons = TTE8K; 7116 again: 7117 while (index != 0) { 7118 index >>= 1; 7119 if (index != 0) 7120 cons++; 7121 if (index & 0x1) { 7122 pp = PP_GROUPLEADER(pp, cons); 7123 goto again; 7124 } 7125 } 7126 } 7127 7128 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp)) 7129 return; 7130 7131 /* 7132 * Pages need to be locked or allocated "permanent" (either from 7133 * static_arena arena or explicitly setting PG_NORELOC when calling 7134 * page_create_va()) for VA->PA translations to be valid. 7135 */ 7136 if (!PP_ISNORELOC(pp)) 7137 panic("Illegal VA->PA translation, pp 0x%p not permanent", pp); 7138 else 7139 panic("Illegal VA->PA translation, pp 0x%p not locked", pp); 7140 } 7141 #endif /* DEBUG */ 7142 7143 /* 7144 * Returns a page frame number for a given virtual address. 7145 * Returns PFN_INVALID to indicate an invalid mapping 7146 */ 7147 pfn_t 7148 hat_getpfnum(struct hat *hat, caddr_t addr) 7149 { 7150 pfn_t pfn; 7151 tte_t tte; 7152 7153 /* 7154 * We would like to 7155 * ASSERT(AS_LOCK_HELD(as, &as->a_lock)); 7156 * but we can't because the iommu driver will call this 7157 * routine at interrupt time and it can't grab the as lock 7158 * or it will deadlock: A thread could have the as lock 7159 * and be waiting for io. The io can't complete 7160 * because the interrupt thread is blocked trying to grab 7161 * the as lock. 
7162 */ 7163 7164 ASSERT(hat->sfmmu_xhat_provider == NULL); 7165 7166 if (hat == ksfmmup) { 7167 if (IS_KMEM_VA_LARGEPAGE(addr)) { 7168 ASSERT(segkmem_lpszc > 0); 7169 pfn = sfmmu_kvaszc2pfn(addr, segkmem_lpszc); 7170 if (pfn != PFN_INVALID) { 7171 sfmmu_check_kpfn(pfn); 7172 return (pfn); 7173 } 7174 } else if (segkpm && IS_KPM_ADDR(addr)) { 7175 return (sfmmu_kpm_vatopfn(addr)); 7176 } 7177 while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte)) 7178 == PFN_SUSPENDED) { 7179 sfmmu_vatopfn_suspended(addr, ksfmmup, &tte); 7180 } 7181 sfmmu_check_kpfn(pfn); 7182 return (pfn); 7183 } else { 7184 return (sfmmu_uvatopfn(addr, hat)); 7185 } 7186 } 7187 7188 /* 7189 * hat_getkpfnum() is an obsolete DDI routine, and its use is discouraged. 7190 * Use hat_getpfnum(kas.a_hat, ...) instead. 7191 * 7192 * We'd like to return PFN_INVALID if the mappings have underlying page_t's 7193 * but can't right now due to the fact that some software has grown to use 7194 * this interface incorrectly. So for now when the interface is misused, 7195 * return a warning to the user that in the future it won't work in the 7196 * way they're abusing it, and carry on (after disabling page relocation). 7197 */ 7198 pfn_t 7199 hat_getkpfnum(caddr_t addr) 7200 { 7201 pfn_t pfn; 7202 tte_t tte; 7203 int badcaller = 0; 7204 extern int segkmem_reloc; 7205 7206 if (segkpm && IS_KPM_ADDR(addr)) { 7207 badcaller = 1; 7208 pfn = sfmmu_kpm_vatopfn(addr); 7209 } else { 7210 while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte)) 7211 == PFN_SUSPENDED) { 7212 sfmmu_vatopfn_suspended(addr, ksfmmup, &tte); 7213 } 7214 badcaller = pf_is_memory(pfn); 7215 } 7216 7217 if (badcaller) { 7218 /* 7219 * We can't return PFN_INVALID or the caller may panic 7220 * or corrupt the system. The only alternative is to 7221 * disable page relocation at this point for all kernel 7222 * memory. This will impact any callers of page_relocate() 7223 * such as FMA or DR. 7224 * 7225 * RFE: Add junk here to spit out an ereport so the sysadmin 7226 * can be advised that he should upgrade his device driver 7227 * so that this doesn't happen. 7228 */ 7229 hat_getkpfnum_badcall(caller()); 7230 if (hat_kpr_enabled && segkmem_reloc) { 7231 hat_kpr_enabled = 0; 7232 segkmem_reloc = 0; 7233 cmn_err(CE_WARN, "Kernel Page Relocation is DISABLED"); 7234 } 7235 } 7236 return (pfn); 7237 } 7238 7239 pfn_t 7240 sfmmu_uvatopfn(caddr_t vaddr, struct hat *sfmmup) 7241 { 7242 struct hmehash_bucket *hmebp; 7243 hmeblk_tag hblktag; 7244 int hmeshift, hashno = 1; 7245 struct hme_blk *hmeblkp = NULL; 7246 7247 struct sf_hment *sfhmep; 7248 tte_t tte; 7249 pfn_t pfn; 7250 7251 /* support for ISM */ 7252 ism_map_t *ism_map; 7253 ism_blk_t *ism_blkp; 7254 int i; 7255 sfmmu_t *ism_hatid = NULL; 7256 sfmmu_t *locked_hatid = NULL; 7257 7258 7259 ASSERT(sfmmup != ksfmmup); 7260 SFMMU_STAT(sf_user_vtop); 7261 /* 7262 * Set ism_hatid if vaddr falls in a ISM segment. 
7263 */ 7264 ism_blkp = sfmmup->sfmmu_iblk; 7265 if (ism_blkp) { 7266 sfmmu_ismhat_enter(sfmmup, 0); 7267 locked_hatid = sfmmup; 7268 } 7269 while (ism_blkp && ism_hatid == NULL) { 7270 ism_map = ism_blkp->iblk_maps; 7271 for (i = 0; ism_map[i].imap_ismhat && i < ISM_MAP_SLOTS; i++) { 7272 if (vaddr >= ism_start(ism_map[i]) && 7273 vaddr < ism_end(ism_map[i])) { 7274 sfmmup = ism_hatid = ism_map[i].imap_ismhat; 7275 vaddr = (caddr_t)(vaddr - 7276 ism_start(ism_map[i])); 7277 break; 7278 } 7279 } 7280 ism_blkp = ism_blkp->iblk_next; 7281 } 7282 if (locked_hatid) { 7283 sfmmu_ismhat_exit(locked_hatid, 0); 7284 } 7285 7286 hblktag.htag_id = sfmmup; 7287 do { 7288 hmeshift = HME_HASH_SHIFT(hashno); 7289 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift); 7290 hblktag.htag_rehash = hashno; 7291 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift); 7292 7293 SFMMU_HASH_LOCK(hmebp); 7294 7295 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 7296 if (hmeblkp != NULL) { 7297 HBLKTOHME(sfhmep, hmeblkp, vaddr); 7298 sfmmu_copytte(&sfhmep->hme_tte, &tte); 7299 if (TTE_IS_VALID(&tte)) { 7300 pfn = TTE_TO_PFN(vaddr, &tte); 7301 } else { 7302 pfn = PFN_INVALID; 7303 } 7304 SFMMU_HASH_UNLOCK(hmebp); 7305 return (pfn); 7306 } 7307 SFMMU_HASH_UNLOCK(hmebp); 7308 hashno++; 7309 } while (HME_REHASH(sfmmup) && (hashno <= mmu_hashcnt)); 7310 return (PFN_INVALID); 7311 } 7312 7313 7314 /* 7315 * For compatability with AT&T and later optimizations 7316 */ 7317 /* ARGSUSED */ 7318 void 7319 hat_map(struct hat *hat, caddr_t addr, size_t len, uint_t flags) 7320 { 7321 ASSERT(hat != NULL); 7322 ASSERT(hat->sfmmu_xhat_provider == NULL); 7323 } 7324 7325 /* 7326 * Return the number of mappings to a particular page. 7327 * This number is an approximation of the number of 7328 * number of people sharing the page. 7329 */ 7330 ulong_t 7331 hat_page_getshare(page_t *pp) 7332 { 7333 page_t *spp = pp; /* start page */ 7334 kmutex_t *pml; 7335 ulong_t cnt; 7336 int index, sz = TTE64K; 7337 7338 /* 7339 * We need to grab the mlist lock to make sure any outstanding 7340 * load/unloads complete. Otherwise we could return zero 7341 * even though the unload(s) hasn't finished yet. 7342 */ 7343 pml = sfmmu_mlist_enter(spp); 7344 cnt = spp->p_share; 7345 7346 #ifdef VAC 7347 if (kpm_enable) 7348 cnt += spp->p_kpmref; 7349 #endif 7350 7351 /* 7352 * If we have any large mappings, we count the number of 7353 * mappings that this large page is part of. 7354 */ 7355 index = PP_MAPINDEX(spp); 7356 index >>= 1; 7357 while (index) { 7358 pp = PP_GROUPLEADER(spp, sz); 7359 if ((index & 0x1) && pp != spp) { 7360 cnt += pp->p_share; 7361 spp = pp; 7362 } 7363 index >>= 1; 7364 sz++; 7365 } 7366 sfmmu_mlist_exit(pml); 7367 return (cnt); 7368 } 7369 7370 /* 7371 * Unload all large mappings to the pp and reset the p_szc field of every 7372 * constituent page according to the remaining mappings. 7373 * 7374 * pp must be locked SE_EXCL. Even though no other constituent pages are 7375 * locked it's legal to unload the large mappings to the pp because all 7376 * constituent pages of large locked mappings have to be locked SE_SHARED. 7377 * This means if we have SE_EXCL lock on one of constituent pages none of the 7378 * large mappings to pp are locked. 7379 * 7380 * Decrease p_szc field starting from the last constituent page and ending 7381 * with the root page. This method is used because other threads rely on the 7382 * root's p_szc to find the lock to syncronize on. 
After a root page_t's p_szc 7383 * is demoted then other threads will succeed in sfmmu_mlspl_enter(). This 7384 * ensures that p_szc changes of the constituent pages appears atomic for all 7385 * threads that use sfmmu_mlspl_enter() to examine p_szc field. 7386 * 7387 * This mechanism is only used for file system pages where it's not always 7388 * possible to get SE_EXCL locks on all constituent pages to demote the size 7389 * code (as is done for anonymous or kernel large pages). 7390 * 7391 * See more comments in front of sfmmu_mlspl_enter(). 7392 */ 7393 void 7394 hat_page_demote(page_t *pp) 7395 { 7396 int index; 7397 int sz; 7398 cpuset_t cpuset; 7399 int sync = 0; 7400 page_t *rootpp; 7401 struct sf_hment *sfhme; 7402 struct sf_hment *tmphme = NULL; 7403 struct hme_blk *hmeblkp; 7404 uint_t pszc; 7405 page_t *lastpp; 7406 cpuset_t tset; 7407 pgcnt_t npgs; 7408 kmutex_t *pml; 7409 kmutex_t *pmtx = NULL; 7410 7411 ASSERT(PAGE_EXCL(pp)); 7412 ASSERT(!PP_ISFREE(pp)); 7413 ASSERT(page_szc_lock_assert(pp)); 7414 pml = sfmmu_mlist_enter(pp); 7415 7416 pszc = pp->p_szc; 7417 if (pszc == 0) { 7418 goto out; 7419 } 7420 7421 index = PP_MAPINDEX(pp) >> 1; 7422 7423 if (index) { 7424 CPUSET_ZERO(cpuset); 7425 sz = TTE64K; 7426 sync = 1; 7427 } 7428 7429 while (index) { 7430 if (!(index & 0x1)) { 7431 index >>= 1; 7432 sz++; 7433 continue; 7434 } 7435 ASSERT(sz <= pszc); 7436 rootpp = PP_GROUPLEADER(pp, sz); 7437 for (sfhme = rootpp->p_mapping; sfhme; sfhme = tmphme) { 7438 tmphme = sfhme->hme_next; 7439 hmeblkp = sfmmu_hmetohblk(sfhme); 7440 if (hme_size(sfhme) != sz) { 7441 continue; 7442 } 7443 if (hmeblkp->hblk_xhat_bit) { 7444 cmn_err(CE_PANIC, 7445 "hat_page_demote: xhat hmeblk"); 7446 } 7447 tset = sfmmu_pageunload(rootpp, sfhme, sz); 7448 CPUSET_OR(cpuset, tset); 7449 } 7450 if (index >>= 1) { 7451 sz++; 7452 } 7453 } 7454 7455 ASSERT(!PP_ISMAPPED_LARGE(pp)); 7456 7457 if (sync) { 7458 xt_sync(cpuset); 7459 #ifdef VAC 7460 if (PP_ISTNC(pp)) { 7461 conv_tnc(rootpp, sz); 7462 } 7463 #endif /* VAC */ 7464 } 7465 7466 pmtx = sfmmu_page_enter(pp); 7467 7468 ASSERT(pp->p_szc == pszc); 7469 rootpp = PP_PAGEROOT(pp); 7470 ASSERT(rootpp->p_szc == pszc); 7471 lastpp = PP_PAGENEXT_N(rootpp, TTEPAGES(pszc) - 1); 7472 7473 while (lastpp != rootpp) { 7474 sz = PP_MAPINDEX(lastpp) ? fnd_mapping_sz(lastpp) : 0; 7475 ASSERT(sz < pszc); 7476 npgs = (sz == 0) ? 1 : TTEPAGES(sz); 7477 ASSERT(P2PHASE(lastpp->p_pagenum, npgs) == npgs - 1); 7478 while (--npgs > 0) { 7479 lastpp->p_szc = (uchar_t)sz; 7480 lastpp = PP_PAGEPREV(lastpp); 7481 } 7482 if (sz) { 7483 /* 7484 * make sure before current root's pszc 7485 * is updated all updates to constituent pages pszc 7486 * fields are globally visible. 7487 */ 7488 membar_producer(); 7489 } 7490 lastpp->p_szc = sz; 7491 ASSERT(IS_P2ALIGNED(lastpp->p_pagenum, TTEPAGES(sz))); 7492 if (lastpp != rootpp) { 7493 lastpp = PP_PAGEPREV(lastpp); 7494 } 7495 } 7496 if (sz == 0) { 7497 /* the loop above doesn't cover this case */ 7498 rootpp->p_szc = 0; 7499 } 7500 out: 7501 ASSERT(pp->p_szc == 0); 7502 if (pmtx != NULL) { 7503 sfmmu_page_exit(pmtx); 7504 } 7505 sfmmu_mlist_exit(pml); 7506 } 7507 7508 /* 7509 * Refresh the HAT ismttecnt[] element for size szc. 7510 * Caller must have set ISM busy flag to prevent mapping 7511 * lists from changing while we're traversing them. 
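 * (Illustrative context: hat_share() and hat_unshare() below call this
 * for every page size they allow, so the cached per-size counts track
 * the current ISM map list.)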
7512 */ 7513 pgcnt_t 7514 ism_tsb_entries(sfmmu_t *sfmmup, int szc) 7515 { 7516 ism_blk_t *ism_blkp = sfmmup->sfmmu_iblk; 7517 ism_map_t *ism_map; 7518 pgcnt_t npgs = 0; 7519 int j; 7520 7521 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 7522 for (; ism_blkp != NULL; ism_blkp = ism_blkp->iblk_next) { 7523 ism_map = ism_blkp->iblk_maps; 7524 for (j = 0; ism_map[j].imap_ismhat && j < ISM_MAP_SLOTS; j++) 7525 npgs += ism_map[j].imap_ismhat->sfmmu_ttecnt[szc]; 7526 } 7527 sfmmup->sfmmu_ismttecnt[szc] = npgs; 7528 return (npgs); 7529 } 7530 7531 /* 7532 * Yield the memory claim requirement for an address space. 7533 * 7534 * This is currently implemented as the number of bytes that have active 7535 * hardware translations that have page structures. Therefore, it can 7536 * underestimate the traditional resident set size, eg, if the 7537 * physical page is present and the hardware translation is missing; 7538 * and it can overestimate the rss, eg, if there are active 7539 * translations to a frame buffer with page structs. 7540 * Also, it does not take sharing into account. 7541 * 7542 * Note that we don't acquire locks here since this function is most often 7543 * called from the clock thread. 7544 */ 7545 size_t 7546 hat_get_mapped_size(struct hat *hat) 7547 { 7548 size_t assize = 0; 7549 int i; 7550 7551 if (hat == NULL) 7552 return (0); 7553 7554 ASSERT(hat->sfmmu_xhat_provider == NULL); 7555 7556 for (i = 0; i < mmu_page_sizes; i++) 7557 assize += (pgcnt_t)hat->sfmmu_ttecnt[i] * TTEBYTES(i); 7558 7559 if (hat->sfmmu_iblk == NULL) 7560 return (assize); 7561 7562 for (i = 0; i < mmu_page_sizes; i++) 7563 assize += (pgcnt_t)hat->sfmmu_ismttecnt[i] * TTEBYTES(i); 7564 7565 return (assize); 7566 } 7567 7568 int 7569 hat_stats_enable(struct hat *hat) 7570 { 7571 hatlock_t *hatlockp; 7572 7573 ASSERT(hat->sfmmu_xhat_provider == NULL); 7574 7575 hatlockp = sfmmu_hat_enter(hat); 7576 hat->sfmmu_rmstat++; 7577 sfmmu_hat_exit(hatlockp); 7578 return (1); 7579 } 7580 7581 void 7582 hat_stats_disable(struct hat *hat) 7583 { 7584 hatlock_t *hatlockp; 7585 7586 ASSERT(hat->sfmmu_xhat_provider == NULL); 7587 7588 hatlockp = sfmmu_hat_enter(hat); 7589 hat->sfmmu_rmstat--; 7590 sfmmu_hat_exit(hatlockp); 7591 } 7592 7593 /* 7594 * Routines for entering or removing ourselves from the 7595 * ism_hat's mapping list. 7596 */ 7597 static void 7598 iment_add(struct ism_ment *iment, struct hat *ism_hat) 7599 { 7600 ASSERT(MUTEX_HELD(&ism_mlist_lock)); 7601 7602 iment->iment_prev = NULL; 7603 iment->iment_next = ism_hat->sfmmu_iment; 7604 if (ism_hat->sfmmu_iment) { 7605 ism_hat->sfmmu_iment->iment_prev = iment; 7606 } 7607 ism_hat->sfmmu_iment = iment; 7608 } 7609 7610 static void 7611 iment_sub(struct ism_ment *iment, struct hat *ism_hat) 7612 { 7613 ASSERT(MUTEX_HELD(&ism_mlist_lock)); 7614 7615 if (ism_hat->sfmmu_iment == NULL) { 7616 panic("ism map entry remove - no entries"); 7617 } 7618 7619 if (iment->iment_prev) { 7620 ASSERT(ism_hat->sfmmu_iment != iment); 7621 iment->iment_prev->iment_next = iment->iment_next; 7622 } else { 7623 ASSERT(ism_hat->sfmmu_iment == iment); 7624 ism_hat->sfmmu_iment = iment->iment_next; 7625 } 7626 7627 if (iment->iment_next) { 7628 iment->iment_next->iment_prev = iment->iment_prev; 7629 } 7630 7631 /* 7632 * zero out the entry 7633 */ 7634 iment->iment_next = NULL; 7635 iment->iment_prev = NULL; 7636 iment->iment_hat = NULL; 7637 } 7638 7639 /* 7640 * Hat_share()/unshare() return an (non-zero) error 7641 * when saddr and daddr are not properly aligned. 
7642 * 7643 * The top level mapping element determines the alignment 7644 * requirement for saddr and daddr, depending on different 7645 * architectures. 7646 * 7647 * When hat_share()/unshare() are not supported, 7648 * HATOP_SHARE()/UNSHARE() return 0 7649 */ 7650 int 7651 hat_share(struct hat *sfmmup, caddr_t addr, 7652 struct hat *ism_hatid, caddr_t sptaddr, size_t len, uint_t ismszc) 7653 { 7654 ism_blk_t *ism_blkp; 7655 ism_blk_t *new_iblk; 7656 ism_map_t *ism_map; 7657 ism_ment_t *ism_ment; 7658 int i, added; 7659 hatlock_t *hatlockp; 7660 int reload_mmu = 0; 7661 uint_t ismshift = page_get_shift(ismszc); 7662 size_t ismpgsz = page_get_pagesize(ismszc); 7663 uint_t ismmask = (uint_t)ismpgsz - 1; 7664 size_t sh_size = ISM_SHIFT(ismshift, len); 7665 ushort_t ismhatflag; 7666 7667 #ifdef DEBUG 7668 caddr_t eaddr = addr + len; 7669 #endif /* DEBUG */ 7670 7671 ASSERT(ism_hatid != NULL && sfmmup != NULL); 7672 ASSERT(sptaddr == ISMID_STARTADDR); 7673 /* 7674 * Check the alignment. 7675 */ 7676 if (!ISM_ALIGNED(ismshift, addr) || !ISM_ALIGNED(ismshift, sptaddr)) 7677 return (EINVAL); 7678 7679 /* 7680 * Check size alignment. 7681 */ 7682 if (!ISM_ALIGNED(ismshift, len)) 7683 return (EINVAL); 7684 7685 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 7686 7687 /* 7688 * Allocate ism_ment for the ism_hat's mapping list, and an 7689 * ism map blk in case we need one. We must do our 7690 * allocations before acquiring locks to prevent a deadlock 7691 * in the kmem allocator on the mapping list lock. 7692 */ 7693 new_iblk = kmem_cache_alloc(ism_blk_cache, KM_SLEEP); 7694 ism_ment = kmem_cache_alloc(ism_ment_cache, KM_SLEEP); 7695 7696 /* 7697 * Serialize ISM mappings with the ISM busy flag, and also the 7698 * trap handlers. 7699 */ 7700 sfmmu_ismhat_enter(sfmmup, 0); 7701 7702 /* 7703 * Allocate an ism map blk if necessary. 7704 */ 7705 if (sfmmup->sfmmu_iblk == NULL) { 7706 sfmmup->sfmmu_iblk = new_iblk; 7707 bzero(new_iblk, sizeof (*new_iblk)); 7708 new_iblk->iblk_nextpa = (uint64_t)-1; 7709 membar_stst(); /* make sure next ptr visible to all CPUs */ 7710 sfmmup->sfmmu_ismblkpa = va_to_pa((caddr_t)new_iblk); 7711 reload_mmu = 1; 7712 new_iblk = NULL; 7713 } 7714 7715 #ifdef DEBUG 7716 /* 7717 * Make sure mapping does not already exist. 7718 */ 7719 ism_blkp = sfmmup->sfmmu_iblk; 7720 while (ism_blkp) { 7721 ism_map = ism_blkp->iblk_maps; 7722 for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) { 7723 if ((addr >= ism_start(ism_map[i]) && 7724 addr < ism_end(ism_map[i])) || 7725 eaddr > ism_start(ism_map[i]) && 7726 eaddr <= ism_end(ism_map[i])) { 7727 panic("sfmmu_share: Already mapped!"); 7728 } 7729 } 7730 ism_blkp = ism_blkp->iblk_next; 7731 } 7732 #endif /* DEBUG */ 7733 7734 ASSERT(ismszc >= TTE4M); 7735 if (ismszc == TTE4M) { 7736 ismhatflag = HAT_4M_FLAG; 7737 } else if (ismszc == TTE32M) { 7738 ismhatflag = HAT_32M_FLAG; 7739 } else if (ismszc == TTE256M) { 7740 ismhatflag = HAT_256M_FLAG; 7741 } 7742 /* 7743 * Add mapping to first available mapping slot. 7744 */ 7745 ism_blkp = sfmmup->sfmmu_iblk; 7746 added = 0; 7747 while (!added) { 7748 ism_map = ism_blkp->iblk_maps; 7749 for (i = 0; i < ISM_MAP_SLOTS; i++) { 7750 if (ism_map[i].imap_ismhat == NULL) { 7751 7752 ism_map[i].imap_ismhat = ism_hatid; 7753 ism_map[i].imap_vb_shift = (ushort_t)ismshift; 7754 ism_map[i].imap_hatflags = ismhatflag; 7755 ism_map[i].imap_sz_mask = ismmask; 7756 /* 7757 * imap_seg is checked in ISM_CHECK to see if 7758 * non-NULL, then other info assumed valid. 
7759 */ 7760 membar_stst(); 7761 ism_map[i].imap_seg = (uintptr_t)addr | sh_size; 7762 ism_map[i].imap_ment = ism_ment; 7763 7764 /* 7765 * Now add ourselves to the ism_hat's 7766 * mapping list. 7767 */ 7768 ism_ment->iment_hat = sfmmup; 7769 ism_ment->iment_base_va = addr; 7770 ism_hatid->sfmmu_ismhat = 1; 7771 ism_hatid->sfmmu_flags = 0; 7772 mutex_enter(&ism_mlist_lock); 7773 iment_add(ism_ment, ism_hatid); 7774 mutex_exit(&ism_mlist_lock); 7775 added = 1; 7776 break; 7777 } 7778 } 7779 if (!added && ism_blkp->iblk_next == NULL) { 7780 ism_blkp->iblk_next = new_iblk; 7781 new_iblk = NULL; 7782 bzero(ism_blkp->iblk_next, 7783 sizeof (*ism_blkp->iblk_next)); 7784 ism_blkp->iblk_next->iblk_nextpa = (uint64_t)-1; 7785 membar_stst(); 7786 ism_blkp->iblk_nextpa = 7787 va_to_pa((caddr_t)ism_blkp->iblk_next); 7788 } 7789 ism_blkp = ism_blkp->iblk_next; 7790 } 7791 7792 /* 7793 * Update our counters for this sfmmup's ism mappings. 7794 */ 7795 for (i = 0; i <= ismszc; i++) { 7796 if (!(disable_ism_large_pages & (1 << i))) 7797 (void) ism_tsb_entries(sfmmup, i); 7798 } 7799 7800 hatlockp = sfmmu_hat_enter(sfmmup); 7801 7802 /* 7803 * For ISM and DISM we do not support 512K pages, so we only 7804 * only search the 4M and 8K/64K hashes for 4 pagesize cpus, and search 7805 * the 256M or 32M, and 4M and 8K/64K hashes for 6 pagesize cpus. 7806 */ 7807 ASSERT((disable_ism_large_pages & (1 << TTE512K)) != 0); 7808 7809 if (ismszc > TTE4M && !SFMMU_FLAGS_ISSET(sfmmup, HAT_4M_FLAG)) 7810 SFMMU_FLAGS_SET(sfmmup, HAT_4M_FLAG); 7811 7812 if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_64K_FLAG)) 7813 SFMMU_FLAGS_SET(sfmmup, HAT_64K_FLAG); 7814 7815 /* 7816 * If we updated the ismblkpa for this HAT or we need 7817 * to start searching the 256M or 32M or 4M hash, we must 7818 * make sure all CPUs running this process reload their 7819 * tsbmiss area. Otherwise they will fail to load the mappings 7820 * in the tsbmiss handler and will loop calling pagefault(). 7821 */ 7822 switch (ismszc) { 7823 case TTE256M: 7824 if (reload_mmu || !SFMMU_FLAGS_ISSET(sfmmup, HAT_256M_FLAG)) { 7825 SFMMU_FLAGS_SET(sfmmup, HAT_256M_FLAG); 7826 sfmmu_sync_mmustate(sfmmup); 7827 } 7828 break; 7829 case TTE32M: 7830 if (reload_mmu || !SFMMU_FLAGS_ISSET(sfmmup, HAT_32M_FLAG)) { 7831 SFMMU_FLAGS_SET(sfmmup, HAT_32M_FLAG); 7832 sfmmu_sync_mmustate(sfmmup); 7833 } 7834 break; 7835 case TTE4M: 7836 if (reload_mmu || !SFMMU_FLAGS_ISSET(sfmmup, HAT_4M_FLAG)) { 7837 SFMMU_FLAGS_SET(sfmmup, HAT_4M_FLAG); 7838 sfmmu_sync_mmustate(sfmmup); 7839 } 7840 break; 7841 default: 7842 break; 7843 } 7844 7845 /* 7846 * Now we can drop the locks. 7847 */ 7848 sfmmu_ismhat_exit(sfmmup, 1); 7849 sfmmu_hat_exit(hatlockp); 7850 7851 /* 7852 * Free up ismblk if we didn't use it. 7853 */ 7854 if (new_iblk != NULL) 7855 kmem_cache_free(ism_blk_cache, new_iblk); 7856 7857 /* 7858 * Check TSB and TLB page sizes. 7859 */ 7860 sfmmu_check_page_sizes(sfmmup, 1); 7861 7862 return (0); 7863 } 7864 7865 /* 7866 * hat_unshare removes exactly one ism_map from 7867 * this process's as. It expects multiple calls 7868 * to hat_unshare for multiple shm segments. 
7869 */ 7870 void 7871 hat_unshare(struct hat *sfmmup, caddr_t addr, size_t len, uint_t ismszc) 7872 { 7873 ism_map_t *ism_map; 7874 ism_ment_t *free_ment = NULL; 7875 ism_blk_t *ism_blkp; 7876 struct hat *ism_hatid; 7877 int found, i; 7878 hatlock_t *hatlockp; 7879 struct tsb_info *tsbinfo; 7880 uint_t ismshift = page_get_shift(ismszc); 7881 size_t sh_size = ISM_SHIFT(ismshift, len); 7882 7883 ASSERT(ISM_ALIGNED(ismshift, addr)); 7884 ASSERT(ISM_ALIGNED(ismshift, len)); 7885 ASSERT(sfmmup != NULL); 7886 ASSERT(sfmmup != ksfmmup); 7887 7888 if (sfmmup->sfmmu_xhat_provider) { 7889 XHAT_UNSHARE(sfmmup, addr, len); 7890 return; 7891 } else { 7892 /* 7893 * This must be a CPU HAT. If the address space has 7894 * XHATs attached, inform all XHATs that ISM segment 7895 * is going away 7896 */ 7897 ASSERT(sfmmup->sfmmu_as != NULL); 7898 if (sfmmup->sfmmu_as->a_xhat != NULL) 7899 xhat_unshare_all(sfmmup->sfmmu_as, addr, len); 7900 } 7901 7902 /* 7903 * Make sure that during the entire time ISM mappings are removed, 7904 * the trap handlers serialize behind us, and that no one else 7905 * can be mucking with ISM mappings. This also lets us get away 7906 * with not doing expensive cross calls to flush the TLB -- we 7907 * just discard the context, flush the entire TSB, and call it 7908 * a day. 7909 */ 7910 sfmmu_ismhat_enter(sfmmup, 0); 7911 7912 /* 7913 * Remove the mapping. 7914 * 7915 * We can't have any holes in the ism map. 7916 * The tsb miss code while searching the ism map will 7917 * stop on an empty map slot. So we must move 7918 * everyone past the hole up 1 if any. 7919 * 7920 * Also empty ism map blks are not freed until the 7921 * process exits. This is to prevent a MT race condition 7922 * between sfmmu_unshare() and sfmmu_tsbmiss_exception(). 7923 */ 7924 found = 0; 7925 ism_blkp = sfmmup->sfmmu_iblk; 7926 while (!found && ism_blkp) { 7927 ism_map = ism_blkp->iblk_maps; 7928 for (i = 0; i < ISM_MAP_SLOTS; i++) { 7929 if (addr == ism_start(ism_map[i]) && 7930 sh_size == (size_t)(ism_size(ism_map[i]))) { 7931 found = 1; 7932 break; 7933 } 7934 } 7935 if (!found) 7936 ism_blkp = ism_blkp->iblk_next; 7937 } 7938 7939 if (found) { 7940 ism_hatid = ism_map[i].imap_ismhat; 7941 ASSERT(ism_hatid != NULL); 7942 ASSERT(ism_hatid->sfmmu_ismhat == 1); 7943 7944 /* 7945 * First remove ourselves from the ism mapping list. 7946 */ 7947 mutex_enter(&ism_mlist_lock); 7948 iment_sub(ism_map[i].imap_ment, ism_hatid); 7949 mutex_exit(&ism_mlist_lock); 7950 free_ment = ism_map[i].imap_ment; 7951 7952 /* 7953 * Now gurantee that any other cpu 7954 * that tries to process an ISM miss 7955 * will go to tl=0. 7956 */ 7957 hatlockp = sfmmu_hat_enter(sfmmup); 7958 7959 sfmmu_invalidate_ctx(sfmmup); 7960 7961 sfmmu_hat_exit(hatlockp); 7962 7963 /* 7964 * We delete the ism map by copying 7965 * the next map over the current one. 7966 * We will take the next one in the maps 7967 * array or from the next ism_blk. 
7968 */ 7969 while (ism_blkp) { 7970 ism_map = ism_blkp->iblk_maps; 7971 while (i < (ISM_MAP_SLOTS - 1)) { 7972 ism_map[i] = ism_map[i + 1]; 7973 i++; 7974 } 7975 /* i == (ISM_MAP_SLOTS - 1) */ 7976 ism_blkp = ism_blkp->iblk_next; 7977 if (ism_blkp) { 7978 ism_map[i] = ism_blkp->iblk_maps[0]; 7979 i = 0; 7980 } else { 7981 ism_map[i].imap_seg = 0; 7982 ism_map[i].imap_vb_shift = 0; 7983 ism_map[i].imap_hatflags = 0; 7984 ism_map[i].imap_sz_mask = 0; 7985 ism_map[i].imap_ismhat = NULL; 7986 ism_map[i].imap_ment = NULL; 7987 } 7988 } 7989 7990 /* 7991 * Now flush entire TSB for the process, since 7992 * demapping page by page can be too expensive. 7993 * We don't have to flush the TLB here anymore 7994 * since we switch to a new TLB ctx instead. 7995 * Also, there is no need to flush if the process 7996 * is exiting since the TSB will be freed later. 7997 */ 7998 if (!sfmmup->sfmmu_free) { 7999 hatlockp = sfmmu_hat_enter(sfmmup); 8000 for (tsbinfo = sfmmup->sfmmu_tsb; tsbinfo != NULL; 8001 tsbinfo = tsbinfo->tsb_next) { 8002 if (tsbinfo->tsb_flags & TSB_SWAPPED) 8003 continue; 8004 sfmmu_inv_tsb(tsbinfo->tsb_va, 8005 TSB_BYTES(tsbinfo->tsb_szc)); 8006 } 8007 sfmmu_hat_exit(hatlockp); 8008 } 8009 } 8010 8011 /* 8012 * Update our counters for this sfmmup's ism mappings. 8013 */ 8014 for (i = 0; i <= ismszc; i++) { 8015 if (!(disable_ism_large_pages & (1 << i))) 8016 (void) ism_tsb_entries(sfmmup, i); 8017 } 8018 8019 sfmmu_ismhat_exit(sfmmup, 0); 8020 8021 /* 8022 * We must do our freeing here after dropping locks 8023 * to prevent a deadlock in the kmem allocator on the 8024 * mapping list lock. 8025 */ 8026 if (free_ment != NULL) 8027 kmem_cache_free(ism_ment_cache, free_ment); 8028 8029 /* 8030 * Check TSB and TLB page sizes if the process isn't exiting. 8031 */ 8032 if (!sfmmup->sfmmu_free) 8033 sfmmu_check_page_sizes(sfmmup, 0); 8034 } 8035 8036 /* ARGSUSED */ 8037 static int 8038 sfmmu_idcache_constructor(void *buf, void *cdrarg, int kmflags) 8039 { 8040 /* void *buf is sfmmu_t pointer */ 8041 return (0); 8042 } 8043 8044 /* ARGSUSED */ 8045 static void 8046 sfmmu_idcache_destructor(void *buf, void *cdrarg) 8047 { 8048 /* void *buf is sfmmu_t pointer */ 8049 } 8050 8051 /* 8052 * setup kmem hmeblks by bzeroing all members and initializing the nextpa 8053 * field to be the pa of this hmeblk 8054 */ 8055 /* ARGSUSED */ 8056 static int 8057 sfmmu_hblkcache_constructor(void *buf, void *cdrarg, int kmflags) 8058 { 8059 struct hme_blk *hmeblkp; 8060 8061 bzero(buf, (size_t)cdrarg); 8062 hmeblkp = (struct hme_blk *)buf; 8063 hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp); 8064 8065 #ifdef HBLK_TRACE 8066 mutex_init(&hmeblkp->hblk_audit_lock, NULL, MUTEX_DEFAULT, NULL); 8067 #endif /* HBLK_TRACE */ 8068 8069 return (0); 8070 } 8071 8072 /* ARGSUSED */ 8073 static void 8074 sfmmu_hblkcache_destructor(void *buf, void *cdrarg) 8075 { 8076 8077 #ifdef HBLK_TRACE 8078 8079 struct hme_blk *hmeblkp; 8080 8081 hmeblkp = (struct hme_blk *)buf; 8082 mutex_destroy(&hmeblkp->hblk_audit_lock); 8083 8084 #endif /* HBLK_TRACE */ 8085 } 8086 8087 #define SFMMU_CACHE_RECLAIM_SCAN_RATIO 8 8088 static int sfmmu_cache_reclaim_scan_ratio = SFMMU_CACHE_RECLAIM_SCAN_RATIO; 8089 /* 8090 * The kmem allocator will callback into our reclaim routine when the system 8091 * is running low in memory. We traverse the hash and free up all unused but 8092 * still cached hme_blks. We also traverse the free list and free them up 8093 * as well. 
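 *
 * As a hedged sketch (hypothetical cache name, sizes and con/destructor
 * callbacks; the real hme_blk caches are created elsewhere in this file),
 * a kmem cache is wired to a reclaim routine like this one at creation
 * time:
 *
 *	cache = kmem_cache_create("example_cache", bufsize, align,
 *	    example_constructor, example_destructor,
 *	    sfmmu_hblkcache_reclaim, NULL, NULL, 0);
 *
 * so the allocator can invoke the callback when memory gets tight.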
8094 */ 8095 /*ARGSUSED*/ 8096 static void 8097 sfmmu_hblkcache_reclaim(void *cdrarg) 8098 { 8099 int i; 8100 uint64_t hblkpa, prevpa, nx_pa; 8101 struct hmehash_bucket *hmebp; 8102 struct hme_blk *hmeblkp, *nx_hblk, *pr_hblk = NULL; 8103 static struct hmehash_bucket *uhmehash_reclaim_hand; 8104 static struct hmehash_bucket *khmehash_reclaim_hand; 8105 struct hme_blk *list = NULL; 8106 8107 hmebp = uhmehash_reclaim_hand; 8108 if (hmebp == NULL || hmebp > &uhme_hash[UHMEHASH_SZ]) 8109 uhmehash_reclaim_hand = hmebp = uhme_hash; 8110 uhmehash_reclaim_hand += UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; 8111 8112 for (i = UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) { 8113 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) { 8114 hmeblkp = hmebp->hmeblkp; 8115 hblkpa = hmebp->hmeh_nextpa; 8116 prevpa = 0; 8117 pr_hblk = NULL; 8118 while (hmeblkp) { 8119 nx_hblk = hmeblkp->hblk_next; 8120 nx_pa = hmeblkp->hblk_nextpa; 8121 if (!hmeblkp->hblk_vcnt && 8122 !hmeblkp->hblk_hmecnt) { 8123 sfmmu_hblk_hash_rm(hmebp, hmeblkp, 8124 prevpa, pr_hblk); 8125 sfmmu_hblk_free(hmebp, hmeblkp, 8126 hblkpa, &list); 8127 } else { 8128 pr_hblk = hmeblkp; 8129 prevpa = hblkpa; 8130 } 8131 hmeblkp = nx_hblk; 8132 hblkpa = nx_pa; 8133 } 8134 SFMMU_HASH_UNLOCK(hmebp); 8135 } 8136 if (hmebp++ == &uhme_hash[UHMEHASH_SZ]) 8137 hmebp = uhme_hash; 8138 } 8139 8140 hmebp = khmehash_reclaim_hand; 8141 if (hmebp == NULL || hmebp > &khme_hash[KHMEHASH_SZ]) 8142 khmehash_reclaim_hand = hmebp = khme_hash; 8143 khmehash_reclaim_hand += KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; 8144 8145 for (i = KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) { 8146 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) { 8147 hmeblkp = hmebp->hmeblkp; 8148 hblkpa = hmebp->hmeh_nextpa; 8149 prevpa = 0; 8150 pr_hblk = NULL; 8151 while (hmeblkp) { 8152 nx_hblk = hmeblkp->hblk_next; 8153 nx_pa = hmeblkp->hblk_nextpa; 8154 if (!hmeblkp->hblk_vcnt && 8155 !hmeblkp->hblk_hmecnt) { 8156 sfmmu_hblk_hash_rm(hmebp, hmeblkp, 8157 prevpa, pr_hblk); 8158 sfmmu_hblk_free(hmebp, hmeblkp, 8159 hblkpa, &list); 8160 } else { 8161 pr_hblk = hmeblkp; 8162 prevpa = hblkpa; 8163 } 8164 hmeblkp = nx_hblk; 8165 hblkpa = nx_pa; 8166 } 8167 SFMMU_HASH_UNLOCK(hmebp); 8168 } 8169 if (hmebp++ == &khme_hash[KHMEHASH_SZ]) 8170 hmebp = khme_hash; 8171 } 8172 sfmmu_hblks_list_purge(&list); 8173 } 8174 8175 /* 8176 * sfmmu_get_ppvcolor should become a vm_machdep or hatop interface. 8177 * same goes for sfmmu_get_addrvcolor(). 8178 * 8179 * This function will return the virtual color for the specified page. The 8180 * virtual color corresponds to this page current mapping or its last mapping. 8181 * It is used by memory allocators to choose addresses with the correct 8182 * alignment so vac consistency is automatically maintained. If the page 8183 * has no color it returns -1. 8184 */ 8185 /*ARGSUSED*/ 8186 int 8187 sfmmu_get_ppvcolor(struct page *pp) 8188 { 8189 #ifdef VAC 8190 int color; 8191 8192 if (!(cache & CACHE_VAC) || PP_NEWPAGE(pp)) { 8193 return (-1); 8194 } 8195 color = PP_GET_VCOLOR(pp); 8196 ASSERT(color < mmu_btop(shm_alignment)); 8197 return (color); 8198 #else 8199 return (-1); 8200 #endif /* VAC */ 8201 } 8202 8203 /* 8204 * This function will return the desired alignment for vac consistency 8205 * (vac color) given a virtual address. If no vac is present it returns -1. 
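 *
 * As a simplified sketch (not the actual addr_to_vcolor() implementation),
 * the color is the page-sized slice of the address that falls within the
 * VAC span:
 *
 *	color = (vaddr & (shm_alignment - 1)) >> MMU_PAGESHIFT;
 *
 * Two mappings of the same page avoid VAC aliasing when they agree in
 * these bits.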
 */
/*ARGSUSED*/
int
sfmmu_get_addrvcolor(caddr_t vaddr)
{
#ifdef VAC
	if (cache & CACHE_VAC) {
		return (addr_to_vcolor(vaddr));
	} else {
		return (-1);
	}
#else
	return (-1);
#endif	/* VAC */
}

#ifdef VAC
/*
 * Check for conflicts.
 * A conflict exists if the new and existing mappings do not match in
 * their "shm_alignment" fields.  If conflicts exist, the existing mappings
 * are flushed unless one of them is locked.  If one of them is locked, then
 * the mappings are flushed and converted to non-cacheable mappings.
 */
static void
sfmmu_vac_conflict(struct hat *hat, caddr_t addr, page_t *pp)
{
	struct hat *tmphat;
	struct sf_hment *sfhmep, *tmphme = NULL;
	struct hme_blk *hmeblkp;
	int vcolor;
	tte_t tte;

	ASSERT(sfmmu_mlist_held(pp));
	ASSERT(!PP_ISNC(pp));		/* page better be cacheable */

	vcolor = addr_to_vcolor(addr);
	if (PP_NEWPAGE(pp)) {
		PP_SET_VCOLOR(pp, vcolor);
		return;
	}

	if (PP_GET_VCOLOR(pp) == vcolor) {
		return;
	}

	if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) {
		/*
		 * Previous user of page had a different color
		 * but since there are no current users
		 * we just flush the cache and change the color.
		 */
		SFMMU_STAT(sf_pgcolor_conflict);
		sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp));
		PP_SET_VCOLOR(pp, vcolor);
		return;
	}

	/*
	 * If we get here we have a vac conflict with a current
	 * mapping.  VAC conflict policy is as follows.
	 * - The default is to unload the other mappings unless:
	 * - If we have a large mapping we uncache the page.
	 *   We need to uncache the rest of the large page too.
	 * - If any of the mappings are locked we uncache the page.
	 * - If the requested mapping is inconsistent
	 *   with another mapping and that mapping
	 *   is in the same address space we have to
	 *   make it non-cached.  The default thing
	 *   to do is unload the inconsistent mapping
	 *   but if they are in the same address space
	 *   we run the risk of unmapping the pc or the
	 *   stack which we will use as we return to the user,
	 *   in which case we can then fault on the thing
	 *   we just unloaded and get into an infinite loop.
	 */
	if (PP_ISMAPPED_LARGE(pp)) {
		int sz;

		/*
		 * Existing mapping is for big pages. We don't unload
		 * existing big mappings to satisfy new mappings.
		 * Always convert all mappings to TNC.
		 */
		sz = fnd_mapping_sz(pp);
		pp = PP_GROUPLEADER(pp, sz);
		SFMMU_STAT_ADD(sf_uncache_conflict, TTEPAGES(sz));
		sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH,
		    TTEPAGES(sz));

		return;
	}

	/*
	 * Check whether any mapping is in the same address space or is
	 * locked, since in that case we need to uncache.
8302 */ 8303 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) { 8304 tmphme = sfhmep->hme_next; 8305 hmeblkp = sfmmu_hmetohblk(sfhmep); 8306 if (hmeblkp->hblk_xhat_bit) 8307 continue; 8308 tmphat = hblktosfmmu(hmeblkp); 8309 sfmmu_copytte(&sfhmep->hme_tte, &tte); 8310 ASSERT(TTE_IS_VALID(&tte)); 8311 if ((tmphat == hat) || hmeblkp->hblk_lckcnt) { 8312 /* 8313 * We have an uncache conflict 8314 */ 8315 SFMMU_STAT(sf_uncache_conflict); 8316 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 1); 8317 return; 8318 } 8319 } 8320 8321 /* 8322 * We have an unload conflict 8323 * We have already checked for LARGE mappings, therefore 8324 * the remaining mapping(s) must be TTE8K. 8325 */ 8326 SFMMU_STAT(sf_unload_conflict); 8327 8328 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) { 8329 tmphme = sfhmep->hme_next; 8330 hmeblkp = sfmmu_hmetohblk(sfhmep); 8331 if (hmeblkp->hblk_xhat_bit) 8332 continue; 8333 (void) sfmmu_pageunload(pp, sfhmep, TTE8K); 8334 } 8335 8336 if (PP_ISMAPPED_KPM(pp)) 8337 sfmmu_kpm_vac_unload(pp, addr); 8338 8339 /* 8340 * Unloads only do TLB flushes so we need to flush the 8341 * cache here. 8342 */ 8343 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp)); 8344 PP_SET_VCOLOR(pp, vcolor); 8345 } 8346 8347 /* 8348 * Whenever a mapping is unloaded and the page is in TNC state, 8349 * we see if the page can be made cacheable again. 'pp' is 8350 * the page that we just unloaded a mapping from, the size 8351 * of mapping that was unloaded is 'ottesz'. 8352 * Remark: 8353 * The recache policy for mpss pages can leave a performance problem 8354 * under the following circumstances: 8355 * . A large page in uncached mode has just been unmapped. 8356 * . All constituent pages are TNC due to a conflicting small mapping. 8357 * . There are many other, non conflicting, small mappings around for 8358 * a lot of the constituent pages. 8359 * . We're called w/ the "old" groupleader page and the old ottesz, 8360 * but this is irrelevant, since we're no more "PP_ISMAPPED_LARGE", so 8361 * we end up w/ TTE8K or npages == 1. 8362 * . We call tst_tnc w/ the old groupleader only, and if there is no 8363 * conflict, we re-cache only this page. 8364 * . All other small mappings are not checked and will be left in TNC mode. 8365 * The problem is not very serious because: 8366 * . mpss is actually only defined for heap and stack, so the probability 8367 * is not very high that a large page mapping exists in parallel to a small 8368 * one (this is possible, but seems to be bad programming style in the 8369 * appl). 8370 * . The problem gets a little bit more serious, when those TNC pages 8371 * have to be mapped into kernel space, e.g. for networking. 8372 * . When VAC alias conflicts occur in applications, this is regarded 8373 * as an application bug. So if kstat's show them, the appl should 8374 * be changed anyway. 8375 */ 8376 void 8377 conv_tnc(page_t *pp, int ottesz) 8378 { 8379 int cursz, dosz; 8380 pgcnt_t curnpgs, dopgs; 8381 pgcnt_t pg64k; 8382 page_t *pp2; 8383 8384 /* 8385 * Determine how big a range we check for TNC and find 8386 * leader page. cursz is the size of the biggest 8387 * mapping that still exist on 'pp'. 
8388 */ 8389 if (PP_ISMAPPED_LARGE(pp)) { 8390 cursz = fnd_mapping_sz(pp); 8391 } else { 8392 cursz = TTE8K; 8393 } 8394 8395 if (ottesz >= cursz) { 8396 dosz = ottesz; 8397 pp2 = pp; 8398 } else { 8399 dosz = cursz; 8400 pp2 = PP_GROUPLEADER(pp, dosz); 8401 } 8402 8403 pg64k = TTEPAGES(TTE64K); 8404 dopgs = TTEPAGES(dosz); 8405 8406 ASSERT(dopgs == 1 || ((dopgs & (pg64k - 1)) == 0)); 8407 8408 while (dopgs != 0) { 8409 curnpgs = TTEPAGES(cursz); 8410 if (tst_tnc(pp2, curnpgs)) { 8411 SFMMU_STAT_ADD(sf_recache, curnpgs); 8412 sfmmu_page_cache_array(pp2, HAT_CACHE, CACHE_NO_FLUSH, 8413 curnpgs); 8414 } 8415 8416 ASSERT(dopgs >= curnpgs); 8417 dopgs -= curnpgs; 8418 8419 if (dopgs == 0) { 8420 break; 8421 } 8422 8423 pp2 = PP_PAGENEXT_N(pp2, curnpgs); 8424 if (((dopgs & (pg64k - 1)) == 0) && PP_ISMAPPED_LARGE(pp2)) { 8425 cursz = fnd_mapping_sz(pp2); 8426 } else { 8427 cursz = TTE8K; 8428 } 8429 } 8430 } 8431 8432 /* 8433 * Returns 1 if page(s) can be converted from TNC to cacheable setting, 8434 * returns 0 otherwise. Note that oaddr argument is valid for only 8435 * 8k pages. 8436 */ 8437 int 8438 tst_tnc(page_t *pp, pgcnt_t npages) 8439 { 8440 struct sf_hment *sfhme; 8441 struct hme_blk *hmeblkp; 8442 tte_t tte; 8443 caddr_t vaddr; 8444 int clr_valid = 0; 8445 int color, color1, bcolor; 8446 int i, ncolors; 8447 8448 ASSERT(pp != NULL); 8449 ASSERT(!(cache & CACHE_WRITEBACK)); 8450 8451 if (npages > 1) { 8452 ncolors = CACHE_NUM_COLOR; 8453 } 8454 8455 for (i = 0; i < npages; i++) { 8456 ASSERT(sfmmu_mlist_held(pp)); 8457 ASSERT(PP_ISTNC(pp)); 8458 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR); 8459 8460 if (PP_ISPNC(pp)) { 8461 return (0); 8462 } 8463 8464 clr_valid = 0; 8465 if (PP_ISMAPPED_KPM(pp)) { 8466 caddr_t kpmvaddr; 8467 8468 ASSERT(kpm_enable); 8469 kpmvaddr = hat_kpm_page2va(pp, 1); 8470 ASSERT(!(npages > 1 && IS_KPM_ALIAS_RANGE(kpmvaddr))); 8471 color1 = addr_to_vcolor(kpmvaddr); 8472 clr_valid = 1; 8473 } 8474 8475 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) { 8476 hmeblkp = sfmmu_hmetohblk(sfhme); 8477 if (hmeblkp->hblk_xhat_bit) 8478 continue; 8479 8480 sfmmu_copytte(&sfhme->hme_tte, &tte); 8481 ASSERT(TTE_IS_VALID(&tte)); 8482 8483 vaddr = tte_to_vaddr(hmeblkp, tte); 8484 color = addr_to_vcolor(vaddr); 8485 8486 if (npages > 1) { 8487 /* 8488 * If there is a big mapping, make sure 8489 * 8K mapping is consistent with the big 8490 * mapping. 
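				 *
				 * For example (illustrative): with
				 * CACHE_NUM_COLOR == 2, a color-aligned
				 * large mapping gives constituent page 0
				 * color 0, page 1 color 1, page 2 color 0,
				 * and so on.  An 8K mapping of constituent
				 * page i at any color other than i % ncolors
				 * would alias the large mapping, so we must
				 * not re-cache.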
				 */
				bcolor = i % ncolors;
				if (color != bcolor) {
					return (0);
				}
			}
			if (!clr_valid) {
				clr_valid = 1;
				color1 = color;
			}

			if (color1 != color) {
				return (0);
			}
		}

		pp = PP_PAGENEXT(pp);
	}

	return (1);
}

void
sfmmu_page_cache_array(page_t *pp, int flags, int cache_flush_flag,
	pgcnt_t npages)
{
	kmutex_t *pmtx;
	int i, ncolors, bcolor;
	kpm_hlk_t *kpmp;
	cpuset_t cpuset;

	ASSERT(pp != NULL);
	ASSERT(!(cache & CACHE_WRITEBACK));

	kpmp = sfmmu_kpm_kpmp_enter(pp, npages);
	pmtx = sfmmu_page_enter(pp);

	/*
	 * Fast path caching single unmapped page
	 */
	if (npages == 1 && !PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp) &&
	    flags == HAT_CACHE) {
		PP_CLRTNC(pp);
		PP_CLRPNC(pp);
		sfmmu_page_exit(pmtx);
		sfmmu_kpm_kpmp_exit(kpmp);
		return;
	}

	/*
	 * We need to capture all cpus in order to change cacheability
	 * because we can't allow one cpu to access the same physical
	 * page using a cacheable and a non-cacheable mapping at the same
	 * time.  Since we may end up walking the ism mapping list, we
	 * have to grab its lock now since we can't after all the
	 * cpus have been captured.
	 */
	sfmmu_hat_lock_all();
	mutex_enter(&ism_mlist_lock);
	kpreempt_disable();
	cpuset = cpu_ready_set;
	xc_attention(cpuset);

	if (npages > 1) {
		/*
		 * Make sure all colors are flushed since
		 * sfmmu_page_cache() only flushes one color;
		 * it does not know about big pages.
		 */
		ncolors = CACHE_NUM_COLOR;
		if (flags & HAT_TMPNC) {
			for (i = 0; i < ncolors; i++) {
				sfmmu_cache_flushcolor(i, pp->p_pagenum);
			}
			cache_flush_flag = CACHE_NO_FLUSH;
		}
	}

	for (i = 0; i < npages; i++) {

		ASSERT(sfmmu_mlist_held(pp));

		if (!(flags == HAT_TMPNC && PP_ISTNC(pp))) {

			if (npages > 1) {
				bcolor = i % ncolors;
			} else {
				bcolor = NO_VCOLOR;
			}

			sfmmu_page_cache(pp, flags, cache_flush_flag,
			    bcolor);
		}

		pp = PP_PAGENEXT(pp);
	}

	xt_sync(cpuset);
	xc_dismissed(cpuset);
	mutex_exit(&ism_mlist_lock);
	sfmmu_hat_unlock_all();
	sfmmu_page_exit(pmtx);
	sfmmu_kpm_kpmp_exit(kpmp);
	kpreempt_enable();
}

/*
 * This function changes the virtual cacheability of all mappings to a
 * particular page. When changing from uncache to cacheable the mappings will
 * only be changed if all of them have the same virtual color.
 * We need to flush the cache on all cpus. It is possible that
 * a process referenced a page as cacheable but has since exited
 * and cleared the mapping list.  We still need to flush it, but we have no
 * state, so flushing on all cpus is the only alternative.
8605 */ 8606 static void 8607 sfmmu_page_cache(page_t *pp, int flags, int cache_flush_flag, int bcolor) 8608 { 8609 struct sf_hment *sfhme; 8610 struct hme_blk *hmeblkp; 8611 sfmmu_t *sfmmup; 8612 tte_t tte, ttemod; 8613 caddr_t vaddr; 8614 int ret, color; 8615 pfn_t pfn; 8616 8617 color = bcolor; 8618 pfn = pp->p_pagenum; 8619 8620 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) { 8621 8622 hmeblkp = sfmmu_hmetohblk(sfhme); 8623 8624 if (hmeblkp->hblk_xhat_bit) 8625 continue; 8626 8627 sfmmu_copytte(&sfhme->hme_tte, &tte); 8628 ASSERT(TTE_IS_VALID(&tte)); 8629 vaddr = tte_to_vaddr(hmeblkp, tte); 8630 color = addr_to_vcolor(vaddr); 8631 8632 #ifdef DEBUG 8633 if ((flags & HAT_CACHE) && bcolor != NO_VCOLOR) { 8634 ASSERT(color == bcolor); 8635 } 8636 #endif 8637 8638 ASSERT(flags != HAT_TMPNC || color == PP_GET_VCOLOR(pp)); 8639 8640 ttemod = tte; 8641 if (flags & (HAT_UNCACHE | HAT_TMPNC)) { 8642 TTE_CLR_VCACHEABLE(&ttemod); 8643 } else { /* flags & HAT_CACHE */ 8644 TTE_SET_VCACHEABLE(&ttemod); 8645 } 8646 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 8647 if (ret < 0) { 8648 /* 8649 * Since all cpus are captured modifytte should not 8650 * fail. 8651 */ 8652 panic("sfmmu_page_cache: write to tte failed"); 8653 } 8654 8655 sfmmup = hblktosfmmu(hmeblkp); 8656 if (cache_flush_flag == CACHE_FLUSH) { 8657 /* 8658 * Flush TSBs, TLBs and caches 8659 */ 8660 if (sfmmup->sfmmu_ismhat) { 8661 if (flags & HAT_CACHE) { 8662 SFMMU_STAT(sf_ism_recache); 8663 } else { 8664 SFMMU_STAT(sf_ism_uncache); 8665 } 8666 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp, 8667 pfn, CACHE_FLUSH); 8668 } else { 8669 sfmmu_tlbcache_demap(vaddr, sfmmup, hmeblkp, 8670 pfn, 0, FLUSH_ALL_CPUS, CACHE_FLUSH, 1); 8671 } 8672 8673 /* 8674 * all cache entries belonging to this pfn are 8675 * now flushed. 8676 */ 8677 cache_flush_flag = CACHE_NO_FLUSH; 8678 } else { 8679 8680 /* 8681 * Flush only TSBs and TLBs. 8682 */ 8683 if (sfmmup->sfmmu_ismhat) { 8684 if (flags & HAT_CACHE) { 8685 SFMMU_STAT(sf_ism_recache); 8686 } else { 8687 SFMMU_STAT(sf_ism_uncache); 8688 } 8689 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp, 8690 pfn, CACHE_NO_FLUSH); 8691 } else { 8692 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 1); 8693 } 8694 } 8695 } 8696 8697 if (PP_ISMAPPED_KPM(pp)) 8698 sfmmu_kpm_page_cache(pp, flags, cache_flush_flag); 8699 8700 switch (flags) { 8701 8702 default: 8703 panic("sfmmu_pagecache: unknown flags"); 8704 break; 8705 8706 case HAT_CACHE: 8707 PP_CLRTNC(pp); 8708 PP_CLRPNC(pp); 8709 PP_SET_VCOLOR(pp, color); 8710 break; 8711 8712 case HAT_TMPNC: 8713 PP_SETTNC(pp); 8714 PP_SET_VCOLOR(pp, NO_VCOLOR); 8715 break; 8716 8717 case HAT_UNCACHE: 8718 PP_SETPNC(pp); 8719 PP_CLRTNC(pp); 8720 PP_SET_VCOLOR(pp, NO_VCOLOR); 8721 break; 8722 } 8723 } 8724 #endif /* VAC */ 8725 8726 8727 /* 8728 * Wrapper routine used to return a context. 8729 * 8730 * It's the responsibility of the caller to guarantee that the 8731 * process serializes on calls here by taking the HAT lock for 8732 * the hat. 8733 * 8734 */ 8735 static void 8736 sfmmu_get_ctx(sfmmu_t *sfmmup) 8737 { 8738 mmu_ctx_t *mmu_ctxp; 8739 uint_t pstate_save; 8740 8741 ASSERT(sfmmu_hat_lock_held(sfmmup)); 8742 ASSERT(sfmmup != ksfmmup); 8743 8744 kpreempt_disable(); 8745 8746 mmu_ctxp = CPU_MMU_CTXP(CPU); 8747 ASSERT(mmu_ctxp); 8748 ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms); 8749 ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]); 8750 8751 /* 8752 * Do a wrap-around if cnum reaches the max # cnum supported by a MMU. 
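	 *
	 * For example (illustrative numbers): with mmu_nctxs == 8192 and
	 * NUM_LOCKED_CTXS == 2, cnums 2 .. 8191 are handed out in order;
	 * once mmu_cnum reaches 8192 the wrap-around code bumps the
	 * generation number (mmu_gnum), invalidates the context of every
	 * process running on this MMU's CPUs, and restarts allocation at
	 * cnum 2.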
8753 */ 8754 if (mmu_ctxp->mmu_cnum == mmu_ctxp->mmu_nctxs) 8755 sfmmu_ctx_wrap_around(mmu_ctxp); 8756 8757 /* 8758 * Let the MMU set up the page sizes to use for 8759 * this context in the TLB. Don't program 2nd dtlb for ism hat. 8760 */ 8761 if ((&mmu_set_ctx_page_sizes) && (sfmmup->sfmmu_ismhat == 0)) { 8762 mmu_set_ctx_page_sizes(sfmmup); 8763 } 8764 8765 /* 8766 * sfmmu_alloc_ctx and sfmmu_load_mmustate will be performed with 8767 * interrupts disabled to prevent race condition with wrap-around 8768 * ctx invalidatation. In sun4v, ctx invalidation also involves 8769 * a HV call to set the number of TSBs to 0. If interrupts are not 8770 * disabled until after sfmmu_load_mmustate is complete TSBs may 8771 * become assigned to INVALID_CONTEXT. This is not allowed. 8772 */ 8773 pstate_save = sfmmu_disable_intrs(); 8774 8775 sfmmu_alloc_ctx(sfmmup, 1, CPU); 8776 sfmmu_load_mmustate(sfmmup); 8777 8778 sfmmu_enable_intrs(pstate_save); 8779 8780 kpreempt_enable(); 8781 } 8782 8783 /* 8784 * When all cnums are used up in a MMU, cnum will wrap around to the 8785 * next generation and start from 2. 8786 */ 8787 static void 8788 sfmmu_ctx_wrap_around(mmu_ctx_t *mmu_ctxp) 8789 { 8790 8791 /* caller must have disabled the preemption */ 8792 ASSERT(curthread->t_preempt >= 1); 8793 ASSERT(mmu_ctxp != NULL); 8794 8795 /* acquire Per-MMU (PM) spin lock */ 8796 mutex_enter(&mmu_ctxp->mmu_lock); 8797 8798 /* re-check to see if wrap-around is needed */ 8799 if (mmu_ctxp->mmu_cnum < mmu_ctxp->mmu_nctxs) 8800 goto done; 8801 8802 SFMMU_MMU_STAT(mmu_wrap_around); 8803 8804 /* update gnum */ 8805 ASSERT(mmu_ctxp->mmu_gnum != 0); 8806 mmu_ctxp->mmu_gnum++; 8807 if (mmu_ctxp->mmu_gnum == 0 || 8808 mmu_ctxp->mmu_gnum > MAX_SFMMU_GNUM_VAL) { 8809 cmn_err(CE_PANIC, "mmu_gnum of mmu_ctx 0x%p is out of bound.", 8810 (void *)mmu_ctxp); 8811 } 8812 8813 if (mmu_ctxp->mmu_ncpus > 1) { 8814 cpuset_t cpuset; 8815 8816 membar_enter(); /* make sure updated gnum visible */ 8817 8818 SFMMU_XCALL_STATS(NULL); 8819 8820 /* xcall to others on the same MMU to invalidate ctx */ 8821 cpuset = mmu_ctxp->mmu_cpuset; 8822 ASSERT(CPU_IN_SET(cpuset, CPU->cpu_id)); 8823 CPUSET_DEL(cpuset, CPU->cpu_id); 8824 CPUSET_AND(cpuset, cpu_ready_set); 8825 8826 /* 8827 * Pass in INVALID_CONTEXT as the first parameter to 8828 * sfmmu_raise_tsb_exception, which invalidates the context 8829 * of any process running on the CPUs in the MMU. 8830 */ 8831 xt_some(cpuset, sfmmu_raise_tsb_exception, 8832 INVALID_CONTEXT, INVALID_CONTEXT); 8833 xt_sync(cpuset); 8834 8835 SFMMU_MMU_STAT(mmu_tsb_raise_exception); 8836 } 8837 8838 if (sfmmu_getctx_sec() != INVALID_CONTEXT) { 8839 sfmmu_setctx_sec(INVALID_CONTEXT); 8840 sfmmu_clear_utsbinfo(); 8841 } 8842 8843 /* 8844 * No xcall is needed here. For sun4u systems all CPUs in context 8845 * domain share a single physical MMU therefore it's enough to flush 8846 * TLB on local CPU. On sun4v systems we use 1 global context 8847 * domain and flush all remote TLBs in sfmmu_raise_tsb_exception 8848 * handler. Note that vtag_flushall_uctxs() is called 8849 * for Ultra II machine, where the equivalent flushall functionality 8850 * is implemented in SW, and only user ctx TLB entries are flushed. 
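	 *
	 * (The address-of test below is the usual pattern for an optional,
	 * weakly-resolved platform routine: if the cpu module does not
	 * provide vtag_flushall_uctxs() its address resolves to NULL and
	 * we fall back to vtag_flushall().)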
	 */
	if (&vtag_flushall_uctxs != NULL) {
		vtag_flushall_uctxs();
	} else {
		vtag_flushall();
	}

	/* reset mmu cnum, skips cnum 0 and 1 */
	mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS;

done:
	mutex_exit(&mmu_ctxp->mmu_lock);
}


/*
 * For a multi-threaded process, set the process context to INVALID_CONTEXT
 * so that it faults and reloads the MMU state from TL=0.  For a
 * single-threaded process, we can just load the MMU state directly
 * without having to set the context invalid.  Caller must hold the hat
 * lock since we don't acquire it here.
 */
static void
sfmmu_sync_mmustate(sfmmu_t *sfmmup)
{
	uint_t cnum;
	uint_t pstate_save;

	ASSERT(sfmmup != ksfmmup);
	ASSERT(sfmmu_hat_lock_held(sfmmup));

	kpreempt_disable();

	/*
	 * We check whether the passed-in sfmmup is the same as the
	 * currently running proc.  This is to make sure the current proc
	 * stays single-threaded if it already is.
	 */
	if ((sfmmup == curthread->t_procp->p_as->a_hat) &&
	    (curthread->t_procp->p_lwpcnt == 1)) {
		/* single-thread */
		cnum = sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum;
		if (cnum != INVALID_CONTEXT) {
			uint_t curcnum;
			/*
			 * Disable interrupts to prevent race condition
			 * with sfmmu_ctx_wrap_around ctx invalidation.
			 * In sun4v, ctx invalidation involves setting
			 * TSB to NULL, hence, interrupts should be disabled
			 * until after sfmmu_load_mmustate is completed.
			 */
			pstate_save = sfmmu_disable_intrs();
			curcnum = sfmmu_getctx_sec();
			if (curcnum == cnum)
				sfmmu_load_mmustate(sfmmup);
			sfmmu_enable_intrs(pstate_save);
			ASSERT(curcnum == cnum || curcnum == INVALID_CONTEXT);
		}
	} else {
		/*
		 * multi-thread
		 * or when sfmmup is not the same as the curproc.
		 */
		sfmmu_invalidate_ctx(sfmmup);
	}

	kpreempt_enable();
}


/*
 * Replace the specified TSB with a new TSB.  This function gets called when
 * we grow, shrink or swapin a TSB.  When swapping in a TSB (TSB_SWAPIN), the
 * TSB_FORCEALLOC flag may be used to force allocation of a minimum-sized TSB
 * (8K).
 *
 * Caller must hold the HAT lock, but should assume any tsb_info
 * pointers it has are no longer valid after calling this function.
 *
 * Return values:
 *	TSB_ALLOCFAIL	Failed to allocate a TSB, due to memory constraints
 *	TSB_LOSTRACE	HAT is busy, i.e. another thread is already doing
 *			something to this tsbinfo/TSB
 *	TSB_SUCCESS	Operation succeeded
 */
static tsb_replace_rc_t
sfmmu_replace_tsb(sfmmu_t *sfmmup, struct tsb_info *old_tsbinfo, uint_t szc,
    hatlock_t *hatlockp, uint_t flags)
{
	struct tsb_info *new_tsbinfo = NULL;
	struct tsb_info *curtsb, *prevtsb;
	uint_t tte_sz_mask;
	int i;

	ASSERT(sfmmup != ksfmmup);
	ASSERT(sfmmup->sfmmu_ismhat == 0);
	ASSERT(sfmmu_hat_lock_held(sfmmup));
	ASSERT(szc <= tsb_max_growsize);

	if (SFMMU_FLAGS_ISSET(sfmmup, HAT_BUSY))
		return (TSB_LOSTRACE);

	/*
	 * Find the tsb_info ahead of this one in the list, and
	 * also make sure that the tsb_info passed in really
	 * exists!
8957 */ 8958 for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb; 8959 curtsb != old_tsbinfo && curtsb != NULL; 8960 prevtsb = curtsb, curtsb = curtsb->tsb_next) 8961 ; 8962 ASSERT(curtsb != NULL); 8963 8964 if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 8965 /* 8966 * The process is swapped out, so just set the new size 8967 * code. When it swaps back in, we'll allocate a new one 8968 * of the new chosen size. 8969 */ 8970 curtsb->tsb_szc = szc; 8971 return (TSB_SUCCESS); 8972 } 8973 SFMMU_FLAGS_SET(sfmmup, HAT_BUSY); 8974 8975 tte_sz_mask = old_tsbinfo->tsb_ttesz_mask; 8976 8977 /* 8978 * All initialization is done inside of sfmmu_tsbinfo_alloc(). 8979 * If we fail to allocate a TSB, exit. 8980 */ 8981 sfmmu_hat_exit(hatlockp); 8982 if (sfmmu_tsbinfo_alloc(&new_tsbinfo, szc, tte_sz_mask, 8983 flags, sfmmup)) { 8984 (void) sfmmu_hat_enter(sfmmup); 8985 if (!(flags & TSB_SWAPIN)) 8986 SFMMU_STAT(sf_tsb_resize_failures); 8987 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 8988 return (TSB_ALLOCFAIL); 8989 } 8990 (void) sfmmu_hat_enter(sfmmup); 8991 8992 /* 8993 * Re-check to make sure somebody else didn't muck with us while we 8994 * didn't hold the HAT lock. If the process swapped out, fine, just 8995 * exit; this can happen if we try to shrink the TSB from the context 8996 * of another process (such as on an ISM unmap), though it is rare. 8997 */ 8998 if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 8999 SFMMU_STAT(sf_tsb_resize_failures); 9000 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 9001 sfmmu_hat_exit(hatlockp); 9002 sfmmu_tsbinfo_free(new_tsbinfo); 9003 (void) sfmmu_hat_enter(sfmmup); 9004 return (TSB_LOSTRACE); 9005 } 9006 9007 #ifdef DEBUG 9008 /* Reverify that the tsb_info still exists.. for debugging only */ 9009 for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb; 9010 curtsb != old_tsbinfo && curtsb != NULL; 9011 prevtsb = curtsb, curtsb = curtsb->tsb_next) 9012 ; 9013 ASSERT(curtsb != NULL); 9014 #endif /* DEBUG */ 9015 9016 /* 9017 * Quiesce any CPUs running this process on their next TLB miss 9018 * so they atomically see the new tsb_info. We temporarily set the 9019 * context to invalid context so new threads that come on processor 9020 * after we do the xcall to cpusran will also serialize behind the 9021 * HAT lock on TLB miss and will see the new TSB. Since this short 9022 * race with a new thread coming on processor is relatively rare, 9023 * this synchronization mechanism should be cheaper than always 9024 * pausing all CPUs for the duration of the setup, which is what 9025 * the old implementation did. This is particuarly true if we are 9026 * copying a huge chunk of memory around during that window. 9027 * 9028 * The memory barriers are to make sure things stay consistent 9029 * with resume() since it does not hold the HAT lock while 9030 * walking the list of tsb_info structures. 9031 */ 9032 if ((flags & TSB_SWAPIN) != TSB_SWAPIN) { 9033 /* The TSB is either growing or shrinking. */ 9034 sfmmu_invalidate_ctx(sfmmup); 9035 } else { 9036 /* 9037 * It is illegal to swap in TSBs from a process other 9038 * than a process being swapped in. This in turn 9039 * implies we do not have a valid MMU context here 9040 * since a process needs one to resolve translation 9041 * misses. 
9042 */ 9043 ASSERT(curthread->t_procp->p_as->a_hat == sfmmup); 9044 } 9045 9046 #ifdef DEBUG 9047 ASSERT(max_mmu_ctxdoms > 0); 9048 9049 /* 9050 * Process should have INVALID_CONTEXT on all MMUs 9051 */ 9052 for (i = 0; i < max_mmu_ctxdoms; i++) { 9053 9054 ASSERT(sfmmup->sfmmu_ctxs[i].cnum == INVALID_CONTEXT); 9055 } 9056 #endif 9057 9058 new_tsbinfo->tsb_next = old_tsbinfo->tsb_next; 9059 membar_stst(); /* strict ordering required */ 9060 if (prevtsb) 9061 prevtsb->tsb_next = new_tsbinfo; 9062 else 9063 sfmmup->sfmmu_tsb = new_tsbinfo; 9064 membar_enter(); /* make sure new TSB globally visible */ 9065 sfmmu_setup_tsbinfo(sfmmup); 9066 9067 /* 9068 * We need to migrate TSB entries from the old TSB to the new TSB 9069 * if tsb_remap_ttes is set and the TSB is growing. 9070 */ 9071 if (tsb_remap_ttes && ((flags & TSB_GROW) == TSB_GROW)) 9072 sfmmu_copy_tsb(old_tsbinfo, new_tsbinfo); 9073 9074 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 9075 9076 /* 9077 * Drop the HAT lock to free our old tsb_info. 9078 */ 9079 sfmmu_hat_exit(hatlockp); 9080 9081 if ((flags & TSB_GROW) == TSB_GROW) { 9082 SFMMU_STAT(sf_tsb_grow); 9083 } else if ((flags & TSB_SHRINK) == TSB_SHRINK) { 9084 SFMMU_STAT(sf_tsb_shrink); 9085 } 9086 9087 sfmmu_tsbinfo_free(old_tsbinfo); 9088 9089 (void) sfmmu_hat_enter(sfmmup); 9090 return (TSB_SUCCESS); 9091 } 9092 9093 /* 9094 * This function will re-program hat pgsz array, and invalidate the 9095 * process' context, forcing the process to switch to another 9096 * context on the next TLB miss, and therefore start using the 9097 * TLB that is reprogrammed for the new page sizes. 9098 */ 9099 void 9100 sfmmu_reprog_pgsz_arr(sfmmu_t *sfmmup, uint8_t *tmp_pgsz) 9101 { 9102 int i; 9103 hatlock_t *hatlockp = NULL; 9104 9105 hatlockp = sfmmu_hat_enter(sfmmup); 9106 /* USIII+-IV+ optimization, requires hat lock */ 9107 if (tmp_pgsz) { 9108 for (i = 0; i < mmu_page_sizes; i++) 9109 sfmmup->sfmmu_pgsz[i] = tmp_pgsz[i]; 9110 } 9111 SFMMU_STAT(sf_tlb_reprog_pgsz); 9112 9113 sfmmu_invalidate_ctx(sfmmup); 9114 9115 sfmmu_hat_exit(hatlockp); 9116 } 9117 9118 /* 9119 * This function assumes that there are either four or six supported page 9120 * sizes and at most two programmable TLBs, so we need to decide which 9121 * page sizes are most important and then tell the MMU layer so it 9122 * can adjust the TLB page sizes accordingly (if supported). 9123 * 9124 * If these assumptions change, this function will need to be 9125 * updated to support whatever the new limits are. 9126 * 9127 * The growing flag is nonzero if we are growing the address space, 9128 * and zero if it is shrinking. This allows us to decide whether 9129 * to grow or shrink our TSB, depending upon available memory 9130 * conditions. 9131 */ 9132 static void 9133 sfmmu_check_page_sizes(sfmmu_t *sfmmup, int growing) 9134 { 9135 uint64_t ttecnt[MMU_PAGE_SIZES]; 9136 uint64_t tte8k_cnt, tte4m_cnt; 9137 uint8_t i; 9138 int sectsb_thresh; 9139 9140 /* 9141 * Kernel threads, processes with small address spaces not using 9142 * large pages, and dummy ISM HATs need not apply. 9143 */ 9144 if (sfmmup == ksfmmup || sfmmup->sfmmu_ismhat != NULL) 9145 return; 9146 9147 if ((sfmmup->sfmmu_flags & HAT_LGPG_FLAGS) == 0 && 9148 sfmmup->sfmmu_ttecnt[TTE8K] <= tsb_rss_factor) 9149 return; 9150 9151 for (i = 0; i < mmu_page_sizes; i++) { 9152 ttecnt[i] = SFMMU_TTE_CNT(sfmmup, i); 9153 } 9154 9155 /* Check pagesizes in use, and possibly reprogram DTLB. 
*/ 9156 if (&mmu_check_page_sizes) 9157 mmu_check_page_sizes(sfmmup, ttecnt); 9158 9159 /* 9160 * Calculate the number of 8k ttes to represent the span of these 9161 * pages. 9162 */ 9163 tte8k_cnt = ttecnt[TTE8K] + 9164 (ttecnt[TTE64K] << (MMU_PAGESHIFT64K - MMU_PAGESHIFT)) + 9165 (ttecnt[TTE512K] << (MMU_PAGESHIFT512K - MMU_PAGESHIFT)); 9166 if (mmu_page_sizes == max_mmu_page_sizes) { 9167 tte4m_cnt = ttecnt[TTE4M] + 9168 (ttecnt[TTE32M] << (MMU_PAGESHIFT32M - MMU_PAGESHIFT4M)) + 9169 (ttecnt[TTE256M] << (MMU_PAGESHIFT256M - MMU_PAGESHIFT4M)); 9170 } else { 9171 tte4m_cnt = ttecnt[TTE4M]; 9172 } 9173 9174 /* 9175 * Inflate TSB sizes by a factor of 2 if this process 9176 * uses 4M text pages to minimize extra conflict misses 9177 * in the first TSB since without counting text pages 9178 * 8K TSB may become too small. 9179 * 9180 * Also double the size of the second TSB to minimize 9181 * extra conflict misses due to competition between 4M text pages 9182 * and data pages. 9183 * 9184 * We need to adjust the second TSB allocation threshold by the 9185 * inflation factor, since there is no point in creating a second 9186 * TSB when we know all the mappings can fit in the I/D TLBs. 9187 */ 9188 sectsb_thresh = tsb_sectsb_threshold; 9189 if (sfmmup->sfmmu_flags & HAT_4MTEXT_FLAG) { 9190 tte8k_cnt <<= 1; 9191 tte4m_cnt <<= 1; 9192 sectsb_thresh <<= 1; 9193 } 9194 9195 /* 9196 * Check to see if our TSB is the right size; we may need to 9197 * grow or shrink it. If the process is small, our work is 9198 * finished at this point. 9199 */ 9200 if (tte8k_cnt <= tsb_rss_factor && tte4m_cnt <= sectsb_thresh) { 9201 return; 9202 } 9203 sfmmu_size_tsb(sfmmup, growing, tte8k_cnt, tte4m_cnt, sectsb_thresh); 9204 } 9205 9206 static void 9207 sfmmu_size_tsb(sfmmu_t *sfmmup, int growing, uint64_t tte8k_cnt, 9208 uint64_t tte4m_cnt, int sectsb_thresh) 9209 { 9210 int tsb_bits; 9211 uint_t tsb_szc; 9212 struct tsb_info *tsbinfop; 9213 hatlock_t *hatlockp = NULL; 9214 9215 hatlockp = sfmmu_hat_enter(sfmmup); 9216 ASSERT(hatlockp != NULL); 9217 tsbinfop = sfmmup->sfmmu_tsb; 9218 ASSERT(tsbinfop != NULL); 9219 9220 /* 9221 * If we're growing, select the size based on RSS. If we're 9222 * shrinking, leave some room so we don't have to turn around and 9223 * grow again immediately. 9224 */ 9225 if (growing) 9226 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt); 9227 else 9228 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt << 1); 9229 9230 if (!growing && (tsb_szc < tsbinfop->tsb_szc) && 9231 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) { 9232 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc, 9233 hatlockp, TSB_SHRINK); 9234 } else if (growing && tsb_szc > tsbinfop->tsb_szc && TSB_OK_GROW()) { 9235 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc, 9236 hatlockp, TSB_GROW); 9237 } 9238 tsbinfop = sfmmup->sfmmu_tsb; 9239 9240 /* 9241 * With the TLB and first TSB out of the way, we need to see if 9242 * we need a second TSB for 4M pages. If we managed to reprogram 9243 * the TLB page sizes above, the process will start using this new 9244 * TSB right away; otherwise, it will start using it on the next 9245 * context switch. Either way, it's no big deal so there's no 9246 * synchronization with the trap handlers here unless we grow the 9247 * TSB (in which case it's required to prevent using the old one 9248 * after it's freed). Note: second tsb is required for 32M/256M 9249 * page sizes. 9250 */ 9251 if (tte4m_cnt > sectsb_thresh) { 9252 /* 9253 * If we're growing, select the size based on RSS. 
If we're 9254 * shrinking, leave some room so we don't have to turn 9255 * around and grow again immediately. 9256 */ 9257 if (growing) 9258 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt); 9259 else 9260 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt << 1); 9261 if (tsbinfop->tsb_next == NULL) { 9262 struct tsb_info *newtsb; 9263 int allocflags = SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)? 9264 0 : TSB_ALLOC; 9265 9266 sfmmu_hat_exit(hatlockp); 9267 9268 /* 9269 * Try to allocate a TSB for 4[32|256]M pages. If we 9270 * can't get the size we want, retry w/a minimum sized 9271 * TSB. If that still didn't work, give up; we can 9272 * still run without one. 9273 */ 9274 tsb_bits = (mmu_page_sizes == max_mmu_page_sizes)? 9275 TSB4M|TSB32M|TSB256M:TSB4M; 9276 if ((sfmmu_tsbinfo_alloc(&newtsb, tsb_szc, tsb_bits, 9277 allocflags, sfmmup) != 0) && 9278 (sfmmu_tsbinfo_alloc(&newtsb, TSB_MIN_SZCODE, 9279 tsb_bits, allocflags, sfmmup) != 0)) { 9280 return; 9281 } 9282 9283 hatlockp = sfmmu_hat_enter(sfmmup); 9284 9285 if (sfmmup->sfmmu_tsb->tsb_next == NULL) { 9286 sfmmup->sfmmu_tsb->tsb_next = newtsb; 9287 SFMMU_STAT(sf_tsb_sectsb_create); 9288 sfmmu_setup_tsbinfo(sfmmup); 9289 sfmmu_hat_exit(hatlockp); 9290 return; 9291 } else { 9292 /* 9293 * It's annoying, but possible for us 9294 * to get here.. we dropped the HAT lock 9295 * because of locking order in the kmem 9296 * allocator, and while we were off getting 9297 * our memory, some other thread decided to 9298 * do us a favor and won the race to get a 9299 * second TSB for this process. Sigh. 9300 */ 9301 sfmmu_hat_exit(hatlockp); 9302 sfmmu_tsbinfo_free(newtsb); 9303 return; 9304 } 9305 } 9306 9307 /* 9308 * We have a second TSB, see if it's big enough. 9309 */ 9310 tsbinfop = tsbinfop->tsb_next; 9311 9312 /* 9313 * Check to see if our second TSB is the right size; 9314 * we may need to grow or shrink it. 9315 * To prevent thrashing (e.g. growing the TSB on a 9316 * subsequent map operation), only try to shrink if 9317 * the TSB reach exceeds twice the virtual address 9318 * space size. 9319 */ 9320 if (!growing && (tsb_szc < tsbinfop->tsb_szc) && 9321 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) { 9322 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, 9323 tsb_szc, hatlockp, TSB_SHRINK); 9324 } else if (growing && tsb_szc > tsbinfop->tsb_szc && 9325 TSB_OK_GROW()) { 9326 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, 9327 tsb_szc, hatlockp, TSB_GROW); 9328 } 9329 } 9330 9331 sfmmu_hat_exit(hatlockp); 9332 } 9333 9334 /* 9335 * Free up a sfmmu 9336 * Since the sfmmu is currently embedded in the hat struct we simply zero 9337 * out our fields and free up the ism map blk list if any. 
9338 */ 9339 static void 9340 sfmmu_free_sfmmu(sfmmu_t *sfmmup) 9341 { 9342 ism_blk_t *blkp, *nx_blkp; 9343 #ifdef DEBUG 9344 ism_map_t *map; 9345 int i; 9346 #endif 9347 9348 ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0); 9349 ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0); 9350 ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0); 9351 ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0); 9352 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0); 9353 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0); 9354 9355 sfmmup->sfmmu_free = 0; 9356 sfmmup->sfmmu_ismhat = 0; 9357 9358 blkp = sfmmup->sfmmu_iblk; 9359 sfmmup->sfmmu_iblk = NULL; 9360 9361 while (blkp) { 9362 #ifdef DEBUG 9363 map = blkp->iblk_maps; 9364 for (i = 0; i < ISM_MAP_SLOTS; i++) { 9365 ASSERT(map[i].imap_seg == 0); 9366 ASSERT(map[i].imap_ismhat == NULL); 9367 ASSERT(map[i].imap_ment == NULL); 9368 } 9369 #endif 9370 nx_blkp = blkp->iblk_next; 9371 blkp->iblk_next = NULL; 9372 blkp->iblk_nextpa = (uint64_t)-1; 9373 kmem_cache_free(ism_blk_cache, blkp); 9374 blkp = nx_blkp; 9375 } 9376 } 9377 9378 /* 9379 * Locking primitves accessed by HATLOCK macros 9380 */ 9381 9382 #define SFMMU_SPL_MTX (0x0) 9383 #define SFMMU_ML_MTX (0x1) 9384 9385 #define SFMMU_MLSPL_MTX(type, pg) (((type) == SFMMU_SPL_MTX) ? \ 9386 SPL_HASH(pg) : MLIST_HASH(pg)) 9387 9388 kmutex_t * 9389 sfmmu_page_enter(struct page *pp) 9390 { 9391 return (sfmmu_mlspl_enter(pp, SFMMU_SPL_MTX)); 9392 } 9393 9394 void 9395 sfmmu_page_exit(kmutex_t *spl) 9396 { 9397 mutex_exit(spl); 9398 } 9399 9400 int 9401 sfmmu_page_spl_held(struct page *pp) 9402 { 9403 return (sfmmu_mlspl_held(pp, SFMMU_SPL_MTX)); 9404 } 9405 9406 kmutex_t * 9407 sfmmu_mlist_enter(struct page *pp) 9408 { 9409 return (sfmmu_mlspl_enter(pp, SFMMU_ML_MTX)); 9410 } 9411 9412 void 9413 sfmmu_mlist_exit(kmutex_t *mml) 9414 { 9415 mutex_exit(mml); 9416 } 9417 9418 int 9419 sfmmu_mlist_held(struct page *pp) 9420 { 9421 9422 return (sfmmu_mlspl_held(pp, SFMMU_ML_MTX)); 9423 } 9424 9425 /* 9426 * Common code for sfmmu_mlist_enter() and sfmmu_page_enter(). For 9427 * sfmmu_mlist_enter() case mml_table lock array is used and for 9428 * sfmmu_page_enter() sfmmu_page_lock lock array is used. 9429 * 9430 * The lock is taken on a root page so that it protects an operation on all 9431 * constituent pages of a large page pp belongs to. 9432 * 9433 * The routine takes a lock from the appropriate array. The lock is determined 9434 * by hashing the root page. After taking the lock this routine checks if the 9435 * root page has the same size code that was used to determine the root (i.e 9436 * that root hasn't changed). If root page has the expected p_szc field we 9437 * have the right lock and it's returned to the caller. If root's p_szc 9438 * decreased we release the lock and retry from the beginning. This case can 9439 * happen due to hat_page_demote() decreasing p_szc between our load of p_szc 9440 * value and taking the lock. The number of retries due to p_szc decrease is 9441 * limited by the maximum p_szc value. If p_szc is 0 we return the lock 9442 * determined by hashing pp itself. 9443 * 9444 * If our caller doesn't hold a SE_SHARED or SE_EXCL lock on pp it's also 9445 * possible that p_szc can increase. To increase p_szc a thread has to lock 9446 * all constituent pages EXCL and do hat_pageunload() on all of them. All the 9447 * callers that don't hold a page locked recheck if hmeblk through which pp 9448 * was found still maps this pp. If it doesn't map it anymore returned lock 9449 * is immediately dropped. 
Therefore if sfmmu_mlspl_enter() hits the case of 9450 * p_szc increase after taking the lock it returns this lock without further 9451 * retries because in this case the caller doesn't care about which lock was 9452 * taken. The caller will drop it right away. 9453 * 9454 * After the routine returns it's guaranteed that hat_page_demote() can't 9455 * change p_szc field of any of constituent pages of a large page pp belongs 9456 * to as long as pp was either locked at least SHARED prior to this call or 9457 * the caller finds that hment that pointed to this pp still references this 9458 * pp (this also assumes that the caller holds hme hash bucket lock so that 9459 * the same pp can't be remapped into the same hmeblk after it was unmapped by 9460 * hat_pageunload()). 9461 */ 9462 static kmutex_t * 9463 sfmmu_mlspl_enter(struct page *pp, int type) 9464 { 9465 kmutex_t *mtx; 9466 uint_t prev_rszc = UINT_MAX; 9467 page_t *rootpp; 9468 uint_t szc; 9469 uint_t rszc; 9470 uint_t pszc = pp->p_szc; 9471 9472 ASSERT(pp != NULL); 9473 9474 again: 9475 if (pszc == 0) { 9476 mtx = SFMMU_MLSPL_MTX(type, pp); 9477 mutex_enter(mtx); 9478 return (mtx); 9479 } 9480 9481 /* The lock lives in the root page */ 9482 rootpp = PP_GROUPLEADER(pp, pszc); 9483 mtx = SFMMU_MLSPL_MTX(type, rootpp); 9484 mutex_enter(mtx); 9485 9486 /* 9487 * Return mml in the following 3 cases: 9488 * 9489 * 1) If pp itself is root since if its p_szc decreased before we took 9490 * the lock pp is still the root of smaller szc page. And if its p_szc 9491 * increased it doesn't matter what lock we return (see comment in 9492 * front of this routine). 9493 * 9494 * 2) If pp's not root but rootpp is the root of a rootpp->p_szc size 9495 * large page we have the right lock since any previous potential 9496 * hat_page_demote() is done demoting from greater than current root's 9497 * p_szc because hat_page_demote() changes root's p_szc last. No 9498 * further hat_page_demote() can start or be in progress since it 9499 * would need the same lock we currently hold. 9500 * 9501 * 3) If rootpp's p_szc increased since previous iteration it doesn't 9502 * matter what lock we return (see comment in front of this routine). 9503 */ 9504 if (pp == rootpp || (rszc = rootpp->p_szc) == pszc || 9505 rszc >= prev_rszc) { 9506 return (mtx); 9507 } 9508 9509 /* 9510 * hat_page_demote() could have decreased root's p_szc. 9511 * In this case pp's p_szc must also be smaller than pszc. 9512 * Retry. 9513 */ 9514 if (rszc < pszc) { 9515 szc = pp->p_szc; 9516 if (szc < pszc) { 9517 mutex_exit(mtx); 9518 pszc = szc; 9519 goto again; 9520 } 9521 /* 9522 * pp's p_szc increased after it was decreased. 9523 * page cannot be mapped. Return current lock. The caller 9524 * will drop it right away. 9525 */ 9526 return (mtx); 9527 } 9528 9529 /* 9530 * root's p_szc is greater than pp's p_szc. 9531 * hat_page_demote() is not done with all pages 9532 * yet. Wait for it to complete. 
9533 */ 9534 mutex_exit(mtx); 9535 rootpp = PP_GROUPLEADER(rootpp, rszc); 9536 mtx = SFMMU_MLSPL_MTX(type, rootpp); 9537 mutex_enter(mtx); 9538 mutex_exit(mtx); 9539 prev_rszc = rszc; 9540 goto again; 9541 } 9542 9543 static int 9544 sfmmu_mlspl_held(struct page *pp, int type) 9545 { 9546 kmutex_t *mtx; 9547 9548 ASSERT(pp != NULL); 9549 /* The lock lives in the root page */ 9550 pp = PP_PAGEROOT(pp); 9551 ASSERT(pp != NULL); 9552 9553 mtx = SFMMU_MLSPL_MTX(type, pp); 9554 return (MUTEX_HELD(mtx)); 9555 } 9556 9557 static uint_t 9558 sfmmu_get_free_hblk(struct hme_blk **hmeblkpp, uint_t critical) 9559 { 9560 struct hme_blk *hblkp; 9561 9562 if (freehblkp != NULL) { 9563 mutex_enter(&freehblkp_lock); 9564 if (freehblkp != NULL) { 9565 /* 9566 * If the current thread is owning hblk_reserve OR 9567 * critical request from sfmmu_hblk_steal() 9568 * let it succeed even if freehblkcnt is really low. 9569 */ 9570 if (freehblkcnt <= HBLK_RESERVE_MIN && !critical) { 9571 SFMMU_STAT(sf_get_free_throttle); 9572 mutex_exit(&freehblkp_lock); 9573 return (0); 9574 } 9575 freehblkcnt--; 9576 *hmeblkpp = freehblkp; 9577 hblkp = *hmeblkpp; 9578 freehblkp = hblkp->hblk_next; 9579 mutex_exit(&freehblkp_lock); 9580 hblkp->hblk_next = NULL; 9581 SFMMU_STAT(sf_get_free_success); 9582 return (1); 9583 } 9584 mutex_exit(&freehblkp_lock); 9585 } 9586 SFMMU_STAT(sf_get_free_fail); 9587 return (0); 9588 } 9589 9590 static uint_t 9591 sfmmu_put_free_hblk(struct hme_blk *hmeblkp, uint_t critical) 9592 { 9593 struct hme_blk *hblkp; 9594 9595 /* 9596 * If the current thread is mapping into kernel space, 9597 * let it succede even if freehblkcnt is max 9598 * so that it will avoid freeing it to kmem. 9599 * This will prevent stack overflow due to 9600 * possible recursion since kmem_cache_free() 9601 * might require creation of a slab which 9602 * in turn needs an hmeblk to map that slab; 9603 * let's break this vicious chain at the first 9604 * opportunity. 9605 */ 9606 if (freehblkcnt < HBLK_RESERVE_CNT || critical) { 9607 mutex_enter(&freehblkp_lock); 9608 if (freehblkcnt < HBLK_RESERVE_CNT || critical) { 9609 SFMMU_STAT(sf_put_free_success); 9610 freehblkcnt++; 9611 hmeblkp->hblk_next = freehblkp; 9612 freehblkp = hmeblkp; 9613 mutex_exit(&freehblkp_lock); 9614 return (1); 9615 } 9616 mutex_exit(&freehblkp_lock); 9617 } 9618 9619 /* 9620 * Bring down freehblkcnt to HBLK_RESERVE_CNT. We are here 9621 * only if freehblkcnt is at least HBLK_RESERVE_CNT *and* 9622 * we are not in the process of mapping into kernel space. 
9623 */ 9624 ASSERT(!critical); 9625 while (freehblkcnt > HBLK_RESERVE_CNT) { 9626 mutex_enter(&freehblkp_lock); 9627 if (freehblkcnt > HBLK_RESERVE_CNT) { 9628 freehblkcnt--; 9629 hblkp = freehblkp; 9630 freehblkp = hblkp->hblk_next; 9631 mutex_exit(&freehblkp_lock); 9632 ASSERT(get_hblk_cache(hblkp) == sfmmu8_cache); 9633 kmem_cache_free(sfmmu8_cache, hblkp); 9634 continue; 9635 } 9636 mutex_exit(&freehblkp_lock); 9637 } 9638 SFMMU_STAT(sf_put_free_fail); 9639 return (0); 9640 } 9641 9642 static void 9643 sfmmu_hblk_swap(struct hme_blk *new) 9644 { 9645 struct hme_blk *old, *hblkp, *prev; 9646 uint64_t hblkpa, prevpa, newpa; 9647 caddr_t base, vaddr, endaddr; 9648 struct hmehash_bucket *hmebp; 9649 struct sf_hment *osfhme, *nsfhme; 9650 page_t *pp; 9651 kmutex_t *pml; 9652 tte_t tte; 9653 9654 #ifdef DEBUG 9655 hmeblk_tag hblktag; 9656 struct hme_blk *found; 9657 #endif 9658 old = HBLK_RESERVE; 9659 9660 /* 9661 * save pa before bcopy clobbers it 9662 */ 9663 newpa = new->hblk_nextpa; 9664 9665 base = (caddr_t)get_hblk_base(old); 9666 endaddr = base + get_hblk_span(old); 9667 9668 /* 9669 * acquire hash bucket lock. 9670 */ 9671 hmebp = sfmmu_tteload_acquire_hashbucket(ksfmmup, base, TTE8K); 9672 9673 /* 9674 * copy contents from old to new 9675 */ 9676 bcopy((void *)old, (void *)new, HME8BLK_SZ); 9677 9678 /* 9679 * add new to hash chain 9680 */ 9681 sfmmu_hblk_hash_add(hmebp, new, newpa); 9682 9683 /* 9684 * search hash chain for hblk_reserve; this needs to be performed 9685 * after adding new, otherwise prevpa and prev won't correspond 9686 * to the hblk which is prior to old in hash chain when we call 9687 * sfmmu_hblk_hash_rm to remove old later. 9688 */ 9689 for (prevpa = 0, prev = NULL, 9690 hblkpa = hmebp->hmeh_nextpa, hblkp = hmebp->hmeblkp; 9691 hblkp != NULL && hblkp != old; 9692 prevpa = hblkpa, prev = hblkp, 9693 hblkpa = hblkp->hblk_nextpa, hblkp = hblkp->hblk_next) 9694 ; 9695 9696 if (hblkp != old) 9697 panic("sfmmu_hblk_swap: hblk_reserve not found"); 9698 9699 /* 9700 * p_mapping list is still pointing to hments in hblk_reserve; 9701 * fix up p_mapping list so that they point to hments in new. 9702 * 9703 * Since all these mappings are created by hblk_reserve_thread 9704 * on the way and it's using at least one of the buffers from each of 9705 * the newly minted slabs, there is no danger of any of these 9706 * mappings getting unloaded by another thread. 9707 * 9708 * tsbmiss could only modify ref/mod bits of hments in old/new. 9709 * Since all of these hments hold mappings established by segkmem 9710 * and mappings in segkmem are setup with HAT_NOSYNC, ref/mod bits 9711 * have no meaning for the mappings in hblk_reserve. hments in 9712 * old and new are identical except for ref/mod bits. 
9713 */ 9714 for (vaddr = base; vaddr < endaddr; vaddr += TTEBYTES(TTE8K)) { 9715 9716 HBLKTOHME(osfhme, old, vaddr); 9717 sfmmu_copytte(&osfhme->hme_tte, &tte); 9718 9719 if (TTE_IS_VALID(&tte)) { 9720 if ((pp = osfhme->hme_page) == NULL) 9721 panic("sfmmu_hblk_swap: page not mapped"); 9722 9723 pml = sfmmu_mlist_enter(pp); 9724 9725 if (pp != osfhme->hme_page) 9726 panic("sfmmu_hblk_swap: mapping changed"); 9727 9728 HBLKTOHME(nsfhme, new, vaddr); 9729 9730 HME_ADD(nsfhme, pp); 9731 HME_SUB(osfhme, pp); 9732 9733 sfmmu_mlist_exit(pml); 9734 } 9735 } 9736 9737 /* 9738 * remove old from hash chain 9739 */ 9740 sfmmu_hblk_hash_rm(hmebp, old, prevpa, prev); 9741 9742 #ifdef DEBUG 9743 9744 hblktag.htag_id = ksfmmup; 9745 hblktag.htag_bspage = HME_HASH_BSPAGE(base, HME_HASH_SHIFT(TTE8K)); 9746 hblktag.htag_rehash = HME_HASH_REHASH(TTE8K); 9747 HME_HASH_FAST_SEARCH(hmebp, hblktag, found); 9748 9749 if (found != new) 9750 panic("sfmmu_hblk_swap: new hblk not found"); 9751 #endif 9752 9753 SFMMU_HASH_UNLOCK(hmebp); 9754 9755 /* 9756 * Reset hblk_reserve 9757 */ 9758 bzero((void *)old, HME8BLK_SZ); 9759 old->hblk_nextpa = va_to_pa((caddr_t)old); 9760 } 9761 9762 /* 9763 * Grab the mlist mutex for both pages passed in. 9764 * 9765 * low and high will be returned as pointers to the mutexes for these pages. 9766 * low refers to the mutex residing in the lower bin of the mlist hash, while 9767 * high refers to the mutex residing in the higher bin of the mlist hash. This 9768 * is due to the locking order restrictions on the same thread grabbing 9769 * multiple mlist mutexes. The low lock must be acquired before the high lock. 9770 * 9771 * If both pages hash to the same mutex, only grab that single mutex, and 9772 * high will be returned as NULL 9773 * If the pages hash to different bins in the hash, grab the lower addressed 9774 * lock first and then the higher addressed lock in order to follow the locking 9775 * rules involved with the same thread grabbing multiple mlist mutexes. 9776 * low and high will both have non-NULL values. 9777 */ 9778 static void 9779 sfmmu_mlist_reloc_enter(struct page *targ, struct page *repl, 9780 kmutex_t **low, kmutex_t **high) 9781 { 9782 kmutex_t *mml_targ, *mml_repl; 9783 9784 /* 9785 * no need to do the dance around szc as in sfmmu_mlist_enter() 9786 * because this routine is only called by hat_page_relocate() and all 9787 * targ and repl pages are already locked EXCL so szc can't change. 
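	 *
	 * The address comparison below gives every thread the same global
	 * acquisition order for any pair of mlist mutexes; illustratively,
	 * with m1 < m2 both thread A and thread B do
	 *
	 *	mutex_enter(m1); mutex_enter(m2);
	 *
	 * rather than one thread taking m1 then m2 while the other takes
	 * m2 then m1, which could deadlock.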
9788 */ 9789 9790 mml_targ = MLIST_HASH(PP_PAGEROOT(targ)); 9791 mml_repl = MLIST_HASH(PP_PAGEROOT(repl)); 9792 9793 if (mml_targ == mml_repl) { 9794 *low = mml_targ; 9795 *high = NULL; 9796 } else { 9797 if (mml_targ < mml_repl) { 9798 *low = mml_targ; 9799 *high = mml_repl; 9800 } else { 9801 *low = mml_repl; 9802 *high = mml_targ; 9803 } 9804 } 9805 9806 mutex_enter(*low); 9807 if (*high) 9808 mutex_enter(*high); 9809 } 9810 9811 static void 9812 sfmmu_mlist_reloc_exit(kmutex_t *low, kmutex_t *high) 9813 { 9814 if (high) 9815 mutex_exit(high); 9816 mutex_exit(low); 9817 } 9818 9819 static hatlock_t * 9820 sfmmu_hat_enter(sfmmu_t *sfmmup) 9821 { 9822 hatlock_t *hatlockp; 9823 9824 if (sfmmup != ksfmmup) { 9825 hatlockp = TSB_HASH(sfmmup); 9826 mutex_enter(HATLOCK_MUTEXP(hatlockp)); 9827 return (hatlockp); 9828 } 9829 return (NULL); 9830 } 9831 9832 static hatlock_t * 9833 sfmmu_hat_tryenter(sfmmu_t *sfmmup) 9834 { 9835 hatlock_t *hatlockp; 9836 9837 if (sfmmup != ksfmmup) { 9838 hatlockp = TSB_HASH(sfmmup); 9839 if (mutex_tryenter(HATLOCK_MUTEXP(hatlockp)) == 0) 9840 return (NULL); 9841 return (hatlockp); 9842 } 9843 return (NULL); 9844 } 9845 9846 static void 9847 sfmmu_hat_exit(hatlock_t *hatlockp) 9848 { 9849 if (hatlockp != NULL) 9850 mutex_exit(HATLOCK_MUTEXP(hatlockp)); 9851 } 9852 9853 static void 9854 sfmmu_hat_lock_all(void) 9855 { 9856 int i; 9857 for (i = 0; i < SFMMU_NUM_LOCK; i++) 9858 mutex_enter(HATLOCK_MUTEXP(&hat_lock[i])); 9859 } 9860 9861 static void 9862 sfmmu_hat_unlock_all(void) 9863 { 9864 int i; 9865 for (i = SFMMU_NUM_LOCK - 1; i >= 0; i--) 9866 mutex_exit(HATLOCK_MUTEXP(&hat_lock[i])); 9867 } 9868 9869 int 9870 sfmmu_hat_lock_held(sfmmu_t *sfmmup) 9871 { 9872 ASSERT(sfmmup != ksfmmup); 9873 return (MUTEX_HELD(HATLOCK_MUTEXP(TSB_HASH(sfmmup)))); 9874 } 9875 9876 /* 9877 * Locking primitives to provide consistency between ISM unmap 9878 * and other operations. Since ISM unmap can take a long time, we 9879 * use HAT_ISMBUSY flag (protected by the hatlock) to avoid creating 9880 * contention on the hatlock buckets while ISM segments are being 9881 * unmapped. The tradeoff is that the flags don't prevent priority 9882 * inversion from occurring, so we must request kernel priority in 9883 * case we have to sleep to keep from getting buried while holding 9884 * the HAT_ISMBUSY flag set, which in turn could block other kernel 9885 * threads from running (for example, in sfmmu_uvatopfn()). 9886 */ 9887 static void 9888 sfmmu_ismhat_enter(sfmmu_t *sfmmup, int hatlock_held) 9889 { 9890 hatlock_t *hatlockp; 9891 9892 THREAD_KPRI_REQUEST(); 9893 if (!hatlock_held) 9894 hatlockp = sfmmu_hat_enter(sfmmup); 9895 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) 9896 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp)); 9897 SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY); 9898 if (!hatlock_held) 9899 sfmmu_hat_exit(hatlockp); 9900 } 9901 9902 static void 9903 sfmmu_ismhat_exit(sfmmu_t *sfmmup, int hatlock_held) 9904 { 9905 hatlock_t *hatlockp; 9906 9907 if (!hatlock_held) 9908 hatlockp = sfmmu_hat_enter(sfmmup); 9909 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 9910 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY); 9911 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 9912 if (!hatlock_held) 9913 sfmmu_hat_exit(hatlockp); 9914 THREAD_KPRI_RELEASE(); 9915 } 9916 9917 /* 9918 * 9919 * Algorithm: 9920 * 9921 * (1) if segkmem is not ready, allocate hblk from an array of pre-alloc'ed 9922 * hblks. 
9923 * 9924 * (2) if we are allocating an hblk for mapping a slab in sfmmu_cache, 9925 * 9926 * (a) try to return an hblk from reserve pool of free hblks; 9927 * (b) if the reserve pool is empty, acquire hblk_reserve_lock 9928 * and return hblk_reserve. 9929 * 9930 * (3) call kmem_cache_alloc() to allocate hblk; 9931 * 9932 * (a) if hblk_reserve_lock is held by the current thread, 9933 * atomically replace hblk_reserve by the hblk that is 9934 * returned by kmem_cache_alloc; release hblk_reserve_lock 9935 * and call kmem_cache_alloc() again. 9936 * (b) if reserve pool is not full, add the hblk that is 9937 * returned by kmem_cache_alloc to reserve pool and 9938 * call kmem_cache_alloc again. 9939 * 9940 */ 9941 static struct hme_blk * 9942 sfmmu_hblk_alloc(sfmmu_t *sfmmup, caddr_t vaddr, 9943 struct hmehash_bucket *hmebp, uint_t size, hmeblk_tag hblktag, 9944 uint_t flags) 9945 { 9946 struct hme_blk *hmeblkp = NULL; 9947 struct hme_blk *newhblkp; 9948 struct hme_blk *shw_hblkp = NULL; 9949 struct kmem_cache *sfmmu_cache = NULL; 9950 uint64_t hblkpa; 9951 ulong_t index; 9952 uint_t owner; /* set to 1 if using hblk_reserve */ 9953 uint_t forcefree; 9954 int sleep; 9955 9956 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 9957 9958 /* 9959 * If segkmem is not created yet, allocate from static hmeblks 9960 * created at the end of startup_modules(). See the block comment 9961 * in startup_modules() describing how we estimate the number of 9962 * static hmeblks that will be needed during re-map. 9963 */ 9964 if (!hblk_alloc_dynamic) { 9965 9966 if (size == TTE8K) { 9967 index = nucleus_hblk8.index; 9968 if (index >= nucleus_hblk8.len) { 9969 /* 9970 * If we panic here, see startup_modules() to 9971 * make sure that we are calculating the 9972 * number of hblk8's that we need correctly. 9973 */ 9974 prom_panic("no nucleus hblk8 to allocate"); 9975 } 9976 hmeblkp = 9977 (struct hme_blk *)&nucleus_hblk8.list[index]; 9978 nucleus_hblk8.index++; 9979 SFMMU_STAT(sf_hblk8_nalloc); 9980 } else { 9981 index = nucleus_hblk1.index; 9982 if (nucleus_hblk1.index >= nucleus_hblk1.len) { 9983 /* 9984 * If we panic here, see startup_modules(). 9985 * Most likely you need to update the 9986 * calculation of the number of hblk1 elements 9987 * that the kernel needs to boot. 9988 */ 9989 prom_panic("no nucleus hblk1 to allocate"); 9990 } 9991 hmeblkp = 9992 (struct hme_blk *)&nucleus_hblk1.list[index]; 9993 nucleus_hblk1.index++; 9994 SFMMU_STAT(sf_hblk1_nalloc); 9995 } 9996 9997 goto hblk_init; 9998 } 9999 10000 SFMMU_HASH_UNLOCK(hmebp); 10001 10002 if (sfmmup != KHATID) { 10003 if (mmu_page_sizes == max_mmu_page_sizes) { 10004 if (size < TTE256M) 10005 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr, 10006 size, flags); 10007 } else { 10008 if (size < TTE4M) 10009 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr, 10010 size, flags); 10011 } 10012 } 10013 10014 fill_hblk: 10015 owner = (hblk_reserve_thread == curthread) ? 1 : 0; 10016 10017 if (owner && size == TTE8K) { 10018 10019 /* 10020 * We are really in a tight spot. We already own 10021 * hblk_reserve and we need another hblk. In anticipation 10022 * of this kind of scenario, we specifically set aside 10023 * HBLK_RESERVE_MIN number of hblks to be used exclusively 10024 * by owner of hblk_reserve. 
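 *
 * Roughly, the scenario looks like this: this thread became the owner of
 * hblk_reserve while mapping a new sfmmu8_cache slab, and mapping that
 * slab itself needs one more 8K hblk. The nested request is satisfied
 * from the HBLK_RESERVE_MIN hblks held back in the free pool (the
 * sfmmu_get_free_hblk(&hmeblkp, 1) call below), so the recursion bottoms
 * out without needing hblk_reserve a second time.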
10025 */ 10026 SFMMU_STAT(sf_hblk_recurse_cnt); 10027 10028 if (!sfmmu_get_free_hblk(&hmeblkp, 1)) 10029 panic("sfmmu_hblk_alloc: reserve list is empty"); 10030 10031 goto hblk_verify; 10032 } 10033 10034 ASSERT(!owner); 10035 10036 if ((flags & HAT_NO_KALLOC) == 0) { 10037 10038 sfmmu_cache = ((size == TTE8K) ? sfmmu8_cache : sfmmu1_cache); 10039 sleep = ((sfmmup == KHATID) ? KM_NOSLEEP : KM_SLEEP); 10040 10041 if ((hmeblkp = kmem_cache_alloc(sfmmu_cache, sleep)) == NULL) { 10042 hmeblkp = sfmmu_hblk_steal(size); 10043 } else { 10044 /* 10045 * if we are the owner of hblk_reserve, 10046 * swap hblk_reserve with hmeblkp and 10047 * start a fresh life. Hope things go 10048 * better this time. 10049 */ 10050 if (hblk_reserve_thread == curthread) { 10051 ASSERT(sfmmu_cache == sfmmu8_cache); 10052 sfmmu_hblk_swap(hmeblkp); 10053 hblk_reserve_thread = NULL; 10054 mutex_exit(&hblk_reserve_lock); 10055 goto fill_hblk; 10056 } 10057 /* 10058 * let's donate this hblk to our reserve list if 10059 * we are not mapping kernel range 10060 */ 10061 if (size == TTE8K && sfmmup != KHATID) 10062 if (sfmmu_put_free_hblk(hmeblkp, 0)) 10063 goto fill_hblk; 10064 } 10065 } else { 10066 /* 10067 * We are here to map the slab in sfmmu8_cache; let's 10068 * check if we could tap our reserve list; if successful, 10069 * this will avoid the pain of going thru sfmmu_hblk_swap 10070 */ 10071 SFMMU_STAT(sf_hblk_slab_cnt); 10072 if (!sfmmu_get_free_hblk(&hmeblkp, 0)) { 10073 /* 10074 * let's start hblk_reserve dance 10075 */ 10076 SFMMU_STAT(sf_hblk_reserve_cnt); 10077 owner = 1; 10078 mutex_enter(&hblk_reserve_lock); 10079 hmeblkp = HBLK_RESERVE; 10080 hblk_reserve_thread = curthread; 10081 } 10082 } 10083 10084 hblk_verify: 10085 ASSERT(hmeblkp != NULL); 10086 set_hblk_sz(hmeblkp, size); 10087 ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp)); 10088 SFMMU_HASH_LOCK(hmebp); 10089 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp); 10090 if (newhblkp != NULL) { 10091 SFMMU_HASH_UNLOCK(hmebp); 10092 if (hmeblkp != HBLK_RESERVE) { 10093 /* 10094 * This is really tricky! 10095 * 10096 * vmem_alloc(vmem_seg_arena) 10097 * vmem_alloc(vmem_internal_arena) 10098 * segkmem_alloc(heap_arena) 10099 * vmem_alloc(heap_arena) 10100 * page_create() 10101 * hat_memload() 10102 * kmem_cache_free() 10103 * kmem_cache_alloc() 10104 * kmem_slab_create() 10105 * vmem_alloc(kmem_internal_arena) 10106 * segkmem_alloc(heap_arena) 10107 * vmem_alloc(heap_arena) 10108 * page_create() 10109 * hat_memload() 10110 * kmem_cache_free() 10111 * ... 10112 * 10113 * Thus, hat_memload() could call kmem_cache_free 10114 * for enough number of times that we could easily 10115 * hit the bottom of the stack or run out of reserve 10116 * list of vmem_seg structs. So, we must donate 10117 * this hblk to reserve list if it's allocated 10118 * from sfmmu8_cache *and* mapping kernel range. 10119 * We don't need to worry about freeing hmeblk1's 10120 * to kmem since they don't map any kmem slabs. 10121 * 10122 * Note: When segkmem supports largepages, we must 10123 * free hmeblk1's to reserve list as well. 10124 */ 10125 forcefree = (sfmmup == KHATID) ? 1 : 0; 10126 if (size == TTE8K && 10127 sfmmu_put_free_hblk(hmeblkp, forcefree)) { 10128 goto re_verify; 10129 } 10130 ASSERT(sfmmup != KHATID); 10131 kmem_cache_free(get_hblk_cache(hmeblkp), hmeblkp); 10132 } else { 10133 /* 10134 * Hey! we don't need hblk_reserve any more. 
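 *
 * (That is: another thread has already hashed in an hblk for this tag,
 * so the search above found it; we can give up ownership of hblk_reserve
 * here and let the re_verify pass below return the hblk that is already
 * in the hash.)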
10135 */ 10136 ASSERT(owner); 10137 hblk_reserve_thread = NULL; 10138 mutex_exit(&hblk_reserve_lock); 10139 owner = 0; 10140 } 10141 re_verify: 10142 /* 10143 * let's check if the goodies are still present 10144 */ 10145 SFMMU_HASH_LOCK(hmebp); 10146 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp); 10147 if (newhblkp != NULL) { 10148 /* 10149 * return newhblkp if it's not hblk_reserve; 10150 * if newhblkp is hblk_reserve, return it 10151 * _only if_ we are the owner of hblk_reserve. 10152 */ 10153 if (newhblkp != HBLK_RESERVE || owner) { 10154 return (newhblkp); 10155 } else { 10156 /* 10157 * we just hit hblk_reserve in the hash and 10158 * we are not the owner of that; 10159 * 10160 * block until hblk_reserve_thread completes 10161 * swapping hblk_reserve and try the dance 10162 * once again. 10163 */ 10164 SFMMU_HASH_UNLOCK(hmebp); 10165 mutex_enter(&hblk_reserve_lock); 10166 mutex_exit(&hblk_reserve_lock); 10167 SFMMU_STAT(sf_hblk_reserve_hit); 10168 goto fill_hblk; 10169 } 10170 } else { 10171 /* 10172 * it's no more! try the dance once again. 10173 */ 10174 SFMMU_HASH_UNLOCK(hmebp); 10175 goto fill_hblk; 10176 } 10177 } 10178 10179 hblk_init: 10180 set_hblk_sz(hmeblkp, size); 10181 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 10182 hmeblkp->hblk_next = (struct hme_blk *)NULL; 10183 hmeblkp->hblk_tag = hblktag; 10184 hmeblkp->hblk_shadow = shw_hblkp; 10185 hblkpa = hmeblkp->hblk_nextpa; 10186 hmeblkp->hblk_nextpa = 0; 10187 10188 ASSERT(get_hblk_ttesz(hmeblkp) == size); 10189 ASSERT(get_hblk_span(hmeblkp) == HMEBLK_SPAN(size)); 10190 ASSERT(hmeblkp->hblk_hmecnt == 0); 10191 ASSERT(hmeblkp->hblk_vcnt == 0); 10192 ASSERT(hmeblkp->hblk_lckcnt == 0); 10193 ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp)); 10194 sfmmu_hblk_hash_add(hmebp, hmeblkp, hblkpa); 10195 return (hmeblkp); 10196 } 10197 10198 /* 10199 * This function performs any cleanup required on the hme_blk 10200 * and returns it to the free list. 10201 */ 10202 /* ARGSUSED */ 10203 static void 10204 sfmmu_hblk_free(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp, 10205 uint64_t hblkpa, struct hme_blk **listp) 10206 { 10207 int shw_size, vshift; 10208 struct hme_blk *shw_hblkp; 10209 uint_t shw_mask, newshw_mask; 10210 uintptr_t vaddr; 10211 int size; 10212 uint_t critical; 10213 10214 ASSERT(hmeblkp); 10215 ASSERT(!hmeblkp->hblk_hmecnt); 10216 ASSERT(!hmeblkp->hblk_vcnt); 10217 ASSERT(!hmeblkp->hblk_lckcnt); 10218 ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp)); 10219 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve); 10220 10221 critical = (hblktosfmmu(hmeblkp) == KHATID) ? 
1 : 0; 10222 10223 size = get_hblk_ttesz(hmeblkp); 10224 shw_hblkp = hmeblkp->hblk_shadow; 10225 if (shw_hblkp) { 10226 ASSERT(hblktosfmmu(hmeblkp) != KHATID); 10227 if (mmu_page_sizes == max_mmu_page_sizes) { 10228 ASSERT(size < TTE256M); 10229 } else { 10230 ASSERT(size < TTE4M); 10231 } 10232 10233 shw_size = get_hblk_ttesz(shw_hblkp); 10234 vaddr = get_hblk_base(hmeblkp); 10235 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size); 10236 ASSERT(vshift < 8); 10237 /* 10238 * Atomically clear shadow mask bit 10239 */ 10240 do { 10241 shw_mask = shw_hblkp->hblk_shw_mask; 10242 ASSERT(shw_mask & (1 << vshift)); 10243 newshw_mask = shw_mask & ~(1 << vshift); 10244 newshw_mask = cas32(&shw_hblkp->hblk_shw_mask, 10245 shw_mask, newshw_mask); 10246 } while (newshw_mask != shw_mask); 10247 hmeblkp->hblk_shadow = NULL; 10248 } 10249 hmeblkp->hblk_next = NULL; 10250 hmeblkp->hblk_nextpa = hblkpa; 10251 hmeblkp->hblk_shw_bit = 0; 10252 10253 if (hmeblkp->hblk_nuc_bit == 0) { 10254 10255 if (size == TTE8K && sfmmu_put_free_hblk(hmeblkp, critical)) 10256 return; 10257 10258 hmeblkp->hblk_next = *listp; 10259 *listp = hmeblkp; 10260 } 10261 } 10262 10263 static void 10264 sfmmu_hblks_list_purge(struct hme_blk **listp) 10265 { 10266 struct hme_blk *hmeblkp; 10267 10268 while ((hmeblkp = *listp) != NULL) { 10269 *listp = hmeblkp->hblk_next; 10270 kmem_cache_free(get_hblk_cache(hmeblkp), hmeblkp); 10271 } 10272 } 10273 10274 #define BUCKETS_TO_SEARCH_BEFORE_UNLOAD 30 10275 #define SFMMU_HBLK_STEAL_THRESHOLD 5 10276 10277 static uint_t sfmmu_hblk_steal_twice; 10278 static uint_t sfmmu_hblk_steal_count, sfmmu_hblk_steal_unload_count; 10279 10280 /* 10281 * Steal a hmeblk from user or kernel hme hash lists. 10282 * For 8K tte grab one from reserve pool (freehblkp) before proceeding to 10283 * steal and if we fail to steal after SFMMU_HBLK_STEAL_THRESHOLD attempts 10284 * tap into critical reserve of freehblkp. 10285 * Note: We remain looping in this routine until we find one. 10286 */ 10287 static struct hme_blk * 10288 sfmmu_hblk_steal(int size) 10289 { 10290 static struct hmehash_bucket *uhmehash_steal_hand = NULL; 10291 struct hmehash_bucket *hmebp; 10292 struct hme_blk *hmeblkp = NULL, *pr_hblk; 10293 uint64_t hblkpa, prevpa; 10294 int i; 10295 uint_t loop_cnt = 0, critical; 10296 10297 for (;;) { 10298 if (size == TTE8K) { 10299 critical = 10300 (++loop_cnt > SFMMU_HBLK_STEAL_THRESHOLD) ? 1 : 0; 10301 if (sfmmu_get_free_hblk(&hmeblkp, critical)) 10302 return (hmeblkp); 10303 } 10304 10305 hmebp = (uhmehash_steal_hand == NULL) ? uhme_hash : 10306 uhmehash_steal_hand; 10307 ASSERT(hmebp >= uhme_hash && hmebp <= &uhme_hash[UHMEHASH_SZ]); 10308 10309 for (i = 0; hmeblkp == NULL && i <= UHMEHASH_SZ + 10310 BUCKETS_TO_SEARCH_BEFORE_UNLOAD; i++) { 10311 SFMMU_HASH_LOCK(hmebp); 10312 hmeblkp = hmebp->hmeblkp; 10313 hblkpa = hmebp->hmeh_nextpa; 10314 prevpa = 0; 10315 pr_hblk = NULL; 10316 while (hmeblkp) { 10317 /* 10318 * check if it is a hmeblk that is not locked 10319 * and not shared. skip shadow hmeblks with 10320 * shadow_mask set i.e valid count non zero. 10321 */ 10322 if ((get_hblk_ttesz(hmeblkp) == size) && 10323 (hmeblkp->hblk_shw_bit == 0 || 10324 hmeblkp->hblk_vcnt == 0) && 10325 (hmeblkp->hblk_lckcnt == 0)) { 10326 /* 10327 * there is a high probability that we 10328 * will find a free one. search some 10329 * buckets for a free hmeblk initially 10330 * before unloading a valid hmeblk. 
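 *
 * Concretely: for the first BUCKETS_TO_SEARCH_BEFORE_UNLOAD buckets only
 * hmeblks that are already completely free (hblk_vcnt == 0 and
 * hblk_hmecnt == 0) are taken; once that many buckets have been scanned,
 * any unlocked, unshared hmeblk of the right size is unloaded and stolen.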
10331 */ 10332 if ((hmeblkp->hblk_vcnt == 0 && 10333 hmeblkp->hblk_hmecnt == 0) || (i >= 10334 BUCKETS_TO_SEARCH_BEFORE_UNLOAD)) { 10335 if (sfmmu_steal_this_hblk(hmebp, 10336 hmeblkp, hblkpa, prevpa, 10337 pr_hblk)) { 10338 /* 10339 * Hblk is unloaded 10340 * successfully 10341 */ 10342 break; 10343 } 10344 } 10345 } 10346 pr_hblk = hmeblkp; 10347 prevpa = hblkpa; 10348 hblkpa = hmeblkp->hblk_nextpa; 10349 hmeblkp = hmeblkp->hblk_next; 10350 } 10351 10352 SFMMU_HASH_UNLOCK(hmebp); 10353 if (hmebp++ == &uhme_hash[UHMEHASH_SZ]) 10354 hmebp = uhme_hash; 10355 } 10356 uhmehash_steal_hand = hmebp; 10357 10358 if (hmeblkp != NULL) 10359 break; 10360 10361 /* 10362 * in the worst case, look for a free one in the kernel 10363 * hash table. 10364 */ 10365 for (i = 0, hmebp = khme_hash; i <= KHMEHASH_SZ; i++) { 10366 SFMMU_HASH_LOCK(hmebp); 10367 hmeblkp = hmebp->hmeblkp; 10368 hblkpa = hmebp->hmeh_nextpa; 10369 prevpa = 0; 10370 pr_hblk = NULL; 10371 while (hmeblkp) { 10372 /* 10373 * check if it is free hmeblk 10374 */ 10375 if ((get_hblk_ttesz(hmeblkp) == size) && 10376 (hmeblkp->hblk_lckcnt == 0) && 10377 (hmeblkp->hblk_vcnt == 0) && 10378 (hmeblkp->hblk_hmecnt == 0)) { 10379 if (sfmmu_steal_this_hblk(hmebp, 10380 hmeblkp, hblkpa, prevpa, pr_hblk)) { 10381 break; 10382 } else { 10383 /* 10384 * Cannot fail since we have 10385 * hash lock. 10386 */ 10387 panic("fail to steal?"); 10388 } 10389 } 10390 10391 pr_hblk = hmeblkp; 10392 prevpa = hblkpa; 10393 hblkpa = hmeblkp->hblk_nextpa; 10394 hmeblkp = hmeblkp->hblk_next; 10395 } 10396 10397 SFMMU_HASH_UNLOCK(hmebp); 10398 if (hmebp++ == &khme_hash[KHMEHASH_SZ]) 10399 hmebp = khme_hash; 10400 } 10401 10402 if (hmeblkp != NULL) 10403 break; 10404 sfmmu_hblk_steal_twice++; 10405 } 10406 return (hmeblkp); 10407 } 10408 10409 /* 10410 * This routine does real work to prepare a hblk to be "stolen" by 10411 * unloading the mappings, updating shadow counts .... 10412 * It returns 1 if the block is ready to be reused (stolen), or 0 10413 * means the block cannot be stolen yet- pageunload is still working 10414 * on this hblk. 10415 */ 10416 static int 10417 sfmmu_steal_this_hblk(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp, 10418 uint64_t hblkpa, uint64_t prevpa, struct hme_blk *pr_hblk) 10419 { 10420 int shw_size, vshift; 10421 struct hme_blk *shw_hblkp; 10422 uintptr_t vaddr; 10423 uint_t shw_mask, newshw_mask; 10424 10425 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 10426 10427 /* 10428 * check if the hmeblk is free, unload if necessary 10429 */ 10430 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 10431 sfmmu_t *sfmmup; 10432 demap_range_t dmr; 10433 10434 sfmmup = hblktosfmmu(hmeblkp); 10435 DEMAP_RANGE_INIT(sfmmup, &dmr); 10436 (void) sfmmu_hblk_unload(sfmmup, hmeblkp, 10437 (caddr_t)get_hblk_base(hmeblkp), 10438 get_hblk_endaddr(hmeblkp), &dmr, HAT_UNLOAD); 10439 DEMAP_RANGE_FLUSH(&dmr); 10440 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 10441 /* 10442 * Pageunload is working on the same hblk. 
10443 */ 10444 return (0); 10445 } 10446 10447 sfmmu_hblk_steal_unload_count++; 10448 } 10449 10450 ASSERT(hmeblkp->hblk_lckcnt == 0); 10451 ASSERT(hmeblkp->hblk_vcnt == 0 && hmeblkp->hblk_hmecnt == 0); 10452 10453 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, pr_hblk); 10454 hmeblkp->hblk_nextpa = hblkpa; 10455 10456 shw_hblkp = hmeblkp->hblk_shadow; 10457 if (shw_hblkp) { 10458 shw_size = get_hblk_ttesz(shw_hblkp); 10459 vaddr = get_hblk_base(hmeblkp); 10460 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size); 10461 ASSERT(vshift < 8); 10462 /* 10463 * Atomically clear shadow mask bit 10464 */ 10465 do { 10466 shw_mask = shw_hblkp->hblk_shw_mask; 10467 ASSERT(shw_mask & (1 << vshift)); 10468 newshw_mask = shw_mask & ~(1 << vshift); 10469 newshw_mask = cas32(&shw_hblkp->hblk_shw_mask, 10470 shw_mask, newshw_mask); 10471 } while (newshw_mask != shw_mask); 10472 hmeblkp->hblk_shadow = NULL; 10473 } 10474 10475 /* 10476 * remove shadow bit if we are stealing an unused shadow hmeblk. 10477 * sfmmu_hblk_alloc needs it that way, will set shadow bit later if 10478 * we are indeed allocating a shadow hmeblk. 10479 */ 10480 hmeblkp->hblk_shw_bit = 0; 10481 10482 sfmmu_hblk_steal_count++; 10483 SFMMU_STAT(sf_steal_count); 10484 10485 return (1); 10486 } 10487 10488 struct hme_blk * 10489 sfmmu_hmetohblk(struct sf_hment *sfhme) 10490 { 10491 struct hme_blk *hmeblkp; 10492 struct sf_hment *sfhme0; 10493 struct hme_blk *hblk_dummy = 0; 10494 10495 /* 10496 * No dummy sf_hments, please. 10497 */ 10498 ASSERT(sfhme->hme_tte.ll != 0); 10499 10500 sfhme0 = sfhme - sfhme->hme_tte.tte_hmenum; 10501 hmeblkp = (struct hme_blk *)((uintptr_t)sfhme0 - 10502 (uintptr_t)&hblk_dummy->hblk_hme[0]); 10503 10504 return (hmeblkp); 10505 } 10506 10507 /* 10508 * On swapin, get appropriately sized TSB(s) and clear the HAT_SWAPPED flag. 10509 * If we can't get appropriately sized TSB(s), try for 8K TSB(s) using 10510 * KM_SLEEP allocation. 10511 * 10512 * This routine does not return a value; the 8K fallback cannot fail. 10513 */ 10514 static void 10515 sfmmu_tsb_swapin(sfmmu_t *sfmmup, hatlock_t *hatlockp) 10516 { 10517 struct tsb_info *tsbinfop, *next; 10518 tsb_replace_rc_t rc; 10519 boolean_t gotfirst = B_FALSE; 10520 10521 ASSERT(sfmmup != ksfmmup); 10522 ASSERT(sfmmu_hat_lock_held(sfmmup)); 10523 10524 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPIN)) { 10525 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp)); 10526 } 10527 10528 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 10529 SFMMU_FLAGS_SET(sfmmup, HAT_SWAPIN); 10530 } else { 10531 return; 10532 } 10533 10534 ASSERT(sfmmup->sfmmu_tsb != NULL); 10535 10536 /* 10537 * Loop over all tsbinfo's replacing them with ones that actually have 10538 * a TSB. If any of the replacements ever fail, bail out of the loop. 10539 */ 10540 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; tsbinfop = next) { 10541 ASSERT(tsbinfop->tsb_flags & TSB_SWAPPED); 10542 next = tsbinfop->tsb_next; 10543 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, tsbinfop->tsb_szc, 10544 hatlockp, TSB_SWAPIN); 10545 if (rc != TSB_SUCCESS) { 10546 break; 10547 } 10548 gotfirst = B_TRUE; 10549 } 10550 10551 switch (rc) { 10552 case TSB_SUCCESS: 10553 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN); 10554 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 10555 return; 10556 case TSB_ALLOCFAIL: 10557 break; 10558 default: 10559 panic("sfmmu_replace_tsb returned unrecognized failure code " 10560 "%d", rc); 10561 } 10562 10563 /* 10564 * In this case, we failed to get one of our TSBs.
If we failed to 10565 * get the first TSB, get one of minimum size (8KB). Walk the list 10566 * and throw away the tsbinfos, starting where the allocation failed; 10567 * we can get by with just one TSB as long as we don't leave the 10568 * SWAPPED tsbinfo structures lying around. 10569 */ 10570 tsbinfop = sfmmup->sfmmu_tsb; 10571 next = tsbinfop->tsb_next; 10572 tsbinfop->tsb_next = NULL; 10573 10574 sfmmu_hat_exit(hatlockp); 10575 for (tsbinfop = next; tsbinfop != NULL; tsbinfop = next) { 10576 next = tsbinfop->tsb_next; 10577 sfmmu_tsbinfo_free(tsbinfop); 10578 } 10579 hatlockp = sfmmu_hat_enter(sfmmup); 10580 10581 /* 10582 * If we don't have any TSBs, get a single 8K TSB for 8K, 64K and 512K 10583 * pages. 10584 */ 10585 if (!gotfirst) { 10586 tsbinfop = sfmmup->sfmmu_tsb; 10587 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, TSB_MIN_SZCODE, 10588 hatlockp, TSB_SWAPIN | TSB_FORCEALLOC); 10589 ASSERT(rc == TSB_SUCCESS); 10590 } else { 10591 /* update machine specific tsbinfo */ 10592 sfmmu_setup_tsbinfo(sfmmup); 10593 } 10594 10595 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN); 10596 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 10597 } 10598 10599 /* 10600 * Handle exceptions from the low-level TSB miss handlers. 10601 * 10602 * There are many scenarios that could land us here: 10603 * 10604 * If the context is invalid we land here. The context can be invalid 10605 * for 3 reasons: 1) we couldn't allocate a new context and now need to 10606 * perform a wrap-around operation in order to allocate a new context; 10607 * 2) the context was invalidated to change pagesize programming; 3) the 10608 * ISM or TSB configuration is changing for this process and we are 10609 * forced in here to do a synchronization operation. If the context is 10610 * valid we can be here from the window trap handler, in which case we 10611 * just call trap() to handle the fault. 10612 * 10613 * Note that the process will run in INVALID_CONTEXT before 10614 * faulting into here and subsequently loading the MMU registers 10615 * (including the TSB base register) associated with this process. 10616 * For this reason, the trap handlers must all test for 10617 * INVALID_CONTEXT before attempting to access any registers other 10618 * than the context registers. 10619 */ 10620 void 10621 sfmmu_tsbmiss_exception(struct regs *rp, uintptr_t tagaccess, uint_t traptype) 10622 { 10623 sfmmu_t *sfmmup; 10624 uint_t ctxtype; 10625 klwp_id_t lwp; 10626 char lwp_save_state; 10627 hatlock_t *hatlockp; 10628 struct tsb_info *tsbinfop; 10629 10630 SFMMU_STAT(sf_tsb_exceptions); 10631 SFMMU_MMU_STAT(mmu_tsb_exceptions); 10632 sfmmup = astosfmmu(curthread->t_procp->p_as); 10633 /* 10634 * note that in sun4u, the tagaccess register contains the ctxnum 10635 * while sun4v passes ctxtype in the tagaccess register. 10636 */ 10637 ctxtype = tagaccess & TAGACC_CTX_MASK; 10638 10639 ASSERT(sfmmup != ksfmmup && ctxtype != KCONTEXT); 10640 ASSERT(sfmmup->sfmmu_ismhat == 0); 10641 /* 10642 * First, make sure we come out of here with a valid ctx, 10643 * since if we don't get one we'll simply loop on the 10644 * faulting instruction. 10645 * 10646 * If the ISM mappings are changing, the TSB is being relocated, or 10647 * the process is swapped out we serialize behind the controlling 10648 * thread with the sfmmu_flags and sfmmu_tsb_cv condition variable.
10649 * Otherwise we synchronize with the context stealer or the thread 10650 * that required us to change out our MMU registers (such 10651 * as a thread changing out our TSB while we were running) by 10652 * locking the HAT and grabbing the rwlock on the context as a 10653 * reader temporarily. 10654 */ 10655 ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED) || 10656 ctxtype == INVALID_CONTEXT); 10657 10658 if (ctxtype == INVALID_CONTEXT) { 10659 /* 10660 * Must set lwp state to LWP_SYS before 10661 * trying to acquire any adaptive lock 10662 */ 10663 lwp = ttolwp(curthread); 10664 ASSERT(lwp); 10665 lwp_save_state = lwp->lwp_state; 10666 lwp->lwp_state = LWP_SYS; 10667 10668 hatlockp = sfmmu_hat_enter(sfmmup); 10669 retry: 10670 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; 10671 tsbinfop = tsbinfop->tsb_next) { 10672 if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) { 10673 cv_wait(&sfmmup->sfmmu_tsb_cv, 10674 HATLOCK_MUTEXP(hatlockp)); 10675 goto retry; 10676 } 10677 } 10678 10679 /* 10680 * Wait for ISM maps to be updated. 10681 */ 10682 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) { 10683 cv_wait(&sfmmup->sfmmu_tsb_cv, 10684 HATLOCK_MUTEXP(hatlockp)); 10685 goto retry; 10686 } 10687 10688 /* 10689 * If we're swapping in, get TSB(s). Note that we must do 10690 * this before we get a ctx or load the MMU state. Once 10691 * we swap in we have to recheck to make sure the TSB(s) and 10692 * ISM mappings didn't change while we slept. 10693 */ 10694 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 10695 sfmmu_tsb_swapin(sfmmup, hatlockp); 10696 goto retry; 10697 } 10698 10699 sfmmu_get_ctx(sfmmup); 10700 10701 sfmmu_hat_exit(hatlockp); 10702 /* 10703 * Must restore lwp_state if not calling 10704 * trap() for further processing. Restore 10705 * it anyway. 10706 */ 10707 lwp->lwp_state = lwp_save_state; 10708 if (sfmmup->sfmmu_ttecnt[TTE8K] != 0 || 10709 sfmmup->sfmmu_ttecnt[TTE64K] != 0 || 10710 sfmmup->sfmmu_ttecnt[TTE512K] != 0 || 10711 sfmmup->sfmmu_ttecnt[TTE4M] != 0 || 10712 sfmmup->sfmmu_ttecnt[TTE32M] != 0 || 10713 sfmmup->sfmmu_ttecnt[TTE256M] != 0) { 10714 return; 10715 } 10716 if (traptype == T_DATA_PROT) { 10717 traptype = T_DATA_MMU_MISS; 10718 } 10719 } 10720 trap(rp, (caddr_t)tagaccess, traptype, 0); 10721 } 10722 10723 /* 10724 * sfmmu_vatopfn_suspended is called from GET_TTE when TL=0 and the 10725 * TTE_SUSPENDED bit is set in the tte. We block on acquiring a page lock 10726 * rather than spinning, to avoid send-mondo timeouts with 10727 * interrupts enabled. When the lock is acquired it is immediately 10728 * released and we return back to sfmmu_vatopfn just after 10729 * the GET_TTE call. 10730 */ 10731 void 10732 sfmmu_vatopfn_suspended(caddr_t vaddr, sfmmu_t *sfmmu, tte_t *ttep) 10733 { 10734 struct page **pp; 10735 10736 (void) as_pagelock(sfmmu->sfmmu_as, &pp, vaddr, TTE_CSZ(ttep), S_WRITE); 10737 as_pageunlock(sfmmu->sfmmu_as, pp, vaddr, TTE_CSZ(ttep), S_WRITE); 10738 } 10739 10740 /* 10741 * sfmmu_tsbmiss_suspended is called from GET_TTE when TL>0 and the 10742 * TTE_SUSPENDED bit is set in the tte. We do this so that we can handle 10743 * cross traps which cannot be handled while spinning in the 10744 * trap handlers. Simply enter and exit the kpr_suspendlock spin 10745 * mutex, which is held by the holder of the suspend bit, and then 10746 * retry the trapped instruction after unwinding.
10747 */ 10748 /*ARGSUSED*/ 10749 void 10750 sfmmu_tsbmiss_suspended(struct regs *rp, uintptr_t tagacc, uint_t traptype) 10751 { 10752 ASSERT(curthread != kreloc_thread); 10753 mutex_enter(&kpr_suspendlock); 10754 mutex_exit(&kpr_suspendlock); 10755 } 10756 10757 /* 10758 * Special routine to flush out ism mappings- TSBs, TLBs and D-caches. 10759 * This routine may be called with all cpu's captured. Therefore, the 10760 * caller is responsible for holding all locks and disabling kernel 10761 * preemption. 10762 */ 10763 /* ARGSUSED */ 10764 static void 10765 sfmmu_ismtlbcache_demap(caddr_t addr, sfmmu_t *ism_sfmmup, 10766 struct hme_blk *hmeblkp, pfn_t pfnum, int cache_flush_flag) 10767 { 10768 cpuset_t cpuset; 10769 caddr_t va; 10770 ism_ment_t *ment; 10771 sfmmu_t *sfmmup; 10772 #ifdef VAC 10773 int vcolor; 10774 #endif 10775 int ttesz; 10776 10777 /* 10778 * Walk the ism_hat's mapping list and flush the page 10779 * from every hat sharing this ism_hat. This routine 10780 * may be called while all cpu's have been captured. 10781 * Therefore we can't attempt to grab any locks. For now 10782 * this means we will protect the ism mapping list under 10783 * a single lock which will be grabbed by the caller. 10784 * If hat_share/unshare scalibility becomes a performance 10785 * problem then we may need to re-think ism mapping list locking. 10786 */ 10787 ASSERT(ism_sfmmup->sfmmu_ismhat); 10788 ASSERT(MUTEX_HELD(&ism_mlist_lock)); 10789 addr = addr - ISMID_STARTADDR; 10790 for (ment = ism_sfmmup->sfmmu_iment; ment; ment = ment->iment_next) { 10791 10792 sfmmup = ment->iment_hat; 10793 10794 va = ment->iment_base_va; 10795 va = (caddr_t)((uintptr_t)va + (uintptr_t)addr); 10796 10797 /* 10798 * Flush TSB of ISM mappings. 10799 */ 10800 ttesz = get_hblk_ttesz(hmeblkp); 10801 if (ttesz == TTE8K || ttesz == TTE4M) { 10802 sfmmu_unload_tsb(sfmmup, va, ttesz); 10803 } else { 10804 caddr_t sva = va; 10805 caddr_t eva; 10806 ASSERT(addr == (caddr_t)get_hblk_base(hmeblkp)); 10807 eva = sva + get_hblk_span(hmeblkp); 10808 sfmmu_unload_tsb_range(sfmmup, sva, eva, ttesz); 10809 } 10810 10811 cpuset = sfmmup->sfmmu_cpusran; 10812 CPUSET_AND(cpuset, cpu_ready_set); 10813 CPUSET_DEL(cpuset, CPU->cpu_id); 10814 10815 SFMMU_XCALL_STATS(sfmmup); 10816 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)va, 10817 (uint64_t)sfmmup); 10818 10819 vtag_flushpage(va, (uint64_t)sfmmup); 10820 10821 #ifdef VAC 10822 /* 10823 * Flush D$ 10824 * When flushing D$ we must flush all 10825 * cpu's. See sfmmu_cache_flush(). 10826 */ 10827 if (cache_flush_flag == CACHE_FLUSH) { 10828 cpuset = cpu_ready_set; 10829 CPUSET_DEL(cpuset, CPU->cpu_id); 10830 10831 SFMMU_XCALL_STATS(sfmmup); 10832 vcolor = addr_to_vcolor(va); 10833 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor); 10834 vac_flushpage(pfnum, vcolor); 10835 } 10836 #endif /* VAC */ 10837 } 10838 } 10839 10840 /* 10841 * Demaps the TSB, CPU caches, and flushes all TLBs on all CPUs of 10842 * a particular virtual address and ctx. If noflush is set we do not 10843 * flush the TLB/TSB. This function may or may not be called with the 10844 * HAT lock held. 
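 *
 * The flush sequence implemented below is: unload the TSB entry for the
 * address, cross-call vtag_flushpage_tl1 to the ready CPUs in
 * sfmmu_cpusran, flush the local TLB with vtag_flushpage(), and then, if
 * CACHE_FLUSH was requested, cross-call vac_flushpage_tl1 (to all ready
 * CPUs when FLUSH_ALL_CPUS is set) and flush the local D$ with
 * vac_flushpage().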
10845 */ 10846 static void 10847 sfmmu_tlbcache_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp, 10848 pfn_t pfnum, int tlb_noflush, int cpu_flag, int cache_flush_flag, 10849 int hat_lock_held) 10850 { 10851 #ifdef VAC 10852 int vcolor; 10853 #endif 10854 cpuset_t cpuset; 10855 hatlock_t *hatlockp; 10856 10857 #if defined(lint) && !defined(VAC) 10858 pfnum = pfnum; 10859 cpu_flag = cpu_flag; 10860 cache_flush_flag = cache_flush_flag; 10861 #endif 10862 /* 10863 * There is no longer a need to protect against ctx being 10864 * stolen here since we don't store the ctx in the TSB anymore. 10865 */ 10866 #ifdef VAC 10867 vcolor = addr_to_vcolor(addr); 10868 #endif 10869 10870 /* 10871 * We must hold the hat lock during the flush of TLB, 10872 * to avoid a race with sfmmu_invalidate_ctx(), where 10873 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT, 10874 * causing TLB demap routine to skip flush on that MMU. 10875 * If the context on a MMU has already been set to 10876 * INVALID_CONTEXT, we just get an extra flush on 10877 * that MMU. 10878 */ 10879 if (!hat_lock_held && !tlb_noflush) 10880 hatlockp = sfmmu_hat_enter(sfmmup); 10881 10882 kpreempt_disable(); 10883 if (!tlb_noflush) { 10884 /* 10885 * Flush the TSB and TLB. 10886 */ 10887 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp); 10888 10889 cpuset = sfmmup->sfmmu_cpusran; 10890 CPUSET_AND(cpuset, cpu_ready_set); 10891 CPUSET_DEL(cpuset, CPU->cpu_id); 10892 10893 SFMMU_XCALL_STATS(sfmmup); 10894 10895 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, 10896 (uint64_t)sfmmup); 10897 10898 vtag_flushpage(addr, (uint64_t)sfmmup); 10899 } 10900 10901 if (!hat_lock_held && !tlb_noflush) 10902 sfmmu_hat_exit(hatlockp); 10903 10904 #ifdef VAC 10905 /* 10906 * Flush the D$ 10907 * 10908 * Even if the ctx is stolen, we need to flush the 10909 * cache. Our ctx stealer only flushes the TLBs. 10910 */ 10911 if (cache_flush_flag == CACHE_FLUSH) { 10912 if (cpu_flag & FLUSH_ALL_CPUS) { 10913 cpuset = cpu_ready_set; 10914 } else { 10915 cpuset = sfmmup->sfmmu_cpusran; 10916 CPUSET_AND(cpuset, cpu_ready_set); 10917 } 10918 CPUSET_DEL(cpuset, CPU->cpu_id); 10919 SFMMU_XCALL_STATS(sfmmup); 10920 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor); 10921 vac_flushpage(pfnum, vcolor); 10922 } 10923 #endif /* VAC */ 10924 kpreempt_enable(); 10925 } 10926 10927 /* 10928 * Demaps the TSB and flushes all TLBs on all cpus for a particular virtual 10929 * address and ctx. If noflush is set we do not currently do anything. 10930 * This function may or may not be called with the HAT lock held. 10931 */ 10932 static void 10933 sfmmu_tlb_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp, 10934 int tlb_noflush, int hat_lock_held) 10935 { 10936 cpuset_t cpuset; 10937 hatlock_t *hatlockp; 10938 10939 /* 10940 * If the process is exiting we have nothing to do. 10941 */ 10942 if (tlb_noflush) 10943 return; 10944 10945 /* 10946 * Flush TSB. 
10947 */ 10948 if (!hat_lock_held) 10949 hatlockp = sfmmu_hat_enter(sfmmup); 10950 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp); 10951 10952 kpreempt_disable(); 10953 10954 cpuset = sfmmup->sfmmu_cpusran; 10955 CPUSET_AND(cpuset, cpu_ready_set); 10956 CPUSET_DEL(cpuset, CPU->cpu_id); 10957 10958 SFMMU_XCALL_STATS(sfmmup); 10959 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, (uint64_t)sfmmup); 10960 10961 vtag_flushpage(addr, (uint64_t)sfmmup); 10962 10963 if (!hat_lock_held) 10964 sfmmu_hat_exit(hatlockp); 10965 10966 kpreempt_enable(); 10967 10968 } 10969 10970 /* 10971 * Special case of sfmmu_tlb_demap for MMU_PAGESIZE hblks. Use the xcall 10972 * call handler that can flush a range of pages to save on xcalls. 10973 */ 10974 static int sfmmu_xcall_save; 10975 10976 static void 10977 sfmmu_tlb_range_demap(demap_range_t *dmrp) 10978 { 10979 sfmmu_t *sfmmup = dmrp->dmr_sfmmup; 10980 hatlock_t *hatlockp; 10981 cpuset_t cpuset; 10982 uint64_t sfmmu_pgcnt; 10983 pgcnt_t pgcnt = 0; 10984 int pgunload = 0; 10985 int dirtypg = 0; 10986 caddr_t addr = dmrp->dmr_addr; 10987 caddr_t eaddr; 10988 uint64_t bitvec = dmrp->dmr_bitvec; 10989 10990 ASSERT(bitvec & 1); 10991 10992 /* 10993 * Flush TSB and calculate number of pages to flush. 10994 */ 10995 while (bitvec != 0) { 10996 dirtypg = 0; 10997 /* 10998 * Find the first page to flush and then count how many 10999 * pages there are after it that also need to be flushed. 11000 * This way the number of TSB flushes is minimized. 11001 */ 11002 while ((bitvec & 1) == 0) { 11003 pgcnt++; 11004 addr += MMU_PAGESIZE; 11005 bitvec >>= 1; 11006 } 11007 while (bitvec & 1) { 11008 dirtypg++; 11009 bitvec >>= 1; 11010 } 11011 eaddr = addr + ptob(dirtypg); 11012 hatlockp = sfmmu_hat_enter(sfmmup); 11013 sfmmu_unload_tsb_range(sfmmup, addr, eaddr, TTE8K); 11014 sfmmu_hat_exit(hatlockp); 11015 pgunload += dirtypg; 11016 addr = eaddr; 11017 pgcnt += dirtypg; 11018 } 11019 11020 ASSERT((pgcnt<<MMU_PAGESHIFT) <= dmrp->dmr_endaddr - dmrp->dmr_addr); 11021 if (sfmmup->sfmmu_free == 0) { 11022 addr = dmrp->dmr_addr; 11023 bitvec = dmrp->dmr_bitvec; 11024 11025 /* 11026 * make sure it has SFMMU_PGCNT_SHIFT bits only, 11027 * as it will be used to pack argument for xt_some 11028 */ 11029 ASSERT((pgcnt > 0) && 11030 (pgcnt <= (1 << SFMMU_PGCNT_SHIFT))); 11031 11032 /* 11033 * Encode pgcnt as (pgcnt -1 ), and pass (pgcnt - 1) in 11034 * the low 6 bits of sfmmup. This is doable since pgcnt 11035 * always >= 1. 11036 */ 11037 ASSERT(!((uint64_t)sfmmup & SFMMU_PGCNT_MASK)); 11038 sfmmu_pgcnt = (uint64_t)sfmmup | 11039 ((pgcnt - 1) & SFMMU_PGCNT_MASK); 11040 11041 /* 11042 * We must hold the hat lock during the flush of TLB, 11043 * to avoid a race with sfmmu_invalidate_ctx(), where 11044 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT, 11045 * causing TLB demap routine to skip flush on that MMU. 11046 * If the context on a MMU has already been set to 11047 * INVALID_CONTEXT, we just get an extra flush on 11048 * that MMU. 
11049 */ 11050 hatlockp = sfmmu_hat_enter(sfmmup); 11051 kpreempt_disable(); 11052 11053 cpuset = sfmmup->sfmmu_cpusran; 11054 CPUSET_AND(cpuset, cpu_ready_set); 11055 CPUSET_DEL(cpuset, CPU->cpu_id); 11056 11057 SFMMU_XCALL_STATS(sfmmup); 11058 xt_some(cpuset, vtag_flush_pgcnt_tl1, (uint64_t)addr, 11059 sfmmu_pgcnt); 11060 11061 for (; bitvec != 0; bitvec >>= 1) { 11062 if (bitvec & 1) 11063 vtag_flushpage(addr, (uint64_t)sfmmup); 11064 addr += MMU_PAGESIZE; 11065 } 11066 kpreempt_enable(); 11067 sfmmu_hat_exit(hatlockp); 11068 11069 sfmmu_xcall_save += (pgunload-1); 11070 } 11071 dmrp->dmr_bitvec = 0; 11072 } 11073 11074 /* 11075 * In cases where we need to synchronize with TLB/TSB miss trap 11076 * handlers, _and_ need to flush the TLB, it's a lot easier to 11077 * throw away the context from the process than to do a 11078 * special song and dance to keep things consistent for the 11079 * handlers. 11080 * 11081 * Since the process suddenly ends up without a context and our caller 11082 * holds the hat lock, threads that fault after this function is called 11083 * will pile up on the lock. We can then do whatever we need to 11084 * atomically from the context of the caller. The first blocked thread 11085 * to resume executing will get the process a new context, and the 11086 * process will resume executing. 11087 * 11088 * One added advantage of this approach is that on MMUs that 11089 * support a "flush all" operation, we will delay the flush until 11090 * cnum wrap-around, and then flush the TLB one time. This 11091 * is rather rare, so it's a lot less expensive than making 8000 11092 * x-calls to flush the TLB 8000 times. 11093 * 11094 * A per-process (PP) lock is used to synchronize ctx allocations in 11095 * resume() and ctx invalidations here. 11096 */ 11097 static void 11098 sfmmu_invalidate_ctx(sfmmu_t *sfmmup) 11099 { 11100 cpuset_t cpuset; 11101 int cnum, currcnum; 11102 mmu_ctx_t *mmu_ctxp; 11103 int i; 11104 uint_t pstate_save; 11105 11106 SFMMU_STAT(sf_ctx_inv); 11107 11108 ASSERT(sfmmu_hat_lock_held(sfmmup)); 11109 ASSERT(sfmmup != ksfmmup); 11110 11111 kpreempt_disable(); 11112 11113 mmu_ctxp = CPU_MMU_CTXP(CPU); 11114 ASSERT(mmu_ctxp); 11115 ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms); 11116 ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]); 11117 11118 currcnum = sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum; 11119 11120 pstate_save = sfmmu_disable_intrs(); 11121 11122 lock_set(&sfmmup->sfmmu_ctx_lock); /* acquire PP lock */ 11123 /* set HAT cnum invalid across all context domains. */ 11124 for (i = 0; i < max_mmu_ctxdoms; i++) { 11125 11126 cnum = sfmmup->sfmmu_ctxs[i].cnum; 11127 if (cnum == INVALID_CONTEXT) { 11128 continue; 11129 } 11130 11131 sfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT; 11132 } 11133 membar_enter(); /* make sure globally visible to all CPUs */ 11134 lock_clear(&sfmmup->sfmmu_ctx_lock); /* release PP lock */ 11135 11136 sfmmu_enable_intrs(pstate_save); 11137 11138 cpuset = sfmmup->sfmmu_cpusran; 11139 CPUSET_DEL(cpuset, CPU->cpu_id); 11140 CPUSET_AND(cpuset, cpu_ready_set); 11141 if (!CPUSET_ISNULL(cpuset)) { 11142 SFMMU_XCALL_STATS(sfmmup); 11143 xt_some(cpuset, sfmmu_raise_tsb_exception, 11144 (uint64_t)sfmmup, INVALID_CONTEXT); 11145 xt_sync(cpuset); 11146 SFMMU_STAT(sf_tsb_raise_exception); 11147 SFMMU_MMU_STAT(mmu_tsb_raise_exception); 11148 } 11149 11150 /* 11151 * If the hat to-be-invalidated is the same as the current 11152 * process on local CPU we need to invalidate 11153 * this CPU context as well. 
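 *
 * (sfmmu_getctx_sec() below reads the context number currently loaded in
 * this CPU's secondary context register; when it matches the cnum we just
 * invalidated, the register is reset to INVALID_CONTEXT and the user TSB
 * registers are cleared via sfmmu_clear_utsbinfo().)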
11154 */ 11155 if ((sfmmu_getctx_sec() == currcnum) && 11156 (currcnum != INVALID_CONTEXT)) { 11157 sfmmu_setctx_sec(INVALID_CONTEXT); 11158 sfmmu_clear_utsbinfo(); 11159 } 11160 11161 kpreempt_enable(); 11162 11163 /* 11164 * we hold the hat lock, so nobody should allocate a context 11165 * for us yet 11166 */ 11167 ASSERT(sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum == INVALID_CONTEXT); 11168 } 11169 11170 #ifdef VAC 11171 /* 11172 * We need to flush the cache on all cpus. It is possible that 11173 * a process referenced a page as cacheable but has since exited 11174 * and cleared the mapping list. We still need to flush it, but have no 11175 * state, so flushing on all cpus is the only alternative. 11176 */ 11177 void 11178 sfmmu_cache_flush(pfn_t pfnum, int vcolor) 11179 { 11180 cpuset_t cpuset; 11181 11182 kpreempt_disable(); 11183 cpuset = cpu_ready_set; 11184 CPUSET_DEL(cpuset, CPU->cpu_id); 11185 SFMMU_XCALL_STATS(NULL); /* account to any ctx */ 11186 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor); 11187 xt_sync(cpuset); 11188 vac_flushpage(pfnum, vcolor); 11189 kpreempt_enable(); 11190 } 11191 11192 void 11193 sfmmu_cache_flushcolor(int vcolor, pfn_t pfnum) 11194 { 11195 cpuset_t cpuset; 11196 11197 ASSERT(vcolor >= 0); 11198 11199 kpreempt_disable(); 11200 cpuset = cpu_ready_set; 11201 CPUSET_DEL(cpuset, CPU->cpu_id); 11202 SFMMU_XCALL_STATS(NULL); /* account to any ctx */ 11203 xt_some(cpuset, vac_flushcolor_tl1, vcolor, pfnum); 11204 xt_sync(cpuset); 11205 vac_flushcolor(vcolor, pfnum); 11206 kpreempt_enable(); 11207 } 11208 #endif /* VAC */ 11209 11210 /* 11211 * We need to prevent processes from accessing the TSB using a cached physical 11212 * address. It's alright if they try to access the TSB via virtual address 11213 * since they will just fault on that virtual address once the mapping has 11214 * been suspended. 11215 */ 11216 #pragma weak sendmondo_in_recover 11217 11218 /* ARGSUSED */ 11219 static int 11220 sfmmu_tsb_pre_relocator(caddr_t va, uint_t tsbsz, uint_t flags, void *tsbinfo) 11221 { 11222 hatlock_t *hatlockp; 11223 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo; 11224 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu; 11225 extern uint32_t sendmondo_in_recover; 11226 11227 if (flags != HAT_PRESUSPEND) 11228 return (0); 11229 11230 hatlockp = sfmmu_hat_enter(sfmmup); 11231 11232 tsbinfop->tsb_flags |= TSB_RELOC_FLAG; 11233 11234 /* 11235 * For Cheetah+ Erratum 25: 11236 * Wait for any active recovery to finish. We can't risk 11237 * relocating the TSB of the thread running mondo_recover_proc() 11238 * since, if we did that, we would deadlock. The scenario we are 11239 * trying to avoid is as follows: 11240 * 11241 * THIS CPU RECOVER CPU 11242 * -------- ----------- 11243 * Begins recovery, walking through TSB 11244 * hat_pagesuspend() TSB TTE 11245 * TLB miss on TSB TTE, spins at TL1 11246 * xt_sync() 11247 * send_mondo_timeout() 11248 * mondo_recover_proc() 11249 * ((deadlocked)) 11250 * 11251 * The second half of the workaround is that mondo_recover_proc() 11252 * checks to see if the tsb_info has the RELOC flag set, and if it 11253 * does, it skips over that TSB without ever touching tsbinfop->tsb_va 11254 * and hence avoiding the TLB miss that could result in a deadlock.
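 *
 * The wait loop below is the first half of that workaround: after the
 * TSB_RELOC_FLAG store is made visible with membar_enter(), we spin with
 * drv_usecwait(1)/membar_consumer() until sendmondo_in_recover drops to
 * zero, i.e. until any in-progress recovery has completed.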
11255 */ 11256 if (&sendmondo_in_recover) { 11257 membar_enter(); /* make sure RELOC flag visible */ 11258 while (sendmondo_in_recover) { 11259 drv_usecwait(1); 11260 membar_consumer(); 11261 } 11262 } 11263 11264 sfmmu_invalidate_ctx(sfmmup); 11265 sfmmu_hat_exit(hatlockp); 11266 11267 return (0); 11268 } 11269 11270 /* ARGSUSED */ 11271 static int 11272 sfmmu_tsb_post_relocator(caddr_t va, uint_t tsbsz, uint_t flags, 11273 void *tsbinfo, pfn_t newpfn) 11274 { 11275 hatlock_t *hatlockp; 11276 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo; 11277 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu; 11278 11279 if (flags != HAT_POSTUNSUSPEND) 11280 return (0); 11281 11282 hatlockp = sfmmu_hat_enter(sfmmup); 11283 11284 SFMMU_STAT(sf_tsb_reloc); 11285 11286 /* 11287 * The process may have swapped out while we were relocating one 11288 * of its TSBs. If so, don't bother doing the setup since the 11289 * process can't be using the memory anymore. 11290 */ 11291 if ((tsbinfop->tsb_flags & TSB_SWAPPED) == 0) { 11292 ASSERT(va == tsbinfop->tsb_va); 11293 sfmmu_tsbinfo_setup_phys(tsbinfop, newpfn); 11294 sfmmu_setup_tsbinfo(sfmmup); 11295 11296 if (tsbinfop->tsb_flags & TSB_FLUSH_NEEDED) { 11297 sfmmu_inv_tsb(tsbinfop->tsb_va, 11298 TSB_BYTES(tsbinfop->tsb_szc)); 11299 tsbinfop->tsb_flags &= ~TSB_FLUSH_NEEDED; 11300 } 11301 } 11302 11303 membar_exit(); 11304 tsbinfop->tsb_flags &= ~TSB_RELOC_FLAG; 11305 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 11306 11307 sfmmu_hat_exit(hatlockp); 11308 11309 return (0); 11310 } 11311 11312 /* 11313 * Allocate and initialize a tsb_info structure. Note that we may or may not 11314 * allocate a TSB here, depending on the flags passed in. 11315 */ 11316 static int 11317 sfmmu_tsbinfo_alloc(struct tsb_info **tsbinfopp, int tsb_szc, int tte_sz_mask, 11318 uint_t flags, sfmmu_t *sfmmup) 11319 { 11320 int err; 11321 11322 *tsbinfopp = (struct tsb_info *)kmem_cache_alloc( 11323 sfmmu_tsbinfo_cache, KM_SLEEP); 11324 11325 if ((err = sfmmu_init_tsbinfo(*tsbinfopp, tte_sz_mask, 11326 tsb_szc, flags, sfmmup)) != 0) { 11327 kmem_cache_free(sfmmu_tsbinfo_cache, *tsbinfopp); 11328 SFMMU_STAT(sf_tsb_allocfail); 11329 *tsbinfopp = NULL; 11330 return (err); 11331 } 11332 SFMMU_STAT(sf_tsb_alloc); 11333 11334 /* 11335 * Bump the TSB size counters for this TSB size. 11336 */ 11337 (*(((int *)&sfmmu_tsbsize_stat) + tsb_szc))++; 11338 return (0); 11339 } 11340 11341 static void 11342 sfmmu_tsb_free(struct tsb_info *tsbinfo) 11343 { 11344 caddr_t tsbva = tsbinfo->tsb_va; 11345 uint_t tsb_size = TSB_BYTES(tsbinfo->tsb_szc); 11346 struct kmem_cache *kmem_cachep = tsbinfo->tsb_cache; 11347 vmem_t *vmp = tsbinfo->tsb_vmp; 11348 11349 /* 11350 * If we allocated this TSB from relocatable kernel memory, then we 11351 * need to uninstall the callback handler. 
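 *
 * This is the mirror image of the registration done in
 * sfmmu_init_tsbinfo(): lock the slab's root 8K page, remove the
 * relocation callback with hat_delete_callback(), then unlock the page.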
11352 */ 11353 if (tsbinfo->tsb_cache != sfmmu_tsb8k_cache) { 11354 uintptr_t slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT; 11355 caddr_t slab_vaddr = (caddr_t)((uintptr_t)tsbva & slab_mask); 11356 page_t **ppl; 11357 int ret; 11358 11359 ret = as_pagelock(&kas, &ppl, slab_vaddr, PAGESIZE, S_WRITE); 11360 ASSERT(ret == 0); 11361 hat_delete_callback(tsbva, (uint_t)tsb_size, (void *)tsbinfo, 11362 0, NULL); 11363 as_pageunlock(&kas, ppl, slab_vaddr, PAGESIZE, S_WRITE); 11364 } 11365 11366 if (kmem_cachep != NULL) { 11367 kmem_cache_free(kmem_cachep, tsbva); 11368 } else { 11369 vmem_xfree(vmp, (void *)tsbva, tsb_size); 11370 } 11371 tsbinfo->tsb_va = (caddr_t)0xbad00bad; 11372 atomic_add_64(&tsb_alloc_bytes, -(int64_t)tsb_size); 11373 } 11374 11375 static void 11376 sfmmu_tsbinfo_free(struct tsb_info *tsbinfo) 11377 { 11378 if ((tsbinfo->tsb_flags & TSB_SWAPPED) == 0) { 11379 sfmmu_tsb_free(tsbinfo); 11380 } 11381 kmem_cache_free(sfmmu_tsbinfo_cache, tsbinfo); 11382 11383 } 11384 11385 /* 11386 * Setup all the references to physical memory for this tsbinfo. 11387 * The underlying page(s) must be locked. 11388 */ 11389 static void 11390 sfmmu_tsbinfo_setup_phys(struct tsb_info *tsbinfo, pfn_t pfn) 11391 { 11392 ASSERT(pfn != PFN_INVALID); 11393 ASSERT(pfn == va_to_pfn(tsbinfo->tsb_va)); 11394 11395 #ifndef sun4v 11396 if (tsbinfo->tsb_szc == 0) { 11397 sfmmu_memtte(&tsbinfo->tsb_tte, pfn, 11398 PROT_WRITE|PROT_READ, TTE8K); 11399 } else { 11400 /* 11401 * Round down PA and use a large mapping; the handlers will 11402 * compute the TSB pointer at the correct offset into the 11403 * big virtual page. NOTE: this assumes all TSBs larger 11404 * than 8K must come from physically contiguous slabs of 11405 * size tsb_slab_size. 11406 */ 11407 sfmmu_memtte(&tsbinfo->tsb_tte, pfn & ~tsb_slab_mask, 11408 PROT_WRITE|PROT_READ, tsb_slab_ttesz); 11409 } 11410 tsbinfo->tsb_pa = ptob(pfn); 11411 11412 TTE_SET_LOCKED(&tsbinfo->tsb_tte); /* lock the tte into dtlb */ 11413 TTE_SET_MOD(&tsbinfo->tsb_tte); /* enable writes */ 11414 11415 ASSERT(TTE_IS_PRIVILEGED(&tsbinfo->tsb_tte)); 11416 ASSERT(TTE_IS_LOCKED(&tsbinfo->tsb_tte)); 11417 #else /* sun4v */ 11418 tsbinfo->tsb_pa = ptob(pfn); 11419 #endif /* sun4v */ 11420 } 11421 11422 11423 /* 11424 * Returns zero on success, ENOMEM if over the high water mark, 11425 * or EAGAIN if the caller needs to retry with a smaller TSB 11426 * size (or specify TSB_FORCEALLOC if the allocation can't fail). 11427 * 11428 * This call cannot fail to allocate a TSB if TSB_FORCEALLOC 11429 * is specified and the TSB requested is PAGESIZE, though it 11430 * may sleep waiting for memory if sufficient memory is not 11431 * available. 
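 *
 * A caller-side sketch of how these return values are meant to be used
 * (illustrative only; the caller in this file is sfmmu_tsbinfo_alloc(),
 * and the local names below are made up):
 *
 *	while ((err = sfmmu_init_tsbinfo(tsbinfo, tteszmask, szc,
 *	    flags, sfmmup)) == EAGAIN && szc > TSB_MIN_SZCODE)
 *		szc--;			/* retry with a smaller TSB */
 *	if (err == EAGAIN)		/* last resort; cannot fail */
 *		err = sfmmu_init_tsbinfo(tsbinfo, tteszmask, TSB_MIN_SZCODE,
 *		    flags | TSB_FORCEALLOC, sfmmup);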
11432 */ 11433 static int 11434 sfmmu_init_tsbinfo(struct tsb_info *tsbinfo, int tteszmask, 11435 int tsbcode, uint_t flags, sfmmu_t *sfmmup) 11436 { 11437 caddr_t vaddr = NULL; 11438 caddr_t slab_vaddr; 11439 uintptr_t slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT; 11440 int tsbbytes = TSB_BYTES(tsbcode); 11441 int lowmem = 0; 11442 struct kmem_cache *kmem_cachep = NULL; 11443 vmem_t *vmp = NULL; 11444 lgrp_id_t lgrpid = LGRP_NONE; 11445 pfn_t pfn; 11446 uint_t cbflags = HAC_SLEEP; 11447 page_t **pplist; 11448 int ret; 11449 11450 if (flags & (TSB_FORCEALLOC | TSB_SWAPIN | TSB_GROW | TSB_SHRINK)) 11451 flags |= TSB_ALLOC; 11452 11453 ASSERT((flags & TSB_FORCEALLOC) == 0 || tsbcode == TSB_MIN_SZCODE); 11454 11455 tsbinfo->tsb_sfmmu = sfmmup; 11456 11457 /* 11458 * If not allocating a TSB, set up the tsbinfo, set TSB_SWAPPED, and 11459 * return. 11460 */ 11461 if ((flags & TSB_ALLOC) == 0) { 11462 tsbinfo->tsb_szc = tsbcode; 11463 tsbinfo->tsb_ttesz_mask = tteszmask; 11464 tsbinfo->tsb_va = (caddr_t)0xbadbadbeef; 11465 tsbinfo->tsb_pa = -1; 11466 tsbinfo->tsb_tte.ll = 0; 11467 tsbinfo->tsb_next = NULL; 11468 tsbinfo->tsb_flags = TSB_SWAPPED; 11469 tsbinfo->tsb_cache = NULL; 11470 tsbinfo->tsb_vmp = NULL; 11471 return (0); 11472 } 11473 11474 #ifdef DEBUG 11475 /* 11476 * For debugging: 11477 * Randomly force allocation failures every tsb_alloc_mtbf 11478 * tries if TSB_FORCEALLOC is not specified. This will 11479 * return ENOMEM if tsb_alloc_mtbf is odd, or EAGAIN if 11480 * it is even, to allow testing of both failure paths... 11481 */ 11482 if (tsb_alloc_mtbf && ((flags & TSB_FORCEALLOC) == 0) && 11483 (tsb_alloc_count++ == tsb_alloc_mtbf)) { 11484 tsb_alloc_count = 0; 11485 tsb_alloc_fail_mtbf++; 11486 return ((tsb_alloc_mtbf & 1)? ENOMEM : EAGAIN); 11487 } 11488 #endif /* DEBUG */ 11489 11490 /* 11491 * Enforce high water mark if we are not doing a forced allocation 11492 * and are not shrinking a process' TSB. 11493 */ 11494 if ((flags & TSB_SHRINK) == 0 && 11495 (tsbbytes + tsb_alloc_bytes) > tsb_alloc_hiwater) { 11496 if ((flags & TSB_FORCEALLOC) == 0) 11497 return (ENOMEM); 11498 lowmem = 1; 11499 } 11500 11501 /* 11502 * Allocate from the correct location based upon the size of the TSB 11503 * compared to the base page size, and what memory conditions dictate. 11504 * Note we always do nonblocking allocations from the TSB arena since 11505 * we don't want memory fragmentation to cause processes to block 11506 * indefinitely waiting for memory; until the kernel algorithms that 11507 * coalesce large pages are improved this is our best option. 
11508 * 11509 * Algorithm: 11510 * If allocating a "large" TSB (>8K), allocate from the 11511 * appropriate kmem_tsb_default_arena vmem arena 11512 * else if low on memory or the TSB_FORCEALLOC flag is set or 11513 * tsb_forceheap is set 11514 * Allocate from kernel heap via sfmmu_tsb8k_cache with 11515 * KM_SLEEP (never fails) 11516 * else 11517 * Allocate from appropriate sfmmu_tsb_cache with 11518 * KM_NOSLEEP 11519 * endif 11520 */ 11521 if (tsb_lgrp_affinity) 11522 lgrpid = lgrp_home_id(curthread); 11523 if (lgrpid == LGRP_NONE) 11524 lgrpid = 0; /* use lgrp of boot CPU */ 11525 11526 if (tsbbytes > MMU_PAGESIZE) { 11527 vmp = kmem_tsb_default_arena[lgrpid]; 11528 vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes, 0, 0, 11529 NULL, NULL, VM_NOSLEEP); 11530 #ifdef DEBUG 11531 } else if (lowmem || (flags & TSB_FORCEALLOC) || tsb_forceheap) { 11532 #else /* !DEBUG */ 11533 } else if (lowmem || (flags & TSB_FORCEALLOC)) { 11534 #endif /* DEBUG */ 11535 kmem_cachep = sfmmu_tsb8k_cache; 11536 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_SLEEP); 11537 ASSERT(vaddr != NULL); 11538 } else { 11539 kmem_cachep = sfmmu_tsb_cache[lgrpid]; 11540 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_NOSLEEP); 11541 } 11542 11543 tsbinfo->tsb_cache = kmem_cachep; 11544 tsbinfo->tsb_vmp = vmp; 11545 11546 if (vaddr == NULL) { 11547 return (EAGAIN); 11548 } 11549 11550 atomic_add_64(&tsb_alloc_bytes, (int64_t)tsbbytes); 11551 kmem_cachep = tsbinfo->tsb_cache; 11552 11553 /* 11554 * If we are allocating from outside the cage, then we need to 11555 * register a relocation callback handler. Note that for now 11556 * since pseudo mappings always hang off of the slab's root page, 11557 * we need only lock the first 8K of the TSB slab. This is a bit 11558 * hacky but it is good for performance. 11559 */ 11560 if (kmem_cachep != sfmmu_tsb8k_cache) { 11561 slab_vaddr = (caddr_t)((uintptr_t)vaddr & slab_mask); 11562 ret = as_pagelock(&kas, &pplist, slab_vaddr, PAGESIZE, S_WRITE); 11563 ASSERT(ret == 0); 11564 ret = hat_add_callback(sfmmu_tsb_cb_id, vaddr, (uint_t)tsbbytes, 11565 cbflags, (void *)tsbinfo, &pfn, NULL); 11566 11567 /* 11568 * Need to free up resources if we could not successfully 11569 * add the callback function and return an error condition. 11570 */ 11571 if (ret != 0) { 11572 if (kmem_cachep) { 11573 kmem_cache_free(kmem_cachep, vaddr); 11574 } else { 11575 vmem_xfree(vmp, (void *)vaddr, tsbbytes); 11576 } 11577 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE, 11578 S_WRITE); 11579 return (EAGAIN); 11580 } 11581 } else { 11582 /* 11583 * Since allocation of 8K TSBs from heap is rare and occurs 11584 * during memory pressure we allocate them from permanent 11585 * memory rather than using callbacks to get the PFN. 
11586 */ 11587 pfn = hat_getpfnum(kas.a_hat, vaddr); 11588 } 11589 11590 tsbinfo->tsb_va = vaddr; 11591 tsbinfo->tsb_szc = tsbcode; 11592 tsbinfo->tsb_ttesz_mask = tteszmask; 11593 tsbinfo->tsb_next = NULL; 11594 tsbinfo->tsb_flags = 0; 11595 11596 sfmmu_tsbinfo_setup_phys(tsbinfo, pfn); 11597 11598 if (kmem_cachep != sfmmu_tsb8k_cache) { 11599 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE, S_WRITE); 11600 } 11601 11602 sfmmu_inv_tsb(vaddr, tsbbytes); 11603 return (0); 11604 } 11605 11606 /* 11607 * Initialize per cpu tsb and per cpu tsbmiss_area 11608 */ 11609 void 11610 sfmmu_init_tsbs(void) 11611 { 11612 int i; 11613 struct tsbmiss *tsbmissp; 11614 struct kpmtsbm *kpmtsbmp; 11615 #ifndef sun4v 11616 extern int dcache_line_mask; 11617 #endif /* sun4v */ 11618 extern uint_t vac_colors; 11619 11620 /* 11621 * Init. tsb miss area. 11622 */ 11623 tsbmissp = tsbmiss_area; 11624 11625 for (i = 0; i < NCPU; tsbmissp++, i++) { 11626 /* 11627 * initialize the tsbmiss area. 11628 * Do this for all possible CPUs as some may be added 11629 * while the system is running. There is no cost to this. 11630 */ 11631 tsbmissp->ksfmmup = ksfmmup; 11632 #ifndef sun4v 11633 tsbmissp->dcache_line_mask = (uint16_t)dcache_line_mask; 11634 #endif /* sun4v */ 11635 tsbmissp->khashstart = 11636 (struct hmehash_bucket *)va_to_pa((caddr_t)khme_hash); 11637 tsbmissp->uhashstart = 11638 (struct hmehash_bucket *)va_to_pa((caddr_t)uhme_hash); 11639 tsbmissp->khashsz = khmehash_num; 11640 tsbmissp->uhashsz = uhmehash_num; 11641 } 11642 11643 sfmmu_tsb_cb_id = hat_register_callback('T'<<16 | 'S' << 8 | 'B', 11644 sfmmu_tsb_pre_relocator, sfmmu_tsb_post_relocator, NULL, 0); 11645 11646 if (kpm_enable == 0) 11647 return; 11648 11649 /* -- Begin KPM specific init -- */ 11650 11651 if (kpm_smallpages) { 11652 /* 11653 * If we're using base pagesize pages for seg_kpm 11654 * mappings, we use the kernel TSB since we can't afford 11655 * to allocate a second huge TSB for these mappings. 11656 */ 11657 kpm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base; 11658 kpm_tsbsz = ktsb_szcode; 11659 kpmsm_tsbbase = kpm_tsbbase; 11660 kpmsm_tsbsz = kpm_tsbsz; 11661 } else { 11662 /* 11663 * In VAC conflict case, just put the entries in the 11664 * kernel 8K indexed TSB for now so we can find them. 11665 * This could really be changed in the future if we feel 11666 * the need... 11667 */ 11668 kpmsm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base; 11669 kpmsm_tsbsz = ktsb_szcode; 11670 kpm_tsbbase = ktsb_phys? ktsb4m_pbase : (uint64_t)ktsb4m_base; 11671 kpm_tsbsz = ktsb4m_szcode; 11672 } 11673 11674 kpmtsbmp = kpmtsbm_area; 11675 for (i = 0; i < NCPU; kpmtsbmp++, i++) { 11676 /* 11677 * Initialize the kpmtsbm area. 11678 * Do this for all possible CPUs as some may be added 11679 * while the system is running. There is no cost to this. 11680 */ 11681 kpmtsbmp->vbase = kpm_vbase; 11682 kpmtsbmp->vend = kpm_vbase + kpm_size * vac_colors; 11683 kpmtsbmp->sz_shift = kpm_size_shift; 11684 kpmtsbmp->kpmp_shift = kpmp_shift; 11685 kpmtsbmp->kpmp2pshft = (uchar_t)kpmp2pshft; 11686 if (kpm_smallpages == 0) { 11687 kpmtsbmp->kpmp_table_sz = kpmp_table_sz; 11688 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_table); 11689 } else { 11690 kpmtsbmp->kpmp_table_sz = kpmp_stable_sz; 11691 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_stable); 11692 } 11693 kpmtsbmp->msegphashpa = va_to_pa(memseg_phash); 11694 kpmtsbmp->flags = KPMTSBM_ENABLE_FLAG; 11695 #ifdef DEBUG 11696 kpmtsbmp->flags |= (kpm_tsbmtl) ? 
KPMTSBM_TLTSBM_FLAG : 0; 11697 #endif /* DEBUG */ 11698 if (ktsb_phys) 11699 kpmtsbmp->flags |= KPMTSBM_TSBPHYS_FLAG; 11700 } 11701 11702 /* -- End KPM specific init -- */ 11703 } 11704 11705 /* Avoid using sfmmu_tsbinfo_alloc() to avoid kmem_alloc - no real reason */ 11706 struct tsb_info ktsb_info[2]; 11707 11708 /* 11709 * Called from hat_kern_setup() to setup the tsb_info for ksfmmup. 11710 */ 11711 void 11712 sfmmu_init_ktsbinfo() 11713 { 11714 ASSERT(ksfmmup != NULL); 11715 ASSERT(ksfmmup->sfmmu_tsb == NULL); 11716 /* 11717 * Allocate tsbinfos for kernel and copy in data 11718 * to make debug easier and sun4v setup easier. 11719 */ 11720 ktsb_info[0].tsb_sfmmu = ksfmmup; 11721 ktsb_info[0].tsb_szc = ktsb_szcode; 11722 ktsb_info[0].tsb_ttesz_mask = TSB8K|TSB64K|TSB512K; 11723 ktsb_info[0].tsb_va = ktsb_base; 11724 ktsb_info[0].tsb_pa = ktsb_pbase; 11725 ktsb_info[0].tsb_flags = 0; 11726 ktsb_info[0].tsb_tte.ll = 0; 11727 ktsb_info[0].tsb_cache = NULL; 11728 11729 ktsb_info[1].tsb_sfmmu = ksfmmup; 11730 ktsb_info[1].tsb_szc = ktsb4m_szcode; 11731 ktsb_info[1].tsb_ttesz_mask = TSB4M; 11732 ktsb_info[1].tsb_va = ktsb4m_base; 11733 ktsb_info[1].tsb_pa = ktsb4m_pbase; 11734 ktsb_info[1].tsb_flags = 0; 11735 ktsb_info[1].tsb_tte.ll = 0; 11736 ktsb_info[1].tsb_cache = NULL; 11737 11738 /* Link them into ksfmmup. */ 11739 ktsb_info[0].tsb_next = &ktsb_info[1]; 11740 ktsb_info[1].tsb_next = NULL; 11741 ksfmmup->sfmmu_tsb = &ktsb_info[0]; 11742 11743 sfmmu_setup_tsbinfo(ksfmmup); 11744 } 11745 11746 /* 11747 * Cache the last value returned from va_to_pa(). If the VA specified 11748 * in the current call to cached_va_to_pa() maps to the same Page (as the 11749 * previous call to cached_va_to_pa()), then compute the PA using 11750 * cached info, else call va_to_pa(). 11751 * 11752 * Note: this function is neither MT-safe nor consistent in the presence 11753 * of multiple, interleaved threads. This function was created to enable 11754 * an optimization used during boot (at a point when there's only one thread 11755 * executing on the "boot CPU", and before startup_vm() has been called). 11756 */ 11757 static uint64_t 11758 cached_va_to_pa(void *vaddr) 11759 { 11760 static uint64_t prev_vaddr_base = 0; 11761 static uint64_t prev_pfn = 0; 11762 11763 if ((((uint64_t)vaddr) & MMU_PAGEMASK) == prev_vaddr_base) { 11764 return (prev_pfn | ((uint64_t)vaddr & MMU_PAGEOFFSET)); 11765 } else { 11766 uint64_t pa = va_to_pa(vaddr); 11767 11768 if (pa != ((uint64_t)-1)) { 11769 /* 11770 * Computed physical address is valid. Cache its 11771 * related info for the next cached_va_to_pa() call. 11772 */ 11773 prev_pfn = pa & MMU_PAGEMASK; 11774 prev_vaddr_base = ((uint64_t)vaddr) & MMU_PAGEMASK; 11775 } 11776 11777 return (pa); 11778 } 11779 } 11780 11781 /* 11782 * Carve up our nucleus hblk region. We may allocate more hblks than 11783 * asked due to rounding errors but we are guaranteed to have at least 11784 * enough space to allocate the requested number of hblk8's and hblk1's. 
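 *
 * An informal sanity argument for that guarantee: the hblk8 loop below
 * stops before crossing hblk8_bound = size - nhblk1 * hme1blk_sz -
 * hme8blk_sz, so at least nhblk1 * hme1blk_sz bytes are always left for
 * the hblk1 loop; assuming the caller sized the region for nhblk8
 * hblk8's plus nhblk1 hblk1's (see startup_modules()), the trailing
 * ASSERTs on j and k then hold.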

/*
 * Carve up our nucleus hblk region.  We may allocate more hblks than
 * asked due to rounding errors but we are guaranteed to have at least
 * enough space to allocate the requested number of hblk8's and hblk1's.
 */
void
sfmmu_init_nucleus_hblks(caddr_t addr, size_t size, int nhblk8, int nhblk1)
{
        struct hme_blk *hmeblkp;
        size_t hme8blk_sz, hme1blk_sz;
        size_t i;
        size_t hblk8_bound;
        ulong_t j = 0, k = 0;

        ASSERT(addr != NULL && size != 0);

        /* Need to use proper structure alignment */
        hme8blk_sz = roundup(HME8BLK_SZ, sizeof (int64_t));
        hme1blk_sz = roundup(HME1BLK_SZ, sizeof (int64_t));

        nucleus_hblk8.list = (void *)addr;
        nucleus_hblk8.index = 0;

        /*
         * Use as much memory as possible for hblk8's since we
         * expect all bop_alloc'ed memory to be allocated in 8k chunks.
         * We need to hold back enough space for the hblk1's which
         * we'll allocate next.
         */
        hblk8_bound = size - (nhblk1 * hme1blk_sz) - hme8blk_sz;
        for (i = 0; i <= hblk8_bound; i += hme8blk_sz, j++) {
                hmeblkp = (struct hme_blk *)addr;
                addr += hme8blk_sz;
                hmeblkp->hblk_nuc_bit = 1;
                hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp);
        }
        nucleus_hblk8.len = j;
        ASSERT(j >= nhblk8);
        SFMMU_STAT_ADD(sf_hblk8_ncreate, j);

        nucleus_hblk1.list = (void *)addr;
        nucleus_hblk1.index = 0;
        for (; i <= (size - hme1blk_sz); i += hme1blk_sz, k++) {
                hmeblkp = (struct hme_blk *)addr;
                addr += hme1blk_sz;
                hmeblkp->hblk_nuc_bit = 1;
                hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp);
        }
        ASSERT(k >= nhblk1);
        nucleus_hblk1.len = k;
        SFMMU_STAT_ADD(sf_hblk1_ncreate, k);
}
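
/*
 * Worked example (comment only, numbers hypothetical): suppose size is
 * 0x4000 bytes, hme1blk_sz rounds to 0x50, hme8blk_sz rounds to 0x70 and
 * nhblk1 is 8.  Then
 *
 *      hblk8_bound = 0x4000 - (8 * 0x50) - 0x70 = 0x3d10
 *
 * so the first loop carves hblk8's while i <= 0x3d10; the trailing
 * -hme8blk_sz keeps the last hblk8 from spilling into the space reserved
 * for the hblk1's, and the second loop carves hblk1's out of whatever is
 * left.  Rounding slack simply becomes extra hblks, which is why j and k
 * may end up larger than nhblk8 and nhblk1 (the ASSERTs only require that
 * they are at least that large).
 */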

/*
 * This function is currently not supported on this platform.  For what
 * it's supposed to do, see hat.c and hat_srmmu.c.
 */
/* ARGSUSED */
faultcode_t
hat_softlock(struct hat *hat, caddr_t addr, size_t *lenp, page_t **ppp,
    uint_t flags)
{
        ASSERT(hat->sfmmu_xhat_provider == NULL);
        return (FC_NOSUPPORT);
}

/*
 * Searches the mapping list of the page for a mapping of the same size.  If
 * none is found, the corresponding bit is cleared in the p_index field.  When
 * large pages become more prevalent in the system, we could maintain the
 * mapping list in order so we don't have to traverse the list each time;
 * just check the next and prev entries, and if both are of a different size,
 * clear the bit.
 */
static void
sfmmu_rm_large_mappings(page_t *pp, int ttesz)
{
        struct sf_hment *sfhmep;
        struct hme_blk *hmeblkp;
        int index;
        pgcnt_t npgs;

        ASSERT(ttesz > TTE8K);

        ASSERT(sfmmu_mlist_held(pp));

        ASSERT(PP_ISMAPPED_LARGE(pp));

        /*
         * Traverse the mapping list looking for another mapping of the same
         * size, since we only want to clear the index field if all mappings
         * of that size are gone.
         */
        for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
                hmeblkp = sfmmu_hmetohblk(sfhmep);
                if (hmeblkp->hblk_xhat_bit)
                        continue;
                if (hme_size(sfhmep) == ttesz) {
                        /*
                         * Another mapping of the same size; don't clear
                         * the index.
                         */
                        return;
                }
        }

        /*
         * Clear the p_index bit for the large page.
         */
        index = PAGESZ_TO_INDEX(ttesz);
        npgs = TTEPAGES(ttesz);
        while (npgs-- > 0) {
                ASSERT(pp->p_index & index);
                pp->p_index &= ~index;
                pp = PP_PAGENEXT(pp);
        }
}

/*
 * Return the supported features.
 */
/* ARGSUSED */
int
hat_supported(enum hat_features feature, void *arg)
{
        switch (feature) {
        case HAT_SHARED_PT:
        case HAT_DYNAMIC_ISM_UNMAP:
        case HAT_VMODSORT:
                return (1);
        default:
                return (0);
        }
}

void
hat_enter(struct hat *hat)
{
        hatlock_t *hatlockp;

        if (hat != ksfmmup) {
                hatlockp = TSB_HASH(hat);
                mutex_enter(HATLOCK_MUTEXP(hatlockp));
        }
}

void
hat_exit(struct hat *hat)
{
        hatlock_t *hatlockp;

        if (hat != ksfmmup) {
                hatlockp = TSB_HASH(hat);
                mutex_exit(HATLOCK_MUTEXP(hatlockp));
        }
}

/*ARGSUSED*/
void
hat_reserve(struct as *as, caddr_t addr, size_t len)
{
}

static void
hat_kstat_init(void)
{
        kstat_t *ksp;

        ksp = kstat_create("unix", 0, "sfmmu_global_stat", "hat",
            KSTAT_TYPE_RAW, sizeof (struct sfmmu_global_stat),
            KSTAT_FLAG_VIRTUAL);
        if (ksp) {
                ksp->ks_data = (void *) &sfmmu_global_stat;
                kstat_install(ksp);
        }
        ksp = kstat_create("unix", 0, "sfmmu_tsbsize_stat", "hat",
            KSTAT_TYPE_RAW, sizeof (struct sfmmu_tsbsize_stat),
            KSTAT_FLAG_VIRTUAL);
        if (ksp) {
                ksp->ks_data = (void *) &sfmmu_tsbsize_stat;
                kstat_install(ksp);
        }
        ksp = kstat_create("unix", 0, "sfmmu_percpu_stat", "hat",
            KSTAT_TYPE_RAW, sizeof (struct sfmmu_percpu_stat) * NCPU,
            KSTAT_FLAG_WRITABLE);
        if (ksp) {
                ksp->ks_update = sfmmu_kstat_percpu_update;
                kstat_install(ksp);
        }
}

/* ARGSUSED */
static int
sfmmu_kstat_percpu_update(kstat_t *ksp, int rw)
{
        struct sfmmu_percpu_stat *cpu_kstat = ksp->ks_data;
        struct tsbmiss *tsbm = tsbmiss_area;
        struct kpmtsbm *kpmtsbm = kpmtsbm_area;
        int i;

        ASSERT(cpu_kstat);
        if (rw == KSTAT_READ) {
                for (i = 0; i < NCPU; cpu_kstat++, tsbm++, kpmtsbm++, i++) {
                        cpu_kstat->sf_itlb_misses = tsbm->itlb_misses;
                        cpu_kstat->sf_dtlb_misses = tsbm->dtlb_misses;
                        cpu_kstat->sf_utsb_misses = tsbm->utsb_misses -
                            tsbm->uprot_traps;
                        cpu_kstat->sf_ktsb_misses = tsbm->ktsb_misses +
                            kpmtsbm->kpm_tsb_misses - tsbm->kprot_traps;

                        if (tsbm->itlb_misses > 0 && tsbm->dtlb_misses > 0) {
                                cpu_kstat->sf_tsb_hits =
                                    (tsbm->itlb_misses + tsbm->dtlb_misses) -
                                    (tsbm->utsb_misses + tsbm->ktsb_misses +
                                    kpmtsbm->kpm_tsb_misses);
                        } else {
                                cpu_kstat->sf_tsb_hits = 0;
                        }
                        cpu_kstat->sf_umod_faults = tsbm->uprot_traps;
                        cpu_kstat->sf_kmod_faults = tsbm->kprot_traps;
                }
        } else {
                /* KSTAT_WRITE is used to clear stats */
                for (i = 0; i < NCPU; tsbm++, kpmtsbm++, i++) {
                        tsbm->itlb_misses = 0;
                        tsbm->dtlb_misses = 0;
                        tsbm->utsb_misses = 0;
                        tsbm->ktsb_misses = 0;
                        tsbm->uprot_traps = 0;
                        tsbm->kprot_traps = 0;
                        kpmtsbm->kpm_dtlb_misses = 0;
                        kpmtsbm->kpm_tsb_misses = 0;
                }
        }
        return (0);
}
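
/*
 * Illustrative userland sketch (comment only, not part of this file): the
 * raw kstats created above can be read with libkstat(3LIB).  Error handling
 * is omitted and the consumer is assumed to know the layout of
 * struct sfmmu_global_stat, which is not a public interface:
 *
 *      #include <kstat.h>
 *
 *      kstat_ctl_t *kc = kstat_open();
 *      kstat_t *ksp = kstat_lookup(kc, "unix", 0, "sfmmu_global_stat");
 *      if (ksp != NULL && kstat_read(kc, ksp, NULL) != -1) {
 *              struct sfmmu_global_stat *sp = ksp->ks_data;
 *              ... inspect the counters ...
 *      }
 *      (void) kstat_close(kc);
 *
 * Writing the sfmmu_percpu_stat kstat (created KSTAT_FLAG_WRITABLE above)
 * is routed through sfmmu_kstat_percpu_update() with rw == KSTAT_WRITE,
 * which simply clears the per-CPU counters.
 */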

#ifdef DEBUG

tte_t *gorig[NCPU], *gcur[NCPU], *gnew[NCPU];

/*
 * A TTE checker.
 * *orig_old is the value we read before the cas.
 * *cur is the value returned by the cas.
 * *new is the desired value when we do the cas.
 *
 * *hmeblkp is currently unused.
 */
/* ARGSUSED */
void
chk_tte(tte_t *orig_old, tte_t *cur, tte_t *new, struct hme_blk *hmeblkp)
{
        pfn_t i, j, k;
        int cpuid = CPU->cpu_id;

        gorig[cpuid] = orig_old;
        gcur[cpuid] = cur;
        gnew[cpuid] = new;

#ifdef lint
        hmeblkp = hmeblkp;
#endif

        if (TTE_IS_VALID(orig_old)) {
                if (TTE_IS_VALID(cur)) {
                        i = TTE_TO_TTEPFN(orig_old);
                        j = TTE_TO_TTEPFN(cur);
                        k = TTE_TO_TTEPFN(new);
                        if (i != j) {
                                /* remap error? */
                                panic("chk_tte: bad pfn, 0x%lx, 0x%lx", i, j);
                        }

                        if (i != k) {
                                /* remap error? */
                                panic("chk_tte: bad pfn2, 0x%lx, 0x%lx", i, k);
                        }
                } else {
                        if (TTE_IS_VALID(new)) {
                                panic("chk_tte: invalid cur? ");
                        }

                        i = TTE_TO_TTEPFN(orig_old);
                        k = TTE_TO_TTEPFN(new);
                        if (i != k) {
                                panic("chk_tte: bad pfn3, 0x%lx, 0x%lx", i, k);
                        }
                }
        } else {
                if (TTE_IS_VALID(cur)) {
                        j = TTE_TO_TTEPFN(cur);
                        if (TTE_IS_VALID(new)) {
                                k = TTE_TO_TTEPFN(new);
                                if (j != k) {
                                        panic("chk_tte: bad pfn4, 0x%lx, 0x%lx",
                                            j, k);
                                }
                        } else {
                                panic("chk_tte: why here?");
                        }
                } else {
                        if (!TTE_IS_VALID(new)) {
                                panic("chk_tte: why here2 ?");
                        }
                }
        }
}

#endif /* DEBUG */

extern void prefetch_tsbe_read(struct tsbe *);
extern void prefetch_tsbe_write(struct tsbe *);

/*
 * We want to prefetch 7 cache lines ahead for our read prefetch.  This gives
 * us optimal performance on Cheetah+.  You can only have 8 outstanding
 * prefetches at any one time, so we opted for 7 read prefetches and 1 write
 * prefetch to make the best use of the prefetch capability.
 */
#define TSBE_PREFETCH_STRIDE (7)
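
/*
 * Worked example (comment only): a TSBE is 16 bytes, so four of them share
 * one 64-byte P$ line.  sfmmu_copy_tsb() below issues a read prefetch only
 * on entries where (i & 3) == 0, i.e. once per cache line, and only while
 *
 *      i < last_prefetch = old_entries - (4 * (TSBE_PREFETCH_STRIDE + 1))
 *
 * For a 512-entry TSB that is i < 512 - 32 = 480.  Assuming, per the comment
 * above, that prefetch_tsbe_read() reaches TSBE_PREFETCH_STRIDE cache lines
 * beyond its argument, the cutoff keeps those prefetches from running off
 * the end of the old TSB.  Note also that entries holding 64K/512K (and
 * 32M/256M) TTEs are skipped rather than remapped; since the TSB is only a
 * cache, such entries are simply reloaded from the hme hash on the next
 * TSB miss.
 */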

void
sfmmu_copy_tsb(struct tsb_info *old_tsbinfo, struct tsb_info *new_tsbinfo)
{
        int old_bytes = TSB_BYTES(old_tsbinfo->tsb_szc);
        int new_bytes = TSB_BYTES(new_tsbinfo->tsb_szc);
        int old_entries = TSB_ENTRIES(old_tsbinfo->tsb_szc);
        int new_entries = TSB_ENTRIES(new_tsbinfo->tsb_szc);
        struct tsbe *old;
        struct tsbe *new;
        struct tsbe *new_base = (struct tsbe *)new_tsbinfo->tsb_va;
        uint64_t va;
        int new_offset;
        int i;
        int vpshift;
        int last_prefetch;

        if (old_bytes == new_bytes) {
                bcopy(old_tsbinfo->tsb_va, new_tsbinfo->tsb_va, new_bytes);
        } else {

                /*
                 * A TSBE is 16 bytes which means there are four TSBE's per
                 * P$ line (64 bytes), thus every 4 TSBE's we prefetch.
                 */
                old = (struct tsbe *)old_tsbinfo->tsb_va;
                last_prefetch = old_entries - (4*(TSBE_PREFETCH_STRIDE+1));
                for (i = 0; i < old_entries; i++, old++) {
                        if (((i & (4-1)) == 0) && (i < last_prefetch))
                                prefetch_tsbe_read(old);
                        if (!old->tte_tag.tag_invalid) {
                                /*
                                 * We have a valid TTE to remap.  Check the
                                 * size.  We won't remap 64K or 512K TTEs
                                 * because they span more than one TSB entry
                                 * and are indexed using an 8K virt. page.
                                 * Ditto for 32M and 256M TTEs.
                                 */
                                if (TTE_CSZ(&old->tte_data) == TTE64K ||
                                    TTE_CSZ(&old->tte_data) == TTE512K)
                                        continue;
                                if (mmu_page_sizes == max_mmu_page_sizes) {
                                        if (TTE_CSZ(&old->tte_data) == TTE32M ||
                                            TTE_CSZ(&old->tte_data) == TTE256M)
                                                continue;
                                }

                                /* clear the lower 22 bits of the va */
                                va = *(uint64_t *)old << 22;
                                /* turn va into a virtual pfn */
                                va >>= 22 - TSB_START_SIZE;
                                /*
                                 * or in bits from the offset in the tsb
                                 * to get the real virtual pfn.  These
                                 * correspond to bits [21:13] in the va.
                                 */
                                vpshift =
                                    TTE_BSZS_SHIFT(TTE_CSZ(&old->tte_data)) &
                                    0x1ff;
                                va |= (i << vpshift);
                                va >>= vpshift;
                                new_offset = va & (new_entries - 1);
                                new = new_base + new_offset;
                                prefetch_tsbe_write(new);
                                *new = *old;
                        }
                }
        }
}

/*
 * Unused in sfmmu.
 */
void
hat_dump(void)
{
}

/*
 * Called when a thread is exiting and we have switched to the kernel address
 * space.  Perform the same VM initialization resume() uses when switching
 * processes.
 *
 * Note that sfmmu_load_mmustate() is currently a no-op for kernel threads, but
 * we call it anyway in case the semantics change in the future.
 */
/*ARGSUSED*/
void
hat_thread_exit(kthread_t *thd)
{
        uint64_t pgsz_cnum;
        uint_t pstate_save;

        ASSERT(thd->t_procp->p_as == &kas);

        pgsz_cnum = KCONTEXT;
#ifdef sun4u
        pgsz_cnum |= (ksfmmup->sfmmu_cext << CTXREG_EXT_SHIFT);
#endif
        /*
         * We must disable interrupts here because sfmmu_load_mmustate()
         * expects its caller to have done so and would panic otherwise.
         */
        pstate_save = sfmmu_disable_intrs();
        sfmmu_setctx_sec(pgsz_cnum);
        sfmmu_load_mmustate(ksfmmup);
        sfmmu_enable_intrs(pstate_save);
}
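
/*
 * Illustrative note (comment only): on sun4u the value written to the
 * secondary context register above packs the kernel context number together
 * with the kernel hat's page-size extension bits,
 *
 *      pgsz_cnum = KCONTEXT | (ksfmmup->sfmmu_cext << CTXREG_EXT_SHIFT);
 *
 * so the single sfmmu_setctx_sec() call restores both the kernel context
 * and the page-size programming that goes with it.
 */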