/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * VM - Hardware Address Translation management for Spitfire MMU.
 *
 * This file implements the machine specific hardware translation
 * needed by the VM system.  The machine independent interface is
 * described in <vm/hat.h> while the machine dependent interface
 * and data structures are described in <vm/hat_sfmmu.h>.
 *
 * The hat layer manages the address translation hardware as a cache
 * driven by calls from the higher levels in the VM system.
 */

#include <sys/types.h>
#include <vm/hat.h>
#include <vm/hat_sfmmu.h>
#include <vm/page.h>
#include <sys/pte.h>
#include <sys/systm.h>
#include <sys/mman.h>
#include <sys/sysmacros.h>
#include <sys/machparam.h>
#include <sys/vtrace.h>
#include <sys/kmem.h>
#include <sys/mmu.h>
#include <sys/cmn_err.h>
#include <sys/cpu.h>
#include <sys/cpuvar.h>
#include <sys/debug.h>
#include <sys/lgrp.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/vmsystm.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_kp.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kpm.h>
#include <vm/rm.h>
#include <sys/t_lock.h>
#include <sys/obpdefs.h>
#include <sys/vm_machparam.h>
#include <sys/var.h>
#include <sys/trap.h>
#include <sys/machtrap.h>
#include <sys/scb.h>
#include <sys/bitmap.h>
#include <sys/machlock.h>
#include <sys/membar.h>
#include <sys/atomic.h>
#include <sys/cpu_module.h>
#include <sys/prom_debug.h>
#include <sys/ksynch.h>
#include <sys/mem_config.h>
#include <sys/mem_cage.h>
#include <sys/dtrace.h>
#include <vm/vm_dep.h>
#include <vm/xhat_sfmmu.h>
#include <sys/fpu/fpusystm.h>

#if defined(SF_ERRATA_57)
extern caddr_t errata57_limit;
#endif

#define	HME8BLK_SZ_RND		((roundup(HME8BLK_SZ, sizeof (int64_t))) /  \
				(sizeof (int64_t)))
#define	HBLK_RESERVE		((struct hme_blk *)hblk_reserve)

#define	HBLK_RESERVE_CNT	128
#define	HBLK_RESERVE_MIN	20

static struct hme_blk		*freehblkp;
static kmutex_t			freehblkp_lock;
static int			freehblkcnt;

static int64_t			hblk_reserve[HME8BLK_SZ_RND];
static kmutex_t			hblk_reserve_lock;
static kthread_t		*hblk_reserve_thread;

static nucleus_hblk8_info_t	nucleus_hblk8;
static nucleus_hblk1_info_t	nucleus_hblk1;

/*
 * SFMMU specific hat functions
 */
void	hat_pagecachectl(struct page *, int);

/* flags for hat_pagecachectl */
#define	HAT_CACHE	0x1
#define	HAT_UNCACHE	0x2
#define	HAT_TMPNC	0x4

/*
 * Flag to allow the creation of non-cacheable translations
 * to system memory.  It is off by default.  At the moment this
 * flag is used by the ecache error injector.  The error injector
 * will turn it on when creating such a translation then shut it
 * off when it's finished.
 */

int	sfmmu_allow_nc_trans = 0;

/*
 * Flag to disable large page support.
 *	value of 1 => disable all large pages.
 *	bits 1, 2, and 3 are to disable 64K, 512K and 4M pages respectively.
 *
 * For example, use the value 0x4 to disable 512K pages.
 */
#define	LARGE_PAGES_OFF		0x1

/*
 * WARNING: 512K pages MUST be disabled for ISM/DISM.  If not,
 * a process would page fault indefinitely if it tried to
 * access a 512K page.
 */
int	disable_ism_large_pages = (1 << TTE512K);
int	disable_large_pages = 0;
int	disable_auto_large_pages = 0;

/*
 * Private sfmmu data structures for hat management
 */
static struct kmem_cache *sfmmuid_cache;

/*
 * Private sfmmu data structures for ctx management
 */
static struct ctx	*ctxhand;	/* hand used while stealing ctxs */
static struct ctx	*ctxfree;	/* head of free ctx list */
static struct ctx	*ctxdirty;	/* head of dirty ctx list */

/*
 * Private sfmmu data structures for tsb management
 */
static struct kmem_cache *sfmmu_tsbinfo_cache;
static struct kmem_cache *sfmmu_tsb8k_cache;
static struct kmem_cache *sfmmu_tsb_cache[NLGRPS_MAX];
static vmem_t *kmem_tsb_arena;

/*
 * sfmmu static variables for hmeblk resource management.
 */
static vmem_t *hat_memload1_arena; /* HAT translation arena for sfmmu1_cache */
static struct kmem_cache *sfmmu8_cache;
static struct kmem_cache *sfmmu1_cache;
static struct kmem_cache *pa_hment_cache;

static kmutex_t		ctx_list_lock;	/* mutex for ctx free/dirty lists */
static kmutex_t		ism_mlist_lock;	/* mutex for ism mapping list */
/*
 * private data for ism
 */
static struct kmem_cache *ism_blk_cache;
static struct kmem_cache *ism_ment_cache;
#define	ISMID_STARTADDR	NULL

/*
 * Whether to delay TLB flushes and use Cheetah's flush-all support
 * when removing contexts from the dirty list.
 */
int delay_tlb_flush;
int disable_delay_tlb_flush;

/*
 * ``hat_lock'' is a hashed mutex lock for protecting sfmmu TSB lists,
 * HAT flags, synchronizing TLB/TSB coherency, and context management.
 * The lock is hashed on the sfmmup since the case where we need to lock
 * all processes is rare but does occur (e.g. we need to unload a shared
 * mapping from all processes using the mapping).  We have a lot of buckets,
 * and each slab of sfmmu_t's can use about a quarter of them, giving us
 * a fairly good distribution without wasting too much space and overhead
 * when we have to grab them all.
 */
#define	SFMMU_NUM_LOCK	128		/* must be power of two */
hatlock_t	hat_lock[SFMMU_NUM_LOCK];

/*
 * Hash algorithm optimized for a small number of slabs.
 *  7 is (highbit((sizeof sfmmu_t)) - 1)
 * This hash algorithm is based upon the knowledge that sfmmu_t's come from a
 * kmem_cache, and thus they will be sequential within that cache.  In
 * addition, each new slab will have a different "color" up to cache_maxcolor
 * which will skew the hashing for each successive slab which is allocated.
 * If the size of sfmmu_t changes to a larger size, this algorithm may need
 * to be revisited.
 */
#define	TSB_HASH_SHIFT_BITS (7)
#define	PTR_HASH(x) ((uintptr_t)x >> TSB_HASH_SHIFT_BITS)

#ifdef DEBUG
int tsb_hash_debug = 0;
#define	TSB_HASH(sfmmup)	(tsb_hash_debug ? &hat_lock[0] : \
				&hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)])
#else	/* DEBUG */
#define	TSB_HASH(sfmmup)	&hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)]
#endif	/* DEBUG */


/* sfmmu_replace_tsb() return codes. */
typedef enum tsb_replace_rc {
	TSB_SUCCESS,
	TSB_ALLOCFAIL,
	TSB_LOSTRACE,
	TSB_ALREADY_SWAPPED,
	TSB_CANTGROW
} tsb_replace_rc_t;

/*
 * Flags for TSB allocation routines.
 */
#define	TSB_ALLOC	0x01
#define	TSB_FORCEALLOC	0x02
#define	TSB_GROW	0x04
#define	TSB_SHRINK	0x08
#define	TSB_SWAPIN	0x10

/*
 * Support for HAT callbacks.
 */
#define	SFMMU_MAX_RELOC_CALLBACKS	10
int sfmmu_max_cb_id = SFMMU_MAX_RELOC_CALLBACKS;
static id_t sfmmu_cb_nextid = 0;
static id_t sfmmu_tsb_cb_id;
struct sfmmu_callback *sfmmu_cb_table;

/*
 * Kernel page relocation is enabled by default for non-caged
 * kernel pages.  This has little effect unless segkmem_reloc is
 * set, since by default kernel memory comes from inside the
 * kernel cage.
 */
int hat_kpr_enabled = 1;

kmutex_t	kpr_mutex;
kmutex_t	kpr_suspendlock;
kthread_t	*kreloc_thread;

/*
 * Enable VA->PA translation sanity checking on DEBUG kernels.
 * Disabled by default.  This is incompatible with some
 * drivers (error injector, RSM) so if it breaks you get
 * to keep both pieces.
 */
int hat_check_vtop = 0;

/*
 * Private sfmmu routines (prototypes)
 */
static struct hme_blk *sfmmu_shadow_hcreate(sfmmu_t *, caddr_t, int, uint_t);
static struct	hme_blk *sfmmu_hblk_alloc(sfmmu_t *, caddr_t,
			struct hmehash_bucket *, uint_t, hmeblk_tag, uint_t);
static caddr_t	sfmmu_hblk_unload(struct hat *, struct hme_blk *, caddr_t,
			caddr_t, demap_range_t *, uint_t);
static caddr_t	sfmmu_hblk_sync(struct hat *, struct hme_blk *, caddr_t,
			caddr_t, int);
static void	sfmmu_hblk_free(struct hmehash_bucket *, struct hme_blk *,
			uint64_t, struct hme_blk **);
static void	sfmmu_hblks_list_purge(struct hme_blk **);
static uint_t	sfmmu_get_free_hblk(struct hme_blk **, uint_t);
static uint_t	sfmmu_put_free_hblk(struct hme_blk *, uint_t);
static struct hme_blk *sfmmu_hblk_steal(int);
static int	sfmmu_steal_this_hblk(struct hmehash_bucket *,
			struct hme_blk *, uint64_t, uint64_t,
			struct hme_blk *);
static caddr_t	sfmmu_hblk_unlock(struct hme_blk *, caddr_t, caddr_t);

static void	sfmmu_memload_batchsmall(struct hat *, caddr_t, page_t **,
		    uint_t, uint_t, pgcnt_t);
void		sfmmu_tteload(struct hat *, tte_t *, caddr_t, page_t *,
			uint_t);
static int	sfmmu_tteload_array(sfmmu_t *, tte_t *, caddr_t, page_t **,
			uint_t);
static struct hmehash_bucket *sfmmu_tteload_acquire_hashbucket(sfmmu_t *,
					caddr_t, int);
static struct hme_blk *sfmmu_tteload_find_hmeblk(sfmmu_t *,
			struct hmehash_bucket *, caddr_t, uint_t, uint_t);
static int	sfmmu_tteload_addentry(sfmmu_t *, struct hme_blk *, tte_t *,
			caddr_t, page_t **, uint_t);
static void	sfmmu_tteload_release_hashbucket(struct hmehash_bucket *);

static int	sfmmu_pagearray_setup(caddr_t, page_t **, tte_t *, int);
pfn_t		sfmmu_uvatopfn(caddr_t, sfmmu_t *);
void		sfmmu_memtte(tte_t *, pfn_t, uint_t, int);
static void	sfmmu_vac_conflict(struct hat *, caddr_t, page_t *);
static int	sfmmu_vacconflict_array(caddr_t, page_t *, int *);
static int	tst_tnc(page_t *pp, pgcnt_t);
static void	conv_tnc(page_t *pp, int);

static struct ctx *sfmmu_get_ctx(sfmmu_t *);
static void	sfmmu_free_ctx(sfmmu_t *, struct ctx *);
static void	sfmmu_free_sfmmu(sfmmu_t *);

static void	sfmmu_gettte(struct hat *, caddr_t, tte_t *);
static void	sfmmu_ttesync(struct hat *, caddr_t, tte_t *, page_t *);
static void	sfmmu_chgattr(struct hat *, caddr_t, size_t, uint_t, int);

static cpuset_t	sfmmu_pageunload(page_t *, struct sf_hment *, int);
static void	hat_pagereload(struct page *, struct page *);
static cpuset_t	sfmmu_pagesync(page_t *, struct sf_hment *, uint_t);
static void	sfmmu_page_cache_array(page_t *, int, int, pgcnt_t);
static void	sfmmu_page_cache(page_t *, int, int, int);

static void	sfmmu_tlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *,
			pfn_t, int, int, int, int);
static void	sfmmu_ismtlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *,
			pfn_t, int);
static void	sfmmu_tlb_demap(caddr_t, sfmmu_t *, struct hme_blk *, int, int);
static void	sfmmu_tlb_range_demap(demap_range_t *);
static void	sfmmu_tlb_ctx_demap(sfmmu_t *);
static void	sfmmu_tlb_all_demap(void);
static void	sfmmu_tlb_swap_ctx(sfmmu_t *, struct ctx *);
static void	sfmmu_sync_mmustate(sfmmu_t *);

static void	sfmmu_tsbinfo_setup_phys(struct tsb_info *, pfn_t);
static int	sfmmu_tsbinfo_alloc(struct tsb_info **, int, int, uint_t,
			sfmmu_t *);
static void	sfmmu_tsb_free(struct tsb_info *);
static void	sfmmu_tsbinfo_free(struct tsb_info *);
static int	sfmmu_init_tsbinfo(struct tsb_info *, int, int, uint_t,
			sfmmu_t *);

static void	sfmmu_tsb_swapin(sfmmu_t *, hatlock_t *);
static int	sfmmu_select_tsb_szc(pgcnt_t);
static void	sfmmu_mod_tsb(sfmmu_t *, caddr_t, tte_t *, int);
#define		sfmmu_load_tsb(sfmmup, vaddr, tte, szc)	\
	sfmmu_mod_tsb(sfmmup, vaddr, tte, szc)
#define		sfmmu_unload_tsb(sfmmup, vaddr, szc)	\
	sfmmu_mod_tsb(sfmmup, vaddr, NULL, szc)
static void	sfmmu_copy_tsb(struct tsb_info *, struct tsb_info *);
static tsb_replace_rc_t sfmmu_replace_tsb(sfmmu_t *, struct tsb_info *, uint_t,
    hatlock_t *, uint_t);
static void	sfmmu_size_tsb(sfmmu_t *, int, uint64_t, uint64_t, int);

static void	sfmmu_cache_flush(pfn_t, int);
void		sfmmu_cache_flushcolor(int, pfn_t);
static caddr_t	sfmmu_hblk_chgattr(sfmmu_t *, struct hme_blk *, caddr_t,
			caddr_t, demap_range_t *, uint_t, int);

static uint64_t	sfmmu_vtop_attr(uint_t, int mode, tte_t *);
static uint_t	sfmmu_ptov_attr(tte_t *);
static caddr_t	sfmmu_hblk_chgprot(sfmmu_t *, struct hme_blk *, caddr_t,
			caddr_t, demap_range_t *, uint_t);
static uint_t	sfmmu_vtop_prot(uint_t, uint_t *);
static int	sfmmu_idcache_constructor(void *, void *, int);
static void	sfmmu_idcache_destructor(void *, void *);
static int	sfmmu_hblkcache_constructor(void *, void *, int);
static void	sfmmu_hblkcache_destructor(void *, void *);
static void	sfmmu_hblkcache_reclaim(void *);
static void	sfmmu_shadow_hcleanup(sfmmu_t *, struct hme_blk *,
			struct hmehash_bucket *);
static void	sfmmu_free_hblks(sfmmu_t *, caddr_t, caddr_t, int);

static void	sfmmu_reuse_ctx(struct ctx *, sfmmu_t *);
static void	sfmmu_disallow_ctx_steal(sfmmu_t *);
static void	sfmmu_allow_ctx_steal(sfmmu_t *);

static void	sfmmu_rm_large_mappings(page_t *, int);

static void	hat_lock_init(void);
static void	hat_kstat_init(void);
static int	sfmmu_kstat_percpu_update(kstat_t *ksp, int rw);
static void	sfmmu_check_page_sizes(sfmmu_t *, int);
static int	fnd_mapping_sz(page_t *);
static void	iment_add(struct ism_ment *, struct hat *);
static void	iment_sub(struct ism_ment *, struct hat *);
static pgcnt_t	ism_tsb_entries(sfmmu_t *, int szc);
extern void	sfmmu_setup_tsbinfo(sfmmu_t *);
extern void	sfmmu_clear_utsbinfo(void);

/* kpm prototypes */
static caddr_t	sfmmu_kpm_mapin(page_t *);
static void	sfmmu_kpm_mapout(page_t *, caddr_t);
static int	sfmmu_kpme_lookup(struct kpme *, page_t *);
static void	sfmmu_kpme_add(struct kpme *, page_t *);
static void	sfmmu_kpme_sub(struct kpme *, page_t *);
static caddr_t	sfmmu_kpm_getvaddr(page_t *, int *);
static int	sfmmu_kpm_fault(caddr_t, struct memseg *, page_t *);
static int	sfmmu_kpm_fault_small(caddr_t, struct memseg *, page_t *);
static void	sfmmu_kpm_vac_conflict(page_t *, caddr_t);
static void	sfmmu_kpm_pageunload(page_t *);
static void	sfmmu_kpm_vac_unload(page_t *, caddr_t);
static void	sfmmu_kpm_demap_large(caddr_t);
static void	sfmmu_kpm_demap_small(caddr_t);
static void	sfmmu_kpm_demap_tlbs(caddr_t, int);
static void	sfmmu_kpm_hme_unload(page_t *);
static kpm_hlk_t *sfmmu_kpm_kpmp_enter(page_t *, pgcnt_t);
static void	sfmmu_kpm_kpmp_exit(kpm_hlk_t *kpmp);
static void	sfmmu_kpm_page_cache(page_t *, int, int);

/* kpm globals */
#ifdef DEBUG
/*
 * Enable trap level tsbmiss handling
 */
int	kpm_tsbmtl = 1;

/*
 * Flush the TLB on kpm mapout.  Note: Xcalls are used (again) for the
 * required TLB shootdowns in this case, so handle w/ care.  Off by default.
 */
int	kpm_tlb_flush;
#endif	/* DEBUG */

static void	*sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *, size_t, int);

#ifdef DEBUG
static void	sfmmu_check_hblk_flist();
#endif

/*
 * Semi-private sfmmu data structures.  Some of them are initialized in
 * startup or in hat_init.
 * Some of them are private but accessed by assembly code or mach_sfmmu.c.
 */
struct hmehash_bucket *uhme_hash;	/* user hmeblk hash table */
struct hmehash_bucket *khme_hash;	/* kernel hmeblk hash table */
uint64_t	uhme_hash_pa;		/* PA of uhme_hash */
uint64_t	khme_hash_pa;		/* PA of khme_hash */
int 		uhmehash_num;		/* # of buckets in user hash table */
int 		khmehash_num;		/* # of buckets in kernel hash table */
struct ctx	*ctxs;			/* used by <machine/mmu.c> */
uint_t		nctxs;			/* total number of contexts */

int		cache;			/* describes system cache */

caddr_t		ktsb_base;		/* kernel 8k-indexed tsb base address */
uint64_t	ktsb_pbase;		/* kernel 8k-indexed tsb phys address */
int		ktsb_szcode;		/* kernel 8k-indexed tsb size code */
int		ktsb_sz;		/* kernel 8k-indexed tsb size */

caddr_t		ktsb4m_base;		/* kernel 4m-indexed tsb base address */
uint64_t	ktsb4m_pbase;		/* kernel 4m-indexed tsb phys address */
int		ktsb4m_szcode;		/* kernel 4m-indexed tsb size code */
int		ktsb4m_sz;		/* kernel 4m-indexed tsb size */

uint64_t	kpm_tsbbase;		/* kernel seg_kpm 4M TSB base address */
int		kpm_tsbsz;		/* kernel seg_kpm 4M TSB size code */
uint64_t	kpmsm_tsbbase;		/* kernel seg_kpm 8K TSB base address */
int		kpmsm_tsbsz;		/* kernel seg_kpm 8K TSB size code */

#ifndef sun4v
int		utsb_dtlb_ttenum = -1;	/* index in TLB for utsb locked TTE */
int		utsb4m_dtlb_ttenum = -1; /* index in TLB for 4M TSB TTE */
int		dtlb_resv_ttenum;	/* index in TLB of first reserved TTE */
caddr_t		utsb_vabase;		/* reserved kernel virtual memory */
caddr_t		utsb4m_vabase;		/* for trap handler TSB accesses */
#endif /* sun4v */
uint64_t	tsb_alloc_bytes = 0;	/* bytes allocated to TSBs */
vmem_t		*kmem_tsb_default_arena[NLGRPS_MAX];	/* For dynamic TSBs */

/*
 * Size to use for TSB slabs.  Future platforms that support page sizes
 * larger than 4M may wish to change these values, and provide their own
 * assembly macros for building and decoding the TSB base register contents.
 */
uint_t	tsb_slab_size = MMU_PAGESIZE4M;
uint_t	tsb_slab_shift = MMU_PAGESHIFT4M;
uint_t	tsb_slab_ttesz = TTE4M;
uint_t	tsb_slab_mask = 0x1ff;	/* 4M page alignment for 8K pfn */

/* largest TSB size to grow to, will be smaller on smaller memory systems */
int	tsb_max_growsize = UTSB_MAX_SZCODE;

/*
 * Tunable parameters dealing with TSB policies.
 */

/*
 * This undocumented tunable forces all 8K TSBs to be allocated from
 * the kernel heap rather than from the kmem_tsb_default_arena arenas.
 */
#ifdef	DEBUG
int	tsb_forceheap = 0;
#endif	/* DEBUG */

/*
 * Decide whether to use per-lgroup arenas, or one global set of
 * TSB arenas.  The default is not to break up per-lgroup, since
 * most platforms don't recognize any tangible benefit from it.
 */
int	tsb_lgrp_affinity = 0;

/*
 * Used for growing the TSB based on the process RSS.
 * tsb_rss_factor is based on the smallest TSB, and is
 * shifted by the TSB size to determine if we need to grow.
 * The default will grow the TSB if the number of TTEs for
 * this page size exceeds 75% of the number of TSB entries,
 * which should _almost_ eliminate all conflict misses
 * (at the expense of using up lots and lots of memory).
 */
#define	TSB_RSS_FACTOR		(TSB_ENTRIES(TSB_MIN_SZCODE) * 0.75)
#define	SFMMU_RSS_TSBSIZE(tsbszc)	(tsb_rss_factor << tsbszc)
#define	SELECT_TSB_SIZECODE(pgcnt) ( \
	(enable_tsb_rss_sizing)? sfmmu_select_tsb_szc(pgcnt) : \
	default_tsb_size)
#define	TSB_OK_SHRINK()	\
	(tsb_alloc_bytes > tsb_alloc_hiwater || freemem < desfree)
#define	TSB_OK_GROW()	\
	(tsb_alloc_bytes < tsb_alloc_hiwater && freemem > desfree)

int	enable_tsb_rss_sizing = 1;
int	tsb_rss_factor	= (int)TSB_RSS_FACTOR;

/* which TSB size code to use for new address spaces or if rss sizing off */
int default_tsb_size = TSB_8K_SZCODE;

static uint64_t tsb_alloc_hiwater; /* limit TSB reserved memory */
uint64_t tsb_alloc_hiwater_factor; /* tsb_alloc_hiwater = physmem / this */
#define	TSB_ALLOC_HIWATER_FACTOR_DEFAULT	32

#ifdef DEBUG
static int tsb_random_size = 0;	/* set to 1 to test random tsb sizes on alloc */
static int tsb_grow_stress = 0;	/* if set to 1, keep replacing TSB w/ random */
static int tsb_alloc_mtbf = 0;	/* fail allocation every n attempts */
static int tsb_alloc_fail_mtbf = 0;
static int tsb_alloc_count = 0;
#endif /* DEBUG */

/* if set to 1, will remap valid TTEs when growing TSB. */
int tsb_remap_ttes = 1;

/*
 * If we have more than this many mappings, allocate a second TSB.
 * This default is chosen because the I/D fully associative TLBs are
 * assumed to have at least 8 available entries.  Platforms with a
 * larger fully-associative TLB could probably override the default.
 */
int tsb_sectsb_threshold = 8;

/*
 * kstat data
 */
struct sfmmu_global_stat sfmmu_global_stat;
struct sfmmu_tsbsize_stat sfmmu_tsbsize_stat;

/*
 * Global data
 */
sfmmu_t 	*ksfmmup;		/* kernel's hat id */
struct ctx 	*kctx;			/* kernel's context */

#ifdef DEBUG
static void	chk_tte(tte_t *, tte_t *, tte_t *, struct hme_blk *);
#endif

/* sfmmu locking operations */
static kmutex_t *sfmmu_mlspl_enter(struct page *, int);
static int	sfmmu_mlspl_held(struct page *, int);

static kmutex_t *sfmmu_page_enter(page_t *);
static void	sfmmu_page_exit(kmutex_t *);
static int	sfmmu_page_spl_held(struct page *);

/* sfmmu internal locking operations - accessed directly */
static void	sfmmu_mlist_reloc_enter(page_t *, page_t *,
				kmutex_t **, kmutex_t **);
static void	sfmmu_mlist_reloc_exit(kmutex_t *, kmutex_t *);
static hatlock_t *
		sfmmu_hat_enter(sfmmu_t *);
static hatlock_t *
		sfmmu_hat_tryenter(sfmmu_t *);
static void	sfmmu_hat_exit(hatlock_t *);
static void	sfmmu_hat_lock_all(void);
static void	sfmmu_hat_unlock_all(void);
static void	sfmmu_ismhat_enter(sfmmu_t *, int);
static void	sfmmu_ismhat_exit(sfmmu_t *, int);

/*
 * Array of mutexes protecting a page's mapping list and p_nrm field.
 *
 * The hash function looks complicated, but is made up so that:
 *
 * "pp" not shifted, so adjacent pp values will hash to different cache lines
 *  (8 byte alignment * 8 bytes/mutex == 64 byte coherency subblock)
 *
 * "pp" >> mml_shift, incorporates more source bits into the hash result
 *
 * "& (mml_table_size - 1)", should be faster than using remainder "%"
 *
 * Hopefully, mml_table, mml_table_size and mml_shift are all in the same
 * cacheline, since they get declared next to each other below.
 * We'll trust ld not to do something random.
 */
#ifdef	DEBUG
int	mlist_hash_debug = 0;
#define	MLIST_HASH(pp)	(mlist_hash_debug ? &mml_table[0] : \
	&mml_table[((uintptr_t)(pp) + \
	((uintptr_t)(pp) >> mml_shift)) & (mml_table_sz - 1)])
#else	/* !DEBUG */
#define	MLIST_HASH(pp)   &mml_table[ \
	((uintptr_t)(pp) + ((uintptr_t)(pp) >> mml_shift)) & (mml_table_sz - 1)]
#endif	/* !DEBUG */

kmutex_t		*mml_table;
uint_t			mml_table_sz;	/* must be a power of 2 */
uint_t			mml_shift;	/* log2(mml_table_sz) + 3 for align */

/*
 * kpm_page lock hash.
 * All slots should be used equally and 2 adjacent kpm_page_t's
 * shouldn't have their mutexes in the same cache line.
 */
#ifdef	DEBUG
int	kpmp_hash_debug = 0;
#define	KPMP_HASH(kpp)	(kpmp_hash_debug ? &kpmp_table[0] : &kpmp_table[ \
	((uintptr_t)(kpp) + ((uintptr_t)(kpp) >> kpmp_shift)) \
	& (kpmp_table_sz - 1)])
#else	/* !DEBUG */
#define	KPMP_HASH(kpp)	&kpmp_table[ \
	((uintptr_t)(kpp) + ((uintptr_t)(kpp) >> kpmp_shift)) \
	& (kpmp_table_sz - 1)]
#endif	/* DEBUG */

kpm_hlk_t	*kpmp_table;
uint_t		kpmp_table_sz;	/* must be a power of 2 */
uchar_t		kpmp_shift;

#ifdef	DEBUG
#define	KPMP_SHASH(kpp)	(kpmp_hash_debug ? &kpmp_stable[0] : &kpmp_stable[ \
	(((uintptr_t)(kpp) << kpmp_shift) + (uintptr_t)(kpp)) \
	& (kpmp_stable_sz - 1)])
#else	/* !DEBUG */
#define	KPMP_SHASH(kpp)	&kpmp_stable[ \
	(((uintptr_t)(kpp) << kpmp_shift) + (uintptr_t)(kpp)) \
	& (kpmp_stable_sz - 1)]
#endif	/* DEBUG */

kpm_shlk_t	*kpmp_stable;
uint_t		kpmp_stable_sz;	/* must be a power of 2 */

/*
 * SPL_HASH was improved to avoid false cache line sharing
 */
#define	SPL_TABLE_SIZE	128
#define	SPL_MASK	(SPL_TABLE_SIZE - 1)
#define	SPL_SHIFT	7		/* log2(SPL_TABLE_SIZE) */

#define	SPL_INDEX(pp) \
	((((uintptr_t)(pp) >> SPL_SHIFT) ^ \
	((uintptr_t)(pp) >> (SPL_SHIFT << 1))) & \
	(SPL_TABLE_SIZE - 1))

#define	SPL_HASH(pp)    \
	(&sfmmu_page_lock[SPL_INDEX(pp) & SPL_MASK].pad_mutex)

static	pad_mutex_t	sfmmu_page_lock[SPL_TABLE_SIZE];


/*
 * hat_unload_callback() will group together callbacks in order
 * to avoid xt_sync() calls.  This is the maximum size of the group.
 */
#define	MAX_CB_ADDR	32

#ifdef DEBUG

/*
 * Debugging trace ring buffer for stolen and freed ctxs.  The
 * stolen_ctxs[] array is protected by the ctx_trace_mutex.
 */
struct ctx_trace stolen_ctxs[TRSIZE];
struct ctx_trace *ctx_trace_first = &stolen_ctxs[0];
struct ctx_trace *ctx_trace_last = &stolen_ctxs[TRSIZE-1];
struct ctx_trace *ctx_trace_ptr = &stolen_ctxs[0];
kmutex_t ctx_trace_mutex;
uint_t	num_ctx_stolen = 0;

int	ism_debug = 0;

#endif /* DEBUG */

tte_t hw_tte;
static ulong_t sfmmu_dmr_maxbit = DMR_MAXBIT;

/*
 * kpm virtual address to physical address
 */
#define	SFMMU_KPM_VTOP(vaddr, paddr) {					\
	uintptr_t r, v;							\
									\
	r = ((vaddr) - kpm_vbase) >> (uintptr_t)kpm_size_shift;	\
	(paddr) = (vaddr) - kpm_vbase;					\
	if (r != 0) {							\
		v = ((uintptr_t)(vaddr) >> MMU_PAGESHIFT) &		\
			vac_colors_mask;				\
		(paddr) -= r << kpm_size_shift;				\
		if (r > v)						\
			(paddr) += (r - v) << MMU_PAGESHIFT;		\
		else							\
			(paddr) -= r << MMU_PAGESHIFT;			\
	}								\
}

/*
 * Wrapper for vmem_xalloc since vmem_create only allows limited
 * parameters for vm_source_alloc functions.  This function allows us
 * to specify alignment consistent with the size of the object being
 * allocated.
 */
static void *
sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *vmp, size_t size, int vmflag)
{
	return (vmem_xalloc(vmp, size, size, 0, 0, NULL, NULL, vmflag));
}

/* Common code for setting tsb_alloc_hiwater. */
#define	SFMMU_SET_TSB_ALLOC_HIWATER(pages)	tsb_alloc_hiwater = \
		ptob(pages) / tsb_alloc_hiwater_factor

/*
 * Set tsb_max_growsize to allow at most all of physical memory to be mapped by
 * a single TSB.  physmem is the number of physical pages so we need physmem 8K
 * TTEs to represent all those physical pages.  We round this up by using
 * 1<<highbit().  To figure out which size code to use, remember that the size
 * code is just an amount to shift the smallest TSB size to get the size of
 * this TSB.  So we subtract that size, TSB_START_SIZE, from highbit() (or
 * highbit() - 1) to get the size code for the smallest TSB that can represent
 * all of physical memory, while erring on the side of too much.
 *
 * If the computed size code is less than the current tsb_max_growsize, we set
 * tsb_max_growsize to the computed size code.  In the case where the computed
 * size code is greater than tsb_max_growsize, we have these restrictions that
 * apply to increasing tsb_max_growsize:
 *	1) TSBs can't grow larger than the TSB slab size
 *	2) TSBs can't grow larger than UTSB_MAX_SZCODE.
 */
#define	SFMMU_SET_TSB_MAX_GROWSIZE(pages) {				\
	int	i, szc;							\
									\
	i = highbit(pages);						\
	if ((1 << (i - 1)) == (pages))					\
		i--;		/* 2^n case, round down */		\
	szc = i - TSB_START_SIZE;					\
	if (szc < tsb_max_growsize)					\
		tsb_max_growsize = szc;					\
	else if ((szc > tsb_max_growsize) &&				\
	    (szc <= tsb_slab_shift - (TSB_START_SIZE + TSB_ENTRY_SHIFT))) \
		tsb_max_growsize = MIN(szc, UTSB_MAX_SZCODE);		\
}

/*
 * Given a pointer to an sfmmu and a TTE size code, return a pointer to the
 * tsb_info which handles that TTE size.
 */
#define	SFMMU_GET_TSBINFO(tsbinfop, sfmmup, tte_szc)	\
	(tsbinfop) = (sfmmup)->sfmmu_tsb;		\
	ASSERT(sfmmu_hat_lock_held(sfmmup));		\
	if ((tte_szc) >= TTE4M)				\
		(tsbinfop) = (tsbinfop)->tsb_next;
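/*
 * Illustrative usage sketch (not part of the original source): as the
 * macro above shows, the first tsb_info on sfmmu_tsb handles the small
 * page sizes and, when a second TSB exists, 4M and larger TTEs are
 * directed to it.  A caller holding the HAT lock would typically do:
 *
 *	struct tsb_info *tsbinfop;
 *
 *	SFMMU_GET_TSBINFO(tsbinfop, sfmmup, TTE4M);
 *	-- tsbinfop now points to the tsb_info used for 4M mappings --
 */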
/*
 * Return the number of mappings present in the HAT
 * for a particular process and page size.
 */
#define	SFMMU_TTE_CNT(sfmmup, szc)				\
	(sfmmup)->sfmmu_iblk?					\
	    (sfmmup)->sfmmu_ismttecnt[(szc)] +			\
	    (sfmmup)->sfmmu_ttecnt[(szc)] :			\
	    (sfmmup)->sfmmu_ttecnt[(szc)];

/*
 * Macro to use to unload entries from the TSB.
 * It has knowledge of which page sizes get replicated in the TSB
 * and will call the appropriate unload routine for the appropriate size.
 */
#define	SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp)				\
{									\
	int ttesz = get_hblk_ttesz(hmeblkp);				\
	if (ttesz == TTE8K || ttesz == TTE4M) {				\
		sfmmu_unload_tsb(sfmmup, addr, ttesz);			\
	} else {							\
		caddr_t sva = (caddr_t)get_hblk_base(hmeblkp);		\
		caddr_t eva = sva + get_hblk_span(hmeblkp);		\
		ASSERT(addr >= sva && addr < eva);			\
		sfmmu_unload_tsb_range(sfmmup, sva, eva, ttesz);	\
	}								\
}


/* Update tsb_alloc_hiwater after memory is configured. */
/*ARGSUSED*/
static void
sfmmu_update_tsb_post_add(void *arg, pgcnt_t delta_pages)
{
	/* Assumes physmem has already been updated. */
	SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
	SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
}

/*
 * Update tsb_alloc_hiwater before memory is deleted.  We'll do nothing here
 * and update tsb_alloc_hiwater and tsb_max_growsize after the memory is
 * deleted.
 */
/*ARGSUSED*/
static int
sfmmu_update_tsb_pre_del(void *arg, pgcnt_t delta_pages)
{
	return (0);
}

/* Update tsb_alloc_hiwater after memory fails to be unconfigured. */
/*ARGSUSED*/
static void
sfmmu_update_tsb_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
{
	/*
	 * Whether the delete was cancelled or not, just go ahead and update
	 * tsb_alloc_hiwater and tsb_max_growsize.
	 */
	SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
	SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
}

static kphysm_setup_vector_t sfmmu_update_tsb_vec = {
	KPHYSM_SETUP_VECTOR_VERSION,	/* version */
	sfmmu_update_tsb_post_add,	/* post_add */
	sfmmu_update_tsb_pre_del,	/* pre_del */
	sfmmu_update_tsb_post_del	/* post_del */
};


/*
 * HME_BLK HASH PRIMITIVES
 */

/*
 * Enter a hme on the mapping list for page pp.
 * When large pages are more prevalent in the system we might want to
 * keep the mapping list in ascending order by the hment size.  For now,
 * small pages are more frequent, so don't slow it down.
 */
#define	HME_ADD(hme, pp)					\
{								\
	ASSERT(sfmmu_mlist_held(pp));				\
								\
	hme->hme_prev = NULL;					\
	hme->hme_next = pp->p_mapping;				\
	hme->hme_page = pp;					\
	if (pp->p_mapping) {					\
		((struct sf_hment *)(pp->p_mapping))->hme_prev = hme;\
		ASSERT(pp->p_share > 0);			\
	} else  {						\
		/* EMPTY */					\
		ASSERT(pp->p_share == 0);			\
	}							\
	pp->p_mapping = hme;					\
	pp->p_share++;						\
}
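/*
 * Illustrative sketch (not part of the original source): with the
 * mapping list lock held, the list that HME_ADD/HME_SUB maintain is
 * walked via the hme_next links, e.g.
 *
 *	struct sf_hment *sfhme;
 *
 *	for (sfhme = pp->p_mapping; sfhme != NULL; sfhme = sfhme->hme_next)
 *		-- examine one mapping of pp --
 */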
/*
 * Remove a hme from the mapping list for page pp.
 * If we are unmapping a large translation, we need to make sure that the
 * change is reflected in the corresponding bit of the p_index field.
 */
#define	HME_SUB(hme, pp)					\
{								\
	ASSERT(sfmmu_mlist_held(pp));				\
	ASSERT(hme->hme_page == pp || IS_PAHME(hme));		\
								\
	if (pp->p_mapping == NULL) {				\
		panic("hme_remove - no mappings");		\
	}							\
								\
	membar_stst();	/* ensure previous stores finish */	\
								\
	ASSERT(pp->p_share > 0);				\
	pp->p_share--;						\
								\
	if (hme->hme_prev) {					\
		ASSERT(pp->p_mapping != hme);			\
		ASSERT(hme->hme_prev->hme_page == pp ||		\
			IS_PAHME(hme->hme_prev));		\
		hme->hme_prev->hme_next = hme->hme_next;	\
	} else {						\
		ASSERT(pp->p_mapping == hme);			\
		pp->p_mapping = hme->hme_next;			\
		ASSERT((pp->p_mapping == NULL) ?		\
			(pp->p_share == 0) : 1);		\
	}							\
								\
	if (hme->hme_next) {					\
		ASSERT(hme->hme_next->hme_page == pp ||		\
			IS_PAHME(hme->hme_next));		\
		hme->hme_next->hme_prev = hme->hme_prev;	\
	}							\
								\
	/* zero out the entry */				\
	hme->hme_next = NULL;					\
	hme->hme_prev = NULL;					\
	hme->hme_page = NULL;					\
								\
	if (hme_size(hme) > TTE8K) {				\
		/* remove mappings for remainder of large pg */	\
		sfmmu_rm_large_mappings(pp, hme_size(hme));	\
	}							\
}

/*
 * This function returns the hment given the hme_blk and a vaddr.
 * It assumes addr has already been checked to belong to hme_blk's
 * range.
 */
#define	HBLKTOHME(hment, hmeblkp, addr)					\
{									\
	int index;							\
	HBLKTOHME_IDX(hment, hmeblkp, addr, index)			\
}

/*
 * Version of HBLKTOHME that also returns the index in hmeblkp
 * of the hment.
 */
#define	HBLKTOHME_IDX(hment, hmeblkp, addr, idx)			\
{									\
	ASSERT(in_hblk_range((hmeblkp), (addr)));			\
									\
	if (get_hblk_ttesz(hmeblkp) == TTE8K) {				\
		idx = (((uintptr_t)(addr) >> MMU_PAGESHIFT) & (NHMENTS-1)); \
	} else								\
		idx = 0;						\
									\
	(hment) = &(hmeblkp)->hblk_hme[idx];				\
}

/*
 * Disable any page sizes not supported by the CPU
 */
void
hat_init_pagesizes()
{
	int 		i;

	mmu_exported_page_sizes = 0;
	for (i = TTE8K; i < max_mmu_page_sizes; i++) {
		extern int	disable_text_largepages;
		extern int	disable_initdata_largepages;

		szc_2_userszc[i] = (uint_t)-1;
		userszc_2_szc[i] = (uint_t)-1;

		if ((mmu_exported_pagesize_mask & (1 << i)) == 0) {
			disable_large_pages |= (1 << i);
			disable_ism_large_pages |= (1 << i);
			disable_text_largepages |= (1 << i);
			disable_initdata_largepages |= (1 << i);
		} else {
			szc_2_userszc[i] = mmu_exported_page_sizes;
			userszc_2_szc[mmu_exported_page_sizes] = i;
			mmu_exported_page_sizes++;
		}
	}

	disable_auto_large_pages = disable_large_pages;

	/*
	 * Initialize mmu-specific large page sizes.
	 */
	if ((mmu_page_sizes == max_mmu_page_sizes) &&
	    (&mmu_large_pages_disabled)) {
		disable_large_pages |= mmu_large_pages_disabled(HAT_LOAD);
		disable_ism_large_pages |=
		    mmu_large_pages_disabled(HAT_LOAD_SHARE);
		disable_auto_large_pages |=
		    mmu_large_pages_disabled(HAT_LOAD_AUTOLPG);
	}
}
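/*
 * Illustrative note (not part of the original source): the disable masks
 * computed above are consulted bit-by-bit before a large mapping is
 * created, e.g.
 *
 *	if (!(disable_large_pages & (1 << TTE512K)))
 *		-- a 512K TTE may be used --
 *
 * which is exactly the test hat_devload() performs below when picking a
 * mapping size.
 */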
/*
 * Initialize the hardware address translation structures.
 */
void
hat_init(void)
{
	struct ctx	*ctx;
	struct ctx	*cur_ctx = NULL;
	int 		i;

	hat_lock_init();
	hat_kstat_init();

	/*
	 * Hardware-only bits in a TTE
	 */
	MAKE_TTE_MASK(&hw_tte);

	hat_init_pagesizes();

	/* Initialize the hash locks */
	for (i = 0; i < khmehash_num; i++) {
		mutex_init(&khme_hash[i].hmehash_mutex, NULL,
		    MUTEX_DEFAULT, NULL);
	}
	for (i = 0; i < uhmehash_num; i++) {
		mutex_init(&uhme_hash[i].hmehash_mutex, NULL,
		    MUTEX_DEFAULT, NULL);
	}
	khmehash_num--;		/* make sure counter starts from 0 */
	uhmehash_num--;		/* make sure counter starts from 0 */

	/*
	 * Initialize ctx structures and list lock.
	 * We keep two lists of ctxs.  The "free" list contains contexts
	 * ready to use.  The "dirty" list contains contexts that are OK
	 * to use after flushing the TLBs of any stale mappings.
	 */
	mutex_init(&ctx_list_lock, NULL, MUTEX_DEFAULT, NULL);
	kctx = &ctxs[KCONTEXT];
	ctx = &ctxs[NUM_LOCKED_CTXS];
	ctxhand = ctxfree = ctx;		/* head of free list */
	ctxdirty = NULL;
	for (i = NUM_LOCKED_CTXS; i < nctxs; i++) {
		cur_ctx = &ctxs[i];
		cur_ctx->ctx_flags = CTX_FREE_FLAG;
		cur_ctx->ctx_free = &ctxs[i + 1];
	}
	cur_ctx->ctx_free = NULL;		/* tail of free list */

	/*
	 * Initialize ism mapping list lock.
	 */
	mutex_init(&ism_mlist_lock, NULL, MUTEX_DEFAULT, NULL);

	sfmmuid_cache = kmem_cache_create("sfmmuid_cache", sizeof (sfmmu_t),
	    0, sfmmu_idcache_constructor, sfmmu_idcache_destructor,
	    NULL, NULL, NULL, 0);

	sfmmu_tsbinfo_cache = kmem_cache_create("sfmmu_tsbinfo_cache",
	    sizeof (struct tsb_info), 0, NULL, NULL, NULL, NULL, NULL, 0);

	/*
	 * Since we only use the tsb8k cache to "borrow" pages for TSBs
	 * from the heap when low on memory or when TSB_FORCEALLOC is
	 * specified, don't use magazines to cache them--we want to return
	 * them to the system as quickly as possible.
	 */
	sfmmu_tsb8k_cache = kmem_cache_create("sfmmu_tsb8k_cache",
	    MMU_PAGESIZE, MMU_PAGESIZE, NULL, NULL, NULL, NULL,
	    static_arena, KMC_NOMAGAZINE);

	/*
	 * Set tsb_alloc_hiwater to 1/tsb_alloc_hiwater_factor of physical
	 * memory, which corresponds to the old static reserve for TSBs.
	 * tsb_alloc_hiwater_factor defaults to 32.  This caps the amount of
	 * memory we'll allocate for TSB slabs; beyond this point TSB
	 * allocations will be taken from the kernel heap (via
	 * sfmmu_tsb8k_cache) and will be throttled as would any other kmem
	 * consumer.
	 */
	if (tsb_alloc_hiwater_factor == 0) {
		tsb_alloc_hiwater_factor = TSB_ALLOC_HIWATER_FACTOR_DEFAULT;
	}
	SFMMU_SET_TSB_ALLOC_HIWATER(physmem);

	/* Set tsb_max_growsize. */
	SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
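	/*
	 * Illustrative arithmetic (not part of the original source): with
	 * the default factor of 32, a machine with 8 GB of physical memory
	 * caps TSB slab allocations at ptob(physmem) / 32 = 256 MB.
	 */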
	/*
	 * On smaller memory systems, allocate TSB memory in 512K chunks
	 * instead of the default 4M slab size.  The trap handlers need to
	 * be patched with the final slab shift since they need to be able
	 * to construct the TSB pointer at runtime.
	 */
	if ((tsb_max_growsize <= TSB_512K_SZCODE) &&
	    !(disable_large_pages & (1 << TTE512K))) {
		tsb_slab_size = MMU_PAGESIZE512K;
		tsb_slab_shift = MMU_PAGESHIFT512K;
		tsb_slab_ttesz = TTE512K;
		tsb_slab_mask = 0x3f;	/* 512K page alignment for 8K pfn */
	}

	/*
	 * Set up memory callback to update tsb_alloc_hiwater and
	 * tsb_max_growsize.
	 */
	i = kphysm_setup_func_register(&sfmmu_update_tsb_vec, (void *) 0);
	ASSERT(i == 0);

	/*
	 * kmem_tsb_arena is the source from which large TSB slabs are
	 * drawn.  The quantum of this arena corresponds to the largest
	 * TSB size we can dynamically allocate for user processes.
	 * Currently it must also be a supported page size since we
	 * use exactly one translation entry to map each slab page.
	 *
	 * The per-lgroup kmem_tsb_default_arena arenas are the arenas from
	 * which most TSBs are allocated.  Since most TSB allocations are
	 * typically 8K we have a kmem cache we stack on top of each
	 * kmem_tsb_default_arena to speed up those allocations.
	 *
	 * Note the two-level scheme of arenas is required only
	 * because vmem_create doesn't allow us to specify alignment
	 * requirements.  If this ever changes the code could be
	 * simplified to use only one level of arenas.
	 */
	kmem_tsb_arena = vmem_create("kmem_tsb", NULL, 0, tsb_slab_size,
	    sfmmu_vmem_xalloc_aligned_wrapper, vmem_xfree, heap_arena,
	    0, VM_SLEEP);

	if (tsb_lgrp_affinity) {
		char s[50];
		for (i = 0; i < NLGRPS_MAX; i++) {
			(void) sprintf(s, "kmem_tsb_lgrp%d", i);
			kmem_tsb_default_arena[i] =
			    vmem_create(s, NULL, 0, PAGESIZE,
			    sfmmu_tsb_segkmem_alloc, sfmmu_tsb_segkmem_free,
			    kmem_tsb_arena, 0, VM_SLEEP | VM_BESTFIT);
			(void) sprintf(s, "sfmmu_tsb_lgrp%d_cache", i);
			sfmmu_tsb_cache[i] = kmem_cache_create(s, PAGESIZE,
			    PAGESIZE, NULL, NULL, NULL, NULL,
			    kmem_tsb_default_arena[i], 0);
		}
	} else {
		kmem_tsb_default_arena[0] = vmem_create("kmem_tsb_default",
		    NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc,
		    sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0,
		    VM_SLEEP | VM_BESTFIT);

		sfmmu_tsb_cache[0] = kmem_cache_create("sfmmu_tsb_cache",
		    PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL,
		    kmem_tsb_default_arena[0], 0);
	}

	sfmmu8_cache = kmem_cache_create("sfmmu8_cache", HME8BLK_SZ,
	    HMEBLK_ALIGN, sfmmu_hblkcache_constructor,
	    sfmmu_hblkcache_destructor,
	    sfmmu_hblkcache_reclaim, (void *)HME8BLK_SZ,
	    hat_memload_arena, KMC_NOHASH);

	hat_memload1_arena = vmem_create("hat_memload1", NULL, 0, PAGESIZE,
	    segkmem_alloc_permanent, segkmem_free, heap_arena, 0, VM_SLEEP);

	sfmmu1_cache = kmem_cache_create("sfmmu1_cache", HME1BLK_SZ,
	    HMEBLK_ALIGN, sfmmu_hblkcache_constructor,
	    sfmmu_hblkcache_destructor,
	    NULL, (void *)HME1BLK_SZ,
	    hat_memload1_arena, KMC_NOHASH);

	pa_hment_cache = kmem_cache_create("pa_hment_cache", PAHME_SZ,
	    0, NULL, NULL, NULL, NULL, static_arena, KMC_NOHASH);

	ism_blk_cache = kmem_cache_create("ism_blk_cache",
	    sizeof (ism_blk_t), ecache_alignsize, NULL, NULL,
	    NULL, NULL, static_arena, KMC_NOHASH);

	ism_ment_cache = kmem_cache_create("ism_ment_cache",
	    sizeof (ism_ment_t), 0, NULL, NULL,
	    NULL, NULL, NULL, 0);

	/*
	 * We grab the first hat for the kernel.
	 */
	AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER);
	kas.a_hat = hat_alloc(&kas);
	AS_LOCK_EXIT(&kas, &kas.a_lock);

	/*
	 * Initialize hblk_reserve.
	 */
	((struct hme_blk *)hblk_reserve)->hblk_nextpa =
	    va_to_pa((caddr_t)hblk_reserve);

#ifndef UTSB_PHYS
	/*
	 * Reserve some kernel virtual address space for the locked TTEs
	 * that allow us to probe the TSB from TL>0.
	 */
	utsb_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size,
	    0, 0, NULL, NULL, VM_SLEEP);
	utsb4m_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size,
	    0, 0, NULL, NULL, VM_SLEEP);
#endif

	/*
	 * The big page VAC handling code assumes VAC
	 * will not be bigger than the smallest big
	 * page, which is 64K.
	 */
	if (TTEPAGES(TTE64K) < CACHE_NUM_COLOR) {
		cmn_err(CE_PANIC, "VAC too big!");
	}

	(void) xhat_init();

	uhme_hash_pa = va_to_pa(uhme_hash);
	khme_hash_pa = va_to_pa(khme_hash);

	/*
	 * Initialize relocation locks.  kpr_suspendlock is held
	 * at PIL_MAX to prevent interrupts from pinning the holder
	 * of a suspended TTE which may access it leading to a
	 * deadlock condition.
	 */
	mutex_init(&kpr_mutex, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&kpr_suspendlock, NULL, MUTEX_SPIN, (void *)PIL_MAX);
}

/*
 * Initialize locking for the hat layer, called early during boot.
 */
static void
hat_lock_init()
{
	int i;
	struct ctx *ctx;

	/*
	 * initialize the array of mutexes protecting a page's mapping
	 * list and p_nrm field.
	 */
	for (i = 0; i < mml_table_sz; i++)
		mutex_init(&mml_table[i], NULL, MUTEX_DEFAULT, NULL);

	if (kpm_enable) {
		for (i = 0; i < kpmp_table_sz; i++) {
			mutex_init(&kpmp_table[i].khl_mutex, NULL,
			    MUTEX_DEFAULT, NULL);
		}
	}

	/*
	 * Initialize array of mutex locks that protects sfmmu fields and
	 * TSB lists.
	 */
	for (i = 0; i < SFMMU_NUM_LOCK; i++)
		mutex_init(HATLOCK_MUTEXP(&hat_lock[i]), NULL, MUTEX_DEFAULT,
		    NULL);

#ifdef	DEBUG
	mutex_init(&ctx_trace_mutex, NULL, MUTEX_DEFAULT, NULL);
#endif	/* DEBUG */

	for (ctx = ctxs, i = 0; i < nctxs; i++, ctx++) {
		rw_init(&ctx->ctx_rwlock, NULL, RW_DEFAULT, NULL);
	}
}

extern caddr_t kmem64_base, kmem64_end;

#define	SFMMU_KERNEL_MAXVA \
	(kmem64_base ? (uintptr_t)kmem64_end : (SYSLIMIT))

/*
 * Allocate a hat structure.
 * Called when an address space first uses a hat.
 */
struct hat *
hat_alloc(struct as *as)
{
	sfmmu_t *sfmmup;
	struct ctx *ctx;
	int i;
	extern uint_t get_color_start(struct as *);

	ASSERT(AS_WRITE_HELD(as, &as->a_lock));
	sfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP);
	sfmmup->sfmmu_as = as;
	sfmmup->sfmmu_flags = 0;

	if (as == &kas) {
		ctx = kctx;
		ksfmmup = sfmmup;
		sfmmup->sfmmu_cnum = ctxtoctxnum(ctx);
		ASSERT(sfmmup->sfmmu_cnum == KCONTEXT);
		sfmmup->sfmmu_cext = 0;
		ctx->ctx_sfmmu = sfmmup;
		ctx->ctx_flags = 0;
		sfmmup->sfmmu_clrstart = 0;
		sfmmup->sfmmu_tsb = NULL;
		/*
		 * hat_kern_setup() will call sfmmu_init_ktsbinfo()
		 * to setup tsb_info for ksfmmup.
		 */
	} else {

		/*
		 * Just set to invalid ctx.  When it faults, it will
		 * get a valid ctx.
		 * This would avoid the situation where we get a ctx,
		 * but it gets stolen and then we fault when we try to
		 * run and so have to get another ctx.
		 */
		sfmmup->sfmmu_cnum = INVALID_CONTEXT;
		sfmmup->sfmmu_cext = 0;
		/* initialize original physical page coloring bin */
		sfmmup->sfmmu_clrstart = get_color_start(as);
#ifdef DEBUG
		if (tsb_random_size) {
			uint32_t randval = (uint32_t)gettick() >> 4;
			int size = randval % (tsb_max_growsize + 1);

			/* chose a random tsb size for stress testing */
			(void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb, size,
			    TSB8K|TSB64K|TSB512K, 0, sfmmup);
		} else
#endif /* DEBUG */
			(void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb,
			    default_tsb_size,
			    TSB8K|TSB64K|TSB512K, 0, sfmmup);
		sfmmup->sfmmu_flags = HAT_SWAPPED;
		ASSERT(sfmmup->sfmmu_tsb != NULL);
	}
	sfmmu_setup_tsbinfo(sfmmup);
	for (i = 0; i < max_mmu_page_sizes; i++) {
		sfmmup->sfmmu_ttecnt[i] = 0;
		sfmmup->sfmmu_ismttecnt[i] = 0;
		sfmmup->sfmmu_pgsz[i] = TTE8K;
	}

	sfmmup->sfmmu_iblk = NULL;
	sfmmup->sfmmu_ismhat = 0;
	sfmmup->sfmmu_ismblkpa = (uint64_t)-1;
	if (sfmmup == ksfmmup) {
		CPUSET_ALL(sfmmup->sfmmu_cpusran);
	} else {
		CPUSET_ZERO(sfmmup->sfmmu_cpusran);
	}
	sfmmup->sfmmu_free = 0;
	sfmmup->sfmmu_rmstat = 0;
	sfmmup->sfmmu_clrbin = sfmmup->sfmmu_clrstart;
	sfmmup->sfmmu_xhat_provider = NULL;
	cv_init(&sfmmup->sfmmu_tsb_cv, NULL, CV_DEFAULT, NULL);
	return (sfmmup);
}

/*
 * hat_setup makes an address space context the current active one.
 * In sfmmu this translates to setting the secondary context with the
 * corresponding context.
 */
void
hat_setup(struct hat *sfmmup, int allocflag)
{
	struct ctx *ctx;
	uint_t ctx_num;
	hatlock_t *hatlockp;

	/* Init needs some special treatment. */
	if (allocflag == HAT_INIT) {
		/*
		 * Make sure that we have
		 * 1. a TSB
		 * 2. a valid ctx that doesn't get stolen after this point.
		 */
		hatlockp = sfmmu_hat_enter(sfmmup);

		/*
		 * Swap in the TSB.  hat_init() allocates tsbinfos without
		 * TSBs, but we need one for init, since the kernel does some
		 * special things to set up its stack and needs the TSB to
		 * resolve page faults.
		 */
		sfmmu_tsb_swapin(sfmmup, hatlockp);

		sfmmu_disallow_ctx_steal(sfmmup);

		kpreempt_disable();

		ctx = sfmmutoctx(sfmmup);
		CPUSET_ADD(sfmmup->sfmmu_cpusran, CPU->cpu_id);
		ctx_num = ctxtoctxnum(ctx);
		ASSERT(sfmmup == ctx->ctx_sfmmu);
		ASSERT(ctx_num >= NUM_LOCKED_CTXS);
		sfmmu_setctx_sec(ctx_num);
		sfmmu_load_mmustate(sfmmup);

		kpreempt_enable();

		/*
		 * Allow ctx to be stolen.
		 */
		sfmmu_allow_ctx_steal(sfmmup);
		sfmmu_hat_exit(hatlockp);
	} else {
		ASSERT(allocflag == HAT_ALLOC);

		hatlockp = sfmmu_hat_enter(sfmmup);
		kpreempt_disable();

		CPUSET_ADD(sfmmup->sfmmu_cpusran, CPU->cpu_id);
		sfmmu_setctx_sec(INVALID_CONTEXT);
		sfmmu_clear_utsbinfo();

		kpreempt_enable();
		sfmmu_hat_exit(hatlockp);
	}
}
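/*
 * Illustrative sketch (not part of the original source): address space
 * teardown calls the two phases below in order, roughly as as_free()
 * does:
 *
 *	hat_free_start(as->a_hat);	-- mark the hat as being freed --
 *	-- the address space's segments are then unmapped --
 *	hat_free_end(as->a_hat);	-- release ctx, TSBs and the sfmmu --
 */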
/*
 * Free all the translation resources for the specified address space.
 * Called from as_free when an address space is being destroyed.
 */
void
hat_free_start(struct hat *sfmmup)
{
	ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
	ASSERT(sfmmup != ksfmmup);
	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);

	sfmmup->sfmmu_free = 1;
}

void
hat_free_end(struct hat *sfmmup)
{
	int i;

	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
	if (sfmmup->sfmmu_ismhat) {
		for (i = 0; i < mmu_page_sizes; i++) {
			sfmmup->sfmmu_ttecnt[i] = 0;
			sfmmup->sfmmu_ismttecnt[i] = 0;
		}
	} else {
		/* EMPTY */
		ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0);
		ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0);
		ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0);
		ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0);
		ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
		ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
	}

	if (sfmmup->sfmmu_rmstat) {
		hat_freestat(sfmmup->sfmmu_as, NULL);
	}
	if (!delay_tlb_flush) {
		sfmmu_tlb_ctx_demap(sfmmup);
		xt_sync(sfmmup->sfmmu_cpusran);
	} else {
		SFMMU_STAT(sf_tlbflush_deferred);
	}
	sfmmu_free_ctx(sfmmup, sfmmutoctx(sfmmup));
	while (sfmmup->sfmmu_tsb != NULL) {
		struct tsb_info *next = sfmmup->sfmmu_tsb->tsb_next;
		sfmmu_tsbinfo_free(sfmmup->sfmmu_tsb);
		sfmmup->sfmmu_tsb = next;
	}
	sfmmu_free_sfmmu(sfmmup);

	kmem_cache_free(sfmmuid_cache, sfmmup);
}

/*
 * Set up any translation structures, for the specified address space,
 * that are needed or preferred when the process is being swapped in.
 */
/* ARGSUSED */
void
hat_swapin(struct hat *hat)
{
	ASSERT(hat->sfmmu_xhat_provider == NULL);
}

/*
 * Free all of the translation resources, for the specified address space,
 * that can be freed while the process is swapped out.  Called from as_swapout.
 * Also, free up the ctx that this process was using.
 */
void
hat_swapout(struct hat *sfmmup)
{
	struct hmehash_bucket *hmebp;
	struct hme_blk *hmeblkp;
	struct hme_blk *pr_hblk = NULL;
	struct hme_blk *nx_hblk;
	struct ctx *ctx;
	int cnum;
	int i;
	uint64_t hblkpa, prevpa, nx_pa;
	struct hme_blk *list = NULL;
	hatlock_t *hatlockp;
	struct tsb_info *tsbinfop;
	struct free_tsb {
		struct free_tsb *next;
		struct tsb_info *tsbinfop;
	};			/* free list of TSBs */
	struct free_tsb *freelist, *last, *next;

	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
	SFMMU_STAT(sf_swapout);

	/*
	 * There is no way to go from an as to all its translations in sfmmu.
	 * Here is one of the times when we take the big hit and traverse
	 * the hash looking for hme_blks to free up.  Not only do we free up
	 * this as's hme_blks but all those that are free.  We are obviously
	 * swapping because we need memory so let's free up as much
	 * as we can.
	 *
	 * Note that we don't flush TLB/TSB here -- it's not necessary
	 * because:
	 *  1) we free the ctx we're using and throw away the TSB(s);
	 *  2) processes aren't runnable while being swapped out.
	 */
	ASSERT(sfmmup != KHATID);
	for (i = 0; i <= UHMEHASH_SZ; i++) {
		hmebp = &uhme_hash[i];
		SFMMU_HASH_LOCK(hmebp);
		hmeblkp = hmebp->hmeblkp;
		hblkpa = hmebp->hmeh_nextpa;
		prevpa = 0;
		pr_hblk = NULL;
		while (hmeblkp) {

			ASSERT(!hmeblkp->hblk_xhat_bit);

			if ((hmeblkp->hblk_tag.htag_id == sfmmup) &&
			    !hmeblkp->hblk_shw_bit && !hmeblkp->hblk_lckcnt) {
				(void) sfmmu_hblk_unload(sfmmup, hmeblkp,
				    (caddr_t)get_hblk_base(hmeblkp),
				    get_hblk_endaddr(hmeblkp),
				    NULL, HAT_UNLOAD);
			}
			nx_hblk = hmeblkp->hblk_next;
			nx_pa = hmeblkp->hblk_nextpa;
			if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
				ASSERT(!hmeblkp->hblk_lckcnt);
				sfmmu_hblk_hash_rm(hmebp, hmeblkp,
				    prevpa, pr_hblk);
				sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list);
			} else {
				pr_hblk = hmeblkp;
				prevpa = hblkpa;
			}
			hmeblkp = nx_hblk;
			hblkpa = nx_pa;
		}
		SFMMU_HASH_UNLOCK(hmebp);
	}

	sfmmu_hblks_list_purge(&list);

	/*
	 * Now free up the ctx so that others can reuse it.
	 */
	hatlockp = sfmmu_hat_enter(sfmmup);
	ctx = sfmmutoctx(sfmmup);
	cnum = ctxtoctxnum(ctx);

	if (cnum != INVALID_CONTEXT) {
		rw_enter(&ctx->ctx_rwlock, RW_WRITER);
		if (sfmmup->sfmmu_cnum == cnum) {
			sfmmu_reuse_ctx(ctx, sfmmup);
			/*
			 * Put ctx back to the free list.
			 */
			mutex_enter(&ctx_list_lock);
			CTX_SET_FLAGS(ctx, CTX_FREE_FLAG);
			ctx->ctx_free = ctxfree;
			ctxfree = ctx;
			mutex_exit(&ctx_list_lock);
		}
		rw_exit(&ctx->ctx_rwlock);
	}

	/*
	 * Free TSBs, but not tsbinfos, and set SWAPPED flag.
	 * If TSBs were never swapped in, just return.
	 * This implies that we don't support partial swapping
	 * of TSBs -- either all are swapped out, or none are.
	 *
	 * We must hold the HAT lock here to prevent racing with another
	 * thread trying to unmap TTEs from the TSB or running the post-
	 * relocator after relocating the TSB's memory.  Unfortunately, we
	 * can't free memory while holding the HAT lock or we could
	 * deadlock, so we build a list of TSBs to be freed after marking
	 * the tsbinfos as swapped out and free them after dropping the
	 * lock.
	 */
	if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
		sfmmu_hat_exit(hatlockp);
		return;
	}

	SFMMU_FLAGS_SET(sfmmup, HAT_SWAPPED);
	last = freelist = NULL;
	for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
	    tsbinfop = tsbinfop->tsb_next) {
		ASSERT((tsbinfop->tsb_flags & TSB_SWAPPED) == 0);

		/*
		 * Cast the TSB into a struct free_tsb and put it on the free
		 * list.
		 */
		if (freelist == NULL) {
			last = freelist = (struct free_tsb *)tsbinfop->tsb_va;
		} else {
			last->next = (struct free_tsb *)tsbinfop->tsb_va;
			last = last->next;
		}
		last->next = NULL;
		last->tsbinfop = tsbinfop;
		tsbinfop->tsb_flags |= TSB_SWAPPED;
		/*
		 * Zero out the TTE to clear the valid bit.
		 * Note we can't use a value like 0xbad because we want to
		 * ensure diagnostic bits are NEVER set on TTEs that might
		 * be loaded.  The intent is to catch any invalid access
		 * to the swapped TSB, such as a thread running with a valid
		 * context without first calling sfmmu_tsb_swapin() to
		 * allocate TSB memory.
		 */
		tsbinfop->tsb_tte.ll = 0;
	}

	/* Now we can drop the lock and free the TSB memory. */
	sfmmu_hat_exit(hatlockp);
	for (; freelist != NULL; freelist = next) {
		next = freelist->next;
		sfmmu_tsb_free(freelist->tsbinfop);
	}
}

/*
 * Duplicate the translations of an as into another newas
 */
/* ARGSUSED */
int
hat_dup(struct hat *hat, struct hat *newhat, caddr_t addr, size_t len,
	uint_t flag)
{
	ASSERT(hat->sfmmu_xhat_provider == NULL);
	ASSERT((flag == 0) || (flag == HAT_DUP_ALL) || (flag == HAT_DUP_COW));

	if (flag == HAT_DUP_COW) {
		panic("hat_dup: HAT_DUP_COW not supported");
	}
	return (0);
}

/*
 * Set up addr to map to page pp with protection prot.
 * As an optimization we also load the TSB with the
 * corresponding tte but it is no big deal if the tte gets kicked out.
 */
void
hat_memload(struct hat *hat, caddr_t addr, struct page *pp,
	uint_t attr, uint_t flags)
{
	tte_t tte;


	ASSERT(hat != NULL);
	ASSERT(PAGE_LOCKED(pp));
	ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
	ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG));
	ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));

	if (PP_ISFREE(pp)) {
		panic("hat_memload: loading a mapping to free page %p",
		    (void *)pp);
	}

	if (hat->sfmmu_xhat_provider) {
		XHAT_MEMLOAD(hat, addr, pp, attr, flags);
		return;
	}

	ASSERT((hat == ksfmmup) ||
	    AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock));

	if (flags & ~SFMMU_LOAD_ALLFLAG)
		cmn_err(CE_NOTE, "hat_memload: unsupported flags %d",
		    flags & ~SFMMU_LOAD_ALLFLAG);

	if (hat->sfmmu_rmstat)
		hat_resvstat(MMU_PAGESIZE, hat->sfmmu_as, addr);

#if defined(SF_ERRATA_57)
	if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
	    (addr < errata57_limit) && (attr & PROT_EXEC) &&
	    !(flags & HAT_LOAD_SHARE)) {
		cmn_err(CE_WARN, "hat_memload: illegal attempt to make user "
		    " page executable");
		attr &= ~PROT_EXEC;
	}
#endif

	sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K);
	(void) sfmmu_tteload_array(hat, &tte, addr, &pp, flags);

	/*
	 * Check TSB and TLB page sizes.
	 */
	if ((flags & HAT_LOAD_SHARE) == 0) {
		sfmmu_check_page_sizes(hat, 1);
	}
}
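/*
 * Illustrative usage sketch (not part of the original source): a segment
 * driver typically establishes a translation for a locked page with a
 * call such as
 *
 *	hat_memload(as->a_hat, vaddr, pp,
 *	    PROT_READ | PROT_WRITE, HAT_LOAD);
 *
 * where "vaddr" and "pp" stand for the caller's faulting address and its
 * locked page.
 */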
1729 */ 1730 void 1731 hat_devload(struct hat *hat, caddr_t addr, size_t len, pfn_t pfn, 1732 uint_t attr, int flags) 1733 { 1734 tte_t tte; 1735 struct page *pp = NULL; 1736 int use_lgpg = 0; 1737 1738 ASSERT(hat != NULL); 1739 1740 if (hat->sfmmu_xhat_provider) { 1741 XHAT_DEVLOAD(hat, addr, len, pfn, attr, flags); 1742 return; 1743 } 1744 1745 ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG)); 1746 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 1747 ASSERT((hat == ksfmmup) || 1748 AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock)); 1749 if (len == 0) 1750 panic("hat_devload: zero len"); 1751 if (flags & ~SFMMU_LOAD_ALLFLAG) 1752 cmn_err(CE_NOTE, "hat_devload: unsupported flags %d", 1753 flags & ~SFMMU_LOAD_ALLFLAG); 1754 1755 #if defined(SF_ERRATA_57) 1756 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) && 1757 (addr < errata57_limit) && (attr & PROT_EXEC) && 1758 !(flags & HAT_LOAD_SHARE)) { 1759 cmn_err(CE_WARN, "hat_devload: illegal attempt to make user " 1760 " page executable"); 1761 attr &= ~PROT_EXEC; 1762 } 1763 #endif 1764 1765 /* 1766 * If it's a memory page find its pp 1767 */ 1768 if (!(flags & HAT_LOAD_NOCONSIST) && pf_is_memory(pfn)) { 1769 pp = page_numtopp_nolock(pfn); 1770 if (pp == NULL) { 1771 flags |= HAT_LOAD_NOCONSIST; 1772 } else { 1773 if (PP_ISFREE(pp)) { 1774 panic("hat_memload: loading " 1775 "a mapping to free page %p", 1776 (void *)pp); 1777 } 1778 if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) { 1779 panic("hat_memload: loading a mapping " 1780 "to unlocked relocatable page %p", 1781 (void *)pp); 1782 } 1783 ASSERT(len == MMU_PAGESIZE); 1784 } 1785 } 1786 1787 if (hat->sfmmu_rmstat) 1788 hat_resvstat(len, hat->sfmmu_as, addr); 1789 1790 if (flags & HAT_LOAD_NOCONSIST) { 1791 attr |= SFMMU_UNCACHEVTTE; 1792 use_lgpg = 1; 1793 } 1794 if (!pf_is_memory(pfn)) { 1795 attr |= SFMMU_UNCACHEPTTE | HAT_NOSYNC; 1796 use_lgpg = 1; 1797 switch (attr & HAT_ORDER_MASK) { 1798 case HAT_STRICTORDER: 1799 case HAT_UNORDERED_OK: 1800 /* 1801 * we set the side effect bit for all non 1802 * memory mappings unless merging is ok 1803 */ 1804 attr |= SFMMU_SIDEFFECT; 1805 break; 1806 case HAT_MERGING_OK: 1807 case HAT_LOADCACHING_OK: 1808 case HAT_STORECACHING_OK: 1809 break; 1810 default: 1811 panic("hat_devload: bad attr"); 1812 break; 1813 } 1814 } 1815 while (len) { 1816 if (!use_lgpg) { 1817 sfmmu_memtte(&tte, pfn, attr, TTE8K); 1818 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 1819 flags); 1820 len -= MMU_PAGESIZE; 1821 addr += MMU_PAGESIZE; 1822 pfn++; 1823 continue; 1824 } 1825 /* 1826 * try to use large pages, check va/pa alignments 1827 * Note that 32M/256M page sizes are not (yet) supported. 
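 *
 * The checks below fall back in order 4M -> 512K -> 64K -> 8K: a size is
 * used only when the remaining len covers it, both the virtual address
 * and the physical address (mmu_ptob(pfn)) are aligned to it, and the
 * size is not turned off in disable_large_pages.  As a rough
 * illustration, an 8M request that starts on a 4M-aligned va/pa boundary
 * is satisfied by two TTE4M loads instead of 1024 TTE8K loads.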
1828 */ 1829 if ((len >= MMU_PAGESIZE4M) && 1830 !((uintptr_t)addr & MMU_PAGEOFFSET4M) && 1831 !(disable_large_pages & (1 << TTE4M)) && 1832 !(mmu_ptob(pfn) & MMU_PAGEOFFSET4M)) { 1833 sfmmu_memtte(&tte, pfn, attr, TTE4M); 1834 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 1835 flags); 1836 len -= MMU_PAGESIZE4M; 1837 addr += MMU_PAGESIZE4M; 1838 pfn += MMU_PAGESIZE4M / MMU_PAGESIZE; 1839 } else if ((len >= MMU_PAGESIZE512K) && 1840 !((uintptr_t)addr & MMU_PAGEOFFSET512K) && 1841 !(disable_large_pages & (1 << TTE512K)) && 1842 !(mmu_ptob(pfn) & MMU_PAGEOFFSET512K)) { 1843 sfmmu_memtte(&tte, pfn, attr, TTE512K); 1844 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 1845 flags); 1846 len -= MMU_PAGESIZE512K; 1847 addr += MMU_PAGESIZE512K; 1848 pfn += MMU_PAGESIZE512K / MMU_PAGESIZE; 1849 } else if ((len >= MMU_PAGESIZE64K) && 1850 !((uintptr_t)addr & MMU_PAGEOFFSET64K) && 1851 !(disable_large_pages & (1 << TTE64K)) && 1852 !(mmu_ptob(pfn) & MMU_PAGEOFFSET64K)) { 1853 sfmmu_memtte(&tte, pfn, attr, TTE64K); 1854 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 1855 flags); 1856 len -= MMU_PAGESIZE64K; 1857 addr += MMU_PAGESIZE64K; 1858 pfn += MMU_PAGESIZE64K / MMU_PAGESIZE; 1859 } else { 1860 sfmmu_memtte(&tte, pfn, attr, TTE8K); 1861 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 1862 flags); 1863 len -= MMU_PAGESIZE; 1864 addr += MMU_PAGESIZE; 1865 pfn++; 1866 } 1867 } 1868 1869 /* 1870 * Check TSB and TLB page sizes. 1871 */ 1872 if ((flags & HAT_LOAD_SHARE) == 0) { 1873 sfmmu_check_page_sizes(hat, 1); 1874 } 1875 } 1876 1877 /* 1878 * Map the largest extent possible out of the page array. The array may NOT 1879 * be in order. The largest possible mapping a page can have 1880 * is specified in the p_szc field. The p_szc field 1881 * cannot change as long as there are any mappings (large or small) 1882 * to any of the pages that make up the large page. (i.e. any 1883 * promotion/demotion of page size is not up to the hat but up to 1884 * the page free list manager). The array 1885 * should consist of properly aligned contiguous pages that are 1886 * part of a big page for a large mapping to be created. 1887 */ 1888 void 1889 hat_memload_array(struct hat *hat, caddr_t addr, size_t len, 1890 struct page **pps, uint_t attr, uint_t flags) 1891 { 1892 int ttesz; 1893 size_t mapsz; 1894 pgcnt_t numpg, npgs; 1895 tte_t tte; 1896 page_t *pp; 1897 int large_pages_disable; 1898 1899 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 1900 1901 if (hat->sfmmu_xhat_provider) { 1902 XHAT_MEMLOAD_ARRAY(hat, addr, len, pps, attr, flags); 1903 return; 1904 } 1905 1906 if (hat->sfmmu_rmstat) 1907 hat_resvstat(len, hat->sfmmu_as, addr); 1908 1909 #if defined(SF_ERRATA_57) 1910 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) && 1911 (addr < errata57_limit) && (attr & PROT_EXEC) && 1912 !(flags & HAT_LOAD_SHARE)) { 1913 cmn_err(CE_WARN, "hat_memload_array: illegal attempt to make " 1914 "user page executable"); 1915 attr &= ~PROT_EXEC; 1916 } 1917 #endif 1918 1919 /* Get number of pages */ 1920 npgs = len >> MMU_PAGESHIFT; 1921 1922 if (flags & HAT_LOAD_SHARE) { 1923 large_pages_disable = disable_ism_large_pages; 1924 } else { 1925 large_pages_disable = disable_large_pages; 1926 } 1927 1928 if (npgs < NHMENTS || large_pages_disable == LARGE_PAGES_OFF) { 1929 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs); 1930 return; 1931 } 1932 1933 while (npgs >= NHMENTS) { 1934 pp = *pps; 1935 for (ttesz = pp->p_szc; ttesz != TTE8K; ttesz--) { 1936 /* 1937 * Check if this page size is disabled.
1938 */ 1939 if (large_pages_disable & (1 << ttesz)) 1940 continue; 1941 1942 numpg = TTEPAGES(ttesz); 1943 mapsz = numpg << MMU_PAGESHIFT; 1944 if ((npgs >= numpg) && 1945 IS_P2ALIGNED(addr, mapsz) && 1946 IS_P2ALIGNED(pp->p_pagenum, numpg)) { 1947 /* 1948 * At this point we have enough pages and 1949 * we know the virtual address and the pfn 1950 * are properly aligned. We still need 1951 * to check for physical contiguity but since 1952 * it is very likely that this is the case 1953 * we will assume they are so and undo 1954 * the request if necessary. It would 1955 * be great if we could get a hint flag 1956 * like HAT_CONTIG which would tell us 1957 * the pages are contiguous for sure. 1958 */ 1959 sfmmu_memtte(&tte, (*pps)->p_pagenum, 1960 attr, ttesz); 1961 if (!sfmmu_tteload_array(hat, &tte, addr, 1962 pps, flags)) { 1963 break; 1964 } 1965 } 1966 } 1967 if (ttesz == TTE8K) { 1968 /* 1969 * We were not able to map the array using a large 1970 * page; batch an hmeblk or fraction at a time. 1971 */ 1972 numpg = ((uintptr_t)addr >> MMU_PAGESHIFT) 1973 & (NHMENTS-1); 1974 numpg = NHMENTS - numpg; 1975 ASSERT(numpg <= npgs); 1976 mapsz = numpg * MMU_PAGESIZE; 1977 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, 1978 numpg); 1979 } 1980 addr += mapsz; 1981 npgs -= numpg; 1982 pps += numpg; 1983 } 1984 1985 if (npgs) { 1986 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs); 1987 } 1988 1989 /* 1990 * Check TSB and TLB page sizes. 1991 */ 1992 if ((flags & HAT_LOAD_SHARE) == 0) { 1993 sfmmu_check_page_sizes(hat, 1); 1994 } 1995 } 1996 1997 /* 1998 * Function tries to batch 8K pages into the same hme blk. 1999 */ 2000 static void 2001 sfmmu_memload_batchsmall(struct hat *hat, caddr_t vaddr, page_t **pps, 2002 uint_t attr, uint_t flags, pgcnt_t npgs) 2003 { 2004 tte_t tte; 2005 page_t *pp; 2006 struct hmehash_bucket *hmebp; 2007 struct hme_blk *hmeblkp; 2008 int index; 2009 2010 while (npgs) { 2011 /* 2012 * Acquire the hash bucket. 2013 */ 2014 hmebp = sfmmu_tteload_acquire_hashbucket(hat, vaddr, TTE8K); 2015 ASSERT(hmebp); 2016 2017 /* 2018 * Find the hment block. 2019 */ 2020 hmeblkp = sfmmu_tteload_find_hmeblk(hat, hmebp, vaddr, 2021 TTE8K, flags); 2022 ASSERT(hmeblkp); 2023 2024 do { 2025 /* 2026 * Make the tte. 2027 */ 2028 pp = *pps; 2029 sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K); 2030 2031 /* 2032 * Add the translation. 2033 */ 2034 (void) sfmmu_tteload_addentry(hat, hmeblkp, &tte, 2035 vaddr, pps, flags); 2036 2037 /* 2038 * Go to the next page. 2039 */ 2040 pps++; 2041 npgs--; 2042 2043 /* 2044 * Go to the next address. 2045 */ 2046 vaddr += MMU_PAGESIZE; 2047 2048 /* 2049 * Don't cross over into a different hme_blk. 2050 */ 2051 index = (int)(((uintptr_t)vaddr >> MMU_PAGESHIFT) & 2052 (NHMENTS-1)); 2053 2054 } while (index != 0 && npgs != 0); 2055 2056 /* 2057 * Release the hash bucket.
2058 */ 2059 2060 sfmmu_tteload_release_hashbucket(hmebp); 2061 } 2062 } 2063 2064 /* 2065 * Construct a tte for a page: 2066 * 2067 * tte_valid = 1 2068 * tte_size2 = size & TTE_SZ2_BITS (Panther and Olympus-C only) 2069 * tte_size = size 2070 * tte_nfo = attr & HAT_NOFAULT 2071 * tte_ie = attr & HAT_STRUCTURE_LE 2072 * tte_hmenum = hmenum 2073 * tte_pahi = pp->p_pagenum >> TTE_PASHIFT; 2074 * tte_palo = pp->p_pagenum & TTE_PALOMASK; 2075 * tte_ref = 1 (optimization) 2076 * tte_wr_perm = attr & PROT_WRITE; 2077 * tte_no_sync = attr & HAT_NOSYNC 2078 * tte_lock = attr & SFMMU_LOCKTTE 2079 * tte_cp = !(attr & SFMMU_UNCACHEPTTE) 2080 * tte_cv = !(attr & SFMMU_UNCACHEVTTE) 2081 * tte_e = attr & SFMMU_SIDEFFECT 2082 * tte_priv = !(attr & PROT_USER) 2083 * tte_hwwr = if nosync is set and it is writable we set the mod bit (opt) 2084 * tte_glb = 0 2085 */ 2086 void 2087 sfmmu_memtte(tte_t *ttep, pfn_t pfn, uint_t attr, int tte_sz) 2088 { 2089 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 2090 2091 ttep->tte_inthi = MAKE_TTE_INTHI(pfn, attr, tte_sz, 0 /* hmenum */); 2092 ttep->tte_intlo = MAKE_TTE_INTLO(pfn, attr, tte_sz, 0 /* hmenum */); 2093 2094 if (TTE_IS_NOSYNC(ttep)) { 2095 TTE_SET_REF(ttep); 2096 if (TTE_IS_WRITABLE(ttep)) { 2097 TTE_SET_MOD(ttep); 2098 } 2099 } 2100 if (TTE_IS_NFO(ttep) && TTE_IS_EXECUTABLE(ttep)) { 2101 panic("sfmmu_memtte: can't set both NFO and EXEC bits"); 2102 } 2103 } 2104 2105 /* 2106 * This function will add a translation to the hme_blk and allocate the 2107 * hme_blk if one does not exist. 2108 * If a page structure is specified then it will add the 2109 * corresponding hment to the mapping list. 2110 * It will also update the hmenum field for the tte. 2111 */ 2112 void 2113 sfmmu_tteload(struct hat *sfmmup, tte_t *ttep, caddr_t vaddr, page_t *pp, 2114 uint_t flags) 2115 { 2116 (void) sfmmu_tteload_array(sfmmup, ttep, vaddr, &pp, flags); 2117 } 2118 2119 /* 2120 * Load (ttep != NULL) or unload (ttep == NULL) one entry in the TSB. 2121 * Assumes that a particular page size may only be resident in one TSB. 2122 */ 2123 static void 2124 sfmmu_mod_tsb(sfmmu_t *sfmmup, caddr_t vaddr, tte_t *ttep, int ttesz) 2125 { 2126 struct tsb_info *tsbinfop = NULL; 2127 uint64_t tag; 2128 struct tsbe *tsbe_addr; 2129 uint64_t tsb_base; 2130 uint_t tsb_size; 2131 int vpshift = MMU_PAGESHIFT; 2132 int phys = 0; 2133 2134 if (sfmmup == ksfmmup) { /* No support for 32/256M ksfmmu pages */ 2135 phys = ktsb_phys; 2136 if (ttesz >= TTE4M) { 2137 #ifndef sun4v 2138 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M)); 2139 #endif 2140 tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base; 2141 tsb_size = ktsb4m_szcode; 2142 } else { 2143 tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base; 2144 tsb_size = ktsb_szcode; 2145 } 2146 } else { 2147 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz); 2148 2149 /* 2150 * If there isn't a TSB for this page size, or the TSB is 2151 * swapped out, there is nothing to do. Note that the latter 2152 * case seems impossible but can occur if hat_pageunload() 2153 * is called on an ISM mapping while the process is swapped 2154 * out. 2155 */ 2156 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED)) 2157 return; 2158 2159 /* 2160 * If another thread is in the middle of relocating a TSB 2161 * we can't unload the entry so set a flag so that the 2162 * TSB will be flushed before it can be accessed by the 2163 * process. 
2164 */ 2165 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) { 2166 if (ttep == NULL) 2167 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED; 2168 return; 2169 } 2170 #if defined(UTSB_PHYS) 2171 phys = 1; 2172 tsb_base = (uint64_t)tsbinfop->tsb_pa; 2173 #else 2174 tsb_base = (uint64_t)tsbinfop->tsb_va; 2175 #endif 2176 tsb_size = tsbinfop->tsb_szc; 2177 } 2178 if (ttesz >= TTE4M) 2179 vpshift = MMU_PAGESHIFT4M; 2180 2181 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size); 2182 tag = sfmmu_make_tsbtag(vaddr); 2183 2184 if (ttep == NULL) { 2185 sfmmu_unload_tsbe(tsbe_addr, tag, phys); 2186 } else { 2187 if (ttesz >= TTE4M) { 2188 SFMMU_STAT(sf_tsb_load4m); 2189 } else { 2190 SFMMU_STAT(sf_tsb_load8k); 2191 } 2192 2193 sfmmu_load_tsbe(tsbe_addr, tag, ttep, phys); 2194 } 2195 } 2196 2197 /* 2198 * Unmap all entries from [start, end) matching the given page size. 2199 * 2200 * This function is used primarily to unmap replicated 64K or 512K entries 2201 * from the TSB that are inserted using the base page size TSB pointer, but 2202 * it may also be called to unmap a range of addresses from the TSB. 2203 */ 2204 void 2205 sfmmu_unload_tsb_range(sfmmu_t *sfmmup, caddr_t start, caddr_t end, int ttesz) 2206 { 2207 struct tsb_info *tsbinfop; 2208 uint64_t tag; 2209 struct tsbe *tsbe_addr; 2210 caddr_t vaddr; 2211 uint64_t tsb_base; 2212 int vpshift, vpgsz; 2213 uint_t tsb_size; 2214 int phys = 0; 2215 2216 /* 2217 * Assumptions: 2218 * If ttesz == 8K, 64K or 512K, we walk through the range 8K 2219 * at a time shooting down any valid entries we encounter. 2220 * 2221 * If ttesz >= 4M we walk the range 4M at a time shooting 2222 * down any valid mappings we find. 2223 */ 2224 if (sfmmup == ksfmmup) { 2225 phys = ktsb_phys; 2226 if (ttesz >= TTE4M) { 2227 #ifndef sun4v 2228 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M)); 2229 #endif 2230 tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base; 2231 tsb_size = ktsb4m_szcode; 2232 } else { 2233 tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base; 2234 tsb_size = ktsb_szcode; 2235 } 2236 } else { 2237 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz); 2238 2239 /* 2240 * If there isn't a TSB for this page size, or the TSB is 2241 * swapped out, there is nothing to do. Note that the latter 2242 * case seems impossible but can occur if hat_pageunload() 2243 * is called on an ISM mapping while the process is swapped 2244 * out. 2245 */ 2246 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED)) 2247 return; 2248 2249 /* 2250 * If another thread is in the middle of relocating a TSB 2251 * we can't unload the entry so set a flag so that the 2252 * TSB will be flushed before it can be accessed by the 2253 * process. 2254 */ 2255 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) { 2256 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED; 2257 return; 2258 } 2259 #if defined(UTSB_PHYS) 2260 phys = 1; 2261 tsb_base = (uint64_t)tsbinfop->tsb_pa; 2262 #else 2263 tsb_base = (uint64_t)tsbinfop->tsb_va; 2264 #endif 2265 tsb_size = tsbinfop->tsb_szc; 2266 } 2267 if (ttesz >= TTE4M) { 2268 vpshift = MMU_PAGESHIFT4M; 2269 vpgsz = MMU_PAGESIZE4M; 2270 } else { 2271 vpshift = MMU_PAGESHIFT; 2272 vpgsz = MMU_PAGESIZE; 2273 } 2274 2275 for (vaddr = start; vaddr < end; vaddr += vpgsz) { 2276 tag = sfmmu_make_tsbtag(vaddr); 2277 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size); 2278 sfmmu_unload_tsbe(tsbe_addr, tag, phys); 2279 } 2280 } 2281 2282 /* 2283 * Select the optimum TSB size given the number of mappings 2284 * that need to be cached. 
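 *
 * The returned size code grows while pgcnt exceeds SFMMU_RSS_TSBSIZE(szc)
 * and is capped at tsb_max_growsize.  Under DEBUG, setting tsb_grow_stress
 * returns a pseudo-random size code in [0, tsb_max_growsize] instead,
 * presumably to exercise the TSB grow paths.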
2285 */ 2286 static int 2287 sfmmu_select_tsb_szc(pgcnt_t pgcnt) 2288 { 2289 int szc = 0; 2290 2291 #ifdef DEBUG 2292 if (tsb_grow_stress) { 2293 uint32_t randval = (uint32_t)gettick() >> 4; 2294 return (randval % (tsb_max_growsize + 1)); 2295 } 2296 #endif /* DEBUG */ 2297 2298 while ((szc < tsb_max_growsize) && (pgcnt > SFMMU_RSS_TSBSIZE(szc))) 2299 szc++; 2300 return (szc); 2301 } 2302 2303 /* 2304 * This function will add a translation to the hme_blk and allocate the 2305 * hme_blk if one does not exist. 2306 * If a page structure is specified then it will add the 2307 * corresponding hment to the mapping list. 2308 * It will also update the hmenum field for the tte. 2309 * Furthermore, it attempts to create a large page translation 2310 * for <addr,hat> at page array pps. It assumes addr and first 2311 * pp is correctly aligned. It returns 0 if successful and 1 otherwise. 2312 */ 2313 static int 2314 sfmmu_tteload_array(sfmmu_t *sfmmup, tte_t *ttep, caddr_t vaddr, 2315 page_t **pps, uint_t flags) 2316 { 2317 struct hmehash_bucket *hmebp; 2318 struct hme_blk *hmeblkp; 2319 int ret; 2320 uint_t size; 2321 2322 /* 2323 * Get mapping size. 2324 */ 2325 size = TTE_CSZ(ttep); 2326 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size))); 2327 2328 /* 2329 * Acquire the hash bucket. 2330 */ 2331 hmebp = sfmmu_tteload_acquire_hashbucket(sfmmup, vaddr, size); 2332 ASSERT(hmebp); 2333 2334 /* 2335 * Find the hment block. 2336 */ 2337 hmeblkp = sfmmu_tteload_find_hmeblk(sfmmup, hmebp, vaddr, size, flags); 2338 ASSERT(hmeblkp); 2339 2340 /* 2341 * Add the translation. 2342 */ 2343 ret = sfmmu_tteload_addentry(sfmmup, hmeblkp, ttep, vaddr, pps, flags); 2344 2345 /* 2346 * Release the hash bucket. 2347 */ 2348 sfmmu_tteload_release_hashbucket(hmebp); 2349 2350 return (ret); 2351 } 2352 2353 /* 2354 * Function locks and returns a pointer to the hash bucket for vaddr and size. 2355 */ 2356 static struct hmehash_bucket * 2357 sfmmu_tteload_acquire_hashbucket(sfmmu_t *sfmmup, caddr_t vaddr, int size) 2358 { 2359 struct hmehash_bucket *hmebp; 2360 int hmeshift; 2361 2362 hmeshift = HME_HASH_SHIFT(size); 2363 2364 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift); 2365 2366 SFMMU_HASH_LOCK(hmebp); 2367 2368 return (hmebp); 2369 } 2370 2371 /* 2372 * Function returns a pointer to an hmeblk in the hash bucket, hmebp. If the 2373 * hmeblk doesn't exists for the [sfmmup, vaddr & size] signature, a hmeblk is 2374 * allocated. 2375 */ 2376 static struct hme_blk * 2377 sfmmu_tteload_find_hmeblk(sfmmu_t *sfmmup, struct hmehash_bucket *hmebp, 2378 caddr_t vaddr, uint_t size, uint_t flags) 2379 { 2380 hmeblk_tag hblktag; 2381 int hmeshift; 2382 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL; 2383 uint64_t hblkpa, prevpa; 2384 struct kmem_cache *sfmmu_cache; 2385 uint_t forcefree; 2386 2387 hblktag.htag_id = sfmmup; 2388 hmeshift = HME_HASH_SHIFT(size); 2389 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift); 2390 hblktag.htag_rehash = HME_HASH_REHASH(size); 2391 2392 ttearray_realloc: 2393 2394 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, hblkpa, 2395 pr_hblk, prevpa, &list); 2396 2397 /* 2398 * We block until hblk_reserve_lock is released; it's held by 2399 * the thread, temporarily using hblk_reserve, until hblk_reserve is 2400 * replaced by a hblk from sfmmu8_cache. 
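 *
 * The blocking itself is simple: drop the hash bucket lock, acquire and
 * immediately release hblk_reserve_lock (which serializes us behind the
 * owner), then retake the bucket lock and retry the search from
 * ttearray_realloc.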
2401 */ 2402 if (hmeblkp == (struct hme_blk *)hblk_reserve && 2403 hblk_reserve_thread != curthread) { 2404 SFMMU_HASH_UNLOCK(hmebp); 2405 mutex_enter(&hblk_reserve_lock); 2406 mutex_exit(&hblk_reserve_lock); 2407 SFMMU_STAT(sf_hblk_reserve_hit); 2408 SFMMU_HASH_LOCK(hmebp); 2409 goto ttearray_realloc; 2410 } 2411 2412 if (hmeblkp == NULL) { 2413 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size, 2414 hblktag, flags); 2415 } else { 2416 /* 2417 * It is possible for 8k and 64k hblks to collide since they 2418 * have the same rehash value. This is because we 2419 * lazily free hblks and 8K/64K blks could be lingering. 2420 * If we find size mismatch we free the block and & try again. 2421 */ 2422 if (get_hblk_ttesz(hmeblkp) != size) { 2423 ASSERT(!hmeblkp->hblk_vcnt); 2424 ASSERT(!hmeblkp->hblk_hmecnt); 2425 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, pr_hblk); 2426 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 2427 goto ttearray_realloc; 2428 } 2429 if (hmeblkp->hblk_shw_bit) { 2430 /* 2431 * if the hblk was previously used as a shadow hblk then 2432 * we will change it to a normal hblk 2433 */ 2434 if (hmeblkp->hblk_shw_mask) { 2435 sfmmu_shadow_hcleanup(sfmmup, hmeblkp, hmebp); 2436 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 2437 goto ttearray_realloc; 2438 } else { 2439 hmeblkp->hblk_shw_bit = 0; 2440 } 2441 } 2442 SFMMU_STAT(sf_hblk_hit); 2443 } 2444 2445 /* 2446 * hat_memload() should never call kmem_cache_free(); see block 2447 * comment showing the stacktrace in sfmmu_hblk_alloc(); 2448 * enqueue each hblk in the list to reserve list if it's created 2449 * from sfmmu8_cache *and* sfmmup == KHATID. 2450 */ 2451 forcefree = (sfmmup == KHATID) ? 1 : 0; 2452 while ((pr_hblk = list) != NULL) { 2453 list = pr_hblk->hblk_next; 2454 sfmmu_cache = get_hblk_cache(pr_hblk); 2455 if ((sfmmu_cache == sfmmu8_cache) && 2456 sfmmu_put_free_hblk(pr_hblk, forcefree)) 2457 continue; 2458 2459 ASSERT(sfmmup != KHATID); 2460 kmem_cache_free(sfmmu_cache, pr_hblk); 2461 } 2462 2463 ASSERT(get_hblk_ttesz(hmeblkp) == size); 2464 ASSERT(!hmeblkp->hblk_shw_bit); 2465 2466 return (hmeblkp); 2467 } 2468 2469 /* 2470 * Function adds a tte entry into the hmeblk. It returns 0 if successful and 1 2471 * otherwise. 2472 */ 2473 static int 2474 sfmmu_tteload_addentry(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, tte_t *ttep, 2475 caddr_t vaddr, page_t **pps, uint_t flags) 2476 { 2477 page_t *pp = *pps; 2478 int hmenum, size, remap; 2479 tte_t tteold, flush_tte; 2480 #ifdef DEBUG 2481 tte_t orig_old; 2482 #endif /* DEBUG */ 2483 struct sf_hment *sfhme; 2484 kmutex_t *pml, *pmtx; 2485 hatlock_t *hatlockp; 2486 2487 /* 2488 * remove this panic when we decide to let user virtual address 2489 * space be >= USERLIMIT. 2490 */ 2491 if (!TTE_IS_PRIVILEGED(ttep) && vaddr >= (caddr_t)USERLIMIT) 2492 panic("user addr %p in kernel space", vaddr); 2493 #if defined(TTE_IS_GLOBAL) 2494 if (TTE_IS_GLOBAL(ttep)) 2495 panic("sfmmu_tteload: creating global tte"); 2496 #endif 2497 2498 #ifdef DEBUG 2499 if (pf_is_memory(sfmmu_ttetopfn(ttep, vaddr)) && 2500 !TTE_IS_PCACHEABLE(ttep) && !sfmmu_allow_nc_trans) 2501 panic("sfmmu_tteload: non cacheable memory tte"); 2502 #endif /* DEBUG */ 2503 2504 if ((flags & HAT_LOAD_SHARE) || !TTE_IS_REF(ttep) || 2505 !TTE_IS_MOD(ttep)) { 2506 /* 2507 * Don't load TSB for dummy as in ISM. Also don't preload 2508 * the TSB if the TTE isn't writable since we're likely to 2509 * fault on it again -- preloading can be fairly expensive. 
2510 */ 2511 flags |= SFMMU_NO_TSBLOAD; 2512 } 2513 2514 size = TTE_CSZ(ttep); 2515 switch (size) { 2516 case TTE8K: 2517 SFMMU_STAT(sf_tteload8k); 2518 break; 2519 case TTE64K: 2520 SFMMU_STAT(sf_tteload64k); 2521 break; 2522 case TTE512K: 2523 SFMMU_STAT(sf_tteload512k); 2524 break; 2525 case TTE4M: 2526 SFMMU_STAT(sf_tteload4m); 2527 break; 2528 case (TTE32M): 2529 SFMMU_STAT(sf_tteload32m); 2530 ASSERT(mmu_page_sizes == max_mmu_page_sizes); 2531 break; 2532 case (TTE256M): 2533 SFMMU_STAT(sf_tteload256m); 2534 ASSERT(mmu_page_sizes == max_mmu_page_sizes); 2535 break; 2536 } 2537 2538 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size))); 2539 2540 HBLKTOHME_IDX(sfhme, hmeblkp, vaddr, hmenum); 2541 2542 /* 2543 * Need to grab mlist lock here so that pageunload 2544 * will not change tte behind us. 2545 */ 2546 if (pp) { 2547 pml = sfmmu_mlist_enter(pp); 2548 } 2549 2550 sfmmu_copytte(&sfhme->hme_tte, &tteold); 2551 /* 2552 * Look for corresponding hment and if valid verify 2553 * pfns are equal. 2554 */ 2555 remap = TTE_IS_VALID(&tteold); 2556 if (remap) { 2557 pfn_t new_pfn, old_pfn; 2558 2559 old_pfn = TTE_TO_PFN(vaddr, &tteold); 2560 new_pfn = TTE_TO_PFN(vaddr, ttep); 2561 2562 if (flags & HAT_LOAD_REMAP) { 2563 /* make sure we are remapping same type of pages */ 2564 if (pf_is_memory(old_pfn) != pf_is_memory(new_pfn)) { 2565 panic("sfmmu_tteload - tte remap io<->memory"); 2566 } 2567 if (old_pfn != new_pfn && 2568 (pp != NULL || sfhme->hme_page != NULL)) { 2569 panic("sfmmu_tteload - tte remap pp != NULL"); 2570 } 2571 } else if (old_pfn != new_pfn) { 2572 panic("sfmmu_tteload - tte remap, hmeblkp 0x%p", 2573 (void *)hmeblkp); 2574 } 2575 ASSERT(TTE_CSZ(&tteold) == TTE_CSZ(ttep)); 2576 } 2577 2578 if (pp) { 2579 if (size == TTE8K) { 2580 /* 2581 * Handle VAC consistency 2582 */ 2583 if (!remap && (cache & CACHE_VAC) && !PP_ISNC(pp)) { 2584 sfmmu_vac_conflict(sfmmup, vaddr, pp); 2585 } 2586 2587 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) { 2588 pmtx = sfmmu_page_enter(pp); 2589 PP_CLRRO(pp); 2590 sfmmu_page_exit(pmtx); 2591 } else if (!PP_ISMAPPED(pp) && 2592 (!TTE_IS_WRITABLE(ttep)) && !(PP_ISMOD(pp))) { 2593 pmtx = sfmmu_page_enter(pp); 2594 if (!(PP_ISMOD(pp))) { 2595 PP_SETRO(pp); 2596 } 2597 sfmmu_page_exit(pmtx); 2598 } 2599 2600 } else if (sfmmu_pagearray_setup(vaddr, pps, ttep, remap)) { 2601 /* 2602 * sfmmu_pagearray_setup failed so return 2603 */ 2604 sfmmu_mlist_exit(pml); 2605 return (1); 2606 } 2607 } 2608 2609 /* 2610 * Make sure hment is not on a mapping list. 2611 */ 2612 ASSERT(remap || (sfhme->hme_page == NULL)); 2613 2614 /* if it is not a remap then hme->next better be NULL */ 2615 ASSERT((!remap) ? sfhme->hme_next == NULL : 1); 2616 2617 if (flags & HAT_LOAD_LOCK) { 2618 if (((int)hmeblkp->hblk_lckcnt + 1) >= MAX_HBLK_LCKCNT) { 2619 panic("too high lckcnt-hmeblk %p", 2620 (void *)hmeblkp); 2621 } 2622 atomic_add_16(&hmeblkp->hblk_lckcnt, 1); 2623 2624 HBLK_STACK_TRACE(hmeblkp, HBLK_LOCK); 2625 } 2626 2627 if (pp && PP_ISNC(pp)) { 2628 /* 2629 * If the physical page is marked to be uncacheable, like 2630 * by a vac conflict, make sure the new mapping is also 2631 * uncacheable. 
2632 */ 2633 TTE_CLR_VCACHEABLE(ttep); 2634 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR); 2635 } 2636 ttep->tte_hmenum = hmenum; 2637 2638 #ifdef DEBUG 2639 orig_old = tteold; 2640 #endif /* DEBUG */ 2641 2642 while (sfmmu_modifytte_try(&tteold, ttep, &sfhme->hme_tte) < 0) { 2643 if ((sfmmup == KHATID) && 2644 (flags & (HAT_LOAD_LOCK | HAT_LOAD_REMAP))) { 2645 sfmmu_copytte(&sfhme->hme_tte, &tteold); 2646 } 2647 #ifdef DEBUG 2648 chk_tte(&orig_old, &tteold, ttep, hmeblkp); 2649 #endif /* DEBUG */ 2650 } 2651 2652 if (!TTE_IS_VALID(&tteold)) { 2653 2654 atomic_add_16(&hmeblkp->hblk_vcnt, 1); 2655 atomic_add_long(&sfmmup->sfmmu_ttecnt[size], 1); 2656 2657 /* 2658 * HAT_RELOAD_SHARE has been deprecated with lpg DISM. 2659 */ 2660 2661 if (size > TTE8K && (flags & HAT_LOAD_SHARE) == 0 && 2662 sfmmup != ksfmmup) { 2663 /* 2664 * If this is the first large mapping for the process 2665 * we must force any CPUs running this process to TL=0 2666 * where they will reload the HAT flags from the 2667 * tsbmiss area. This is necessary to make the large 2668 * mappings we are about to load visible to those CPUs; 2669 * otherwise they'll loop forever calling pagefault() 2670 * since we don't search large hash chains by default. 2671 */ 2672 hatlockp = sfmmu_hat_enter(sfmmup); 2673 if (size == TTE512K && 2674 !SFMMU_FLAGS_ISSET(sfmmup, HAT_512K_FLAG)) { 2675 SFMMU_FLAGS_SET(sfmmup, HAT_512K_FLAG); 2676 sfmmu_sync_mmustate(sfmmup); 2677 } else if (size == TTE4M && 2678 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4M_FLAG)) { 2679 SFMMU_FLAGS_SET(sfmmup, HAT_4M_FLAG); 2680 sfmmu_sync_mmustate(sfmmup); 2681 } else if (size == TTE64K && 2682 !SFMMU_FLAGS_ISSET(sfmmup, HAT_64K_FLAG)) { 2683 SFMMU_FLAGS_SET(sfmmup, HAT_64K_FLAG); 2684 /* no sync mmustate; 64K shares 8K hashes */ 2685 } else if (mmu_page_sizes == max_mmu_page_sizes) { 2686 if (size == TTE32M && 2687 !SFMMU_FLAGS_ISSET(sfmmup, HAT_32M_FLAG)) { 2688 SFMMU_FLAGS_SET(sfmmup, HAT_32M_FLAG); 2689 sfmmu_sync_mmustate(sfmmup); 2690 } else if (size == TTE256M && 2691 !SFMMU_FLAGS_ISSET(sfmmup, HAT_256M_FLAG)) { 2692 SFMMU_FLAGS_SET(sfmmup, HAT_256M_FLAG); 2693 sfmmu_sync_mmustate(sfmmup); 2694 } 2695 } 2696 if (size >= TTE4M && (flags & HAT_LOAD_TEXT) && 2697 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) { 2698 SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG); 2699 } 2700 sfmmu_hat_exit(hatlockp); 2701 } 2702 } 2703 ASSERT(TTE_IS_VALID(&sfhme->hme_tte)); 2704 2705 flush_tte.tte_intlo = (tteold.tte_intlo ^ ttep->tte_intlo) & 2706 hw_tte.tte_intlo; 2707 flush_tte.tte_inthi = (tteold.tte_inthi ^ ttep->tte_inthi) & 2708 hw_tte.tte_inthi; 2709 2710 if (remap && (flush_tte.tte_inthi || flush_tte.tte_intlo)) { 2711 /* 2712 * If remap and new tte differs from old tte we need 2713 * to sync the mod bit and flush TLB/TSB. We don't 2714 * need to sync ref bit because we currently always set 2715 * ref bit in tteload. 2716 */ 2717 ASSERT(TTE_IS_REF(ttep)); 2718 if (TTE_IS_MOD(&tteold)) { 2719 sfmmu_ttesync(sfmmup, vaddr, &tteold, pp); 2720 } 2721 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 0); 2722 xt_sync(sfmmup->sfmmu_cpusran); 2723 } 2724 2725 if ((flags & SFMMU_NO_TSBLOAD) == 0) { 2726 /* 2727 * We only preload 8K and 4M mappings into the TSB, since 2728 * 64K and 512K mappings are replicated and hence don't 2729 * have a single, unique TSB entry. Ditto for 32M/256M. 
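 *
 * ("Replicated" here means such mappings are entered into the TSB at
 * base page size granularity; see sfmmu_unload_tsb_range(), which backs
 * those replicated entries out 8K at a time.)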
2730 */ 2731 if (size == TTE8K || size == TTE4M) { 2732 hatlockp = sfmmu_hat_enter(sfmmup); 2733 sfmmu_load_tsb(sfmmup, vaddr, &sfhme->hme_tte, size); 2734 sfmmu_hat_exit(hatlockp); 2735 } 2736 } 2737 if (pp) { 2738 if (!remap) { 2739 HME_ADD(sfhme, pp); 2740 atomic_add_16(&hmeblkp->hblk_hmecnt, 1); 2741 ASSERT(hmeblkp->hblk_hmecnt > 0); 2742 2743 /* 2744 * Cannot ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS) 2745 * see pageunload() for comment. 2746 */ 2747 } 2748 sfmmu_mlist_exit(pml); 2749 } 2750 2751 return (0); 2752 } 2753 /* 2754 * Function unlocks hash bucket. 2755 */ 2756 static void 2757 sfmmu_tteload_release_hashbucket(struct hmehash_bucket *hmebp) 2758 { 2759 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 2760 SFMMU_HASH_UNLOCK(hmebp); 2761 } 2762 2763 /* 2764 * function which checks and sets up page array for a large 2765 * translation. Will set p_vcolor, p_index, p_ro fields. 2766 * Assumes addr and pfnum of first page are properly aligned. 2767 * Will check for physical contiguity. If check fails it return 2768 * non null. 2769 */ 2770 static int 2771 sfmmu_pagearray_setup(caddr_t addr, page_t **pps, tte_t *ttep, int remap) 2772 { 2773 int i, index, ttesz, osz; 2774 pfn_t pfnum; 2775 pgcnt_t npgs; 2776 int cflags = 0; 2777 page_t *pp, *pp1; 2778 kmutex_t *pmtx; 2779 int vac_err = 0; 2780 int newidx = 0; 2781 2782 ttesz = TTE_CSZ(ttep); 2783 2784 ASSERT(ttesz > TTE8K); 2785 2786 npgs = TTEPAGES(ttesz); 2787 index = PAGESZ_TO_INDEX(ttesz); 2788 2789 pfnum = (*pps)->p_pagenum; 2790 ASSERT(IS_P2ALIGNED(pfnum, npgs)); 2791 2792 /* 2793 * Save the first pp so we can do HAT_TMPNC at the end. 2794 */ 2795 pp1 = *pps; 2796 osz = fnd_mapping_sz(pp1); 2797 2798 for (i = 0; i < npgs; i++, pps++) { 2799 pp = *pps; 2800 ASSERT(PAGE_LOCKED(pp)); 2801 ASSERT(pp->p_szc >= ttesz); 2802 ASSERT(pp->p_szc == pp1->p_szc); 2803 ASSERT(sfmmu_mlist_held(pp)); 2804 2805 /* 2806 * XXX is it possible to maintain P_RO on the root only? 2807 */ 2808 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) { 2809 pmtx = sfmmu_page_enter(pp); 2810 PP_CLRRO(pp); 2811 sfmmu_page_exit(pmtx); 2812 } else if (!PP_ISMAPPED(pp) && !TTE_IS_WRITABLE(ttep) && 2813 !PP_ISMOD(pp)) { 2814 pmtx = sfmmu_page_enter(pp); 2815 if (!(PP_ISMOD(pp))) { 2816 PP_SETRO(pp); 2817 } 2818 sfmmu_page_exit(pmtx); 2819 } 2820 2821 /* 2822 * If this is a remap we skip vac & contiguity checks. 2823 */ 2824 if (remap) 2825 continue; 2826 2827 /* 2828 * set p_vcolor and detect any vac conflicts. 2829 */ 2830 if (vac_err == 0) { 2831 vac_err = sfmmu_vacconflict_array(addr, pp, &cflags); 2832 2833 } 2834 2835 /* 2836 * Save current index in case we need to undo it. 2837 * Note: "PAGESZ_TO_INDEX(sz) (1 << (sz))" 2838 * "SFMMU_INDEX_SHIFT 6" 2839 * "SFMMU_INDEX_MASK ((1 << SFMMU_INDEX_SHIFT) - 1)" 2840 * "PP_MAPINDEX(p_index) (p_index & SFMMU_INDEX_MASK)" 2841 * 2842 * So: index = PAGESZ_TO_INDEX(ttesz); 2843 * if ttesz == 1 then index = 0x2 2844 * 2 then index = 0x4 2845 * 3 then index = 0x8 2846 * 4 then index = 0x10 2847 * 5 then index = 0x20 2848 * The code below checks if it's a new pagesize (ie, newidx) 2849 * in case we need to take it back out of p_index, 2850 * and then or's the new index into the existing index. 2851 */ 2852 if ((PP_MAPINDEX(pp) & index) == 0) 2853 newidx = 1; 2854 pp->p_index = (PP_MAPINDEX(pp) | index); 2855 2856 /* 2857 * contiguity check 2858 */ 2859 if (pp->p_pagenum != pfnum) { 2860 /* 2861 * If we fail the contiguity test then 2862 * the only thing we need to fix is the p_index field. 
2863 * We might get a few extra flushes but since this 2864 * path is rare that is ok. The p_ro field will 2865 * get automatically fixed on the next tteload to 2866 * the page. NO TNC bit is set yet. 2867 */ 2868 while (i >= 0) { 2869 pp = *pps; 2870 if (newidx) 2871 pp->p_index = (PP_MAPINDEX(pp) & 2872 ~index); 2873 pps--; 2874 i--; 2875 } 2876 return (1); 2877 } 2878 pfnum++; 2879 addr += MMU_PAGESIZE; 2880 } 2881 2882 if (vac_err) { 2883 if (ttesz > osz) { 2884 /* 2885 * There are some smaller mappings that cause vac 2886 * conflicts. Convert all existing small mappings to 2887 * TNC. 2888 */ 2889 SFMMU_STAT_ADD(sf_uncache_conflict, npgs); 2890 sfmmu_page_cache_array(pp1, HAT_TMPNC, CACHE_FLUSH, 2891 npgs); 2892 } else { 2893 /* EMPTY */ 2894 /* 2895 * If there exists a big page mapping, 2896 * that means the whole existing big page 2897 * has the TNC setting already. No need to convert to 2898 * TNC again. 2899 */ 2900 ASSERT(PP_ISTNC(pp1)); 2901 } 2902 } 2903 2904 return (0); 2905 } 2906 2907 /* 2908 * Routine that checks vac consistency for a large page. It also 2909 * sets the virtual color for all pp's of this big mapping. 2910 */ 2911 static int 2912 sfmmu_vacconflict_array(caddr_t addr, page_t *pp, int *cflags) 2913 { 2914 int vcolor, ocolor; 2915 2916 ASSERT(sfmmu_mlist_held(pp)); 2917 2918 if (PP_ISNC(pp)) { 2919 return (HAT_TMPNC); 2920 } 2921 2922 vcolor = addr_to_vcolor(addr); 2923 if (PP_NEWPAGE(pp)) { 2924 PP_SET_VCOLOR(pp, vcolor); 2925 return (0); 2926 } 2927 2928 ocolor = PP_GET_VCOLOR(pp); 2929 if (ocolor == vcolor) { 2930 return (0); 2931 } 2932 2933 if (!PP_ISMAPPED(pp)) { 2934 /* 2935 * The previous user of the page had a different color 2936 * but since there are no current users 2937 * we just flush the cache and change the color. 2938 * As an optimization for large pages we flush the 2939 * entire cache of that color and set a flag. 2940 */ 2941 SFMMU_STAT(sf_pgcolor_conflict); 2942 if (!CacheColor_IsFlushed(*cflags, ocolor)) { 2943 CacheColor_SetFlushed(*cflags, ocolor); 2944 sfmmu_cache_flushcolor(ocolor, pp->p_pagenum); 2945 } 2946 PP_SET_VCOLOR(pp, vcolor); 2947 return (0); 2948 } 2949 2950 /* 2951 * We got a real conflict with a current mapping. 2952 * Set flags to start uncaching all mappings 2953 * and return failure so we restart looping over 2954 * the pp array from the beginning. 2955 */ 2956 return (HAT_TMPNC); 2957 } 2958 2959 /* 2960 * Creates a large page shadow hmeblk for a tte. 2961 * The purpose of this routine is to allow us to do quick unloads because 2962 * the vm layer can easily pass a very large but sparsely populated range.
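 *
 * In effect, each bit of the shadow hblk's hblk_shw_mask (vshift < 8,
 * set with cas32 below) marks a sub-range of its span under which
 * smaller-size hblks may exist, which is what lets an unload skip the
 * empty portions of the range quickly.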
2963 */ 2964 static struct hme_blk * 2965 sfmmu_shadow_hcreate(sfmmu_t *sfmmup, caddr_t vaddr, int ttesz, uint_t flags) 2966 { 2967 struct hmehash_bucket *hmebp; 2968 hmeblk_tag hblktag; 2969 int hmeshift, size, vshift; 2970 uint_t shw_mask, newshw_mask; 2971 struct hme_blk *hmeblkp; 2972 2973 ASSERT(sfmmup != KHATID); 2974 if (mmu_page_sizes == max_mmu_page_sizes) { 2975 ASSERT(ttesz < TTE256M); 2976 } else { 2977 ASSERT(ttesz < TTE4M); 2978 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0); 2979 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0); 2980 } 2981 2982 if (ttesz == TTE8K) { 2983 size = TTE512K; 2984 } else { 2985 size = ++ttesz; 2986 } 2987 2988 hblktag.htag_id = sfmmup; 2989 hmeshift = HME_HASH_SHIFT(size); 2990 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift); 2991 hblktag.htag_rehash = HME_HASH_REHASH(size); 2992 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift); 2993 2994 SFMMU_HASH_LOCK(hmebp); 2995 2996 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 2997 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve); 2998 if (hmeblkp == NULL) { 2999 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size, 3000 hblktag, flags); 3001 } 3002 ASSERT(hmeblkp); 3003 if (!hmeblkp->hblk_shw_mask) { 3004 /* 3005 * If this is an unused hblk it was just allocated or could 3006 * potentially be a previous large page hblk so we need to 3007 * set the shadow bit. 3008 */ 3009 hmeblkp->hblk_shw_bit = 1; 3010 } 3011 ASSERT(hmeblkp->hblk_shw_bit == 1); 3012 vshift = vaddr_to_vshift(hblktag, vaddr, size); 3013 ASSERT(vshift < 8); 3014 /* 3015 * Atomically set the shw mask bit. 3016 */ 3017 do { 3018 shw_mask = hmeblkp->hblk_shw_mask; 3019 newshw_mask = shw_mask | (1 << vshift); 3020 newshw_mask = cas32(&hmeblkp->hblk_shw_mask, shw_mask, 3021 newshw_mask); 3022 } while (newshw_mask != shw_mask); 3023 3024 SFMMU_HASH_UNLOCK(hmebp); 3025 3026 return (hmeblkp); 3027 } 3028 3029 /* 3030 * This routine cleans up a previous shadow hmeblk and changes it to 3031 * a regular hblk. This happens rarely but it is possible 3032 * when a process wants to use large pages and there are hblks still 3033 * lying around from the previous as that used these hmeblks. 3034 * The alternative was to clean up the shadow hblks at unload time 3035 * but since so few user processes actually use large pages, it is 3036 * better to be lazy and clean up at this time.
3037 */ 3038 static void 3039 sfmmu_shadow_hcleanup(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, 3040 struct hmehash_bucket *hmebp) 3041 { 3042 caddr_t addr, endaddr; 3043 int hashno, size; 3044 3045 ASSERT(hmeblkp->hblk_shw_bit); 3046 3047 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 3048 3049 if (!hmeblkp->hblk_shw_mask) { 3050 hmeblkp->hblk_shw_bit = 0; 3051 return; 3052 } 3053 addr = (caddr_t)get_hblk_base(hmeblkp); 3054 endaddr = get_hblk_endaddr(hmeblkp); 3055 size = get_hblk_ttesz(hmeblkp); 3056 hashno = size - 1; 3057 ASSERT(hashno > 0); 3058 SFMMU_HASH_UNLOCK(hmebp); 3059 3060 sfmmu_free_hblks(sfmmup, addr, endaddr, hashno); 3061 3062 SFMMU_HASH_LOCK(hmebp); 3063 } 3064 3065 static void 3066 sfmmu_free_hblks(sfmmu_t *sfmmup, caddr_t addr, caddr_t endaddr, 3067 int hashno) 3068 { 3069 int hmeshift, shadow = 0; 3070 hmeblk_tag hblktag; 3071 struct hmehash_bucket *hmebp; 3072 struct hme_blk *hmeblkp; 3073 struct hme_blk *nx_hblk, *pr_hblk, *list = NULL; 3074 uint64_t hblkpa, prevpa, nx_pa; 3075 3076 ASSERT(hashno > 0); 3077 hblktag.htag_id = sfmmup; 3078 hblktag.htag_rehash = hashno; 3079 3080 hmeshift = HME_HASH_SHIFT(hashno); 3081 3082 while (addr < endaddr) { 3083 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3084 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 3085 SFMMU_HASH_LOCK(hmebp); 3086 /* inline HME_HASH_SEARCH */ 3087 hmeblkp = hmebp->hmeblkp; 3088 hblkpa = hmebp->hmeh_nextpa; 3089 prevpa = 0; 3090 pr_hblk = NULL; 3091 while (hmeblkp) { 3092 ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp)); 3093 if (HTAGS_EQ(hmeblkp->hblk_tag, hblktag)) { 3094 /* found hme_blk */ 3095 if (hmeblkp->hblk_shw_bit) { 3096 if (hmeblkp->hblk_shw_mask) { 3097 shadow = 1; 3098 sfmmu_shadow_hcleanup(sfmmup, 3099 hmeblkp, hmebp); 3100 break; 3101 } else { 3102 hmeblkp->hblk_shw_bit = 0; 3103 } 3104 } 3105 3106 /* 3107 * Hblk_hmecnt and hblk_vcnt could be non zero 3108 * since hblk_unload() does not gurantee that. 3109 * 3110 * XXX - this could cause tteload() to spin 3111 * where sfmmu_shadow_hcleanup() is called. 3112 */ 3113 } 3114 3115 nx_hblk = hmeblkp->hblk_next; 3116 nx_pa = hmeblkp->hblk_nextpa; 3117 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 3118 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, 3119 pr_hblk); 3120 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 3121 } else { 3122 pr_hblk = hmeblkp; 3123 prevpa = hblkpa; 3124 } 3125 hmeblkp = nx_hblk; 3126 hblkpa = nx_pa; 3127 } 3128 3129 SFMMU_HASH_UNLOCK(hmebp); 3130 3131 if (shadow) { 3132 /* 3133 * We found another shadow hblk so cleaned its 3134 * children. We need to go back and cleanup 3135 * the original hblk so we don't change the 3136 * addr. 3137 */ 3138 shadow = 0; 3139 } else { 3140 addr = (caddr_t)roundup((uintptr_t)addr + 1, 3141 (1 << hmeshift)); 3142 } 3143 } 3144 sfmmu_hblks_list_purge(&list); 3145 } 3146 3147 /* 3148 * Release one hardware address translation lock on the given address range. 3149 */ 3150 void 3151 hat_unlock(struct hat *sfmmup, caddr_t addr, size_t len) 3152 { 3153 struct hmehash_bucket *hmebp; 3154 hmeblk_tag hblktag; 3155 int hmeshift, hashno = 1; 3156 struct hme_blk *hmeblkp, *list = NULL; 3157 caddr_t endaddr; 3158 3159 ASSERT(sfmmup != NULL); 3160 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 3161 3162 ASSERT((sfmmup == ksfmmup) || 3163 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 3164 ASSERT((len & MMU_PAGEOFFSET) == 0); 3165 endaddr = addr + len; 3166 hblktag.htag_id = sfmmup; 3167 3168 /* 3169 * Spitfire supports 4 page sizes. 
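 * These are 8K, 64K, 512K and 4M.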
3170 * Most pages are expected to be of the smallest page size (8K) and 3171 * these will not need to be rehashed. 64K pages also don't need to be 3172 * rehashed because an hmeblk spans 64K of address space. 512K pages 3173 * might need 1 rehash and and 4M pages might need 2 rehashes. 3174 */ 3175 while (addr < endaddr) { 3176 hmeshift = HME_HASH_SHIFT(hashno); 3177 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3178 hblktag.htag_rehash = hashno; 3179 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 3180 3181 SFMMU_HASH_LOCK(hmebp); 3182 3183 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 3184 if (hmeblkp != NULL) { 3185 /* 3186 * If we encounter a shadow hmeblk then 3187 * we know there are no valid hmeblks mapping 3188 * this address at this size or larger. 3189 * Just increment address by the smallest 3190 * page size. 3191 */ 3192 if (hmeblkp->hblk_shw_bit) { 3193 addr += MMU_PAGESIZE; 3194 } else { 3195 addr = sfmmu_hblk_unlock(hmeblkp, addr, 3196 endaddr); 3197 } 3198 SFMMU_HASH_UNLOCK(hmebp); 3199 hashno = 1; 3200 continue; 3201 } 3202 SFMMU_HASH_UNLOCK(hmebp); 3203 3204 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 3205 /* 3206 * We have traversed the whole list and rehashed 3207 * if necessary without finding the address to unlock 3208 * which should never happen. 3209 */ 3210 panic("sfmmu_unlock: addr not found. " 3211 "addr %p hat %p", (void *)addr, (void *)sfmmup); 3212 } else { 3213 hashno++; 3214 } 3215 } 3216 3217 sfmmu_hblks_list_purge(&list); 3218 } 3219 3220 /* 3221 * Function to unlock a range of addresses in an hmeblk. It returns the 3222 * next address that needs to be unlocked. 3223 * Should be called with the hash lock held. 3224 */ 3225 static caddr_t 3226 sfmmu_hblk_unlock(struct hme_blk *hmeblkp, caddr_t addr, caddr_t endaddr) 3227 { 3228 struct sf_hment *sfhme; 3229 tte_t tteold, ttemod; 3230 int ttesz, ret; 3231 3232 ASSERT(in_hblk_range(hmeblkp, addr)); 3233 ASSERT(hmeblkp->hblk_shw_bit == 0); 3234 3235 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 3236 ttesz = get_hblk_ttesz(hmeblkp); 3237 3238 HBLKTOHME(sfhme, hmeblkp, addr); 3239 while (addr < endaddr) { 3240 readtte: 3241 sfmmu_copytte(&sfhme->hme_tte, &tteold); 3242 if (TTE_IS_VALID(&tteold)) { 3243 3244 ttemod = tteold; 3245 3246 ret = sfmmu_modifytte_try(&tteold, &ttemod, 3247 &sfhme->hme_tte); 3248 3249 if (ret < 0) 3250 goto readtte; 3251 3252 if (hmeblkp->hblk_lckcnt == 0) 3253 panic("zero hblk lckcnt"); 3254 3255 if (((uintptr_t)addr + TTEBYTES(ttesz)) > 3256 (uintptr_t)endaddr) 3257 panic("can't unlock large tte"); 3258 3259 ASSERT(hmeblkp->hblk_lckcnt > 0); 3260 atomic_add_16(&hmeblkp->hblk_lckcnt, -1); 3261 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK); 3262 } else { 3263 panic("sfmmu_hblk_unlock: invalid tte"); 3264 } 3265 addr += TTEBYTES(ttesz); 3266 sfhme++; 3267 } 3268 return (addr); 3269 } 3270 3271 /* 3272 * Physical Address Mapping Framework 3273 * 3274 * General rules: 3275 * 3276 * (1) Applies only to seg_kmem memory pages. To make things easier, 3277 * seg_kpm addresses are also accepted by the routines, but nothing 3278 * is done with them since by definition their PA mappings are static. 3279 * (2) hat_add_callback() may only be called while holding the page lock 3280 * SE_SHARED or SE_EXCL of the underlying page (e.g., as_pagelock()). 3281 * (3) prehandler() and posthandler() may not call hat_add_callback() or 3282 * hat_delete_callback(), nor should they allocate memory. Post quiesce 3283 * callbacks may not sleep or acquire adaptive mutex locks. 
3284 * (4) Either prehandler() or posthandler() (but not both) may be specified 3285 * as being NULL. Specifying an errhandler() is optional. 3286 * 3287 * Details of using the framework: 3288 * 3289 * registering a callback (hat_register_callback()) 3290 * 3291 * Pass prehandler, posthandler, errhandler addresses 3292 * as described below. If capture_cpus argument is nonzero, 3293 * suspend callback to the prehandler will occur with CPUs 3294 * captured and executing xc_loop() and CPUs will remain 3295 * captured until after the posthandler suspend callback 3296 * occurs. 3297 * 3298 * adding a callback (hat_add_callback()) 3299 * 3300 * as_pagelock(); 3301 * hat_add_callback(); 3302 * save returned pfn in private data structures or program registers; 3303 * as_pageunlock(); 3304 * 3305 * prehandler() 3306 * 3307 * Stop all accesses by physical address to this memory page. 3308 * Called twice: the first, PRESUSPEND, is a context safe to acquire 3309 * adaptive locks. The second, SUSPEND, is called at high PIL with 3310 * CPUs captured so adaptive locks may NOT be acquired (and all spin 3311 * locks must be XCALL_PIL or higher locks). 3312 * 3313 * May return the following errors: 3314 * EIO: A fatal error has occurred. This will result in panic. 3315 * EAGAIN: The page cannot be suspended. This will fail the 3316 * relocation. 3317 * 0: Success. 3318 * 3319 * posthandler() 3320 * 3321 * Save new pfn in private data structures or program registers; 3322 * not allowed to fail (non-zero return values will result in panic). 3323 * 3324 * errhandler() 3325 * 3326 * called when an error occurs related to the callback. Currently 3327 * the only such error is HAT_CB_ERR_LEAKED which indicates that 3328 * a page is being freed, but there are still outstanding callback(s) 3329 * registered on the page. 3330 * 3331 * removing a callback (hat_delete_callback(); e.g., prior to freeing memory) 3332 * 3333 * stop using physical address 3334 * hat_delete_callback(); 3335 * 3336 */ 3337 3338 /* 3339 * Register a callback class. Each subsystem should do this once and 3340 * cache the id_t returned for use in setting up and tearing down callbacks. 3341 * 3342 * There is no facility for removing callback IDs once they are created; 3343 * the "key" should be unique for each module, so in case a module is unloaded 3344 * and subsequently re-loaded, we can recycle the module's previous entry. 3345 */ 3346 id_t 3347 hat_register_callback(int key, 3348 int (*prehandler)(caddr_t, uint_t, uint_t, void *), 3349 int (*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t), 3350 int (*errhandler)(caddr_t, uint_t, uint_t, void *), 3351 int capture_cpus) 3352 { 3353 id_t id; 3354 3355 /* 3356 * Search the table for a pre-existing callback associated with 3357 * the identifier "key". If one exists, we re-use that entry in 3358 * the table for this instance, otherwise we assign the next 3359 * available table slot. 
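 *
 * A subsystem typically registers once at init time and caches the id;
 * a minimal sketch (hypothetical names, not from this file):
 *
 *	mymod_cbid = hat_register_callback(MYMOD_KEY,
 *	    mymod_prehandler, mymod_posthandler, NULL, 0);
 *
 * Registering again with the same MYMOD_KEY after a module reload just
 * finds and reuses the slot assigned below.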
3360 */ 3361 for (id = 0; id < sfmmu_max_cb_id; id++) { 3362 if (sfmmu_cb_table[id].key == key) 3363 break; 3364 } 3365 3366 if (id == sfmmu_max_cb_id) { 3367 id = sfmmu_cb_nextid++; 3368 if (id >= sfmmu_max_cb_id) 3369 panic("hat_register_callback: out of callback IDs"); 3370 } 3371 3372 ASSERT(prehandler != NULL || posthandler != NULL); 3373 3374 sfmmu_cb_table[id].key = key; 3375 sfmmu_cb_table[id].prehandler = prehandler; 3376 sfmmu_cb_table[id].posthandler = posthandler; 3377 sfmmu_cb_table[id].errhandler = errhandler; 3378 sfmmu_cb_table[id].capture_cpus = capture_cpus; 3379 3380 return (id); 3381 } 3382 3383 /* 3384 * Add relocation callbacks to the specified addr/len which will be called 3385 * when relocating the associated page. See the description of pre and 3386 * posthandler above for more details. IMPT: this operation is only valid 3387 * on seg_kmem pages!! 3388 * 3389 * If HAC_PAGELOCK is included in flags, the underlying memory page is 3390 * locked internally so the caller must be able to deal with the callback 3391 * running even before this function has returned. If HAC_PAGELOCK is not 3392 * set, it is assumed that the underlying memory pages are locked. 3393 * 3394 * Since the caller must track the individual page boundaries anyway, 3395 * we only allow a callback to be added to a single page (large 3396 * or small). Thus [addr, addr + len) MUST be contained within a single 3397 * page. 3398 * 3399 * Registering multiple callbacks on the same [addr, addr+len) is supported, 3400 * in which case the corresponding callback will be called once with each 3401 * unique parameter specified. The number of subsequent deletes must match 3402 * since reference counts are held. If a callback is desired for each 3403 * virtual object with the same parameter specified for multiple callbacks, 3404 * a different virtual address should be specified at the time of 3405 * callback registration. 3406 * 3407 * Returns the pfn of the underlying kernel page in *rpfn 3408 * on success, or PFN_INVALID on failure. 3409 * 3410 * Returns values: 3411 * 0: success 3412 * ENOMEM: memory allocation failure (e.g. flags was passed as HAC_NOSLEEP) 3413 * EINVAL: callback ID is not valid 3414 * ENXIO: ["vaddr", "vaddr" + len) is not mapped in the kernel's address 3415 * space, or crosses a page boundary 3416 */ 3417 int 3418 hat_add_callback(id_t callback_id, caddr_t vaddr, uint_t len, uint_t flags, 3419 void *pvt, pfn_t *rpfn) 3420 { 3421 struct hmehash_bucket *hmebp; 3422 hmeblk_tag hblktag; 3423 struct hme_blk *hmeblkp; 3424 int hmeshift, hashno; 3425 caddr_t saddr, eaddr, baseaddr; 3426 struct pa_hment *pahmep, *tpahmep; 3427 struct sf_hment *sfhmep, *osfhmep, *tsfhmep; 3428 kmutex_t *pml; 3429 tte_t tte; 3430 page_t *pp, *rpp; 3431 pfn_t pfn; 3432 int kmflags = (flags & HAC_SLEEP)? KM_SLEEP : KM_NOSLEEP; 3433 int locked = 0; 3434 3435 /* 3436 * For KPM mappings, just return the physical address since we 3437 * don't need to register any callbacks. 
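 * (seg_kpm addresses are accepted but ignored because their PA mappings
 * are static by definition; see rule (1) of the Physical Address Mapping
 * Framework comment above.)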
3438 */ 3439 if (IS_KPM_ADDR(vaddr)) { 3440 uint64_t paddr; 3441 SFMMU_KPM_VTOP(vaddr, paddr); 3442 *rpfn = btop(paddr); 3443 return (0); 3444 } 3445 3446 if (callback_id < (id_t)0 || callback_id >= sfmmu_cb_nextid) { 3447 *rpfn = PFN_INVALID; 3448 return (EINVAL); 3449 } 3450 3451 if ((pahmep = kmem_cache_alloc(pa_hment_cache, kmflags)) == NULL) { 3452 *rpfn = PFN_INVALID; 3453 return (ENOMEM); 3454 } 3455 3456 sfhmep = &pahmep->sfment; 3457 3458 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK); 3459 eaddr = saddr + len; 3460 3461 rehash: 3462 /* Find the mapping(s) for this page */ 3463 for (hashno = TTE64K, hmeblkp = NULL; 3464 hmeblkp == NULL && hashno <= mmu_hashcnt; 3465 hashno++) { 3466 hmeshift = HME_HASH_SHIFT(hashno); 3467 hblktag.htag_id = ksfmmup; 3468 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift); 3469 hblktag.htag_rehash = hashno; 3470 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift); 3471 3472 SFMMU_HASH_LOCK(hmebp); 3473 3474 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 3475 3476 if (hmeblkp == NULL) 3477 SFMMU_HASH_UNLOCK(hmebp); 3478 } 3479 3480 if (hmeblkp == NULL) { 3481 kmem_cache_free(pa_hment_cache, pahmep); 3482 *rpfn = PFN_INVALID; 3483 return (ENXIO); 3484 } 3485 3486 /* 3487 * Make sure the boundaries for the callback fall within this 3488 * single mapping. 3489 */ 3490 baseaddr = (caddr_t)get_hblk_base(hmeblkp); 3491 ASSERT(saddr >= baseaddr); 3492 if (eaddr > (caddr_t)get_hblk_endaddr(hmeblkp)) { 3493 SFMMU_HASH_UNLOCK(hmebp); 3494 kmem_cache_free(pa_hment_cache, pahmep); 3495 *rpfn = PFN_INVALID; 3496 return (ENXIO); 3497 } 3498 3499 HBLKTOHME(osfhmep, hmeblkp, saddr); 3500 sfmmu_copytte(&osfhmep->hme_tte, &tte); 3501 3502 ASSERT(TTE_IS_VALID(&tte)); 3503 pfn = sfmmu_ttetopfn(&tte, vaddr); 3504 3505 /* 3506 * The pfn may not have a page_t underneath in which case we 3507 * just return it. This can happen if we are doing I/O to a 3508 * static portion of the kernel's address space, for instance. 3509 */ 3510 pp = osfhmep->hme_page; 3511 if (pp == NULL || pp->p_vnode != &kvp) { 3512 SFMMU_HASH_UNLOCK(hmebp); 3513 kmem_cache_free(pa_hment_cache, pahmep); 3514 *rpfn = pfn; 3515 return (0); 3516 } 3517 3518 pml = sfmmu_mlist_enter(pp); 3519 3520 if ((flags & HAC_PAGELOCK) && !locked) { 3521 if (!page_trylock(pp, SE_SHARED)) { 3522 page_t *tpp; 3523 3524 /* 3525 * Somebody is holding SE_EXCL lock. Drop all 3526 * our locks, lookup the page in &kvp, and 3527 * retry. If it doesn't exist in &kvp, then we 3528 * die here; we should have caught it above, 3529 * meaning the page must have changed identity 3530 * (e.g. the caller didn't hold onto the page 3531 * lock after establishing the kernel mapping) 3532 */ 3533 sfmmu_mlist_exit(pml); 3534 SFMMU_HASH_UNLOCK(hmebp); 3535 tpp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED); 3536 if (tpp == NULL) { 3537 panic("hat_add_callback: page not found: 0x%p", 3538 pp); 3539 } 3540 pp = tpp; 3541 rpp = PP_PAGEROOT(pp); 3542 if (rpp != pp) { 3543 page_unlock(pp); 3544 (void) page_lock(rpp, SE_SHARED, NULL, 3545 P_NO_RECLAIM); 3546 } 3547 locked = 1; 3548 goto rehash; 3549 } 3550 locked = 1; 3551 } 3552 3553 if (!PAGE_LOCKED(pp) && !panicstr) 3554 panic("hat_add_callback: page 0x%p not locked", pp); 3555 3556 if (osfhmep->hme_page != pp || pp->p_vnode != &kvp || 3557 pp->p_offset < (u_offset_t)baseaddr || 3558 pp->p_offset > (u_offset_t)eaddr) { 3559 /* 3560 * The page moved before we got our hands on it. Drop 3561 * all the locks and try again. 
3562 */ 3563 ASSERT((flags & HAC_PAGELOCK) != 0); 3564 sfmmu_mlist_exit(pml); 3565 SFMMU_HASH_UNLOCK(hmebp); 3566 page_unlock(pp); 3567 locked = 0; 3568 goto rehash; 3569 } 3570 3571 ASSERT(osfhmep->hme_page == pp); 3572 3573 for (tsfhmep = pp->p_mapping; tsfhmep != NULL; 3574 tsfhmep = tsfhmep->hme_next) { 3575 3576 /* 3577 * skip va to pa mappings 3578 */ 3579 if (!IS_PAHME(tsfhmep)) 3580 continue; 3581 3582 tpahmep = tsfhmep->hme_data; 3583 ASSERT(tpahmep != NULL); 3584 3585 /* 3586 * See if the pahment already exists. 3587 */ 3588 if ((tpahmep->pvt == pvt) && 3589 (tpahmep->addr == vaddr) && 3590 (tpahmep->len == len)) { 3591 ASSERT(tpahmep->cb_id == callback_id); 3592 tpahmep->refcnt++; 3593 pp->p_share++; 3594 3595 sfmmu_mlist_exit(pml); 3596 SFMMU_HASH_UNLOCK(hmebp); 3597 3598 if (locked) 3599 page_unlock(pp); 3600 3601 kmem_cache_free(pa_hment_cache, pahmep); 3602 3603 *rpfn = pfn; 3604 return (0); 3605 } 3606 } 3607 3608 /* 3609 * setup this shiny new pa_hment .. 3610 */ 3611 pp->p_share++; 3612 pahmep->cb_id = callback_id; 3613 pahmep->addr = vaddr; 3614 pahmep->len = len; 3615 pahmep->refcnt = 1; 3616 pahmep->flags = 0; 3617 pahmep->pvt = pvt; 3618 3619 /* 3620 * .. and also set up the sf_hment and link to p_mapping list. 3621 */ 3622 sfhmep->hme_tte.ll = 0; 3623 sfhmep->hme_data = pahmep; 3624 sfhmep->hme_prev = osfhmep; 3625 sfhmep->hme_next = osfhmep->hme_next; 3626 3627 if (osfhmep->hme_next) 3628 osfhmep->hme_next->hme_prev = sfhmep; 3629 3630 osfhmep->hme_next = sfhmep; 3631 3632 sfmmu_mlist_exit(pml); 3633 SFMMU_HASH_UNLOCK(hmebp); 3634 3635 *rpfn = pfn; 3636 if (locked) 3637 page_unlock(pp); 3638 3639 return (0); 3640 } 3641 3642 /* 3643 * Remove the relocation callbacks from the specified addr/len. 3644 */ 3645 void 3646 hat_delete_callback(caddr_t vaddr, uint_t len, void *pvt, uint_t flags) 3647 { 3648 struct hmehash_bucket *hmebp; 3649 hmeblk_tag hblktag; 3650 struct hme_blk *hmeblkp; 3651 int hmeshift, hashno; 3652 caddr_t saddr, eaddr, baseaddr; 3653 struct pa_hment *pahmep; 3654 struct sf_hment *sfhmep, *osfhmep; 3655 kmutex_t *pml; 3656 tte_t tte; 3657 page_t *pp, *rpp; 3658 int locked = 0; 3659 3660 if (IS_KPM_ADDR(vaddr)) 3661 return; 3662 3663 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK); 3664 eaddr = saddr + len; 3665 3666 rehash: 3667 /* Find the mapping(s) for this page */ 3668 for (hashno = TTE64K, hmeblkp = NULL; 3669 hmeblkp == NULL && hashno <= mmu_hashcnt; 3670 hashno++) { 3671 hmeshift = HME_HASH_SHIFT(hashno); 3672 hblktag.htag_id = ksfmmup; 3673 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift); 3674 hblktag.htag_rehash = hashno; 3675 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift); 3676 3677 SFMMU_HASH_LOCK(hmebp); 3678 3679 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 3680 3681 if (hmeblkp == NULL) 3682 SFMMU_HASH_UNLOCK(hmebp); 3683 } 3684 3685 if (hmeblkp == NULL) { 3686 if (!panicstr) { 3687 panic("hat_delete_callback: addr 0x%p not found", 3688 saddr); 3689 } 3690 return; 3691 } 3692 3693 baseaddr = (caddr_t)get_hblk_base(hmeblkp); 3694 HBLKTOHME(osfhmep, hmeblkp, saddr); 3695 3696 sfmmu_copytte(&osfhmep->hme_tte, &tte); 3697 ASSERT(TTE_IS_VALID(&tte)); 3698 3699 pp = osfhmep->hme_page; 3700 if (pp == NULL || pp->p_vnode != &kvp) { 3701 SFMMU_HASH_UNLOCK(hmebp); 3702 return; 3703 } 3704 3705 pml = sfmmu_mlist_enter(pp); 3706 3707 if ((flags & HAC_PAGELOCK) && !locked) { 3708 if (!page_trylock(pp, SE_SHARED)) { 3709 /* 3710 * Somebody is holding SE_EXCL lock. Drop all 3711 * our locks, lookup the page in &kvp, and 3712 * retry. 
3713 */ 3714 sfmmu_mlist_exit(pml); 3715 SFMMU_HASH_UNLOCK(hmebp); 3716 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED); 3717 ASSERT(pp != NULL); 3718 rpp = PP_PAGEROOT(pp); 3719 if (rpp != pp) { 3720 page_unlock(pp); 3721 (void) page_lock(rpp, SE_SHARED, NULL, 3722 P_NO_RECLAIM); 3723 } 3724 locked = 1; 3725 goto rehash; 3726 } 3727 locked = 1; 3728 } 3729 3730 ASSERT(PAGE_LOCKED(pp)); 3731 3732 if (osfhmep->hme_page != pp || pp->p_vnode != &kvp || 3733 pp->p_offset < (u_offset_t)baseaddr || 3734 pp->p_offset > (u_offset_t)eaddr) { 3735 /* 3736 * The page moved before we got our hands on it. Drop 3737 * all the locks and try again. 3738 */ 3739 ASSERT((flags & HAC_PAGELOCK) != 0); 3740 sfmmu_mlist_exit(pml); 3741 SFMMU_HASH_UNLOCK(hmebp); 3742 page_unlock(pp); 3743 locked = 0; 3744 goto rehash; 3745 } 3746 3747 ASSERT(osfhmep->hme_page == pp); 3748 3749 for (sfhmep = pp->p_mapping; sfhmep != NULL; 3750 sfhmep = sfhmep->hme_next) { 3751 3752 /* 3753 * skip va<->pa mappings 3754 */ 3755 if (!IS_PAHME(sfhmep)) 3756 continue; 3757 3758 pahmep = sfhmep->hme_data; 3759 ASSERT(pahmep != NULL); 3760 3761 /* 3762 * if pa_hment matches, remove it 3763 */ 3764 if ((pahmep->pvt == pvt) && 3765 (pahmep->addr == vaddr) && 3766 (pahmep->len == len)) { 3767 break; 3768 } 3769 } 3770 3771 if (sfhmep == NULL) { 3772 if (!panicstr) { 3773 panic("hat_delete_callback: pa_hment not found, pp %p", 3774 (void *)pp); 3775 } 3776 return; 3777 } 3778 3779 /* 3780 * Note: at this point a valid kernel mapping must still be 3781 * present on this page. 3782 */ 3783 pp->p_share--; 3784 if (pp->p_share <= 0) 3785 panic("hat_delete_callback: zero p_share"); 3786 3787 if (--pahmep->refcnt == 0) { 3788 if (pahmep->flags != 0) 3789 panic("hat_delete_callback: pa_hment is busy"); 3790 3791 /* 3792 * Remove sfhmep from the mapping list for the page. 3793 */ 3794 if (sfhmep->hme_prev) { 3795 sfhmep->hme_prev->hme_next = sfhmep->hme_next; 3796 } else { 3797 pp->p_mapping = sfhmep->hme_next; 3798 } 3799 3800 if (sfhmep->hme_next) 3801 sfhmep->hme_next->hme_prev = sfhmep->hme_prev; 3802 3803 sfmmu_mlist_exit(pml); 3804 SFMMU_HASH_UNLOCK(hmebp); 3805 3806 if (locked) 3807 page_unlock(pp); 3808 3809 kmem_cache_free(pa_hment_cache, pahmep); 3810 return; 3811 } 3812 3813 sfmmu_mlist_exit(pml); 3814 SFMMU_HASH_UNLOCK(hmebp); 3815 if (locked) 3816 page_unlock(pp); 3817 } 3818 3819 /* 3820 * hat_probe returns 1 if the translation for the address 'addr' is 3821 * loaded, zero otherwise. 3822 * 3823 * hat_probe should be used only for advisorary purposes because it may 3824 * occasionally return the wrong value. The implementation must guarantee that 3825 * returning the wrong value is a very rare event. hat_probe is used 3826 * to implement optimizations in the segment drivers. 
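 *
 * For example (an illustrative sketch only, not taken from any
 * particular segment driver), a fault handler could use it to skip
 * work when a translation is very likely already loaded:
 *
 *	if (hat_probe(as->a_hat, addr) != 0)
 *		return (0);
 *
 * Since the result is only advisory, correctness must never depend
 * on it; callers have to tolerate the occasional wrong answer.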
3827 * 3828 */ 3829 int 3830 hat_probe(struct hat *sfmmup, caddr_t addr) 3831 { 3832 pfn_t pfn; 3833 tte_t tte; 3834 3835 ASSERT(sfmmup != NULL); 3836 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 3837 3838 ASSERT((sfmmup == ksfmmup) || 3839 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 3840 3841 if (sfmmup == ksfmmup) { 3842 while ((pfn = sfmmu_vatopfn(addr, sfmmup, &tte)) 3843 == PFN_SUSPENDED) { 3844 sfmmu_vatopfn_suspended(addr, sfmmup, &tte); 3845 } 3846 } else { 3847 pfn = sfmmu_uvatopfn(addr, sfmmup); 3848 } 3849 3850 if (pfn != PFN_INVALID) 3851 return (1); 3852 else 3853 return (0); 3854 } 3855 3856 ssize_t 3857 hat_getpagesize(struct hat *sfmmup, caddr_t addr) 3858 { 3859 tte_t tte; 3860 3861 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 3862 3863 sfmmu_gettte(sfmmup, addr, &tte); 3864 if (TTE_IS_VALID(&tte)) { 3865 return (TTEBYTES(TTE_CSZ(&tte))); 3866 } 3867 return (-1); 3868 } 3869 3870 static void 3871 sfmmu_gettte(struct hat *sfmmup, caddr_t addr, tte_t *ttep) 3872 { 3873 struct hmehash_bucket *hmebp; 3874 hmeblk_tag hblktag; 3875 int hmeshift, hashno = 1; 3876 struct hme_blk *hmeblkp, *list = NULL; 3877 struct sf_hment *sfhmep; 3878 3879 /* support for ISM */ 3880 ism_map_t *ism_map; 3881 ism_blk_t *ism_blkp; 3882 int i; 3883 sfmmu_t *ism_hatid = NULL; 3884 sfmmu_t *locked_hatid = NULL; 3885 3886 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 3887 3888 ism_blkp = sfmmup->sfmmu_iblk; 3889 if (ism_blkp) { 3890 sfmmu_ismhat_enter(sfmmup, 0); 3891 locked_hatid = sfmmup; 3892 } 3893 while (ism_blkp && ism_hatid == NULL) { 3894 ism_map = ism_blkp->iblk_maps; 3895 for (i = 0; ism_map[i].imap_ismhat && i < ISM_MAP_SLOTS; i++) { 3896 if (addr >= ism_start(ism_map[i]) && 3897 addr < ism_end(ism_map[i])) { 3898 sfmmup = ism_hatid = ism_map[i].imap_ismhat; 3899 addr = (caddr_t)(addr - 3900 ism_start(ism_map[i])); 3901 break; 3902 } 3903 } 3904 ism_blkp = ism_blkp->iblk_next; 3905 } 3906 if (locked_hatid) { 3907 sfmmu_ismhat_exit(locked_hatid, 0); 3908 } 3909 3910 hblktag.htag_id = sfmmup; 3911 ttep->ll = 0; 3912 3913 do { 3914 hmeshift = HME_HASH_SHIFT(hashno); 3915 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3916 hblktag.htag_rehash = hashno; 3917 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 3918 3919 SFMMU_HASH_LOCK(hmebp); 3920 3921 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 3922 if (hmeblkp != NULL) { 3923 HBLKTOHME(sfhmep, hmeblkp, addr); 3924 sfmmu_copytte(&sfhmep->hme_tte, ttep); 3925 SFMMU_HASH_UNLOCK(hmebp); 3926 break; 3927 } 3928 SFMMU_HASH_UNLOCK(hmebp); 3929 hashno++; 3930 } while (HME_REHASH(sfmmup) && (hashno <= mmu_hashcnt)); 3931 3932 sfmmu_hblks_list_purge(&list); 3933 } 3934 3935 uint_t 3936 hat_getattr(struct hat *sfmmup, caddr_t addr, uint_t *attr) 3937 { 3938 tte_t tte; 3939 3940 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 3941 3942 sfmmu_gettte(sfmmup, addr, &tte); 3943 if (TTE_IS_VALID(&tte)) { 3944 *attr = sfmmu_ptov_attr(&tte); 3945 return (0); 3946 } 3947 *attr = 0; 3948 return ((uint_t)0xffffffff); 3949 } 3950 3951 /* 3952 * Enables more attributes on specified address range (ie. logical OR) 3953 */ 3954 void 3955 hat_setattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr) 3956 { 3957 if (hat->sfmmu_xhat_provider) { 3958 XHAT_SETATTR(hat, addr, len, attr); 3959 return; 3960 } else { 3961 /* 3962 * This must be a CPU HAT. 
If the address space has 3963 * XHATs attached, change attributes for all of them, 3964 * just in case 3965 */ 3966 ASSERT(hat->sfmmu_as != NULL); 3967 if (hat->sfmmu_as->a_xhat != NULL) 3968 xhat_setattr_all(hat->sfmmu_as, addr, len, attr); 3969 } 3970 3971 sfmmu_chgattr(hat, addr, len, attr, SFMMU_SETATTR); 3972 } 3973 3974 /* 3975 * Assigns attributes to the specified address range. All the attributes 3976 * are specified. 3977 */ 3978 void 3979 hat_chgattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr) 3980 { 3981 if (hat->sfmmu_xhat_provider) { 3982 XHAT_CHGATTR(hat, addr, len, attr); 3983 return; 3984 } else { 3985 /* 3986 * This must be a CPU HAT. If the address space has 3987 * XHATs attached, change attributes for all of them, 3988 * just in case 3989 */ 3990 ASSERT(hat->sfmmu_as != NULL); 3991 if (hat->sfmmu_as->a_xhat != NULL) 3992 xhat_chgattr_all(hat->sfmmu_as, addr, len, attr); 3993 } 3994 3995 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CHGATTR); 3996 } 3997 3998 /* 3999 * Remove attributes on the specified address range (ie. loginal NAND) 4000 */ 4001 void 4002 hat_clrattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr) 4003 { 4004 if (hat->sfmmu_xhat_provider) { 4005 XHAT_CLRATTR(hat, addr, len, attr); 4006 return; 4007 } else { 4008 /* 4009 * This must be a CPU HAT. If the address space has 4010 * XHATs attached, change attributes for all of them, 4011 * just in case 4012 */ 4013 ASSERT(hat->sfmmu_as != NULL); 4014 if (hat->sfmmu_as->a_xhat != NULL) 4015 xhat_clrattr_all(hat->sfmmu_as, addr, len, attr); 4016 } 4017 4018 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CLRATTR); 4019 } 4020 4021 /* 4022 * Change attributes on an address range to that specified by attr and mode. 4023 */ 4024 static void 4025 sfmmu_chgattr(struct hat *sfmmup, caddr_t addr, size_t len, uint_t attr, 4026 int mode) 4027 { 4028 struct hmehash_bucket *hmebp; 4029 hmeblk_tag hblktag; 4030 int hmeshift, hashno = 1; 4031 struct hme_blk *hmeblkp, *list = NULL; 4032 caddr_t endaddr; 4033 cpuset_t cpuset; 4034 demap_range_t dmr; 4035 4036 CPUSET_ZERO(cpuset); 4037 4038 ASSERT((sfmmup == ksfmmup) || 4039 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 4040 ASSERT((len & MMU_PAGEOFFSET) == 0); 4041 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0); 4042 4043 if ((attr & PROT_USER) && (mode != SFMMU_CLRATTR) && 4044 ((addr + len) > (caddr_t)USERLIMIT)) { 4045 panic("user addr %p in kernel space", 4046 (void *)addr); 4047 } 4048 4049 endaddr = addr + len; 4050 hblktag.htag_id = sfmmup; 4051 DEMAP_RANGE_INIT(sfmmup, &dmr); 4052 4053 while (addr < endaddr) { 4054 hmeshift = HME_HASH_SHIFT(hashno); 4055 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 4056 hblktag.htag_rehash = hashno; 4057 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 4058 4059 SFMMU_HASH_LOCK(hmebp); 4060 4061 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 4062 if (hmeblkp != NULL) { 4063 /* 4064 * We've encountered a shadow hmeblk so skip the range 4065 * of the next smaller mapping size. 
4066 */ 4067 if (hmeblkp->hblk_shw_bit) { 4068 ASSERT(sfmmup != ksfmmup); 4069 ASSERT(hashno > 1); 4070 addr = (caddr_t)P2END((uintptr_t)addr, 4071 TTEBYTES(hashno - 1)); 4072 } else { 4073 addr = sfmmu_hblk_chgattr(sfmmup, 4074 hmeblkp, addr, endaddr, &dmr, attr, mode); 4075 } 4076 SFMMU_HASH_UNLOCK(hmebp); 4077 hashno = 1; 4078 continue; 4079 } 4080 SFMMU_HASH_UNLOCK(hmebp); 4081 4082 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 4083 /* 4084 * We have traversed the whole list and rehashed 4085 * if necessary without finding the address to chgattr. 4086 * This is ok, so we increment the address by the 4087 * smallest hmeblk range for kernel mappings or for 4088 * user mappings with no large pages, and the largest 4089 * hmeblk range, to account for shadow hmeblks, for 4090 * user mappings with large pages and continue. 4091 */ 4092 if (sfmmup == ksfmmup) 4093 addr = (caddr_t)P2END((uintptr_t)addr, 4094 TTEBYTES(1)); 4095 else 4096 addr = (caddr_t)P2END((uintptr_t)addr, 4097 TTEBYTES(hashno)); 4098 hashno = 1; 4099 } else { 4100 hashno++; 4101 } 4102 } 4103 4104 sfmmu_hblks_list_purge(&list); 4105 DEMAP_RANGE_FLUSH(&dmr); 4106 cpuset = sfmmup->sfmmu_cpusran; 4107 xt_sync(cpuset); 4108 } 4109 4110 /* 4111 * This function chgattr on a range of addresses in an hmeblk. It returns the 4112 * next addres that needs to be chgattr. 4113 * It should be called with the hash lock held. 4114 * XXX It should be possible to optimize chgattr by not flushing every time but 4115 * on the other hand: 4116 * 1. do one flush crosscall. 4117 * 2. only flush if we are increasing permissions (make sure this will work) 4118 */ 4119 static caddr_t 4120 sfmmu_hblk_chgattr(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 4121 caddr_t endaddr, demap_range_t *dmrp, uint_t attr, int mode) 4122 { 4123 tte_t tte, tteattr, tteflags, ttemod; 4124 struct sf_hment *sfhmep; 4125 int ttesz; 4126 struct page *pp = NULL; 4127 kmutex_t *pml, *pmtx; 4128 int ret; 4129 int use_demap_range; 4130 #if defined(SF_ERRATA_57) 4131 int check_exec; 4132 #endif 4133 4134 ASSERT(in_hblk_range(hmeblkp, addr)); 4135 ASSERT(hmeblkp->hblk_shw_bit == 0); 4136 4137 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 4138 ttesz = get_hblk_ttesz(hmeblkp); 4139 4140 /* 4141 * Flush the current demap region if addresses have been 4142 * skipped or the page size doesn't match. 4143 */ 4144 use_demap_range = (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp)); 4145 if (use_demap_range) { 4146 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 4147 } else { 4148 DEMAP_RANGE_FLUSH(dmrp); 4149 } 4150 4151 tteattr.ll = sfmmu_vtop_attr(attr, mode, &tteflags); 4152 #if defined(SF_ERRATA_57) 4153 check_exec = (sfmmup != ksfmmup) && 4154 AS_TYPE_64BIT(sfmmup->sfmmu_as) && 4155 TTE_IS_EXECUTABLE(&tteattr); 4156 #endif 4157 HBLKTOHME(sfhmep, hmeblkp, addr); 4158 while (addr < endaddr) { 4159 sfmmu_copytte(&sfhmep->hme_tte, &tte); 4160 if (TTE_IS_VALID(&tte)) { 4161 if ((tte.ll & tteflags.ll) == tteattr.ll) { 4162 /* 4163 * if the new attr is the same as old 4164 * continue 4165 */ 4166 goto next_addr; 4167 } 4168 if (!TTE_IS_WRITABLE(&tteattr)) { 4169 /* 4170 * make sure we clear hw modify bit if we 4171 * removing write protections 4172 */ 4173 tteflags.tte_intlo |= TTE_HWWR_INT; 4174 } 4175 4176 pml = NULL; 4177 pp = sfhmep->hme_page; 4178 if (pp) { 4179 pml = sfmmu_mlist_enter(pp); 4180 } 4181 4182 if (pp != sfhmep->hme_page) { 4183 /* 4184 * tte must have been unloaded. 
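 * Drop the mapping list lock and retry this address from the
 * top of the loop.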
4185 */ 4186 ASSERT(pml); 4187 sfmmu_mlist_exit(pml); 4188 continue; 4189 } 4190 4191 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 4192 4193 ttemod = tte; 4194 ttemod.ll = (ttemod.ll & ~tteflags.ll) | tteattr.ll; 4195 ASSERT(TTE_TO_TTEPFN(&ttemod) == TTE_TO_TTEPFN(&tte)); 4196 4197 #if defined(SF_ERRATA_57) 4198 if (check_exec && addr < errata57_limit) 4199 ttemod.tte_exec_perm = 0; 4200 #endif 4201 ret = sfmmu_modifytte_try(&tte, &ttemod, 4202 &sfhmep->hme_tte); 4203 4204 if (ret < 0) { 4205 /* tte changed underneath us */ 4206 if (pml) { 4207 sfmmu_mlist_exit(pml); 4208 } 4209 continue; 4210 } 4211 4212 if (tteflags.tte_intlo & TTE_HWWR_INT) { 4213 /* 4214 * need to sync if we are clearing modify bit. 4215 */ 4216 sfmmu_ttesync(sfmmup, addr, &tte, pp); 4217 } 4218 4219 if (pp && PP_ISRO(pp)) { 4220 if (tteattr.tte_intlo & TTE_WRPRM_INT) { 4221 pmtx = sfmmu_page_enter(pp); 4222 PP_CLRRO(pp); 4223 sfmmu_page_exit(pmtx); 4224 } 4225 } 4226 4227 if (ret > 0 && use_demap_range) { 4228 DEMAP_RANGE_MARKPG(dmrp, addr); 4229 } else if (ret > 0) { 4230 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 4231 } 4232 4233 if (pml) { 4234 sfmmu_mlist_exit(pml); 4235 } 4236 } 4237 next_addr: 4238 addr += TTEBYTES(ttesz); 4239 sfhmep++; 4240 DEMAP_RANGE_NEXTPG(dmrp); 4241 } 4242 return (addr); 4243 } 4244 4245 /* 4246 * This routine converts virtual attributes to physical ones. It will 4247 * update the tteflags field with the tte mask corresponding to the attributes 4248 * affected and it returns the new attributes. It will also clear the modify 4249 * bit if we are taking away write permission. This is necessary since the 4250 * modify bit is the hardware permission bit and we need to clear it in order 4251 * to detect write faults. 4252 */ 4253 static uint64_t 4254 sfmmu_vtop_attr(uint_t attr, int mode, tte_t *ttemaskp) 4255 { 4256 tte_t ttevalue; 4257 4258 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 4259 4260 switch (mode) { 4261 case SFMMU_CHGATTR: 4262 /* all attributes specified */ 4263 ttevalue.tte_inthi = MAKE_TTEATTR_INTHI(attr); 4264 ttevalue.tte_intlo = MAKE_TTEATTR_INTLO(attr); 4265 ttemaskp->tte_inthi = TTEINTHI_ATTR; 4266 ttemaskp->tte_intlo = TTEINTLO_ATTR; 4267 break; 4268 case SFMMU_SETATTR: 4269 ASSERT(!(attr & ~HAT_PROT_MASK)); 4270 ttemaskp->ll = 0; 4271 ttevalue.ll = 0; 4272 /* 4273 * a valid tte implies exec and read for sfmmu 4274 * so no need to do anything about them. 4275 * since priviledged access implies user access 4276 * PROT_USER doesn't make sense either. 
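 * That leaves PROT_WRITE as the only protection bit that needs
 * translating here; it maps onto TTE_WRPRM_INT in the code below.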
4277 */ 4278 if (attr & PROT_WRITE) { 4279 ttemaskp->tte_intlo |= TTE_WRPRM_INT; 4280 ttevalue.tte_intlo |= TTE_WRPRM_INT; 4281 } 4282 break; 4283 case SFMMU_CLRATTR: 4284 /* attributes will be nand with current ones */ 4285 if (attr & ~(PROT_WRITE | PROT_USER)) { 4286 panic("sfmmu: attr %x not supported", attr); 4287 } 4288 ttemaskp->ll = 0; 4289 ttevalue.ll = 0; 4290 if (attr & PROT_WRITE) { 4291 /* clear both writable and modify bit */ 4292 ttemaskp->tte_intlo |= TTE_WRPRM_INT | TTE_HWWR_INT; 4293 } 4294 if (attr & PROT_USER) { 4295 ttemaskp->tte_intlo |= TTE_PRIV_INT; 4296 ttevalue.tte_intlo |= TTE_PRIV_INT; 4297 } 4298 break; 4299 default: 4300 panic("sfmmu_vtop_attr: bad mode %x", mode); 4301 } 4302 ASSERT(TTE_TO_TTEPFN(&ttevalue) == 0); 4303 return (ttevalue.ll); 4304 } 4305 4306 static uint_t 4307 sfmmu_ptov_attr(tte_t *ttep) 4308 { 4309 uint_t attr; 4310 4311 ASSERT(TTE_IS_VALID(ttep)); 4312 4313 attr = PROT_READ; 4314 4315 if (TTE_IS_WRITABLE(ttep)) { 4316 attr |= PROT_WRITE; 4317 } 4318 if (TTE_IS_EXECUTABLE(ttep)) { 4319 attr |= PROT_EXEC; 4320 } 4321 if (!TTE_IS_PRIVILEGED(ttep)) { 4322 attr |= PROT_USER; 4323 } 4324 if (TTE_IS_NFO(ttep)) { 4325 attr |= HAT_NOFAULT; 4326 } 4327 if (TTE_IS_NOSYNC(ttep)) { 4328 attr |= HAT_NOSYNC; 4329 } 4330 if (TTE_IS_SIDEFFECT(ttep)) { 4331 attr |= SFMMU_SIDEFFECT; 4332 } 4333 if (!TTE_IS_VCACHEABLE(ttep)) { 4334 attr |= SFMMU_UNCACHEVTTE; 4335 } 4336 if (!TTE_IS_PCACHEABLE(ttep)) { 4337 attr |= SFMMU_UNCACHEPTTE; 4338 } 4339 return (attr); 4340 } 4341 4342 /* 4343 * hat_chgprot is a deprecated hat call. New segment drivers 4344 * should store all attributes and use hat_*attr calls. 4345 * 4346 * Change the protections in the virtual address range 4347 * given to the specified virtual protection. If vprot is ~PROT_WRITE, 4348 * then remove write permission, leaving the other 4349 * permissions unchanged. If vprot is ~PROT_USER, remove user permissions. 4350 * 4351 */ 4352 void 4353 hat_chgprot(struct hat *sfmmup, caddr_t addr, size_t len, uint_t vprot) 4354 { 4355 struct hmehash_bucket *hmebp; 4356 hmeblk_tag hblktag; 4357 int hmeshift, hashno = 1; 4358 struct hme_blk *hmeblkp, *list = NULL; 4359 caddr_t endaddr; 4360 cpuset_t cpuset; 4361 demap_range_t dmr; 4362 4363 ASSERT((len & MMU_PAGEOFFSET) == 0); 4364 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0); 4365 4366 if (sfmmup->sfmmu_xhat_provider) { 4367 XHAT_CHGPROT(sfmmup, addr, len, vprot); 4368 return; 4369 } else { 4370 /* 4371 * This must be a CPU HAT. If the address space has 4372 * XHATs attached, change attributes for all of them, 4373 * just in case 4374 */ 4375 ASSERT(sfmmup->sfmmu_as != NULL); 4376 if (sfmmup->sfmmu_as->a_xhat != NULL) 4377 xhat_chgprot_all(sfmmup->sfmmu_as, addr, len, vprot); 4378 } 4379 4380 CPUSET_ZERO(cpuset); 4381 4382 if ((vprot != (uint_t)~PROT_WRITE) && (vprot & PROT_USER) && 4383 ((addr + len) > (caddr_t)USERLIMIT)) { 4384 panic("user addr %p vprot %x in kernel space", 4385 (void *)addr, vprot); 4386 } 4387 endaddr = addr + len; 4388 hblktag.htag_id = sfmmup; 4389 DEMAP_RANGE_INIT(sfmmup, &dmr); 4390 4391 while (addr < endaddr) { 4392 hmeshift = HME_HASH_SHIFT(hashno); 4393 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 4394 hblktag.htag_rehash = hashno; 4395 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 4396 4397 SFMMU_HASH_LOCK(hmebp); 4398 4399 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 4400 if (hmeblkp != NULL) { 4401 /* 4402 * We've encountered a shadow hmeblk so skip the range 4403 * of the next smaller mapping size. 
4404 */
4405 if (hmeblkp->hblk_shw_bit) {
4406 ASSERT(sfmmup != ksfmmup);
4407 ASSERT(hashno > 1);
4408 addr = (caddr_t)P2END((uintptr_t)addr,
4409 TTEBYTES(hashno - 1));
4410 } else {
4411 addr = sfmmu_hblk_chgprot(sfmmup, hmeblkp,
4412 addr, endaddr, &dmr, vprot);
4413 }
4414 SFMMU_HASH_UNLOCK(hmebp);
4415 hashno = 1;
4416 continue;
4417 }
4418 SFMMU_HASH_UNLOCK(hmebp);
4419
4420 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
4421 /*
4422 * We have traversed the whole list and rehashed
4423 * if necessary without finding the address to chgprot.
4424 * This is ok so we increment the address by the
4425 * smallest hmeblk range for kernel mappings and the
4426 * largest hmeblk range, to account for shadow hmeblks,
4427 * for user mappings and continue.
4428 */
4429 if (sfmmup == ksfmmup)
4430 addr = (caddr_t)P2END((uintptr_t)addr,
4431 TTEBYTES(1));
4432 else
4433 addr = (caddr_t)P2END((uintptr_t)addr,
4434 TTEBYTES(hashno));
4435 hashno = 1;
4436 } else {
4437 hashno++;
4438 }
4439 }
4440
4441 sfmmu_hblks_list_purge(&list);
4442 DEMAP_RANGE_FLUSH(&dmr);
4443 cpuset = sfmmup->sfmmu_cpusran;
4444 xt_sync(cpuset);
4445 }
4446
4447 /*
4448 * This function chgprots a range of addresses in an hmeblk. It returns the
4449 * next address that needs to be chgprot.
4450 * It should be called with the hash lock held.
4451 * XXX It should be possible to optimize chgprot by not flushing every time but
4452 * on the other hand:
4453 * 1. do one flush crosscall.
4454 * 2. only flush if we are increasing permissions (make sure this will work)
4455 */
4456 static caddr_t
4457 sfmmu_hblk_chgprot(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
4458 caddr_t endaddr, demap_range_t *dmrp, uint_t vprot)
4459 {
4460 uint_t pprot;
4461 tte_t tte, ttemod;
4462 struct sf_hment *sfhmep;
4463 uint_t tteflags;
4464 int ttesz;
4465 struct page *pp = NULL;
4466 kmutex_t *pml, *pmtx;
4467 int ret;
4468 int use_demap_range;
4469 #if defined(SF_ERRATA_57)
4470 int check_exec;
4471 #endif
4472
4473 ASSERT(in_hblk_range(hmeblkp, addr));
4474 ASSERT(hmeblkp->hblk_shw_bit == 0);
4475
4476 #ifdef DEBUG
4477 if (get_hblk_ttesz(hmeblkp) != TTE8K &&
4478 (endaddr < get_hblk_endaddr(hmeblkp))) {
4479 panic("sfmmu_hblk_chgprot: partial chgprot of large page");
4480 }
4481 #endif /* DEBUG */
4482
4483 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
4484 ttesz = get_hblk_ttesz(hmeblkp);
4485
4486 pprot = sfmmu_vtop_prot(vprot, &tteflags);
4487 #if defined(SF_ERRATA_57)
4488 check_exec = (sfmmup != ksfmmup) &&
4489 AS_TYPE_64BIT(sfmmup->sfmmu_as) &&
4490 ((vprot & PROT_EXEC) == PROT_EXEC);
4491 #endif
4492 HBLKTOHME(sfhmep, hmeblkp, addr);
4493
4494 /*
4495 * Flush the current demap region if addresses have been
4496 * skipped or the page size doesn't match.
4497 */
4498 use_demap_range = (TTEBYTES(ttesz) == MMU_PAGESIZE);
4499 if (use_demap_range) {
4500 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr);
4501 } else {
4502 DEMAP_RANGE_FLUSH(dmrp);
4503 }
4504
4505 while (addr < endaddr) {
4506 sfmmu_copytte(&sfhmep->hme_tte, &tte);
4507 if (TTE_IS_VALID(&tte)) {
4508 if (TTE_GET_LOFLAGS(&tte, tteflags) == pprot) {
4509 /*
4510 * if the new protection is the same as old
4511 * continue
4512 */
4513 goto next_addr;
4514 }
4515 pml = NULL;
4516 pp = sfhmep->hme_page;
4517 if (pp) {
4518 pml = sfmmu_mlist_enter(pp);
4519 }
4520 if (pp != sfhmep->hme_page) {
4521 /*
4522 * tte must have been unloaded
4523 * underneath us.
Recheck 4524 */ 4525 ASSERT(pml); 4526 sfmmu_mlist_exit(pml); 4527 continue; 4528 } 4529 4530 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 4531 4532 ttemod = tte; 4533 TTE_SET_LOFLAGS(&ttemod, tteflags, pprot); 4534 #if defined(SF_ERRATA_57) 4535 if (check_exec && addr < errata57_limit) 4536 ttemod.tte_exec_perm = 0; 4537 #endif 4538 ret = sfmmu_modifytte_try(&tte, &ttemod, 4539 &sfhmep->hme_tte); 4540 4541 if (ret < 0) { 4542 /* tte changed underneath us */ 4543 if (pml) { 4544 sfmmu_mlist_exit(pml); 4545 } 4546 continue; 4547 } 4548 4549 if (tteflags & TTE_HWWR_INT) { 4550 /* 4551 * need to sync if we are clearing modify bit. 4552 */ 4553 sfmmu_ttesync(sfmmup, addr, &tte, pp); 4554 } 4555 4556 if (pp && PP_ISRO(pp)) { 4557 if (pprot & TTE_WRPRM_INT) { 4558 pmtx = sfmmu_page_enter(pp); 4559 PP_CLRRO(pp); 4560 sfmmu_page_exit(pmtx); 4561 } 4562 } 4563 4564 if (ret > 0 && use_demap_range) { 4565 DEMAP_RANGE_MARKPG(dmrp, addr); 4566 } else if (ret > 0) { 4567 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 4568 } 4569 4570 if (pml) { 4571 sfmmu_mlist_exit(pml); 4572 } 4573 } 4574 next_addr: 4575 addr += TTEBYTES(ttesz); 4576 sfhmep++; 4577 DEMAP_RANGE_NEXTPG(dmrp); 4578 } 4579 return (addr); 4580 } 4581 4582 /* 4583 * This routine is deprecated and should only be used by hat_chgprot. 4584 * The correct routine is sfmmu_vtop_attr. 4585 * This routine converts virtual page protections to physical ones. It will 4586 * update the tteflags field with the tte mask corresponding to the protections 4587 * affected and it returns the new protections. It will also clear the modify 4588 * bit if we are taking away write permission. This is necessary since the 4589 * modify bit is the hardware permission bit and we need to clear it in order 4590 * to detect write faults. 4591 * It accepts the following special protections: 4592 * ~PROT_WRITE = remove write permissions. 4593 * ~PROT_USER = remove user permissions. 
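 *
 * For example (illustrative only), a hat_chgprot() caller that wants
 * to write-protect a range would pass (uint_t)~PROT_WRITE as vprot;
 * that case is handled below by returning 0 and setting the mask to
 * TTE_WRPRM_INT | TTE_HWWR_INT, so both the writable and hw modify
 * bits get cleared in each tte.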
4594 */ 4595 static uint_t 4596 sfmmu_vtop_prot(uint_t vprot, uint_t *tteflagsp) 4597 { 4598 if (vprot == (uint_t)~PROT_WRITE) { 4599 *tteflagsp = TTE_WRPRM_INT | TTE_HWWR_INT; 4600 return (0); /* will cause wrprm to be cleared */ 4601 } 4602 if (vprot == (uint_t)~PROT_USER) { 4603 *tteflagsp = TTE_PRIV_INT; 4604 return (0); /* will cause privprm to be cleared */ 4605 } 4606 if ((vprot == 0) || (vprot == PROT_USER) || 4607 ((vprot & PROT_ALL) != vprot)) { 4608 panic("sfmmu_vtop_prot -- bad prot %x", vprot); 4609 } 4610 4611 switch (vprot) { 4612 case (PROT_READ): 4613 case (PROT_EXEC): 4614 case (PROT_EXEC | PROT_READ): 4615 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT; 4616 return (TTE_PRIV_INT); /* set prv and clr wrt */ 4617 case (PROT_WRITE): 4618 case (PROT_WRITE | PROT_READ): 4619 case (PROT_EXEC | PROT_WRITE): 4620 case (PROT_EXEC | PROT_WRITE | PROT_READ): 4621 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT; 4622 return (TTE_PRIV_INT | TTE_WRPRM_INT); /* set prv and wrt */ 4623 case (PROT_USER | PROT_READ): 4624 case (PROT_USER | PROT_EXEC): 4625 case (PROT_USER | PROT_EXEC | PROT_READ): 4626 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT; 4627 return (0); /* clr prv and wrt */ 4628 case (PROT_USER | PROT_WRITE): 4629 case (PROT_USER | PROT_WRITE | PROT_READ): 4630 case (PROT_USER | PROT_EXEC | PROT_WRITE): 4631 case (PROT_USER | PROT_EXEC | PROT_WRITE | PROT_READ): 4632 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT; 4633 return (TTE_WRPRM_INT); /* clr prv and set wrt */ 4634 default: 4635 panic("sfmmu_vtop_prot -- bad prot %x", vprot); 4636 } 4637 return (0); 4638 } 4639 4640 /* 4641 * Alternate unload for very large virtual ranges. With a true 64 bit VA, 4642 * the normal algorithm would take too long for a very large VA range with 4643 * few real mappings. This routine just walks thru all HMEs in the global 4644 * hash table to find and remove mappings. 4645 */ 4646 static void 4647 hat_unload_large_virtual( 4648 struct hat *sfmmup, 4649 caddr_t startaddr, 4650 size_t len, 4651 uint_t flags, 4652 hat_callback_t *callback) 4653 { 4654 struct hmehash_bucket *hmebp; 4655 struct hme_blk *hmeblkp; 4656 struct hme_blk *pr_hblk = NULL; 4657 struct hme_blk *nx_hblk; 4658 struct hme_blk *list = NULL; 4659 int i; 4660 uint64_t hblkpa, prevpa, nx_pa; 4661 hatlock_t *hatlockp; 4662 struct tsb_info *tsbinfop; 4663 struct ctx *ctx; 4664 caddr_t endaddr = startaddr + len; 4665 caddr_t sa; 4666 caddr_t ea; 4667 caddr_t cb_sa[MAX_CB_ADDR]; 4668 caddr_t cb_ea[MAX_CB_ADDR]; 4669 int addr_cnt = 0; 4670 int a = 0; 4671 int cnum; 4672 4673 hatlockp = sfmmu_hat_enter(sfmmup); 4674 4675 /* 4676 * Since we know we're unmapping a huge range of addresses, 4677 * just throw away the context and switch to another. It's 4678 * cheaper than trying to unmap all of the TTEs we may find 4679 * from the TLB individually, which is too expensive in terms 4680 * of xcalls. Better yet, if we're exiting, no need to flush 4681 * anything at all! 
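 * (sfmmu_free is set when the address space is being torn down,
 * i.e. the process is exiting, so in that case we skip both the
 * context switch and the TSB invalidation below.)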
4682 */ 4683 if (!sfmmup->sfmmu_free) { 4684 ctx = sfmmutoctx(sfmmup); 4685 rw_enter(&ctx->ctx_rwlock, RW_WRITER); 4686 cnum = sfmmutoctxnum(sfmmup); 4687 if (cnum != INVALID_CONTEXT) { 4688 sfmmu_tlb_swap_ctx(sfmmup, ctx); 4689 } 4690 rw_exit(&ctx->ctx_rwlock); 4691 4692 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; 4693 tsbinfop = tsbinfop->tsb_next) { 4694 if (tsbinfop->tsb_flags & TSB_SWAPPED) 4695 continue; 4696 sfmmu_inv_tsb(tsbinfop->tsb_va, 4697 TSB_BYTES(tsbinfop->tsb_szc)); 4698 } 4699 } 4700 4701 /* 4702 * Loop through all the hash buckets of HME blocks looking for matches. 4703 */ 4704 for (i = 0; i <= UHMEHASH_SZ; i++) { 4705 hmebp = &uhme_hash[i]; 4706 SFMMU_HASH_LOCK(hmebp); 4707 hmeblkp = hmebp->hmeblkp; 4708 hblkpa = hmebp->hmeh_nextpa; 4709 prevpa = 0; 4710 pr_hblk = NULL; 4711 while (hmeblkp) { 4712 nx_hblk = hmeblkp->hblk_next; 4713 nx_pa = hmeblkp->hblk_nextpa; 4714 4715 /* 4716 * skip if not this context, if a shadow block or 4717 * if the mapping is not in the requested range 4718 */ 4719 if (hmeblkp->hblk_tag.htag_id != sfmmup || 4720 hmeblkp->hblk_shw_bit || 4721 (sa = (caddr_t)get_hblk_base(hmeblkp)) >= endaddr || 4722 (ea = get_hblk_endaddr(hmeblkp)) <= startaddr) { 4723 pr_hblk = hmeblkp; 4724 prevpa = hblkpa; 4725 goto next_block; 4726 } 4727 4728 /* 4729 * unload if there are any current valid mappings 4730 */ 4731 if (hmeblkp->hblk_vcnt != 0 || 4732 hmeblkp->hblk_hmecnt != 0) 4733 (void) sfmmu_hblk_unload(sfmmup, hmeblkp, 4734 sa, ea, NULL, flags); 4735 4736 /* 4737 * on unmap we also release the HME block itself, once 4738 * all mappings are gone. 4739 */ 4740 if ((flags & HAT_UNLOAD_UNMAP) != 0 && 4741 !hmeblkp->hblk_vcnt && 4742 !hmeblkp->hblk_hmecnt) { 4743 ASSERT(!hmeblkp->hblk_lckcnt); 4744 sfmmu_hblk_hash_rm(hmebp, hmeblkp, 4745 prevpa, pr_hblk); 4746 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 4747 } else { 4748 pr_hblk = hmeblkp; 4749 prevpa = hblkpa; 4750 } 4751 4752 if (callback == NULL) 4753 goto next_block; 4754 4755 /* 4756 * HME blocks may span more than one page, but we may be 4757 * unmapping only one page, so check for a smaller range 4758 * for the callback 4759 */ 4760 if (sa < startaddr) 4761 sa = startaddr; 4762 if (--ea > endaddr) 4763 ea = endaddr - 1; 4764 4765 cb_sa[addr_cnt] = sa; 4766 cb_ea[addr_cnt] = ea; 4767 if (++addr_cnt == MAX_CB_ADDR) { 4768 for (a = 0; a < MAX_CB_ADDR; ++a) { 4769 callback->hcb_start_addr = cb_sa[a]; 4770 callback->hcb_end_addr = cb_ea[a]; 4771 callback->hcb_function(callback); 4772 } 4773 addr_cnt = 0; 4774 } 4775 4776 next_block: 4777 hmeblkp = nx_hblk; 4778 hblkpa = nx_pa; 4779 } 4780 SFMMU_HASH_UNLOCK(hmebp); 4781 } 4782 4783 sfmmu_hblks_list_purge(&list); 4784 4785 for (a = 0; a < addr_cnt; ++a) { 4786 callback->hcb_start_addr = cb_sa[a]; 4787 callback->hcb_end_addr = cb_ea[a]; 4788 callback->hcb_function(callback); 4789 } 4790 4791 sfmmu_hat_exit(hatlockp); 4792 4793 /* 4794 * Check TSB and TLB page sizes if the process isn't exiting. 4795 */ 4796 if (!sfmmup->sfmmu_free) 4797 sfmmu_check_page_sizes(sfmmup, 0); 4798 } 4799 4800 4801 /* 4802 * Unload all the mappings in the range [addr..addr+len). addr and len must 4803 * be MMU_PAGESIZE aligned. 
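 *
 * An illustrative sketch of the callback form (hypothetical consumer
 * code, not part of this file): the callback is invoked with
 * hcb_start_addr and hcb_end_addr filled in for each chunk of address
 * space that was actually unloaded.
 *
 *	static void
 *	my_unload_notify(hat_callback_t *cb)
 *	{
 *		cmn_err(CE_NOTE, "unloaded %p..%p",
 *		    (void *)cb->hcb_start_addr, (void *)cb->hcb_end_addr);
 *	}
 *
 *	hat_callback_t cb;
 *
 *	cb.hcb_function = my_unload_notify;
 *	hat_unload_callback(as->a_hat, addr, len, HAT_UNLOAD_UNMAP, &cb);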
4804 */ 4805 4806 extern struct seg *segkmap; 4807 #define ISSEGKMAP(sfmmup, addr) (sfmmup == ksfmmup && \ 4808 segkmap->s_base <= (addr) && (addr) < (segkmap->s_base + segkmap->s_size)) 4809 4810 4811 void 4812 hat_unload_callback( 4813 struct hat *sfmmup, 4814 caddr_t addr, 4815 size_t len, 4816 uint_t flags, 4817 hat_callback_t *callback) 4818 { 4819 struct hmehash_bucket *hmebp; 4820 hmeblk_tag hblktag; 4821 int hmeshift, hashno, iskernel; 4822 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL; 4823 caddr_t endaddr; 4824 cpuset_t cpuset; 4825 uint64_t hblkpa, prevpa; 4826 int addr_count = 0; 4827 int a; 4828 caddr_t cb_start_addr[MAX_CB_ADDR]; 4829 caddr_t cb_end_addr[MAX_CB_ADDR]; 4830 int issegkmap = ISSEGKMAP(sfmmup, addr); 4831 demap_range_t dmr, *dmrp; 4832 4833 if (sfmmup->sfmmu_xhat_provider) { 4834 XHAT_UNLOAD_CALLBACK(sfmmup, addr, len, flags, callback); 4835 return; 4836 } else { 4837 /* 4838 * This must be a CPU HAT. If the address space has 4839 * XHATs attached, unload the mappings for all of them, 4840 * just in case 4841 */ 4842 ASSERT(sfmmup->sfmmu_as != NULL); 4843 if (sfmmup->sfmmu_as->a_xhat != NULL) 4844 xhat_unload_callback_all(sfmmup->sfmmu_as, addr, 4845 len, flags, callback); 4846 } 4847 4848 ASSERT((sfmmup == ksfmmup) || (flags & HAT_UNLOAD_OTHER) || \ 4849 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 4850 4851 ASSERT(sfmmup != NULL); 4852 ASSERT((len & MMU_PAGEOFFSET) == 0); 4853 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 4854 4855 /* 4856 * Probing through a large VA range (say 63 bits) will be slow, even 4857 * at 4 Meg steps between the probes. So, when the virtual address range 4858 * is very large, search the HME entries for what to unload. 4859 * 4860 * len >> TTE_PAGE_SHIFT(TTE4M) is the # of 4Meg probes we'd need 4861 * 4862 * UHMEHASH_SZ is number of hash buckets to examine 4863 * 4864 */ 4865 if (sfmmup != KHATID && (len >> TTE_PAGE_SHIFT(TTE4M)) > UHMEHASH_SZ) { 4866 hat_unload_large_virtual(sfmmup, addr, len, flags, callback); 4867 return; 4868 } 4869 4870 CPUSET_ZERO(cpuset); 4871 4872 /* 4873 * If the process is exiting, we can save a lot of fuss since 4874 * we'll flush the TLB when we free the ctx anyway. 4875 */ 4876 if (sfmmup->sfmmu_free) 4877 dmrp = NULL; 4878 else 4879 dmrp = &dmr; 4880 4881 DEMAP_RANGE_INIT(sfmmup, dmrp); 4882 endaddr = addr + len; 4883 hblktag.htag_id = sfmmup; 4884 4885 /* 4886 * It is likely for the vm to call unload over a wide range of 4887 * addresses that are actually very sparsely populated by 4888 * translations. In order to speed this up the sfmmu hat supports 4889 * the concept of shadow hmeblks. Dummy large page hmeblks that 4890 * correspond to actual small translations are allocated at tteload 4891 * time and are referred to as shadow hmeblks. Now, during unload 4892 * time, we first check if we have a shadow hmeblk for that 4893 * translation. The absence of one means the corresponding address 4894 * range is empty and can be skipped. 4895 * 4896 * The kernel is an exception to above statement and that is why 4897 * we don't use shadow hmeblks and hash starting from the smallest 4898 * page size. 
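 * That is why the loop below starts with hashno = TTE64K for the
 * kernel, but with the largest supported size (TTE4M or TTE256M)
 * for user address spaces.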
4899 */ 4900 if (sfmmup == KHATID) { 4901 iskernel = 1; 4902 hashno = TTE64K; 4903 } else { 4904 iskernel = 0; 4905 if (mmu_page_sizes == max_mmu_page_sizes) { 4906 hashno = TTE256M; 4907 } else { 4908 hashno = TTE4M; 4909 } 4910 } 4911 while (addr < endaddr) { 4912 hmeshift = HME_HASH_SHIFT(hashno); 4913 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 4914 hblktag.htag_rehash = hashno; 4915 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 4916 4917 SFMMU_HASH_LOCK(hmebp); 4918 4919 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, hblkpa, pr_hblk, 4920 prevpa, &list); 4921 if (hmeblkp == NULL) { 4922 /* 4923 * didn't find an hmeblk. skip the appropiate 4924 * address range. 4925 */ 4926 SFMMU_HASH_UNLOCK(hmebp); 4927 if (iskernel) { 4928 if (hashno < mmu_hashcnt) { 4929 hashno++; 4930 continue; 4931 } else { 4932 hashno = TTE64K; 4933 addr = (caddr_t)roundup((uintptr_t)addr 4934 + 1, MMU_PAGESIZE64K); 4935 continue; 4936 } 4937 } 4938 addr = (caddr_t)roundup((uintptr_t)addr + 1, 4939 (1 << hmeshift)); 4940 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 4941 ASSERT(hashno == TTE64K); 4942 continue; 4943 } 4944 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 4945 hashno = TTE512K; 4946 continue; 4947 } 4948 if (mmu_page_sizes == max_mmu_page_sizes) { 4949 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 4950 hashno = TTE4M; 4951 continue; 4952 } 4953 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 4954 hashno = TTE32M; 4955 continue; 4956 } 4957 hashno = TTE256M; 4958 continue; 4959 } else { 4960 hashno = TTE4M; 4961 continue; 4962 } 4963 } 4964 ASSERT(hmeblkp); 4965 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 4966 /* 4967 * If the valid count is zero we can skip the range 4968 * mapped by this hmeblk. 4969 * We free hblks in the case of HAT_UNMAP. HAT_UNMAP 4970 * is used by segment drivers as a hint 4971 * that the mapping resource won't be used any longer. 4972 * The best example of this is during exit(). 4973 */ 4974 addr = (caddr_t)roundup((uintptr_t)addr + 1, 4975 get_hblk_span(hmeblkp)); 4976 if ((flags & HAT_UNLOAD_UNMAP) || 4977 (iskernel && !issegkmap)) { 4978 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, 4979 pr_hblk); 4980 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 4981 } 4982 SFMMU_HASH_UNLOCK(hmebp); 4983 4984 if (iskernel) { 4985 hashno = TTE64K; 4986 continue; 4987 } 4988 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 4989 ASSERT(hashno == TTE64K); 4990 continue; 4991 } 4992 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 4993 hashno = TTE512K; 4994 continue; 4995 } 4996 if (mmu_page_sizes == max_mmu_page_sizes) { 4997 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 4998 hashno = TTE4M; 4999 continue; 5000 } 5001 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 5002 hashno = TTE32M; 5003 continue; 5004 } 5005 hashno = TTE256M; 5006 continue; 5007 } else { 5008 hashno = TTE4M; 5009 continue; 5010 } 5011 } 5012 if (hmeblkp->hblk_shw_bit) { 5013 /* 5014 * If we encounter a shadow hmeblk we know there is 5015 * smaller sized hmeblks mapping the same address space. 5016 * Decrement the hash size and rehash. 5017 */ 5018 ASSERT(sfmmup != KHATID); 5019 hashno--; 5020 SFMMU_HASH_UNLOCK(hmebp); 5021 continue; 5022 } 5023 5024 /* 5025 * track callback address ranges. 
5026 * only start a new range when it's not contiguous 5027 */ 5028 if (callback != NULL) { 5029 if (addr_count > 0 && 5030 addr == cb_end_addr[addr_count - 1]) 5031 --addr_count; 5032 else 5033 cb_start_addr[addr_count] = addr; 5034 } 5035 5036 addr = sfmmu_hblk_unload(sfmmup, hmeblkp, addr, endaddr, 5037 dmrp, flags); 5038 5039 if (callback != NULL) 5040 cb_end_addr[addr_count++] = addr; 5041 5042 if (((flags & HAT_UNLOAD_UNMAP) || (iskernel && !issegkmap)) && 5043 !hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 5044 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, 5045 pr_hblk); 5046 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 5047 } 5048 SFMMU_HASH_UNLOCK(hmebp); 5049 5050 /* 5051 * Notify our caller as to exactly which pages 5052 * have been unloaded. We do these in clumps, 5053 * to minimize the number of xt_sync()s that need to occur. 5054 */ 5055 if (callback != NULL && addr_count == MAX_CB_ADDR) { 5056 DEMAP_RANGE_FLUSH(dmrp); 5057 if (dmrp != NULL) { 5058 cpuset = sfmmup->sfmmu_cpusran; 5059 xt_sync(cpuset); 5060 } 5061 5062 for (a = 0; a < MAX_CB_ADDR; ++a) { 5063 callback->hcb_start_addr = cb_start_addr[a]; 5064 callback->hcb_end_addr = cb_end_addr[a]; 5065 callback->hcb_function(callback); 5066 } 5067 addr_count = 0; 5068 } 5069 if (iskernel) { 5070 hashno = TTE64K; 5071 continue; 5072 } 5073 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 5074 ASSERT(hashno == TTE64K); 5075 continue; 5076 } 5077 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 5078 hashno = TTE512K; 5079 continue; 5080 } 5081 if (mmu_page_sizes == max_mmu_page_sizes) { 5082 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 5083 hashno = TTE4M; 5084 continue; 5085 } 5086 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 5087 hashno = TTE32M; 5088 continue; 5089 } 5090 hashno = TTE256M; 5091 } else { 5092 hashno = TTE4M; 5093 } 5094 } 5095 5096 sfmmu_hblks_list_purge(&list); 5097 DEMAP_RANGE_FLUSH(dmrp); 5098 if (dmrp != NULL) { 5099 cpuset = sfmmup->sfmmu_cpusran; 5100 xt_sync(cpuset); 5101 } 5102 if (callback && addr_count != 0) { 5103 for (a = 0; a < addr_count; ++a) { 5104 callback->hcb_start_addr = cb_start_addr[a]; 5105 callback->hcb_end_addr = cb_end_addr[a]; 5106 callback->hcb_function(callback); 5107 } 5108 } 5109 5110 /* 5111 * Check TSB and TLB page sizes if the process isn't exiting. 5112 */ 5113 if (!sfmmup->sfmmu_free) 5114 sfmmu_check_page_sizes(sfmmup, 0); 5115 } 5116 5117 /* 5118 * Unload all the mappings in the range [addr..addr+len). addr and len must 5119 * be MMU_PAGESIZE aligned. 5120 */ 5121 void 5122 hat_unload(struct hat *sfmmup, caddr_t addr, size_t len, uint_t flags) 5123 { 5124 if (sfmmup->sfmmu_xhat_provider) { 5125 XHAT_UNLOAD(sfmmup, addr, len, flags); 5126 return; 5127 } 5128 hat_unload_callback(sfmmup, addr, len, flags, NULL); 5129 } 5130 5131 5132 /* 5133 * Find the largest mapping size for this page. 5134 */ 5135 static int 5136 fnd_mapping_sz(page_t *pp) 5137 { 5138 int sz; 5139 int p_index; 5140 5141 p_index = PP_MAPINDEX(pp); 5142 5143 sz = 0; 5144 p_index >>= 1; /* don't care about 8K bit */ 5145 for (; p_index; p_index >>= 1) { 5146 sz++; 5147 } 5148 5149 return (sz); 5150 } 5151 5152 /* 5153 * This function unloads a range of addresses for an hmeblk. 5154 * It returns the next address to be unloaded. 5155 * It should be called with the hash lock held. 
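 * The dmrp argument may be NULL; hat_unload_callback() passes NULL
 * when the process is exiting, since the TLB will be flushed
 * wholesale when the ctx is freed.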
5156 */ 5157 static caddr_t 5158 sfmmu_hblk_unload(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 5159 caddr_t endaddr, demap_range_t *dmrp, uint_t flags) 5160 { 5161 tte_t tte, ttemod; 5162 struct sf_hment *sfhmep; 5163 int ttesz; 5164 long ttecnt; 5165 page_t *pp; 5166 kmutex_t *pml; 5167 int ret; 5168 int use_demap_range; 5169 5170 ASSERT(in_hblk_range(hmeblkp, addr)); 5171 ASSERT(!hmeblkp->hblk_shw_bit); 5172 #ifdef DEBUG 5173 if (get_hblk_ttesz(hmeblkp) != TTE8K && 5174 (endaddr < get_hblk_endaddr(hmeblkp))) { 5175 panic("sfmmu_hblk_unload: partial unload of large page"); 5176 } 5177 #endif /* DEBUG */ 5178 5179 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 5180 ttesz = get_hblk_ttesz(hmeblkp); 5181 5182 use_demap_range = (do_virtual_coloring && 5183 TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp)); 5184 if (use_demap_range) { 5185 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 5186 } else { 5187 DEMAP_RANGE_FLUSH(dmrp); 5188 } 5189 ttecnt = 0; 5190 HBLKTOHME(sfhmep, hmeblkp, addr); 5191 5192 while (addr < endaddr) { 5193 pml = NULL; 5194 again: 5195 sfmmu_copytte(&sfhmep->hme_tte, &tte); 5196 if (TTE_IS_VALID(&tte)) { 5197 pp = sfhmep->hme_page; 5198 if (pp && pml == NULL) { 5199 pml = sfmmu_mlist_enter(pp); 5200 } 5201 5202 /* 5203 * Verify if hme still points to 'pp' now that 5204 * we have p_mapping lock. 5205 */ 5206 if (sfhmep->hme_page != pp) { 5207 if (pp != NULL && sfhmep->hme_page != NULL) { 5208 if (pml) { 5209 sfmmu_mlist_exit(pml); 5210 } 5211 /* Re-start this iteration. */ 5212 continue; 5213 } 5214 ASSERT((pp != NULL) && 5215 (sfhmep->hme_page == NULL)); 5216 goto tte_unloaded; 5217 } 5218 5219 /* 5220 * This point on we have both HASH and p_mapping 5221 * lock. 5222 */ 5223 ASSERT(pp == sfhmep->hme_page); 5224 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 5225 5226 /* 5227 * We need to loop on modify tte because it is 5228 * possible for pagesync to come along and 5229 * change the software bits beneath us. 5230 * 5231 * Page_unload can also invalidate the tte after 5232 * we read tte outside of p_mapping lock. 5233 */ 5234 ttemod = tte; 5235 5236 TTE_SET_INVALID(&ttemod); 5237 ret = sfmmu_modifytte_try(&tte, &ttemod, 5238 &sfhmep->hme_tte); 5239 5240 if (ret <= 0) { 5241 if (TTE_IS_VALID(&tte)) { 5242 goto again; 5243 } else { 5244 /* 5245 * We read in a valid pte, but it 5246 * is unloaded by page_unload. 5247 * hme_page has become NULL and 5248 * we hold no p_mapping lock. 5249 */ 5250 ASSERT(pp == NULL && pml == NULL); 5251 goto tte_unloaded; 5252 } 5253 } 5254 5255 if (!(flags & HAT_UNLOAD_NOSYNC)) { 5256 sfmmu_ttesync(sfmmup, addr, &tte, pp); 5257 } 5258 5259 /* 5260 * Ok- we invalidated the tte. Do the rest of the job. 5261 */ 5262 ttecnt++; 5263 5264 if (flags & HAT_UNLOAD_UNLOCK) { 5265 ASSERT(hmeblkp->hblk_lckcnt > 0); 5266 atomic_add_16(&hmeblkp->hblk_lckcnt, -1); 5267 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK); 5268 } 5269 5270 /* 5271 * Normally we would need to flush the page 5272 * from the virtual cache at this point in 5273 * order to prevent a potential cache alias 5274 * inconsistency. 5275 * The particular scenario we need to worry 5276 * about is: 5277 * Given: va1 and va2 are two virtual address 5278 * that alias and map the same physical 5279 * address. 5280 * 1. mapping exists from va1 to pa and data 5281 * has been read into the cache. 5282 * 2. unload va1. 5283 * 3. load va2 and modify data using va2. 5284 * 4 unload va2. 5285 * 5. load va1 and reference data. Unless we 5286 * flush the data cache when we unload we will 5287 * get stale data. 
5288 * Fortunately, page coloring eliminates the 5289 * above scenario by remembering the color a 5290 * physical page was last or is currently 5291 * mapped to. Now, we delay the flush until 5292 * the loading of translations. Only when the 5293 * new translation is of a different color 5294 * are we forced to flush. 5295 */ 5296 if (use_demap_range) { 5297 /* 5298 * Mark this page as needing a demap. 5299 */ 5300 DEMAP_RANGE_MARKPG(dmrp, addr); 5301 } else { 5302 if (do_virtual_coloring) { 5303 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 5304 sfmmup->sfmmu_free, 0); 5305 } else { 5306 pfn_t pfnum; 5307 5308 pfnum = TTE_TO_PFN(addr, &tte); 5309 sfmmu_tlbcache_demap(addr, sfmmup, 5310 hmeblkp, pfnum, sfmmup->sfmmu_free, 5311 FLUSH_NECESSARY_CPUS, 5312 CACHE_FLUSH, 0); 5313 } 5314 } 5315 5316 if (pp) { 5317 /* 5318 * Remove the hment from the mapping list 5319 */ 5320 ASSERT(hmeblkp->hblk_hmecnt > 0); 5321 5322 /* 5323 * Again, we cannot 5324 * ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS); 5325 */ 5326 HME_SUB(sfhmep, pp); 5327 membar_stst(); 5328 atomic_add_16(&hmeblkp->hblk_hmecnt, -1); 5329 } 5330 5331 ASSERT(hmeblkp->hblk_vcnt > 0); 5332 atomic_add_16(&hmeblkp->hblk_vcnt, -1); 5333 5334 ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt || 5335 !hmeblkp->hblk_lckcnt); 5336 5337 if (pp && (pp->p_nrm & (P_KPMC | P_KPMS | P_TNC))) { 5338 if (PP_ISTNC(pp)) { 5339 /* 5340 * If page was temporary 5341 * uncached, try to recache 5342 * it. Note that HME_SUB() was 5343 * called above so p_index and 5344 * mlist had been updated. 5345 */ 5346 conv_tnc(pp, ttesz); 5347 } else if (pp->p_mapping == NULL) { 5348 ASSERT(kpm_enable); 5349 /* 5350 * Page is marked to be in VAC conflict 5351 * to an existing kpm mapping and/or is 5352 * kpm mapped using only the regular 5353 * pagesize. 5354 */ 5355 sfmmu_kpm_hme_unload(pp); 5356 } 5357 } 5358 } else if ((pp = sfhmep->hme_page) != NULL) { 5359 /* 5360 * TTE is invalid but the hme 5361 * still exists. let pageunload 5362 * complete its job. 5363 */ 5364 ASSERT(pml == NULL); 5365 pml = sfmmu_mlist_enter(pp); 5366 if (sfhmep->hme_page != NULL) { 5367 sfmmu_mlist_exit(pml); 5368 pml = NULL; 5369 goto again; 5370 } 5371 ASSERT(sfhmep->hme_page == NULL); 5372 } else if (hmeblkp->hblk_hmecnt != 0) { 5373 /* 5374 * pageunload may have not finished decrementing 5375 * hblk_vcnt and hblk_hmecnt. Find page_t if any and 5376 * wait for pageunload to finish. Rely on pageunload 5377 * to decrement hblk_hmecnt after hblk_vcnt. 5378 */ 5379 pfn_t pfn = TTE_TO_TTEPFN(&tte); 5380 ASSERT(pml == NULL); 5381 if (pf_is_memory(pfn)) { 5382 pp = page_numtopp_nolock(pfn); 5383 if (pp != NULL) { 5384 pml = sfmmu_mlist_enter(pp); 5385 sfmmu_mlist_exit(pml); 5386 pml = NULL; 5387 } 5388 } 5389 } 5390 5391 tte_unloaded: 5392 /* 5393 * At this point, the tte we are looking at 5394 * should be unloaded, and hme has been unlinked 5395 * from page too. This is important because in 5396 * pageunload, it does ttesync() then HME_SUB. 5397 * We need to make sure HME_SUB has been completed 5398 * so we know ttesync() has been completed. Otherwise, 5399 * at exit time, after return from hat layer, VM will 5400 * release as structure which hat_setstat() (called 5401 * by ttesync()) needs. 
5402 */
5403 #ifdef DEBUG
5404 {
5405 tte_t dtte;
5406
5407 ASSERT(sfhmep->hme_page == NULL);
5408
5409 sfmmu_copytte(&sfhmep->hme_tte, &dtte);
5410 ASSERT(!TTE_IS_VALID(&dtte));
5411 }
5412 #endif
5413
5414 if (pml) {
5415 sfmmu_mlist_exit(pml);
5416 }
5417
5418 addr += TTEBYTES(ttesz);
5419 sfhmep++;
5420 DEMAP_RANGE_NEXTPG(dmrp);
5421 }
5422 if (ttecnt > 0)
5423 atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -ttecnt);
5424 return (addr);
5425 }
5426
5427 /*
5428 * Synchronize all the mappings in the range [addr..addr+len).
5429 * Can be called with clearflag having two states:
5430 * HAT_SYNC_DONTZERO means just return the rm stats
5431 * HAT_SYNC_ZERORM means zero rm bits in the tte and return the stats
5432 */
5433 void
5434 hat_sync(struct hat *sfmmup, caddr_t addr, size_t len, uint_t clearflag)
5435 {
5436 struct hmehash_bucket *hmebp;
5437 hmeblk_tag hblktag;
5438 int hmeshift, hashno = 1;
5439 struct hme_blk *hmeblkp, *list = NULL;
5440 caddr_t endaddr;
5441 cpuset_t cpuset;
5442
5443 ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
5444 ASSERT((sfmmup == ksfmmup) ||
5445 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
5446 ASSERT((len & MMU_PAGEOFFSET) == 0);
5447 ASSERT((clearflag == HAT_SYNC_DONTZERO) ||
5448 (clearflag == HAT_SYNC_ZERORM));
5449
5450 CPUSET_ZERO(cpuset);
5451
5452 endaddr = addr + len;
5453 hblktag.htag_id = sfmmup;
5454 /*
5455 * Spitfire supports 4 page sizes.
5456 * Most pages are expected to be of the smallest page
5457 * size (8K) and these will not need to be rehashed. 64K
5458 * pages also don't need to be rehashed because an hmeblk
5459 * spans 64K of address space. 512K pages might need 1 rehash
5460 * and 4M pages 2 rehashes.
5461 */
5462 while (addr < endaddr) {
5463 hmeshift = HME_HASH_SHIFT(hashno);
5464 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
5465 hblktag.htag_rehash = hashno;
5466 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
5467
5468 SFMMU_HASH_LOCK(hmebp);
5469
5470 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
5471 if (hmeblkp != NULL) {
5472 /*
5473 * We've encountered a shadow hmeblk so skip the range
5474 * of the next smaller mapping size.
5475 */
5476 if (hmeblkp->hblk_shw_bit) {
5477 ASSERT(sfmmup != ksfmmup);
5478 ASSERT(hashno > 1);
5479 addr = (caddr_t)P2END((uintptr_t)addr,
5480 TTEBYTES(hashno - 1));
5481 } else {
5482 addr = sfmmu_hblk_sync(sfmmup, hmeblkp,
5483 addr, endaddr, clearflag);
5484 }
5485 SFMMU_HASH_UNLOCK(hmebp);
5486 hashno = 1;
5487 continue;
5488 }
5489 SFMMU_HASH_UNLOCK(hmebp);
5490
5491 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
5492 /*
5493 * We have traversed the whole list and rehashed
5494 * if necessary without finding the address to sync.
5495 * This is ok so we increment the address by the
5496 * smallest hmeblk range for kernel mappings and the
5497 * largest hmeblk range, to account for shadow hmeblks,
5498 * for user mappings and continue.
5499 */ 5500 if (sfmmup == ksfmmup) 5501 addr = (caddr_t)P2END((uintptr_t)addr, 5502 TTEBYTES(1)); 5503 else 5504 addr = (caddr_t)P2END((uintptr_t)addr, 5505 TTEBYTES(hashno)); 5506 hashno = 1; 5507 } else { 5508 hashno++; 5509 } 5510 } 5511 sfmmu_hblks_list_purge(&list); 5512 cpuset = sfmmup->sfmmu_cpusran; 5513 xt_sync(cpuset); 5514 } 5515 5516 static caddr_t 5517 sfmmu_hblk_sync(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 5518 caddr_t endaddr, int clearflag) 5519 { 5520 tte_t tte, ttemod; 5521 struct sf_hment *sfhmep; 5522 int ttesz; 5523 struct page *pp; 5524 kmutex_t *pml; 5525 int ret; 5526 5527 ASSERT(hmeblkp->hblk_shw_bit == 0); 5528 5529 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 5530 5531 ttesz = get_hblk_ttesz(hmeblkp); 5532 HBLKTOHME(sfhmep, hmeblkp, addr); 5533 5534 while (addr < endaddr) { 5535 sfmmu_copytte(&sfhmep->hme_tte, &tte); 5536 if (TTE_IS_VALID(&tte)) { 5537 pml = NULL; 5538 pp = sfhmep->hme_page; 5539 if (pp) { 5540 pml = sfmmu_mlist_enter(pp); 5541 } 5542 if (pp != sfhmep->hme_page) { 5543 /* 5544 * tte most have been unloaded 5545 * underneath us. Recheck 5546 */ 5547 ASSERT(pml); 5548 sfmmu_mlist_exit(pml); 5549 continue; 5550 } 5551 5552 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 5553 5554 if (clearflag == HAT_SYNC_ZERORM) { 5555 ttemod = tte; 5556 TTE_CLR_RM(&ttemod); 5557 ret = sfmmu_modifytte_try(&tte, &ttemod, 5558 &sfhmep->hme_tte); 5559 if (ret < 0) { 5560 if (pml) { 5561 sfmmu_mlist_exit(pml); 5562 } 5563 continue; 5564 } 5565 5566 if (ret > 0) { 5567 sfmmu_tlb_demap(addr, sfmmup, 5568 hmeblkp, 0, 0); 5569 } 5570 } 5571 sfmmu_ttesync(sfmmup, addr, &tte, pp); 5572 if (pml) { 5573 sfmmu_mlist_exit(pml); 5574 } 5575 } 5576 addr += TTEBYTES(ttesz); 5577 sfhmep++; 5578 } 5579 return (addr); 5580 } 5581 5582 /* 5583 * This function will sync a tte to the page struct and it will 5584 * update the hat stats. Currently it allows us to pass a NULL pp 5585 * and we will simply update the stats. We may want to change this 5586 * so we only keep stats for pages backed by pp's. 5587 */ 5588 static void 5589 sfmmu_ttesync(struct hat *sfmmup, caddr_t addr, tte_t *ttep, page_t *pp) 5590 { 5591 uint_t rm = 0; 5592 int sz; 5593 pgcnt_t npgs; 5594 5595 ASSERT(TTE_IS_VALID(ttep)); 5596 5597 if (TTE_IS_NOSYNC(ttep)) { 5598 return; 5599 } 5600 5601 if (TTE_IS_REF(ttep)) { 5602 rm = P_REF; 5603 } 5604 if (TTE_IS_MOD(ttep)) { 5605 rm |= P_MOD; 5606 } 5607 5608 if (rm == 0) { 5609 return; 5610 } 5611 5612 sz = TTE_CSZ(ttep); 5613 if (sfmmup->sfmmu_rmstat) { 5614 int i; 5615 caddr_t vaddr = addr; 5616 5617 for (i = 0; i < TTEPAGES(sz); i++, vaddr += MMU_PAGESIZE) { 5618 hat_setstat(sfmmup->sfmmu_as, vaddr, MMU_PAGESIZE, rm); 5619 } 5620 5621 } 5622 5623 /* 5624 * XXX I want to use cas to update nrm bits but they 5625 * currently belong in common/vm and not in hat where 5626 * they should be. 5627 * The nrm bits are protected by the same mutex as 5628 * the one that protects the page's mapping list. 5629 */ 5630 if (!pp) 5631 return; 5632 ASSERT(sfmmu_mlist_held(pp)); 5633 /* 5634 * If the tte is for a large page, we need to sync all the 5635 * pages covered by the tte. 5636 */ 5637 if (sz != TTE8K) { 5638 ASSERT(pp->p_szc != 0); 5639 pp = PP_GROUPLEADER(pp, sz); 5640 ASSERT(sfmmu_mlist_held(pp)); 5641 } 5642 5643 /* Get number of pages from tte size. 
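 * For instance, a TTE4M mapping covers 512 of the 8K base pages.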
*/ 5644 npgs = TTEPAGES(sz); 5645 5646 do { 5647 ASSERT(pp); 5648 ASSERT(sfmmu_mlist_held(pp)); 5649 if (((rm & P_REF) != 0 && !PP_ISREF(pp)) || 5650 ((rm & P_MOD) != 0 && !PP_ISMOD(pp))) 5651 hat_page_setattr(pp, rm); 5652 5653 /* 5654 * Are we done? If not, we must have a large mapping. 5655 * For large mappings we need to sync the rest of the pages 5656 * covered by this tte; goto the next page. 5657 */ 5658 } while (--npgs > 0 && (pp = PP_PAGENEXT(pp))); 5659 } 5660 5661 /* 5662 * Execute pre-callback handler of each pa_hment linked to pp 5663 * 5664 * Inputs: 5665 * flag: either HAT_PRESUSPEND or HAT_SUSPEND. 5666 * capture_cpus: pointer to return value (below) 5667 * 5668 * Returns: 5669 * Propagates the subsystem callback return values back to the caller; 5670 * returns 0 on success. If capture_cpus is non-NULL, the value returned 5671 * is zero if all of the pa_hments are of a type that do not require 5672 * capturing CPUs prior to suspending the mapping, else it is 1. 5673 */ 5674 static int 5675 hat_pageprocess_precallbacks(struct page *pp, uint_t flag, int *capture_cpus) 5676 { 5677 struct sf_hment *sfhmep; 5678 struct pa_hment *pahmep; 5679 int (*f)(caddr_t, uint_t, uint_t, void *); 5680 int ret; 5681 id_t id; 5682 int locked = 0; 5683 kmutex_t *pml; 5684 5685 ASSERT(PAGE_EXCL(pp)); 5686 if (!sfmmu_mlist_held(pp)) { 5687 pml = sfmmu_mlist_enter(pp); 5688 locked = 1; 5689 } 5690 5691 if (capture_cpus) 5692 *capture_cpus = 0; 5693 5694 top: 5695 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 5696 /* 5697 * skip sf_hments corresponding to VA<->PA mappings; 5698 * for pa_hment's, hme_tte.ll is zero 5699 */ 5700 if (!IS_PAHME(sfhmep)) 5701 continue; 5702 5703 pahmep = sfhmep->hme_data; 5704 ASSERT(pahmep != NULL); 5705 5706 /* 5707 * skip if pre-handler has been called earlier in this loop 5708 */ 5709 if (pahmep->flags & flag) 5710 continue; 5711 5712 id = pahmep->cb_id; 5713 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid); 5714 if (capture_cpus && sfmmu_cb_table[id].capture_cpus != 0) 5715 *capture_cpus = 1; 5716 if ((f = sfmmu_cb_table[id].prehandler) == NULL) { 5717 pahmep->flags |= flag; 5718 continue; 5719 } 5720 5721 /* 5722 * Drop the mapping list lock to avoid locking order issues. 5723 */ 5724 if (locked) 5725 sfmmu_mlist_exit(pml); 5726 5727 ret = f(pahmep->addr, pahmep->len, flag, pahmep->pvt); 5728 if (ret != 0) 5729 return (ret); /* caller must do the cleanup */ 5730 5731 if (locked) { 5732 pml = sfmmu_mlist_enter(pp); 5733 pahmep->flags |= flag; 5734 goto top; 5735 } 5736 5737 pahmep->flags |= flag; 5738 } 5739 5740 if (locked) 5741 sfmmu_mlist_exit(pml); 5742 5743 return (0); 5744 } 5745 5746 /* 5747 * Execute post-callback handler of each pa_hment linked to pp 5748 * 5749 * Same overall assumptions and restrictions apply as for 5750 * hat_pageprocess_precallbacks(). 
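 * Unlike the pre-callbacks, a post-handler has no way to veto the
 * operation; if one fails we panic.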
5751 */ 5752 static void 5753 hat_pageprocess_postcallbacks(struct page *pp, uint_t flag) 5754 { 5755 pfn_t pgpfn = pp->p_pagenum; 5756 pfn_t pgmask = btop(page_get_pagesize(pp->p_szc)) - 1; 5757 pfn_t newpfn; 5758 struct sf_hment *sfhmep; 5759 struct pa_hment *pahmep; 5760 int (*f)(caddr_t, uint_t, uint_t, void *, pfn_t); 5761 id_t id; 5762 int locked = 0; 5763 kmutex_t *pml; 5764 5765 ASSERT(PAGE_EXCL(pp)); 5766 if (!sfmmu_mlist_held(pp)) { 5767 pml = sfmmu_mlist_enter(pp); 5768 locked = 1; 5769 } 5770 5771 top: 5772 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 5773 /* 5774 * skip sf_hments corresponding to VA<->PA mappings; 5775 * for pa_hment's, hme_tte.ll is zero 5776 */ 5777 if (!IS_PAHME(sfhmep)) 5778 continue; 5779 5780 pahmep = sfhmep->hme_data; 5781 ASSERT(pahmep != NULL); 5782 5783 if ((pahmep->flags & flag) == 0) 5784 continue; 5785 5786 pahmep->flags &= ~flag; 5787 5788 id = pahmep->cb_id; 5789 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid); 5790 if ((f = sfmmu_cb_table[id].posthandler) == NULL) 5791 continue; 5792 5793 /* 5794 * Convert the base page PFN into the constituent PFN 5795 * which is needed by the callback handler. 5796 */ 5797 newpfn = pgpfn | (btop((uintptr_t)pahmep->addr) & pgmask); 5798 5799 /* 5800 * Drop the mapping list lock to avoid locking order issues. 5801 */ 5802 if (locked) 5803 sfmmu_mlist_exit(pml); 5804 5805 if (f(pahmep->addr, pahmep->len, flag, pahmep->pvt, newpfn) 5806 != 0) 5807 panic("sfmmu: posthandler failed"); 5808 5809 if (locked) { 5810 pml = sfmmu_mlist_enter(pp); 5811 goto top; 5812 } 5813 } 5814 5815 if (locked) 5816 sfmmu_mlist_exit(pml); 5817 } 5818 5819 /* 5820 * Suspend locked kernel mapping 5821 */ 5822 void 5823 hat_pagesuspend(struct page *pp) 5824 { 5825 struct sf_hment *sfhmep; 5826 sfmmu_t *sfmmup; 5827 tte_t tte, ttemod; 5828 struct hme_blk *hmeblkp; 5829 caddr_t addr; 5830 int index, cons; 5831 cpuset_t cpuset; 5832 5833 ASSERT(PAGE_EXCL(pp)); 5834 ASSERT(sfmmu_mlist_held(pp)); 5835 5836 mutex_enter(&kpr_suspendlock); 5837 5838 /* 5839 * Call into dtrace to tell it we're about to suspend a 5840 * kernel mapping. This prevents us from running into issues 5841 * with probe context trying to touch a suspended page 5842 * in the relocation codepath itself. 5843 */ 5844 if (dtrace_kreloc_init) 5845 (*dtrace_kreloc_init)(); 5846 5847 index = PP_MAPINDEX(pp); 5848 cons = TTE8K; 5849 5850 retry: 5851 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 5852 5853 if (IS_PAHME(sfhmep)) 5854 continue; 5855 5856 if (get_hblk_ttesz(sfmmu_hmetohblk(sfhmep)) != cons) 5857 continue; 5858 5859 /* 5860 * Loop until we successfully set the suspend bit in 5861 * the TTE. 5862 */ 5863 again: 5864 sfmmu_copytte(&sfhmep->hme_tte, &tte); 5865 ASSERT(TTE_IS_VALID(&tte)); 5866 5867 ttemod = tte; 5868 TTE_SET_SUSPEND(&ttemod); 5869 if (sfmmu_modifytte_try(&tte, &ttemod, 5870 &sfhmep->hme_tte) < 0) 5871 goto again; 5872 5873 /* 5874 * Invalidate TSB entry 5875 */ 5876 hmeblkp = sfmmu_hmetohblk(sfhmep); 5877 5878 sfmmup = hblktosfmmu(hmeblkp); 5879 ASSERT(sfmmup == ksfmmup); 5880 5881 addr = tte_to_vaddr(hmeblkp, tte); 5882 5883 /* 5884 * No need to make sure that the TSB for this sfmmu is 5885 * not being relocated since it is ksfmmup and thus it 5886 * will never be relocated. 
5887 */ 5888 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp); 5889 5890 /* 5891 * Update xcall stats 5892 */ 5893 cpuset = cpu_ready_set; 5894 CPUSET_DEL(cpuset, CPU->cpu_id); 5895 5896 /* LINTED: constant in conditional context */ 5897 SFMMU_XCALL_STATS(KCONTEXT); 5898 5899 /* 5900 * Flush TLB entry on remote CPU's 5901 */ 5902 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, KCONTEXT); 5903 xt_sync(cpuset); 5904 5905 /* 5906 * Flush TLB entry on local CPU 5907 */ 5908 vtag_flushpage(addr, KCONTEXT); 5909 } 5910 5911 while (index != 0) { 5912 index = index >> 1; 5913 if (index != 0) 5914 cons++; 5915 if (index & 0x1) { 5916 pp = PP_GROUPLEADER(pp, cons); 5917 goto retry; 5918 } 5919 } 5920 } 5921 5922 #ifdef DEBUG 5923 5924 #define N_PRLE 1024 5925 struct prle { 5926 page_t *targ; 5927 page_t *repl; 5928 int status; 5929 int pausecpus; 5930 hrtime_t whence; 5931 }; 5932 5933 static struct prle page_relocate_log[N_PRLE]; 5934 static int prl_entry; 5935 static kmutex_t prl_mutex; 5936 5937 #define PAGE_RELOCATE_LOG(t, r, s, p) \ 5938 mutex_enter(&prl_mutex); \ 5939 page_relocate_log[prl_entry].targ = *(t); \ 5940 page_relocate_log[prl_entry].repl = *(r); \ 5941 page_relocate_log[prl_entry].status = (s); \ 5942 page_relocate_log[prl_entry].pausecpus = (p); \ 5943 page_relocate_log[prl_entry].whence = gethrtime(); \ 5944 prl_entry = (prl_entry == (N_PRLE - 1))? 0 : prl_entry + 1; \ 5945 mutex_exit(&prl_mutex); 5946 5947 #else /* !DEBUG */ 5948 #define PAGE_RELOCATE_LOG(t, r, s, p) 5949 #endif 5950 5951 /* 5952 * Core Kernel Page Relocation Algorithm 5953 * 5954 * Input: 5955 * 5956 * target : constituent pages are SE_EXCL locked. 5957 * replacement: constituent pages are SE_EXCL locked. 5958 * 5959 * Output: 5960 * 5961 * nrelocp: number of pages relocated 5962 */ 5963 int 5964 hat_page_relocate(page_t **target, page_t **replacement, spgcnt_t *nrelocp) 5965 { 5966 page_t *targ, *repl; 5967 page_t *tpp, *rpp; 5968 kmutex_t *low, *high; 5969 spgcnt_t npages, i; 5970 page_t *pl = NULL; 5971 int old_pil; 5972 cpuset_t cpuset; 5973 int cap_cpus; 5974 int ret; 5975 5976 if (hat_kpr_enabled == 0 || !kcage_on || PP_ISNORELOC(*target)) { 5977 PAGE_RELOCATE_LOG(target, replacement, EAGAIN, -1); 5978 return (EAGAIN); 5979 } 5980 5981 mutex_enter(&kpr_mutex); 5982 kreloc_thread = curthread; 5983 5984 targ = *target; 5985 repl = *replacement; 5986 ASSERT(repl != NULL); 5987 ASSERT(targ->p_szc == repl->p_szc); 5988 5989 npages = page_get_pagecnt(targ->p_szc); 5990 5991 /* 5992 * unload VA<->PA mappings that are not locked 5993 */ 5994 tpp = targ; 5995 for (i = 0; i < npages; i++) { 5996 (void) hat_pageunload(tpp, SFMMU_KERNEL_RELOC); 5997 tpp++; 5998 } 5999 6000 /* 6001 * Do "presuspend" callbacks, in a context from which we can still 6002 * block as needed. Note that we don't hold the mapping list lock 6003 * of "targ" at this point due to potential locking order issues; 6004 * we assume that between the hat_pageunload() above and holding 6005 * the SE_EXCL lock that the mapping list *cannot* change at this 6006 * point. 6007 */ 6008 ret = hat_pageprocess_precallbacks(targ, HAT_PRESUSPEND, &cap_cpus); 6009 if (ret != 0) { 6010 /* 6011 * EIO translates to fatal error, for all others cleanup 6012 * and return EAGAIN. 
6013 */ 6014 ASSERT(ret != EIO); 6015 hat_pageprocess_postcallbacks(targ, HAT_POSTUNSUSPEND); 6016 PAGE_RELOCATE_LOG(target, replacement, ret, -1); 6017 kreloc_thread = NULL; 6018 mutex_exit(&kpr_mutex); 6019 return (EAGAIN); 6020 } 6021 6022 /* 6023 * acquire p_mapping list lock for both the target and replacement 6024 * root pages. 6025 * 6026 * low and high refer to the need to grab the mlist locks in a 6027 * specific order in order to prevent race conditions. Thus the 6028 * lower lock must be grabbed before the higher lock. 6029 * 6030 * This will block hat_unload's accessing p_mapping list. Since 6031 * we have SE_EXCL lock, hat_memload and hat_pageunload will be 6032 * blocked. Thus, no one else will be accessing the p_mapping list 6033 * while we suspend and reload the locked mapping below. 6034 */ 6035 tpp = targ; 6036 rpp = repl; 6037 sfmmu_mlist_reloc_enter(tpp, rpp, &low, &high); 6038 6039 kpreempt_disable(); 6040 6041 /* 6042 * If the replacement page is of a different virtual color 6043 * than the page it is replacing, we need to handle the VAC 6044 * consistency for it just as we would if we were setting up 6045 * a new mapping to a page. 6046 */ 6047 if ((tpp->p_szc == 0) && (PP_GET_VCOLOR(rpp) != NO_VCOLOR)) { 6048 if (tpp->p_vcolor != rpp->p_vcolor) { 6049 sfmmu_cache_flushcolor(PP_GET_VCOLOR(rpp), 6050 rpp->p_pagenum); 6051 } 6052 } 6053 6054 /* 6055 * We raise our PIL to 13 so that we don't get captured by 6056 * another CPU or pinned by an interrupt thread. We can't go to 6057 * PIL 14 since the nexus driver(s) may need to interrupt at 6058 * that level in the case of IOMMU pseudo mappings. 6059 */ 6060 cpuset = cpu_ready_set; 6061 CPUSET_DEL(cpuset, CPU->cpu_id); 6062 if (!cap_cpus || CPUSET_ISNULL(cpuset)) { 6063 old_pil = splr(XCALL_PIL); 6064 } else { 6065 old_pil = -1; 6066 xc_attention(cpuset); 6067 } 6068 ASSERT(getpil() == XCALL_PIL); 6069 6070 /* 6071 * Now do suspend callbacks. In the case of an IOMMU mapping 6072 * this will suspend all DMA activity to the page while it is 6073 * being relocated. Since we are well above LOCK_LEVEL and CPUs 6074 * may be captured at this point we should have acquired any needed 6075 * locks in the presuspend callback. 6076 */ 6077 ret = hat_pageprocess_precallbacks(targ, HAT_SUSPEND, NULL); 6078 if (ret != 0) { 6079 repl = targ; 6080 goto suspend_fail; 6081 } 6082 6083 /* 6084 * Raise the PIL yet again, this time to block all high-level 6085 * interrupts on this CPU. This is necessary to prevent an 6086 * interrupt routine from pinning the thread which holds the 6087 * mapping suspended and then touching the suspended page. 6088 * 6089 * Once the page is suspended we also need to be careful to 6090 * avoid calling any functions which touch any seg_kmem memory 6091 * since that memory may be backed by the very page we are 6092 * relocating in here! 6093 */ 6094 hat_pagesuspend(targ); 6095 6096 /* 6097 * Now that we are confident everybody has stopped using this page, 6098 * copy the page contents. Note we use a physical copy to prevent 6099 * locking issues and to avoid fpRAS because we can't handle it in 6100 * this context. 6101 */ 6102 for (i = 0; i < npages; i++, tpp++, rpp++) { 6103 /* 6104 * Copy the contents of the page. 6105 */ 6106 ppcopy_kernel(tpp, rpp); 6107 } 6108 6109 tpp = targ; 6110 rpp = repl; 6111 for (i = 0; i < npages; i++, tpp++, rpp++) { 6112 /* 6113 * Copy attributes. VAC consistency was handled above, 6114 * if required. 
6115 */ 6116 rpp->p_nrm = tpp->p_nrm; 6117 tpp->p_nrm = 0; 6118 rpp->p_index = tpp->p_index; 6119 tpp->p_index = 0; 6120 rpp->p_vcolor = tpp->p_vcolor; 6121 } 6122 6123 /* 6124 * First, unsuspend the page, if we set the suspend bit, and transfer 6125 * the mapping list from the target page to the replacement page. 6126 * Next process postcallbacks; since pa_hment's are linked only to the 6127 * p_mapping list of root page, we don't iterate over the constituent 6128 * pages. 6129 */ 6130 hat_pagereload(targ, repl); 6131 6132 suspend_fail: 6133 hat_pageprocess_postcallbacks(repl, HAT_UNSUSPEND); 6134 6135 /* 6136 * Now lower our PIL and release any captured CPUs since we 6137 * are out of the "danger zone". After this it will again be 6138 * safe to acquire adaptive mutex locks, or to drop them... 6139 */ 6140 if (old_pil != -1) { 6141 splx(old_pil); 6142 } else { 6143 xc_dismissed(cpuset); 6144 } 6145 6146 kpreempt_enable(); 6147 6148 sfmmu_mlist_reloc_exit(low, high); 6149 6150 /* 6151 * Postsuspend callbacks should drop any locks held across 6152 * the suspend callbacks. As before, we don't hold the mapping 6153 * list lock at this point.. our assumption is that the mapping 6154 * list still can't change due to our holding SE_EXCL lock and 6155 * there being no unlocked mappings left. Hence the restriction 6156 * on calling context to hat_delete_callback() 6157 */ 6158 hat_pageprocess_postcallbacks(repl, HAT_POSTUNSUSPEND); 6159 if (ret != 0) { 6160 /* 6161 * The second presuspend call failed: we got here through 6162 * the suspend_fail label above. 6163 */ 6164 ASSERT(ret != EIO); 6165 PAGE_RELOCATE_LOG(target, replacement, ret, cap_cpus); 6166 kreloc_thread = NULL; 6167 mutex_exit(&kpr_mutex); 6168 return (EAGAIN); 6169 } 6170 6171 /* 6172 * Now that we're out of the performance critical section we can 6173 * take care of updating the hash table, since we still 6174 * hold all the pages locked SE_EXCL at this point we 6175 * needn't worry about things changing out from under us. 6176 */ 6177 tpp = targ; 6178 rpp = repl; 6179 for (i = 0; i < npages; i++, tpp++, rpp++) { 6180 6181 /* 6182 * replace targ with replacement in page_hash table 6183 */ 6184 targ = tpp; 6185 page_relocate_hash(rpp, targ); 6186 6187 /* 6188 * concatenate target; caller of platform_page_relocate() 6189 * expects target to be concatenated after returning. 6190 */ 6191 ASSERT(targ->p_next == targ); 6192 ASSERT(targ->p_prev == targ); 6193 page_list_concat(&pl, &targ); 6194 } 6195 6196 ASSERT(*target == pl); 6197 *nrelocp = npages; 6198 PAGE_RELOCATE_LOG(target, replacement, 0, cap_cpus); 6199 kreloc_thread = NULL; 6200 mutex_exit(&kpr_mutex); 6201 return (0); 6202 } 6203 6204 /* 6205 * Called when stray pa_hments are found attached to a page which is 6206 * being freed. Notify the subsystem which attached the pa_hment of 6207 * the error if it registered a suitable handler, else panic. 6208 */ 6209 static void 6210 sfmmu_pahment_leaked(struct pa_hment *pahmep) 6211 { 6212 id_t cb_id = pahmep->cb_id; 6213 6214 ASSERT(cb_id >= (id_t)0 && cb_id < sfmmu_cb_nextid); 6215 if (sfmmu_cb_table[cb_id].errhandler != NULL) { 6216 if (sfmmu_cb_table[cb_id].errhandler(pahmep->addr, pahmep->len, 6217 HAT_CB_ERR_LEAKED, pahmep->pvt) == 0) 6218 return; /* non-fatal */ 6219 } 6220 panic("pa_hment leaked: 0x%p", pahmep); 6221 } 6222 6223 /* 6224 * Remove all mappings to page 'pp'. 
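 *
 * The caller must hold the page SE_EXCL.  As an illustrative sketch
 * (hypothetical caller, not a call site from this file), code that is
 * about to free or reuse a page typically does something like:
 *
 *      ASSERT(PAGE_EXCL(pp));
 *      (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
 *
 * hat_page_relocate() above instead passes SFMMU_KERNEL_RELOC as the
 * forceflag so that locked kernel mappings are left in place to be
 * suspended rather than unloaded.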
6225 */ 6226 int 6227 hat_pageunload(struct page *pp, uint_t forceflag) 6228 { 6229 struct page *origpp = pp; 6230 struct sf_hment *sfhme, *tmphme; 6231 struct hme_blk *hmeblkp; 6232 kmutex_t *pml, *pmtx; 6233 cpuset_t cpuset, tset; 6234 int index, cons; 6235 int xhme_blks; 6236 int pa_hments; 6237 6238 ASSERT(PAGE_EXCL(pp)); 6239 6240 retry_xhat: 6241 tmphme = NULL; 6242 xhme_blks = 0; 6243 pa_hments = 0; 6244 CPUSET_ZERO(cpuset); 6245 6246 pml = sfmmu_mlist_enter(pp); 6247 6248 if (pp->p_kpmref) 6249 sfmmu_kpm_pageunload(pp); 6250 ASSERT(!PP_ISMAPPED_KPM(pp)); 6251 6252 index = PP_MAPINDEX(pp); 6253 cons = TTE8K; 6254 retry: 6255 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 6256 tmphme = sfhme->hme_next; 6257 6258 if (IS_PAHME(sfhme)) { 6259 ASSERT(sfhme->hme_data != NULL); 6260 pa_hments++; 6261 continue; 6262 } 6263 6264 hmeblkp = sfmmu_hmetohblk(sfhme); 6265 if (hmeblkp->hblk_xhat_bit) { 6266 struct xhat_hme_blk *xblk = 6267 (struct xhat_hme_blk *)hmeblkp; 6268 6269 (void) XHAT_PAGEUNLOAD(xblk->xhat_hme_blk_hat, 6270 pp, forceflag, XBLK2PROVBLK(xblk)); 6271 6272 xhme_blks = 1; 6273 continue; 6274 } 6275 6276 /* 6277 * If there are kernel mappings don't unload them, they will 6278 * be suspended. 6279 */ 6280 if (forceflag == SFMMU_KERNEL_RELOC && hmeblkp->hblk_lckcnt && 6281 hmeblkp->hblk_tag.htag_id == ksfmmup) 6282 continue; 6283 6284 tset = sfmmu_pageunload(pp, sfhme, cons); 6285 CPUSET_OR(cpuset, tset); 6286 } 6287 6288 while (index != 0) { 6289 index = index >> 1; 6290 if (index != 0) 6291 cons++; 6292 if (index & 0x1) { 6293 /* Go to leading page */ 6294 pp = PP_GROUPLEADER(pp, cons); 6295 ASSERT(sfmmu_mlist_held(pp)); 6296 goto retry; 6297 } 6298 } 6299 6300 /* 6301 * cpuset may be empty if the page was only mapped by segkpm, 6302 * in which case we won't actually cross-trap. 6303 */ 6304 xt_sync(cpuset); 6305 6306 /* 6307 * The page should have no mappings at this point, unless 6308 * we were called from hat_page_relocate() in which case we 6309 * leave the locked mappings which will be suspended later. 6310 */ 6311 ASSERT(!PP_ISMAPPED(origpp) || xhme_blks || pa_hments || 6312 (forceflag == SFMMU_KERNEL_RELOC)); 6313 6314 if (PP_ISTNC(pp)) { 6315 if (cons == TTE8K) { 6316 pmtx = sfmmu_page_enter(pp); 6317 PP_CLRTNC(pp); 6318 sfmmu_page_exit(pmtx); 6319 } else { 6320 conv_tnc(pp, cons); 6321 } 6322 } 6323 6324 if (pa_hments && forceflag != SFMMU_KERNEL_RELOC) { 6325 /* 6326 * Unlink any pa_hments and free them, calling back 6327 * the responsible subsystem to notify it of the error. 6328 * This can occur in situations such as drivers leaking 6329 * DMA handles: naughty, but common enough that we'd like 6330 * to keep the system running rather than bringing it 6331 * down with an obscure error like "pa_hment leaked" 6332 * which doesn't aid the user in debugging their driver. 6333 */ 6334 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 6335 tmphme = sfhme->hme_next; 6336 if (IS_PAHME(sfhme)) { 6337 struct pa_hment *pahmep = sfhme->hme_data; 6338 sfmmu_pahment_leaked(pahmep); 6339 HME_SUB(sfhme, pp); 6340 kmem_cache_free(pa_hment_cache, pahmep); 6341 } 6342 } 6343 6344 ASSERT(!PP_ISMAPPED(origpp) || xhme_blks); 6345 } 6346 6347 sfmmu_mlist_exit(pml); 6348 6349 /* 6350 * XHAT may not have finished unloading pages 6351 * because some other thread was waiting for 6352 * mlist lock and XHAT_PAGEUNLOAD let it do 6353 * the job. 
6354 */ 6355 if (xhme_blks) { 6356 pp = origpp; 6357 goto retry_xhat; 6358 } 6359 6360 return (0); 6361 } 6362 6363 static cpuset_t 6364 sfmmu_pageunload(page_t *pp, struct sf_hment *sfhme, int cons) 6365 { 6366 struct hme_blk *hmeblkp; 6367 sfmmu_t *sfmmup; 6368 tte_t tte, ttemod; 6369 #ifdef DEBUG 6370 tte_t orig_old; 6371 #endif /* DEBUG */ 6372 caddr_t addr; 6373 int ttesz; 6374 int ret; 6375 cpuset_t cpuset; 6376 6377 ASSERT(pp != NULL); 6378 ASSERT(sfmmu_mlist_held(pp)); 6379 ASSERT(pp->p_vnode != &kvp); 6380 6381 CPUSET_ZERO(cpuset); 6382 6383 hmeblkp = sfmmu_hmetohblk(sfhme); 6384 6385 readtte: 6386 sfmmu_copytte(&sfhme->hme_tte, &tte); 6387 if (TTE_IS_VALID(&tte)) { 6388 sfmmup = hblktosfmmu(hmeblkp); 6389 ttesz = get_hblk_ttesz(hmeblkp); 6390 /* 6391 * Only unload mappings of 'cons' size. 6392 */ 6393 if (ttesz != cons) 6394 return (cpuset); 6395 6396 /* 6397 * Note that we have p_mapping lock, but no hash lock here. 6398 * hblk_unload() has to have both hash lock AND p_mapping 6399 * lock before it tries to modify tte. So, the tte could 6400 * not become invalid in the sfmmu_modifytte_try() below. 6401 */ 6402 ttemod = tte; 6403 #ifdef DEBUG 6404 orig_old = tte; 6405 #endif /* DEBUG */ 6406 6407 TTE_SET_INVALID(&ttemod); 6408 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 6409 if (ret < 0) { 6410 #ifdef DEBUG 6411 /* only R/M bits can change. */ 6412 chk_tte(&orig_old, &tte, &ttemod, hmeblkp); 6413 #endif /* DEBUG */ 6414 goto readtte; 6415 } 6416 6417 if (ret == 0) { 6418 panic("pageunload: cas failed?"); 6419 } 6420 6421 addr = tte_to_vaddr(hmeblkp, tte); 6422 6423 sfmmu_ttesync(sfmmup, addr, &tte, pp); 6424 6425 atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -1); 6426 6427 /* 6428 * We need to flush the page from the virtual cache 6429 * in order to prevent a virtual cache alias 6430 * inconsistency. The particular scenario we need 6431 * to worry about is: 6432 * Given: va1 and va2 are two virtual address that 6433 * alias and will map the same physical address. 6434 * 1. mapping exists from va1 to pa and data has 6435 * been read into the cache. 6436 * 2. unload va1. 6437 * 3. load va2 and modify data using va2. 6438 * 4 unload va2. 6439 * 5. load va1 and reference data. Unless we flush 6440 * the data cache when we unload we will get 6441 * stale data. 6442 * This scenario is taken care of by using virtual 6443 * page coloring. 6444 */ 6445 if (sfmmup->sfmmu_ismhat) { 6446 /* 6447 * Flush TSBs, TLBs and caches 6448 * of every process 6449 * sharing this ism segment. 6450 */ 6451 sfmmu_hat_lock_all(); 6452 mutex_enter(&ism_mlist_lock); 6453 kpreempt_disable(); 6454 if (do_virtual_coloring) 6455 sfmmu_ismtlbcache_demap(addr, sfmmup, hmeblkp, 6456 pp->p_pagenum, CACHE_NO_FLUSH); 6457 else 6458 sfmmu_ismtlbcache_demap(addr, sfmmup, hmeblkp, 6459 pp->p_pagenum, CACHE_FLUSH); 6460 kpreempt_enable(); 6461 mutex_exit(&ism_mlist_lock); 6462 sfmmu_hat_unlock_all(); 6463 cpuset = cpu_ready_set; 6464 } else if (do_virtual_coloring) { 6465 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 6466 cpuset = sfmmup->sfmmu_cpusran; 6467 } else { 6468 sfmmu_tlbcache_demap(addr, sfmmup, hmeblkp, 6469 pp->p_pagenum, 0, FLUSH_NECESSARY_CPUS, 6470 CACHE_FLUSH, 0); 6471 cpuset = sfmmup->sfmmu_cpusran; 6472 } 6473 6474 /* 6475 * Hme_sub has to run after ttesync() and a_rss update. 6476 * See hblk_unload(). 
6477 */ 6478 HME_SUB(sfhme, pp); 6479 membar_stst(); 6480 6481 /* 6482 * We cannot make ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS) 6483 * since pteload may have done a HME_ADD() right after 6484 * we did the HME_SUB() above. Hmecnt is now maintained 6485 * by cas only; no lock guarantees its value. The only 6486 * guarantee we have is that the hmecnt will never be less than 6487 * what it should be, so the hblk will not be taken away. 6488 * It's also important that we decremented the hmecnt after 6489 * we are done with hmeblkp so that this hmeblk won't be 6490 * stolen. 6491 */ 6492 ASSERT(hmeblkp->hblk_hmecnt > 0); 6493 ASSERT(hmeblkp->hblk_vcnt > 0); 6494 atomic_add_16(&hmeblkp->hblk_vcnt, -1); 6495 atomic_add_16(&hmeblkp->hblk_hmecnt, -1); 6496 /* 6497 * This is bug 4063182. 6498 * XXX: fixme 6499 * ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt || 6500 * !hmeblkp->hblk_lckcnt); 6501 */ 6502 } else { 6503 panic("invalid tte? pp %p &tte %p", 6504 (void *)pp, (void *)&tte); 6505 } 6506 6507 return (cpuset); 6508 } 6509 6510 /* 6511 * While relocating a kernel page, this function will move the mappings 6512 * from tpp to dpp and modify any data associated with these mappings. 6513 * It also unsuspends the suspended kernel mapping. 6514 */ 6515 static void 6516 hat_pagereload(struct page *tpp, struct page *dpp) 6517 { 6518 struct sf_hment *sfhme; 6519 tte_t tte, ttemod; 6520 int index, cons; 6521 6522 ASSERT(getpil() == PIL_MAX); 6523 ASSERT(sfmmu_mlist_held(tpp)); 6524 ASSERT(sfmmu_mlist_held(dpp)); 6525 6526 index = PP_MAPINDEX(tpp); 6527 cons = TTE8K; 6528 6529 /* Update real mappings to the page */ 6530 retry: 6531 for (sfhme = tpp->p_mapping; sfhme != NULL; sfhme = sfhme->hme_next) { 6532 if (IS_PAHME(sfhme)) 6533 continue; 6534 sfmmu_copytte(&sfhme->hme_tte, &tte); 6535 ttemod = tte; 6536 6537 /* 6538 * replace old pfn with new pfn in TTE 6539 */ 6540 PFN_TO_TTE(ttemod, dpp->p_pagenum); 6541 6542 /* 6543 * clear suspend bit 6544 */ 6545 ASSERT(TTE_IS_SUSPEND(&ttemod)); 6546 TTE_CLR_SUSPEND(&ttemod); 6547 6548 if (sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte) < 0) 6549 panic("hat_pagereload(): sfmmu_modifytte_try() failed"); 6550 6551 /* 6552 * set hme_page to point to the new page 6553 */ 6554 sfhme->hme_page = dpp; 6555 } 6556 6557 /* 6558 * move p_mapping list from old page to new page 6559 */ 6560 dpp->p_mapping = tpp->p_mapping; 6561 tpp->p_mapping = NULL; 6562 dpp->p_share = tpp->p_share; 6563 tpp->p_share = 0; 6564 6565 while (index != 0) { 6566 index = index >> 1; 6567 if (index != 0) 6568 cons++; 6569 if (index & 0x1) { 6570 tpp = PP_GROUPLEADER(tpp, cons); 6571 dpp = PP_GROUPLEADER(dpp, cons); 6572 goto retry; 6573 } 6574 } 6575 6576 if (dtrace_kreloc_fini) 6577 (*dtrace_kreloc_fini)(); 6578 mutex_exit(&kpr_suspendlock); 6579 } 6580 6581 uint_t 6582 hat_pagesync(struct page *pp, uint_t clearflag) 6583 { 6584 struct sf_hment *sfhme, *tmphme = NULL; 6585 struct hme_blk *hmeblkp; 6586 kmutex_t *pml; 6587 cpuset_t cpuset, tset; 6588 int index, cons; 6589 extern ulong_t po_share; 6590 page_t *save_pp = pp; 6591 6592 CPUSET_ZERO(cpuset); 6593 6594 if (PP_ISRO(pp) && (clearflag & HAT_SYNC_STOPON_MOD)) { 6595 return (PP_GENERIC_ATTR(pp)); 6596 } 6597 6598 if ((clearflag == (HAT_SYNC_STOPON_REF | HAT_SYNC_DONTZERO)) && 6599 PP_ISREF(pp)) { 6600 return (PP_GENERIC_ATTR(pp)); 6601 } 6602 6603 if ((clearflag == (HAT_SYNC_STOPON_MOD | HAT_SYNC_DONTZERO)) && 6604 PP_ISMOD(pp)) { 6605 return (PP_GENERIC_ATTR(pp)); 6606 } 6607 6608 if ((clearflag & HAT_SYNC_STOPON_SHARED) != 0 && 6609 (pp->p_share
> po_share) && 6610 !(clearflag & HAT_SYNC_ZERORM)) { 6611 if (PP_ISRO(pp)) 6612 hat_page_setattr(pp, P_REF); 6613 return (PP_GENERIC_ATTR(pp)); 6614 } 6615 6616 clearflag &= ~HAT_SYNC_STOPON_SHARED; 6617 pml = sfmmu_mlist_enter(pp); 6618 index = PP_MAPINDEX(pp); 6619 cons = TTE8K; 6620 retry: 6621 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 6622 /* 6623 * We need to save the next hment on the list since 6624 * it is possible for pagesync to remove an invalid hment 6625 * from the list. 6626 */ 6627 tmphme = sfhme->hme_next; 6628 /* 6629 * If we are looking for large mappings and this hme doesn't 6630 * reach the range we are seeking, just ignore it. 6631 */ 6632 hmeblkp = sfmmu_hmetohblk(sfhme); 6633 if (hmeblkp->hblk_xhat_bit) 6634 continue; 6635 6636 if (hme_size(sfhme) < cons) 6637 continue; 6638 tset = sfmmu_pagesync(pp, sfhme, 6639 clearflag & ~HAT_SYNC_STOPON_RM); 6640 CPUSET_OR(cpuset, tset); 6641 /* 6642 * If clearflag is HAT_SYNC_DONTZERO, break out as soon 6643 * as the "ref" or "mod" is set. 6644 */ 6645 if ((clearflag & ~HAT_SYNC_STOPON_RM) == HAT_SYNC_DONTZERO && 6646 ((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp)) || 6647 ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp))) { 6648 index = 0; 6649 break; 6650 } 6651 } 6652 6653 while (index) { 6654 index = index >> 1; 6655 cons++; 6656 if (index & 0x1) { 6657 /* Go to leading page */ 6658 pp = PP_GROUPLEADER(pp, cons); 6659 goto retry; 6660 } 6661 } 6662 6663 xt_sync(cpuset); 6664 sfmmu_mlist_exit(pml); 6665 return (PP_GENERIC_ATTR(save_pp)); 6666 } 6667 6668 /* 6669 * Sync the hardware dependent attributes (ref/mod bits) from one mapping of a page into its page struct 6670 */ 6671 static cpuset_t 6672 sfmmu_pagesync(struct page *pp, struct sf_hment *sfhme, 6673 uint_t clearflag) 6674 { 6675 caddr_t addr; 6676 tte_t tte, ttemod; 6677 struct hme_blk *hmeblkp; 6678 int ret; 6679 sfmmu_t *sfmmup; 6680 cpuset_t cpuset; 6681 6682 ASSERT(pp != NULL); 6683 ASSERT(sfmmu_mlist_held(pp)); 6684 ASSERT((clearflag == HAT_SYNC_DONTZERO) || 6685 (clearflag == HAT_SYNC_ZERORM)); 6686 6687 SFMMU_STAT(sf_pagesync); 6688 6689 CPUSET_ZERO(cpuset); 6690 6691 sfmmu_pagesync_retry: 6692 6693 sfmmu_copytte(&sfhme->hme_tte, &tte); 6694 if (TTE_IS_VALID(&tte)) { 6695 hmeblkp = sfmmu_hmetohblk(sfhme); 6696 sfmmup = hblktosfmmu(hmeblkp); 6697 addr = tte_to_vaddr(hmeblkp, tte); 6698 if (clearflag == HAT_SYNC_ZERORM) { 6699 ttemod = tte; 6700 TTE_CLR_RM(&ttemod); 6701 ret = sfmmu_modifytte_try(&tte, &ttemod, 6702 &sfhme->hme_tte); 6703 if (ret < 0) { 6704 /* 6705 * cas failed and the new value is not what 6706 * we want. 6707 */ 6708 goto sfmmu_pagesync_retry; 6709 } 6710 6711 if (ret > 0) { 6712 /* we win the cas */ 6713 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 6714 cpuset = sfmmup->sfmmu_cpusran; 6715 } 6716 } 6717 6718 sfmmu_ttesync(sfmmup, addr, &tte, pp); 6719 } 6720 return (cpuset); 6721 } 6722 6723 /* 6724 * Remove write permission from a mapping to a page, so that 6725 * we can detect the next modification of it. This requires modifying 6726 * the TTE then invalidating (demap) any TLB entry using that TTE. 6727 * This code is similar to sfmmu_pagesync().
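 *
 * Both routines depend on the same compare-and-swap retry convention,
 * inferred here from how the callers in this file treat the result of
 * sfmmu_modifytte_try(): a negative return means the cas lost a race
 * and the TTE changed underneath us, so the TTE is re-read and the
 * operation retried; 0 means the TTE already held the desired value
 * so there was nothing to do; a positive return means the cas
 * succeeded and the stale translation must now be demapped.  The
 * retry loop below therefore looks roughly like:
 *
 *      do {
 *              sfmmu_copytte(&sfhme->hme_tte, &tte);
 *              ttemod = tte;
 *              TTE_CLR_WRT(&ttemod);
 *              TTE_CLR_MOD(&ttemod);
 *      } while (sfmmu_modifytte_try(&tte, &ttemod,
 *          &sfhme->hme_tte) < 0);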
6728 */ 6729 static cpuset_t 6730 sfmmu_pageclrwrt(struct page *pp, struct sf_hment *sfhme) 6731 { 6732 caddr_t addr; 6733 tte_t tte; 6734 tte_t ttemod; 6735 struct hme_blk *hmeblkp; 6736 int ret; 6737 sfmmu_t *sfmmup; 6738 cpuset_t cpuset; 6739 6740 ASSERT(pp != NULL); 6741 ASSERT(sfmmu_mlist_held(pp)); 6742 6743 CPUSET_ZERO(cpuset); 6744 SFMMU_STAT(sf_clrwrt); 6745 6746 retry: 6747 6748 sfmmu_copytte(&sfhme->hme_tte, &tte); 6749 if (TTE_IS_VALID(&tte) && TTE_IS_WRITABLE(&tte)) { 6750 hmeblkp = sfmmu_hmetohblk(sfhme); 6751 6752 /* 6753 * xhat mappings should never be to a VMODSORT page. 6754 */ 6755 ASSERT(hmeblkp->hblk_xhat_bit == 0); 6756 6757 sfmmup = hblktosfmmu(hmeblkp); 6758 addr = tte_to_vaddr(hmeblkp, tte); 6759 6760 ttemod = tte; 6761 TTE_CLR_WRT(&ttemod); 6762 TTE_CLR_MOD(&ttemod); 6763 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 6764 6765 /* 6766 * if cas failed and the new value is not what 6767 * we want retry 6768 */ 6769 if (ret < 0) 6770 goto retry; 6771 6772 /* we win the cas */ 6773 if (ret > 0) { 6774 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 6775 cpuset = sfmmup->sfmmu_cpusran; 6776 } 6777 } 6778 6779 return (cpuset); 6780 } 6781 6782 /* 6783 * Walk all mappings of a page, removing write permission and clearing the 6784 * ref/mod bits. This code is similar to hat_pagesync() 6785 */ 6786 static void 6787 hat_page_clrwrt(page_t *pp) 6788 { 6789 struct sf_hment *sfhme; 6790 struct sf_hment *tmphme = NULL; 6791 kmutex_t *pml; 6792 cpuset_t cpuset; 6793 cpuset_t tset; 6794 int index; 6795 int cons; 6796 6797 CPUSET_ZERO(cpuset); 6798 6799 pml = sfmmu_mlist_enter(pp); 6800 index = PP_MAPINDEX(pp); 6801 cons = TTE8K; 6802 retry: 6803 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 6804 tmphme = sfhme->hme_next; 6805 6806 /* 6807 * If we are looking for large mappings and this hme doesn't 6808 * reach the range we are seeking, just ignore its. 6809 */ 6810 6811 if (hme_size(sfhme) < cons) 6812 continue; 6813 6814 tset = sfmmu_pageclrwrt(pp, sfhme); 6815 CPUSET_OR(cpuset, tset); 6816 } 6817 6818 while (index) { 6819 index = index >> 1; 6820 cons++; 6821 if (index & 0x1) { 6822 /* Go to leading page */ 6823 pp = PP_GROUPLEADER(pp, cons); 6824 goto retry; 6825 } 6826 } 6827 6828 xt_sync(cpuset); 6829 sfmmu_mlist_exit(pml); 6830 } 6831 6832 /* 6833 * Set the given REF/MOD/RO bits for the given page. 6834 * For a vnode with a sorted v_pages list, we need to change 6835 * the attributes and the v_pages list together under page_vnode_mutex. 6836 */ 6837 void 6838 hat_page_setattr(page_t *pp, uint_t flag) 6839 { 6840 vnode_t *vp = pp->p_vnode; 6841 page_t **listp; 6842 kmutex_t *pmtx; 6843 kmutex_t *vphm = NULL; 6844 6845 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 6846 6847 /* 6848 * nothing to do if attribute already set 6849 */ 6850 if ((pp->p_nrm & flag) == flag) 6851 return; 6852 6853 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) { 6854 vphm = page_vnode_mutex(vp); 6855 mutex_enter(vphm); 6856 } 6857 6858 pmtx = sfmmu_page_enter(pp); 6859 pp->p_nrm |= flag; 6860 sfmmu_page_exit(pmtx); 6861 6862 if (vphm != NULL) { 6863 /* 6864 * Some File Systems examine v_pages for NULL w/o 6865 * grabbing the vphm mutex. Must not let it become NULL when 6866 * pp is the only page on the list. 
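 *
 * The reordering below is also what keeps a VMODSORT vnode's v_pages
 * list sorted: a page that has just been marked P_MOD is moved to the
 * tail of the list here, while hat_page_clrattr() below puts pages
 * back at the head once the bit is cleared, so modified pages stay
 * clustered at one end of the list and walkers looking for dirty
 * pages need not traverse the whole list.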
6867 */ 6868 if (pp->p_vpnext != pp) { 6869 page_vpsub(&vp->v_pages, pp); 6870 if (vp->v_pages != NULL) 6871 listp = &vp->v_pages->p_vpprev->p_vpnext; 6872 else 6873 listp = &vp->v_pages; 6874 page_vpadd(listp, pp); 6875 } 6876 mutex_exit(vphm); 6877 } 6878 } 6879 6880 void 6881 hat_page_clrattr(page_t *pp, uint_t flag) 6882 { 6883 vnode_t *vp = pp->p_vnode; 6884 kmutex_t *vphm = NULL; 6885 kmutex_t *pmtx; 6886 6887 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 6888 6889 /* 6890 * For vnode with a sorted v_pages list, we need to change 6891 * the attributes and the v_pages list together under page_vnode_mutex. 6892 */ 6893 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) { 6894 vphm = page_vnode_mutex(vp); 6895 mutex_enter(vphm); 6896 } 6897 6898 pmtx = sfmmu_page_enter(pp); 6899 pp->p_nrm &= ~flag; 6900 sfmmu_page_exit(pmtx); 6901 6902 if (vphm != NULL) { 6903 /* 6904 * Some File Systems examine v_pages for NULL w/o 6905 * grabbing the vphm mutex. Must not let it become NULL when 6906 * pp is the only page on the list. 6907 */ 6908 if (pp->p_vpnext != pp) { 6909 page_vpsub(&vp->v_pages, pp); 6910 page_vpadd(&vp->v_pages, pp); 6911 } 6912 mutex_exit(vphm); 6913 6914 /* 6915 * VMODSORT works by removing write permissions and getting 6916 * a fault when a page is made dirty. At this point 6917 * we need to remove write permission from all mappings 6918 * to this page. 6919 */ 6920 hat_page_clrwrt(pp); 6921 } 6922 } 6923 6924 6925 uint_t 6926 hat_page_getattr(page_t *pp, uint_t flag) 6927 { 6928 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 6929 return ((uint_t)(pp->p_nrm & flag)); 6930 } 6931 6932 /* 6933 * DEBUG kernels: verify that a kernel va<->pa translation 6934 * is safe by checking the underlying page_t is in a page 6935 * relocation-safe state. 6936 */ 6937 #ifdef DEBUG 6938 void 6939 sfmmu_check_kpfn(pfn_t pfn) 6940 { 6941 page_t *pp; 6942 int index, cons; 6943 6944 if (hat_check_vtop == 0) 6945 return; 6946 6947 if (hat_kpr_enabled == 0 || kvseg.s_base == NULL || panicstr) 6948 return; 6949 6950 pp = page_numtopp_nolock(pfn); 6951 if (!pp) 6952 return; 6953 6954 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp)) 6955 return; 6956 6957 /* 6958 * Handed a large kernel page, we dig up the root page since we 6959 * know the root page might have the lock also. 6960 */ 6961 if (pp->p_szc != 0) { 6962 index = PP_MAPINDEX(pp); 6963 cons = TTE8K; 6964 again: 6965 while (index != 0) { 6966 index >>= 1; 6967 if (index != 0) 6968 cons++; 6969 if (index & 0x1) { 6970 pp = PP_GROUPLEADER(pp, cons); 6971 goto again; 6972 } 6973 } 6974 } 6975 6976 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp)) 6977 return; 6978 6979 /* 6980 * Pages need to be locked or allocated "permanent" (either from 6981 * static_arena arena or explicitly setting PG_NORELOC when calling 6982 * page_create_va()) for VA->PA translations to be valid. 6983 */ 6984 if (!PP_ISNORELOC(pp)) 6985 panic("Illegal VA->PA translation, pp 0x%p not permanent", pp); 6986 else 6987 panic("Illegal VA->PA translation, pp 0x%p not locked", pp); 6988 } 6989 #endif /* DEBUG */ 6990 6991 /* 6992 * Returns a page frame number for a given virtual address. 
6993 * Returns PFN_INVALID to indicate an invalid mapping 6994 */ 6995 pfn_t 6996 hat_getpfnum(struct hat *hat, caddr_t addr) 6997 { 6998 pfn_t pfn; 6999 tte_t tte; 7000 7001 /* 7002 * We would like to 7003 * ASSERT(AS_LOCK_HELD(as, &as->a_lock)); 7004 * but we can't because the iommu driver will call this 7005 * routine at interrupt time and it can't grab the as lock 7006 * or it will deadlock: A thread could have the as lock 7007 * and be waiting for io. The io can't complete 7008 * because the interrupt thread is blocked trying to grab 7009 * the as lock. 7010 */ 7011 7012 ASSERT(hat->sfmmu_xhat_provider == NULL); 7013 7014 if (hat == ksfmmup) { 7015 if (segkpm && IS_KPM_ADDR(addr)) 7016 return (sfmmu_kpm_vatopfn(addr)); 7017 while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte)) 7018 == PFN_SUSPENDED) { 7019 sfmmu_vatopfn_suspended(addr, ksfmmup, &tte); 7020 } 7021 sfmmu_check_kpfn(pfn); 7022 return (pfn); 7023 } else { 7024 return (sfmmu_uvatopfn(addr, hat)); 7025 } 7026 } 7027 7028 /* 7029 * hat_getkpfnum() is an obsolete DDI routine, and its use is discouraged. 7030 * Use hat_getpfnum(kas.a_hat, ...) instead. 7031 * 7032 * We'd like to return PFN_INVALID if the mappings have underlying page_t's 7033 * but can't right now due to the fact that some software has grown to use 7034 * this interface incorrectly. So for now when the interface is misused, 7035 * return a warning to the user that in the future it won't work in the 7036 * way they're abusing it, and carry on (after disabling page relocation). 7037 */ 7038 pfn_t 7039 hat_getkpfnum(caddr_t addr) 7040 { 7041 pfn_t pfn; 7042 tte_t tte; 7043 int badcaller = 0; 7044 extern int segkmem_reloc; 7045 7046 if (segkpm && IS_KPM_ADDR(addr)) { 7047 badcaller = 1; 7048 pfn = sfmmu_kpm_vatopfn(addr); 7049 } else { 7050 while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte)) 7051 == PFN_SUSPENDED) { 7052 sfmmu_vatopfn_suspended(addr, ksfmmup, &tte); 7053 } 7054 badcaller = pf_is_memory(pfn); 7055 } 7056 7057 if (badcaller) { 7058 /* 7059 * We can't return PFN_INVALID or the caller may panic 7060 * or corrupt the system. The only alternative is to 7061 * disable page relocation at this point for all kernel 7062 * memory. This will impact any callers of page_relocate() 7063 * such as FMA or DR. 7064 * 7065 * RFE: Add junk here to spit out an ereport so the sysadmin 7066 * can be advised that he should upgrade his device driver 7067 * so that this doesn't happen. 7068 */ 7069 hat_getkpfnum_badcall(caller()); 7070 if (hat_kpr_enabled && segkmem_reloc) { 7071 hat_kpr_enabled = 0; 7072 segkmem_reloc = 0; 7073 cmn_err(CE_WARN, "Kernel Page Relocation is DISABLED"); 7074 } 7075 } 7076 return (pfn); 7077 } 7078 7079 pfn_t 7080 sfmmu_uvatopfn(caddr_t vaddr, struct hat *sfmmup) 7081 { 7082 struct hmehash_bucket *hmebp; 7083 hmeblk_tag hblktag; 7084 int hmeshift, hashno = 1; 7085 struct hme_blk *hmeblkp = NULL; 7086 7087 struct sf_hment *sfhmep; 7088 tte_t tte; 7089 pfn_t pfn; 7090 7091 /* support for ISM */ 7092 ism_map_t *ism_map; 7093 ism_blk_t *ism_blkp; 7094 int i; 7095 sfmmu_t *ism_hatid = NULL; 7096 sfmmu_t *locked_hatid = NULL; 7097 7098 7099 ASSERT(sfmmup != ksfmmup); 7100 SFMMU_STAT(sf_user_vtop); 7101 /* 7102 * Set ism_hatid if vaddr falls in a ISM segment. 
7103 */ 7104 ism_blkp = sfmmup->sfmmu_iblk; 7105 if (ism_blkp) { 7106 sfmmu_ismhat_enter(sfmmup, 0); 7107 locked_hatid = sfmmup; 7108 } 7109 while (ism_blkp && ism_hatid == NULL) { 7110 ism_map = ism_blkp->iblk_maps; 7111 for (i = 0; ism_map[i].imap_ismhat && i < ISM_MAP_SLOTS; i++) { 7112 if (vaddr >= ism_start(ism_map[i]) && 7113 vaddr < ism_end(ism_map[i])) { 7114 sfmmup = ism_hatid = ism_map[i].imap_ismhat; 7115 vaddr = (caddr_t)(vaddr - 7116 ism_start(ism_map[i])); 7117 break; 7118 } 7119 } 7120 ism_blkp = ism_blkp->iblk_next; 7121 } 7122 if (locked_hatid) { 7123 sfmmu_ismhat_exit(locked_hatid, 0); 7124 } 7125 7126 hblktag.htag_id = sfmmup; 7127 do { 7128 hmeshift = HME_HASH_SHIFT(hashno); 7129 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift); 7130 hblktag.htag_rehash = hashno; 7131 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift); 7132 7133 SFMMU_HASH_LOCK(hmebp); 7134 7135 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 7136 if (hmeblkp != NULL) { 7137 HBLKTOHME(sfhmep, hmeblkp, vaddr); 7138 sfmmu_copytte(&sfhmep->hme_tte, &tte); 7139 if (TTE_IS_VALID(&tte)) { 7140 pfn = TTE_TO_PFN(vaddr, &tte); 7141 } else { 7142 pfn = PFN_INVALID; 7143 } 7144 SFMMU_HASH_UNLOCK(hmebp); 7145 return (pfn); 7146 } 7147 SFMMU_HASH_UNLOCK(hmebp); 7148 hashno++; 7149 } while (HME_REHASH(sfmmup) && (hashno <= mmu_hashcnt)); 7150 return (PFN_INVALID); 7151 } 7152 7153 7154 /* 7155 * For compatibility with AT&T and later optimizations 7156 */ 7157 /* ARGSUSED */ 7158 void 7159 hat_map(struct hat *hat, caddr_t addr, size_t len, uint_t flags) 7160 { 7161 ASSERT(hat != NULL); 7162 ASSERT(hat->sfmmu_xhat_provider == NULL); 7163 } 7164 7165 /* 7166 * Return the number of mappings to a particular page. 7167 * This number is an approximation of the number of 7168 * people sharing the page. 7169 */ 7170 ulong_t 7171 hat_page_getshare(page_t *pp) 7172 { 7173 page_t *spp = pp; /* start page */ 7174 kmutex_t *pml; 7175 ulong_t cnt; 7176 int index, sz = TTE64K; 7177 7178 /* 7179 * We need to grab the mlist lock to make sure any outstanding 7180 * load/unloads complete. Otherwise we could return zero 7181 * even though the unload(s) haven't finished yet. 7182 */ 7183 pml = sfmmu_mlist_enter(spp); 7184 cnt = spp->p_share; 7185 7186 if (kpm_enable) 7187 cnt += spp->p_kpmref; 7188 7189 /* 7190 * If we have any large mappings, we count the number of 7191 * mappings that this large page is part of. 7192 */ 7193 index = PP_MAPINDEX(spp); 7194 index >>= 1; 7195 while (index) { 7196 pp = PP_GROUPLEADER(spp, sz); 7197 if ((index & 0x1) && pp != spp) { 7198 cnt += pp->p_share; 7199 spp = pp; 7200 } 7201 index >>= 1; 7202 sz++; 7203 } 7204 sfmmu_mlist_exit(pml); 7205 return (cnt); 7206 } 7207 7208 /* 7209 * Unload all large mappings to the pp and reset the p_szc field of every 7210 * constituent page according to the remaining mappings. 7211 * 7212 * pp must be locked SE_EXCL. Even though no other constituent pages are 7213 * locked it's legal to unload the large mappings to the pp because all 7214 * constituent pages of large locked mappings have to be locked SE_SHARED. 7215 * This means if we have SE_EXCL lock on one of constituent pages none of the 7216 * large mappings to pp are locked. 7217 * 7218 * Decrease p_szc field starting from the last constituent page and ending 7219 * with the root page. This method is used because other threads rely on the 7220 * root's p_szc to find the lock to synchronize on. Once a root page_t's p_szc 7221 * is demoted, other threads will succeed in sfmmu_mlspl_enter().
This 7222 * ensures that p_szc changes of the constituent pages appears atomic for all 7223 * threads that use sfmmu_mlspl_enter() to examine p_szc field. 7224 * 7225 * This mechanism is only used for file system pages where it's not always 7226 * possible to get SE_EXCL locks on all constituent pages to demote the size 7227 * code (as is done for anonymous or kernel large pages). 7228 * 7229 * See more comments in front of sfmmu_mlspl_enter(). 7230 */ 7231 void 7232 hat_page_demote(page_t *pp) 7233 { 7234 int index; 7235 int sz; 7236 cpuset_t cpuset; 7237 int sync = 0; 7238 page_t *rootpp; 7239 struct sf_hment *sfhme; 7240 struct sf_hment *tmphme = NULL; 7241 struct hme_blk *hmeblkp; 7242 uint_t pszc; 7243 page_t *lastpp; 7244 cpuset_t tset; 7245 pgcnt_t npgs; 7246 kmutex_t *pml; 7247 kmutex_t *pmtx = NULL; 7248 7249 ASSERT(PAGE_EXCL(pp)); 7250 ASSERT(!PP_ISFREE(pp)); 7251 ASSERT(page_szc_lock_assert(pp)); 7252 pml = sfmmu_mlist_enter(pp); 7253 7254 pszc = pp->p_szc; 7255 if (pszc == 0) { 7256 goto out; 7257 } 7258 7259 index = PP_MAPINDEX(pp) >> 1; 7260 7261 if (index) { 7262 CPUSET_ZERO(cpuset); 7263 sz = TTE64K; 7264 sync = 1; 7265 } 7266 7267 while (index) { 7268 if (!(index & 0x1)) { 7269 index >>= 1; 7270 sz++; 7271 continue; 7272 } 7273 ASSERT(sz <= pszc); 7274 rootpp = PP_GROUPLEADER(pp, sz); 7275 for (sfhme = rootpp->p_mapping; sfhme; sfhme = tmphme) { 7276 tmphme = sfhme->hme_next; 7277 hmeblkp = sfmmu_hmetohblk(sfhme); 7278 if (hme_size(sfhme) != sz) { 7279 continue; 7280 } 7281 if (hmeblkp->hblk_xhat_bit) { 7282 cmn_err(CE_PANIC, 7283 "hat_page_demote: xhat hmeblk"); 7284 } 7285 tset = sfmmu_pageunload(rootpp, sfhme, sz); 7286 CPUSET_OR(cpuset, tset); 7287 } 7288 if (index >>= 1) { 7289 sz++; 7290 } 7291 } 7292 7293 ASSERT(!PP_ISMAPPED_LARGE(pp)); 7294 7295 if (sync) { 7296 xt_sync(cpuset); 7297 if (PP_ISTNC(pp)) { 7298 conv_tnc(rootpp, sz); 7299 } 7300 } 7301 7302 pmtx = sfmmu_page_enter(pp); 7303 7304 ASSERT(pp->p_szc == pszc); 7305 rootpp = PP_PAGEROOT(pp); 7306 ASSERT(rootpp->p_szc == pszc); 7307 lastpp = PP_PAGENEXT_N(rootpp, TTEPAGES(pszc) - 1); 7308 7309 while (lastpp != rootpp) { 7310 sz = PP_MAPINDEX(lastpp) ? fnd_mapping_sz(lastpp) : 0; 7311 ASSERT(sz < pszc); 7312 npgs = (sz == 0) ? 1 : TTEPAGES(sz); 7313 ASSERT(P2PHASE(lastpp->p_pagenum, npgs) == npgs - 1); 7314 while (--npgs > 0) { 7315 lastpp->p_szc = (uchar_t)sz; 7316 lastpp = PP_PAGEPREV(lastpp); 7317 } 7318 if (sz) { 7319 /* 7320 * make sure before current root's pszc 7321 * is updated all updates to constituent pages pszc 7322 * fields are globally visible. 7323 */ 7324 membar_producer(); 7325 } 7326 lastpp->p_szc = sz; 7327 ASSERT(IS_P2ALIGNED(lastpp->p_pagenum, TTEPAGES(sz))); 7328 if (lastpp != rootpp) { 7329 lastpp = PP_PAGEPREV(lastpp); 7330 } 7331 } 7332 if (sz == 0) { 7333 /* the loop above doesn't cover this case */ 7334 rootpp->p_szc = 0; 7335 } 7336 out: 7337 ASSERT(pp->p_szc == 0); 7338 if (pmtx != NULL) { 7339 sfmmu_page_exit(pmtx); 7340 } 7341 sfmmu_mlist_exit(pml); 7342 } 7343 7344 /* 7345 * Refresh the HAT ismttecnt[] element for size szc. 7346 * Caller must have set ISM busy flag to prevent mapping 7347 * lists from changing while we're traversing them. 
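 *
 * The cached sfmmu_ismttecnt[] values computed here are what
 * hat_get_mapped_size() below folds into its address space size
 * estimate, roughly (a condensed sketch of the loop that appears
 * later in this file):
 *
 *      for (i = 0; i < mmu_page_sizes; i++)
 *              assize += (pgcnt_t)hat->sfmmu_ismttecnt[i] *
 *                  TTEBYTES(i);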
7348 */ 7349 pgcnt_t 7350 ism_tsb_entries(sfmmu_t *sfmmup, int szc) 7351 { 7352 ism_blk_t *ism_blkp = sfmmup->sfmmu_iblk; 7353 ism_map_t *ism_map; 7354 pgcnt_t npgs = 0; 7355 int j; 7356 7357 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 7358 for (; ism_blkp != NULL; ism_blkp = ism_blkp->iblk_next) { 7359 ism_map = ism_blkp->iblk_maps; 7360 for (j = 0; ism_map[j].imap_ismhat && j < ISM_MAP_SLOTS; j++) 7361 npgs += ism_map[j].imap_ismhat->sfmmu_ttecnt[szc]; 7362 } 7363 sfmmup->sfmmu_ismttecnt[szc] = npgs; 7364 return (npgs); 7365 } 7366 7367 /* 7368 * Yield the memory claim requirement for an address space. 7369 * 7370 * This is currently implemented as the number of bytes that have active 7371 * hardware translations that have page structures. Therefore, it can 7372 * underestimate the traditional resident set size, eg, if the 7373 * physical page is present and the hardware translation is missing; 7374 * and it can overestimate the rss, eg, if there are active 7375 * translations to a frame buffer with page structs. 7376 * Also, it does not take sharing into account. 7377 * 7378 * Note that we don't acquire locks here since this function is most often 7379 * called from the clock thread. 7380 */ 7381 size_t 7382 hat_get_mapped_size(struct hat *hat) 7383 { 7384 size_t assize = 0; 7385 int i; 7386 7387 if (hat == NULL) 7388 return (0); 7389 7390 ASSERT(hat->sfmmu_xhat_provider == NULL); 7391 7392 for (i = 0; i < mmu_page_sizes; i++) 7393 assize += (pgcnt_t)hat->sfmmu_ttecnt[i] * TTEBYTES(i); 7394 7395 if (hat->sfmmu_iblk == NULL) 7396 return (assize); 7397 7398 for (i = 0; i < mmu_page_sizes; i++) 7399 assize += (pgcnt_t)hat->sfmmu_ismttecnt[i] * TTEBYTES(i); 7400 7401 return (assize); 7402 } 7403 7404 int 7405 hat_stats_enable(struct hat *hat) 7406 { 7407 hatlock_t *hatlockp; 7408 7409 ASSERT(hat->sfmmu_xhat_provider == NULL); 7410 7411 hatlockp = sfmmu_hat_enter(hat); 7412 hat->sfmmu_rmstat++; 7413 sfmmu_hat_exit(hatlockp); 7414 return (1); 7415 } 7416 7417 void 7418 hat_stats_disable(struct hat *hat) 7419 { 7420 hatlock_t *hatlockp; 7421 7422 ASSERT(hat->sfmmu_xhat_provider == NULL); 7423 7424 hatlockp = sfmmu_hat_enter(hat); 7425 hat->sfmmu_rmstat--; 7426 sfmmu_hat_exit(hatlockp); 7427 } 7428 7429 /* 7430 * Routines for entering or removing ourselves from the 7431 * ism_hat's mapping list. 7432 */ 7433 static void 7434 iment_add(struct ism_ment *iment, struct hat *ism_hat) 7435 { 7436 ASSERT(MUTEX_HELD(&ism_mlist_lock)); 7437 7438 iment->iment_prev = NULL; 7439 iment->iment_next = ism_hat->sfmmu_iment; 7440 if (ism_hat->sfmmu_iment) { 7441 ism_hat->sfmmu_iment->iment_prev = iment; 7442 } 7443 ism_hat->sfmmu_iment = iment; 7444 } 7445 7446 static void 7447 iment_sub(struct ism_ment *iment, struct hat *ism_hat) 7448 { 7449 ASSERT(MUTEX_HELD(&ism_mlist_lock)); 7450 7451 if (ism_hat->sfmmu_iment == NULL) { 7452 panic("ism map entry remove - no entries"); 7453 } 7454 7455 if (iment->iment_prev) { 7456 ASSERT(ism_hat->sfmmu_iment != iment); 7457 iment->iment_prev->iment_next = iment->iment_next; 7458 } else { 7459 ASSERT(ism_hat->sfmmu_iment == iment); 7460 ism_hat->sfmmu_iment = iment->iment_next; 7461 } 7462 7463 if (iment->iment_next) { 7464 iment->iment_next->iment_prev = iment->iment_prev; 7465 } 7466 7467 /* 7468 * zero out the entry 7469 */ 7470 iment->iment_next = NULL; 7471 iment->iment_prev = NULL; 7472 iment->iment_hat = NULL; 7473 } 7474 7475 /* 7476 * Hat_share()/unshare() return an (non-zero) error 7477 * when saddr and daddr are not properly aligned. 
7478 * 7479 * The top level mapping element determines the alignment 7480 * requirement for saddr and daddr, depending on different 7481 * architectures. 7482 * 7483 * When hat_share()/unshare() are not supported, 7484 * HATOP_SHARE()/UNSHARE() return 0 7485 */ 7486 int 7487 hat_share(struct hat *sfmmup, caddr_t addr, 7488 struct hat *ism_hatid, caddr_t sptaddr, size_t len, uint_t ismszc) 7489 { 7490 ism_blk_t *ism_blkp; 7491 ism_blk_t *new_iblk; 7492 ism_map_t *ism_map; 7493 ism_ment_t *ism_ment; 7494 int i, added; 7495 hatlock_t *hatlockp; 7496 int reload_mmu = 0; 7497 uint_t ismshift = page_get_shift(ismszc); 7498 size_t ismpgsz = page_get_pagesize(ismszc); 7499 uint_t ismmask = (uint_t)ismpgsz - 1; 7500 size_t sh_size = ISM_SHIFT(ismshift, len); 7501 ushort_t ismhatflag; 7502 7503 #ifdef DEBUG 7504 caddr_t eaddr = addr + len; 7505 #endif /* DEBUG */ 7506 7507 ASSERT(ism_hatid != NULL && sfmmup != NULL); 7508 ASSERT(sptaddr == ISMID_STARTADDR); 7509 /* 7510 * Check the alignment. 7511 */ 7512 if (!ISM_ALIGNED(ismshift, addr) || !ISM_ALIGNED(ismshift, sptaddr)) 7513 return (EINVAL); 7514 7515 /* 7516 * Check size alignment. 7517 */ 7518 if (!ISM_ALIGNED(ismshift, len)) 7519 return (EINVAL); 7520 7521 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 7522 7523 /* 7524 * Allocate ism_ment for the ism_hat's mapping list, and an 7525 * ism map blk in case we need one. We must do our 7526 * allocations before acquiring locks to prevent a deadlock 7527 * in the kmem allocator on the mapping list lock. 7528 */ 7529 new_iblk = kmem_cache_alloc(ism_blk_cache, KM_SLEEP); 7530 ism_ment = kmem_cache_alloc(ism_ment_cache, KM_SLEEP); 7531 7532 /* 7533 * Serialize ISM mappings with the ISM busy flag, and also the 7534 * trap handlers. 7535 */ 7536 sfmmu_ismhat_enter(sfmmup, 0); 7537 7538 /* 7539 * Allocate an ism map blk if necessary. 7540 */ 7541 if (sfmmup->sfmmu_iblk == NULL) { 7542 sfmmup->sfmmu_iblk = new_iblk; 7543 bzero(new_iblk, sizeof (*new_iblk)); 7544 new_iblk->iblk_nextpa = (uint64_t)-1; 7545 membar_stst(); /* make sure next ptr visible to all CPUs */ 7546 sfmmup->sfmmu_ismblkpa = va_to_pa((caddr_t)new_iblk); 7547 reload_mmu = 1; 7548 new_iblk = NULL; 7549 } 7550 7551 #ifdef DEBUG 7552 /* 7553 * Make sure mapping does not already exist. 7554 */ 7555 ism_blkp = sfmmup->sfmmu_iblk; 7556 while (ism_blkp) { 7557 ism_map = ism_blkp->iblk_maps; 7558 for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) { 7559 if ((addr >= ism_start(ism_map[i]) && 7560 addr < ism_end(ism_map[i])) || 7561 eaddr > ism_start(ism_map[i]) && 7562 eaddr <= ism_end(ism_map[i])) { 7563 panic("sfmmu_share: Already mapped!"); 7564 } 7565 } 7566 ism_blkp = ism_blkp->iblk_next; 7567 } 7568 #endif /* DEBUG */ 7569 7570 ASSERT(ismszc >= TTE4M); 7571 if (ismszc == TTE4M) { 7572 ismhatflag = HAT_4M_FLAG; 7573 } else if (ismszc == TTE32M) { 7574 ismhatflag = HAT_32M_FLAG; 7575 } else if (ismszc == TTE256M) { 7576 ismhatflag = HAT_256M_FLAG; 7577 } 7578 /* 7579 * Add mapping to first available mapping slot. 7580 */ 7581 ism_blkp = sfmmup->sfmmu_iblk; 7582 added = 0; 7583 while (!added) { 7584 ism_map = ism_blkp->iblk_maps; 7585 for (i = 0; i < ISM_MAP_SLOTS; i++) { 7586 if (ism_map[i].imap_ismhat == NULL) { 7587 7588 ism_map[i].imap_ismhat = ism_hatid; 7589 ism_map[i].imap_vb_shift = (ushort_t)ismshift; 7590 ism_map[i].imap_hatflags = ismhatflag; 7591 ism_map[i].imap_sz_mask = ismmask; 7592 /* 7593 * imap_seg is checked in ISM_CHECK to see if 7594 * non-NULL, then other info assumed valid. 
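 * The membar_stst() just below provides that ordering: the fields
 * initialized above it must be globally visible before the store
 * that makes imap_seg non-NULL, since the tsb miss handler inspects
 * these slots without taking any lock.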
7595 */ 7596 membar_stst(); 7597 ism_map[i].imap_seg = (uintptr_t)addr | sh_size; 7598 ism_map[i].imap_ment = ism_ment; 7599 7600 /* 7601 * Now add ourselves to the ism_hat's 7602 * mapping list. 7603 */ 7604 ism_ment->iment_hat = sfmmup; 7605 ism_ment->iment_base_va = addr; 7606 ism_hatid->sfmmu_ismhat = 1; 7607 ism_hatid->sfmmu_flags = 0; 7608 mutex_enter(&ism_mlist_lock); 7609 iment_add(ism_ment, ism_hatid); 7610 mutex_exit(&ism_mlist_lock); 7611 added = 1; 7612 break; 7613 } 7614 } 7615 if (!added && ism_blkp->iblk_next == NULL) { 7616 ism_blkp->iblk_next = new_iblk; 7617 new_iblk = NULL; 7618 bzero(ism_blkp->iblk_next, 7619 sizeof (*ism_blkp->iblk_next)); 7620 ism_blkp->iblk_next->iblk_nextpa = (uint64_t)-1; 7621 membar_stst(); 7622 ism_blkp->iblk_nextpa = 7623 va_to_pa((caddr_t)ism_blkp->iblk_next); 7624 } 7625 ism_blkp = ism_blkp->iblk_next; 7626 } 7627 7628 /* 7629 * Update our counters for this sfmmup's ism mappings. 7630 */ 7631 for (i = 0; i <= ismszc; i++) { 7632 if (!(disable_ism_large_pages & (1 << i))) 7633 (void) ism_tsb_entries(sfmmup, i); 7634 } 7635 7636 hatlockp = sfmmu_hat_enter(sfmmup); 7637 7638 /* 7639 * For ISM and DISM we do not support 512K pages, so we only 7640 * only search the 4M and 8K/64K hashes for 4 pagesize cpus, and search 7641 * the 256M or 32M, and 4M and 8K/64K hashes for 6 pagesize cpus. 7642 */ 7643 ASSERT((disable_ism_large_pages & (1 << TTE512K)) != 0); 7644 7645 if (ismszc > TTE4M && !SFMMU_FLAGS_ISSET(sfmmup, HAT_4M_FLAG)) 7646 SFMMU_FLAGS_SET(sfmmup, HAT_4M_FLAG); 7647 7648 if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_64K_FLAG)) 7649 SFMMU_FLAGS_SET(sfmmup, HAT_64K_FLAG); 7650 7651 /* 7652 * If we updated the ismblkpa for this HAT or we need 7653 * to start searching the 256M or 32M or 4M hash, we must 7654 * make sure all CPUs running this process reload their 7655 * tsbmiss area. Otherwise they will fail to load the mappings 7656 * in the tsbmiss handler and will loop calling pagefault(). 7657 */ 7658 switch (ismszc) { 7659 case TTE256M: 7660 if (reload_mmu || !SFMMU_FLAGS_ISSET(sfmmup, HAT_256M_FLAG)) { 7661 SFMMU_FLAGS_SET(sfmmup, HAT_256M_FLAG); 7662 sfmmu_sync_mmustate(sfmmup); 7663 } 7664 break; 7665 case TTE32M: 7666 if (reload_mmu || !SFMMU_FLAGS_ISSET(sfmmup, HAT_32M_FLAG)) { 7667 SFMMU_FLAGS_SET(sfmmup, HAT_32M_FLAG); 7668 sfmmu_sync_mmustate(sfmmup); 7669 } 7670 break; 7671 case TTE4M: 7672 if (reload_mmu || !SFMMU_FLAGS_ISSET(sfmmup, HAT_4M_FLAG)) { 7673 SFMMU_FLAGS_SET(sfmmup, HAT_4M_FLAG); 7674 sfmmu_sync_mmustate(sfmmup); 7675 } 7676 break; 7677 default: 7678 break; 7679 } 7680 7681 /* 7682 * Now we can drop the locks. 7683 */ 7684 sfmmu_ismhat_exit(sfmmup, 1); 7685 sfmmu_hat_exit(hatlockp); 7686 7687 /* 7688 * Free up ismblk if we didn't use it. 7689 */ 7690 if (new_iblk != NULL) 7691 kmem_cache_free(ism_blk_cache, new_iblk); 7692 7693 /* 7694 * Check TSB and TLB page sizes. 7695 */ 7696 sfmmu_check_page_sizes(sfmmup, 1); 7697 7698 return (0); 7699 } 7700 7701 /* 7702 * hat_unshare removes exactly one ism_map from 7703 * this process's as. It expects multiple calls 7704 * to hat_unshare for multiple shm segments. 
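 *
 * As an illustrative sketch only (hypothetical variable names, not a
 * call site from this file), the shared memory code pairs the two
 * interfaces roughly as follows:
 *
 *      error = hat_share(as->a_hat, shm_addr, ism_hat,
 *          ISMID_STARTADDR, shm_size, share_szc);
 *      ...
 *      hat_unshare(as->a_hat, shm_addr, shm_size, share_szc);
 *
 * where shm_addr, shm_size and share_szc must match the values passed
 * to hat_share() so that the corresponding ism map entry can be found
 * and removed.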
7705 */ 7706 void 7707 hat_unshare(struct hat *sfmmup, caddr_t addr, size_t len, uint_t ismszc) 7708 { 7709 ism_map_t *ism_map; 7710 ism_ment_t *free_ment = NULL; 7711 ism_blk_t *ism_blkp; 7712 struct hat *ism_hatid; 7713 struct ctx *ctx; 7714 int cnum, found, i; 7715 hatlock_t *hatlockp; 7716 struct tsb_info *tsbinfo; 7717 uint_t ismshift = page_get_shift(ismszc); 7718 size_t sh_size = ISM_SHIFT(ismshift, len); 7719 7720 ASSERT(ISM_ALIGNED(ismshift, addr)); 7721 ASSERT(ISM_ALIGNED(ismshift, len)); 7722 ASSERT(sfmmup != NULL); 7723 ASSERT(sfmmup != ksfmmup); 7724 7725 if (sfmmup->sfmmu_xhat_provider) { 7726 XHAT_UNSHARE(sfmmup, addr, len); 7727 return; 7728 } else { 7729 /* 7730 * This must be a CPU HAT. If the address space has 7731 * XHATs attached, inform all XHATs that ISM segment 7732 * is going away 7733 */ 7734 ASSERT(sfmmup->sfmmu_as != NULL); 7735 if (sfmmup->sfmmu_as->a_xhat != NULL) 7736 xhat_unshare_all(sfmmup->sfmmu_as, addr, len); 7737 } 7738 7739 /* 7740 * Make sure that during the entire time ISM mappings are removed, 7741 * the trap handlers serialize behind us, and that no one else 7742 * can be mucking with ISM mappings. This also lets us get away 7743 * with not doing expensive cross calls to flush the TLB -- we 7744 * just discard the context, flush the entire TSB, and call it 7745 * a day. 7746 */ 7747 sfmmu_ismhat_enter(sfmmup, 0); 7748 7749 /* 7750 * Remove the mapping. 7751 * 7752 * We can't have any holes in the ism map. 7753 * The tsb miss code while searching the ism map will 7754 * stop on an empty map slot. So we must move 7755 * everyone past the hole up 1 if any. 7756 * 7757 * Also empty ism map blks are not freed until the 7758 * process exits. This is to prevent a MT race condition 7759 * between sfmmu_unshare() and sfmmu_tsbmiss_exception(). 7760 */ 7761 found = 0; 7762 ism_blkp = sfmmup->sfmmu_iblk; 7763 while (!found && ism_blkp) { 7764 ism_map = ism_blkp->iblk_maps; 7765 for (i = 0; i < ISM_MAP_SLOTS; i++) { 7766 if (addr == ism_start(ism_map[i]) && 7767 sh_size == (size_t)(ism_size(ism_map[i]))) { 7768 found = 1; 7769 break; 7770 } 7771 } 7772 if (!found) 7773 ism_blkp = ism_blkp->iblk_next; 7774 } 7775 7776 if (found) { 7777 ism_hatid = ism_map[i].imap_ismhat; 7778 ASSERT(ism_hatid != NULL); 7779 ASSERT(ism_hatid->sfmmu_ismhat == 1); 7780 ASSERT(ism_hatid->sfmmu_cnum == INVALID_CONTEXT); 7781 7782 /* 7783 * First remove ourselves from the ism mapping list. 7784 */ 7785 mutex_enter(&ism_mlist_lock); 7786 iment_sub(ism_map[i].imap_ment, ism_hatid); 7787 mutex_exit(&ism_mlist_lock); 7788 free_ment = ism_map[i].imap_ment; 7789 7790 /* 7791 * Now gurantee that any other cpu 7792 * that tries to process an ISM miss 7793 * will go to tl=0. 7794 */ 7795 hatlockp = sfmmu_hat_enter(sfmmup); 7796 ctx = sfmmutoctx(sfmmup); 7797 rw_enter(&ctx->ctx_rwlock, RW_WRITER); 7798 cnum = sfmmutoctxnum(sfmmup); 7799 7800 if (cnum != INVALID_CONTEXT) { 7801 sfmmu_tlb_swap_ctx(sfmmup, ctx); 7802 } 7803 rw_exit(&ctx->ctx_rwlock); 7804 sfmmu_hat_exit(hatlockp); 7805 7806 /* 7807 * We delete the ism map by copying 7808 * the next map over the current one. 7809 * We will take the next one in the maps 7810 * array or from the next ism_blk. 
7811 */ 7812 while (ism_blkp) { 7813 ism_map = ism_blkp->iblk_maps; 7814 while (i < (ISM_MAP_SLOTS - 1)) { 7815 ism_map[i] = ism_map[i + 1]; 7816 i++; 7817 } 7818 /* i == (ISM_MAP_SLOTS - 1) */ 7819 ism_blkp = ism_blkp->iblk_next; 7820 if (ism_blkp) { 7821 ism_map[i] = ism_blkp->iblk_maps[0]; 7822 i = 0; 7823 } else { 7824 ism_map[i].imap_seg = 0; 7825 ism_map[i].imap_vb_shift = 0; 7826 ism_map[i].imap_hatflags = 0; 7827 ism_map[i].imap_sz_mask = 0; 7828 ism_map[i].imap_ismhat = NULL; 7829 ism_map[i].imap_ment = NULL; 7830 } 7831 } 7832 7833 /* 7834 * Now flush entire TSB for the process, since 7835 * demapping page by page can be too expensive. 7836 * We don't have to flush the TLB here anymore 7837 * since we switch to a new TLB ctx instead. 7838 * Also, there is no need to flush if the process 7839 * is exiting since the TSB will be freed later. 7840 */ 7841 if (!sfmmup->sfmmu_free) { 7842 hatlockp = sfmmu_hat_enter(sfmmup); 7843 for (tsbinfo = sfmmup->sfmmu_tsb; tsbinfo != NULL; 7844 tsbinfo = tsbinfo->tsb_next) { 7845 if (tsbinfo->tsb_flags & TSB_SWAPPED) 7846 continue; 7847 sfmmu_inv_tsb(tsbinfo->tsb_va, 7848 TSB_BYTES(tsbinfo->tsb_szc)); 7849 } 7850 sfmmu_hat_exit(hatlockp); 7851 } 7852 } 7853 7854 /* 7855 * Update our counters for this sfmmup's ism mappings. 7856 */ 7857 for (i = 0; i <= ismszc; i++) { 7858 if (!(disable_ism_large_pages & (1 << i))) 7859 (void) ism_tsb_entries(sfmmup, i); 7860 } 7861 7862 sfmmu_ismhat_exit(sfmmup, 0); 7863 7864 /* 7865 * We must do our freeing here after dropping locks 7866 * to prevent a deadlock in the kmem allocator on the 7867 * mapping list lock. 7868 */ 7869 if (free_ment != NULL) 7870 kmem_cache_free(ism_ment_cache, free_ment); 7871 7872 /* 7873 * Check TSB and TLB page sizes if the process isn't exiting. 7874 */ 7875 if (!sfmmup->sfmmu_free) 7876 sfmmu_check_page_sizes(sfmmup, 0); 7877 } 7878 7879 /* ARGSUSED */ 7880 static int 7881 sfmmu_idcache_constructor(void *buf, void *cdrarg, int kmflags) 7882 { 7883 /* void *buf is sfmmu_t pointer */ 7884 return (0); 7885 } 7886 7887 /* ARGSUSED */ 7888 static void 7889 sfmmu_idcache_destructor(void *buf, void *cdrarg) 7890 { 7891 /* void *buf is sfmmu_t pointer */ 7892 } 7893 7894 /* 7895 * setup kmem hmeblks by bzeroing all members and initializing the nextpa 7896 * field to be the pa of this hmeblk 7897 */ 7898 /* ARGSUSED */ 7899 static int 7900 sfmmu_hblkcache_constructor(void *buf, void *cdrarg, int kmflags) 7901 { 7902 struct hme_blk *hmeblkp; 7903 7904 bzero(buf, (size_t)cdrarg); 7905 hmeblkp = (struct hme_blk *)buf; 7906 hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp); 7907 7908 #ifdef HBLK_TRACE 7909 mutex_init(&hmeblkp->hblk_audit_lock, NULL, MUTEX_DEFAULT, NULL); 7910 #endif /* HBLK_TRACE */ 7911 7912 return (0); 7913 } 7914 7915 /* ARGSUSED */ 7916 static void 7917 sfmmu_hblkcache_destructor(void *buf, void *cdrarg) 7918 { 7919 7920 #ifdef HBLK_TRACE 7921 7922 struct hme_blk *hmeblkp; 7923 7924 hmeblkp = (struct hme_blk *)buf; 7925 mutex_destroy(&hmeblkp->hblk_audit_lock); 7926 7927 #endif /* HBLK_TRACE */ 7928 } 7929 7930 #define SFMMU_CACHE_RECLAIM_SCAN_RATIO 8 7931 static int sfmmu_cache_reclaim_scan_ratio = SFMMU_CACHE_RECLAIM_SCAN_RATIO; 7932 /* 7933 * The kmem allocator will callback into our reclaim routine when the system 7934 * is running low in memory. We traverse the hash and free up all unused but 7935 * still cached hme_blks. We also traverse the free list and free them up 7936 * as well. 
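 *
 * To bound the work done per invocation, each call scans only
 * 1/sfmmu_cache_reclaim_scan_ratio (by default 1/8) of the user and
 * kernel hme hash tables, resuming where the previous call left off
 * via the static uhmehash_reclaim_hand and khmehash_reclaim_hand
 * cursors below.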
7937 */ 7938 /*ARGSUSED*/ 7939 static void 7940 sfmmu_hblkcache_reclaim(void *cdrarg) 7941 { 7942 int i; 7943 uint64_t hblkpa, prevpa, nx_pa; 7944 struct hmehash_bucket *hmebp; 7945 struct hme_blk *hmeblkp, *nx_hblk, *pr_hblk = NULL; 7946 static struct hmehash_bucket *uhmehash_reclaim_hand; 7947 static struct hmehash_bucket *khmehash_reclaim_hand; 7948 struct hme_blk *list = NULL; 7949 7950 hmebp = uhmehash_reclaim_hand; 7951 if (hmebp == NULL || hmebp > &uhme_hash[UHMEHASH_SZ]) 7952 uhmehash_reclaim_hand = hmebp = uhme_hash; 7953 uhmehash_reclaim_hand += UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; 7954 7955 for (i = UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) { 7956 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) { 7957 hmeblkp = hmebp->hmeblkp; 7958 hblkpa = hmebp->hmeh_nextpa; 7959 prevpa = 0; 7960 pr_hblk = NULL; 7961 while (hmeblkp) { 7962 nx_hblk = hmeblkp->hblk_next; 7963 nx_pa = hmeblkp->hblk_nextpa; 7964 if (!hmeblkp->hblk_vcnt && 7965 !hmeblkp->hblk_hmecnt) { 7966 sfmmu_hblk_hash_rm(hmebp, hmeblkp, 7967 prevpa, pr_hblk); 7968 sfmmu_hblk_free(hmebp, hmeblkp, 7969 hblkpa, &list); 7970 } else { 7971 pr_hblk = hmeblkp; 7972 prevpa = hblkpa; 7973 } 7974 hmeblkp = nx_hblk; 7975 hblkpa = nx_pa; 7976 } 7977 SFMMU_HASH_UNLOCK(hmebp); 7978 } 7979 if (hmebp++ == &uhme_hash[UHMEHASH_SZ]) 7980 hmebp = uhme_hash; 7981 } 7982 7983 hmebp = khmehash_reclaim_hand; 7984 if (hmebp == NULL || hmebp > &khme_hash[KHMEHASH_SZ]) 7985 khmehash_reclaim_hand = hmebp = khme_hash; 7986 khmehash_reclaim_hand += KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; 7987 7988 for (i = KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) { 7989 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) { 7990 hmeblkp = hmebp->hmeblkp; 7991 hblkpa = hmebp->hmeh_nextpa; 7992 prevpa = 0; 7993 pr_hblk = NULL; 7994 while (hmeblkp) { 7995 nx_hblk = hmeblkp->hblk_next; 7996 nx_pa = hmeblkp->hblk_nextpa; 7997 if (!hmeblkp->hblk_vcnt && 7998 !hmeblkp->hblk_hmecnt) { 7999 sfmmu_hblk_hash_rm(hmebp, hmeblkp, 8000 prevpa, pr_hblk); 8001 sfmmu_hblk_free(hmebp, hmeblkp, 8002 hblkpa, &list); 8003 } else { 8004 pr_hblk = hmeblkp; 8005 prevpa = hblkpa; 8006 } 8007 hmeblkp = nx_hblk; 8008 hblkpa = nx_pa; 8009 } 8010 SFMMU_HASH_UNLOCK(hmebp); 8011 } 8012 if (hmebp++ == &khme_hash[KHMEHASH_SZ]) 8013 hmebp = khme_hash; 8014 } 8015 sfmmu_hblks_list_purge(&list); 8016 } 8017 8018 /* 8019 * sfmmu_get_ppvcolor should become a vm_machdep or hatop interface. 8020 * same goes for sfmmu_get_addrvcolor(). 8021 * 8022 * This function will return the virtual color for the specified page. The 8023 * virtual color corresponds to this page current mapping or its last mapping. 8024 * It is used by memory allocators to choose addresses with the correct 8025 * alignment so vac consistency is automatically maintained. If the page 8026 * has no color it returns -1. 8027 */ 8028 int 8029 sfmmu_get_ppvcolor(struct page *pp) 8030 { 8031 int color; 8032 8033 if (!(cache & CACHE_VAC) || PP_NEWPAGE(pp)) { 8034 return (-1); 8035 } 8036 color = PP_GET_VCOLOR(pp); 8037 ASSERT(color < mmu_btop(shm_alignment)); 8038 return (color); 8039 } 8040 8041 /* 8042 * This function will return the desired alignment for vac consistency 8043 * (vac color) given a virtual address. If no vac is present it returns -1. 8044 */ 8045 int 8046 sfmmu_get_addrvcolor(caddr_t vaddr) 8047 { 8048 if (cache & CACHE_VAC) { 8049 return (addr_to_vcolor(vaddr)); 8050 } else { 8051 return (-1); 8052 } 8053 8054 } 8055 8056 /* 8057 * Check for conflicts. 
8058 * A conflict exists if the new and existent mappings do not match in 8059 * their "shm_alignment fields. If conflicts exist, the existant mappings 8060 * are flushed unless one of them is locked. If one of them is locked, then 8061 * the mappings are flushed and converted to non-cacheable mappings. 8062 */ 8063 static void 8064 sfmmu_vac_conflict(struct hat *hat, caddr_t addr, page_t *pp) 8065 { 8066 struct hat *tmphat; 8067 struct sf_hment *sfhmep, *tmphme = NULL; 8068 struct hme_blk *hmeblkp; 8069 int vcolor; 8070 tte_t tte; 8071 8072 ASSERT(sfmmu_mlist_held(pp)); 8073 ASSERT(!PP_ISNC(pp)); /* page better be cacheable */ 8074 8075 vcolor = addr_to_vcolor(addr); 8076 if (PP_NEWPAGE(pp)) { 8077 PP_SET_VCOLOR(pp, vcolor); 8078 return; 8079 } 8080 8081 if (PP_GET_VCOLOR(pp) == vcolor) { 8082 return; 8083 } 8084 8085 if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) { 8086 /* 8087 * Previous user of page had a different color 8088 * but since there are no current users 8089 * we just flush the cache and change the color. 8090 */ 8091 SFMMU_STAT(sf_pgcolor_conflict); 8092 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp)); 8093 PP_SET_VCOLOR(pp, vcolor); 8094 return; 8095 } 8096 8097 /* 8098 * If we get here we have a vac conflict with a current 8099 * mapping. VAC conflict policy is as follows. 8100 * - The default is to unload the other mappings unless: 8101 * - If we have a large mapping we uncache the page. 8102 * We need to uncache the rest of the large page too. 8103 * - If any of the mappings are locked we uncache the page. 8104 * - If the requested mapping is inconsistent 8105 * with another mapping and that mapping 8106 * is in the same address space we have to 8107 * make it non-cached. The default thing 8108 * to do is unload the inconsistent mapping 8109 * but if they are in the same address space 8110 * we run the risk of unmapping the pc or the 8111 * stack which we will use as we return to the user, 8112 * in which case we can then fault on the thing 8113 * we just unloaded and get into an infinite loop. 8114 */ 8115 if (PP_ISMAPPED_LARGE(pp)) { 8116 int sz; 8117 8118 /* 8119 * Existing mapping is for big pages. We don't unload 8120 * existing big mappings to satisfy new mappings. 8121 * Always convert all mappings to TNC. 8122 */ 8123 sz = fnd_mapping_sz(pp); 8124 pp = PP_GROUPLEADER(pp, sz); 8125 SFMMU_STAT_ADD(sf_uncache_conflict, TTEPAGES(sz)); 8126 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 8127 TTEPAGES(sz)); 8128 8129 return; 8130 } 8131 8132 /* 8133 * check if any mapping is in same as or if it is locked 8134 * since in that case we need to uncache. 8135 */ 8136 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) { 8137 tmphme = sfhmep->hme_next; 8138 hmeblkp = sfmmu_hmetohblk(sfhmep); 8139 if (hmeblkp->hblk_xhat_bit) 8140 continue; 8141 tmphat = hblktosfmmu(hmeblkp); 8142 sfmmu_copytte(&sfhmep->hme_tte, &tte); 8143 ASSERT(TTE_IS_VALID(&tte)); 8144 if ((tmphat == hat) || hmeblkp->hblk_lckcnt) { 8145 /* 8146 * We have an uncache conflict 8147 */ 8148 SFMMU_STAT(sf_uncache_conflict); 8149 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 1); 8150 return; 8151 } 8152 } 8153 8154 /* 8155 * We have an unload conflict 8156 * We have already checked for LARGE mappings, therefore 8157 * the remaining mapping(s) must be TTE8K. 
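 *
 * Once the remaining small mappings (and any kpm mapping) have been
 * unloaded, a single displacement flush of the page at its old virtual
 * color is sufficient before retagging it with the new color, since
 * pageunload only shoots down TLB entries and never touches the cache.
 *
 * For reference, the virtual color compared throughout this routine is
 * roughly the following (illustration only; the exact definition lives
 * with the other VAC support macros):
 *
 *	vcolor = ((uintptr_t)vaddr & (shm_alignment - 1)) >> MMU_PAGESHIFT;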
8158 */ 8159 SFMMU_STAT(sf_unload_conflict); 8160 8161 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) { 8162 tmphme = sfhmep->hme_next; 8163 hmeblkp = sfmmu_hmetohblk(sfhmep); 8164 if (hmeblkp->hblk_xhat_bit) 8165 continue; 8166 (void) sfmmu_pageunload(pp, sfhmep, TTE8K); 8167 } 8168 8169 if (PP_ISMAPPED_KPM(pp)) 8170 sfmmu_kpm_vac_unload(pp, addr); 8171 8172 /* 8173 * Unloads only do TLB flushes so we need to flush the 8174 * cache here. 8175 */ 8176 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp)); 8177 PP_SET_VCOLOR(pp, vcolor); 8178 } 8179 8180 /* 8181 * Whenever a mapping is unloaded and the page is in TNC state, 8182 * we see if the page can be made cacheable again. 'pp' is 8183 * the page that we just unloaded a mapping from, the size 8184 * of mapping that was unloaded is 'ottesz'. 8185 * Remark: 8186 * The recache policy for mpss pages can leave a performance problem 8187 * under the following circumstances: 8188 * . A large page in uncached mode has just been unmapped. 8189 * . All constituent pages are TNC due to a conflicting small mapping. 8190 * . There are many other, non conflicting, small mappings around for 8191 * a lot of the constituent pages. 8192 * . We're called w/ the "old" groupleader page and the old ottesz, 8193 * but this is irrelevant, since we're no more "PP_ISMAPPED_LARGE", so 8194 * we end up w/ TTE8K or npages == 1. 8195 * . We call tst_tnc w/ the old groupleader only, and if there is no 8196 * conflict, we re-cache only this page. 8197 * . All other small mappings are not checked and will be left in TNC mode. 8198 * The problem is not very serious because: 8199 * . mpss is actually only defined for heap and stack, so the probability 8200 * is not very high that a large page mapping exists in parallel to a small 8201 * one (this is possible, but seems to be bad programming style in the 8202 * appl). 8203 * . The problem gets a little bit more serious, when those TNC pages 8204 * have to be mapped into kernel space, e.g. for networking. 8205 * . When VAC alias conflicts occur in applications, this is regarded 8206 * as an application bug. So if kstat's show them, the appl should 8207 * be changed anyway. 8208 */ 8209 static void 8210 conv_tnc(page_t *pp, int ottesz) 8211 { 8212 int cursz, dosz; 8213 pgcnt_t curnpgs, dopgs; 8214 pgcnt_t pg64k; 8215 page_t *pp2; 8216 8217 /* 8218 * Determine how big a range we check for TNC and find 8219 * leader page. cursz is the size of the biggest 8220 * mapping that still exist on 'pp'. 8221 */ 8222 if (PP_ISMAPPED_LARGE(pp)) { 8223 cursz = fnd_mapping_sz(pp); 8224 } else { 8225 cursz = TTE8K; 8226 } 8227 8228 if (ottesz >= cursz) { 8229 dosz = ottesz; 8230 pp2 = pp; 8231 } else { 8232 dosz = cursz; 8233 pp2 = PP_GROUPLEADER(pp, dosz); 8234 } 8235 8236 pg64k = TTEPAGES(TTE64K); 8237 dopgs = TTEPAGES(dosz); 8238 8239 ASSERT(dopgs == 1 || ((dopgs & (pg64k - 1)) == 0)); 8240 8241 while (dopgs != 0) { 8242 curnpgs = TTEPAGES(cursz); 8243 if (tst_tnc(pp2, curnpgs)) { 8244 SFMMU_STAT_ADD(sf_recache, curnpgs); 8245 sfmmu_page_cache_array(pp2, HAT_CACHE, CACHE_NO_FLUSH, 8246 curnpgs); 8247 } 8248 8249 ASSERT(dopgs >= curnpgs); 8250 dopgs -= curnpgs; 8251 8252 if (dopgs == 0) { 8253 break; 8254 } 8255 8256 pp2 = PP_PAGENEXT_N(pp2, curnpgs); 8257 if (((dopgs & (pg64k - 1)) == 0) && PP_ISMAPPED_LARGE(pp2)) { 8258 cursz = fnd_mapping_sz(pp2); 8259 } else { 8260 cursz = TTE8K; 8261 } 8262 } 8263 } 8264 8265 /* 8266 * Returns 1 if page(s) can be converted from TNC to cacheable setting, 8267 * returns 0 otherwise. 
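 * A run of pages is convertible only when every remaining 8K mapping of
 * every constituent page falls on one consistent virtual color, and,
 * when the run is backed by a larger mapping, on the color implied by
 * each page's offset within the run (bcolor = i % CACHE_NUM_COLOR in
 * the loop below).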
Note that oaddr argument is valid for only 8268 * 8k pages. 8269 */ 8270 static int 8271 tst_tnc(page_t *pp, pgcnt_t npages) 8272 { 8273 struct sf_hment *sfhme; 8274 struct hme_blk *hmeblkp; 8275 tte_t tte; 8276 caddr_t vaddr; 8277 int clr_valid = 0; 8278 int color, color1, bcolor; 8279 int i, ncolors; 8280 8281 ASSERT(pp != NULL); 8282 ASSERT(!(cache & CACHE_WRITEBACK)); 8283 8284 if (npages > 1) { 8285 ncolors = CACHE_NUM_COLOR; 8286 } 8287 8288 for (i = 0; i < npages; i++) { 8289 ASSERT(sfmmu_mlist_held(pp)); 8290 ASSERT(PP_ISTNC(pp)); 8291 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR); 8292 8293 if (PP_ISPNC(pp)) { 8294 return (0); 8295 } 8296 8297 clr_valid = 0; 8298 if (PP_ISMAPPED_KPM(pp)) { 8299 caddr_t kpmvaddr; 8300 8301 ASSERT(kpm_enable); 8302 kpmvaddr = hat_kpm_page2va(pp, 1); 8303 ASSERT(!(npages > 1 && IS_KPM_ALIAS_RANGE(kpmvaddr))); 8304 color1 = addr_to_vcolor(kpmvaddr); 8305 clr_valid = 1; 8306 } 8307 8308 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) { 8309 hmeblkp = sfmmu_hmetohblk(sfhme); 8310 if (hmeblkp->hblk_xhat_bit) 8311 continue; 8312 8313 sfmmu_copytte(&sfhme->hme_tte, &tte); 8314 ASSERT(TTE_IS_VALID(&tte)); 8315 8316 vaddr = tte_to_vaddr(hmeblkp, tte); 8317 color = addr_to_vcolor(vaddr); 8318 8319 if (npages > 1) { 8320 /* 8321 * If there is a big mapping, make sure 8322 * 8K mapping is consistent with the big 8323 * mapping. 8324 */ 8325 bcolor = i % ncolors; 8326 if (color != bcolor) { 8327 return (0); 8328 } 8329 } 8330 if (!clr_valid) { 8331 clr_valid = 1; 8332 color1 = color; 8333 } 8334 8335 if (color1 != color) { 8336 return (0); 8337 } 8338 } 8339 8340 pp = PP_PAGENEXT(pp); 8341 } 8342 8343 return (1); 8344 } 8345 8346 static void 8347 sfmmu_page_cache_array(page_t *pp, int flags, int cache_flush_flag, 8348 pgcnt_t npages) 8349 { 8350 kmutex_t *pmtx; 8351 int i, ncolors, bcolor; 8352 kpm_hlk_t *kpmp; 8353 cpuset_t cpuset; 8354 8355 ASSERT(pp != NULL); 8356 ASSERT(!(cache & CACHE_WRITEBACK)); 8357 8358 kpmp = sfmmu_kpm_kpmp_enter(pp, npages); 8359 pmtx = sfmmu_page_enter(pp); 8360 8361 /* 8362 * Fast path caching single unmapped page 8363 */ 8364 if (npages == 1 && !PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp) && 8365 flags == HAT_CACHE) { 8366 PP_CLRTNC(pp); 8367 PP_CLRPNC(pp); 8368 sfmmu_page_exit(pmtx); 8369 sfmmu_kpm_kpmp_exit(kpmp); 8370 return; 8371 } 8372 8373 /* 8374 * We need to capture all cpus in order to change cacheability 8375 * because we can't allow one cpu to access the same physical 8376 * page using a cacheable and a non-cachebale mapping at the same 8377 * time. Since we may end up walking the ism mapping list 8378 * have to grab it's lock now since we can't after all the 8379 * cpus have been captured. 8380 */ 8381 sfmmu_hat_lock_all(); 8382 mutex_enter(&ism_mlist_lock); 8383 kpreempt_disable(); 8384 cpuset = cpu_ready_set; 8385 xc_attention(cpuset); 8386 8387 if (npages > 1) { 8388 /* 8389 * Make sure all colors are flushed since the 8390 * sfmmu_page_cache() only flushes one color- 8391 * it does not know big pages. 
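 * Flushing every color here in effect performs a displacement flush of
 * the entire virtually indexed cache, which covers all constituent
 * pages of the large page; the per-page loop below can then run with
 * CACHE_NO_FLUSH and only fix up TSB/TLB state.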
8392 */ 8393 ncolors = CACHE_NUM_COLOR; 8394 if (flags & HAT_TMPNC) { 8395 for (i = 0; i < ncolors; i++) { 8396 sfmmu_cache_flushcolor(i, pp->p_pagenum); 8397 } 8398 cache_flush_flag = CACHE_NO_FLUSH; 8399 } 8400 } 8401 8402 for (i = 0; i < npages; i++) { 8403 8404 ASSERT(sfmmu_mlist_held(pp)); 8405 8406 if (!(flags == HAT_TMPNC && PP_ISTNC(pp))) { 8407 8408 if (npages > 1) { 8409 bcolor = i % ncolors; 8410 } else { 8411 bcolor = NO_VCOLOR; 8412 } 8413 8414 sfmmu_page_cache(pp, flags, cache_flush_flag, 8415 bcolor); 8416 } 8417 8418 pp = PP_PAGENEXT(pp); 8419 } 8420 8421 xt_sync(cpuset); 8422 xc_dismissed(cpuset); 8423 mutex_exit(&ism_mlist_lock); 8424 sfmmu_hat_unlock_all(); 8425 sfmmu_page_exit(pmtx); 8426 sfmmu_kpm_kpmp_exit(kpmp); 8427 kpreempt_enable(); 8428 } 8429 8430 /* 8431 * This function changes the virtual cacheability of all mappings to a 8432 * particular page. When changing from uncache to cacheable the mappings will 8433 * only be changed if all of them have the same virtual color. 8434 * We need to flush the cache in all cpus. It is possible that 8435 * a process referenced a page as cacheable but has sinced exited 8436 * and cleared the mapping list. We still to flush it but have no 8437 * state so all cpus is the only alternative. 8438 */ 8439 static void 8440 sfmmu_page_cache(page_t *pp, int flags, int cache_flush_flag, int bcolor) 8441 { 8442 struct sf_hment *sfhme; 8443 struct hme_blk *hmeblkp; 8444 sfmmu_t *sfmmup; 8445 tte_t tte, ttemod; 8446 caddr_t vaddr; 8447 int ret, color; 8448 pfn_t pfn; 8449 8450 color = bcolor; 8451 pfn = pp->p_pagenum; 8452 8453 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) { 8454 8455 hmeblkp = sfmmu_hmetohblk(sfhme); 8456 8457 if (hmeblkp->hblk_xhat_bit) 8458 continue; 8459 8460 sfmmu_copytte(&sfhme->hme_tte, &tte); 8461 ASSERT(TTE_IS_VALID(&tte)); 8462 vaddr = tte_to_vaddr(hmeblkp, tte); 8463 color = addr_to_vcolor(vaddr); 8464 8465 #ifdef DEBUG 8466 if ((flags & HAT_CACHE) && bcolor != NO_VCOLOR) { 8467 ASSERT(color == bcolor); 8468 } 8469 #endif 8470 8471 ASSERT(flags != HAT_TMPNC || color == PP_GET_VCOLOR(pp)); 8472 8473 ttemod = tte; 8474 if (flags & (HAT_UNCACHE | HAT_TMPNC)) { 8475 TTE_CLR_VCACHEABLE(&ttemod); 8476 } else { /* flags & HAT_CACHE */ 8477 TTE_SET_VCACHEABLE(&ttemod); 8478 } 8479 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 8480 if (ret < 0) { 8481 /* 8482 * Since all cpus are captured modifytte should not 8483 * fail. 8484 */ 8485 panic("sfmmu_page_cache: write to tte failed"); 8486 } 8487 8488 sfmmup = hblktosfmmu(hmeblkp); 8489 if (cache_flush_flag == CACHE_FLUSH) { 8490 /* 8491 * Flush TSBs, TLBs and caches 8492 */ 8493 if (sfmmup->sfmmu_ismhat) { 8494 if (flags & HAT_CACHE) { 8495 SFMMU_STAT(sf_ism_recache); 8496 } else { 8497 SFMMU_STAT(sf_ism_uncache); 8498 } 8499 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp, 8500 pfn, CACHE_FLUSH); 8501 } else { 8502 sfmmu_tlbcache_demap(vaddr, sfmmup, hmeblkp, 8503 pfn, 0, FLUSH_ALL_CPUS, CACHE_FLUSH, 1); 8504 } 8505 8506 /* 8507 * all cache entries belonging to this pfn are 8508 * now flushed. 8509 */ 8510 cache_flush_flag = CACHE_NO_FLUSH; 8511 } else { 8512 8513 /* 8514 * Flush only TSBs and TLBs. 
8515 */ 8516 if (sfmmup->sfmmu_ismhat) { 8517 if (flags & HAT_CACHE) { 8518 SFMMU_STAT(sf_ism_recache); 8519 } else { 8520 SFMMU_STAT(sf_ism_uncache); 8521 } 8522 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp, 8523 pfn, CACHE_NO_FLUSH); 8524 } else { 8525 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 1); 8526 } 8527 } 8528 } 8529 8530 if (PP_ISMAPPED_KPM(pp)) 8531 sfmmu_kpm_page_cache(pp, flags, cache_flush_flag); 8532 8533 switch (flags) { 8534 8535 default: 8536 panic("sfmmu_pagecache: unknown flags"); 8537 break; 8538 8539 case HAT_CACHE: 8540 PP_CLRTNC(pp); 8541 PP_CLRPNC(pp); 8542 PP_SET_VCOLOR(pp, color); 8543 break; 8544 8545 case HAT_TMPNC: 8546 PP_SETTNC(pp); 8547 PP_SET_VCOLOR(pp, NO_VCOLOR); 8548 break; 8549 8550 case HAT_UNCACHE: 8551 PP_SETPNC(pp); 8552 PP_CLRTNC(pp); 8553 PP_SET_VCOLOR(pp, NO_VCOLOR); 8554 break; 8555 } 8556 } 8557 8558 /* 8559 * This routine gets called when the system has run out of free contexts. 8560 * This will simply choose context passed to it to be stolen and reused. 8561 */ 8562 /* ARGSUSED */ 8563 static void 8564 sfmmu_reuse_ctx(struct ctx *ctx, sfmmu_t *sfmmup) 8565 { 8566 sfmmu_t *stolen_sfmmup; 8567 cpuset_t cpuset; 8568 ushort_t cnum = ctxtoctxnum(ctx); 8569 8570 ASSERT(cnum != KCONTEXT); 8571 ASSERT(rw_read_locked(&ctx->ctx_rwlock) == 0); /* write locked */ 8572 8573 /* 8574 * simply steal and reuse the ctx passed to us. 8575 */ 8576 stolen_sfmmup = ctx->ctx_sfmmu; 8577 ASSERT(sfmmu_hat_lock_held(sfmmup)); 8578 ASSERT(stolen_sfmmup->sfmmu_cnum == cnum); 8579 ASSERT(stolen_sfmmup != ksfmmup); 8580 8581 TRACE_CTXS(&ctx_trace_mutex, ctx_trace_ptr, cnum, stolen_sfmmup, 8582 sfmmup, CTX_TRC_STEAL); 8583 SFMMU_STAT(sf_ctxsteal); 8584 8585 /* 8586 * Update sfmmu and ctx structs. After this point all threads 8587 * belonging to this hat/proc will fault and not use the ctx 8588 * being stolen. 8589 */ 8590 kpreempt_disable(); 8591 /* 8592 * Enforce reverse order of assignments from sfmmu_get_ctx(). This 8593 * is done to prevent a race where a thread faults with the context 8594 * but the TSB has changed. 8595 */ 8596 stolen_sfmmup->sfmmu_cnum = INVALID_CONTEXT; 8597 membar_enter(); 8598 ctx->ctx_sfmmu = NULL; 8599 8600 /* 8601 * 1. flush TLB in all CPUs that ran the process whose ctx 8602 * we are stealing. 8603 * 2. change context for all other CPUs to INVALID_CONTEXT, 8604 * if they are running in the context that we are going to steal. 8605 */ 8606 cpuset = stolen_sfmmup->sfmmu_cpusran; 8607 CPUSET_DEL(cpuset, CPU->cpu_id); 8608 CPUSET_AND(cpuset, cpu_ready_set); 8609 SFMMU_XCALL_STATS(cnum); 8610 xt_some(cpuset, sfmmu_ctx_steal_tl1, cnum, INVALID_CONTEXT); 8611 xt_sync(cpuset); 8612 8613 /* 8614 * flush TLB of local processor 8615 */ 8616 vtag_flushctx(cnum); 8617 8618 /* 8619 * If we just stole the ctx from the current process 8620 * on local cpu then we also invalidate his context 8621 * here. 8622 */ 8623 if (sfmmu_getctx_sec() == cnum) { 8624 sfmmu_setctx_sec(INVALID_CONTEXT); 8625 sfmmu_clear_utsbinfo(); 8626 } 8627 8628 kpreempt_enable(); 8629 SFMMU_STAT(sf_tlbflush_ctx); 8630 } 8631 8632 /* 8633 * Returns a context with the reader lock held. 8634 * 8635 * We maintain 2 different list of contexts. The first list 8636 * is the free list and it is headed by ctxfree. These contexts 8637 * are ready to use. The second list is the dirty list and is 8638 * headed by ctxdirty. These contexts have been freed but haven't 8639 * been flushed from the TLB. 
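 *
 * Allocation below therefore proceeds in this order (a summary of the
 * code that follows, not a separate interface):
 *
 *	1. if the process still owns its old ctx, revalidate and reuse it;
 *	2. else take the head of ctxfree, if any;
 *	3. else flush the TLBs if needed, recycle ctxdirty onto the free
 *	   list and take its head;
 *	4. else sweep the ctx array from ctxhand and steal the first
 *	   write-lockable, non-free ctx.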
8640 * 8641 * It's the responsibility of the caller to guarantee that the 8642 * process serializes on calls here by taking the HAT lock for 8643 * the hat. 8644 * 8645 * Changing the page size is a rather complicated process, so 8646 * rather than jump through lots of hoops to special case it, 8647 * the easiest way to go about it is to tell the MMU we want 8648 * to change page sizes and then switch to using a different 8649 * context. When we program the context registers for the 8650 * process, we can take care of setting up the (new) page size 8651 * for that context at that point. 8652 */ 8653 8654 static struct ctx * 8655 sfmmu_get_ctx(sfmmu_t *sfmmup) 8656 { 8657 struct ctx *ctx; 8658 ushort_t cnum; 8659 struct ctx *lastctx = &ctxs[nctxs-1]; 8660 struct ctx *firstctx = &ctxs[NUM_LOCKED_CTXS]; 8661 uint_t found_stealable_ctx; 8662 uint_t retry_count = 0; 8663 8664 #define NEXT_CTX(ctx) (((ctx) >= lastctx) ? firstctx : ((ctx) + 1)) 8665 8666 retry: 8667 8668 ASSERT(sfmmup->sfmmu_cnum != KCONTEXT); 8669 /* 8670 * Check to see if this process has already got a ctx. 8671 * In that case just set the sec-ctx, grab a readers lock, and 8672 * return. 8673 * 8674 * We have to double check after we get the readers lock on the 8675 * context, since it could be stolen in this short window. 8676 */ 8677 if (sfmmup->sfmmu_cnum >= NUM_LOCKED_CTXS) { 8678 ctx = sfmmutoctx(sfmmup); 8679 rw_enter(&ctx->ctx_rwlock, RW_READER); 8680 if (ctx->ctx_sfmmu == sfmmup) { 8681 return (ctx); 8682 } else { 8683 rw_exit(&ctx->ctx_rwlock); 8684 } 8685 } 8686 8687 found_stealable_ctx = 0; 8688 mutex_enter(&ctx_list_lock); 8689 if ((ctx = ctxfree) != NULL) { 8690 /* 8691 * Found a ctx in free list. Delete it from the list and 8692 * use it. There's a short window where the stealer can 8693 * look at the context before we grab the lock on the 8694 * context, so we have to handle that with the free flag. 8695 */ 8696 SFMMU_STAT(sf_ctxfree); 8697 ctxfree = ctx->ctx_free; 8698 ctx->ctx_sfmmu = NULL; 8699 mutex_exit(&ctx_list_lock); 8700 rw_enter(&ctx->ctx_rwlock, RW_WRITER); 8701 ASSERT(ctx->ctx_sfmmu == NULL); 8702 ASSERT((ctx->ctx_flags & CTX_FREE_FLAG) != 0); 8703 } else if ((ctx = ctxdirty) != NULL) { 8704 /* 8705 * No free contexts. If we have at least one dirty ctx 8706 * then flush the TLBs on all cpus if necessary and move 8707 * the dirty list to the free list. 8708 */ 8709 SFMMU_STAT(sf_ctxdirty); 8710 ctxdirty = NULL; 8711 if (delay_tlb_flush) 8712 sfmmu_tlb_all_demap(); 8713 ctxfree = ctx->ctx_free; 8714 ctx->ctx_sfmmu = NULL; 8715 mutex_exit(&ctx_list_lock); 8716 rw_enter(&ctx->ctx_rwlock, RW_WRITER); 8717 ASSERT(ctx->ctx_sfmmu == NULL); 8718 ASSERT((ctx->ctx_flags & CTX_FREE_FLAG) != 0); 8719 } else { 8720 /* 8721 * No free context available, so steal one. 8722 * 8723 * The policy to choose the appropriate context is simple; 8724 * just sweep all the ctxs using ctxhand. This will steal 8725 * the LRU ctx. 8726 * 8727 * We however only steal a non-free context that can be 8728 * write locked. Keep searching till we find a stealable 8729 * ctx. 8730 */ 8731 mutex_exit(&ctx_list_lock); 8732 ctx = ctxhand; 8733 do { 8734 /* 8735 * If you get the writers lock, and the ctx isn't 8736 * a free ctx, THEN you can steal this ctx. 
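 * The flag is tested again after rw_tryenter() because a ctx can be
 * freed in the window between the unlocked check and acquiring the
 * writer lock; a ctx that turned free in that window is released and
 * left for the free-list path.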
8737 */ 8738 if ((ctx->ctx_flags & CTX_FREE_FLAG) == 0 && 8739 rw_tryenter(&ctx->ctx_rwlock, RW_WRITER) != 0) { 8740 if (ctx->ctx_flags & CTX_FREE_FLAG) { 8741 /* let the first guy have it */ 8742 rw_exit(&ctx->ctx_rwlock); 8743 } else { 8744 found_stealable_ctx = 1; 8745 break; 8746 } 8747 } 8748 ctx = NEXT_CTX(ctx); 8749 } while (ctx != ctxhand); 8750 8751 if (found_stealable_ctx) { 8752 /* 8753 * Try and reuse the ctx. 8754 */ 8755 sfmmu_reuse_ctx(ctx, sfmmup); 8756 8757 } else if (retry_count++ < GET_CTX_RETRY_CNT) { 8758 goto retry; 8759 8760 } else { 8761 panic("Can't find any stealable context"); 8762 } 8763 } 8764 8765 ASSERT(rw_read_locked(&ctx->ctx_rwlock) == 0); /* write locked */ 8766 ctx->ctx_sfmmu = sfmmup; 8767 8768 /* 8769 * Clear the ctx_flags field. 8770 */ 8771 ctx->ctx_flags = 0; 8772 8773 cnum = ctxtoctxnum(ctx); 8774 membar_exit(); 8775 sfmmup->sfmmu_cnum = cnum; 8776 8777 /* 8778 * Let the MMU set up the page sizes to use for 8779 * this context in the TLB. Don't program 2nd dtlb for ism hat. 8780 */ 8781 if ((&mmu_set_ctx_page_sizes) && (sfmmup->sfmmu_ismhat == 0)) 8782 mmu_set_ctx_page_sizes(sfmmup); 8783 8784 /* 8785 * Downgrade to reader's lock. 8786 */ 8787 rw_downgrade(&ctx->ctx_rwlock); 8788 8789 /* 8790 * If this value doesn't get set to what we want 8791 * it won't matter, so don't worry about locking. 8792 */ 8793 ctxhand = NEXT_CTX(ctx); 8794 8795 /* 8796 * Better not have been stolen while we held the ctx' 8797 * lock or we're hosed. 8798 */ 8799 ASSERT(sfmmup == sfmmutoctx(sfmmup)->ctx_sfmmu); 8800 8801 return (ctx); 8802 8803 #undef NEXT_CTX 8804 } 8805 8806 8807 /* 8808 * Set the process context to INVALID_CONTEXT (but 8809 * without stealing the ctx) so that it faults and 8810 * reloads the MMU state from TL=0. Caller must 8811 * hold the hat lock since we don't acquire it here. 8812 */ 8813 static void 8814 sfmmu_sync_mmustate(sfmmu_t *sfmmup) 8815 { 8816 int cnum; 8817 cpuset_t cpuset; 8818 8819 ASSERT(sfmmup != ksfmmup); 8820 ASSERT(sfmmu_hat_lock_held(sfmmup)); 8821 8822 kpreempt_disable(); 8823 8824 cnum = sfmmutoctxnum(sfmmup); 8825 if (cnum != INVALID_CONTEXT) { 8826 cpuset = sfmmup->sfmmu_cpusran; 8827 CPUSET_DEL(cpuset, CPU->cpu_id); 8828 CPUSET_AND(cpuset, cpu_ready_set); 8829 SFMMU_XCALL_STATS(cnum); 8830 8831 xt_some(cpuset, sfmmu_raise_tsb_exception, 8832 cnum, INVALID_CONTEXT); 8833 xt_sync(cpuset); 8834 8835 /* 8836 * If the process is running on the local CPU 8837 * we need to update the MMU state here as well. 8838 */ 8839 if (sfmmu_getctx_sec() == cnum) 8840 sfmmu_load_mmustate(sfmmup); 8841 8842 SFMMU_STAT(sf_tsb_raise_exception); 8843 } 8844 8845 kpreempt_enable(); 8846 } 8847 8848 8849 /* 8850 * Replace the specified TSB with a new TSB. This function gets called when 8851 * we grow, shrink or swapin a TSB. When swapping in a TSB (TSB_SWAPIN), the 8852 * TSB_FORCEALLOC flag may be used to force allocation of a minimum-sized TSB 8853 * (8K). 8854 * 8855 * Caller must hold the HAT lock, but should assume any tsb_info 8856 * pointers it has are no longer valid after calling this function. 8857 * 8858 * Return values: 8859 * TSB_ALLOCFAIL Failed to allocate a TSB, due to memory constraints 8860 * TSB_LOSTRACE HAT is busy, i.e. 
another thread is already doing 8861 * something to this tsbinfo/TSB 8862 * TSB_SUCCESS Operation succeeded 8863 */ 8864 static tsb_replace_rc_t 8865 sfmmu_replace_tsb(sfmmu_t *sfmmup, struct tsb_info *old_tsbinfo, uint_t szc, 8866 hatlock_t *hatlockp, uint_t flags) 8867 { 8868 struct tsb_info *new_tsbinfo = NULL; 8869 struct tsb_info *curtsb, *prevtsb; 8870 uint_t tte_sz_mask; 8871 cpuset_t cpuset; 8872 struct ctx *ctx = NULL; 8873 int ctxnum; 8874 8875 ASSERT(sfmmup != ksfmmup); 8876 ASSERT(sfmmup->sfmmu_ismhat == 0); 8877 ASSERT(sfmmu_hat_lock_held(sfmmup)); 8878 ASSERT(szc <= tsb_max_growsize); 8879 8880 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_BUSY)) 8881 return (TSB_LOSTRACE); 8882 8883 /* 8884 * Find the tsb_info ahead of this one in the list, and 8885 * also make sure that the tsb_info passed in really 8886 * exists! 8887 */ 8888 for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb; 8889 curtsb != old_tsbinfo && curtsb != NULL; 8890 prevtsb = curtsb, curtsb = curtsb->tsb_next); 8891 ASSERT(curtsb != NULL); 8892 8893 if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 8894 /* 8895 * The process is swapped out, so just set the new size 8896 * code. When it swaps back in, we'll allocate a new one 8897 * of the new chosen size. 8898 */ 8899 curtsb->tsb_szc = szc; 8900 return (TSB_SUCCESS); 8901 } 8902 SFMMU_FLAGS_SET(sfmmup, HAT_BUSY); 8903 8904 tte_sz_mask = old_tsbinfo->tsb_ttesz_mask; 8905 8906 /* 8907 * All initialization is done inside of sfmmu_tsbinfo_alloc(). 8908 * If we fail to allocate a TSB, exit. 8909 */ 8910 sfmmu_hat_exit(hatlockp); 8911 if (sfmmu_tsbinfo_alloc(&new_tsbinfo, szc, tte_sz_mask, 8912 flags, sfmmup)) { 8913 (void) sfmmu_hat_enter(sfmmup); 8914 if (!(flags & TSB_SWAPIN)) 8915 SFMMU_STAT(sf_tsb_resize_failures); 8916 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 8917 return (TSB_ALLOCFAIL); 8918 } 8919 (void) sfmmu_hat_enter(sfmmup); 8920 8921 /* 8922 * Re-check to make sure somebody else didn't muck with us while we 8923 * didn't hold the HAT lock. If the process swapped out, fine, just 8924 * exit; this can happen if we try to shrink the TSB from the context 8925 * of another process (such as on an ISM unmap), though it is rare. 8926 */ 8927 if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 8928 SFMMU_STAT(sf_tsb_resize_failures); 8929 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 8930 sfmmu_hat_exit(hatlockp); 8931 sfmmu_tsbinfo_free(new_tsbinfo); 8932 (void) sfmmu_hat_enter(sfmmup); 8933 return (TSB_LOSTRACE); 8934 } 8935 8936 #ifdef DEBUG 8937 /* Reverify that the tsb_info still exists.. for debugging only */ 8938 for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb; 8939 curtsb != old_tsbinfo && curtsb != NULL; 8940 prevtsb = curtsb, curtsb = curtsb->tsb_next); 8941 ASSERT(curtsb != NULL); 8942 #endif /* DEBUG */ 8943 8944 /* 8945 * Quiesce any CPUs running this process on their next TLB miss 8946 * so they atomically see the new tsb_info. We temporarily set the 8947 * context to invalid context so new threads that come on processor 8948 * after we do the xcall to cpusran will also serialize behind the 8949 * HAT lock on TLB miss and will see the new TSB. Since this short 8950 * race with a new thread coming on processor is relatively rare, 8951 * this synchronization mechanism should be cheaper than always 8952 * pausing all CPUs for the duration of the setup, which is what 8953 * the old implementation did. This is particuarly true if we are 8954 * copying a huge chunk of memory around during that window. 
8955 * 8956 * The memory barriers are to make sure things stay consistent 8957 * with resume() since it does not hold the HAT lock while 8958 * walking the list of tsb_info structures. 8959 */ 8960 if ((flags & TSB_SWAPIN) != TSB_SWAPIN) { 8961 /* The TSB is either growing or shrinking. */ 8962 ctx = sfmmutoctx(sfmmup); 8963 rw_enter(&ctx->ctx_rwlock, RW_WRITER); 8964 8965 ctxnum = sfmmutoctxnum(sfmmup); 8966 sfmmup->sfmmu_cnum = INVALID_CONTEXT; 8967 membar_enter(); /* make sure visible on all CPUs */ 8968 8969 kpreempt_disable(); 8970 if (ctxnum != INVALID_CONTEXT) { 8971 cpuset = sfmmup->sfmmu_cpusran; 8972 CPUSET_DEL(cpuset, CPU->cpu_id); 8973 CPUSET_AND(cpuset, cpu_ready_set); 8974 SFMMU_XCALL_STATS(ctxnum); 8975 8976 xt_some(cpuset, sfmmu_raise_tsb_exception, 8977 ctxnum, INVALID_CONTEXT); 8978 xt_sync(cpuset); 8979 8980 SFMMU_STAT(sf_tsb_raise_exception); 8981 } 8982 kpreempt_enable(); 8983 } else { 8984 /* 8985 * It is illegal to swap in TSBs from a process other 8986 * than a process being swapped in. This in turn 8987 * implies we do not have a valid MMU context here 8988 * since a process needs one to resolve translation 8989 * misses. 8990 */ 8991 ASSERT(curthread->t_procp->p_as->a_hat == sfmmup); 8992 ASSERT(sfmmutoctxnum(sfmmup) == INVALID_CONTEXT); 8993 } 8994 8995 new_tsbinfo->tsb_next = old_tsbinfo->tsb_next; 8996 membar_stst(); /* strict ordering required */ 8997 if (prevtsb) 8998 prevtsb->tsb_next = new_tsbinfo; 8999 else 9000 sfmmup->sfmmu_tsb = new_tsbinfo; 9001 membar_enter(); /* make sure new TSB globally visible */ 9002 sfmmu_setup_tsbinfo(sfmmup); 9003 9004 /* 9005 * We need to migrate TSB entries from the old TSB to the new TSB 9006 * if tsb_remap_ttes is set and the TSB is growing. 9007 */ 9008 if (tsb_remap_ttes && ((flags & TSB_GROW) == TSB_GROW)) 9009 sfmmu_copy_tsb(old_tsbinfo, new_tsbinfo); 9010 9011 if ((flags & TSB_SWAPIN) != TSB_SWAPIN) { 9012 kpreempt_disable(); 9013 membar_exit(); 9014 sfmmup->sfmmu_cnum = ctxnum; 9015 if (ctxnum != INVALID_CONTEXT && 9016 sfmmu_getctx_sec() == ctxnum) { 9017 sfmmu_load_mmustate(sfmmup); 9018 } 9019 kpreempt_enable(); 9020 rw_exit(&ctx->ctx_rwlock); 9021 } 9022 9023 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 9024 9025 /* 9026 * Drop the HAT lock to free our old tsb_info. 9027 */ 9028 sfmmu_hat_exit(hatlockp); 9029 9030 if ((flags & TSB_GROW) == TSB_GROW) { 9031 SFMMU_STAT(sf_tsb_grow); 9032 } else if ((flags & TSB_SHRINK) == TSB_SHRINK) { 9033 SFMMU_STAT(sf_tsb_shrink); 9034 } 9035 9036 sfmmu_tsbinfo_free(old_tsbinfo); 9037 9038 (void) sfmmu_hat_enter(sfmmup); 9039 return (TSB_SUCCESS); 9040 } 9041 9042 /* 9043 * Steal context from process, forcing the process to switch to another 9044 * context on the next TLB miss, and therefore start using the TLB that 9045 * is reprogrammed for the new page sizes. 
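 *
 * A sketch of the sequence performed below under the hat lock (the ctx
 * writer lock is additionally taken around the swap):
 *
 *	sfmmup->sfmmu_pgsz[] = tmp_pgsz[];	   adopt the new preferences
 *	if (sfmmutoctxnum(sfmmup) != INVALID_CONTEXT)
 *		sfmmu_tlb_swap_ctx(sfmmup, ctx);   force a ctx switch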
9046 */ 9047 void 9048 sfmmu_steal_context(sfmmu_t *sfmmup, uint8_t *tmp_pgsz) 9049 { 9050 struct ctx *ctx; 9051 int i, cnum; 9052 hatlock_t *hatlockp = NULL; 9053 9054 hatlockp = sfmmu_hat_enter(sfmmup); 9055 /* USIII+-IV+ optimization, requires hat lock */ 9056 if (tmp_pgsz) { 9057 for (i = 0; i < mmu_page_sizes; i++) 9058 sfmmup->sfmmu_pgsz[i] = tmp_pgsz[i]; 9059 } 9060 SFMMU_STAT(sf_tlb_reprog_pgsz); 9061 ctx = sfmmutoctx(sfmmup); 9062 rw_enter(&ctx->ctx_rwlock, RW_WRITER); 9063 cnum = sfmmutoctxnum(sfmmup); 9064 9065 if (cnum != INVALID_CONTEXT) { 9066 sfmmu_tlb_swap_ctx(sfmmup, ctx); 9067 } 9068 rw_exit(&ctx->ctx_rwlock); 9069 sfmmu_hat_exit(hatlockp); 9070 } 9071 9072 /* 9073 * This function assumes that there are either four or six supported page 9074 * sizes and at most two programmable TLBs, so we need to decide which 9075 * page sizes are most important and then tell the MMU layer so it 9076 * can adjust the TLB page sizes accordingly (if supported). 9077 * 9078 * If these assumptions change, this function will need to be 9079 * updated to support whatever the new limits are. 9080 * 9081 * The growing flag is nonzero if we are growing the address space, 9082 * and zero if it is shrinking. This allows us to decide whether 9083 * to grow or shrink our TSB, depending upon available memory 9084 * conditions. 9085 */ 9086 static void 9087 sfmmu_check_page_sizes(sfmmu_t *sfmmup, int growing) 9088 { 9089 uint64_t ttecnt[MMU_PAGE_SIZES]; 9090 uint64_t tte8k_cnt, tte4m_cnt; 9091 uint8_t i; 9092 int sectsb_thresh; 9093 9094 /* 9095 * Kernel threads, processes with small address spaces not using 9096 * large pages, and dummy ISM HATs need not apply. 9097 */ 9098 if (sfmmup == ksfmmup || sfmmup->sfmmu_ismhat != NULL) 9099 return; 9100 9101 if ((sfmmup->sfmmu_flags & HAT_LGPG_FLAGS) == 0 && 9102 sfmmup->sfmmu_ttecnt[TTE8K] <= tsb_rss_factor) 9103 return; 9104 9105 for (i = 0; i < mmu_page_sizes; i++) { 9106 ttecnt[i] = SFMMU_TTE_CNT(sfmmup, i); 9107 } 9108 9109 /* Check pagesizes in use, and possibly reprogram DTLB. */ 9110 if (&mmu_check_page_sizes) 9111 mmu_check_page_sizes(sfmmup, ttecnt); 9112 9113 /* 9114 * Calculate the number of 8k ttes to represent the span of these 9115 * pages. 9116 */ 9117 tte8k_cnt = ttecnt[TTE8K] + 9118 (ttecnt[TTE64K] << (MMU_PAGESHIFT64K - MMU_PAGESHIFT)) + 9119 (ttecnt[TTE512K] << (MMU_PAGESHIFT512K - MMU_PAGESHIFT)); 9120 if (mmu_page_sizes == max_mmu_page_sizes) { 9121 tte4m_cnt = ttecnt[TTE4M] + 9122 (ttecnt[TTE32M] << (MMU_PAGESHIFT32M - MMU_PAGESHIFT4M)) + 9123 (ttecnt[TTE256M] << (MMU_PAGESHIFT256M - MMU_PAGESHIFT4M)); 9124 } else { 9125 tte4m_cnt = ttecnt[TTE4M]; 9126 } 9127 9128 /* 9129 * Inflate TSB sizes by a factor of 2 if this process 9130 * uses 4M text pages to minimize extra conflict misses 9131 * in the first TSB since without counting text pages 9132 * 8K TSB may become too small. 9133 * 9134 * Also double the size of the second TSB to minimize 9135 * extra conflict misses due to competition between 4M text pages 9136 * and data pages. 9137 * 9138 * We need to adjust the second TSB allocation threshold by the 9139 * inflation factor, since there is no point in creating a second 9140 * TSB when we know all the mappings can fit in the I/D TLBs. 9141 */ 9142 sectsb_thresh = tsb_sectsb_threshold; 9143 if (sfmmup->sfmmu_flags & HAT_4MTEXT_FLAG) { 9144 tte8k_cnt <<= 1; 9145 tte4m_cnt <<= 1; 9146 sectsb_thresh <<= 1; 9147 } 9148 9149 /* 9150 * Check to see if our TSB is the right size; we may need to 9151 * grow or shrink it. 
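 * The check is driven by the 8K-equivalent counts computed above; as a
 * purely illustrative example, a process with 1000 8K, 100 64K and 10
 * 512K translations spans 1000 + 100*8 + 10*64 = 2440 8K-sized ttes,
 * and it is that figure which gets compared against tsb_rss_factor.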
If the process is small, our work is 9152 * finished at this point. 9153 */ 9154 if (tte8k_cnt <= tsb_rss_factor && tte4m_cnt <= sectsb_thresh) { 9155 return; 9156 } 9157 sfmmu_size_tsb(sfmmup, growing, tte8k_cnt, tte4m_cnt, sectsb_thresh); 9158 } 9159 9160 static void 9161 sfmmu_size_tsb(sfmmu_t *sfmmup, int growing, uint64_t tte8k_cnt, 9162 uint64_t tte4m_cnt, int sectsb_thresh) 9163 { 9164 int tsb_bits; 9165 uint_t tsb_szc; 9166 struct tsb_info *tsbinfop; 9167 hatlock_t *hatlockp = NULL; 9168 9169 hatlockp = sfmmu_hat_enter(sfmmup); 9170 ASSERT(hatlockp != NULL); 9171 tsbinfop = sfmmup->sfmmu_tsb; 9172 ASSERT(tsbinfop != NULL); 9173 9174 /* 9175 * If we're growing, select the size based on RSS. If we're 9176 * shrinking, leave some room so we don't have to turn around and 9177 * grow again immediately. 9178 */ 9179 if (growing) 9180 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt); 9181 else 9182 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt << 1); 9183 9184 if (!growing && (tsb_szc < tsbinfop->tsb_szc) && 9185 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) { 9186 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc, 9187 hatlockp, TSB_SHRINK); 9188 } else if (growing && tsb_szc > tsbinfop->tsb_szc && TSB_OK_GROW()) { 9189 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc, 9190 hatlockp, TSB_GROW); 9191 } 9192 tsbinfop = sfmmup->sfmmu_tsb; 9193 9194 /* 9195 * With the TLB and first TSB out of the way, we need to see if 9196 * we need a second TSB for 4M pages. If we managed to reprogram 9197 * the TLB page sizes above, the process will start using this new 9198 * TSB right away; otherwise, it will start using it on the next 9199 * context switch. Either way, it's no big deal so there's no 9200 * synchronization with the trap handlers here unless we grow the 9201 * TSB (in which case it's required to prevent using the old one 9202 * after it's freed). Note: second tsb is required for 32M/256M 9203 * page sizes. 9204 */ 9205 if (tte4m_cnt > sectsb_thresh) { 9206 /* 9207 * If we're growing, select the size based on RSS. If we're 9208 * shrinking, leave some room so we don't have to turn 9209 * around and grow again immediately. 9210 */ 9211 if (growing) 9212 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt); 9213 else 9214 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt << 1); 9215 if (tsbinfop->tsb_next == NULL) { 9216 struct tsb_info *newtsb; 9217 int allocflags = SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)? 9218 0 : TSB_ALLOC; 9219 9220 sfmmu_hat_exit(hatlockp); 9221 9222 /* 9223 * Try to allocate a TSB for 4[32|256]M pages. If we 9224 * can't get the size we want, retry w/a minimum sized 9225 * TSB. If that still didn't work, give up; we can 9226 * still run without one. 9227 */ 9228 tsb_bits = (mmu_page_sizes == max_mmu_page_sizes)? 9229 TSB4M|TSB32M|TSB256M:TSB4M; 9230 if ((sfmmu_tsbinfo_alloc(&newtsb, tsb_szc, tsb_bits, 9231 allocflags, sfmmup) != 0) && 9232 (sfmmu_tsbinfo_alloc(&newtsb, TSB_MIN_SZCODE, 9233 tsb_bits, allocflags, sfmmup) != 0)) { 9234 return; 9235 } 9236 9237 hatlockp = sfmmu_hat_enter(sfmmup); 9238 9239 if (sfmmup->sfmmu_tsb->tsb_next == NULL) { 9240 sfmmup->sfmmu_tsb->tsb_next = newtsb; 9241 SFMMU_STAT(sf_tsb_sectsb_create); 9242 sfmmu_setup_tsbinfo(sfmmup); 9243 sfmmu_hat_exit(hatlockp); 9244 return; 9245 } else { 9246 /* 9247 * It's annoying, but possible for us 9248 * to get here.. 
we dropped the HAT lock 9249 * because of locking order in the kmem 9250 * allocator, and while we were off getting 9251 * our memory, some other thread decided to 9252 * do us a favor and won the race to get a 9253 * second TSB for this process. Sigh. 9254 */ 9255 sfmmu_hat_exit(hatlockp); 9256 sfmmu_tsbinfo_free(newtsb); 9257 return; 9258 } 9259 } 9260 9261 /* 9262 * We have a second TSB, see if it's big enough. 9263 */ 9264 tsbinfop = tsbinfop->tsb_next; 9265 9266 /* 9267 * Check to see if our second TSB is the right size; 9268 * we may need to grow or shrink it. 9269 * To prevent thrashing (e.g. growing the TSB on a 9270 * subsequent map operation), only try to shrink if 9271 * the TSB reach exceeds twice the virtual address 9272 * space size. 9273 */ 9274 if (!growing && (tsb_szc < tsbinfop->tsb_szc) && 9275 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) { 9276 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, 9277 tsb_szc, hatlockp, TSB_SHRINK); 9278 } else if (growing && tsb_szc > tsbinfop->tsb_szc && 9279 TSB_OK_GROW()) { 9280 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, 9281 tsb_szc, hatlockp, TSB_GROW); 9282 } 9283 } 9284 9285 sfmmu_hat_exit(hatlockp); 9286 } 9287 9288 /* 9289 * Get the preferred page size code for a hat. 9290 * This is only advice, so locking is not done; 9291 * this transitory information could change 9292 * following the call anyway. This interface is 9293 * sun4 private. 9294 */ 9295 /*ARGSUSED*/ 9296 uint_t 9297 hat_preferred_pgsz(struct hat *hat, caddr_t vaddr, size_t maplen, int maptype) 9298 { 9299 sfmmu_t *sfmmup = (sfmmu_t *)hat; 9300 uint_t szc, maxszc = mmu_page_sizes - 1; 9301 size_t pgsz; 9302 9303 if (maptype == MAPPGSZ_ISM) { 9304 for (szc = maxszc; szc >= TTE4M; szc--) { 9305 if (disable_ism_large_pages & (1 << szc)) 9306 continue; 9307 9308 pgsz = hw_page_array[szc].hp_size; 9309 if ((maplen >= pgsz) && IS_P2ALIGNED(vaddr, pgsz)) 9310 return (szc); 9311 } 9312 return (TTE4M); 9313 } else if (&mmu_preferred_pgsz) { /* USIII+-USIV+ */ 9314 return (mmu_preferred_pgsz(sfmmup, vaddr, maplen)); 9315 } else { /* USIII, USII, Niagara */ 9316 for (szc = maxszc; szc > TTE8K; szc--) { 9317 if (disable_large_pages & (1 << szc)) 9318 continue; 9319 9320 pgsz = hw_page_array[szc].hp_size; 9321 if ((maplen >= pgsz) && IS_P2ALIGNED(vaddr, pgsz)) 9322 return (szc); 9323 } 9324 return (TTE8K); 9325 } 9326 } 9327 9328 /* 9329 * Free up a ctx 9330 */ 9331 static void 9332 sfmmu_free_ctx(sfmmu_t *sfmmup, struct ctx *ctx) 9333 { 9334 int ctxnum; 9335 9336 rw_enter(&ctx->ctx_rwlock, RW_WRITER); 9337 9338 TRACE_CTXS(&ctx_trace_mutex, ctx_trace_ptr, sfmmup->sfmmu_cnum, 9339 sfmmup, 0, CTX_TRC_FREE); 9340 9341 if (sfmmup->sfmmu_cnum == INVALID_CONTEXT) { 9342 CPUSET_ZERO(sfmmup->sfmmu_cpusran); 9343 rw_exit(&ctx->ctx_rwlock); 9344 return; 9345 } 9346 9347 ASSERT(sfmmup == ctx->ctx_sfmmu); 9348 9349 ctx->ctx_sfmmu = NULL; 9350 ctx->ctx_flags = 0; 9351 sfmmup->sfmmu_cnum = INVALID_CONTEXT; 9352 membar_enter(); 9353 CPUSET_ZERO(sfmmup->sfmmu_cpusran); 9354 ctxnum = sfmmu_getctx_sec(); 9355 if (ctxnum == ctxtoctxnum(ctx)) { 9356 sfmmu_setctx_sec(INVALID_CONTEXT); 9357 sfmmu_clear_utsbinfo(); 9358 } 9359 9360 /* 9361 * Put the freed ctx on the dirty list 9362 */ 9363 mutex_enter(&ctx_list_lock); 9364 CTX_SET_FLAGS(ctx, CTX_FREE_FLAG); 9365 ctx->ctx_free = ctxdirty; 9366 ctxdirty = ctx; 9367 mutex_exit(&ctx_list_lock); 9368 9369 rw_exit(&ctx->ctx_rwlock); 9370 } 9371 9372 /* 9373 * Free up a sfmmu 9374 * Since the sfmmu is currently embedded in the hat struct we simply zero 9375 * 
out our fields and free up the ism map blk list if any. 9376 */ 9377 static void 9378 sfmmu_free_sfmmu(sfmmu_t *sfmmup) 9379 { 9380 ism_blk_t *blkp, *nx_blkp; 9381 #ifdef DEBUG 9382 ism_map_t *map; 9383 int i; 9384 #endif 9385 9386 ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0); 9387 ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0); 9388 ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0); 9389 ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0); 9390 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0); 9391 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0); 9392 ASSERT(sfmmup->sfmmu_cnum == INVALID_CONTEXT); 9393 sfmmup->sfmmu_free = 0; 9394 sfmmup->sfmmu_ismhat = 0; 9395 9396 blkp = sfmmup->sfmmu_iblk; 9397 sfmmup->sfmmu_iblk = NULL; 9398 9399 while (blkp) { 9400 #ifdef DEBUG 9401 map = blkp->iblk_maps; 9402 for (i = 0; i < ISM_MAP_SLOTS; i++) { 9403 ASSERT(map[i].imap_seg == 0); 9404 ASSERT(map[i].imap_ismhat == NULL); 9405 ASSERT(map[i].imap_ment == NULL); 9406 } 9407 #endif 9408 nx_blkp = blkp->iblk_next; 9409 blkp->iblk_next = NULL; 9410 blkp->iblk_nextpa = (uint64_t)-1; 9411 kmem_cache_free(ism_blk_cache, blkp); 9412 blkp = nx_blkp; 9413 } 9414 } 9415 9416 /* 9417 * Locking primitves accessed by HATLOCK macros 9418 */ 9419 9420 #define SFMMU_SPL_MTX (0x0) 9421 #define SFMMU_ML_MTX (0x1) 9422 9423 #define SFMMU_MLSPL_MTX(type, pg) (((type) == SFMMU_SPL_MTX) ? \ 9424 SPL_HASH(pg) : MLIST_HASH(pg)) 9425 9426 kmutex_t * 9427 sfmmu_page_enter(struct page *pp) 9428 { 9429 return (sfmmu_mlspl_enter(pp, SFMMU_SPL_MTX)); 9430 } 9431 9432 static void 9433 sfmmu_page_exit(kmutex_t *spl) 9434 { 9435 mutex_exit(spl); 9436 } 9437 9438 static int 9439 sfmmu_page_spl_held(struct page *pp) 9440 { 9441 return (sfmmu_mlspl_held(pp, SFMMU_SPL_MTX)); 9442 } 9443 9444 kmutex_t * 9445 sfmmu_mlist_enter(struct page *pp) 9446 { 9447 return (sfmmu_mlspl_enter(pp, SFMMU_ML_MTX)); 9448 } 9449 9450 void 9451 sfmmu_mlist_exit(kmutex_t *mml) 9452 { 9453 mutex_exit(mml); 9454 } 9455 9456 int 9457 sfmmu_mlist_held(struct page *pp) 9458 { 9459 9460 return (sfmmu_mlspl_held(pp, SFMMU_ML_MTX)); 9461 } 9462 9463 /* 9464 * Common code for sfmmu_mlist_enter() and sfmmu_page_enter(). For 9465 * sfmmu_mlist_enter() case mml_table lock array is used and for 9466 * sfmmu_page_enter() sfmmu_page_lock lock array is used. 9467 * 9468 * The lock is taken on a root page so that it protects an operation on all 9469 * constituent pages of a large page pp belongs to. 9470 * 9471 * The routine takes a lock from the appropriate array. The lock is determined 9472 * by hashing the root page. After taking the lock this routine checks if the 9473 * root page has the same size code that was used to determine the root (i.e 9474 * that root hasn't changed). If root page has the expected p_szc field we 9475 * have the right lock and it's returned to the caller. If root's p_szc 9476 * decreased we release the lock and retry from the beginning. This case can 9477 * happen due to hat_page_demote() decreasing p_szc between our load of p_szc 9478 * value and taking the lock. The number of retries due to p_szc decrease is 9479 * limited by the maximum p_szc value. If p_szc is 0 we return the lock 9480 * determined by hashing pp itself. 9481 * 9482 * If our caller doesn't hold a SE_SHARED or SE_EXCL lock on pp it's also 9483 * possible that p_szc can increase. To increase p_szc a thread has to lock 9484 * all constituent pages EXCL and do hat_pageunload() on all of them. All the 9485 * callers that don't hold a page locked recheck if hmeblk through which pp 9486 * was found still maps this pp. 
If it doesn't map it anymore returned lock 9487 * is immediately dropped. Therefore if sfmmu_mlspl_enter() hits the case of 9488 * p_szc increase after taking the lock it returns this lock without further 9489 * retries because in this case the caller doesn't care about which lock was 9490 * taken. The caller will drop it right away. 9491 * 9492 * After the routine returns it's guaranteed that hat_page_demote() can't 9493 * change p_szc field of any of constituent pages of a large page pp belongs 9494 * to as long as pp was either locked at least SHARED prior to this call or 9495 * the caller finds that hment that pointed to this pp still references this 9496 * pp (this also assumes that the caller holds hme hash bucket lock so that 9497 * the same pp can't be remapped into the same hmeblk after it was unmapped by 9498 * hat_pageunload()). 9499 */ 9500 static kmutex_t * 9501 sfmmu_mlspl_enter(struct page *pp, int type) 9502 { 9503 kmutex_t *mtx; 9504 uint_t prev_rszc = UINT_MAX; 9505 page_t *rootpp; 9506 uint_t szc; 9507 uint_t rszc; 9508 uint_t pszc = pp->p_szc; 9509 9510 ASSERT(pp != NULL); 9511 9512 again: 9513 if (pszc == 0) { 9514 mtx = SFMMU_MLSPL_MTX(type, pp); 9515 mutex_enter(mtx); 9516 return (mtx); 9517 } 9518 9519 /* The lock lives in the root page */ 9520 rootpp = PP_GROUPLEADER(pp, pszc); 9521 mtx = SFMMU_MLSPL_MTX(type, rootpp); 9522 mutex_enter(mtx); 9523 9524 /* 9525 * Return mml in the following 3 cases: 9526 * 9527 * 1) If pp itself is root since if its p_szc decreased before we took 9528 * the lock pp is still the root of smaller szc page. And if its p_szc 9529 * increased it doesn't matter what lock we return (see comment in 9530 * front of this routine). 9531 * 9532 * 2) If pp's not root but rootpp is the root of a rootpp->p_szc size 9533 * large page we have the right lock since any previous potential 9534 * hat_page_demote() is done demoting from greater than current root's 9535 * p_szc because hat_page_demote() changes root's p_szc last. No 9536 * further hat_page_demote() can start or be in progress since it 9537 * would need the same lock we currently hold. 9538 * 9539 * 3) If rootpp's p_szc increased since previous iteration it doesn't 9540 * matter what lock we return (see comment in front of this routine). 9541 */ 9542 if (pp == rootpp || (rszc = rootpp->p_szc) == pszc || 9543 rszc >= prev_rszc) { 9544 return (mtx); 9545 } 9546 9547 /* 9548 * hat_page_demote() could have decreased root's p_szc. 9549 * In this case pp's p_szc must also be smaller than pszc. 9550 * Retry. 9551 */ 9552 if (rszc < pszc) { 9553 szc = pp->p_szc; 9554 if (szc < pszc) { 9555 mutex_exit(mtx); 9556 pszc = szc; 9557 goto again; 9558 } 9559 /* 9560 * pp's p_szc increased after it was decreased. 9561 * page cannot be mapped. Return current lock. The caller 9562 * will drop it right away. 9563 */ 9564 return (mtx); 9565 } 9566 9567 /* 9568 * root's p_szc is greater than pp's p_szc. 9569 * hat_page_demote() is not done with all pages 9570 * yet. Wait for it to complete. 
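 * The wait below is implemented by taking and immediately dropping the
 * mutex hashed from the larger root: hat_page_demote() holds that
 * mutex while it rewrites the p_szc fields, so the enter/exit pair
 * simply serializes this thread behind the demotion before retrying.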
9571 */ 9572 mutex_exit(mtx); 9573 rootpp = PP_GROUPLEADER(rootpp, rszc); 9574 mtx = SFMMU_MLSPL_MTX(type, rootpp); 9575 mutex_enter(mtx); 9576 mutex_exit(mtx); 9577 prev_rszc = rszc; 9578 goto again; 9579 } 9580 9581 static int 9582 sfmmu_mlspl_held(struct page *pp, int type) 9583 { 9584 kmutex_t *mtx; 9585 9586 ASSERT(pp != NULL); 9587 /* The lock lives in the root page */ 9588 pp = PP_PAGEROOT(pp); 9589 ASSERT(pp != NULL); 9590 9591 mtx = SFMMU_MLSPL_MTX(type, pp); 9592 return (MUTEX_HELD(mtx)); 9593 } 9594 9595 static uint_t 9596 sfmmu_get_free_hblk(struct hme_blk **hmeblkpp, uint_t critical) 9597 { 9598 struct hme_blk *hblkp; 9599 9600 if (freehblkp != NULL) { 9601 mutex_enter(&freehblkp_lock); 9602 if (freehblkp != NULL) { 9603 /* 9604 * If the current thread is owning hblk_reserve, 9605 * let it succede even if freehblkcnt is really low. 9606 */ 9607 if (freehblkcnt <= HBLK_RESERVE_MIN && !critical) { 9608 SFMMU_STAT(sf_get_free_throttle); 9609 mutex_exit(&freehblkp_lock); 9610 return (0); 9611 } 9612 freehblkcnt--; 9613 *hmeblkpp = freehblkp; 9614 hblkp = *hmeblkpp; 9615 freehblkp = hblkp->hblk_next; 9616 mutex_exit(&freehblkp_lock); 9617 hblkp->hblk_next = NULL; 9618 SFMMU_STAT(sf_get_free_success); 9619 return (1); 9620 } 9621 mutex_exit(&freehblkp_lock); 9622 } 9623 SFMMU_STAT(sf_get_free_fail); 9624 return (0); 9625 } 9626 9627 static uint_t 9628 sfmmu_put_free_hblk(struct hme_blk *hmeblkp, uint_t critical) 9629 { 9630 struct hme_blk *hblkp; 9631 9632 /* 9633 * If the current thread is mapping into kernel space, 9634 * let it succede even if freehblkcnt is max 9635 * so that it will avoid freeing it to kmem. 9636 * This will prevent stack overflow due to 9637 * possible recursion since kmem_cache_free() 9638 * might require creation of a slab which 9639 * in turn needs an hmeblk to map that slab; 9640 * let's break this vicious chain at the first 9641 * opportunity. 9642 */ 9643 if (freehblkcnt < HBLK_RESERVE_CNT || critical) { 9644 mutex_enter(&freehblkp_lock); 9645 if (freehblkcnt < HBLK_RESERVE_CNT || critical) { 9646 SFMMU_STAT(sf_put_free_success); 9647 freehblkcnt++; 9648 hmeblkp->hblk_next = freehblkp; 9649 freehblkp = hmeblkp; 9650 mutex_exit(&freehblkp_lock); 9651 return (1); 9652 } 9653 mutex_exit(&freehblkp_lock); 9654 } 9655 9656 /* 9657 * Bring down freehblkcnt to HBLK_RESERVE_CNT. We are here 9658 * only if freehblkcnt is at least HBLK_RESERVE_CNT *and* 9659 * we are not in the process of mapping into kernel space. 
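 *
 * Note the check-then-recheck pattern used for freehblkcnt here and in
 * sfmmu_get_free_hblk(): the count is sampled without freehblkp_lock
 * as a cheap hint and verified again under the lock before anything is
 * changed, so the trimming loop below may simply fall through when
 * another thread has already brought the count down.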
9660 */ 9661 ASSERT(!critical); 9662 while (freehblkcnt > HBLK_RESERVE_CNT) { 9663 mutex_enter(&freehblkp_lock); 9664 if (freehblkcnt > HBLK_RESERVE_CNT) { 9665 freehblkcnt--; 9666 hblkp = freehblkp; 9667 freehblkp = hblkp->hblk_next; 9668 mutex_exit(&freehblkp_lock); 9669 ASSERT(get_hblk_cache(hblkp) == sfmmu8_cache); 9670 kmem_cache_free(sfmmu8_cache, hblkp); 9671 continue; 9672 } 9673 mutex_exit(&freehblkp_lock); 9674 } 9675 SFMMU_STAT(sf_put_free_fail); 9676 return (0); 9677 } 9678 9679 static void 9680 sfmmu_hblk_swap(struct hme_blk *new) 9681 { 9682 struct hme_blk *old, *hblkp, *prev; 9683 uint64_t hblkpa, prevpa, newpa; 9684 caddr_t base, vaddr, endaddr; 9685 struct hmehash_bucket *hmebp; 9686 struct sf_hment *osfhme, *nsfhme; 9687 page_t *pp; 9688 kmutex_t *pml; 9689 tte_t tte; 9690 9691 #ifdef DEBUG 9692 hmeblk_tag hblktag; 9693 struct hme_blk *found; 9694 #endif 9695 old = HBLK_RESERVE; 9696 9697 /* 9698 * save pa before bcopy clobbers it 9699 */ 9700 newpa = new->hblk_nextpa; 9701 9702 base = (caddr_t)get_hblk_base(old); 9703 endaddr = base + get_hblk_span(old); 9704 9705 /* 9706 * acquire hash bucket lock. 9707 */ 9708 hmebp = sfmmu_tteload_acquire_hashbucket(ksfmmup, base, TTE8K); 9709 9710 /* 9711 * copy contents from old to new 9712 */ 9713 bcopy((void *)old, (void *)new, HME8BLK_SZ); 9714 9715 /* 9716 * add new to hash chain 9717 */ 9718 sfmmu_hblk_hash_add(hmebp, new, newpa); 9719 9720 /* 9721 * search hash chain for hblk_reserve; this needs to be performed 9722 * after adding new, otherwise prevpa and prev won't correspond 9723 * to the hblk which is prior to old in hash chain when we call 9724 * sfmmu_hblk_hash_rm to remove old later. 9725 */ 9726 for (prevpa = 0, prev = NULL, 9727 hblkpa = hmebp->hmeh_nextpa, hblkp = hmebp->hmeblkp; 9728 hblkp != NULL && hblkp != old; 9729 prevpa = hblkpa, prev = hblkp, 9730 hblkpa = hblkp->hblk_nextpa, hblkp = hblkp->hblk_next); 9731 9732 if (hblkp != old) 9733 panic("sfmmu_hblk_swap: hblk_reserve not found"); 9734 9735 /* 9736 * p_mapping list is still pointing to hments in hblk_reserve; 9737 * fix up p_mapping list so that they point to hments in new. 9738 * 9739 * Since all these mappings are created by hblk_reserve_thread 9740 * on the way and it's using at least one of the buffers from each of 9741 * the newly minted slabs, there is no danger of any of these 9742 * mappings getting unloaded by another thread. 9743 * 9744 * tsbmiss could only modify ref/mod bits of hments in old/new. 9745 * Since all of these hments hold mappings established by segkmem 9746 * and mappings in segkmem are setup with HAT_NOSYNC, ref/mod bits 9747 * have no meaning for the mappings in hblk_reserve. hments in 9748 * old and new are identical except for ref/mod bits. 
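 *
 * Concretely, the loop below walks the span one 8K page at a time and,
 * for every valid tte, moves the matching hment from hblk_reserve to
 * the new hmeblk on that page's p_mapping list (HME_ADD/HME_SUB) while
 * holding the page's mlist lock.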
9749 */ 9750 for (vaddr = base; vaddr < endaddr; vaddr += TTEBYTES(TTE8K)) { 9751 9752 HBLKTOHME(osfhme, old, vaddr); 9753 sfmmu_copytte(&osfhme->hme_tte, &tte); 9754 9755 if (TTE_IS_VALID(&tte)) { 9756 if ((pp = osfhme->hme_page) == NULL) 9757 panic("sfmmu_hblk_swap: page not mapped"); 9758 9759 pml = sfmmu_mlist_enter(pp); 9760 9761 if (pp != osfhme->hme_page) 9762 panic("sfmmu_hblk_swap: mapping changed"); 9763 9764 HBLKTOHME(nsfhme, new, vaddr); 9765 9766 HME_ADD(nsfhme, pp); 9767 HME_SUB(osfhme, pp); 9768 9769 sfmmu_mlist_exit(pml); 9770 } 9771 } 9772 9773 /* 9774 * remove old from hash chain 9775 */ 9776 sfmmu_hblk_hash_rm(hmebp, old, prevpa, prev); 9777 9778 #ifdef DEBUG 9779 9780 hblktag.htag_id = ksfmmup; 9781 hblktag.htag_bspage = HME_HASH_BSPAGE(base, HME_HASH_SHIFT(TTE8K)); 9782 hblktag.htag_rehash = HME_HASH_REHASH(TTE8K); 9783 HME_HASH_FAST_SEARCH(hmebp, hblktag, found); 9784 9785 if (found != new) 9786 panic("sfmmu_hblk_swap: new hblk not found"); 9787 #endif 9788 9789 SFMMU_HASH_UNLOCK(hmebp); 9790 9791 /* 9792 * Reset hblk_reserve 9793 */ 9794 bzero((void *)old, HME8BLK_SZ); 9795 old->hblk_nextpa = va_to_pa((caddr_t)old); 9796 } 9797 9798 /* 9799 * Grab the mlist mutex for both pages passed in. 9800 * 9801 * low and high will be returned as pointers to the mutexes for these pages. 9802 * low refers to the mutex residing in the lower bin of the mlist hash, while 9803 * high refers to the mutex residing in the higher bin of the mlist hash. This 9804 * is due to the locking order restrictions on the same thread grabbing 9805 * multiple mlist mutexes. The low lock must be acquired before the high lock. 9806 * 9807 * If both pages hash to the same mutex, only grab that single mutex, and 9808 * high will be returned as NULL 9809 * If the pages hash to different bins in the hash, grab the lower addressed 9810 * lock first and then the higher addressed lock in order to follow the locking 9811 * rules involved with the same thread grabbing multiple mlist mutexes. 9812 * low and high will both have non-NULL values. 9813 */ 9814 static void 9815 sfmmu_mlist_reloc_enter(struct page *targ, struct page *repl, 9816 kmutex_t **low, kmutex_t **high) 9817 { 9818 kmutex_t *mml_targ, *mml_repl; 9819 9820 /* 9821 * no need to do the dance around szc as in sfmmu_mlist_enter() 9822 * because this routine is only called by hat_page_relocate() and all 9823 * targ and repl pages are already locked EXCL so szc can't change. 
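 *
 * Acquiring the lower-addressed mutex first gives every thread the same
 * global order over the mlist mutexes, so two concurrent relocations
 * whose pages hash to the same pair of buckets cannot deadlock against
 * each other.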
9824 */ 9825 9826 mml_targ = MLIST_HASH(PP_PAGEROOT(targ)); 9827 mml_repl = MLIST_HASH(PP_PAGEROOT(repl)); 9828 9829 if (mml_targ == mml_repl) { 9830 *low = mml_targ; 9831 *high = NULL; 9832 } else { 9833 if (mml_targ < mml_repl) { 9834 *low = mml_targ; 9835 *high = mml_repl; 9836 } else { 9837 *low = mml_repl; 9838 *high = mml_targ; 9839 } 9840 } 9841 9842 mutex_enter(*low); 9843 if (*high) 9844 mutex_enter(*high); 9845 } 9846 9847 static void 9848 sfmmu_mlist_reloc_exit(kmutex_t *low, kmutex_t *high) 9849 { 9850 if (high) 9851 mutex_exit(high); 9852 mutex_exit(low); 9853 } 9854 9855 static hatlock_t * 9856 sfmmu_hat_enter(sfmmu_t *sfmmup) 9857 { 9858 hatlock_t *hatlockp; 9859 9860 if (sfmmup != ksfmmup) { 9861 hatlockp = TSB_HASH(sfmmup); 9862 mutex_enter(HATLOCK_MUTEXP(hatlockp)); 9863 return (hatlockp); 9864 } 9865 return (NULL); 9866 } 9867 9868 static hatlock_t * 9869 sfmmu_hat_tryenter(sfmmu_t *sfmmup) 9870 { 9871 hatlock_t *hatlockp; 9872 9873 if (sfmmup != ksfmmup) { 9874 hatlockp = TSB_HASH(sfmmup); 9875 if (mutex_tryenter(HATLOCK_MUTEXP(hatlockp)) == 0) 9876 return (NULL); 9877 return (hatlockp); 9878 } 9879 return (NULL); 9880 } 9881 9882 static void 9883 sfmmu_hat_exit(hatlock_t *hatlockp) 9884 { 9885 if (hatlockp != NULL) 9886 mutex_exit(HATLOCK_MUTEXP(hatlockp)); 9887 } 9888 9889 static void 9890 sfmmu_hat_lock_all(void) 9891 { 9892 int i; 9893 for (i = 0; i < SFMMU_NUM_LOCK; i++) 9894 mutex_enter(HATLOCK_MUTEXP(&hat_lock[i])); 9895 } 9896 9897 static void 9898 sfmmu_hat_unlock_all(void) 9899 { 9900 int i; 9901 for (i = SFMMU_NUM_LOCK - 1; i >= 0; i--) 9902 mutex_exit(HATLOCK_MUTEXP(&hat_lock[i])); 9903 } 9904 9905 int 9906 sfmmu_hat_lock_held(sfmmu_t *sfmmup) 9907 { 9908 ASSERT(sfmmup != ksfmmup); 9909 return (MUTEX_HELD(HATLOCK_MUTEXP(TSB_HASH(sfmmup)))); 9910 } 9911 9912 /* 9913 * Locking primitives to provide consistency between ISM unmap 9914 * and other operations. Since ISM unmap can take a long time, we 9915 * use HAT_ISMBUSY flag (protected by the hatlock) to avoid creating 9916 * contention on the hatlock buckets while ISM segments are being 9917 * unmapped. The tradeoff is that the flags don't prevent priority 9918 * inversion from occurring, so we must request kernel priority in 9919 * case we have to sleep to keep from getting buried while holding 9920 * the HAT_ISMBUSY flag set, which in turn could block other kernel 9921 * threads from running (for example, in sfmmu_uvatopfn()). 9922 */ 9923 static void 9924 sfmmu_ismhat_enter(sfmmu_t *sfmmup, int hatlock_held) 9925 { 9926 hatlock_t *hatlockp; 9927 9928 THREAD_KPRI_REQUEST(); 9929 if (!hatlock_held) 9930 hatlockp = sfmmu_hat_enter(sfmmup); 9931 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) 9932 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp)); 9933 SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY); 9934 if (!hatlock_held) 9935 sfmmu_hat_exit(hatlockp); 9936 } 9937 9938 static void 9939 sfmmu_ismhat_exit(sfmmu_t *sfmmup, int hatlock_held) 9940 { 9941 hatlock_t *hatlockp; 9942 9943 if (!hatlock_held) 9944 hatlockp = sfmmu_hat_enter(sfmmup); 9945 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 9946 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY); 9947 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 9948 if (!hatlock_held) 9949 sfmmu_hat_exit(hatlockp); 9950 THREAD_KPRI_RELEASE(); 9951 } 9952 9953 /* 9954 * 9955 * Algorithm: 9956 * 9957 * (1) if segkmem is not ready, allocate hblk from an array of pre-alloc'ed 9958 * hblks. 
9959 * 9960 * (2) if we are allocating an hblk for mapping a slab in sfmmu_cache, 9961 * 9962 * (a) try to return an hblk from reserve pool of free hblks; 9963 * (b) if the reserve pool is empty, acquire hblk_reserve_lock 9964 * and return hblk_reserve. 9965 * 9966 * (3) call kmem_cache_alloc() to allocate hblk; 9967 * 9968 * (a) if hblk_reserve_lock is held by the current thread, 9969 * atomically replace hblk_reserve by the hblk that is 9970 * returned by kmem_cache_alloc; release hblk_reserve_lock 9971 * and call kmem_cache_alloc() again. 9972 * (b) if reserve pool is not full, add the hblk that is 9973 * returned by kmem_cache_alloc to reserve pool and 9974 * call kmem_cache_alloc again. 9975 * 9976 */ 9977 static struct hme_blk * 9978 sfmmu_hblk_alloc(sfmmu_t *sfmmup, caddr_t vaddr, 9979 struct hmehash_bucket *hmebp, uint_t size, hmeblk_tag hblktag, 9980 uint_t flags) 9981 { 9982 struct hme_blk *hmeblkp = NULL; 9983 struct hme_blk *newhblkp; 9984 struct hme_blk *shw_hblkp = NULL; 9985 struct kmem_cache *sfmmu_cache = NULL; 9986 uint64_t hblkpa; 9987 ulong_t index; 9988 uint_t owner; /* set to 1 if using hblk_reserve */ 9989 uint_t forcefree; 9990 int sleep; 9991 9992 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 9993 9994 /* 9995 * If segkmem is not created yet, allocate from static hmeblks 9996 * created at the end of startup_modules(). See the block comment 9997 * in startup_modules() describing how we estimate the number of 9998 * static hmeblks that will be needed during re-map. 9999 */ 10000 if (!hblk_alloc_dynamic) { 10001 10002 if (size == TTE8K) { 10003 index = nucleus_hblk8.index; 10004 if (index >= nucleus_hblk8.len) { 10005 /* 10006 * If we panic here, see startup_modules() to 10007 * make sure that we are calculating the 10008 * number of hblk8's that we need correctly. 10009 */ 10010 panic("no nucleus hblk8 to allocate"); 10011 } 10012 hmeblkp = 10013 (struct hme_blk *)&nucleus_hblk8.list[index]; 10014 nucleus_hblk8.index++; 10015 SFMMU_STAT(sf_hblk8_nalloc); 10016 } else { 10017 index = nucleus_hblk1.index; 10018 if (nucleus_hblk1.index >= nucleus_hblk1.len) { 10019 /* 10020 * If we panic here, see startup_modules() 10021 * and H8TOH1; most likely you need to 10022 * update the calculation of the number 10023 * of hblk1's the kernel needs to boot. 10024 */ 10025 panic("no nucleus hblk1 to allocate"); 10026 } 10027 hmeblkp = 10028 (struct hme_blk *)&nucleus_hblk1.list[index]; 10029 nucleus_hblk1.index++; 10030 SFMMU_STAT(sf_hblk1_nalloc); 10031 } 10032 10033 goto hblk_init; 10034 } 10035 10036 SFMMU_HASH_UNLOCK(hmebp); 10037 10038 if (sfmmup != KHATID) { 10039 if (mmu_page_sizes == max_mmu_page_sizes) { 10040 if (size < TTE256M) 10041 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr, 10042 size, flags); 10043 } else { 10044 if (size < TTE4M) 10045 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr, 10046 size, flags); 10047 } 10048 } 10049 10050 fill_hblk: 10051 owner = (hblk_reserve_thread == curthread) ? 1 : 0; 10052 10053 if (owner && size == TTE8K) { 10054 10055 /* 10056 * We are really in a tight spot. We already own 10057 * hblk_reserve and we need another hblk. In anticipation 10058 * of this kind of scenario, we specifically set aside 10059 * HBLK_RESERVE_MIN number of hblks to be used exclusively 10060 * by owner of hblk_reserve. 
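 *
 * Illustrative sketch only; sfmmu_get_free_hblk() is not shown
 * here, but the intent is presumably a reserve-pool check along
 * the lines of
 *
 *	if (freehblkcnt == 0 ||
 *	    (!critical && freehblkcnt <= HBLK_RESERVE_MIN))
 *		refuse and let the caller fall back;
 *
 * so that only this critical caller, already holding hblk_reserve,
 * may consume the last HBLK_RESERVE_MIN of the pooled free hblks.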
10061 */ 10062 SFMMU_STAT(sf_hblk_recurse_cnt); 10063 10064 if (!sfmmu_get_free_hblk(&hmeblkp, 1)) 10065 panic("sfmmu_hblk_alloc: reserve list is empty"); 10066 10067 goto hblk_verify; 10068 } 10069 10070 ASSERT(!owner); 10071 10072 if ((flags & HAT_NO_KALLOC) == 0) { 10073 10074 sfmmu_cache = ((size == TTE8K) ? sfmmu8_cache : sfmmu1_cache); 10075 sleep = ((sfmmup == KHATID) ? KM_NOSLEEP : KM_SLEEP); 10076 10077 if ((hmeblkp = kmem_cache_alloc(sfmmu_cache, sleep)) == NULL) { 10078 hmeblkp = sfmmu_hblk_steal(size); 10079 } else { 10080 /* 10081 * if we are the owner of hblk_reserve, 10082 * swap hblk_reserve with hmeblkp and 10083 * start a fresh life. Hope things go 10084 * better this time. 10085 */ 10086 if (hblk_reserve_thread == curthread) { 10087 ASSERT(sfmmu_cache == sfmmu8_cache); 10088 sfmmu_hblk_swap(hmeblkp); 10089 hblk_reserve_thread = NULL; 10090 mutex_exit(&hblk_reserve_lock); 10091 goto fill_hblk; 10092 } 10093 /* 10094 * let's donate this hblk to our reserve list if 10095 * we are not mapping kernel range 10096 */ 10097 if (size == TTE8K && sfmmup != KHATID) 10098 if (sfmmu_put_free_hblk(hmeblkp, 0)) 10099 goto fill_hblk; 10100 } 10101 } else { 10102 /* 10103 * We are here to map the slab in sfmmu8_cache; let's 10104 * check if we could tap our reserve list; if successful, 10105 * this will avoid the pain of going thru sfmmu_hblk_swap 10106 */ 10107 SFMMU_STAT(sf_hblk_slab_cnt); 10108 if (!sfmmu_get_free_hblk(&hmeblkp, 0)) { 10109 /* 10110 * let's start hblk_reserve dance 10111 */ 10112 SFMMU_STAT(sf_hblk_reserve_cnt); 10113 owner = 1; 10114 mutex_enter(&hblk_reserve_lock); 10115 hmeblkp = HBLK_RESERVE; 10116 hblk_reserve_thread = curthread; 10117 } 10118 } 10119 10120 hblk_verify: 10121 ASSERT(hmeblkp != NULL); 10122 set_hblk_sz(hmeblkp, size); 10123 ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp)); 10124 SFMMU_HASH_LOCK(hmebp); 10125 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp); 10126 if (newhblkp != NULL) { 10127 SFMMU_HASH_UNLOCK(hmebp); 10128 if (hmeblkp != HBLK_RESERVE) { 10129 /* 10130 * This is really tricky! 10131 * 10132 * vmem_alloc(vmem_seg_arena) 10133 * vmem_alloc(vmem_internal_arena) 10134 * segkmem_alloc(heap_arena) 10135 * vmem_alloc(heap_arena) 10136 * page_create() 10137 * hat_memload() 10138 * kmem_cache_free() 10139 * kmem_cache_alloc() 10140 * kmem_slab_create() 10141 * vmem_alloc(kmem_internal_arena) 10142 * segkmem_alloc(heap_arena) 10143 * vmem_alloc(heap_arena) 10144 * page_create() 10145 * hat_memload() 10146 * kmem_cache_free() 10147 * ... 10148 * 10149 * Thus, hat_memload() could call kmem_cache_free 10150 * for enough number of times that we could easily 10151 * hit the bottom of the stack or run out of reserve 10152 * list of vmem_seg structs. So, we must donate 10153 * this hblk to reserve list if it's allocated 10154 * from sfmmu8_cache *and* mapping kernel range. 10155 * We don't need to worry about freeing hmeblk1's 10156 * to kmem since they don't map any kmem slabs. 10157 * 10158 * Note: When segkmem supports largepages, we must 10159 * free hmeblk1's to reserve list as well. 10160 */ 10161 forcefree = (sfmmup == KHATID) ? 1 : 0; 10162 if (size == TTE8K && 10163 sfmmu_put_free_hblk(hmeblkp, forcefree)) { 10164 goto re_verify; 10165 } 10166 ASSERT(sfmmup != KHATID); 10167 kmem_cache_free(get_hblk_cache(hmeblkp), hmeblkp); 10168 } else { 10169 /* 10170 * Hey! we don't need hblk_reserve any more. 
10171 */ 10172 ASSERT(owner); 10173 hblk_reserve_thread = NULL; 10174 mutex_exit(&hblk_reserve_lock); 10175 owner = 0; 10176 } 10177 re_verify: 10178 /* 10179 * let's check if the goodies are still present 10180 */ 10181 SFMMU_HASH_LOCK(hmebp); 10182 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp); 10183 if (newhblkp != NULL) { 10184 /* 10185 * return newhblkp if it's not hblk_reserve; 10186 * if newhblkp is hblk_reserve, return it 10187 * _only if_ we are the owner of hblk_reserve. 10188 */ 10189 if (newhblkp != HBLK_RESERVE || owner) { 10190 return (newhblkp); 10191 } else { 10192 /* 10193 * we just hit hblk_reserve in the hash and 10194 * we are not the owner of that; 10195 * 10196 * block until hblk_reserve_thread completes 10197 * swapping hblk_reserve and try the dance 10198 * once again. 10199 */ 10200 SFMMU_HASH_UNLOCK(hmebp); 10201 mutex_enter(&hblk_reserve_lock); 10202 mutex_exit(&hblk_reserve_lock); 10203 SFMMU_STAT(sf_hblk_reserve_hit); 10204 goto fill_hblk; 10205 } 10206 } else { 10207 /* 10208 * it's no more! try the dance once again. 10209 */ 10210 SFMMU_HASH_UNLOCK(hmebp); 10211 goto fill_hblk; 10212 } 10213 } 10214 10215 hblk_init: 10216 set_hblk_sz(hmeblkp, size); 10217 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 10218 hmeblkp->hblk_next = (struct hme_blk *)NULL; 10219 hmeblkp->hblk_tag = hblktag; 10220 hmeblkp->hblk_shadow = shw_hblkp; 10221 hblkpa = hmeblkp->hblk_nextpa; 10222 hmeblkp->hblk_nextpa = 0; 10223 10224 ASSERT(get_hblk_ttesz(hmeblkp) == size); 10225 ASSERT(get_hblk_span(hmeblkp) == HMEBLK_SPAN(size)); 10226 ASSERT(hmeblkp->hblk_hmecnt == 0); 10227 ASSERT(hmeblkp->hblk_vcnt == 0); 10228 ASSERT(hmeblkp->hblk_lckcnt == 0); 10229 ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp)); 10230 sfmmu_hblk_hash_add(hmebp, hmeblkp, hblkpa); 10231 return (hmeblkp); 10232 } 10233 10234 /* 10235 * This function performs any cleanup required on the hme_blk 10236 * and returns it to the free list. 10237 */ 10238 /* ARGSUSED */ 10239 static void 10240 sfmmu_hblk_free(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp, 10241 uint64_t hblkpa, struct hme_blk **listp) 10242 { 10243 int shw_size, vshift; 10244 struct hme_blk *shw_hblkp; 10245 uint_t shw_mask, newshw_mask; 10246 uintptr_t vaddr; 10247 int size; 10248 uint_t critical; 10249 10250 ASSERT(hmeblkp); 10251 ASSERT(!hmeblkp->hblk_hmecnt); 10252 ASSERT(!hmeblkp->hblk_vcnt); 10253 ASSERT(!hmeblkp->hblk_lckcnt); 10254 ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp)); 10255 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve); 10256 10257 critical = (hblktosfmmu(hmeblkp) == KHATID) ? 
1 : 0; 10258 10259 size = get_hblk_ttesz(hmeblkp); 10260 shw_hblkp = hmeblkp->hblk_shadow; 10261 if (shw_hblkp) { 10262 ASSERT(hblktosfmmu(hmeblkp) != KHATID); 10263 if (mmu_page_sizes == max_mmu_page_sizes) { 10264 ASSERT(size < TTE256M); 10265 } else { 10266 ASSERT(size < TTE4M); 10267 } 10268 10269 shw_size = get_hblk_ttesz(shw_hblkp); 10270 vaddr = get_hblk_base(hmeblkp); 10271 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size); 10272 ASSERT(vshift < 8); 10273 /* 10274 * Atomically clear shadow mask bit 10275 */ 10276 do { 10277 shw_mask = shw_hblkp->hblk_shw_mask; 10278 ASSERT(shw_mask & (1 << vshift)); 10279 newshw_mask = shw_mask & ~(1 << vshift); 10280 newshw_mask = cas32(&shw_hblkp->hblk_shw_mask, 10281 shw_mask, newshw_mask); 10282 } while (newshw_mask != shw_mask); 10283 hmeblkp->hblk_shadow = NULL; 10284 } 10285 hmeblkp->hblk_next = NULL; 10286 hmeblkp->hblk_nextpa = hblkpa; 10287 hmeblkp->hblk_shw_bit = 0; 10288 10289 if (hmeblkp->hblk_nuc_bit == 0) { 10290 10291 if (size == TTE8K && sfmmu_put_free_hblk(hmeblkp, critical)) 10292 return; 10293 10294 hmeblkp->hblk_next = *listp; 10295 *listp = hmeblkp; 10296 } 10297 } 10298 10299 static void 10300 sfmmu_hblks_list_purge(struct hme_blk **listp) 10301 { 10302 struct hme_blk *hmeblkp; 10303 10304 while ((hmeblkp = *listp) != NULL) { 10305 *listp = hmeblkp->hblk_next; 10306 kmem_cache_free(get_hblk_cache(hmeblkp), hmeblkp); 10307 } 10308 } 10309 10310 #define BUCKETS_TO_SEARCH_BEFORE_UNLOAD 30 10311 10312 static uint_t sfmmu_hblk_steal_twice; 10313 static uint_t sfmmu_hblk_steal_count, sfmmu_hblk_steal_unload_count; 10314 10315 /* 10316 * Steal a hmeblk 10317 * Enough hmeblks were allocated at startup (nucleus hmeblks) and also 10318 * hmeblks were added dynamically. We should never ever not be able to 10319 * find one. Look for an unused/unlocked hmeblk in user hash table. 10320 */ 10321 static struct hme_blk * 10322 sfmmu_hblk_steal(int size) 10323 { 10324 static struct hmehash_bucket *uhmehash_steal_hand = NULL; 10325 struct hmehash_bucket *hmebp; 10326 struct hme_blk *hmeblkp = NULL, *pr_hblk; 10327 uint64_t hblkpa, prevpa; 10328 int i; 10329 10330 for (;;) { 10331 hmebp = (uhmehash_steal_hand == NULL) ? uhme_hash : 10332 uhmehash_steal_hand; 10333 ASSERT(hmebp >= uhme_hash && hmebp <= &uhme_hash[UHMEHASH_SZ]); 10334 10335 for (i = 0; hmeblkp == NULL && i <= UHMEHASH_SZ + 10336 BUCKETS_TO_SEARCH_BEFORE_UNLOAD; i++) { 10337 SFMMU_HASH_LOCK(hmebp); 10338 hmeblkp = hmebp->hmeblkp; 10339 hblkpa = hmebp->hmeh_nextpa; 10340 prevpa = 0; 10341 pr_hblk = NULL; 10342 while (hmeblkp) { 10343 /* 10344 * check if it is a hmeblk that is not locked 10345 * and not shared. skip shadow hmeblks with 10346 * shadow_mask set i.e valid count non zero. 10347 */ 10348 if ((get_hblk_ttesz(hmeblkp) == size) && 10349 (hmeblkp->hblk_shw_bit == 0 || 10350 hmeblkp->hblk_vcnt == 0) && 10351 (hmeblkp->hblk_lckcnt == 0)) { 10352 /* 10353 * there is a high probability that we 10354 * will find a free one. search some 10355 * buckets for a free hmeblk initially 10356 * before unloading a valid hmeblk. 
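 *
 * Concretely (restating the test below): for the first
 * BUCKETS_TO_SEARCH_BEFORE_UNLOAD (30) buckets only hmeblks
 * that are already completely free (hblk_vcnt == 0 and
 * hblk_hmecnt == 0) are taken; once i reaches that threshold,
 * a valid but unlocked, unshared hmeblk of the right size may
 * be unloaded and stolen as well.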
10357 */ 10358 if ((hmeblkp->hblk_vcnt == 0 && 10359 hmeblkp->hblk_hmecnt == 0) || (i >= 10360 BUCKETS_TO_SEARCH_BEFORE_UNLOAD)) { 10361 if (sfmmu_steal_this_hblk(hmebp, 10362 hmeblkp, hblkpa, prevpa, 10363 pr_hblk)) { 10364 /* 10365 * Hblk is unloaded 10366 * successfully 10367 */ 10368 break; 10369 } 10370 } 10371 } 10372 pr_hblk = hmeblkp; 10373 prevpa = hblkpa; 10374 hblkpa = hmeblkp->hblk_nextpa; 10375 hmeblkp = hmeblkp->hblk_next; 10376 } 10377 10378 SFMMU_HASH_UNLOCK(hmebp); 10379 if (hmebp++ == &uhme_hash[UHMEHASH_SZ]) 10380 hmebp = uhme_hash; 10381 } 10382 uhmehash_steal_hand = hmebp; 10383 10384 if (hmeblkp != NULL) 10385 break; 10386 10387 /* 10388 * in the worst case, look for a free one in the kernel 10389 * hash table. 10390 */ 10391 for (i = 0, hmebp = khme_hash; i <= KHMEHASH_SZ; i++) { 10392 SFMMU_HASH_LOCK(hmebp); 10393 hmeblkp = hmebp->hmeblkp; 10394 hblkpa = hmebp->hmeh_nextpa; 10395 prevpa = 0; 10396 pr_hblk = NULL; 10397 while (hmeblkp) { 10398 /* 10399 * check if it is free hmeblk 10400 */ 10401 if ((get_hblk_ttesz(hmeblkp) == size) && 10402 (hmeblkp->hblk_lckcnt == 0) && 10403 (hmeblkp->hblk_vcnt == 0) && 10404 (hmeblkp->hblk_hmecnt == 0)) { 10405 if (sfmmu_steal_this_hblk(hmebp, 10406 hmeblkp, hblkpa, prevpa, pr_hblk)) { 10407 break; 10408 } else { 10409 /* 10410 * Cannot fail since we have 10411 * hash lock. 10412 */ 10413 panic("fail to steal?"); 10414 } 10415 } 10416 10417 pr_hblk = hmeblkp; 10418 prevpa = hblkpa; 10419 hblkpa = hmeblkp->hblk_nextpa; 10420 hmeblkp = hmeblkp->hblk_next; 10421 } 10422 10423 SFMMU_HASH_UNLOCK(hmebp); 10424 if (hmebp++ == &khme_hash[KHMEHASH_SZ]) 10425 hmebp = khme_hash; 10426 } 10427 10428 if (hmeblkp != NULL) 10429 break; 10430 sfmmu_hblk_steal_twice++; 10431 } 10432 return (hmeblkp); 10433 } 10434 10435 /* 10436 * This routine does real work to prepare a hblk to be "stolen" by 10437 * unloading the mappings, updating shadow counts .... 10438 * It returns 1 if the block is ready to be reused (stolen), or 0 10439 * means the block cannot be stolen yet- pageunload is still working 10440 * on this hblk. 10441 */ 10442 static int 10443 sfmmu_steal_this_hblk(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp, 10444 uint64_t hblkpa, uint64_t prevpa, struct hme_blk *pr_hblk) 10445 { 10446 int shw_size, vshift; 10447 struct hme_blk *shw_hblkp; 10448 uintptr_t vaddr; 10449 uint_t shw_mask, newshw_mask; 10450 10451 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 10452 10453 /* 10454 * check if the hmeblk is free, unload if necessary 10455 */ 10456 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 10457 sfmmu_t *sfmmup; 10458 demap_range_t dmr; 10459 10460 sfmmup = hblktosfmmu(hmeblkp); 10461 DEMAP_RANGE_INIT(sfmmup, &dmr); 10462 (void) sfmmu_hblk_unload(sfmmup, hmeblkp, 10463 (caddr_t)get_hblk_base(hmeblkp), 10464 get_hblk_endaddr(hmeblkp), &dmr, HAT_UNLOAD); 10465 DEMAP_RANGE_FLUSH(&dmr); 10466 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 10467 /* 10468 * Pageunload is working on the same hblk. 
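 *
 * Returning 0 here is not an error; sfmmu_hblk_steal() simply
 * leaves this hmeblk in place and keeps walking the hash chain
 * for another candidate.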
10469 */ 10470 return (0); 10471 } 10472 10473 sfmmu_hblk_steal_unload_count++; 10474 } 10475 10476 ASSERT(hmeblkp->hblk_lckcnt == 0); 10477 ASSERT(hmeblkp->hblk_vcnt == 0 && hmeblkp->hblk_hmecnt == 0); 10478 10479 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, pr_hblk); 10480 hmeblkp->hblk_nextpa = hblkpa; 10481 10482 shw_hblkp = hmeblkp->hblk_shadow; 10483 if (shw_hblkp) { 10484 shw_size = get_hblk_ttesz(shw_hblkp); 10485 vaddr = get_hblk_base(hmeblkp); 10486 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size); 10487 ASSERT(vshift < 8); 10488 /* 10489 * Atomically clear shadow mask bit 10490 */ 10491 do { 10492 shw_mask = shw_hblkp->hblk_shw_mask; 10493 ASSERT(shw_mask & (1 << vshift)); 10494 newshw_mask = shw_mask & ~(1 << vshift); 10495 newshw_mask = cas32(&shw_hblkp->hblk_shw_mask, 10496 shw_mask, newshw_mask); 10497 } while (newshw_mask != shw_mask); 10498 hmeblkp->hblk_shadow = NULL; 10499 } 10500 10501 /* 10502 * remove shadow bit if we are stealing an unused shadow hmeblk. 10503 * sfmmu_hblk_alloc needs it that way, will set shadow bit later if 10504 * we are indeed allocating a shadow hmeblk. 10505 */ 10506 hmeblkp->hblk_shw_bit = 0; 10507 10508 sfmmu_hblk_steal_count++; 10509 SFMMU_STAT(sf_steal_count); 10510 10511 return (1); 10512 } 10513 10514 struct hme_blk * 10515 sfmmu_hmetohblk(struct sf_hment *sfhme) 10516 { 10517 struct hme_blk *hmeblkp; 10518 struct sf_hment *sfhme0; 10519 struct hme_blk *hblk_dummy = 0; 10520 10521 /* 10522 * No dummy sf_hments, please. 10523 */ 10524 ASSERT(sfhme->hme_tte.ll != 0); 10525 10526 sfhme0 = sfhme - sfhme->hme_tte.tte_hmenum; 10527 hmeblkp = (struct hme_blk *)((uintptr_t)sfhme0 - 10528 (uintptr_t)&hblk_dummy->hblk_hme[0]); 10529 10530 return (hmeblkp); 10531 } 10532 10533 /* 10534 * Make sure that there is a valid ctx, if not get a ctx. 10535 * Also, get a readers lock on the ctx, so that the ctx cannot 10536 * be stolen underneath us. 10537 */ 10538 static void 10539 sfmmu_disallow_ctx_steal(sfmmu_t *sfmmup) 10540 { 10541 struct ctx *ctx; 10542 10543 ASSERT(sfmmup != ksfmmup); 10544 ASSERT(sfmmup->sfmmu_ismhat == 0); 10545 10546 /* 10547 * If ctx has been stolen, get a ctx. 10548 */ 10549 if (sfmmup->sfmmu_cnum == INVALID_CONTEXT) { 10550 /* 10551 * Our ctx was stolen. Get a ctx with rlock. 10552 */ 10553 ctx = sfmmu_get_ctx(sfmmup); 10554 return; 10555 } else { 10556 ctx = sfmmutoctx(sfmmup); 10557 } 10558 10559 /* 10560 * Get the reader lock. 10561 */ 10562 rw_enter(&ctx->ctx_rwlock, RW_READER); 10563 if (ctx->ctx_sfmmu != sfmmup) { 10564 /* 10565 * The ctx got stolen, so spin again. 10566 */ 10567 rw_exit(&ctx->ctx_rwlock); 10568 ctx = sfmmu_get_ctx(sfmmup); 10569 } 10570 10571 ASSERT(sfmmup->sfmmu_cnum >= NUM_LOCKED_CTXS); 10572 } 10573 10574 /* 10575 * Decrement reference count for our ctx. If the reference count 10576 * becomes 0, our ctx can be stolen by someone. 10577 */ 10578 static void 10579 sfmmu_allow_ctx_steal(sfmmu_t *sfmmup) 10580 { 10581 struct ctx *ctx; 10582 10583 ASSERT(sfmmup != ksfmmup); 10584 ASSERT(sfmmup->sfmmu_ismhat == 0); 10585 ctx = sfmmutoctx(sfmmup); 10586 10587 ASSERT(sfmmup == ctx->ctx_sfmmu); 10588 ASSERT(sfmmup->sfmmu_cnum != INVALID_CONTEXT); 10589 rw_exit(&ctx->ctx_rwlock); 10590 } 10591 10592 /* 10593 * On swapin, get appropriately sized TSB(s) and clear the HAT_SWAPPED flag. 10594 * If we can't get appropriately sized TSB(s), try for 8K TSB(s) using 10595 * KM_SLEEP allocation. 10596 * 10597 * Return 0 on success, -1 otherwise. 
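 *
 * In outline (a restatement of the code below, not an interface
 * contract):
 *
 *	for each tsbinfo on sfmmup->sfmmu_tsb
 *		sfmmu_replace_tsb(sfmmup, tsbinfop, tsbinfop->tsb_szc,
 *		    hatlockp, TSB_SWAPIN);
 *	on the first allocation failure
 *		discard every tsbinfo after the first
 *		if not even the first TSB was obtained
 *			sfmmu_replace_tsb(sfmmup, tsbinfop, TSB_MIN_SZCODE,
 *			    hatlockp, TSB_SWAPIN | TSB_FORCEALLOC);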
10598 */ 10599 static void 10600 sfmmu_tsb_swapin(sfmmu_t *sfmmup, hatlock_t *hatlockp) 10601 { 10602 struct tsb_info *tsbinfop, *next; 10603 tsb_replace_rc_t rc; 10604 boolean_t gotfirst = B_FALSE; 10605 10606 ASSERT(sfmmup != ksfmmup); 10607 ASSERT(sfmmu_hat_lock_held(sfmmup)); 10608 10609 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPIN)) { 10610 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp)); 10611 } 10612 10613 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 10614 SFMMU_FLAGS_SET(sfmmup, HAT_SWAPIN); 10615 } else { 10616 return; 10617 } 10618 10619 ASSERT(sfmmup->sfmmu_tsb != NULL); 10620 10621 /* 10622 * Loop over all tsbinfo's replacing them with ones that actually have 10623 * a TSB. If any of the replacements ever fail, bail out of the loop. 10624 */ 10625 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; tsbinfop = next) { 10626 ASSERT(tsbinfop->tsb_flags & TSB_SWAPPED); 10627 next = tsbinfop->tsb_next; 10628 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, tsbinfop->tsb_szc, 10629 hatlockp, TSB_SWAPIN); 10630 if (rc != TSB_SUCCESS) { 10631 break; 10632 } 10633 gotfirst = B_TRUE; 10634 } 10635 10636 switch (rc) { 10637 case TSB_SUCCESS: 10638 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN); 10639 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 10640 return; 10641 case TSB_ALLOCFAIL: 10642 break; 10643 default: 10644 panic("sfmmu_replace_tsb returned unrecognized failure code " 10645 "%d", rc); 10646 } 10647 10648 /* 10649 * In this case, we failed to get one of our TSBs. If we failed to 10650 * get the first TSB, get one of minimum size (8KB). Walk the list 10651 * and throw away the tsbinfos, starting where the allocation failed; 10652 * we can get by with just one TSB as long as we don't leave the 10653 * SWAPPED tsbinfo structures lying around. 10654 */ 10655 tsbinfop = sfmmup->sfmmu_tsb; 10656 next = tsbinfop->tsb_next; 10657 tsbinfop->tsb_next = NULL; 10658 10659 sfmmu_hat_exit(hatlockp); 10660 for (tsbinfop = next; tsbinfop != NULL; tsbinfop = next) { 10661 next = tsbinfop->tsb_next; 10662 sfmmu_tsbinfo_free(tsbinfop); 10663 } 10664 hatlockp = sfmmu_hat_enter(sfmmup); 10665 10666 /* 10667 * If we don't have any TSBs, get a single 8K TSB for 8K, 64K and 512K 10668 * pages. 10669 */ 10670 if (!gotfirst) { 10671 tsbinfop = sfmmup->sfmmu_tsb; 10672 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, TSB_MIN_SZCODE, 10673 hatlockp, TSB_SWAPIN | TSB_FORCEALLOC); 10674 ASSERT(rc == TSB_SUCCESS); 10675 } 10676 10677 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN); 10678 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 10679 } 10680 10681 /* 10682 * Handle exceptions for low level tsb_handler. 10683 * 10684 * There are many scenarios that could land us here: 10685 * 10686 * 1) Process has no context. In this case, ctx is 10687 * INVALID_CONTEXT and sfmmup->sfmmu_cnum == 1 so 10688 * we will acquire a context before returning. 10689 * 2) Need to re-load our MMU state. In this case, 10690 * ctx is INVALID_CONTEXT and sfmmup->sfmmu_cnum != 1. 10691 * 3) ISM mappings are being updated. This is handled 10692 * just like case #2. 10693 * 4) We wish to program a new page size into the TLB. 10694 * This is handled just like case #1, since changing 10695 * TLB page size requires us to flush the TLB. 10696 * 5) Window fault and no valid translation found. 10697 * 10698 * Cases 1-4, ctx is INVALID_CONTEXT so we handle it and then 10699 * exit which will retry the trapped instruction. Case #5 we 10700 * punt to trap() which will raise us a trap level and handle 10701 * the fault before unwinding. 
10702 * 10703 * Note that the process will run in INVALID_CONTEXT before 10704 * faulting into here and subsequently loading the MMU registers 10705 * (including the TSB base register) associated with this process. 10706 * For this reason, the trap handlers must all test for 10707 * INVALID_CONTEXT before attempting to access any registers other 10708 * than the context registers. 10709 */ 10710 void 10711 sfmmu_tsbmiss_exception(struct regs *rp, uintptr_t tagaccess, uint_t traptype) 10712 { 10713 sfmmu_t *sfmmup; 10714 uint_t ctxnum; 10715 klwp_id_t lwp; 10716 char lwp_save_state; 10717 hatlock_t *hatlockp; 10718 struct tsb_info *tsbinfop; 10719 10720 SFMMU_STAT(sf_tsb_exceptions); 10721 sfmmup = astosfmmu(curthread->t_procp->p_as); 10722 ctxnum = tagaccess & TAGACC_CTX_MASK; 10723 10724 ASSERT(sfmmup != ksfmmup && ctxnum != KCONTEXT); 10725 ASSERT(sfmmup->sfmmu_ismhat == 0); 10726 /* 10727 * First, make sure we come out of here with a valid ctx, 10728 * since if we don't get one we'll simply loop on the 10729 * faulting instruction. 10730 * 10731 * If the ISM mappings are changing, the TSB is being relocated, or 10732 * the process is swapped out we serialize behind the controlling 10733 * thread with the sfmmu_flags and sfmmu_tsb_cv condition variable. 10734 * Otherwise we synchronize with the context stealer or the thread 10735 * that required us to change out our MMU registers (such 10736 * as a thread changing out our TSB while we were running) by 10737 * locking the HAT and grabbing the rwlock on the context as a 10738 * reader temporarily. 10739 */ 10740 if (ctxnum == INVALID_CONTEXT || 10741 SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 10742 /* 10743 * Must set lwp state to LWP_SYS before 10744 * trying to acquire any adaptive lock 10745 */ 10746 lwp = ttolwp(curthread); 10747 ASSERT(lwp); 10748 lwp_save_state = lwp->lwp_state; 10749 lwp->lwp_state = LWP_SYS; 10750 10751 hatlockp = sfmmu_hat_enter(sfmmup); 10752 retry: 10753 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; 10754 tsbinfop = tsbinfop->tsb_next) { 10755 if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) { 10756 cv_wait(&sfmmup->sfmmu_tsb_cv, 10757 HATLOCK_MUTEXP(hatlockp)); 10758 goto retry; 10759 } 10760 } 10761 10762 /* 10763 * Wait for ISM maps to be updated. 10764 */ 10765 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) { 10766 cv_wait(&sfmmup->sfmmu_tsb_cv, 10767 HATLOCK_MUTEXP(hatlockp)); 10768 goto retry; 10769 } 10770 10771 /* 10772 * If we're swapping in, get TSB(s). Note that we must do 10773 * this before we get a ctx or load the MMU state. Once 10774 * we swap in we have to recheck to make sure the TSB(s) and 10775 * ISM mappings didn't change while we slept. 10776 */ 10777 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 10778 sfmmu_tsb_swapin(sfmmup, hatlockp); 10779 goto retry; 10780 } 10781 10782 sfmmu_disallow_ctx_steal(sfmmup); 10783 ctxnum = sfmmup->sfmmu_cnum; 10784 kpreempt_disable(); 10785 sfmmu_setctx_sec(ctxnum); 10786 sfmmu_load_mmustate(sfmmup); 10787 kpreempt_enable(); 10788 sfmmu_allow_ctx_steal(sfmmup); 10789 sfmmu_hat_exit(hatlockp); 10790 /* 10791 * Must restore lwp_state if not calling 10792 * trap() for further processing. Restore 10793 * it anyway. 
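 *
 * Restating the code below: after lwp_state is restored we
 * return, retrying the trapped instruction, as long as this
 * hat still has any translations loaded (some sfmmu_ttecnt[]
 * is nonzero); otherwise we fall through to trap(), first
 * downgrading T_DATA_PROT to T_DATA_MMU_MISS.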
10794 */ 10795 lwp->lwp_state = lwp_save_state; 10796 if (sfmmup->sfmmu_ttecnt[TTE8K] != 0 || 10797 sfmmup->sfmmu_ttecnt[TTE64K] != 0 || 10798 sfmmup->sfmmu_ttecnt[TTE512K] != 0 || 10799 sfmmup->sfmmu_ttecnt[TTE4M] != 0 || 10800 sfmmup->sfmmu_ttecnt[TTE32M] != 0 || 10801 sfmmup->sfmmu_ttecnt[TTE256M] != 0) { 10802 return; 10803 } 10804 if (traptype == T_DATA_PROT) { 10805 traptype = T_DATA_MMU_MISS; 10806 } 10807 } 10808 trap(rp, (caddr_t)tagaccess, traptype, 0); 10809 } 10810 10811 /* 10812 * sfmmu_vatopfn_suspended is called from GET_TTE when TL=0 and 10813 * TTE_SUSPENDED bit set in tte we block on aquiring a page lock 10814 * rather than spinning to avoid send mondo timeouts with 10815 * interrupts enabled. When the lock is acquired it is immediately 10816 * released and we return back to sfmmu_vatopfn just after 10817 * the GET_TTE call. 10818 */ 10819 void 10820 sfmmu_vatopfn_suspended(caddr_t vaddr, sfmmu_t *sfmmu, tte_t *ttep) 10821 { 10822 struct page **pp; 10823 10824 (void) as_pagelock(sfmmu->sfmmu_as, &pp, vaddr, TTE_CSZ(ttep), S_WRITE); 10825 as_pageunlock(sfmmu->sfmmu_as, pp, vaddr, TTE_CSZ(ttep), S_WRITE); 10826 } 10827 10828 /* 10829 * sfmmu_tsbmiss_suspended is called from GET_TTE when TL>0 and 10830 * TTE_SUSPENDED bit set in tte. We do this so that we can handle 10831 * cross traps which cannot be handled while spinning in the 10832 * trap handlers. Simply enter and exit the kpr_suspendlock spin 10833 * mutex, which is held by the holder of the suspend bit, and then 10834 * retry the trapped instruction after unwinding. 10835 */ 10836 /*ARGSUSED*/ 10837 void 10838 sfmmu_tsbmiss_suspended(struct regs *rp, uintptr_t tagacc, uint_t traptype) 10839 { 10840 ASSERT(curthread != kreloc_thread); 10841 mutex_enter(&kpr_suspendlock); 10842 mutex_exit(&kpr_suspendlock); 10843 } 10844 10845 /* 10846 * Special routine to flush out ism mappings- TSBs, TLBs and D-caches. 10847 * This routine may be called with all cpu's captured. Therefore, the 10848 * caller is responsible for holding all locks and disabling kernel 10849 * preemption. 10850 */ 10851 /* ARGSUSED */ 10852 static void 10853 sfmmu_ismtlbcache_demap(caddr_t addr, sfmmu_t *ism_sfmmup, 10854 struct hme_blk *hmeblkp, pfn_t pfnum, int cache_flush_flag) 10855 { 10856 cpuset_t cpuset; 10857 caddr_t va; 10858 ism_ment_t *ment; 10859 sfmmu_t *sfmmup; 10860 int ctxnum; 10861 int vcolor; 10862 int ttesz; 10863 10864 /* 10865 * Walk the ism_hat's mapping list and flush the page 10866 * from every hat sharing this ism_hat. This routine 10867 * may be called while all cpu's have been captured. 10868 * Therefore we can't attempt to grab any locks. For now 10869 * this means we will protect the ism mapping list under 10870 * a single lock which will be grabbed by the caller. 10871 * If hat_share/unshare scalibility becomes a performance 10872 * problem then we may need to re-think ism mapping list locking. 10873 */ 10874 ASSERT(ism_sfmmup->sfmmu_ismhat); 10875 ASSERT(MUTEX_HELD(&ism_mlist_lock)); 10876 addr = addr - ISMID_STARTADDR; 10877 for (ment = ism_sfmmup->sfmmu_iment; ment; ment = ment->iment_next) { 10878 10879 sfmmup = ment->iment_hat; 10880 ctxnum = sfmmup->sfmmu_cnum; 10881 va = ment->iment_base_va; 10882 va = (caddr_t)((uintptr_t)va + (uintptr_t)addr); 10883 10884 /* 10885 * Flush TSB of ISM mappings. 
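 *
 * (8K and 4M translations live at a single, directly computable
 * TSB index, so one sfmmu_unload_tsb() per sharer suffices; for
 * 64K and 512K mappings the entry could presumably have been
 * installed at any 8K-indexed slot within the hmeblk's span,
 * which is why the whole range is walked below instead.)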
10886 */ 10887 ttesz = get_hblk_ttesz(hmeblkp); 10888 if (ttesz == TTE8K || ttesz == TTE4M) { 10889 sfmmu_unload_tsb(sfmmup, va, ttesz); 10890 } else { 10891 caddr_t sva = va; 10892 caddr_t eva; 10893 ASSERT(addr == (caddr_t)get_hblk_base(hmeblkp)); 10894 eva = sva + get_hblk_span(hmeblkp); 10895 sfmmu_unload_tsb_range(sfmmup, sva, eva, ttesz); 10896 } 10897 10898 if (ctxnum != INVALID_CONTEXT) { 10899 /* 10900 * Flush TLBs. We don't need to do this for 10901 * invalid context since the flushing is already 10902 * done as part of context stealing. 10903 */ 10904 cpuset = sfmmup->sfmmu_cpusran; 10905 CPUSET_AND(cpuset, cpu_ready_set); 10906 CPUSET_DEL(cpuset, CPU->cpu_id); 10907 SFMMU_XCALL_STATS(ctxnum); 10908 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)va, 10909 ctxnum); 10910 vtag_flushpage(va, ctxnum); 10911 } 10912 10913 /* 10914 * Flush D$ 10915 * When flushing D$ we must flush all 10916 * cpu's. See sfmmu_cache_flush(). 10917 */ 10918 if (cache_flush_flag == CACHE_FLUSH) { 10919 cpuset = cpu_ready_set; 10920 CPUSET_DEL(cpuset, CPU->cpu_id); 10921 SFMMU_XCALL_STATS(ctxnum); 10922 vcolor = addr_to_vcolor(va); 10923 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor); 10924 vac_flushpage(pfnum, vcolor); 10925 } 10926 } 10927 } 10928 10929 /* 10930 * Demaps the TSB, CPU caches, and flushes all TLBs on all CPUs of 10931 * a particular virtual address and ctx. If noflush is set we do not 10932 * flush the TLB/TSB. This function may or may not be called with the 10933 * HAT lock held. 10934 */ 10935 static void 10936 sfmmu_tlbcache_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp, 10937 pfn_t pfnum, int tlb_noflush, int cpu_flag, int cache_flush_flag, 10938 int hat_lock_held) 10939 { 10940 int ctxnum, vcolor; 10941 cpuset_t cpuset; 10942 hatlock_t *hatlockp; 10943 10944 /* 10945 * There is no longer a need to protect against ctx being 10946 * stolen here since we don't store the ctx in the TSB anymore. 10947 */ 10948 vcolor = addr_to_vcolor(addr); 10949 10950 kpreempt_disable(); 10951 if (!tlb_noflush) { 10952 /* 10953 * Flush the TSB. 10954 */ 10955 if (!hat_lock_held) 10956 hatlockp = sfmmu_hat_enter(sfmmup); 10957 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp); 10958 ctxnum = (int)sfmmutoctxnum(sfmmup); 10959 if (!hat_lock_held) 10960 sfmmu_hat_exit(hatlockp); 10961 10962 if (ctxnum != INVALID_CONTEXT) { 10963 /* 10964 * Flush TLBs. We don't need to do this if our 10965 * context is invalid context. Since we hold the 10966 * HAT lock the context must have been stolen and 10967 * hence will be flushed before re-use. 10968 */ 10969 cpuset = sfmmup->sfmmu_cpusran; 10970 CPUSET_AND(cpuset, cpu_ready_set); 10971 CPUSET_DEL(cpuset, CPU->cpu_id); 10972 SFMMU_XCALL_STATS(ctxnum); 10973 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, 10974 ctxnum); 10975 vtag_flushpage(addr, ctxnum); 10976 } 10977 } 10978 10979 /* 10980 * Flush the D$ 10981 * 10982 * Even if the ctx is stolen, we need to flush the 10983 * cache. Our ctx stealer only flushes the TLBs. 
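 *
 * Unlike sfmmu_ismtlbcache_demap() above, which always sends the
 * D$ flush to every ready CPU, this path flushes all CPUs only
 * when the caller passes FLUSH_ALL_CPUS; otherwise it is limited
 * to the CPUs in sfmmu_cpusran that this hat has run on.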
10984 */ 10985 if (cache_flush_flag == CACHE_FLUSH) { 10986 if (cpu_flag & FLUSH_ALL_CPUS) { 10987 cpuset = cpu_ready_set; 10988 } else { 10989 cpuset = sfmmup->sfmmu_cpusran; 10990 CPUSET_AND(cpuset, cpu_ready_set); 10991 } 10992 CPUSET_DEL(cpuset, CPU->cpu_id); 10993 SFMMU_XCALL_STATS(sfmmutoctxnum(sfmmup)); 10994 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor); 10995 vac_flushpage(pfnum, vcolor); 10996 } 10997 kpreempt_enable(); 10998 } 10999 11000 /* 11001 * Demaps the TSB and flushes all TLBs on all cpus for a particular virtual 11002 * address and ctx. If noflush is set we do not currently do anything. 11003 * This function may or may not be called with the HAT lock held. 11004 */ 11005 static void 11006 sfmmu_tlb_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp, 11007 int tlb_noflush, int hat_lock_held) 11008 { 11009 int ctxnum; 11010 cpuset_t cpuset; 11011 hatlock_t *hatlockp; 11012 11013 /* 11014 * If the process is exiting we have nothing to do. 11015 */ 11016 if (tlb_noflush) 11017 return; 11018 11019 /* 11020 * Flush TSB. 11021 */ 11022 if (!hat_lock_held) 11023 hatlockp = sfmmu_hat_enter(sfmmup); 11024 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp); 11025 ctxnum = sfmmutoctxnum(sfmmup); 11026 if (!hat_lock_held) 11027 sfmmu_hat_exit(hatlockp); 11028 11029 /* 11030 * Flush TLBs. We don't need to do this if our context is invalid 11031 * context. Since we hold the HAT lock the context must have been 11032 * stolen and hence will be flushed before re-use. 11033 */ 11034 if (ctxnum != INVALID_CONTEXT) { 11035 /* 11036 * There is no need to protect against ctx being stolen. 11037 * If the ctx is stolen we will simply get an extra flush. 11038 */ 11039 kpreempt_disable(); 11040 cpuset = sfmmup->sfmmu_cpusran; 11041 CPUSET_AND(cpuset, cpu_ready_set); 11042 CPUSET_DEL(cpuset, CPU->cpu_id); 11043 SFMMU_XCALL_STATS(ctxnum); 11044 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, ctxnum); 11045 vtag_flushpage(addr, ctxnum); 11046 kpreempt_enable(); 11047 } 11048 } 11049 11050 /* 11051 * Special case of sfmmu_tlb_demap for MMU_PAGESIZE hblks. Use the xcall 11052 * call handler that can flush a range of pages to save on xcalls. 11053 */ 11054 static int sfmmu_xcall_save; 11055 11056 static void 11057 sfmmu_tlb_range_demap(demap_range_t *dmrp) 11058 { 11059 sfmmu_t *sfmmup = dmrp->dmr_sfmmup; 11060 int ctxnum; 11061 hatlock_t *hatlockp; 11062 cpuset_t cpuset; 11063 uint64_t ctx_pgcnt; 11064 pgcnt_t pgcnt = 0; 11065 int pgunload = 0; 11066 int dirtypg = 0; 11067 caddr_t addr = dmrp->dmr_addr; 11068 caddr_t eaddr; 11069 uint64_t bitvec = dmrp->dmr_bitvec; 11070 11071 ASSERT(bitvec & 1); 11072 11073 /* 11074 * Flush TSB and calculate number of pages to flush. 11075 */ 11076 while (bitvec != 0) { 11077 dirtypg = 0; 11078 /* 11079 * Find the first page to flush and then count how many 11080 * pages there are after it that also need to be flushed. 11081 * This way the number of TSB flushes is minimized. 11082 */ 11083 while ((bitvec & 1) == 0) { 11084 pgcnt++; 11085 addr += MMU_PAGESIZE; 11086 bitvec >>= 1; 11087 } 11088 while (bitvec & 1) { 11089 dirtypg++; 11090 bitvec >>= 1; 11091 } 11092 eaddr = addr + ptob(dirtypg); 11093 hatlockp = sfmmu_hat_enter(sfmmup); 11094 sfmmu_unload_tsb_range(sfmmup, addr, eaddr, TTE8K); 11095 sfmmu_hat_exit(hatlockp); 11096 pgunload += dirtypg; 11097 addr = eaddr; 11098 pgcnt += dirtypg; 11099 } 11100 11101 /* 11102 * In the case where context is invalid context, bail. 
11103 * We hold the hat lock while checking the ctx to prevent 11104 * a race with sfmmu_replace_tsb() which temporarily sets 11105 * the ctx to INVALID_CONTEXT to force processes to enter 11106 * sfmmu_tsbmiss_exception(). 11107 */ 11108 hatlockp = sfmmu_hat_enter(sfmmup); 11109 ctxnum = sfmmutoctxnum(sfmmup); 11110 sfmmu_hat_exit(hatlockp); 11111 if (ctxnum == INVALID_CONTEXT) { 11112 dmrp->dmr_bitvec = 0; 11113 return; 11114 } 11115 11116 ASSERT((pgcnt<<MMU_PAGESHIFT) <= dmrp->dmr_endaddr - dmrp->dmr_addr); 11117 if (sfmmup->sfmmu_free == 0) { 11118 addr = dmrp->dmr_addr; 11119 bitvec = dmrp->dmr_bitvec; 11120 ctx_pgcnt = (uint64_t)((ctxnum << 16) | pgcnt); 11121 kpreempt_disable(); 11122 cpuset = sfmmup->sfmmu_cpusran; 11123 CPUSET_AND(cpuset, cpu_ready_set); 11124 CPUSET_DEL(cpuset, CPU->cpu_id); 11125 SFMMU_XCALL_STATS(ctxnum); 11126 xt_some(cpuset, vtag_flush_pgcnt_tl1, (uint64_t)addr, 11127 ctx_pgcnt); 11128 for (; bitvec != 0; bitvec >>= 1) { 11129 if (bitvec & 1) 11130 vtag_flushpage(addr, ctxnum); 11131 addr += MMU_PAGESIZE; 11132 } 11133 kpreempt_enable(); 11134 sfmmu_xcall_save += (pgunload-1); 11135 } 11136 dmrp->dmr_bitvec = 0; 11137 } 11138 11139 /* 11140 * Flushes only TLB. 11141 */ 11142 static void 11143 sfmmu_tlb_ctx_demap(sfmmu_t *sfmmup) 11144 { 11145 int ctxnum; 11146 cpuset_t cpuset; 11147 11148 ctxnum = (int)sfmmutoctxnum(sfmmup); 11149 if (ctxnum == INVALID_CONTEXT) { 11150 /* 11151 * if ctx was stolen then simply return 11152 * whoever stole ctx is responsible for flush. 11153 */ 11154 return; 11155 } 11156 ASSERT(ctxnum != KCONTEXT); 11157 /* 11158 * There is no need to protect against ctx being stolen. If the 11159 * ctx is stolen we will simply get an extra flush. 11160 */ 11161 kpreempt_disable(); 11162 11163 cpuset = sfmmup->sfmmu_cpusran; 11164 CPUSET_DEL(cpuset, CPU->cpu_id); 11165 CPUSET_AND(cpuset, cpu_ready_set); 11166 SFMMU_XCALL_STATS(ctxnum); 11167 11168 /* 11169 * Flush TLB. 11170 * RFE: it might be worth delaying the TLB flush as well. In that 11171 * case each cpu would have to traverse the dirty list and flush 11172 * each one of those ctx from the TLB. 11173 */ 11174 vtag_flushctx(ctxnum); 11175 xt_some(cpuset, vtag_flushctx_tl1, ctxnum, 0); 11176 11177 kpreempt_enable(); 11178 SFMMU_STAT(sf_tlbflush_ctx); 11179 } 11180 11181 /* 11182 * Flushes all TLBs. 11183 */ 11184 static void 11185 sfmmu_tlb_all_demap(void) 11186 { 11187 cpuset_t cpuset; 11188 11189 /* 11190 * There is no need to protect against ctx being stolen. If the 11191 * ctx is stolen we will simply get an extra flush. 11192 */ 11193 kpreempt_disable(); 11194 11195 cpuset = cpu_ready_set; 11196 CPUSET_DEL(cpuset, CPU->cpu_id); 11197 /* LINTED: constant in conditional context */ 11198 SFMMU_XCALL_STATS(INVALID_CONTEXT); 11199 11200 vtag_flushall(); 11201 xt_some(cpuset, vtag_flushall_tl1, 0, 0); 11202 xt_sync(cpuset); 11203 11204 kpreempt_enable(); 11205 SFMMU_STAT(sf_tlbflush_all); 11206 } 11207 11208 /* 11209 * In cases where we need to synchronize with TLB/TSB miss trap 11210 * handlers, _and_ need to flush the TLB, it's a lot easier to 11211 * steal the context from the process and free it than to do a 11212 * special song and dance to keep things consistent for the 11213 * handlers. 11214 * 11215 * Since the process suddenly ends up without a context and our caller 11216 * holds the hat lock, threads that fault after this function is called 11217 * will pile up on the lock. We can then do whatever we need to 11218 * atomically from the context of the caller. 
The first blocked thread 11219 * to resume executing will get the process a new context, and the 11220 * process will resume executing. 11221 * 11222 * One added advantage of this approach is that on MMUs that 11223 * support a "flush all" operation, we will delay the flush until 11224 * we run out of contexts, and then flush the TLB one time. This 11225 * is rather rare, so it's a lot less expensive than making 8000 11226 * x-calls to flush the TLB 8000 times. Another is that we can do 11227 * all of this without pausing CPUs, due to some knowledge of how 11228 * resume() loads processes onto the processor; it sets the thread 11229 * into cpusran, and _then_ looks at cnum. Because we do things in 11230 * the reverse order here, we guarantee exactly one of the following 11231 * statements is always true: 11232 * 11233 * 1) Nobody is in resume() so we have nothing to worry about anyway. 11234 * 2) The thread in resume() isn't in cpusran when we do the xcall, 11235 * so we know when it does set itself it'll see cnum is 11236 * INVALID_CONTEXT. 11237 * 3) The thread in resume() is in cpusran, and already might have 11238 * looked at the old cnum. That's OK, because we'll xcall it 11239 * and, if necessary, flush the TLB along with the rest of the 11240 * crowd. 11241 */ 11242 static void 11243 sfmmu_tlb_swap_ctx(sfmmu_t *sfmmup, struct ctx *ctx) 11244 { 11245 cpuset_t cpuset; 11246 int cnum; 11247 11248 if (sfmmup->sfmmu_cnum == INVALID_CONTEXT) 11249 return; 11250 11251 SFMMU_STAT(sf_ctx_swap); 11252 11253 kpreempt_disable(); 11254 11255 ASSERT(rw_read_locked(&ctx->ctx_rwlock) == 0); 11256 ASSERT(ctx->ctx_sfmmu == sfmmup); 11257 11258 cnum = ctxtoctxnum(ctx); 11259 ASSERT(sfmmup->sfmmu_cnum == cnum); 11260 ASSERT(cnum >= NUM_LOCKED_CTXS); 11261 11262 sfmmup->sfmmu_cnum = INVALID_CONTEXT; 11263 membar_enter(); /* make sure visible on all CPUs */ 11264 ctx->ctx_sfmmu = NULL; 11265 11266 cpuset = sfmmup->sfmmu_cpusran; 11267 CPUSET_DEL(cpuset, CPU->cpu_id); 11268 CPUSET_AND(cpuset, cpu_ready_set); 11269 SFMMU_XCALL_STATS(cnum); 11270 11271 /* 11272 * Force anybody running this process on CPU 11273 * to enter sfmmu_tsbmiss_exception() on the 11274 * next TLB miss, synchronize behind us on 11275 * the HAT lock, and grab a new context. At 11276 * that point the new page size will become 11277 * active in the TLB for the new context. 11278 * See sfmmu_get_ctx() for details. 11279 */ 11280 if (delay_tlb_flush) { 11281 xt_some(cpuset, sfmmu_raise_tsb_exception, 11282 cnum, INVALID_CONTEXT); 11283 SFMMU_STAT(sf_tlbflush_deferred); 11284 } else { 11285 xt_some(cpuset, sfmmu_ctx_steal_tl1, cnum, INVALID_CONTEXT); 11286 vtag_flushctx(cnum); 11287 SFMMU_STAT(sf_tlbflush_ctx); 11288 } 11289 xt_sync(cpuset); 11290 11291 /* 11292 * If we just stole the ctx from the current 11293 * process on local CPU we need to invalidate 11294 * this CPU context as well. 11295 */ 11296 if (sfmmu_getctx_sec() == cnum) { 11297 sfmmu_setctx_sec(INVALID_CONTEXT); 11298 sfmmu_clear_utsbinfo(); 11299 } 11300 11301 kpreempt_enable(); 11302 11303 /* 11304 * Now put old ctx on the dirty list since we may not 11305 * have flushed the context out of the TLB. We'll let 11306 * the next guy who uses this ctx flush it instead. 11307 */ 11308 mutex_enter(&ctx_list_lock); 11309 CTX_SET_FLAGS(ctx, CTX_FREE_FLAG); 11310 ctx->ctx_free = ctxdirty; 11311 ctxdirty = ctx; 11312 mutex_exit(&ctx_list_lock); 11313 } 11314 11315 /* 11316 * We need to flush the cache in all cpus. 
It is possible that 11317 * a process referenced a page as cacheable but has sinced exited 11318 * and cleared the mapping list. We still to flush it but have no 11319 * state so all cpus is the only alternative. 11320 */ 11321 void 11322 sfmmu_cache_flush(pfn_t pfnum, int vcolor) 11323 { 11324 cpuset_t cpuset; 11325 int ctxnum = INVALID_CONTEXT; 11326 11327 kpreempt_disable(); 11328 cpuset = cpu_ready_set; 11329 CPUSET_DEL(cpuset, CPU->cpu_id); 11330 SFMMU_XCALL_STATS(ctxnum); /* account to any ctx */ 11331 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor); 11332 xt_sync(cpuset); 11333 vac_flushpage(pfnum, vcolor); 11334 kpreempt_enable(); 11335 } 11336 11337 void 11338 sfmmu_cache_flushcolor(int vcolor, pfn_t pfnum) 11339 { 11340 cpuset_t cpuset; 11341 int ctxnum = INVALID_CONTEXT; 11342 11343 ASSERT(vcolor >= 0); 11344 11345 kpreempt_disable(); 11346 cpuset = cpu_ready_set; 11347 CPUSET_DEL(cpuset, CPU->cpu_id); 11348 SFMMU_XCALL_STATS(ctxnum); /* account to any ctx */ 11349 xt_some(cpuset, vac_flushcolor_tl1, vcolor, pfnum); 11350 xt_sync(cpuset); 11351 vac_flushcolor(vcolor, pfnum); 11352 kpreempt_enable(); 11353 } 11354 11355 /* 11356 * We need to prevent processes from accessing the TSB using a cached physical 11357 * address. It's alright if they try to access the TSB via virtual address 11358 * since they will just fault on that virtual address once the mapping has 11359 * been suspended. 11360 */ 11361 #pragma weak sendmondo_in_recover 11362 11363 /* ARGSUSED */ 11364 static int 11365 sfmmu_tsb_pre_relocator(caddr_t va, uint_t tsbsz, uint_t flags, void *tsbinfo) 11366 { 11367 hatlock_t *hatlockp; 11368 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo; 11369 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu; 11370 struct ctx *ctx; 11371 int cnum; 11372 extern uint32_t sendmondo_in_recover; 11373 11374 if (flags != HAT_PRESUSPEND) 11375 return (0); 11376 11377 hatlockp = sfmmu_hat_enter(sfmmup); 11378 11379 tsbinfop->tsb_flags |= TSB_RELOC_FLAG; 11380 11381 /* 11382 * For Cheetah+ Erratum 25: 11383 * Wait for any active recovery to finish. We can't risk 11384 * relocating the TSB of the thread running mondo_recover_proc() 11385 * since, if we did that, we would deadlock. The scenario we are 11386 * trying to avoid is as follows: 11387 * 11388 * THIS CPU RECOVER CPU 11389 * -------- ----------- 11390 * Begins recovery, walking through TSB 11391 * hat_pagesuspend() TSB TTE 11392 * TLB miss on TSB TTE, spins at TL1 11393 * xt_sync() 11394 * send_mondo_timeout() 11395 * mondo_recover_proc() 11396 * ((deadlocked)) 11397 * 11398 * The second half of the workaround is that mondo_recover_proc() 11399 * checks to see if the tsb_info has the RELOC flag set, and if it 11400 * does, it skips over that TSB without ever touching tsbinfop->tsb_va 11401 * and hence avoiding the TLB miss that could result in a deadlock. 11402 */ 11403 if (&sendmondo_in_recover) { 11404 membar_enter(); /* make sure RELOC flag visible */ 11405 while (sendmondo_in_recover) { 11406 drv_usecwait(1); 11407 membar_consumer(); 11408 } 11409 } 11410 11411 ctx = sfmmutoctx(sfmmup); 11412 rw_enter(&ctx->ctx_rwlock, RW_WRITER); 11413 cnum = sfmmutoctxnum(sfmmup); 11414 11415 if (cnum != INVALID_CONTEXT) { 11416 /* 11417 * Force all threads for this sfmmu to sfmmu_tsbmiss_exception 11418 * on their next TLB miss. 
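 *
 * The miss path then serializes behind this relocation; roughly
 * (restating sfmmu_tsbmiss_exception()):
 *
 *	TLB miss with cnum == INVALID_CONTEXT
 *	    sfmmu_tsbmiss_exception()
 *		cv_wait()s while any tsbinfo has TSB_RELOC_FLAG set
 *		sfmmu_disallow_ctx_steal()  (re-acquires a context)
 *		sfmmu_load_mmustate()       (picks up the new TSB base)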
11419 */ 11420 sfmmu_tlb_swap_ctx(sfmmup, ctx); 11421 } 11422 11423 rw_exit(&ctx->ctx_rwlock); 11424 11425 sfmmu_hat_exit(hatlockp); 11426 11427 return (0); 11428 } 11429 11430 /* ARGSUSED */ 11431 static int 11432 sfmmu_tsb_post_relocator(caddr_t va, uint_t tsbsz, uint_t flags, 11433 void *tsbinfo, pfn_t newpfn) 11434 { 11435 hatlock_t *hatlockp; 11436 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo; 11437 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu; 11438 11439 if (flags != HAT_POSTUNSUSPEND) 11440 return (0); 11441 11442 hatlockp = sfmmu_hat_enter(sfmmup); 11443 11444 SFMMU_STAT(sf_tsb_reloc); 11445 11446 /* 11447 * The process may have swapped out while we were relocating one 11448 * of its TSBs. If so, don't bother doing the setup since the 11449 * process can't be using the memory anymore. 11450 */ 11451 if ((tsbinfop->tsb_flags & TSB_SWAPPED) == 0) { 11452 ASSERT(va == tsbinfop->tsb_va); 11453 sfmmu_tsbinfo_setup_phys(tsbinfop, newpfn); 11454 sfmmu_setup_tsbinfo(sfmmup); 11455 11456 if (tsbinfop->tsb_flags & TSB_FLUSH_NEEDED) { 11457 sfmmu_inv_tsb(tsbinfop->tsb_va, 11458 TSB_BYTES(tsbinfop->tsb_szc)); 11459 tsbinfop->tsb_flags &= ~TSB_FLUSH_NEEDED; 11460 } 11461 } 11462 11463 membar_exit(); 11464 tsbinfop->tsb_flags &= ~TSB_RELOC_FLAG; 11465 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 11466 11467 sfmmu_hat_exit(hatlockp); 11468 11469 return (0); 11470 } 11471 11472 /* 11473 * Allocate and initialize a tsb_info structure. Note that we may or may not 11474 * allocate a TSB here, depending on the flags passed in. 11475 */ 11476 static int 11477 sfmmu_tsbinfo_alloc(struct tsb_info **tsbinfopp, int tsb_szc, int tte_sz_mask, 11478 uint_t flags, sfmmu_t *sfmmup) 11479 { 11480 int err; 11481 11482 *tsbinfopp = (struct tsb_info *)kmem_cache_alloc( 11483 sfmmu_tsbinfo_cache, KM_SLEEP); 11484 11485 if ((err = sfmmu_init_tsbinfo(*tsbinfopp, tte_sz_mask, 11486 tsb_szc, flags, sfmmup)) != 0) { 11487 kmem_cache_free(sfmmu_tsbinfo_cache, *tsbinfopp); 11488 SFMMU_STAT(sf_tsb_allocfail); 11489 *tsbinfopp = NULL; 11490 return (err); 11491 } 11492 SFMMU_STAT(sf_tsb_alloc); 11493 11494 /* 11495 * Bump the TSB size counters for this TSB size. 11496 */ 11497 (*(((int *)&sfmmu_tsbsize_stat) + tsb_szc))++; 11498 return (0); 11499 } 11500 11501 static void 11502 sfmmu_tsb_free(struct tsb_info *tsbinfo) 11503 { 11504 caddr_t tsbva = tsbinfo->tsb_va; 11505 uint_t tsb_size = TSB_BYTES(tsbinfo->tsb_szc); 11506 struct kmem_cache *kmem_cachep = tsbinfo->tsb_cache; 11507 vmem_t *vmp = tsbinfo->tsb_vmp; 11508 11509 /* 11510 * If we allocated this TSB from relocatable kernel memory, then we 11511 * need to uninstall the callback handler. 
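 *
 * This is the tear-down mirror of sfmmu_init_tsbinfo(): only TSBs
 * that did not come from sfmmu_tsb8k_cache had hat_add_callback()
 * run against them, so only those need hat_delete_callback() here,
 * and the slab root page is pagelocked around the call just as it
 * was at registration time.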
11512 */ 11513 if (tsbinfo->tsb_cache != sfmmu_tsb8k_cache) { 11514 uintptr_t slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT; 11515 caddr_t slab_vaddr = (caddr_t)((uintptr_t)tsbva & slab_mask); 11516 page_t **ppl; 11517 int ret; 11518 11519 ret = as_pagelock(&kas, &ppl, slab_vaddr, PAGESIZE, S_WRITE); 11520 ASSERT(ret == 0); 11521 hat_delete_callback(tsbva, (uint_t)tsb_size, (void *)tsbinfo, 11522 0); 11523 as_pageunlock(&kas, ppl, slab_vaddr, PAGESIZE, S_WRITE); 11524 } 11525 11526 if (kmem_cachep != NULL) { 11527 kmem_cache_free(kmem_cachep, tsbva); 11528 } else { 11529 vmem_xfree(vmp, (void *)tsbva, tsb_size); 11530 } 11531 tsbinfo->tsb_va = (caddr_t)0xbad00bad; 11532 atomic_add_64(&tsb_alloc_bytes, -(int64_t)tsb_size); 11533 } 11534 11535 static void 11536 sfmmu_tsbinfo_free(struct tsb_info *tsbinfo) 11537 { 11538 if ((tsbinfo->tsb_flags & TSB_SWAPPED) == 0) { 11539 sfmmu_tsb_free(tsbinfo); 11540 } 11541 kmem_cache_free(sfmmu_tsbinfo_cache, tsbinfo); 11542 11543 } 11544 11545 /* 11546 * Setup all the references to physical memory for this tsbinfo. 11547 * The underlying page(s) must be locked. 11548 */ 11549 static void 11550 sfmmu_tsbinfo_setup_phys(struct tsb_info *tsbinfo, pfn_t pfn) 11551 { 11552 ASSERT(pfn != PFN_INVALID); 11553 ASSERT(pfn == va_to_pfn(tsbinfo->tsb_va)); 11554 11555 #ifndef sun4v 11556 if (tsbinfo->tsb_szc == 0) { 11557 sfmmu_memtte(&tsbinfo->tsb_tte, pfn, 11558 PROT_WRITE|PROT_READ, TTE8K); 11559 } else { 11560 /* 11561 * Round down PA and use a large mapping; the handlers will 11562 * compute the TSB pointer at the correct offset into the 11563 * big virtual page. NOTE: this assumes all TSBs larger 11564 * than 8K must come from physically contiguous slabs of 11565 * size tsb_slab_size. 11566 */ 11567 sfmmu_memtte(&tsbinfo->tsb_tte, pfn & ~tsb_slab_mask, 11568 PROT_WRITE|PROT_READ, tsb_slab_ttesz); 11569 } 11570 tsbinfo->tsb_pa = ptob(pfn); 11571 11572 TTE_SET_LOCKED(&tsbinfo->tsb_tte); /* lock the tte into dtlb */ 11573 TTE_SET_MOD(&tsbinfo->tsb_tte); /* enable writes */ 11574 11575 ASSERT(TTE_IS_PRIVILEGED(&tsbinfo->tsb_tte)); 11576 ASSERT(TTE_IS_LOCKED(&tsbinfo->tsb_tte)); 11577 #else /* sun4v */ 11578 tsbinfo->tsb_pa = ptob(pfn); 11579 #endif /* sun4v */ 11580 } 11581 11582 11583 /* 11584 * Returns zero on success, ENOMEM if over the high water mark, 11585 * or EAGAIN if the caller needs to retry with a smaller TSB 11586 * size (or specify TSB_FORCEALLOC if the allocation can't fail). 11587 * 11588 * This call cannot fail to allocate a TSB if TSB_FORCEALLOC 11589 * is specified and the TSB requested is PAGESIZE, though it 11590 * may sleep waiting for memory if sufficient memory is not 11591 * available. 11592 */ 11593 static int 11594 sfmmu_init_tsbinfo(struct tsb_info *tsbinfo, int tteszmask, 11595 int tsbcode, uint_t flags, sfmmu_t *sfmmup) 11596 { 11597 caddr_t vaddr = NULL; 11598 caddr_t slab_vaddr; 11599 uintptr_t slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT; 11600 int tsbbytes = TSB_BYTES(tsbcode); 11601 int lowmem = 0; 11602 struct kmem_cache *kmem_cachep = NULL; 11603 vmem_t *vmp = NULL; 11604 lgrp_id_t lgrpid = LGRP_NONE; 11605 pfn_t pfn; 11606 uint_t cbflags = HAC_SLEEP; 11607 page_t **pplist; 11608 int ret; 11609 11610 if (flags & (TSB_FORCEALLOC | TSB_SWAPIN | TSB_GROW | TSB_SHRINK)) 11611 flags |= TSB_ALLOC; 11612 11613 ASSERT((flags & TSB_FORCEALLOC) == 0 || tsbcode == TSB_MIN_SZCODE); 11614 11615 tsbinfo->tsb_sfmmu = sfmmup; 11616 11617 /* 11618 * If not allocating a TSB, set up the tsbinfo, set TSB_SWAPPED, and 11619 * return. 
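 *
 * A tsbinfo left in this TSB_SWAPPED state owns no TSB memory at
 * all (tsb_va holds a poison value and tsb_pa is -1); it is later
 * given a real TSB by sfmmu_tsb_swapin(), which runs
 * sfmmu_replace_tsb(..., TSB_SWAPIN) on every swapped tsbinfo.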
11620 */ 11621 if ((flags & TSB_ALLOC) == 0) { 11622 tsbinfo->tsb_szc = tsbcode; 11623 tsbinfo->tsb_ttesz_mask = tteszmask; 11624 tsbinfo->tsb_va = (caddr_t)0xbadbadbeef; 11625 tsbinfo->tsb_pa = -1; 11626 tsbinfo->tsb_tte.ll = 0; 11627 tsbinfo->tsb_next = NULL; 11628 tsbinfo->tsb_flags = TSB_SWAPPED; 11629 tsbinfo->tsb_cache = NULL; 11630 tsbinfo->tsb_vmp = NULL; 11631 return (0); 11632 } 11633 11634 #ifdef DEBUG 11635 /* 11636 * For debugging: 11637 * Randomly force allocation failures every tsb_alloc_mtbf 11638 * tries if TSB_FORCEALLOC is not specified. This will 11639 * return ENOMEM if tsb_alloc_mtbf is odd, or EAGAIN if 11640 * it is even, to allow testing of both failure paths... 11641 */ 11642 if (tsb_alloc_mtbf && ((flags & TSB_FORCEALLOC) == 0) && 11643 (tsb_alloc_count++ == tsb_alloc_mtbf)) { 11644 tsb_alloc_count = 0; 11645 tsb_alloc_fail_mtbf++; 11646 return ((tsb_alloc_mtbf & 1)? ENOMEM : EAGAIN); 11647 } 11648 #endif /* DEBUG */ 11649 11650 /* 11651 * Enforce high water mark if we are not doing a forced allocation 11652 * and are not shrinking a process' TSB. 11653 */ 11654 if ((flags & TSB_SHRINK) == 0 && 11655 (tsbbytes + tsb_alloc_bytes) > tsb_alloc_hiwater) { 11656 if ((flags & TSB_FORCEALLOC) == 0) 11657 return (ENOMEM); 11658 lowmem = 1; 11659 } 11660 11661 /* 11662 * Allocate from the correct location based upon the size of the TSB 11663 * compared to the base page size, and what memory conditions dictate. 11664 * Note we always do nonblocking allocations from the TSB arena since 11665 * we don't want memory fragmentation to cause processes to block 11666 * indefinitely waiting for memory; until the kernel algorithms that 11667 * coalesce large pages are improved this is our best option. 11668 * 11669 * Algorithm: 11670 * If allocating a "large" TSB (>8K), allocate from the 11671 * appropriate kmem_tsb_default_arena vmem arena 11672 * else if low on memory or the TSB_FORCEALLOC flag is set or 11673 * tsb_forceheap is set 11674 * Allocate from kernel heap via sfmmu_tsb8k_cache with 11675 * KM_SLEEP (never fails) 11676 * else 11677 * Allocate from appropriate sfmmu_tsb_cache with 11678 * KM_NOSLEEP 11679 * endif 11680 */ 11681 if (tsb_lgrp_affinity) 11682 lgrpid = lgrp_home_id(curthread); 11683 if (lgrpid == LGRP_NONE) 11684 lgrpid = 0; /* use lgrp of boot CPU */ 11685 11686 if (tsbbytes > MMU_PAGESIZE) { 11687 vmp = kmem_tsb_default_arena[lgrpid]; 11688 vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes, 0, 0, 11689 NULL, NULL, VM_NOSLEEP); 11690 #ifdef DEBUG 11691 } else if (lowmem || (flags & TSB_FORCEALLOC) || tsb_forceheap) { 11692 #else /* !DEBUG */ 11693 } else if (lowmem || (flags & TSB_FORCEALLOC)) { 11694 #endif /* DEBUG */ 11695 kmem_cachep = sfmmu_tsb8k_cache; 11696 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_SLEEP); 11697 ASSERT(vaddr != NULL); 11698 } else { 11699 kmem_cachep = sfmmu_tsb_cache[lgrpid]; 11700 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_NOSLEEP); 11701 } 11702 11703 tsbinfo->tsb_cache = kmem_cachep; 11704 tsbinfo->tsb_vmp = vmp; 11705 11706 if (vaddr == NULL) { 11707 return (EAGAIN); 11708 } 11709 11710 atomic_add_64(&tsb_alloc_bytes, (int64_t)tsbbytes); 11711 kmem_cachep = tsbinfo->tsb_cache; 11712 11713 /* 11714 * If we are allocating from outside the cage, then we need to 11715 * register a relocation callback handler. Note that for now 11716 * since pseudo mappings always hang off of the slab's root page, 11717 * we need only lock the first 8K of the TSB slab. This is a bit 11718 * hacky but it is good for performance. 
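 *
 * Worked example (assuming, for illustration, a 4MB TSB slab and
 * 8K base pages, so tsb_slab_mask would be 0x1ff):
 *
 *	slab_mask  == ~0x1ff << PAGESHIFT   (a 4MB-aligned VA mask)
 *	slab_vaddr == vaddr & slab_mask     (the slab's root page)
 *
 * and that root page is exactly what as_pagelock() pins below.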
11719 */ 11720 if (kmem_cachep != sfmmu_tsb8k_cache) { 11721 slab_vaddr = (caddr_t)((uintptr_t)vaddr & slab_mask); 11722 ret = as_pagelock(&kas, &pplist, slab_vaddr, PAGESIZE, S_WRITE); 11723 ASSERT(ret == 0); 11724 ret = hat_add_callback(sfmmu_tsb_cb_id, vaddr, (uint_t)tsbbytes, 11725 cbflags, (void *)tsbinfo, &pfn); 11726 11727 /* 11728 * Need to free up resources if we could not successfully 11729 * add the callback function and return an error condition. 11730 */ 11731 if (ret != 0) { 11732 if (kmem_cachep) { 11733 kmem_cache_free(kmem_cachep, vaddr); 11734 } else { 11735 vmem_xfree(vmp, (void *)vaddr, tsbbytes); 11736 } 11737 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE, 11738 S_WRITE); 11739 return (EAGAIN); 11740 } 11741 } else { 11742 /* 11743 * Since allocation of 8K TSBs from heap is rare and occurs 11744 * during memory pressure we allocate them from permanent 11745 * memory rather than using callbacks to get the PFN. 11746 */ 11747 pfn = hat_getpfnum(kas.a_hat, vaddr); 11748 } 11749 11750 tsbinfo->tsb_va = vaddr; 11751 tsbinfo->tsb_szc = tsbcode; 11752 tsbinfo->tsb_ttesz_mask = tteszmask; 11753 tsbinfo->tsb_next = NULL; 11754 tsbinfo->tsb_flags = 0; 11755 11756 sfmmu_tsbinfo_setup_phys(tsbinfo, pfn); 11757 11758 if (kmem_cachep != sfmmu_tsb8k_cache) { 11759 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE, S_WRITE); 11760 } 11761 11762 sfmmu_inv_tsb(vaddr, tsbbytes); 11763 return (0); 11764 } 11765 11766 /* 11767 * Initialize per cpu tsb and per cpu tsbmiss_area 11768 */ 11769 void 11770 sfmmu_init_tsbs(void) 11771 { 11772 int i; 11773 struct tsbmiss *tsbmissp; 11774 struct kpmtsbm *kpmtsbmp; 11775 #ifndef sun4v 11776 extern int dcache_line_mask; 11777 #endif /* sun4v */ 11778 extern uint_t vac_colors; 11779 11780 /* 11781 * Init. tsb miss area. 11782 */ 11783 tsbmissp = tsbmiss_area; 11784 11785 for (i = 0; i < NCPU; tsbmissp++, i++) { 11786 /* 11787 * initialize the tsbmiss area. 11788 * Do this for all possible CPUs as some may be added 11789 * while the system is running. There is no cost to this. 11790 */ 11791 tsbmissp->ksfmmup = ksfmmup; 11792 #ifndef sun4v 11793 tsbmissp->dcache_line_mask = (uint16_t)dcache_line_mask; 11794 #endif /* sun4v */ 11795 tsbmissp->khashstart = 11796 (struct hmehash_bucket *)va_to_pa((caddr_t)khme_hash); 11797 tsbmissp->uhashstart = 11798 (struct hmehash_bucket *)va_to_pa((caddr_t)uhme_hash); 11799 tsbmissp->khashsz = khmehash_num; 11800 tsbmissp->uhashsz = uhmehash_num; 11801 } 11802 11803 sfmmu_tsb_cb_id = hat_register_callback('T'<<16 | 'S' << 8 | 'B', 11804 sfmmu_tsb_pre_relocator, sfmmu_tsb_post_relocator, NULL, 0); 11805 11806 if (kpm_enable == 0) 11807 return; 11808 11809 /* -- Begin KPM specific init -- */ 11810 11811 if (kpm_smallpages) { 11812 /* 11813 * If we're using base pagesize pages for seg_kpm 11814 * mappings, we use the kernel TSB since we can't afford 11815 * to allocate a second huge TSB for these mappings. 11816 */ 11817 kpm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base; 11818 kpm_tsbsz = ktsb_szcode; 11819 kpmsm_tsbbase = kpm_tsbbase; 11820 kpmsm_tsbsz = kpm_tsbsz; 11821 } else { 11822 /* 11823 * In VAC conflict case, just put the entries in the 11824 * kernel 8K indexed TSB for now so we can find them. 11825 * This could really be changed in the future if we feel 11826 * the need... 11827 */ 11828 kpmsm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base; 11829 kpmsm_tsbsz = ktsb_szcode; 11830 kpm_tsbbase = ktsb_phys? 
ktsb4m_pbase : (uint64_t)ktsb4m_base; 11831 kpm_tsbsz = ktsb4m_szcode; 11832 } 11833 11834 kpmtsbmp = kpmtsbm_area; 11835 for (i = 0; i < NCPU; kpmtsbmp++, i++) { 11836 /* 11837 * Initialize the kpmtsbm area. 11838 * Do this for all possible CPUs as some may be added 11839 * while the system is running. There is no cost to this. 11840 */ 11841 kpmtsbmp->vbase = kpm_vbase; 11842 kpmtsbmp->vend = kpm_vbase + kpm_size * vac_colors; 11843 kpmtsbmp->sz_shift = kpm_size_shift; 11844 kpmtsbmp->kpmp_shift = kpmp_shift; 11845 kpmtsbmp->kpmp2pshft = (uchar_t)kpmp2pshft; 11846 if (kpm_smallpages == 0) { 11847 kpmtsbmp->kpmp_table_sz = kpmp_table_sz; 11848 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_table); 11849 } else { 11850 kpmtsbmp->kpmp_table_sz = kpmp_stable_sz; 11851 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_stable); 11852 } 11853 kpmtsbmp->msegphashpa = va_to_pa(memseg_phash); 11854 kpmtsbmp->flags = KPMTSBM_ENABLE_FLAG; 11855 #ifdef DEBUG 11856 kpmtsbmp->flags |= (kpm_tsbmtl) ? KPMTSBM_TLTSBM_FLAG : 0; 11857 #endif /* DEBUG */ 11858 if (ktsb_phys) 11859 kpmtsbmp->flags |= KPMTSBM_TSBPHYS_FLAG; 11860 } 11861 11862 /* -- End KPM specific init -- */ 11863 } 11864 11865 /* Avoid using sfmmu_tsbinfo_alloc() to avoid kmem_alloc - no real reason */ 11866 struct tsb_info ktsb_info[2]; 11867 11868 /* 11869 * Called from hat_kern_setup() to setup the tsb_info for ksfmmup. 11870 */ 11871 void 11872 sfmmu_init_ktsbinfo() 11873 { 11874 ASSERT(ksfmmup != NULL); 11875 ASSERT(ksfmmup->sfmmu_tsb == NULL); 11876 /* 11877 * Allocate tsbinfos for kernel and copy in data 11878 * to make debug easier and sun4v setup easier. 11879 */ 11880 ktsb_info[0].tsb_sfmmu = ksfmmup; 11881 ktsb_info[0].tsb_szc = ktsb_szcode; 11882 ktsb_info[0].tsb_ttesz_mask = TSB8K|TSB64K|TSB512K; 11883 ktsb_info[0].tsb_va = ktsb_base; 11884 ktsb_info[0].tsb_pa = ktsb_pbase; 11885 ktsb_info[0].tsb_flags = 0; 11886 ktsb_info[0].tsb_tte.ll = 0; 11887 ktsb_info[0].tsb_cache = NULL; 11888 11889 ktsb_info[1].tsb_sfmmu = ksfmmup; 11890 ktsb_info[1].tsb_szc = ktsb4m_szcode; 11891 ktsb_info[1].tsb_ttesz_mask = TSB4M; 11892 ktsb_info[1].tsb_va = ktsb4m_base; 11893 ktsb_info[1].tsb_pa = ktsb4m_pbase; 11894 ktsb_info[1].tsb_flags = 0; 11895 ktsb_info[1].tsb_tte.ll = 0; 11896 ktsb_info[1].tsb_cache = NULL; 11897 11898 /* Link them into ksfmmup. */ 11899 ktsb_info[0].tsb_next = &ktsb_info[1]; 11900 ktsb_info[1].tsb_next = NULL; 11901 ksfmmup->sfmmu_tsb = &ktsb_info[0]; 11902 11903 sfmmu_setup_tsbinfo(ksfmmup); 11904 } 11905 11906 /* 11907 * Cache the last value returned from va_to_pa(). If the VA specified 11908 * in the current call to cached_va_to_pa() maps to the same Page (as the 11909 * previous call to cached_va_to_pa()), then compute the PA using 11910 * cached info, else call va_to_pa(). 11911 * 11912 * Note: this function is neither MT-safe nor consistent in the presence 11913 * of multiple, interleaved threads. This function was created to enable 11914 * an optimization used during boot (at a point when there's only one thread 11915 * executing on the "boot CPU", and before startup_vm() has been called). 11916 */ 11917 static uint64_t 11918 cached_va_to_pa(void *vaddr) 11919 { 11920 static uint64_t prev_vaddr_base = 0; 11921 static uint64_t prev_pfn = 0; 11922 11923 if ((((uint64_t)vaddr) & MMU_PAGEMASK) == prev_vaddr_base) { 11924 return (prev_pfn | ((uint64_t)vaddr & MMU_PAGEOFFSET)); 11925 } else { 11926 uint64_t pa = va_to_pa(vaddr); 11927 11928 if (pa != ((uint64_t)-1)) { 11929 /* 11930 * Computed physical address is valid. 
Cache its 11931 * related info for the next cached_va_to_pa() call. 11932 */ 11933 prev_pfn = pa & MMU_PAGEMASK; 11934 prev_vaddr_base = ((uint64_t)vaddr) & MMU_PAGEMASK; 11935 } 11936 11937 return (pa); 11938 } 11939 } 11940 11941 /* 11942 * Carve up our nucleus hblk region. We may allocate more hblks than 11943 * asked due to rounding errors but we are guaranteed to have at least 11944 * enough space to allocate the requested number of hblk8's and hblk1's. 11945 */ 11946 void 11947 sfmmu_init_nucleus_hblks(caddr_t addr, size_t size, int nhblk8, int nhblk1) 11948 { 11949 struct hme_blk *hmeblkp; 11950 size_t hme8blk_sz, hme1blk_sz; 11951 size_t i; 11952 size_t hblk8_bound; 11953 ulong_t j = 0, k = 0; 11954 11955 ASSERT(addr != NULL && size != 0); 11956 11957 /* Need to use proper structure alignment */ 11958 hme8blk_sz = roundup(HME8BLK_SZ, sizeof (int64_t)); 11959 hme1blk_sz = roundup(HME1BLK_SZ, sizeof (int64_t)); 11960 11961 nucleus_hblk8.list = (void *)addr; 11962 nucleus_hblk8.index = 0; 11963 11964 /* 11965 * Use as much memory as possible for hblk8's since we 11966 * expect all bop_alloc'ed memory to be allocated in 8k chunks. 11967 * We need to hold back enough space for the hblk1's which 11968 * we'll allocate next. 11969 */ 11970 hblk8_bound = size - (nhblk1 * hme1blk_sz) - hme8blk_sz; 11971 for (i = 0; i <= hblk8_bound; i += hme8blk_sz, j++) { 11972 hmeblkp = (struct hme_blk *)addr; 11973 addr += hme8blk_sz; 11974 hmeblkp->hblk_nuc_bit = 1; 11975 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp); 11976 } 11977 nucleus_hblk8.len = j; 11978 ASSERT(j >= nhblk8); 11979 SFMMU_STAT_ADD(sf_hblk8_ncreate, j); 11980 11981 nucleus_hblk1.list = (void *)addr; 11982 nucleus_hblk1.index = 0; 11983 for (; i <= (size - hme1blk_sz); i += hme1blk_sz, k++) { 11984 hmeblkp = (struct hme_blk *)addr; 11985 addr += hme1blk_sz; 11986 hmeblkp->hblk_nuc_bit = 1; 11987 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp); 11988 } 11989 ASSERT(k >= nhblk1); 11990 nucleus_hblk1.len = k; 11991 SFMMU_STAT_ADD(sf_hblk1_ncreate, k); 11992 } 11993 11994 /* 11995 * This function is currently not supported on this platform. For what 11996 * it's supposed to do, see hat.c and hat_srmmu.c. 11997 */ 11998 /* ARGSUSED */ 11999 faultcode_t 12000 hat_softlock(struct hat *hat, caddr_t addr, size_t *lenp, page_t **ppp, 12001 uint_t flags) 12002 { 12003 ASSERT(hat->sfmmu_xhat_provider == NULL); 12004 return (FC_NOSUPPORT); 12005 } 12006 12007 /* 12008 * Searches the mapping list of the page for a mapping of the same size. If not 12009 * found, the corresponding bit is cleared in the p_index field. When large 12010 * pages are more prevalent in the system, we can maintain the mapping list 12011 * in order and we don't have to traverse the list each time. Just check the 12012 * next and prev entries, and if both are of different size, we clear the bit. 12013 */ 12014 static void 12015 sfmmu_rm_large_mappings(page_t *pp, int ttesz) 12016 { 12017 struct sf_hment *sfhmep; 12018 struct hme_blk *hmeblkp; 12019 int index; 12020 pgcnt_t npgs; 12021 12022 ASSERT(ttesz > TTE8K); 12023 12024 ASSERT(sfmmu_mlist_held(pp)); 12025 12026 ASSERT(PP_ISMAPPED_LARGE(pp)); 12027 12028 /* 12029 * Traverse mapping list looking for another mapping of the same size, 12030 * since we only want to clear the index field if all mappings of 12031 * that size are gone.
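 *
 * Worked example (illustrative, assuming an 8K base page size): for
 * ttesz == TTE4M, PAGESZ_TO_INDEX(TTE4M) yields the p_index bit used
 * for 4M mappings and TTEPAGES(TTE4M) is 512, so when no other 4M
 * mapping is found the loop below clears that bit in each of the 512
 * constituent page_t's of the large page.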
12032 */ 12033 12034 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 12035 hmeblkp = sfmmu_hmetohblk(sfhmep); 12036 if (hmeblkp->hblk_xhat_bit) 12037 continue; 12038 if (hme_size(sfhmep) == ttesz) { 12039 /* 12040 * another mapping of the same size. don't clear index. 12041 */ 12042 return; 12043 } 12044 } 12045 12046 /* 12047 * Clear the p_index bit for large page. 12048 */ 12049 index = PAGESZ_TO_INDEX(ttesz); 12050 npgs = TTEPAGES(ttesz); 12051 while (npgs-- > 0) { 12052 ASSERT(pp->p_index & index); 12053 pp->p_index &= ~index; 12054 pp = PP_PAGENEXT(pp); 12055 } 12056 } 12057 12058 /* 12059 * return supported features 12060 */ 12061 /* ARGSUSED */ 12062 int 12063 hat_supported(enum hat_features feature, void *arg) 12064 { 12065 switch (feature) { 12066 case HAT_SHARED_PT: 12067 case HAT_DYNAMIC_ISM_UNMAP: 12068 case HAT_VMODSORT: 12069 return (1); 12070 default: 12071 return (0); 12072 } 12073 } 12074 12075 void 12076 hat_enter(struct hat *hat) 12077 { 12078 hatlock_t *hatlockp; 12079 12080 if (hat != ksfmmup) { 12081 hatlockp = TSB_HASH(hat); 12082 mutex_enter(HATLOCK_MUTEXP(hatlockp)); 12083 } 12084 } 12085 12086 void 12087 hat_exit(struct hat *hat) 12088 { 12089 hatlock_t *hatlockp; 12090 12091 if (hat != ksfmmup) { 12092 hatlockp = TSB_HASH(hat); 12093 mutex_exit(HATLOCK_MUTEXP(hatlockp)); 12094 } 12095 } 12096 12097 /*ARGSUSED*/ 12098 void 12099 hat_reserve(struct as *as, caddr_t addr, size_t len) 12100 { 12101 } 12102 12103 static void 12104 hat_kstat_init(void) 12105 { 12106 kstat_t *ksp; 12107 12108 ksp = kstat_create("unix", 0, "sfmmu_global_stat", "hat", 12109 KSTAT_TYPE_RAW, sizeof (struct sfmmu_global_stat), 12110 KSTAT_FLAG_VIRTUAL); 12111 if (ksp) { 12112 ksp->ks_data = (void *) &sfmmu_global_stat; 12113 kstat_install(ksp); 12114 } 12115 ksp = kstat_create("unix", 0, "sfmmu_tsbsize_stat", "hat", 12116 KSTAT_TYPE_RAW, sizeof (struct sfmmu_tsbsize_stat), 12117 KSTAT_FLAG_VIRTUAL); 12118 if (ksp) { 12119 ksp->ks_data = (void *) &sfmmu_tsbsize_stat; 12120 kstat_install(ksp); 12121 } 12122 ksp = kstat_create("unix", 0, "sfmmu_percpu_stat", "hat", 12123 KSTAT_TYPE_RAW, sizeof (struct sfmmu_percpu_stat) * NCPU, 12124 KSTAT_FLAG_WRITABLE); 12125 if (ksp) { 12126 ksp->ks_update = sfmmu_kstat_percpu_update; 12127 kstat_install(ksp); 12128 } 12129 } 12130 12131 /* ARGSUSED */ 12132 static int 12133 sfmmu_kstat_percpu_update(kstat_t *ksp, int rw) 12134 { 12135 struct sfmmu_percpu_stat *cpu_kstat = ksp->ks_data; 12136 struct tsbmiss *tsbm = tsbmiss_area; 12137 struct kpmtsbm *kpmtsbm = kpmtsbm_area; 12138 int i; 12139 12140 ASSERT(cpu_kstat); 12141 if (rw == KSTAT_READ) { 12142 for (i = 0; i < NCPU; cpu_kstat++, tsbm++, kpmtsbm++, i++) { 12143 cpu_kstat->sf_itlb_misses = tsbm->itlb_misses; 12144 cpu_kstat->sf_dtlb_misses = tsbm->dtlb_misses; 12145 cpu_kstat->sf_utsb_misses = tsbm->utsb_misses - 12146 tsbm->uprot_traps; 12147 cpu_kstat->sf_ktsb_misses = tsbm->ktsb_misses + 12148 kpmtsbm->kpm_tsb_misses - tsbm->kprot_traps; 12149 12150 if (tsbm->itlb_misses > 0 && tsbm->dtlb_misses > 0) { 12151 cpu_kstat->sf_tsb_hits = 12152 (tsbm->itlb_misses + tsbm->dtlb_misses) - 12153 (tsbm->utsb_misses + tsbm->ktsb_misses + 12154 kpmtsbm->kpm_tsb_misses); 12155 } else { 12156 cpu_kstat->sf_tsb_hits = 0; 12157 } 12158 cpu_kstat->sf_umod_faults = tsbm->uprot_traps; 12159 cpu_kstat->sf_kmod_faults = tsbm->kprot_traps; 12160 } 12161 } else { 12162 /* KSTAT_WRITE is used to clear stats */ 12163 for (i = 0; i < NCPU; tsbm++, kpmtsbm++, i++) { 12164 tsbm->itlb_misses = 0; 12165 
tsbm->dtlb_misses = 0; 12166 tsbm->utsb_misses = 0; 12167 tsbm->ktsb_misses = 0; 12168 tsbm->uprot_traps = 0; 12169 tsbm->kprot_traps = 0; 12170 kpmtsbm->kpm_dtlb_misses = 0; 12171 kpmtsbm->kpm_tsb_misses = 0; 12172 } 12173 } 12174 return (0); 12175 } 12176 12177 #ifdef DEBUG 12178 12179 tte_t *gorig[NCPU], *gcur[NCPU], *gnew[NCPU]; 12180 12181 /* 12182 * A tte checker. *orig_old is the value we read before cas. 12183 * *cur is the value returned by cas. 12184 * *new is the desired value when we do the cas. 12185 * 12186 * *hmeblkp is currently unused. 12187 */ 12188 12189 /* ARGSUSED */ 12190 void 12191 chk_tte(tte_t *orig_old, tte_t *cur, tte_t *new, struct hme_blk *hmeblkp) 12192 { 12193 pfn_t i, j, k; 12194 int cpuid = CPU->cpu_id; 12195 12196 gorig[cpuid] = orig_old; 12197 gcur[cpuid] = cur; 12198 gnew[cpuid] = new; 12199 12200 #ifdef lint 12201 hmeblkp = hmeblkp; 12202 #endif 12203 12204 if (TTE_IS_VALID(orig_old)) { 12205 if (TTE_IS_VALID(cur)) { 12206 i = TTE_TO_TTEPFN(orig_old); 12207 j = TTE_TO_TTEPFN(cur); 12208 k = TTE_TO_TTEPFN(new); 12209 if (i != j) { 12210 /* remap error? */ 12211 panic("chk_tte: bad pfn, 0x%lx, 0x%lx", i, j); 12212 } 12213 12214 if (i != k) { 12215 /* remap error? */ 12216 panic("chk_tte: bad pfn2, 0x%lx, 0x%lx", i, k); 12217 } 12218 } else { 12219 if (TTE_IS_VALID(new)) { 12220 panic("chk_tte: invalid cur? "); 12221 } 12222 12223 i = TTE_TO_TTEPFN(orig_old); 12224 k = TTE_TO_TTEPFN(new); 12225 if (i != k) { 12226 panic("chk_tte: bad pfn3, 0x%lx, 0x%lx", i, k); 12227 } 12228 } 12229 } else { 12230 if (TTE_IS_VALID(cur)) { 12231 j = TTE_TO_TTEPFN(cur); 12232 if (TTE_IS_VALID(new)) { 12233 k = TTE_TO_TTEPFN(new); 12234 if (j != k) { 12235 panic("chk_tte: bad pfn4, 0x%lx, 0x%lx", 12236 j, k); 12237 } 12238 } else { 12239 panic("chk_tte: why here?"); 12240 } 12241 } else { 12242 if (!TTE_IS_VALID(new)) { 12243 panic("chk_tte: why here2 ?"); 12244 } 12245 } 12246 } 12247 } 12248 12249 #endif /* DEBUG */ 12250 12251 extern void prefetch_tsbe_read(struct tsbe *); 12252 extern void prefetch_tsbe_write(struct tsbe *); 12253 12254 12255 /* 12256 * We want to prefetch 7 cache lines ahead for our read prefetch. This gives 12257 * us optimal performance on Cheetah+. You can only have 8 outstanding 12258 * prefetches at any one time, so we opted for 7 read prefetches and 1 write 12259 * prefetch to make the most utilization of the prefetch capability. 12260 */ 12261 #define TSBE_PREFETCH_STRIDE (7) 12262 12263 void 12264 sfmmu_copy_tsb(struct tsb_info *old_tsbinfo, struct tsb_info *new_tsbinfo) 12265 { 12266 int old_bytes = TSB_BYTES(old_tsbinfo->tsb_szc); 12267 int new_bytes = TSB_BYTES(new_tsbinfo->tsb_szc); 12268 int old_entries = TSB_ENTRIES(old_tsbinfo->tsb_szc); 12269 int new_entries = TSB_ENTRIES(new_tsbinfo->tsb_szc); 12270 struct tsbe *old; 12271 struct tsbe *new; 12272 struct tsbe *new_base = (struct tsbe *)new_tsbinfo->tsb_va; 12273 uint64_t va; 12274 int new_offset; 12275 int i; 12276 int vpshift; 12277 int last_prefetch; 12278 12279 if (old_bytes == new_bytes) { 12280 bcopy(old_tsbinfo->tsb_va, new_tsbinfo->tsb_va, new_bytes); 12281 } else { 12282 12283 /* 12284 * A TSBE is 16 bytes which means there are four TSBE's per 12285 * P$ line (64 bytes), thus every 4 TSBE's we prefetch. 
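 *
 * In other words (illustrative arithmetic only): with
 * TSBE_PREFETCH_STRIDE == 7 the copy loop below stops issuing read
 * prefetches at
 *
 *	last_prefetch = old_entries - 4 * (7 + 1) = old_entries - 32
 *
 * and issues one prefetch_tsbe_read() per P$ line, i.e. whenever
 * (i & 3) == 0, keeping about 7 lines in flight ahead of the copy
 * while leaving the eighth slot for the write prefetch.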
12286 */ 12287 old = (struct tsbe *)old_tsbinfo->tsb_va; 12288 last_prefetch = old_entries - (4*(TSBE_PREFETCH_STRIDE+1)); 12289 for (i = 0; i < old_entries; i++, old++) { 12290 if (((i & (4-1)) == 0) && (i < last_prefetch)) 12291 prefetch_tsbe_read(old); 12292 if (!old->tte_tag.tag_invalid) { 12293 /* 12294 * We have a valid TTE to remap. Check the 12295 * size. We won't remap 64K or 512K TTEs 12296 * because they span more than one TSB entry 12297 * and are indexed using an 8K virt. page. 12298 * Ditto for 32M and 256M TTEs. 12299 */ 12300 if (TTE_CSZ(&old->tte_data) == TTE64K || 12301 TTE_CSZ(&old->tte_data) == TTE512K) 12302 continue; 12303 if (mmu_page_sizes == max_mmu_page_sizes) { 12304 if (TTE_CSZ(&old->tte_data) == TTE32M || 12305 TTE_CSZ(&old->tte_data) == TTE256M) 12306 continue; 12307 } 12308 12309 /* clear the lower 22 bits of the va */ 12310 va = *(uint64_t *)old << 22; 12311 /* turn va into a virtual pfn */ 12312 va >>= 22 - TSB_START_SIZE; 12313 /* 12314 * or in bits from the offset in the tsb 12315 * to get the real virtual pfn. These 12316 * correspond to bits [21:13] in the va 12317 */ 12318 vpshift = 12319 TTE_BSZS_SHIFT(TTE_CSZ(&old->tte_data)) & 12320 0x1ff; 12321 va |= (i << vpshift); 12322 va >>= vpshift; 12323 new_offset = va & (new_entries - 1); 12324 new = new_base + new_offset; 12325 prefetch_tsbe_write(new); 12326 *new = *old; 12327 } 12328 } 12329 } 12330 } 12331 12332 /* 12333 * Kernel Physical Mapping (kpm) facility 12334 */ 12335 12336 /* -- hat_kpm interface section -- */ 12337 12338 /* 12339 * Mapin a locked page and return the vaddr. 12340 * When a kpme is provided by the caller it is added to 12341 * the page p_kpmelist. The page to be mapped in must 12342 * be at least read locked (p_selock). 12343 */ 12344 caddr_t 12345 hat_kpm_mapin(struct page *pp, struct kpme *kpme) 12346 { 12347 kmutex_t *pml; 12348 caddr_t vaddr; 12349 12350 if (kpm_enable == 0) { 12351 cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set"); 12352 return ((caddr_t)NULL); 12353 } 12354 12355 if (pp == NULL || PAGE_LOCKED(pp) == 0) { 12356 cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked"); 12357 return ((caddr_t)NULL); 12358 } 12359 12360 pml = sfmmu_mlist_enter(pp); 12361 ASSERT(pp->p_kpmref >= 0); 12362 12363 vaddr = (pp->p_kpmref == 0) ? 12364 sfmmu_kpm_mapin(pp) : hat_kpm_page2va(pp, 1); 12365 12366 if (kpme != NULL) { 12367 /* 12368 * Tolerate multiple mapins for the same kpme to avoid 12369 * the need for an extra serialization. 12370 */ 12371 if ((sfmmu_kpme_lookup(kpme, pp)) == 0) 12372 sfmmu_kpme_add(kpme, pp); 12373 12374 ASSERT(pp->p_kpmref > 0); 12375 12376 } else { 12377 pp->p_kpmref++; 12378 } 12379 12380 sfmmu_mlist_exit(pml); 12381 return (vaddr); 12382 } 12383 12384 /* 12385 * Mapout a locked page. 12386 * When a kpme is provided by the caller it is removed from 12387 * the page p_kpmelist. The page to be mapped out must be at 12388 * least read locked (p_selock). 12389 * Note: The seg_kpm layer provides a mapout interface for the 12390 * case that a kpme is used and the underlying page is unlocked. 12391 * This can be used instead of calling this function directly. 
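 *
 * A minimal usage sketch (hypothetical caller, not from this file),
 * pairing this with hat_kpm_mapin() above while the page is at least
 * read locked:
 *
 *	struct kpme kpme;
 *	caddr_t vaddr;
 *
 *	vaddr = hat_kpm_mapin(pp, &kpme);
 *	if (vaddr != NULL) {
 *		... access the page contents through vaddr ...
 *		hat_kpm_mapout(pp, &kpme, vaddr);
 *	}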
12392 */ 12393 void 12394 hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr) 12395 { 12396 kmutex_t *pml; 12397 12398 if (kpm_enable == 0) { 12399 cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set"); 12400 return; 12401 } 12402 12403 if (IS_KPM_ADDR(vaddr) == 0) { 12404 cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address"); 12405 return; 12406 } 12407 12408 if (pp == NULL || PAGE_LOCKED(pp) == 0) { 12409 cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked"); 12410 return; 12411 } 12412 12413 if (kpme != NULL) { 12414 ASSERT(pp == kpme->kpe_page); 12415 pp = kpme->kpe_page; 12416 pml = sfmmu_mlist_enter(pp); 12417 12418 if (sfmmu_kpme_lookup(kpme, pp) == 0) 12419 panic("hat_kpm_mapout: kpme not found pp=%p", 12420 (void *)pp); 12421 12422 ASSERT(pp->p_kpmref > 0); 12423 sfmmu_kpme_sub(kpme, pp); 12424 12425 } else { 12426 pml = sfmmu_mlist_enter(pp); 12427 pp->p_kpmref--; 12428 } 12429 12430 ASSERT(pp->p_kpmref >= 0); 12431 if (pp->p_kpmref == 0) 12432 sfmmu_kpm_mapout(pp, vaddr); 12433 12434 sfmmu_mlist_exit(pml); 12435 } 12436 12437 /* 12438 * Return the kpm virtual address for the page at pp. 12439 * If checkswap is non zero and the page is backed by a 12440 * swap vnode the physical address is used rather than 12441 * p_offset to determine the kpm region. 12442 * Note: The function has to be used w/ extreme care. The 12443 * stability of the page identity is in the responsibility 12444 * of the caller. 12445 */ 12446 caddr_t 12447 hat_kpm_page2va(struct page *pp, int checkswap) 12448 { 12449 int vcolor, vcolor_pa; 12450 uintptr_t paddr, vaddr; 12451 12452 ASSERT(kpm_enable); 12453 12454 paddr = ptob(pp->p_pagenum); 12455 vcolor_pa = addr_to_vcolor(paddr); 12456 12457 if (checkswap && pp->p_vnode && IS_SWAPFSVP(pp->p_vnode)) 12458 vcolor = (PP_ISNC(pp)) ? vcolor_pa : PP_GET_VCOLOR(pp); 12459 else 12460 vcolor = addr_to_vcolor(pp->p_offset); 12461 12462 vaddr = (uintptr_t)kpm_vbase + paddr; 12463 12464 if (vcolor_pa != vcolor) { 12465 vaddr += ((uintptr_t)(vcolor - vcolor_pa) << MMU_PAGESHIFT); 12466 vaddr += (vcolor_pa > vcolor) ? 12467 ((uintptr_t)vcolor_pa << kpm_size_shift) : 12468 ((uintptr_t)(vcolor - vcolor_pa) << kpm_size_shift); 12469 } 12470 12471 return ((caddr_t)vaddr); 12472 } 12473 12474 /* 12475 * Return the page for the kpm virtual address vaddr. 12476 * Caller is responsible for the kpm mapping and lock 12477 * state of the page. 
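 *
 * (This is essentially the inverse of hat_kpm_page2va() above for the
 * non alias case: SFMMU_KPM_VTOP() recovers the physical address from
 * the kpm virtual address and the resulting pfn selects the page_t
 * via page_numtopp_nolock().)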
12478 */ 12479 page_t * 12480 hat_kpm_vaddr2page(caddr_t vaddr) 12481 { 12482 uintptr_t paddr; 12483 pfn_t pfn; 12484 12485 ASSERT(IS_KPM_ADDR(vaddr)); 12486 12487 SFMMU_KPM_VTOP(vaddr, paddr); 12488 pfn = (pfn_t)btop(paddr); 12489 12490 return (page_numtopp_nolock(pfn)); 12491 } 12492 12493 /* page to kpm_page */ 12494 #define PP2KPMPG(pp, kp) { \ 12495 struct memseg *mseg; \ 12496 pgcnt_t inx; \ 12497 pfn_t pfn; \ 12498 \ 12499 pfn = pp->p_pagenum; \ 12500 mseg = page_numtomemseg_nolock(pfn); \ 12501 ASSERT(mseg); \ 12502 inx = ptokpmp(kpmptop(ptokpmp(pfn)) - mseg->kpm_pbase); \ 12503 ASSERT(inx < mseg->kpm_nkpmpgs); \ 12504 kp = &mseg->kpm_pages[inx]; \ 12505 } 12506 12507 /* page to kpm_spage */ 12508 #define PP2KPMSPG(pp, ksp) { \ 12509 struct memseg *mseg; \ 12510 pgcnt_t inx; \ 12511 pfn_t pfn; \ 12512 \ 12513 pfn = pp->p_pagenum; \ 12514 mseg = page_numtomemseg_nolock(pfn); \ 12515 ASSERT(mseg); \ 12516 inx = pfn - mseg->kpm_pbase; \ 12517 ksp = &mseg->kpm_spages[inx]; \ 12518 } 12519 12520 /* 12521 * hat_kpm_fault is called from segkpm_fault when a kpm tsbmiss occurred 12522 * which could not be resolved by the trap level tsbmiss handler for the 12523 * following reasons: 12524 * . The vaddr is in VAC alias range (always PAGESIZE mapping size). 12525 * . The kpm (s)page range of vaddr is in a VAC alias prevention state. 12526 * . tsbmiss handling at trap level is not desired (DEBUG kernel only, 12527 * kpm_tsbmtl == 0). 12528 */ 12529 int 12530 hat_kpm_fault(struct hat *hat, caddr_t vaddr) 12531 { 12532 int error; 12533 uintptr_t paddr; 12534 pfn_t pfn; 12535 struct memseg *mseg; 12536 page_t *pp; 12537 12538 if (kpm_enable == 0) { 12539 cmn_err(CE_WARN, "hat_kpm_fault: kpm_enable not set"); 12540 return (ENOTSUP); 12541 } 12542 12543 ASSERT(hat == ksfmmup); 12544 ASSERT(IS_KPM_ADDR(vaddr)); 12545 12546 SFMMU_KPM_VTOP(vaddr, paddr); 12547 pfn = (pfn_t)btop(paddr); 12548 mseg = page_numtomemseg_nolock(pfn); 12549 if (mseg == NULL) 12550 return (EFAULT); 12551 12552 pp = &mseg->pages[(pgcnt_t)(pfn - mseg->pages_base)]; 12553 ASSERT((pfn_t)pp->p_pagenum == pfn); 12554 12555 if (!PAGE_LOCKED(pp)) 12556 return (EFAULT); 12557 12558 if (kpm_smallpages == 0) 12559 error = sfmmu_kpm_fault(vaddr, mseg, pp); 12560 else 12561 error = sfmmu_kpm_fault_small(vaddr, mseg, pp); 12562 12563 return (error); 12564 } 12565 12566 extern krwlock_t memsegslock; 12567 12568 /* 12569 * memseg_hash[] was cleared, need to clear memseg_phash[] too. 12570 */ 12571 void 12572 hat_kpm_mseghash_clear(int nentries) 12573 { 12574 pgcnt_t i; 12575 12576 if (kpm_enable == 0) 12577 return; 12578 12579 for (i = 0; i < nentries; i++) 12580 memseg_phash[i] = MSEG_NULLPTR_PA; 12581 } 12582 12583 /* 12584 * Update memseg_phash[inx] when memseg_hash[inx] was changed. 12585 */ 12586 void 12587 hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp) 12588 { 12589 if (kpm_enable == 0) 12590 return; 12591 12592 memseg_phash[inx] = (msp) ? va_to_pa(msp) : MSEG_NULLPTR_PA; 12593 } 12594 12595 /* 12596 * Update kpm memseg members from basic memseg info. 
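 *
 * For illustration (the kpm page size is an assumption): kpm_pbase is
 * the memseg's pages_base rounded down to a kpm large page boundary,
 *
 *	msp->kpm_pbase = kpmptop(ptokpmp(msp->pages_base));
 *
 * so with 4M kpm pages a memseg starting at pfn 0x1234 would get a
 * kpm_pbase of 0x1200, and kpm_pages points kpm_pages_off bytes past
 * the memseg's page_t array where the kpm_page_t metadata was placed.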
12597 */ 12598 void 12599 hat_kpm_addmem_mseg_update(struct memseg *msp, pgcnt_t nkpmpgs, 12600 offset_t kpm_pages_off) 12601 { 12602 if (kpm_enable == 0) 12603 return; 12604 12605 msp->kpm_pages = (kpm_page_t *)((caddr_t)msp->pages + kpm_pages_off); 12606 msp->kpm_nkpmpgs = nkpmpgs; 12607 msp->kpm_pbase = kpmptop(ptokpmp(msp->pages_base)); 12608 msp->pagespa = va_to_pa(msp->pages); 12609 msp->epagespa = va_to_pa(msp->epages); 12610 msp->kpm_pagespa = va_to_pa(msp->kpm_pages); 12611 } 12612 12613 /* 12614 * Setup nextpa when a memseg is inserted. 12615 * Assumes that the memsegslock is already held. 12616 */ 12617 void 12618 hat_kpm_addmem_mseg_insert(struct memseg *msp) 12619 { 12620 if (kpm_enable == 0) 12621 return; 12622 12623 ASSERT(RW_LOCK_HELD(&memsegslock)); 12624 msp->nextpa = (memsegs) ? va_to_pa(memsegs) : MSEG_NULLPTR_PA; 12625 } 12626 12627 /* 12628 * Setup memsegspa when a memseg is (head) inserted. 12629 * Called before memsegs is updated to complete a 12630 * memseg insert operation. 12631 * Assumes that the memsegslock is already held. 12632 */ 12633 void 12634 hat_kpm_addmem_memsegs_update(struct memseg *msp) 12635 { 12636 if (kpm_enable == 0) 12637 return; 12638 12639 ASSERT(RW_LOCK_HELD(&memsegslock)); 12640 ASSERT(memsegs); 12641 memsegspa = va_to_pa(msp); 12642 } 12643 12644 /* 12645 * Return end of metadata for an already setup memseg. 12646 * 12647 * Note: kpm_pages and kpm_spages are aliases and the underlying 12648 * member of struct memseg is a union, therefore they always have 12649 * the same address within a memseg. They must be differentiated 12650 * when pointer arithmetic is used with them. 12651 */ 12652 caddr_t 12653 hat_kpm_mseg_reuse(struct memseg *msp) 12654 { 12655 caddr_t end; 12656 12657 if (kpm_smallpages == 0) 12658 end = (caddr_t)(msp->kpm_pages + msp->kpm_nkpmpgs); 12659 else 12660 end = (caddr_t)(msp->kpm_spages + msp->kpm_nkpmpgs); 12661 12662 return (end); 12663 } 12664 12665 /* 12666 * Update memsegspa (when first memseg in list 12667 * is deleted) or nextpa when a memseg deleted. 12668 * Assumes that the memsegslock is already held. 12669 */ 12670 void 12671 hat_kpm_delmem_mseg_update(struct memseg *msp, struct memseg **mspp) 12672 { 12673 struct memseg *lmsp; 12674 12675 if (kpm_enable == 0) 12676 return; 12677 12678 ASSERT(RW_LOCK_HELD(&memsegslock)); 12679 12680 if (mspp == &memsegs) { 12681 memsegspa = (msp->next) ? 12682 va_to_pa(msp->next) : MSEG_NULLPTR_PA; 12683 } else { 12684 lmsp = (struct memseg *) 12685 ((uint64_t)mspp - offsetof(struct memseg, next)); 12686 lmsp->nextpa = (msp->next) ? 12687 va_to_pa(msp->next) : MSEG_NULLPTR_PA; 12688 } 12689 } 12690 12691 /* 12692 * Update kpm members for all memseg's involved in a split operation 12693 * and do the atomic update of the physical memseg chain. 12694 * 12695 * Note: kpm_pages and kpm_spages are aliases and the underlying member 12696 * of struct memseg is a union, therefore they always have the same 12697 * address within a memseg. With that the direct assignments and 12698 * va_to_pa conversions below don't have to be distinguished wrt. to 12699 * kpm_smallpages. They must be differentiated when pointer arithmetic 12700 * is used with them. 12701 * 12702 * Assumes that the memsegslock is already held. 
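 *
 * Sketch of the split (derived from the code below): the original
 * memseg msp is replaced by up to three pieces lo/mid/hi covering the
 * same pfn range. Each piece re-derives its own kpm_pbase and
 * kpm_nkpmpgs, while mid and hi simply point into the original
 * metadata array at an offset of (kstart - kbase) kpm pages, so no
 * kpm metadata is copied or reallocated by the split.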
12703 */ 12704 void 12705 hat_kpm_split_mseg_update(struct memseg *msp, struct memseg **mspp, 12706 struct memseg *lo, struct memseg *mid, struct memseg *hi) 12707 { 12708 pgcnt_t start, end, kbase, kstart, num; 12709 struct memseg *lmsp; 12710 12711 if (kpm_enable == 0) 12712 return; 12713 12714 ASSERT(RW_LOCK_HELD(&memsegslock)); 12715 ASSERT(msp && mid && msp->kpm_pages); 12716 12717 kbase = ptokpmp(msp->kpm_pbase); 12718 12719 if (lo) { 12720 num = lo->pages_end - lo->pages_base; 12721 start = kpmptop(ptokpmp(lo->pages_base)); 12722 /* align end to kpm page size granularity */ 12723 end = kpmptop(ptokpmp(start + num - 1)) + kpmpnpgs; 12724 lo->kpm_pbase = start; 12725 lo->kpm_nkpmpgs = ptokpmp(end - start); 12726 lo->kpm_pages = msp->kpm_pages; 12727 lo->kpm_pagespa = va_to_pa(lo->kpm_pages); 12728 lo->pagespa = va_to_pa(lo->pages); 12729 lo->epagespa = va_to_pa(lo->epages); 12730 lo->nextpa = va_to_pa(lo->next); 12731 } 12732 12733 /* mid */ 12734 num = mid->pages_end - mid->pages_base; 12735 kstart = ptokpmp(mid->pages_base); 12736 start = kpmptop(kstart); 12737 /* align end to kpm page size granularity */ 12738 end = kpmptop(ptokpmp(start + num - 1)) + kpmpnpgs; 12739 mid->kpm_pbase = start; 12740 mid->kpm_nkpmpgs = ptokpmp(end - start); 12741 if (kpm_smallpages == 0) { 12742 mid->kpm_pages = msp->kpm_pages + (kstart - kbase); 12743 } else { 12744 mid->kpm_spages = msp->kpm_spages + (kstart - kbase); 12745 } 12746 mid->kpm_pagespa = va_to_pa(mid->kpm_pages); 12747 mid->pagespa = va_to_pa(mid->pages); 12748 mid->epagespa = va_to_pa(mid->epages); 12749 mid->nextpa = (mid->next) ? va_to_pa(mid->next) : MSEG_NULLPTR_PA; 12750 12751 if (hi) { 12752 num = hi->pages_end - hi->pages_base; 12753 kstart = ptokpmp(hi->pages_base); 12754 start = kpmptop(kstart); 12755 /* align end to kpm page size granularity */ 12756 end = kpmptop(ptokpmp(start + num - 1)) + kpmpnpgs; 12757 hi->kpm_pbase = start; 12758 hi->kpm_nkpmpgs = ptokpmp(end - start); 12759 if (kpm_smallpages == 0) { 12760 hi->kpm_pages = msp->kpm_pages + (kstart - kbase); 12761 } else { 12762 hi->kpm_spages = msp->kpm_spages + (kstart - kbase); 12763 } 12764 hi->kpm_pagespa = va_to_pa(hi->kpm_pages); 12765 hi->pagespa = va_to_pa(hi->pages); 12766 hi->epagespa = va_to_pa(hi->epages); 12767 hi->nextpa = (hi->next) ? va_to_pa(hi->next) : MSEG_NULLPTR_PA; 12768 } 12769 12770 /* 12771 * Atomic update of the physical memseg chain 12772 */ 12773 if (mspp == &memsegs) { 12774 memsegspa = (lo) ? va_to_pa(lo) : va_to_pa(mid); 12775 } else { 12776 lmsp = (struct memseg *) 12777 ((uint64_t)mspp - offsetof(struct memseg, next)); 12778 lmsp->nextpa = (lo) ? va_to_pa(lo) : va_to_pa(mid); 12779 } 12780 } 12781 12782 /* 12783 * Walk the memsegs chain, applying func to each memseg span and vcolor. 12784 */ 12785 void 12786 hat_kpm_walk(void (*func)(void *, void *, size_t), void *arg) 12787 { 12788 pfn_t pbase, pend; 12789 int vcolor; 12790 void *base; 12791 size_t size; 12792 struct memseg *msp; 12793 extern uint_t vac_colors; 12794 12795 for (msp = memsegs; msp; msp = msp->next) { 12796 pbase = msp->pages_base; 12797 pend = msp->pages_end; 12798 for (vcolor = 0; vcolor < vac_colors; vcolor++) { 12799 base = ptob(pbase) + kpm_vbase + kpm_size * vcolor; 12800 size = ptob(pend - pbase); 12801 func(arg, base, size); 12802 } 12803 } 12804 } 12805 12806 12807 /* -- sfmmu_kpm internal section -- */ 12808 12809 /* 12810 * Return the page frame number if a valid segkpm mapping exists 12811 * for vaddr, otherwise return PFN_INVALID. No locks are grabbed. 
12812 * Should only be used by other sfmmu routines. 12813 */ 12814 pfn_t 12815 sfmmu_kpm_vatopfn(caddr_t vaddr) 12816 { 12817 uintptr_t paddr; 12818 pfn_t pfn; 12819 page_t *pp; 12820 12821 ASSERT(kpm_enable && IS_KPM_ADDR(vaddr)); 12822 12823 SFMMU_KPM_VTOP(vaddr, paddr); 12824 pfn = (pfn_t)btop(paddr); 12825 pp = page_numtopp_nolock(pfn); 12826 if (pp && pp->p_kpmref) 12827 return (pfn); 12828 else 12829 return ((pfn_t)PFN_INVALID); 12830 } 12831 12832 /* 12833 * Lookup a kpme in the p_kpmelist. 12834 */ 12835 static int 12836 sfmmu_kpme_lookup(struct kpme *kpme, page_t *pp) 12837 { 12838 struct kpme *p; 12839 12840 for (p = pp->p_kpmelist; p; p = p->kpe_next) { 12841 if (p == kpme) 12842 return (1); 12843 } 12844 return (0); 12845 } 12846 12847 /* 12848 * Insert a kpme into the p_kpmelist and increment 12849 * the per page kpm reference count. 12850 */ 12851 static void 12852 sfmmu_kpme_add(struct kpme *kpme, page_t *pp) 12853 { 12854 ASSERT(pp->p_kpmref >= 0); 12855 12856 /* head insert */ 12857 kpme->kpe_prev = NULL; 12858 kpme->kpe_next = pp->p_kpmelist; 12859 12860 if (pp->p_kpmelist) 12861 pp->p_kpmelist->kpe_prev = kpme; 12862 12863 pp->p_kpmelist = kpme; 12864 kpme->kpe_page = pp; 12865 pp->p_kpmref++; 12866 } 12867 12868 /* 12869 * Remove a kpme from the p_kpmelist and decrement 12870 * the per page kpm reference count. 12871 */ 12872 static void 12873 sfmmu_kpme_sub(struct kpme *kpme, page_t *pp) 12874 { 12875 ASSERT(pp->p_kpmref > 0); 12876 12877 if (kpme->kpe_prev) { 12878 ASSERT(pp->p_kpmelist != kpme); 12879 ASSERT(kpme->kpe_prev->kpe_page == pp); 12880 kpme->kpe_prev->kpe_next = kpme->kpe_next; 12881 } else { 12882 ASSERT(pp->p_kpmelist == kpme); 12883 pp->p_kpmelist = kpme->kpe_next; 12884 } 12885 12886 if (kpme->kpe_next) { 12887 ASSERT(kpme->kpe_next->kpe_page == pp); 12888 kpme->kpe_next->kpe_prev = kpme->kpe_prev; 12889 } 12890 12891 kpme->kpe_next = kpme->kpe_prev = NULL; 12892 kpme->kpe_page = NULL; 12893 pp->p_kpmref--; 12894 } 12895 12896 /* 12897 * Mapin a single page, it is called every time a page changes it's state 12898 * from kpm-unmapped to kpm-mapped. It may not be called, when only a new 12899 * kpm instance does a mapin and wants to share the mapping. 12900 * Assumes that the mlist mutex is already grabbed. 12901 */ 12902 static caddr_t 12903 sfmmu_kpm_mapin(page_t *pp) 12904 { 12905 kpm_page_t *kp; 12906 kpm_hlk_t *kpmp; 12907 caddr_t vaddr; 12908 int kpm_vac_range; 12909 pfn_t pfn; 12910 tte_t tte; 12911 kmutex_t *pmtx; 12912 int uncached; 12913 kpm_spage_t *ksp; 12914 kpm_shlk_t *kpmsp; 12915 int oldval; 12916 12917 ASSERT(sfmmu_mlist_held(pp)); 12918 ASSERT(pp->p_kpmref == 0); 12919 12920 vaddr = sfmmu_kpm_getvaddr(pp, &kpm_vac_range); 12921 12922 ASSERT(IS_KPM_ADDR(vaddr)); 12923 uncached = PP_ISNC(pp); 12924 pfn = pp->p_pagenum; 12925 12926 if (kpm_smallpages) 12927 goto smallpages_mapin; 12928 12929 PP2KPMPG(pp, kp); 12930 12931 kpmp = KPMP_HASH(kp); 12932 mutex_enter(&kpmp->khl_mutex); 12933 12934 ASSERT(PP_ISKPMC(pp) == 0); 12935 ASSERT(PP_ISKPMS(pp) == 0); 12936 12937 if (uncached) { 12938 /* ASSERT(pp->p_share); XXX use hat_page_getshare */ 12939 if (kpm_vac_range == 0) { 12940 if (kp->kp_refcnts == 0) { 12941 /* 12942 * Must remove large page mapping if it exists. 12943 * Pages in uncached state can only be mapped 12944 * small (PAGESIZE) within the regular kpm 12945 * range. 
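 *
 * (Reminder, derived from the surrounding code: kp_refcntc == -1 is
 * the "go" indication that lets the trap level tsbmiss handler service
 * misses on this kpm_page with the large mapping; it is withdrawn via
 * sfmmu_kpm_tsbmtl(..., KPMTSBM_STOP) before the 4M mapping is
 * demapped below.)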
12946 */ 12947 if (kp->kp_refcntc == -1) { 12948 /* remove go indication */ 12949 sfmmu_kpm_tsbmtl(&kp->kp_refcntc, 12950 &kpmp->khl_lock, KPMTSBM_STOP); 12951 } 12952 if (kp->kp_refcnt > 0 && kp->kp_refcntc == 0) 12953 sfmmu_kpm_demap_large(vaddr); 12954 } 12955 ASSERT(kp->kp_refcntc >= 0); 12956 kp->kp_refcntc++; 12957 } 12958 pmtx = sfmmu_page_enter(pp); 12959 PP_SETKPMC(pp); 12960 sfmmu_page_exit(pmtx); 12961 } 12962 12963 if ((kp->kp_refcntc > 0 || kp->kp_refcnts > 0) && kpm_vac_range == 0) { 12964 /* 12965 * Have to do a small (PAGESIZE) mapin within this kpm_page 12966 * range since it is marked to be in VAC conflict mode or 12967 * when there are still other small mappings around. 12968 */ 12969 12970 /* tte assembly */ 12971 if (uncached == 0) 12972 KPM_TTE_VCACHED(tte.ll, pfn, TTE8K); 12973 else 12974 KPM_TTE_VUNCACHED(tte.ll, pfn, TTE8K); 12975 12976 /* tsb dropin */ 12977 sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT); 12978 12979 pmtx = sfmmu_page_enter(pp); 12980 PP_SETKPMS(pp); 12981 sfmmu_page_exit(pmtx); 12982 12983 kp->kp_refcnts++; 12984 ASSERT(kp->kp_refcnts > 0); 12985 goto exit; 12986 } 12987 12988 if (kpm_vac_range == 0) { 12989 /* 12990 * Fast path / regular case, no VAC conflict handling 12991 * in progress within this kpm_page range. 12992 */ 12993 if (kp->kp_refcnt == 0) { 12994 12995 /* tte assembly */ 12996 KPM_TTE_VCACHED(tte.ll, pfn, TTE4M); 12997 12998 /* tsb dropin */ 12999 sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT4M); 13000 13001 /* Set go flag for TL tsbmiss handler */ 13002 if (kp->kp_refcntc == 0) 13003 sfmmu_kpm_tsbmtl(&kp->kp_refcntc, 13004 &kpmp->khl_lock, KPMTSBM_START); 13005 13006 ASSERT(kp->kp_refcntc == -1); 13007 } 13008 kp->kp_refcnt++; 13009 ASSERT(kp->kp_refcnt); 13010 13011 } else { 13012 /* 13013 * The page is not setup according to the common VAC 13014 * prevention rules for the regular and kpm mapping layer 13015 * E.g. the page layer was not able to deliver a right 13016 * vcolor'ed page for a given vaddr corresponding to 13017 * the wanted p_offset. It has to be mapped in small in 13018 * within the corresponding kpm vac range in order to 13019 * prevent VAC alias conflicts. 13020 */ 13021 13022 /* tte assembly */ 13023 if (uncached == 0) { 13024 KPM_TTE_VCACHED(tte.ll, pfn, TTE8K); 13025 } else { 13026 KPM_TTE_VUNCACHED(tte.ll, pfn, TTE8K); 13027 } 13028 13029 /* tsb dropin */ 13030 sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT); 13031 13032 kp->kp_refcnta++; 13033 if (kp->kp_refcntc == -1) { 13034 ASSERT(kp->kp_refcnt > 0); 13035 13036 /* remove go indication */ 13037 sfmmu_kpm_tsbmtl(&kp->kp_refcntc, &kpmp->khl_lock, 13038 KPMTSBM_STOP); 13039 } 13040 ASSERT(kp->kp_refcntc >= 0); 13041 } 13042 exit: 13043 mutex_exit(&kpmp->khl_mutex); 13044 return (vaddr); 13045 13046 smallpages_mapin: 13047 if (uncached == 0) { 13048 /* tte assembly */ 13049 KPM_TTE_VCACHED(tte.ll, pfn, TTE8K); 13050 } else { 13051 /* ASSERT(pp->p_share); XXX use hat_page_getshare */ 13052 pmtx = sfmmu_page_enter(pp); 13053 PP_SETKPMC(pp); 13054 sfmmu_page_exit(pmtx); 13055 /* tte assembly */ 13056 KPM_TTE_VUNCACHED(tte.ll, pfn, TTE8K); 13057 } 13058 13059 /* tsb dropin */ 13060 sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT); 13061 13062 PP2KPMSPG(pp, ksp); 13063 kpmsp = KPMP_SHASH(ksp); 13064 13065 oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped, &kpmsp->kshl_lock, 13066 (uncached) ? 
KPM_MAPPEDSC : KPM_MAPPEDS); 13067 13068 if (oldval != 0) 13069 panic("sfmmu_kpm_mapin: stale smallpages mapping"); 13070 13071 return (vaddr); 13072 } 13073 13074 /* 13075 * Mapout a single page. It is called every time a page changes its state 13076 * from kpm-mapped to kpm-unmapped. It may not be called when only a kpm 13077 * instance calls mapout and there are still other instances mapping the 13078 * page. Assumes that the mlist mutex is already grabbed. 13079 * 13080 * Note: In normal mode (no VAC conflict prevention pending) TLB's are 13081 * not flushed. This is the core segkpm behavior to avoid xcalls. It is 13082 * no problem because a translation from a segkpm virtual address to a 13083 * physical address is always the same. The only downside is a slightly 13084 * increased window of vulnerability for misbehaving _kernel_ modules. 13085 */ 13086 static void 13087 sfmmu_kpm_mapout(page_t *pp, caddr_t vaddr) 13088 { 13089 kpm_page_t *kp; 13090 kpm_hlk_t *kpmp; 13091 int alias_range; 13092 kmutex_t *pmtx; 13093 kpm_spage_t *ksp; 13094 kpm_shlk_t *kpmsp; 13095 int oldval; 13096 13097 ASSERT(sfmmu_mlist_held(pp)); 13098 ASSERT(pp->p_kpmref == 0); 13099 13100 alias_range = IS_KPM_ALIAS_RANGE(vaddr); 13101 13102 if (kpm_smallpages) 13103 goto smallpages_mapout; 13104 13105 PP2KPMPG(pp, kp); 13106 kpmp = KPMP_HASH(kp); 13107 mutex_enter(&kpmp->khl_mutex); 13108 13109 if (alias_range) { 13110 ASSERT(PP_ISKPMS(pp) == 0); 13111 if (kp->kp_refcnta <= 0) { 13112 panic("sfmmu_kpm_mapout: bad refcnta kp=%p", 13113 (void *)kp); 13114 } 13115 13116 if (PP_ISTNC(pp)) { 13117 if (PP_ISKPMC(pp) == 0) { 13118 /* 13119 * Uncached kpm mappings must always have 13120 * forced "small page" mode. 13121 */ 13122 panic("sfmmu_kpm_mapout: uncached page not " 13123 "kpm marked"); 13124 } 13125 sfmmu_kpm_demap_small(vaddr); 13126 13127 pmtx = sfmmu_page_enter(pp); 13128 PP_CLRKPMC(pp); 13129 sfmmu_page_exit(pmtx); 13130 13131 /* 13132 * Check if we can resume cached mode. This might 13133 * be the case if the kpm mapping was the only 13134 * mapping in conflict with other non rule 13135 * compliant mappings. The page is no more marked 13136 * as kpm mapped, so the conv_tnc path will not 13137 * change kpm state. 13138 */ 13139 conv_tnc(pp, TTE8K); 13140 13141 } else if (PP_ISKPMC(pp) == 0) { 13142 /* remove TSB entry only */ 13143 sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT); 13144 13145 } else { 13146 /* already demapped */ 13147 pmtx = sfmmu_page_enter(pp); 13148 PP_CLRKPMC(pp); 13149 sfmmu_page_exit(pmtx); 13150 } 13151 kp->kp_refcnta--; 13152 goto exit; 13153 } 13154 13155 if (kp->kp_refcntc <= 0 && kp->kp_refcnts == 0) { 13156 /* 13157 * Fast path / regular case. 13158 */ 13159 ASSERT(kp->kp_refcntc >= -1); 13160 ASSERT(!(pp->p_nrm & (P_KPMC | P_KPMS | P_TNC | P_PNC))); 13161 13162 if (kp->kp_refcnt <= 0) 13163 panic("sfmmu_kpm_mapout: bad refcnt kp=%p", (void *)kp); 13164 13165 if (--kp->kp_refcnt == 0) { 13166 /* remove go indication */ 13167 if (kp->kp_refcntc == -1) { 13168 sfmmu_kpm_tsbmtl(&kp->kp_refcntc, 13169 &kpmp->khl_lock, KPMTSBM_STOP); 13170 } 13171 ASSERT(kp->kp_refcntc == 0); 13172 13173 /* remove TSB entry */ 13174 sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT4M); 13175 #ifdef DEBUG 13176 if (kpm_tlb_flush) 13177 sfmmu_kpm_demap_tlbs(vaddr, KCONTEXT); 13178 #endif 13179 } 13180 13181 } else { 13182 /* 13183 * The VAC alias path. 13184 * We come here if the kpm vaddr is not in any alias_range 13185 * and we are unmapping a page within the regular kpm_page 13186 * range.
The kpm_page either holds conflict pages and/or 13187 * is in "small page" mode. If the page is not marked 13188 * P_KPMS it couldn't have a valid PAGESIZE sized TSB 13189 * entry. Dcache flushing is done lazy and follows the 13190 * rules of the regular virtual page coloring scheme. 13191 * 13192 * Per page states and required actions: 13193 * P_KPMC: remove a kpm mapping that is conflicting. 13194 * P_KPMS: remove a small kpm mapping within a kpm_page. 13195 * P_TNC: check if we can re-cache the page. 13196 * P_PNC: we cannot re-cache, sorry. 13197 * Per kpm_page: 13198 * kp_refcntc > 0: page is part of a kpm_page with conflicts. 13199 * kp_refcnts > 0: rm a small mapped page within a kpm_page. 13200 */ 13201 13202 if (PP_ISKPMS(pp)) { 13203 if (kp->kp_refcnts < 1) { 13204 panic("sfmmu_kpm_mapout: bad refcnts kp=%p", 13205 (void *)kp); 13206 } 13207 sfmmu_kpm_demap_small(vaddr); 13208 13209 /* 13210 * Check if we can resume cached mode. This might 13211 * be the case if the kpm mapping was the only 13212 * mapping in conflict with other non rule 13213 * compliant mappings. The page is no more marked 13214 * as kpm mapped, so the conv_tnc path will not 13215 * change kpm state. 13216 */ 13217 if (PP_ISTNC(pp)) { 13218 if (!PP_ISKPMC(pp)) { 13219 /* 13220 * Uncached kpm mappings must always 13221 * have forced "small page" mode. 13222 */ 13223 panic("sfmmu_kpm_mapout: uncached " 13224 "page not kpm marked"); 13225 } 13226 conv_tnc(pp, TTE8K); 13227 } 13228 kp->kp_refcnts--; 13229 kp->kp_refcnt++; 13230 pmtx = sfmmu_page_enter(pp); 13231 PP_CLRKPMS(pp); 13232 sfmmu_page_exit(pmtx); 13233 } 13234 13235 if (PP_ISKPMC(pp)) { 13236 if (kp->kp_refcntc < 1) { 13237 panic("sfmmu_kpm_mapout: bad refcntc kp=%p", 13238 (void *)kp); 13239 } 13240 pmtx = sfmmu_page_enter(pp); 13241 PP_CLRKPMC(pp); 13242 sfmmu_page_exit(pmtx); 13243 kp->kp_refcntc--; 13244 } 13245 13246 if (kp->kp_refcnt-- < 1) 13247 panic("sfmmu_kpm_mapout: bad refcnt kp=%p", (void *)kp); 13248 } 13249 exit: 13250 mutex_exit(&kpmp->khl_mutex); 13251 return; 13252 13253 smallpages_mapout: 13254 PP2KPMSPG(pp, ksp); 13255 kpmsp = KPMP_SHASH(ksp); 13256 13257 if (PP_ISKPMC(pp) == 0) { 13258 oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped, 13259 &kpmsp->kshl_lock, 0); 13260 13261 if (oldval != KPM_MAPPEDS) { 13262 /* 13263 * When we're called after sfmmu_kpm_hme_unload, 13264 * KPM_MAPPEDSC is valid too. 13265 */ 13266 if (oldval != KPM_MAPPEDSC) 13267 panic("sfmmu_kpm_mapout: incorrect mapping"); 13268 } 13269 13270 /* remove TSB entry */ 13271 sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT); 13272 #ifdef DEBUG 13273 if (kpm_tlb_flush) 13274 sfmmu_kpm_demap_tlbs(vaddr, KCONTEXT); 13275 #endif 13276 13277 } else if (PP_ISTNC(pp)) { 13278 oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped, 13279 &kpmsp->kshl_lock, 0); 13280 13281 if (oldval != KPM_MAPPEDSC || PP_ISKPMC(pp) == 0) 13282 panic("sfmmu_kpm_mapout: inconsistent TNC mapping"); 13283 13284 sfmmu_kpm_demap_small(vaddr); 13285 13286 pmtx = sfmmu_page_enter(pp); 13287 PP_CLRKPMC(pp); 13288 sfmmu_page_exit(pmtx); 13289 13290 /* 13291 * Check if we can resume cached mode. This might be 13292 * the case if the kpm mapping was the only mapping 13293 * in conflict with other non rule compliant mappings. 13294 * The page is no more marked as kpm mapped, so the 13295 * conv_tnc path will not change the kpm state. 
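 *
 * (For reference, as used throughout the smallpages paths above:
 * KPM_MAPPEDS denotes a plain cached small mapping and KPM_MAPPEDSC a
 * small mapping established under VAC conflict handling; mapout
 * clears the kp_mapped state by passing 0 to sfmmu_kpm_stsbmtl().)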
13296 */ 13297 conv_tnc(pp, TTE8K); 13298 13299 } else { 13300 oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped, 13301 &kpmsp->kshl_lock, 0); 13302 13303 if (oldval != KPM_MAPPEDSC) 13304 panic("sfmmu_kpm_mapout: inconsistent mapping"); 13305 13306 pmtx = sfmmu_page_enter(pp); 13307 PP_CLRKPMC(pp); 13308 sfmmu_page_exit(pmtx); 13309 } 13310 } 13311 13312 #define abs(x) ((x) < 0 ? -(x) : (x)) 13313 13314 /* 13315 * Determine appropriate kpm mapping address and handle any kpm/hme 13316 * conflicts. Page mapping list and its vcolor parts must be protected. 13317 */ 13318 static caddr_t 13319 sfmmu_kpm_getvaddr(page_t *pp, int *kpm_vac_rangep) 13320 { 13321 int vcolor, vcolor_pa; 13322 caddr_t vaddr; 13323 uintptr_t paddr; 13324 13325 13326 ASSERT(sfmmu_mlist_held(pp)); 13327 13328 paddr = ptob(pp->p_pagenum); 13329 vcolor_pa = addr_to_vcolor(paddr); 13330 13331 if (pp->p_vnode && IS_SWAPFSVP(pp->p_vnode)) { 13332 vcolor = (PP_NEWPAGE(pp) || PP_ISNC(pp)) ? 13333 vcolor_pa : PP_GET_VCOLOR(pp); 13334 } else { 13335 vcolor = addr_to_vcolor(pp->p_offset); 13336 } 13337 13338 vaddr = kpm_vbase + paddr; 13339 *kpm_vac_rangep = 0; 13340 13341 if (vcolor_pa != vcolor) { 13342 *kpm_vac_rangep = abs(vcolor - vcolor_pa); 13343 vaddr += ((uintptr_t)(vcolor - vcolor_pa) << MMU_PAGESHIFT); 13344 vaddr += (vcolor_pa > vcolor) ? 13345 ((uintptr_t)vcolor_pa << kpm_size_shift) : 13346 ((uintptr_t)(vcolor - vcolor_pa) << kpm_size_shift); 13347 13348 ASSERT(!PP_ISMAPPED_LARGE(pp)); 13349 } 13350 13351 if (PP_ISNC(pp)) 13352 return (vaddr); 13353 13354 if (PP_NEWPAGE(pp)) { 13355 PP_SET_VCOLOR(pp, vcolor); 13356 return (vaddr); 13357 } 13358 13359 if (PP_GET_VCOLOR(pp) == vcolor) 13360 return (vaddr); 13361 13362 ASSERT(!PP_ISMAPPED_KPM(pp)); 13363 sfmmu_kpm_vac_conflict(pp, vaddr); 13364 13365 return (vaddr); 13366 } 13367 13368 /* 13369 * VAC conflict state bit values. 13370 * The following defines are used to make the handling of the 13371 * various input states more concise. For that the kpm states 13372 * per kpm_page and per page are combined in a summary state. 13373 * Each single state has a corresponding bit value in the 13374 * summary state. These defines only apply for kpm large page 13375 * mappings. Within comments the abbreviations "kc, c, ks, s" 13376 * are used as short form of the actual state, e.g. "kc" for 13377 * "kp_refcntc > 0", etc. 13378 */ 13379 #define KPM_KC 0x00000008 /* kpm_page: kp_refcntc > 0 */ 13380 #define KPM_C 0x00000004 /* page: P_KPMC set */ 13381 #define KPM_KS 0x00000002 /* kpm_page: kp_refcnts > 0 */ 13382 #define KPM_S 0x00000001 /* page: P_KPMS set */ 13383 13384 /* 13385 * Summary states used in sfmmu_kpm_fault (KPM_TSBM_*). 13386 * See also more detailed comments within in the sfmmu_kpm_fault switch. 13387 * Abbreviations used: 13388 * CONFL: VAC conflict(s) within a kpm_page. 13389 * MAPS: Mapped small: Page mapped in using a regular page size kpm mapping. 13390 * RASM: Re-assembling of a large page mapping possible. 13391 * RPLS: Replace: TSB miss due to TSB replacement only. 13392 * BRKO: Breakup Other: A large kpm mapping has to be broken because another 13393 * page within the kpm_page is already involved in a VAC conflict. 13394 * BRKT: Breakup This: A large kpm mapping has to be broken, this page is 13395 * is involved in a VAC conflict. 
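 *
 * Example decoding (follows directly from the bit values above):
 * KPM_TSBM_MAPS_CONFL == (KPM_KC | KPM_C | KPM_KS) describes a tsbmiss
 * on a page that is itself marked conflicting (c) within a kpm_page
 * that has both conflicts (kc) and small mappings (ks), but which is
 * not yet mapped small itself (s clear), so it has to be mapped in
 * small.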
13396 */ 13397 #define KPM_TSBM_CONFL_GONE (0) 13398 #define KPM_TSBM_MAPS_RASM (KPM_KS) 13399 #define KPM_TSBM_RPLS_RASM (KPM_KS | KPM_S) 13400 #define KPM_TSBM_MAPS_BRKO (KPM_KC) 13401 #define KPM_TSBM_MAPS (KPM_KC | KPM_KS) 13402 #define KPM_TSBM_RPLS (KPM_KC | KPM_KS | KPM_S) 13403 #define KPM_TSBM_MAPS_BRKT (KPM_KC | KPM_C) 13404 #define KPM_TSBM_MAPS_CONFL (KPM_KC | KPM_C | KPM_KS) 13405 #define KPM_TSBM_RPLS_CONFL (KPM_KC | KPM_C | KPM_KS | KPM_S) 13406 13407 /* 13408 * kpm fault handler for mappings with large page size. 13409 */ 13410 int 13411 sfmmu_kpm_fault(caddr_t vaddr, struct memseg *mseg, page_t *pp) 13412 { 13413 int error; 13414 pgcnt_t inx; 13415 kpm_page_t *kp; 13416 tte_t tte; 13417 pfn_t pfn = pp->p_pagenum; 13418 kpm_hlk_t *kpmp; 13419 kmutex_t *pml; 13420 int alias_range; 13421 int uncached = 0; 13422 kmutex_t *pmtx; 13423 int badstate; 13424 uint_t tsbmcase; 13425 13426 alias_range = IS_KPM_ALIAS_RANGE(vaddr); 13427 13428 inx = ptokpmp(kpmptop(ptokpmp(pfn)) - mseg->kpm_pbase); 13429 if (inx >= mseg->kpm_nkpmpgs) { 13430 cmn_err(CE_PANIC, "sfmmu_kpm_fault: kpm overflow in memseg " 13431 "0x%p pp 0x%p", (void *)mseg, (void *)pp); 13432 } 13433 13434 kp = &mseg->kpm_pages[inx]; 13435 kpmp = KPMP_HASH(kp); 13436 13437 pml = sfmmu_mlist_enter(pp); 13438 13439 if (!PP_ISMAPPED_KPM(pp)) { 13440 sfmmu_mlist_exit(pml); 13441 return (EFAULT); 13442 } 13443 13444 mutex_enter(&kpmp->khl_mutex); 13445 13446 if (alias_range) { 13447 ASSERT(!PP_ISMAPPED_LARGE(pp)); 13448 if (kp->kp_refcnta > 0) { 13449 if (PP_ISKPMC(pp)) { 13450 pmtx = sfmmu_page_enter(pp); 13451 PP_CLRKPMC(pp); 13452 sfmmu_page_exit(pmtx); 13453 } 13454 /* 13455 * Check for vcolor conflicts. Return here 13456 * w/ either no conflict (fast path), removed hme 13457 * mapping chains (unload conflict) or uncached 13458 * (uncache conflict). VACaches are cleaned and 13459 * p_vcolor and PP_TNC are set accordingly for the 13460 * conflict cases. Drop kpmp for uncache conflict 13461 * cases since it will be grabbed within 13462 * sfmmu_kpm_page_cache in case of an uncache 13463 * conflict. 13464 */ 13465 mutex_exit(&kpmp->khl_mutex); 13466 sfmmu_kpm_vac_conflict(pp, vaddr); 13467 mutex_enter(&kpmp->khl_mutex); 13468 13469 if (PP_ISNC(pp)) { 13470 uncached = 1; 13471 pmtx = sfmmu_page_enter(pp); 13472 PP_SETKPMC(pp); 13473 sfmmu_page_exit(pmtx); 13474 } 13475 goto smallexit; 13476 13477 } else { 13478 /* 13479 * We got a tsbmiss on a not active kpm_page range. 13480 * Let segkpm_fault decide how to panic. 13481 */ 13482 error = EFAULT; 13483 } 13484 goto exit; 13485 } 13486 13487 badstate = (kp->kp_refcnt < 0 || kp->kp_refcnts < 0); 13488 if (kp->kp_refcntc == -1) { 13489 /* 13490 * We should come here only if trap level tsb miss 13491 * handler is disabled. 13492 */ 13493 badstate |= (kp->kp_refcnt == 0 || kp->kp_refcnts > 0 || 13494 PP_ISKPMC(pp) || PP_ISKPMS(pp) || PP_ISNC(pp)); 13495 13496 if (badstate == 0) 13497 goto largeexit; 13498 } 13499 13500 if (badstate || kp->kp_refcntc < 0) 13501 goto badstate_exit; 13502 13503 /* 13504 * Combine the per kpm_page and per page kpm VAC states to 13505 * a summary state in order to make the kpm fault handling 13506 * more concise. 13507 */ 13508 tsbmcase = (((kp->kp_refcntc > 0) ? KPM_KC : 0) | 13509 ((kp->kp_refcnts > 0) ? KPM_KS : 0) | 13510 (PP_ISKPMC(pp) ? KPM_C : 0) | 13511 (PP_ISKPMS(pp) ? 
KPM_S : 0)); 13512 13513 switch (tsbmcase) { 13514 case KPM_TSBM_CONFL_GONE: /* - - - - */ 13515 /* 13516 * That's fine, we either have no more vac conflict in 13517 * this kpm page or someone raced in and has solved the 13518 * vac conflict for us -- call sfmmu_kpm_vac_conflict 13519 * to take care for correcting the vcolor and flushing 13520 * the dcache if required. 13521 */ 13522 mutex_exit(&kpmp->khl_mutex); 13523 sfmmu_kpm_vac_conflict(pp, vaddr); 13524 mutex_enter(&kpmp->khl_mutex); 13525 13526 if (PP_ISNC(pp) || kp->kp_refcnt <= 0 || 13527 addr_to_vcolor(vaddr) != PP_GET_VCOLOR(pp)) { 13528 panic("sfmmu_kpm_fault: inconsistent CONFL_GONE " 13529 "state, pp=%p", (void *)pp); 13530 } 13531 goto largeexit; 13532 13533 case KPM_TSBM_MAPS_RASM: /* - - ks - */ 13534 /* 13535 * All conflicts in this kpm page are gone but there are 13536 * already small mappings around, so we also map this 13537 * page small. This could be the trigger case for a 13538 * small mapping reaper, if this is really needed. 13539 * For now fall thru to the KPM_TSBM_MAPS handling. 13540 */ 13541 13542 case KPM_TSBM_MAPS: /* kc - ks - */ 13543 /* 13544 * Large page mapping is already broken, this page is not 13545 * conflicting, so map it small. Call sfmmu_kpm_vac_conflict 13546 * to take care for correcting the vcolor and flushing 13547 * the dcache if required. 13548 */ 13549 mutex_exit(&kpmp->khl_mutex); 13550 sfmmu_kpm_vac_conflict(pp, vaddr); 13551 mutex_enter(&kpmp->khl_mutex); 13552 13553 if (PP_ISNC(pp) || kp->kp_refcnt <= 0 || 13554 addr_to_vcolor(vaddr) != PP_GET_VCOLOR(pp)) { 13555 panic("sfmmu_kpm_fault: inconsistent MAPS state, " 13556 "pp=%p", (void *)pp); 13557 } 13558 kp->kp_refcnt--; 13559 kp->kp_refcnts++; 13560 pmtx = sfmmu_page_enter(pp); 13561 PP_SETKPMS(pp); 13562 sfmmu_page_exit(pmtx); 13563 goto smallexit; 13564 13565 case KPM_TSBM_RPLS_RASM: /* - - ks s */ 13566 /* 13567 * All conflicts in this kpm page are gone but this page 13568 * is mapped small. This could be the trigger case for a 13569 * small mapping reaper, if this is really needed. 13570 * For now we drop it in small again. Fall thru to the 13571 * KPM_TSBM_RPLS handling. 13572 */ 13573 13574 case KPM_TSBM_RPLS: /* kc - ks s */ 13575 /* 13576 * Large page mapping is already broken, this page is not 13577 * conflicting but already mapped small, so drop it in 13578 * small again. 13579 */ 13580 if (PP_ISNC(pp) || 13581 addr_to_vcolor(vaddr) != PP_GET_VCOLOR(pp)) { 13582 panic("sfmmu_kpm_fault: inconsistent RPLS state, " 13583 "pp=%p", (void *)pp); 13584 } 13585 goto smallexit; 13586 13587 case KPM_TSBM_MAPS_BRKO: /* kc - - - */ 13588 /* 13589 * The kpm page where we live in is marked conflicting 13590 * but this page is not conflicting. So we have to map it 13591 * in small. Call sfmmu_kpm_vac_conflict to take care for 13592 * correcting the vcolor and flushing the dcache if required. 
13593 */ 13594 mutex_exit(&kpmp->khl_mutex); 13595 sfmmu_kpm_vac_conflict(pp, vaddr); 13596 mutex_enter(&kpmp->khl_mutex); 13597 13598 if (PP_ISNC(pp) || kp->kp_refcnt <= 0 || 13599 addr_to_vcolor(vaddr) != PP_GET_VCOLOR(pp)) { 13600 panic("sfmmu_kpm_fault: inconsistent MAPS_BRKO state, " 13601 "pp=%p", (void *)pp); 13602 } 13603 kp->kp_refcnt--; 13604 kp->kp_refcnts++; 13605 pmtx = sfmmu_page_enter(pp); 13606 PP_SETKPMS(pp); 13607 sfmmu_page_exit(pmtx); 13608 goto smallexit; 13609 13610 case KPM_TSBM_MAPS_BRKT: /* kc c - - */ 13611 case KPM_TSBM_MAPS_CONFL: /* kc c ks - */ 13612 if (!PP_ISMAPPED(pp)) { 13613 /* 13614 * We got a tsbmiss on kpm large page range that is 13615 * marked to contain vac conflicting pages introduced 13616 * by hme mappings. The hme mappings are all gone and 13617 * must have bypassed the kpm alias prevention logic. 13618 */ 13619 panic("sfmmu_kpm_fault: stale VAC conflict, pp=%p", 13620 (void *)pp); 13621 } 13622 13623 /* 13624 * Check for vcolor conflicts. Return here w/ either no 13625 * conflict (fast path), removed hme mapping chains 13626 * (unload conflict) or uncached (uncache conflict). 13627 * Dcache is cleaned and p_vcolor and P_TNC are set 13628 * accordingly. Drop kpmp for uncache conflict cases 13629 * since it will be grabbed within sfmmu_kpm_page_cache 13630 * in case of an uncache conflict. 13631 */ 13632 mutex_exit(&kpmp->khl_mutex); 13633 sfmmu_kpm_vac_conflict(pp, vaddr); 13634 mutex_enter(&kpmp->khl_mutex); 13635 13636 if (kp->kp_refcnt <= 0) 13637 panic("sfmmu_kpm_fault: bad refcnt kp=%p", (void *)kp); 13638 13639 if (PP_ISNC(pp)) { 13640 uncached = 1; 13641 } else { 13642 /* 13643 * When an unload conflict is solved and there are 13644 * no other small mappings around, we can resume 13645 * largepage mode. Otherwise we have to map or drop 13646 * in small. This could be a trigger for a small 13647 * mapping reaper when this was the last conflict 13648 * within the kpm page and when there are only 13649 * other small mappings around. 13650 */ 13651 ASSERT(addr_to_vcolor(vaddr) == PP_GET_VCOLOR(pp)); 13652 ASSERT(kp->kp_refcntc > 0); 13653 kp->kp_refcntc--; 13654 pmtx = sfmmu_page_enter(pp); 13655 PP_CLRKPMC(pp); 13656 sfmmu_page_exit(pmtx); 13657 ASSERT(PP_ISKPMS(pp) == 0); 13658 if (kp->kp_refcntc == 0 && kp->kp_refcnts == 0) 13659 goto largeexit; 13660 } 13661 13662 kp->kp_refcnt--; 13663 kp->kp_refcnts++; 13664 pmtx = sfmmu_page_enter(pp); 13665 PP_SETKPMS(pp); 13666 sfmmu_page_exit(pmtx); 13667 goto smallexit; 13668 13669 case KPM_TSBM_RPLS_CONFL: /* kc c ks s */ 13670 if (!PP_ISMAPPED(pp)) { 13671 /* 13672 * We got a tsbmiss on kpm large page range that is 13673 * marked to contain vac conflicting pages introduced 13674 * by hme mappings. They are all gone and must have 13675 * somehow bypassed the kpm alias prevention logic. 13676 */ 13677 panic("sfmmu_kpm_fault: stale VAC conflict, pp=%p", 13678 (void *)pp); 13679 } 13680 13681 /* 13682 * This state is only possible for an uncached mapping. 
13683 */ 13684 if (!PP_ISNC(pp)) { 13685 panic("sfmmu_kpm_fault: page not uncached, pp=%p", 13686 (void *)pp); 13687 } 13688 uncached = 1; 13689 goto smallexit; 13690 13691 default: 13692 badstate_exit: 13693 panic("sfmmu_kpm_fault: inconsistent VAC state, vaddr=%p kp=%p " 13694 "pp=%p", (void *)vaddr, (void *)kp, (void *)pp); 13695 } 13696 13697 smallexit: 13698 /* tte assembly */ 13699 if (uncached == 0) 13700 KPM_TTE_VCACHED(tte.ll, pfn, TTE8K); 13701 else 13702 KPM_TTE_VUNCACHED(tte.ll, pfn, TTE8K); 13703 13704 /* tsb dropin */ 13705 sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT); 13706 13707 error = 0; 13708 goto exit; 13709 13710 largeexit: 13711 if (kp->kp_refcnt > 0) { 13712 13713 /* tte assembly */ 13714 KPM_TTE_VCACHED(tte.ll, pfn, TTE4M); 13715 13716 /* tsb dropin */ 13717 sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT4M); 13718 13719 if (kp->kp_refcntc == 0) { 13720 /* Set "go" flag for TL tsbmiss handler */ 13721 sfmmu_kpm_tsbmtl(&kp->kp_refcntc, &kpmp->khl_lock, 13722 KPMTSBM_START); 13723 } 13724 ASSERT(kp->kp_refcntc == -1); 13725 error = 0; 13726 13727 } else 13728 error = EFAULT; 13729 exit: 13730 mutex_exit(&kpmp->khl_mutex); 13731 sfmmu_mlist_exit(pml); 13732 return (error); 13733 } 13734 13735 /* 13736 * kpm fault handler for mappings with small page size. 13737 */ 13738 int 13739 sfmmu_kpm_fault_small(caddr_t vaddr, struct memseg *mseg, page_t *pp) 13740 { 13741 int error = 0; 13742 pgcnt_t inx; 13743 kpm_spage_t *ksp; 13744 kpm_shlk_t *kpmsp; 13745 kmutex_t *pml; 13746 pfn_t pfn = pp->p_pagenum; 13747 tte_t tte; 13748 kmutex_t *pmtx; 13749 int oldval; 13750 13751 inx = pfn - mseg->kpm_pbase; 13752 ksp = &mseg->kpm_spages[inx]; 13753 kpmsp = KPMP_SHASH(ksp); 13754 13755 pml = sfmmu_mlist_enter(pp); 13756 13757 if (!PP_ISMAPPED_KPM(pp)) { 13758 sfmmu_mlist_exit(pml); 13759 return (EFAULT); 13760 } 13761 13762 /* 13763 * kp_mapped lookup protected by mlist mutex 13764 */ 13765 if (ksp->kp_mapped == KPM_MAPPEDS) { 13766 /* 13767 * Fast path tsbmiss 13768 */ 13769 ASSERT(!PP_ISKPMC(pp)); 13770 ASSERT(!PP_ISNC(pp)); 13771 13772 /* tte assembly */ 13773 KPM_TTE_VCACHED(tte.ll, pfn, TTE8K); 13774 13775 /* tsb dropin */ 13776 sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT); 13777 13778 } else if (ksp->kp_mapped == KPM_MAPPEDSC) { 13779 /* 13780 * Got here due to existing or gone kpm/hme VAC conflict. 13781 * Recheck for vcolor conflicts. Return here w/ either 13782 * no conflict, removed hme mapping chain (unload 13783 * conflict) or uncached (uncache conflict). VACaches 13784 * are cleaned and p_vcolor and PP_TNC are set accordingly 13785 * for the conflict cases. 13786 */ 13787 sfmmu_kpm_vac_conflict(pp, vaddr); 13788 13789 if (PP_ISNC(pp)) { 13790 /* ASSERT(pp->p_share); XXX use hat_page_getshare */ 13791 13792 /* tte assembly */ 13793 KPM_TTE_VUNCACHED(tte.ll, pfn, TTE8K); 13794 13795 /* tsb dropin */ 13796 sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT); 13797 13798 } else { 13799 if (PP_ISKPMC(pp)) { 13800 pmtx = sfmmu_page_enter(pp); 13801 PP_CLRKPMC(pp); 13802 sfmmu_page_exit(pmtx); 13803 } 13804 13805 /* tte assembly */ 13806 KPM_TTE_VCACHED(tte.ll, pfn, TTE8K); 13807 13808 /* tsb dropin */ 13809 sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT); 13810 13811 oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped, 13812 &kpmsp->kshl_lock, KPM_MAPPEDS); 13813 13814 if (oldval != KPM_MAPPEDSC) 13815 panic("sfmmu_kpm_fault_small: " 13816 "stale smallpages mapping"); 13817 } 13818 13819 } else { 13820 /* 13821 * We got a tsbmiss on a not active kpm_page range. 
/*
 * kpm fault handler for mappings with small page size.
 */
int
sfmmu_kpm_fault_small(caddr_t vaddr, struct memseg *mseg, page_t *pp)
{
	int error = 0;
	pgcnt_t inx;
	kpm_spage_t *ksp;
	kpm_shlk_t *kpmsp;
	kmutex_t *pml;
	pfn_t pfn = pp->p_pagenum;
	tte_t tte;
	kmutex_t *pmtx;
	int oldval;

	inx = pfn - mseg->kpm_pbase;
	ksp = &mseg->kpm_spages[inx];
	kpmsp = KPMP_SHASH(ksp);

	pml = sfmmu_mlist_enter(pp);

	if (!PP_ISMAPPED_KPM(pp)) {
		sfmmu_mlist_exit(pml);
		return (EFAULT);
	}

	/*
	 * kp_mapped lookup protected by mlist mutex
	 */
	if (ksp->kp_mapped == KPM_MAPPEDS) {
		/*
		 * Fast path tsbmiss
		 */
		ASSERT(!PP_ISKPMC(pp));
		ASSERT(!PP_ISNC(pp));

		/* tte assembly */
		KPM_TTE_VCACHED(tte.ll, pfn, TTE8K);

		/* tsb dropin */
		sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);

	} else if (ksp->kp_mapped == KPM_MAPPEDSC) {
		/*
		 * Got here due to an existing or already removed kpm/hme
		 * VAC conflict. Recheck for vcolor conflicts. Return here
		 * w/ either no conflict, removed hme mapping chain (unload
		 * conflict) or uncached (uncache conflict). VACaches are
		 * cleaned and p_vcolor and PP_TNC are set accordingly
		 * for the conflict cases.
		 */
		sfmmu_kpm_vac_conflict(pp, vaddr);

		if (PP_ISNC(pp)) {
			/* ASSERT(pp->p_share); XXX use hat_page_getshare */

			/* tte assembly */
			KPM_TTE_VUNCACHED(tte.ll, pfn, TTE8K);

			/* tsb dropin */
			sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);

		} else {
			if (PP_ISKPMC(pp)) {
				pmtx = sfmmu_page_enter(pp);
				PP_CLRKPMC(pp);
				sfmmu_page_exit(pmtx);
			}

			/* tte assembly */
			KPM_TTE_VCACHED(tte.ll, pfn, TTE8K);

			/* tsb dropin */
			sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);

			oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped,
			    &kpmsp->kshl_lock, KPM_MAPPEDS);

			if (oldval != KPM_MAPPEDSC)
				panic("sfmmu_kpm_fault_small: "
				    "stale smallpages mapping");
		}

	} else {
		/*
		 * We got a tsbmiss on an inactive kpm_page range.
		 * Let segkpm_fault decide how to panic.
		 */
		error = EFAULT;
	}

	sfmmu_mlist_exit(pml);
	return (error);
}

/*
 * Check/handle potential hme/kpm mapping conflicts
 */
static void
sfmmu_kpm_vac_conflict(page_t *pp, caddr_t vaddr)
{
	int vcolor;
	struct sf_hment *sfhmep;
	struct hat *tmphat;
	struct sf_hment *tmphme = NULL;
	struct hme_blk *hmeblkp;
	tte_t tte;

	ASSERT(sfmmu_mlist_held(pp));

	if (PP_ISNC(pp))
		return;

	vcolor = addr_to_vcolor(vaddr);
	if (PP_GET_VCOLOR(pp) == vcolor)
		return;

	/*
	 * There can be no vcolor conflict between a large cached
	 * hme page and a non alias range kpm page (neither large nor
	 * small mapped). So if a hme conflict already exists between
	 * a constituent page of a large hme mapping and a shared small
	 * conflicting hme mapping, both mappings must already be
	 * uncached at this point.
	 */
	ASSERT(!PP_ISMAPPED_LARGE(pp));

	if (!PP_ISMAPPED(pp)) {
		/*
		 * The previous hme user of the page had a different color,
		 * but since there are no current users
		 * we just flush the cache and change the color.
		 */
		SFMMU_STAT(sf_pgcolor_conflict);
		sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp));
		PP_SET_VCOLOR(pp, vcolor);
		return;
	}

	/*
	 * If we get here we have a vac conflict with a current hme
	 * mapping. This must have been established by forcing a wrong
	 * colored mapping, e.g. by using mmap(2) with MAP_FIXED.
	 */

	/*
	 * Check if any mapping is in the same address space or if it
	 * is locked, since in that case we need to uncache.
	 */
	for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
		tmphme = sfhmep->hme_next;
		hmeblkp = sfmmu_hmetohblk(sfhmep);
		if (hmeblkp->hblk_xhat_bit)
			continue;
		tmphat = hblktosfmmu(hmeblkp);
		sfmmu_copytte(&sfhmep->hme_tte, &tte);
		ASSERT(TTE_IS_VALID(&tte));
		if ((tmphat == ksfmmup) || hmeblkp->hblk_lckcnt) {
			/*
			 * We have an uncache conflict
			 */
			SFMMU_STAT(sf_uncache_conflict);
			sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 1);
			return;
		}
	}

	/*
	 * We have an unload conflict
	 */
	SFMMU_STAT(sf_unload_conflict);

	for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
		tmphme = sfhmep->hme_next;
		hmeblkp = sfmmu_hmetohblk(sfhmep);
		if (hmeblkp->hblk_xhat_bit)
			continue;
		(void) sfmmu_pageunload(pp, sfhmep, TTE8K);
	}

	/*
	 * Unloads only do tlb flushes, so we need to flush the
	 * dcache vcolor here.
	 */
	sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp));
	PP_SET_VCOLOR(pp, vcolor);
}

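/*
 * Worked example (illustrative only): assume a user created a wrongly
 * colored mapping of a kpm mapped page, e.g. via mmap(2) with MAP_FIXED.
 * If none of the hme mappings belongs to the kernel hat and none is
 * locked, sfmmu_kpm_vac_conflict() above resolves it as an unload
 * conflict and finishes with:
 *
 *	sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp));
 *	PP_SET_VCOLOR(pp, addr_to_vcolor(vaddr));
 *
 * Otherwise the page is uncached via
 * sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 1).
 */
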
/*
 * Remove all kpm mappings using kpme's for pp and check that
 * all kpm mappings (w/ and w/o kpme's) are gone.
 */
static void
sfmmu_kpm_pageunload(page_t *pp)
{
	caddr_t vaddr;
	struct kpme *kpme, *nkpme;

	ASSERT(pp != NULL);
	ASSERT(pp->p_kpmref);
	ASSERT(sfmmu_mlist_held(pp));

	vaddr = hat_kpm_page2va(pp, 1);

	for (kpme = pp->p_kpmelist; kpme; kpme = nkpme) {
		ASSERT(kpme->kpe_page == pp);

		if (pp->p_kpmref == 0)
			panic("sfmmu_kpm_pageunload: stale p_kpmref pp=%p "
			    "kpme=%p", (void *)pp, (void *)kpme);

		nkpme = kpme->kpe_next;

		/* Add instance callback here if needed later */
		sfmmu_kpme_sub(kpme, pp);
	}

	/*
	 * Also correct after mixed kpme/nonkpme mappings. If nonkpme
	 * segkpm clients have unlocked the page and forgotten to mapout
	 * we panic here.
	 */
	if (pp->p_kpmref != 0)
		panic("sfmmu_kpm_pageunload: bad refcnt pp=%p", (void *)pp);

	sfmmu_kpm_mapout(pp, vaddr);
}

/*
 * Remove a large kpm mapping from kernel TSB and all TLB's.
 */
static void
sfmmu_kpm_demap_large(caddr_t vaddr)
{
	sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT4M);
	sfmmu_kpm_demap_tlbs(vaddr, KCONTEXT);
}

/*
 * Remove a small kpm mapping from kernel TSB and all TLB's.
 */
static void
sfmmu_kpm_demap_small(caddr_t vaddr)
{
	sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT);
	sfmmu_kpm_demap_tlbs(vaddr, KCONTEXT);
}

/*
 * Demap a kpm mapping in all TLB's.
 */
static void
sfmmu_kpm_demap_tlbs(caddr_t vaddr, int ctxnum)
{
	cpuset_t cpuset;

	kpreempt_disable();
	cpuset = ksfmmup->sfmmu_cpusran;
	CPUSET_AND(cpuset, cpu_ready_set);
	CPUSET_DEL(cpuset, CPU->cpu_id);
	SFMMU_XCALL_STATS(ctxnum);
	xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)vaddr, ctxnum);
	vtag_flushpage(vaddr, ctxnum);
	kpreempt_enable();
}

/*
 * Summary states used in sfmmu_kpm_vac_unload (KPM_VUL_*).
 * See also the more detailed comments within the sfmmu_kpm_vac_unload switch.
 * Abbreviations used:
 * BIG: Large page kpm mapping in use.
 * CONFL: VAC conflict(s) within a kpm_page.
 * INCR: Count of conflicts within a kpm_page is going to be incremented.
 * DECR: Count of conflicts within a kpm_page is going to be decremented.
 * UNMAP_SMALL: A small (regular page size) mapping is going to be unmapped.
 * TNC: Temporary non cached: a kpm mapped page is mapped in TNC state.
 */
#define	KPM_VUL_BIG		(0)
#define	KPM_VUL_CONFL_INCR1	(KPM_KS)
#define	KPM_VUL_UNMAP_SMALL1	(KPM_KS | KPM_S)
#define	KPM_VUL_CONFL_INCR2	(KPM_KC)
#define	KPM_VUL_CONFL_INCR3	(KPM_KC | KPM_KS)
#define	KPM_VUL_UNMAP_SMALL2	(KPM_KC | KPM_KS | KPM_S)
#define	KPM_VUL_CONFL_DECR1	(KPM_KC | KPM_C)
#define	KPM_VUL_CONFL_DECR2	(KPM_KC | KPM_C | KPM_KS)
#define	KPM_VUL_TNC		(KPM_KC | KPM_C | KPM_KS | KPM_S)

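/*
 * Example (illustrative, not from the original source): the summary state
 * is assembled below from the kpm_page counters and the per-page flags:
 *
 *	vacunlcase = ((kp->kp_refcntc > 0) ? KPM_KC : 0) |
 *	    ((kp->kp_refcnts > 0) ? KPM_KS : 0) |
 *	    (PP_ISKPMC(pp) ? KPM_C : 0) |
 *	    (PP_ISKPMS(pp) ? KPM_S : 0);
 *
 * A page with P_KPMC set, residing in a kpm_page with kp_refcntc > 0 and
 * no small kpm mappings, therefore yields (KPM_KC | KPM_C), i.e.
 * KPM_VUL_CONFL_DECR1.
 */
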
/*
 * Handle VAC unload conflicts introduced by hme mappings or vice
 * versa when a hme conflict mapping is replaced by a non conflict
 * one. Perform actions and state transitions according to the
 * various page and kpm_page entry states. VACache flushes are
 * the responsibility of the caller. We still hold the mlist lock.
 */
static void
sfmmu_kpm_vac_unload(page_t *pp, caddr_t vaddr)
{
	kpm_page_t *kp;
	kpm_hlk_t *kpmp;
	caddr_t kpmvaddr = hat_kpm_page2va(pp, 1);
	int newcolor;
	kmutex_t *pmtx;
	uint_t vacunlcase;
	int badstate = 0;
	kpm_spage_t *ksp;
	kpm_shlk_t *kpmsp;

	ASSERT(PAGE_LOCKED(pp));
	ASSERT(sfmmu_mlist_held(pp));
	ASSERT(!PP_ISNC(pp));

	newcolor = addr_to_vcolor(kpmvaddr) != addr_to_vcolor(vaddr);
	if (kpm_smallpages)
		goto smallpages_vac_unload;

	PP2KPMPG(pp, kp);
	kpmp = KPMP_HASH(kp);
	mutex_enter(&kpmp->khl_mutex);

	if (IS_KPM_ALIAS_RANGE(kpmvaddr)) {
		if (kp->kp_refcnta < 1) {
			panic("sfmmu_kpm_vac_unload: bad refcnta kpm_page=%p\n",
			    (void *)kp);
		}

		if (PP_ISKPMC(pp) == 0) {
			if (newcolor == 0)
				goto exit;
			sfmmu_kpm_demap_small(kpmvaddr);
			pmtx = sfmmu_page_enter(pp);
			PP_SETKPMC(pp);
			sfmmu_page_exit(pmtx);

		} else if (newcolor == 0) {
			pmtx = sfmmu_page_enter(pp);
			PP_CLRKPMC(pp);
			sfmmu_page_exit(pmtx);

		} else {
			badstate++;
		}

		goto exit;
	}

	badstate = (kp->kp_refcnt < 0 || kp->kp_refcnts < 0);
	if (kp->kp_refcntc == -1) {
		/*
		 * We should come here only if the trap level tsb miss
		 * handler is disabled.
		 */
		badstate |= (kp->kp_refcnt == 0 || kp->kp_refcnts > 0 ||
		    PP_ISKPMC(pp) || PP_ISKPMS(pp) || PP_ISNC(pp));
	} else {
		badstate |= (kp->kp_refcntc < 0);
	}

	if (badstate)
		goto exit;

	if (PP_ISKPMC(pp) == 0 && newcolor == 0) {
		ASSERT(PP_ISKPMS(pp) == 0);
		goto exit;
	}

	/*
	 * Combine the per kpm_page and per page kpm VAC states
	 * to a summary state in order to make the vac unload
	 * handling more concise.
	 */
	vacunlcase = (((kp->kp_refcntc > 0) ? KPM_KC : 0) |
	    ((kp->kp_refcnts > 0) ? KPM_KS : 0) |
	    (PP_ISKPMC(pp) ? KPM_C : 0) |
	    (PP_ISKPMS(pp) ? KPM_S : 0));

	switch (vacunlcase) {
	case KPM_VUL_BIG:			/* - - - - */
		/*
		 * Have to break up the large page mapping to be
		 * able to handle the conflicting hme vaddr.
		 */
		if (kp->kp_refcntc == -1) {
			/* remove go indication */
			sfmmu_kpm_tsbmtl(&kp->kp_refcntc,
			    &kpmp->khl_lock, KPMTSBM_STOP);
		}
		sfmmu_kpm_demap_large(kpmvaddr);

		ASSERT(kp->kp_refcntc == 0);
		kp->kp_refcntc++;
		pmtx = sfmmu_page_enter(pp);
		PP_SETKPMC(pp);
		sfmmu_page_exit(pmtx);
		break;

	case KPM_VUL_UNMAP_SMALL1:		/* - - ks s */
	case KPM_VUL_UNMAP_SMALL2:		/* kc - ks s */
		/*
		 * New conflict w/ an active kpm page, actually mapped
		 * in by small TSB/TLB entries. Remove the mapping and
		 * update states.
		 */
		ASSERT(newcolor);
		sfmmu_kpm_demap_small(kpmvaddr);
		kp->kp_refcnts--;
		kp->kp_refcnt++;
		kp->kp_refcntc++;
		pmtx = sfmmu_page_enter(pp);
		PP_CLRKPMS(pp);
		PP_SETKPMC(pp);
		sfmmu_page_exit(pmtx);
		break;

	case KPM_VUL_CONFL_INCR1:		/* - - ks - */
	case KPM_VUL_CONFL_INCR2:		/* kc - - - */
	case KPM_VUL_CONFL_INCR3:		/* kc - ks - */
		/*
		 * New conflict on an active kpm mapped page not yet in
		 * TSB/TLB. Mark the page and increment the kpm_page
		 * conflict count.
		 */
		ASSERT(newcolor);
		kp->kp_refcntc++;
		pmtx = sfmmu_page_enter(pp);
		PP_SETKPMC(pp);
		sfmmu_page_exit(pmtx);
		break;

	case KPM_VUL_CONFL_DECR1:		/* kc c - - */
	case KPM_VUL_CONFL_DECR2:		/* kc c ks - */
		/*
		 * A conflicting hme mapping is removed for an active
		 * kpm page not yet in TSB/TLB. Unmark the page and
		 * decrement the kpm_page conflict count.
		 */
		ASSERT(newcolor == 0);
		kp->kp_refcntc--;
		pmtx = sfmmu_page_enter(pp);
		PP_CLRKPMC(pp);
		sfmmu_page_exit(pmtx);
		break;

	case KPM_VUL_TNC:			/* kc c ks s */
		cmn_err(CE_NOTE, "sfmmu_kpm_vac_unload: "
		    "page not in NC state");
		/* FALLTHRU */

	default:
		badstate++;
	}
exit:
	if (badstate) {
		panic("sfmmu_kpm_vac_unload: inconsistent VAC state, "
		    "kpmvaddr=%p kp=%p pp=%p",
		    (void *)kpmvaddr, (void *)kp, (void *)pp);
	}
	mutex_exit(&kpmp->khl_mutex);

	return;

smallpages_vac_unload:
	if (newcolor == 0)
		return;

	PP2KPMSPG(pp, ksp);
	kpmsp = KPMP_SHASH(ksp);

	if (PP_ISKPMC(pp) == 0) {
		if (ksp->kp_mapped == KPM_MAPPEDS) {
			/*
			 * Stop TL tsbmiss handling
			 */
			(void) sfmmu_kpm_stsbmtl(&ksp->kp_mapped,
			    &kpmsp->kshl_lock, KPM_MAPPEDSC);

			sfmmu_kpm_demap_small(kpmvaddr);

		} else if (ksp->kp_mapped != KPM_MAPPEDSC) {
			panic("sfmmu_kpm_vac_unload: inconsistent mapping");
		}

		pmtx = sfmmu_page_enter(pp);
		PP_SETKPMC(pp);
		sfmmu_page_exit(pmtx);

	} else {
		if (ksp->kp_mapped != KPM_MAPPEDSC)
			panic("sfmmu_kpm_vac_unload: inconsistent mapping");
	}
}

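/*
 * Illustrative note (not part of the original code): in the kpm_smallpages
 * case above a new color conflict is handled by first switching the
 * per-page state to KPM_MAPPEDSC, which stops the trap level tsbmiss
 * handler, and only then demapping the small kpm translation:
 *
 *	(void) sfmmu_kpm_stsbmtl(&ksp->kp_mapped, &kpmsp->kshl_lock,
 *	    KPM_MAPPEDSC);
 *	sfmmu_kpm_demap_small(kpmvaddr);
 *
 * The next kpm tsbmiss on this page is then resolved by the regular
 * (non trap level) kpm fault path.
 */
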
/*
 * Page is marked to be in VAC conflict with an existing kpm mapping
 * or is kpm mapped using only the regular pagesize. Called from
 * sfmmu_hblk_unload when a mlist is completely removed.
 */
static void
sfmmu_kpm_hme_unload(page_t *pp)
{
	/* tte assembly */
	kpm_page_t *kp;
	kpm_hlk_t *kpmp;
	caddr_t vaddr;
	kmutex_t *pmtx;
	uint_t flags;
	kpm_spage_t *ksp;

	ASSERT(sfmmu_mlist_held(pp));
	ASSERT(PP_ISMAPPED_KPM(pp));

	flags = pp->p_nrm & (P_KPMC | P_KPMS);
	if (kpm_smallpages)
		goto smallpages_hme_unload;

	if (flags == (P_KPMC | P_KPMS)) {
		panic("sfmmu_kpm_hme_unload: page should be uncached");

	} else if (flags == P_KPMS) {
		/*
		 * Page mapped small but not involved in VAC conflict
		 */
		return;
	}

	vaddr = hat_kpm_page2va(pp, 1);

	PP2KPMPG(pp, kp);
	kpmp = KPMP_HASH(kp);
	mutex_enter(&kpmp->khl_mutex);

	if (IS_KPM_ALIAS_RANGE(vaddr)) {
		if (kp->kp_refcnta < 1) {
			panic("sfmmu_kpm_hme_unload: bad refcnta kpm_page=%p\n",
			    (void *)kp);
		}

	} else {
		if (kp->kp_refcntc < 1) {
			panic("sfmmu_kpm_hme_unload: bad refcntc kpm_page=%p\n",
			    (void *)kp);
		}
		kp->kp_refcntc--;
	}

	pmtx = sfmmu_page_enter(pp);
	PP_CLRKPMC(pp);
	sfmmu_page_exit(pmtx);

	mutex_exit(&kpmp->khl_mutex);
	return;

smallpages_hme_unload:
	if (flags != P_KPMC)
		panic("sfmmu_kpm_hme_unload: page should be uncached");

	vaddr = hat_kpm_page2va(pp, 1);
	PP2KPMSPG(pp, ksp);

	if (ksp->kp_mapped != KPM_MAPPEDSC)
		panic("sfmmu_kpm_hme_unload: inconsistent mapping");

	/*
	 * Keep KPM_MAPPEDSC until the next kpm tsbmiss where it
	 * prevents TL tsbmiss handling and forces a hat_kpm_fault.
	 * There we can start over again.
	 */

	pmtx = sfmmu_page_enter(pp);
	PP_CLRKPMC(pp);
	sfmmu_page_exit(pmtx);
}

/*
 * Special hooks for sfmmu_page_cache_array() when changing the
 * cacheability of a page. It is used to obey the hat_kpm lock
 * ordering (mlist -> kpmp -> spl, and back).
 */
static kpm_hlk_t *
sfmmu_kpm_kpmp_enter(page_t *pp, pgcnt_t npages)
{
	kpm_page_t *kp;
	kpm_hlk_t *kpmp;

	ASSERT(sfmmu_mlist_held(pp));

	if (kpm_smallpages || PP_ISMAPPED_KPM(pp) == 0)
		return (NULL);

	ASSERT(npages <= kpmpnpgs);

	PP2KPMPG(pp, kp);
	kpmp = KPMP_HASH(kp);
	mutex_enter(&kpmp->khl_mutex);

	return (kpmp);
}

static void
sfmmu_kpm_kpmp_exit(kpm_hlk_t *kpmp)
{
	if (kpm_smallpages || kpmp == NULL)
		return;

	mutex_exit(&kpmp->khl_mutex);
}

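/*
 * Usage sketch (illustrative only): a caller changing page cacheability is
 * expected to bracket its work with the hooks above so that the hat_kpm
 * lock order mlist -> kpmp -> spl is preserved:
 *
 *	kpmp = sfmmu_kpm_kpmp_enter(pp, npages);
 *	... acquire the spl lock, adjust cacheability ...
 *	sfmmu_kpm_kpmp_exit(kpmp);
 *
 * With kpm_smallpages, or when the page has no kpm mapping at all, kpmp is
 * NULL and both calls degenerate to no-ops.
 */
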
/*
 * Summary states used in sfmmu_kpm_page_cache (KPM_*).
 * See also the more detailed comments within the sfmmu_kpm_page_cache switch.
 * Abbreviations used:
 * UNC: Input state for an uncache request.
 * BIG: Large page kpm mapping in use.
 * SMALL: Page has a small kpm mapping within a kpm_page range.
 * NODEMAP: No demap needed.
 * NOP: No operation needed on this input state.
 * CACHE: Input state for a re-cache request.
 * MAPS: Page is in TNC and kpm VAC conflict state and kpm mapped small.
 * NOMAP: Page is in TNC and kpm VAC conflict state, but not small kpm
 *	mapped.
 * NOMAPO: Page is in TNC and kpm VAC conflict state, but not small kpm
 *	mapped. There are also other small kpm mappings within this
 *	kpm_page.
 */
#define	KPM_UNC_BIG		(0)
#define	KPM_UNC_NODEMAP1	(KPM_KS)
#define	KPM_UNC_SMALL1		(KPM_KS | KPM_S)
#define	KPM_UNC_NODEMAP2	(KPM_KC)
#define	KPM_UNC_NODEMAP3	(KPM_KC | KPM_KS)
#define	KPM_UNC_SMALL2		(KPM_KC | KPM_KS | KPM_S)
#define	KPM_UNC_NOP1		(KPM_KC | KPM_C)
#define	KPM_UNC_NOP2		(KPM_KC | KPM_C | KPM_KS)
#define	KPM_CACHE_NOMAP		(KPM_KC | KPM_C)
#define	KPM_CACHE_NOMAPO	(KPM_KC | KPM_C | KPM_KS)
#define	KPM_CACHE_MAPS		(KPM_KC | KPM_C | KPM_KS | KPM_S)

/*
 * This function is called when the virtual cacheability of a page
 * is changed and the page has an active kpm mapping. The mlist mutex,
 * the spl hash lock and the kpmp mutex (if needed) are already grabbed.
 */
static void
sfmmu_kpm_page_cache(page_t *pp, int flags, int cache_flush_tag)
{
	kpm_page_t *kp;
	kpm_hlk_t *kpmp;
	caddr_t kpmvaddr;
	int badstate = 0;
	uint_t pgcacase;
	kpm_spage_t *ksp;
	kpm_shlk_t *kpmsp;
	int oldval;

	ASSERT(PP_ISMAPPED_KPM(pp));
	ASSERT(sfmmu_mlist_held(pp));
	ASSERT(sfmmu_page_spl_held(pp));

	if (flags != HAT_TMPNC && flags != HAT_CACHE)
		panic("sfmmu_kpm_page_cache: bad flags");

	kpmvaddr = hat_kpm_page2va(pp, 1);

	if (flags == HAT_TMPNC && cache_flush_tag == CACHE_FLUSH) {
		pfn_t pfn = pp->p_pagenum;
		int vcolor = addr_to_vcolor(kpmvaddr);
		cpuset_t cpuset = cpu_ready_set;

		/* Flush vcolor in DCache */
		CPUSET_DEL(cpuset, CPU->cpu_id);
		SFMMU_XCALL_STATS(ksfmmup->sfmmu_cnum);
		xt_some(cpuset, vac_flushpage_tl1, pfn, vcolor);
		vac_flushpage(pfn, vcolor);
	}

	if (kpm_smallpages)
		goto smallpages_page_cache;

	PP2KPMPG(pp, kp);
	kpmp = KPMP_HASH(kp);
	ASSERT(MUTEX_HELD(&kpmp->khl_mutex));

	if (IS_KPM_ALIAS_RANGE(kpmvaddr)) {
		if (kp->kp_refcnta < 1) {
			panic("sfmmu_kpm_page_cache: bad refcnta "
			    "kpm_page=%p\n", (void *)kp);
		}
		sfmmu_kpm_demap_small(kpmvaddr);
		if (flags == HAT_TMPNC) {
			PP_SETKPMC(pp);
			ASSERT(!PP_ISKPMS(pp));
		} else {
			ASSERT(PP_ISKPMC(pp));
			PP_CLRKPMC(pp);
		}
		goto exit;
	}

	badstate = (kp->kp_refcnt < 0 || kp->kp_refcnts < 0);
	if (kp->kp_refcntc == -1) {
		/*
		 * We should come here only if the trap level tsb miss
		 * handler is disabled.
		 */
		badstate |= (kp->kp_refcnt == 0 || kp->kp_refcnts > 0 ||
		    PP_ISKPMC(pp) || PP_ISKPMS(pp) || PP_ISNC(pp));
	} else {
		badstate |= (kp->kp_refcntc < 0);
	}

	if (badstate)
		goto exit;

	/*
	 * Combine the per kpm_page and per page kpm VAC states to
	 * a summary state in order to make the VAC cache/uncache
	 * handling more concise.
	 */
	pgcacase = (((kp->kp_refcntc > 0) ? KPM_KC : 0) |
	    ((kp->kp_refcnts > 0) ? KPM_KS : 0) |
	    (PP_ISKPMC(pp) ? KPM_C : 0) |
	    (PP_ISKPMS(pp) ? KPM_S : 0));

	if (flags == HAT_CACHE) {
		switch (pgcacase) {
		case KPM_CACHE_MAPS:			/* kc c ks s */
			sfmmu_kpm_demap_small(kpmvaddr);
			if (kp->kp_refcnts < 1) {
				panic("sfmmu_kpm_page_cache: bad refcnts "
				    "kpm_page=%p\n", (void *)kp);
			}
			kp->kp_refcnts--;
			kp->kp_refcnt++;
			PP_CLRKPMS(pp);
			/* FALLTHRU */

		case KPM_CACHE_NOMAP:			/* kc c - - */
		case KPM_CACHE_NOMAPO:			/* kc c ks - */
			kp->kp_refcntc--;
			PP_CLRKPMC(pp);
			break;

		default:
			badstate++;
		}
		goto exit;
	}

	switch (pgcacase) {
	case KPM_UNC_BIG:			/* - - - - */
		if (kp->kp_refcnt < 1) {
			panic("sfmmu_kpm_page_cache: bad refcnt "
			    "kpm_page=%p\n", (void *)kp);
		}

		/*
		 * Have to break up the large page mapping in preparation
		 * for the upcoming TNC mode handled by small mappings.
		 * The demap may already have been done due to another
		 * conflict within the kpm_page.
		 */
		if (kp->kp_refcntc == -1) {
			/* remove go indication */
			sfmmu_kpm_tsbmtl(&kp->kp_refcntc,
			    &kpmp->khl_lock, KPMTSBM_STOP);
		}
		ASSERT(kp->kp_refcntc == 0);
		sfmmu_kpm_demap_large(kpmvaddr);
		kp->kp_refcntc++;
		PP_SETKPMC(pp);
		break;

	case KPM_UNC_SMALL1:			/* - - ks s */
	case KPM_UNC_SMALL2:			/* kc - ks s */
		/*
		 * Have to demap an already small kpm mapping in preparation
		 * for the upcoming TNC mode. The demap may already have been
		 * done due to another conflict within the kpm_page.
		 */
		sfmmu_kpm_demap_small(kpmvaddr);
		kp->kp_refcntc++;
		kp->kp_refcnts--;
		kp->kp_refcnt++;
		PP_CLRKPMS(pp);
		PP_SETKPMC(pp);
		break;

	case KPM_UNC_NODEMAP1:			/* - - ks - */
		/* fallthru */

	case KPM_UNC_NODEMAP2:			/* kc - - - */
	case KPM_UNC_NODEMAP3:			/* kc - ks - */
		kp->kp_refcntc++;
		PP_SETKPMC(pp);
		break;

	case KPM_UNC_NOP1:			/* kc c - - */
	case KPM_UNC_NOP2:			/* kc c ks - */
		break;

	default:
		badstate++;
	}
exit:
	if (badstate) {
		panic("sfmmu_kpm_page_cache: inconsistent VAC state "
		    "kpmvaddr=%p kp=%p pp=%p", (void *)kpmvaddr,
		    (void *)kp, (void *)pp);
	}
	return;

smallpages_page_cache:
	PP2KPMSPG(pp, ksp);
	kpmsp = KPMP_SHASH(ksp);

	oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped,
	    &kpmsp->kshl_lock, KPM_MAPPEDSC);

	if (!(oldval == KPM_MAPPEDS || oldval == KPM_MAPPEDSC))
		panic("smallpages_page_cache: inconsistent mapping");

	sfmmu_kpm_demap_small(kpmvaddr);

	if (flags == HAT_TMPNC) {
		PP_SETKPMC(pp);
		ASSERT(!PP_ISKPMS(pp));

	} else {
		ASSERT(PP_ISKPMC(pp));
		PP_CLRKPMC(pp);
	}

	/*
	 * Keep KPM_MAPPEDSC until the next kpm tsbmiss where it
	 * prevents TL tsbmiss handling and forces a hat_kpm_fault.
	 * There we can start over again.
	 */
}

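/*
 * Illustrative example (not part of the original code): an uncache request
 * (flags == HAT_TMPNC) against a page that is still covered by the large
 * kpm TTE (summary state KPM_UNC_BIG) is handled above by stopping the
 * trap level tsbmiss handler if it was active, breaking up the large
 * mapping and recording the conflict:
 *
 *	sfmmu_kpm_tsbmtl(&kp->kp_refcntc, &kpmp->khl_lock, KPMTSBM_STOP);
 *	sfmmu_kpm_demap_large(kpmvaddr);
 *	kp->kp_refcntc++;
 *	PP_SETKPMC(pp);
 *
 * Subsequent kpm tsbmisses within that 4M range are then resolved with
 * small (8K) mappings until the page can be re-cached.
 */
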
/*
 * unused in sfmmu
 */
void
hat_dump(void)
{
}

/*
 * Called when a thread is exiting and we have switched to the kernel address
 * space. Perform the same VM initialization resume() uses when switching
 * processes.
 *
 * Note that sfmmu_load_mmustate() is currently a no-op for kernel threads, but
 * we call it anyway in case the semantics change in the future.
 */
/*ARGSUSED*/
void
hat_thread_exit(kthread_t *thd)
{
	ASSERT(thd->t_procp->p_as == &kas);

	sfmmu_setctx_sec(KCONTEXT);
	sfmmu_load_mmustate(ksfmmup);
}