/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

/*
 * VM - Hardware Address Translation management for Spitfire MMU.
 *
 * This file implements the machine specific hardware translation
 * needed by the VM system.  The machine independent interface is
 * described in <vm/hat.h> while the machine dependent interface
 * and data structures are described in <vm/hat_sfmmu.h>.
 *
 * The hat layer manages the address translation hardware as a cache
 * driven by calls from the higher levels in the VM system.
 */

#include <sys/types.h>
#include <sys/kstat.h>
#include <vm/hat.h>
#include <vm/hat_sfmmu.h>
#include <vm/page.h>
#include <sys/pte.h>
#include <sys/systm.h>
#include <sys/mman.h>
#include <sys/sysmacros.h>
#include <sys/machparam.h>
#include <sys/vtrace.h>
#include <sys/kmem.h>
#include <sys/mmu.h>
#include <sys/cmn_err.h>
#include <sys/cpu.h>
#include <sys/cpuvar.h>
#include <sys/debug.h>
#include <sys/lgrp.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/vmsystm.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_kp.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kpm.h>
#include <vm/rm.h>
#include <sys/t_lock.h>
#include <sys/obpdefs.h>
#include <sys/vm_machparam.h>
#include <sys/var.h>
#include <sys/trap.h>
#include <sys/machtrap.h>
#include <sys/scb.h>
#include <sys/bitmap.h>
#include <sys/machlock.h>
#include <sys/membar.h>
#include <sys/atomic.h>
#include <sys/cpu_module.h>
#include <sys/prom_debug.h>
#include <sys/ksynch.h>
#include <sys/mem_config.h>
#include <sys/mem_cage.h>
#include <sys/dtrace.h>
#include <vm/vm_dep.h>
#include <vm/xhat_sfmmu.h>
#include <sys/fpu/fpusystm.h>
#include <vm/mach_kpm.h>

#if defined(SF_ERRATA_57)
extern caddr_t errata57_limit;
#endif

#define	HME8BLK_SZ_RND		((roundup(HME8BLK_SZ, sizeof (int64_t))) / \
				(sizeof (int64_t)))
#define	HBLK_RESERVE		((struct hme_blk *)hblk_reserve)

#define	HBLK_RESERVE_CNT	128
#define	HBLK_RESERVE_MIN	20

static struct hme_blk *freehblkp;
static kmutex_t freehblkp_lock;
static int freehblkcnt;

static int64_t hblk_reserve[HME8BLK_SZ_RND];
static kmutex_t hblk_reserve_lock;
static kthread_t *hblk_reserve_thread;

static nucleus_hblk8_info_t nucleus_hblk8;
static nucleus_hblk1_info_t nucleus_hblk1;

/*
 * SFMMU specific hat functions
 */
void	hat_pagecachectl(struct page *, int);

/* flags for hat_pagecachectl */
#define	HAT_CACHE	0x1
#define	HAT_UNCACHE	0x2
#define	HAT_TMPNC	0x4

/*
 * Flag to allow the creation of non-cacheable translations
 * to system memory. It is off by default. At the moment this
 * flag is used by the ecache error injector. The error injector
 * will turn it on when creating such a translation then shut it
 * off when it's finished.
 */

int	sfmmu_allow_nc_trans = 0;

/*
 * Flag to disable large page support.
 *	value of 1 => disable all large pages.
 *	bits 1, 2, and 3 are to disable 64K, 512K and 4M pages respectively.
 *
 * For example, use the value 0x4 to disable 512K pages.
 *
 */
#define	LARGE_PAGES_OFF		0x1

/*
 * WARNING: 512K pages MUST be disabled for ISM/DISM. If not,
 * a process would page fault indefinitely if it tried to
 * access a 512K page.
 */
int	disable_ism_large_pages = (1 << TTE512K);
int	disable_large_pages = 0;
int	disable_auto_large_pages = 0;
int	disable_shm_large_pages = 0;

/*
 * Private sfmmu data structures for hat management
 */
static struct kmem_cache *sfmmuid_cache;
static struct kmem_cache *mmuctxdom_cache;

/*
 * Private sfmmu data structures for tsb management
 */
static struct kmem_cache *sfmmu_tsbinfo_cache;
static struct kmem_cache *sfmmu_tsb8k_cache;
static struct kmem_cache *sfmmu_tsb_cache[NLGRPS_MAX];
static vmem_t *kmem_tsb_arena;

/*
 * sfmmu static variables for hmeblk resource management.
 */
static vmem_t *hat_memload1_arena; /* HAT translation arena for sfmmu1_cache */
static struct kmem_cache *sfmmu8_cache;
static struct kmem_cache *sfmmu1_cache;
static struct kmem_cache *pa_hment_cache;

static kmutex_t	ism_mlist_lock;	/* mutex for ism mapping list */
/*
 * private data for ism
 */
static struct kmem_cache *ism_blk_cache;
static struct kmem_cache *ism_ment_cache;
#define	ISMID_STARTADDR	NULL

/*
 * Whether to delay TLB flushes and use Cheetah's flush-all support
 * when removing contexts from the dirty list.
 */
int delay_tlb_flush;
int disable_delay_tlb_flush;

/*
 * ``hat_lock'' is a hashed mutex lock for protecting sfmmu TSB lists,
 * HAT flags, synchronizing TLB/TSB coherency, and context management.
 * The lock is hashed on the sfmmup since the case where we need to lock
 * all processes is rare but does occur (e.g. we need to unload a shared
 * mapping from all processes using the mapping).  We have a lot of buckets,
 * and each slab of sfmmu_t's can use about a quarter of them, giving us
 * a fairly good distribution without wasting too much space and overhead
 * when we have to grab them all.
 */
#define	SFMMU_NUM_LOCK	128		/* must be power of two */
hatlock_t	hat_lock[SFMMU_NUM_LOCK];

/*
 * Hash algorithm optimized for a small number of slabs.
 *  7 is (highbit((sizeof sfmmu_t)) - 1)
 * This hash algorithm is based upon the knowledge that sfmmu_t's come from a
 * kmem_cache, and thus they will be sequential within that cache.  In
 * addition, each new slab will have a different "color" up to cache_maxcolor
 * which will skew the hashing for each successive slab which is allocated.
 * If the size of sfmmu_t changed to a larger size, this algorithm may need
 * to be revisited.
 */
#define	TSB_HASH_SHIFT_BITS (7)
#define	PTR_HASH(x) ((uintptr_t)x >> TSB_HASH_SHIFT_BITS)

#ifdef DEBUG
int tsb_hash_debug = 0;
#define	TSB_HASH(sfmmup)	\
	(tsb_hash_debug ? &hat_lock[0] : \
	&hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)])
#else	/* DEBUG */
#define	TSB_HASH(sfmmup)	&hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)]
#endif	/* DEBUG */


/* sfmmu_replace_tsb() return codes. */
typedef enum tsb_replace_rc {
	TSB_SUCCESS,
	TSB_ALLOCFAIL,
	TSB_LOSTRACE,
	TSB_ALREADY_SWAPPED,
	TSB_CANTGROW
} tsb_replace_rc_t;

/*
 * Flags for TSB allocation routines.
 */
#define	TSB_ALLOC	0x01
#define	TSB_FORCEALLOC	0x02
#define	TSB_GROW	0x04
#define	TSB_SHRINK	0x08
#define	TSB_SWAPIN	0x10

/*
 * Support for HAT callbacks.
 */
#define	SFMMU_MAX_RELOC_CALLBACKS	10
int sfmmu_max_cb_id = SFMMU_MAX_RELOC_CALLBACKS;
static id_t sfmmu_cb_nextid = 0;
static id_t sfmmu_tsb_cb_id;
struct sfmmu_callback *sfmmu_cb_table;

/*
 * Kernel page relocation is enabled by default for non-caged
 * kernel pages.  This has little effect unless segkmem_reloc is
 * set, since by default kernel memory comes from inside the
 * kernel cage.
 */
int hat_kpr_enabled = 1;

kmutex_t	kpr_mutex;
kmutex_t	kpr_suspendlock;
kthread_t	*kreloc_thread;

/*
 * Enable VA->PA translation sanity checking on DEBUG kernels.
 * Disabled by default.  This is incompatible with some
 * drivers (error injector, RSM) so if it breaks you get
 * to keep both pieces.
 */
int hat_check_vtop = 0;

/*
 * Private sfmmu routines (prototypes)
 */
static struct hme_blk *sfmmu_shadow_hcreate(sfmmu_t *, caddr_t, int, uint_t);
static struct hme_blk *sfmmu_hblk_alloc(sfmmu_t *, caddr_t,
			struct hmehash_bucket *, uint_t, hmeblk_tag, uint_t);
static caddr_t	sfmmu_hblk_unload(struct hat *, struct hme_blk *, caddr_t,
			caddr_t, demap_range_t *, uint_t);
static caddr_t	sfmmu_hblk_sync(struct hat *, struct hme_blk *, caddr_t,
			caddr_t, int);
static void	sfmmu_hblk_free(struct hmehash_bucket *, struct hme_blk *,
			uint64_t, struct hme_blk **);
static void	sfmmu_hblks_list_purge(struct hme_blk **);
static uint_t	sfmmu_get_free_hblk(struct hme_blk **, uint_t);
static uint_t	sfmmu_put_free_hblk(struct hme_blk *, uint_t);
static struct hme_blk *sfmmu_hblk_steal(int);
static int	sfmmu_steal_this_hblk(struct hmehash_bucket *,
			struct hme_blk *, uint64_t, uint64_t,
			struct hme_blk *);
static caddr_t	sfmmu_hblk_unlock(struct hme_blk *, caddr_t, caddr_t);

static void	sfmmu_memload_batchsmall(struct hat *, caddr_t, page_t **,
			uint_t, uint_t, pgcnt_t);
void		sfmmu_tteload(struct hat *, tte_t *, caddr_t, page_t *,
			uint_t);
static int	sfmmu_tteload_array(sfmmu_t *, tte_t *, caddr_t, page_t **,
			uint_t);
static struct hmehash_bucket *sfmmu_tteload_acquire_hashbucket(sfmmu_t *,
			caddr_t, int);
static struct hme_blk *sfmmu_tteload_find_hmeblk(sfmmu_t *,
			struct hmehash_bucket *, caddr_t, uint_t, uint_t);
static int	sfmmu_tteload_addentry(sfmmu_t *, struct hme_blk *, tte_t *,
			caddr_t, page_t **, uint_t);
static void	sfmmu_tteload_release_hashbucket(struct hmehash_bucket *);

static int	sfmmu_pagearray_setup(caddr_t, page_t **, tte_t *, int);
pfn_t		sfmmu_uvatopfn(caddr_t, sfmmu_t *);
void		sfmmu_memtte(tte_t *, pfn_t, uint_t, int);
#ifdef VAC
static void	sfmmu_vac_conflict(struct hat *, caddr_t, page_t *);
static int	sfmmu_vacconflict_array(caddr_t, page_t *, int *);
int	tst_tnc(page_t *pp, pgcnt_t);
void	conv_tnc(page_t *pp, int);
#endif

static void	sfmmu_get_ctx(sfmmu_t *);
static void	sfmmu_free_sfmmu(sfmmu_t *);

static void	sfmmu_gettte(struct hat *, caddr_t, tte_t *);
static void	sfmmu_ttesync(struct hat *, caddr_t, tte_t *, page_t *);
static void	sfmmu_chgattr(struct hat *, caddr_t, size_t, uint_t, int);

cpuset_t	sfmmu_pageunload(page_t *, struct sf_hment *, int);
static void	hat_pagereload(struct page *, struct page *);
static cpuset_t	sfmmu_pagesync(page_t *, struct sf_hment *, uint_t);
#ifdef VAC
void	sfmmu_page_cache_array(page_t *, int, int, pgcnt_t);
static void	sfmmu_page_cache(page_t *, int, int, int);
#endif

static void	sfmmu_tlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *,
			pfn_t, int, int, int, int);
static void	sfmmu_ismtlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *,
			pfn_t, int);
static void	sfmmu_tlb_demap(caddr_t, sfmmu_t *, struct hme_blk *, int, int);
static void	sfmmu_tlb_range_demap(demap_range_t *);
static void	sfmmu_invalidate_ctx(sfmmu_t *);
static void	sfmmu_sync_mmustate(sfmmu_t *);

static void	sfmmu_tsbinfo_setup_phys(struct tsb_info *, pfn_t);
static int	sfmmu_tsbinfo_alloc(struct tsb_info **, int, int, uint_t,
			sfmmu_t *);
static void	sfmmu_tsb_free(struct tsb_info *);
static void	sfmmu_tsbinfo_free(struct tsb_info *);
static int	sfmmu_init_tsbinfo(struct tsb_info *, int, int, uint_t,
			sfmmu_t *);

static void	sfmmu_tsb_swapin(sfmmu_t *, hatlock_t *);
static int	sfmmu_select_tsb_szc(pgcnt_t);
static void	sfmmu_mod_tsb(sfmmu_t *, caddr_t, tte_t *, int);
#define	sfmmu_load_tsb(sfmmup, vaddr, tte, szc) \
	sfmmu_mod_tsb(sfmmup, vaddr, tte, szc)
#define	sfmmu_unload_tsb(sfmmup, vaddr, szc) \
	sfmmu_mod_tsb(sfmmup, vaddr, NULL, szc)
static void	sfmmu_copy_tsb(struct tsb_info *, struct tsb_info *);
static tsb_replace_rc_t sfmmu_replace_tsb(sfmmu_t *, struct tsb_info *, uint_t,
			hatlock_t *, uint_t);
static void	sfmmu_size_tsb(sfmmu_t *, int, uint64_t, uint64_t, int);

#ifdef VAC
void	sfmmu_cache_flush(pfn_t, int);
void	sfmmu_cache_flushcolor(int, pfn_t);
#endif
static caddr_t	sfmmu_hblk_chgattr(sfmmu_t *, struct hme_blk *, caddr_t,
			caddr_t, demap_range_t *, uint_t, int);

static uint64_t	sfmmu_vtop_attr(uint_t, int mode, tte_t *);
static uint_t	sfmmu_ptov_attr(tte_t *);
static caddr_t	sfmmu_hblk_chgprot(sfmmu_t *, struct hme_blk *, caddr_t,
			caddr_t, demap_range_t *, uint_t);
static uint_t	sfmmu_vtop_prot(uint_t, uint_t *);
static int	sfmmu_idcache_constructor(void *, void *, int);
static void	sfmmu_idcache_destructor(void *, void *);
static int	sfmmu_hblkcache_constructor(void *, void *, int);
static void	sfmmu_hblkcache_destructor(void *, void *);
static void	sfmmu_hblkcache_reclaim(void *);
static void	sfmmu_shadow_hcleanup(sfmmu_t *, struct hme_blk *,
			struct hmehash_bucket *);
static void	sfmmu_free_hblks(sfmmu_t *, caddr_t, caddr_t, int);
static void	sfmmu_rm_large_mappings(page_t *, int);

static void	hat_lock_init(void);
static void	hat_kstat_init(void);
static int	sfmmu_kstat_percpu_update(kstat_t *ksp, int rw);
static void	sfmmu_check_page_sizes(sfmmu_t *, int);
int	fnd_mapping_sz(page_t *);
static void	iment_add(struct ism_ment *, struct hat *);
static void	iment_sub(struct ism_ment *, struct hat *);
static pgcnt_t	ism_tsb_entries(sfmmu_t *, int szc);
extern void	sfmmu_setup_tsbinfo(sfmmu_t *);
#ifdef sun4v
extern void	sfmmu_invalidate_tsbinfo(sfmmu_t *);
#endif /* sun4v */
extern void	sfmmu_clear_utsbinfo(void);

static void	sfmmu_ctx_wrap_around(mmu_ctx_t *);

/* kpm globals */
#ifdef	DEBUG
/*
 * Enable trap level tsbmiss handling
 */
int	kpm_tsbmtl = 1;

/*
 * Flush the TLB on kpm mapout. Note: Xcalls are used (again) for the
 * required TLB shootdowns in this case, so handle w/ care. Off by default.
 */
int	kpm_tlb_flush;
#endif	/* DEBUG */

static void	*sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *, size_t, int);

#ifdef DEBUG
static void	sfmmu_check_hblk_flist();
#endif

/*
 * Semi-private sfmmu data structures.  Some of them are initialized in
 * startup or in hat_init. Some of them are private but accessed by
 * assembly code or mach_sfmmu.c
 */
struct hmehash_bucket *uhme_hash;	/* user hmeblk hash table */
struct hmehash_bucket *khme_hash;	/* kernel hmeblk hash table */
uint64_t	uhme_hash_pa;		/* PA of uhme_hash */
uint64_t	khme_hash_pa;		/* PA of khme_hash */
int		uhmehash_num;		/* # of buckets in user hash table */
int		khmehash_num;		/* # of buckets in kernel hash table */

uint_t		max_mmu_ctxdoms = 0;	/* max context domains in the system */
mmu_ctx_t	**mmu_ctxs_tbl;		/* global array of context domains */
uint64_t	mmu_saved_gnum = 0;	/* to init incoming MMUs' gnums */

#define	DEFAULT_NUM_CTXS_PER_MMU 8192
static uint_t	nctxs = DEFAULT_NUM_CTXS_PER_MMU;

int		cache;			/* describes system cache */

caddr_t		ktsb_base;	/* kernel 8k-indexed tsb base address */
uint64_t	ktsb_pbase;	/* kernel 8k-indexed tsb phys address */
int		ktsb_szcode;	/* kernel 8k-indexed tsb size code */
int		ktsb_sz;	/* kernel 8k-indexed tsb size */

caddr_t		ktsb4m_base;	/* kernel 4m-indexed tsb base address */
uint64_t	ktsb4m_pbase;	/* kernel 4m-indexed tsb phys address */
int		ktsb4m_szcode;	/* kernel 4m-indexed tsb size code */
int		ktsb4m_sz;	/* kernel 4m-indexed tsb size */

uint64_t	kpm_tsbbase;	/* kernel seg_kpm 4M TSB base address */
int		kpm_tsbsz;	/* kernel seg_kpm 4M TSB size code */
uint64_t	kpmsm_tsbbase;	/* kernel seg_kpm 8K TSB base address */
int		kpmsm_tsbsz;	/* kernel seg_kpm 8K TSB size code */

#ifndef sun4v
int		utsb_dtlb_ttenum = -1;	/* index in TLB for utsb locked TTE */
int		utsb4m_dtlb_ttenum = -1; /* index in TLB for 4M TSB TTE */
int		dtlb_resv_ttenum;	/* index in TLB of first reserved TTE */
caddr_t		utsb_vabase;		/* reserved kernel virtual memory */
caddr_t		utsb4m_vabase;		/* for trap handler TSB accesses */
#endif /* sun4v */
uint64_t	tsb_alloc_bytes = 0;	/* bytes allocated to TSBs */
vmem_t		*kmem_tsb_default_arena[NLGRPS_MAX];	/* For dynamic TSBs */

/*
 * Size to use for TSB slabs.  Future platforms that support page sizes
 * larger than 4M may wish to change these values, and provide their own
 * assembly macros for building and decoding the TSB base register contents.
 * Note disable_large_pages will override the value set here.
 */
uint_t	tsb_slab_ttesz = TTE4M;
uint_t	tsb_slab_size;
uint_t	tsb_slab_shift;
uint_t	tsb_slab_mask;	/* PFN mask for TTE */

/* largest TSB size to grow to, will be smaller on smaller memory systems */
int	tsb_max_growsize = UTSB_MAX_SZCODE;

/*
 * Tunable parameters dealing with TSB policies.
 */

/*
 * This undocumented tunable forces all 8K TSBs to be allocated from
 * the kernel heap rather than from the kmem_tsb_default_arena arenas.
 */
#ifdef	DEBUG
int	tsb_forceheap = 0;
#endif	/* DEBUG */

/*
 * Decide whether to use per-lgroup arenas, or one global set of
 * TSB arenas.  The default is not to break up per-lgroup, since
 * most platforms don't recognize any tangible benefit from it.
 */
int	tsb_lgrp_affinity = 0;

/*
 * Used for growing the TSB based on the process RSS.
 * tsb_rss_factor is based on the smallest TSB, and is
 * shifted by the TSB size to determine if we need to grow.
 * The default will grow the TSB if the number of TTEs for
 * this page size exceeds 75% of the number of TSB entries,
 * which should _almost_ eliminate all conflict misses
 * (at the expense of using up lots and lots of memory).
 */
#define	TSB_RSS_FACTOR		(TSB_ENTRIES(TSB_MIN_SZCODE) * 0.75)
#define	SFMMU_RSS_TSBSIZE(tsbszc)	(tsb_rss_factor << tsbszc)
#define	SELECT_TSB_SIZECODE(pgcnt) ( \
	(enable_tsb_rss_sizing)? sfmmu_select_tsb_szc(pgcnt) : \
	default_tsb_size)
#define	TSB_OK_SHRINK()	\
	(tsb_alloc_bytes > tsb_alloc_hiwater || freemem < desfree)
#define	TSB_OK_GROW()	\
	(tsb_alloc_bytes < tsb_alloc_hiwater && freemem > desfree)

int	enable_tsb_rss_sizing = 1;
int	tsb_rss_factor = (int)TSB_RSS_FACTOR;

/* which TSB size code to use for new address spaces or if rss sizing off */
int default_tsb_size = TSB_8K_SZCODE;

static uint64_t	tsb_alloc_hiwater; /* limit TSB reserved memory */
uint64_t tsb_alloc_hiwater_factor; /* tsb_alloc_hiwater = physmem / this */
#define	TSB_ALLOC_HIWATER_FACTOR_DEFAULT	32

#ifdef DEBUG
static int tsb_random_size = 0;	/* set to 1 to test random tsb sizes on alloc */
static int tsb_grow_stress = 0;	/* if set to 1, keep replacing TSB w/ random */
static int tsb_alloc_mtbf = 0;	/* fail allocation every n attempts */
static int tsb_alloc_fail_mtbf = 0;
static int tsb_alloc_count = 0;
#endif /* DEBUG */

/* if set to 1, will remap valid TTEs when growing TSB. */
int tsb_remap_ttes = 1;
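/*
 * Worked example of the RSS-based sizing above (illustrative only; the
 * real entry counts come from TSB_ENTRIES() in hat_sfmmu.h).  Assuming the
 * minimum TSB holds 512 entries, TSB_RSS_FACTOR evaluates to 384, so
 * SFMMU_RSS_TSBSIZE(1) == 768: a size-code-1 (1024-entry) TSB is a
 * candidate for growth once a process has more than 768 TTEs of that page
 * size resident, i.e. once it would fill more than 75% of that TSB.
 */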
/*
 * If we have more than this many mappings, allocate a second TSB.
 * This default is chosen because the I/D fully associative TLBs are
 * assumed to have at least 8 available entries. Platforms with a
 * larger fully-associative TLB could probably override the default.
 */
int	tsb_sectsb_threshold = 8;

/*
 * kstat data
 */
struct sfmmu_global_stat sfmmu_global_stat;
struct sfmmu_tsbsize_stat sfmmu_tsbsize_stat;

/*
 * Global data
 */
sfmmu_t		*ksfmmup;		/* kernel's hat id */

#ifdef DEBUG
static void	chk_tte(tte_t *, tte_t *, tte_t *, struct hme_blk *);
#endif

/* sfmmu locking operations */
static kmutex_t *sfmmu_mlspl_enter(struct page *, int);
static int	sfmmu_mlspl_held(struct page *, int);

kmutex_t *sfmmu_page_enter(page_t *);
void	sfmmu_page_exit(kmutex_t *);
int	sfmmu_page_spl_held(struct page *);

/* sfmmu internal locking operations - accessed directly */
static void	sfmmu_mlist_reloc_enter(page_t *, page_t *,
				kmutex_t **, kmutex_t **);
static void	sfmmu_mlist_reloc_exit(kmutex_t *, kmutex_t *);
static hatlock_t *
		sfmmu_hat_enter(sfmmu_t *);
static hatlock_t *
		sfmmu_hat_tryenter(sfmmu_t *);
static void	sfmmu_hat_exit(hatlock_t *);
static void	sfmmu_hat_lock_all(void);
static void	sfmmu_hat_unlock_all(void);
static void	sfmmu_ismhat_enter(sfmmu_t *, int);
static void	sfmmu_ismhat_exit(sfmmu_t *, int);

/*
 * Array of mutexes protecting a page's mapping list and p_nrm field.
 *
 * The hash function looks complicated, but is made up so that:
 *
 * "pp" not shifted, so adjacent pp values will hash to different cache lines
 *  (8 byte alignment * 8 bytes/mutex == 64 byte coherency subblock)
 *
 * "pp" >> mml_shift, incorporates more source bits into the hash result
 *
 * "& (mml_table_sz - 1)", should be faster than using remainder "%"
 *
 * Hopefully, mml_table, mml_table_sz and mml_shift are all in the same
 * cacheline, since they get declared next to each other below.  We'll trust
 * ld not to do something random.
 */
#ifdef	DEBUG
int mlist_hash_debug = 0;
#define	MLIST_HASH(pp)	(mlist_hash_debug ? &mml_table[0] : \
	&mml_table[((uintptr_t)(pp) + \
	((uintptr_t)(pp) >> mml_shift)) & (mml_table_sz - 1)])
#else	/* !DEBUG */
#define	MLIST_HASH(pp)	&mml_table[ \
	((uintptr_t)(pp) + ((uintptr_t)(pp) >> mml_shift)) & (mml_table_sz - 1)]
#endif	/* !DEBUG */

kmutex_t		*mml_table;
uint_t			mml_table_sz;	/* must be a power of 2 */
uint_t			mml_shift;	/* log2(mml_table_sz) + 3 for align */

kpm_hlk_t	*kpmp_table;
uint_t		kpmp_table_sz;	/* must be a power of 2 */
uchar_t		kpmp_shift;

kpm_shlk_t	*kpmp_stable;
uint_t		kpmp_stable_sz;	/* must be a power of 2 */

/*
 * SPL_HASH was improved to avoid false cache line sharing
 */
#define	SPL_TABLE_SIZE	128
#define	SPL_MASK	(SPL_TABLE_SIZE - 1)
#define	SPL_SHIFT	7		/* log2(SPL_TABLE_SIZE) */

#define	SPL_INDEX(pp) \
	((((uintptr_t)(pp) >> SPL_SHIFT) ^ \
	((uintptr_t)(pp) >> (SPL_SHIFT << 1))) & \
	(SPL_TABLE_SIZE - 1))

#define	SPL_HASH(pp) \
	(&sfmmu_page_lock[SPL_INDEX(pp) & SPL_MASK].pad_mutex)

static	pad_mutex_t	sfmmu_page_lock[SPL_TABLE_SIZE];


/*
 * hat_unload_callback() will group together callbacks in order
 * to avoid xt_sync() calls.  This is the maximum size of the group.
 */
#define	MAX_CB_ADDR	32

tte_t	hw_tte;
static ulong_t sfmmu_dmr_maxbit = DMR_MAXBIT;

static char	*mmu_ctx_kstat_names[] = {
	"mmu_ctx_tsb_exceptions",
	"mmu_ctx_tsb_raise_exception",
	"mmu_ctx_wrap_around",
};

/*
 * Wrapper for vmem_xalloc since vmem_create only allows limited
 * parameters for vm_source_alloc functions.  This function allows us
 * to specify alignment consistent with the size of the object being
 * allocated.
 */
static void *
sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *vmp, size_t size, int vmflag)
{
	return (vmem_xalloc(vmp, size, size, 0, 0, NULL, NULL, vmflag));
}

/* Common code for setting tsb_alloc_hiwater. */
#define	SFMMU_SET_TSB_ALLOC_HIWATER(pages)	tsb_alloc_hiwater = \
	ptob(pages) / tsb_alloc_hiwater_factor

/*
 * Set tsb_max_growsize to allow at most all of physical memory to be mapped by
 * a single TSB.  physmem is the number of physical pages so we need physmem 8K
 * TTEs to represent all those physical pages.  We round this up by using
 * 1<<highbit().  To figure out which size code to use, remember that the size
 * code is just an amount to shift the smallest TSB size to get the size of
 * this TSB.  So we subtract that size, TSB_START_SIZE, from highbit() (or
 * highbit() - 1) to get the size code for the smallest TSB that can represent
 * all of physical memory, while erring on the side of too much.
 *
 * If the computed size code is less than the current tsb_max_growsize, we set
 * tsb_max_growsize to the computed size code.  In the case where the computed
 * size code is greater than tsb_max_growsize, we have these restrictions that
 * apply to increasing tsb_max_growsize:
 *	1) TSBs can't grow larger than the TSB slab size
 *	2) TSBs can't grow larger than UTSB_MAX_SZCODE.
 */
#define	SFMMU_SET_TSB_MAX_GROWSIZE(pages) {				\
	int	i, szc;							\
									\
	i = highbit(pages);						\
	if ((1 << (i - 1)) == (pages))					\
		i--;		/* 2^n case, round down */		\
	szc = i - TSB_START_SIZE;					\
	if (szc < tsb_max_growsize)					\
		tsb_max_growsize = szc;					\
	else if ((szc > tsb_max_growsize) &&				\
	    (szc <= tsb_slab_shift - (TSB_START_SIZE + TSB_ENTRY_SHIFT))) \
		tsb_max_growsize = MIN(szc, UTSB_MAX_SZCODE);		\
}

/*
 * Given a pointer to an sfmmu and a TTE size code, return a pointer to the
 * tsb_info which handles that TTE size.
 */
#define	SFMMU_GET_TSBINFO(tsbinfop, sfmmup, tte_szc)			\
	(tsbinfop) = (sfmmup)->sfmmu_tsb;				\
	ASSERT(sfmmu_hat_lock_held(sfmmup));				\
	if ((tte_szc) >= TTE4M)						\
		(tsbinfop) = (tsbinfop)->tsb_next;

/*
 * Return the number of mappings present in the HAT
 * for a particular process and page size.
 */
#define	SFMMU_TTE_CNT(sfmmup, szc)					\
	(sfmmup)->sfmmu_iblk?						\
	    (sfmmup)->sfmmu_ismttecnt[(szc)] +				\
	    (sfmmup)->sfmmu_ttecnt[(szc)] :				\
	    (sfmmup)->sfmmu_ttecnt[(szc)];
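/*
 * Worked example of the size-code arithmetic in SFMMU_SET_TSB_MAX_GROWSIZE
 * above (illustrative only; the value of TSB_START_SIZE is assumed here to
 * describe a 512-entry minimum TSB -- see hat_sfmmu.h for the authoritative
 * definitions).  With physmem == 2^20 8K pages, highbit(physmem) == 21 and
 * the exact power of two rounds i down to 20, so szc == 20 - TSB_START_SIZE.
 * Under the 512-entry assumption that TSB holds exactly 2^20 entries, one
 * per physical page -- the "smallest TSB that can represent all of physical
 * memory".  The result is still clamped by the slab-size check and by
 * UTSB_MAX_SZCODE.
 */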
/*
 * Macro to use to unload entries from the TSB.
 * It has knowledge of which page sizes get replicated in the TSB
 * and will call the appropriate unload routine for the appropriate size.
 */
#define	SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp)				\
{									\
	int ttesz = get_hblk_ttesz(hmeblkp);				\
	if (ttesz == TTE8K || ttesz == TTE4M) {				\
		sfmmu_unload_tsb(sfmmup, addr, ttesz);			\
	} else {							\
		caddr_t sva = (caddr_t)get_hblk_base(hmeblkp);		\
		caddr_t eva = sva + get_hblk_span(hmeblkp);		\
		ASSERT(addr >= sva && addr < eva);			\
		sfmmu_unload_tsb_range(sfmmup, sva, eva, ttesz);	\
	}								\
}


/* Update tsb_alloc_hiwater after memory is configured. */
/*ARGSUSED*/
static void
sfmmu_update_tsb_post_add(void *arg, pgcnt_t delta_pages)
{
	/* Assumes physmem has already been updated. */
	SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
	SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
}

/*
 * Update tsb_alloc_hiwater before memory is deleted.  We'll do nothing here
 * and update tsb_alloc_hiwater and tsb_max_growsize after the memory is
 * deleted.
 */
/*ARGSUSED*/
static int
sfmmu_update_tsb_pre_del(void *arg, pgcnt_t delta_pages)
{
	return (0);
}

/* Update tsb_alloc_hiwater after memory fails to be unconfigured. */
/*ARGSUSED*/
static void
sfmmu_update_tsb_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
{
	/*
	 * Whether the delete was cancelled or not, just go ahead and update
	 * tsb_alloc_hiwater and tsb_max_growsize.
	 */
	SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
	SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
}

static kphysm_setup_vector_t sfmmu_update_tsb_vec = {
	KPHYSM_SETUP_VECTOR_VERSION,	/* version */
	sfmmu_update_tsb_post_add,	/* post_add */
	sfmmu_update_tsb_pre_del,	/* pre_del */
	sfmmu_update_tsb_post_del	/* post_del */
};


/*
 * HME_BLK HASH PRIMITIVES
 */

/*
 * Enter a hme on the mapping list for page pp.
 * When large pages are more prevalent in the system we might want to
 * keep the mapping list in ascending order by the hment size. For now,
 * small pages are more frequent, so don't slow it down.
 */
#define	HME_ADD(hme, pp)					\
{								\
	ASSERT(sfmmu_mlist_held(pp));				\
								\
	hme->hme_prev = NULL;					\
	hme->hme_next = pp->p_mapping;				\
	hme->hme_page = pp;					\
	if (pp->p_mapping) {					\
		((struct sf_hment *)(pp->p_mapping))->hme_prev = hme;\
		ASSERT(pp->p_share > 0);			\
	} else  {						\
		/* EMPTY */					\
		ASSERT(pp->p_share == 0);			\
	}							\
	pp->p_mapping = hme;					\
	pp->p_share++;						\
}
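/*
 * A note on the invariants HME_ADD and HME_SUB maintain (derived from the
 * asserts in the macros themselves): p_mapping always points at the head of
 * the doubly linked hment list, p_share counts the hments currently on that
 * list, and both macros may only be used while the page's mapping list lock
 * is held (sfmmu_mlist_held()).  HME_SUB below is the removal counterpart.
 */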
/*
 * Remove a hme from the mapping list for page pp.
 * If we are unmapping a large translation, we need to make sure that the
 * change is reflected in the corresponding bit of the p_index field.
 */
#define	HME_SUB(hme, pp)					\
{								\
	ASSERT(sfmmu_mlist_held(pp));				\
	ASSERT(hme->hme_page == pp || IS_PAHME(hme));		\
								\
	if (pp->p_mapping == NULL) {				\
		panic("hme_remove - no mappings");		\
	}							\
								\
	membar_stst();	/* ensure previous stores finish */	\
								\
	ASSERT(pp->p_share > 0);				\
	pp->p_share--;						\
								\
	if (hme->hme_prev) {					\
		ASSERT(pp->p_mapping != hme);			\
		ASSERT(hme->hme_prev->hme_page == pp ||		\
			IS_PAHME(hme->hme_prev));		\
		hme->hme_prev->hme_next = hme->hme_next;	\
	} else {						\
		ASSERT(pp->p_mapping == hme);			\
		pp->p_mapping = hme->hme_next;			\
		ASSERT((pp->p_mapping == NULL) ?		\
			(pp->p_share == 0) : 1);		\
	}							\
								\
	if (hme->hme_next) {					\
		ASSERT(hme->hme_next->hme_page == pp ||		\
			IS_PAHME(hme->hme_next));		\
		hme->hme_next->hme_prev = hme->hme_prev;	\
	}							\
								\
	/* zero out the entry */				\
	hme->hme_next = NULL;					\
	hme->hme_prev = NULL;					\
	hme->hme_page = NULL;					\
								\
	if (hme_size(hme) > TTE8K) {				\
		/* remove mappings for remainder of large pg */	\
		sfmmu_rm_large_mappings(pp, hme_size(hme));	\
	}							\
}

/*
 * This function returns the hment given the hme_blk and a vaddr.
 * It assumes addr has already been checked to belong to hme_blk's
 * range.
 */
#define	HBLKTOHME(hment, hmeblkp, addr)					\
{									\
	int index;							\
	HBLKTOHME_IDX(hment, hmeblkp, addr, index)			\
}

/*
 * Version of HBLKTOHME that also returns the index in hmeblkp
 * of the hment.
 */
#define	HBLKTOHME_IDX(hment, hmeblkp, addr, idx)			\
{									\
	ASSERT(in_hblk_range((hmeblkp), (addr)));			\
									\
	if (get_hblk_ttesz(hmeblkp) == TTE8K) {				\
		idx = (((uintptr_t)(addr) >> MMU_PAGESHIFT) & (NHMENTS-1)); \
	} else								\
		idx = 0;						\
									\
	(hment) = &(hmeblkp)->hblk_hme[idx];				\
}

/*
 * Disable any page sizes not supported by the CPU
 */
void
hat_init_pagesizes()
{
	int		i;

	mmu_exported_page_sizes = 0;
	for (i = TTE8K; i < max_mmu_page_sizes; i++) {
		extern int	disable_text_largepages;
		extern int	disable_initdata_largepages;

		szc_2_userszc[i] = (uint_t)-1;
		userszc_2_szc[i] = (uint_t)-1;

		if ((mmu_exported_pagesize_mask & (1 << i)) == 0) {
			disable_large_pages |= (1 << i);
			disable_ism_large_pages |= (1 << i);
			disable_text_largepages |= (1 << i);
			disable_initdata_largepages |= (1 << i);
		} else {
			szc_2_userszc[i] = mmu_exported_page_sizes;
			userszc_2_szc[mmu_exported_page_sizes] = i;
			mmu_exported_page_sizes++;
		}
	}

	disable_auto_large_pages = disable_large_pages;

	/*
	 * Initialize mmu-specific large page sizes.
	 */
	if (&mmu_large_pages_disabled) {
		disable_large_pages |= mmu_large_pages_disabled(HAT_LOAD);
		disable_ism_large_pages |=
		    mmu_large_pages_disabled(HAT_LOAD_SHARE);
		disable_auto_large_pages |=
		    mmu_large_pages_disabled(HAT_LOAD_AUTOLPG);
	}

	disable_shm_large_pages = disable_auto_large_pages;
}

/*
 * Initialize the hardware address translation structures.
 */
void
hat_init(void)
{
	int		i;
	uint_t		sz;
	uint_t		maxtsb;
	size_t		size;

	hat_lock_init();
	hat_kstat_init();

	/*
	 * Hardware-only bits in a TTE
	 */
	MAKE_TTE_MASK(&hw_tte);

	hat_init_pagesizes();

	/* Initialize the hash locks */
	for (i = 0; i < khmehash_num; i++) {
		mutex_init(&khme_hash[i].hmehash_mutex, NULL,
		    MUTEX_DEFAULT, NULL);
	}
	for (i = 0; i < uhmehash_num; i++) {
		mutex_init(&uhme_hash[i].hmehash_mutex, NULL,
		    MUTEX_DEFAULT, NULL);
	}
	khmehash_num--;		/* make sure counter starts from 0 */
	uhmehash_num--;		/* make sure counter starts from 0 */

	/*
	 * Allocate context domain structures.
	 *
	 * A platform may choose to modify max_mmu_ctxdoms in
	 * set_platform_defaults(). If a platform does not define
	 * a set_platform_defaults() or does not choose to modify
	 * max_mmu_ctxdoms, it gets one MMU context domain for every CPU.
	 *
	 * For sun4v, there will be one global context domain, this is to
	 * avoid the ldom cpu substitution problem.
	 *
	 * For all platforms that have CPUs sharing MMUs, this
	 * value must be defined.
	 */
	if (max_mmu_ctxdoms == 0) {
#ifndef sun4v
		max_mmu_ctxdoms = max_ncpus;
#else /* sun4v */
		max_mmu_ctxdoms = 1;
#endif /* sun4v */
	}

	size = max_mmu_ctxdoms * sizeof (mmu_ctx_t *);
	mmu_ctxs_tbl = kmem_zalloc(size, KM_SLEEP);

	/* mmu_ctx_t is 64 bytes aligned */
	mmuctxdom_cache = kmem_cache_create("mmuctxdom_cache",
	    sizeof (mmu_ctx_t), 64, NULL, NULL, NULL, NULL, NULL, 0);
	/*
	 * MMU context domain initialization for the Boot CPU.
	 * This needs the context domains array allocated above.
	 */
	mutex_enter(&cpu_lock);
	sfmmu_cpu_init(CPU);
	mutex_exit(&cpu_lock);

	/*
	 * Initialize ism mapping list lock.
	 */

	mutex_init(&ism_mlist_lock, NULL, MUTEX_DEFAULT, NULL);

	/*
	 * Each sfmmu structure carries an array of MMU context info
	 * structures, one per context domain. The size of this array depends
	 * on the maximum number of context domains. So, the size of the
	 * sfmmu structure varies per platform.
	 *
	 * sfmmu is allocated from static arena, because trap
	 * handler at TL > 0 is not allowed to touch kernel relocatable
	 * memory. sfmmu's alignment is changed to 64 bytes from
	 * default 8 bytes, as the lower 6 bits will be used to pass
	 * pgcnt to vtag_flush_pgcnt_tl1.
	 */
	size = sizeof (sfmmu_t) + sizeof (sfmmu_ctx_t) * (max_mmu_ctxdoms - 1);

	sfmmuid_cache = kmem_cache_create("sfmmuid_cache", size,
	    64, sfmmu_idcache_constructor, sfmmu_idcache_destructor,
	    NULL, NULL, static_arena, 0);

	sfmmu_tsbinfo_cache = kmem_cache_create("sfmmu_tsbinfo_cache",
	    sizeof (struct tsb_info), 0, NULL, NULL, NULL, NULL, NULL, 0);

	/*
	 * Since we only use the tsb8k cache to "borrow" pages for TSBs
	 * from the heap when low on memory or when TSB_FORCEALLOC is
	 * specified, don't use magazines to cache them--we want to return
	 * them to the system as quickly as possible.
	 */
	sfmmu_tsb8k_cache = kmem_cache_create("sfmmu_tsb8k_cache",
	    MMU_PAGESIZE, MMU_PAGESIZE, NULL, NULL, NULL, NULL,
	    static_arena, KMC_NOMAGAZINE);

	/*
	 * Set tsb_alloc_hiwater to 1/tsb_alloc_hiwater_factor of physical
	 * memory, which corresponds to the old static reserve for TSBs.
	 * tsb_alloc_hiwater_factor defaults to 32.  This caps the amount of
	 * memory we'll allocate for TSB slabs; beyond this point TSB
	 * allocations will be taken from the kernel heap (via
	 * sfmmu_tsb8k_cache) and will be throttled as would any other kmem
	 * consumer.
	 */
	if (tsb_alloc_hiwater_factor == 0) {
		tsb_alloc_hiwater_factor = TSB_ALLOC_HIWATER_FACTOR_DEFAULT;
	}
	SFMMU_SET_TSB_ALLOC_HIWATER(physmem);

	/* Set tsb_max_growsize. */
	SFMMU_SET_TSB_MAX_GROWSIZE(physmem);

	/*
	 * On smaller memory systems, allocate TSB memory in smaller chunks
	 * than the default 4M slab size. We also honor disable_large_pages
	 * here.
	 *
	 * The trap handlers need to be patched with the final slab shift,
	 * since they need to be able to construct the TSB pointer at runtime.
	 */
	if (tsb_max_growsize <= TSB_512K_SZCODE)
		tsb_slab_ttesz = TTE512K;

	for (sz = tsb_slab_ttesz; sz > 0; sz--) {
		if (!(disable_large_pages & (1 << sz)))
			break;
	}

	tsb_slab_ttesz = sz;
	tsb_slab_shift = MMU_PAGESHIFT + (sz << 1) + sz;
	tsb_slab_size = 1 << tsb_slab_shift;
	tsb_slab_mask = (1 << (tsb_slab_shift - MMU_PAGESHIFT)) - 1;

	maxtsb = tsb_slab_shift - (TSB_START_SIZE + TSB_ENTRY_SHIFT);
	if (tsb_max_growsize > maxtsb)
		tsb_max_growsize = maxtsb;

	/*
	 * Set up memory callback to update tsb_alloc_hiwater and
	 * tsb_max_growsize.
	 */
	i = kphysm_setup_func_register(&sfmmu_update_tsb_vec, (void *) 0);
	ASSERT(i == 0);

	/*
	 * kmem_tsb_arena is the source from which large TSB slabs are
	 * drawn.  The quantum of this arena corresponds to the largest
	 * TSB size we can dynamically allocate for user processes.
	 * Currently it must also be a supported page size since we
	 * use exactly one translation entry to map each slab page.
	 *
	 * The per-lgroup kmem_tsb_default_arena arenas are the arenas from
	 * which most TSBs are allocated.  Since most TSB allocations are
	 * typically 8K we have a kmem cache we stack on top of each
	 * kmem_tsb_default_arena to speed up those allocations.
	 *
	 * Note the two-level scheme of arenas is required only
	 * because vmem_create doesn't allow us to specify alignment
	 * requirements.  If this ever changes the code could be
	 * simplified to use only one level of arenas.
	 */
	kmem_tsb_arena = vmem_create("kmem_tsb", NULL, 0, tsb_slab_size,
	    sfmmu_vmem_xalloc_aligned_wrapper, vmem_xfree, heap_arena,
	    0, VM_SLEEP);

	if (tsb_lgrp_affinity) {
		char s[50];
		for (i = 0; i < NLGRPS_MAX; i++) {
			(void) sprintf(s, "kmem_tsb_lgrp%d", i);
			kmem_tsb_default_arena[i] =
			    vmem_create(s, NULL, 0, PAGESIZE,
			    sfmmu_tsb_segkmem_alloc, sfmmu_tsb_segkmem_free,
			    kmem_tsb_arena, 0, VM_SLEEP | VM_BESTFIT);
			(void) sprintf(s, "sfmmu_tsb_lgrp%d_cache", i);
			sfmmu_tsb_cache[i] = kmem_cache_create(s, PAGESIZE,
			    PAGESIZE, NULL, NULL, NULL, NULL,
			    kmem_tsb_default_arena[i], 0);
		}
	} else {
		kmem_tsb_default_arena[0] = vmem_create("kmem_tsb_default",
		    NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc,
		    sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0,
		    VM_SLEEP | VM_BESTFIT);

		sfmmu_tsb_cache[0] = kmem_cache_create("sfmmu_tsb_cache",
		    PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL,
		    kmem_tsb_default_arena[0], 0);
	}

	sfmmu8_cache = kmem_cache_create("sfmmu8_cache", HME8BLK_SZ,
	    HMEBLK_ALIGN, sfmmu_hblkcache_constructor,
	    sfmmu_hblkcache_destructor,
	    sfmmu_hblkcache_reclaim, (void *)HME8BLK_SZ,
	    hat_memload_arena, KMC_NOHASH);

	hat_memload1_arena = vmem_create("hat_memload1", NULL, 0, PAGESIZE,
	    segkmem_alloc_permanent, segkmem_free, heap_arena, 0, VM_SLEEP);

	sfmmu1_cache = kmem_cache_create("sfmmu1_cache", HME1BLK_SZ,
	    HMEBLK_ALIGN, sfmmu_hblkcache_constructor,
	    sfmmu_hblkcache_destructor,
	    NULL, (void *)HME1BLK_SZ,
	    hat_memload1_arena, KMC_NOHASH);

	pa_hment_cache = kmem_cache_create("pa_hment_cache", PAHME_SZ,
	    0, NULL, NULL, NULL, NULL, static_arena, KMC_NOHASH);

	ism_blk_cache = kmem_cache_create("ism_blk_cache",
	    sizeof (ism_blk_t), ecache_alignsize, NULL, NULL,
	    NULL, NULL, static_arena, KMC_NOHASH);

	ism_ment_cache = kmem_cache_create("ism_ment_cache",
kmem_cache_create("ism_ment_cache", 1151 sizeof (ism_ment_t), 0, NULL, NULL, 1152 NULL, NULL, NULL, 0); 1153 1154 /* 1155 * We grab the first hat for the kernel, 1156 */ 1157 AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER); 1158 kas.a_hat = hat_alloc(&kas); 1159 AS_LOCK_EXIT(&kas, &kas.a_lock); 1160 1161 /* 1162 * Initialize hblk_reserve. 1163 */ 1164 ((struct hme_blk *)hblk_reserve)->hblk_nextpa = 1165 va_to_pa((caddr_t)hblk_reserve); 1166 1167 #ifndef UTSB_PHYS 1168 /* 1169 * Reserve some kernel virtual address space for the locked TTEs 1170 * that allow us to probe the TSB from TL>0. 1171 */ 1172 utsb_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size, 1173 0, 0, NULL, NULL, VM_SLEEP); 1174 utsb4m_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size, 1175 0, 0, NULL, NULL, VM_SLEEP); 1176 #endif 1177 1178 #ifdef VAC 1179 /* 1180 * The big page VAC handling code assumes VAC 1181 * will not be bigger than the smallest big 1182 * page- which is 64K. 1183 */ 1184 if (TTEPAGES(TTE64K) < CACHE_NUM_COLOR) { 1185 cmn_err(CE_PANIC, "VAC too big!"); 1186 } 1187 #endif 1188 1189 (void) xhat_init(); 1190 1191 uhme_hash_pa = va_to_pa(uhme_hash); 1192 khme_hash_pa = va_to_pa(khme_hash); 1193 1194 /* 1195 * Initialize relocation locks. kpr_suspendlock is held 1196 * at PIL_MAX to prevent interrupts from pinning the holder 1197 * of a suspended TTE which may access it leading to a 1198 * deadlock condition. 1199 */ 1200 mutex_init(&kpr_mutex, NULL, MUTEX_DEFAULT, NULL); 1201 mutex_init(&kpr_suspendlock, NULL, MUTEX_SPIN, (void *)PIL_MAX); 1202 } 1203 1204 /* 1205 * Initialize locking for the hat layer, called early during boot. 1206 */ 1207 static void 1208 hat_lock_init() 1209 { 1210 int i; 1211 1212 /* 1213 * initialize the array of mutexes protecting a page's mapping 1214 * list and p_nrm field. 1215 */ 1216 for (i = 0; i < mml_table_sz; i++) 1217 mutex_init(&mml_table[i], NULL, MUTEX_DEFAULT, NULL); 1218 1219 if (kpm_enable) { 1220 for (i = 0; i < kpmp_table_sz; i++) { 1221 mutex_init(&kpmp_table[i].khl_mutex, NULL, 1222 MUTEX_DEFAULT, NULL); 1223 } 1224 } 1225 1226 /* 1227 * Initialize array of mutex locks that protects sfmmu fields and 1228 * TSB lists. 1229 */ 1230 for (i = 0; i < SFMMU_NUM_LOCK; i++) 1231 mutex_init(HATLOCK_MUTEXP(&hat_lock[i]), NULL, MUTEX_DEFAULT, 1232 NULL); 1233 } 1234 1235 extern caddr_t kmem64_base, kmem64_end; 1236 1237 #define SFMMU_KERNEL_MAXVA \ 1238 (kmem64_base ? (uintptr_t)kmem64_end : (SYSLIMIT)) 1239 1240 /* 1241 * Allocate a hat structure. 1242 * Called when an address space first uses a hat. 1243 */ 1244 struct hat * 1245 hat_alloc(struct as *as) 1246 { 1247 sfmmu_t *sfmmup; 1248 int i; 1249 uint64_t cnum; 1250 extern uint_t get_color_start(struct as *); 1251 1252 ASSERT(AS_WRITE_HELD(as, &as->a_lock)); 1253 sfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP); 1254 sfmmup->sfmmu_as = as; 1255 sfmmup->sfmmu_flags = 0; 1256 LOCK_INIT_CLEAR(&sfmmup->sfmmu_ctx_lock); 1257 1258 if (as == &kas) { 1259 ksfmmup = sfmmup; 1260 sfmmup->sfmmu_cext = 0; 1261 cnum = KCONTEXT; 1262 1263 sfmmup->sfmmu_clrstart = 0; 1264 sfmmup->sfmmu_tsb = NULL; 1265 /* 1266 * hat_kern_setup() will call sfmmu_init_ktsbinfo() 1267 * to setup tsb_info for ksfmmup. 1268 */ 1269 } else { 1270 1271 /* 1272 * Just set to invalid ctx. When it faults, it will 1273 * get a valid ctx. This would avoid the situation 1274 * where we get a ctx, but it gets stolen and then 1275 * we fault when we try to run and so have to get 1276 * another ctx. 
		 */
		sfmmup->sfmmu_cext = 0;
		cnum = INVALID_CONTEXT;

		/* initialize original physical page coloring bin */
		sfmmup->sfmmu_clrstart = get_color_start(as);
#ifdef DEBUG
		if (tsb_random_size) {
			uint32_t randval = (uint32_t)gettick() >> 4;
			int size = randval % (tsb_max_growsize + 1);

			/* choose a random tsb size for stress testing */
			(void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb, size,
			    TSB8K|TSB64K|TSB512K, 0, sfmmup);
		} else
#endif /* DEBUG */
			(void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb,
			    default_tsb_size,
			    TSB8K|TSB64K|TSB512K, 0, sfmmup);
		sfmmup->sfmmu_flags = HAT_SWAPPED;
		ASSERT(sfmmup->sfmmu_tsb != NULL);
	}

	ASSERT(max_mmu_ctxdoms > 0);
	for (i = 0; i < max_mmu_ctxdoms; i++) {
		sfmmup->sfmmu_ctxs[i].cnum = cnum;
		sfmmup->sfmmu_ctxs[i].gnum = 0;
	}

	sfmmu_setup_tsbinfo(sfmmup);
	for (i = 0; i < max_mmu_page_sizes; i++) {
		sfmmup->sfmmu_ttecnt[i] = 0;
		sfmmup->sfmmu_ismttecnt[i] = 0;
		sfmmup->sfmmu_pgsz[i] = TTE8K;
	}

	sfmmup->sfmmu_iblk = NULL;
	sfmmup->sfmmu_ismhat = 0;
	sfmmup->sfmmu_ismblkpa = (uint64_t)-1;
	if (sfmmup == ksfmmup) {
		CPUSET_ALL(sfmmup->sfmmu_cpusran);
	} else {
		CPUSET_ZERO(sfmmup->sfmmu_cpusran);
	}
	sfmmup->sfmmu_free = 0;
	sfmmup->sfmmu_rmstat = 0;
	sfmmup->sfmmu_clrbin = sfmmup->sfmmu_clrstart;
	sfmmup->sfmmu_xhat_provider = NULL;
	cv_init(&sfmmup->sfmmu_tsb_cv, NULL, CV_DEFAULT, NULL);
	return (sfmmup);
}

/*
 * Create per-MMU context domain kstats for a given MMU ctx.
 */
static void
sfmmu_mmu_kstat_create(mmu_ctx_t *mmu_ctxp)
{
	mmu_ctx_stat_t	stat;
	kstat_t		*mmu_kstat;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(mmu_ctxp->mmu_kstat == NULL);

	mmu_kstat = kstat_create("unix", mmu_ctxp->mmu_idx, "mmu_ctx",
	    "hat", KSTAT_TYPE_NAMED, MMU_CTX_NUM_STATS, KSTAT_FLAG_VIRTUAL);

	if (mmu_kstat == NULL) {
		cmn_err(CE_WARN, "kstat_create for MMU %d failed",
		    mmu_ctxp->mmu_idx);
	} else {
		mmu_kstat->ks_data = mmu_ctxp->mmu_kstat_data;
		for (stat = 0; stat < MMU_CTX_NUM_STATS; stat++)
			kstat_named_init(&mmu_ctxp->mmu_kstat_data[stat],
			    mmu_ctx_kstat_names[stat], KSTAT_DATA_INT64);
		mmu_ctxp->mmu_kstat = mmu_kstat;
		kstat_install(mmu_kstat);
	}
}

/*
 * plat_cpuid_to_mmu_ctx_info() is a platform interface that returns MMU
 * context domain information for a given CPU. If a platform does not
 * specify that interface, then the function below is used instead to return
 * default information. The defaults are as follows:
 *
 *	- For sun4u systems there's one MMU context domain per CPU.
 *	  This default is used by all sun4u systems except OPL. OPL systems
 *	  provide a platform specific interface to map CPU ids to MMU ids
 *	  because on OPL more than 1 CPU shares a single MMU.
 *	  Note that on sun4v, there is one global context domain for
 *	  the entire system. This is to avoid running into a potential
 *	  problem with the ldom physical cpu substitution feature.
 *	- The number of MMU context IDs supported on any CPU in the
 *	  system is 8K.
 */
/*ARGSUSED*/
static void
sfmmu_cpuid_to_mmu_ctx_info(processorid_t cpuid, mmu_ctx_info_t *infop)
{
	infop->mmu_nctxs = nctxs;
#ifndef sun4v
	infop->mmu_idx = cpu[cpuid]->cpu_seqid;
#else /* sun4v */
	infop->mmu_idx = 0;
#endif /* sun4v */
}

/*
 * Called during CPU initialization to set the MMU context-related information
 * for a CPU.
 *
 * cpu_lock serializes accesses to mmu_ctxs and mmu_saved_gnum.
 */
void
sfmmu_cpu_init(cpu_t *cp)
{
	mmu_ctx_info_t	info;
	mmu_ctx_t	*mmu_ctxp;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (&plat_cpuid_to_mmu_ctx_info == NULL)
		sfmmu_cpuid_to_mmu_ctx_info(cp->cpu_id, &info);
	else
		plat_cpuid_to_mmu_ctx_info(cp->cpu_id, &info);

	ASSERT(info.mmu_idx < max_mmu_ctxdoms);

	if ((mmu_ctxp = mmu_ctxs_tbl[info.mmu_idx]) == NULL) {
		/* Each mmu_ctx is cacheline aligned. */
		mmu_ctxp = kmem_cache_alloc(mmuctxdom_cache, KM_SLEEP);
		bzero(mmu_ctxp, sizeof (mmu_ctx_t));

		mutex_init(&mmu_ctxp->mmu_lock, NULL, MUTEX_SPIN,
		    (void *)ipltospl(DISP_LEVEL));
		mmu_ctxp->mmu_idx = info.mmu_idx;
		mmu_ctxp->mmu_nctxs = info.mmu_nctxs;
		/*
		 * Globally for lifetime of a system,
		 * gnum must always increase.
		 * mmu_saved_gnum is protected by the cpu_lock.
		 */
		mmu_ctxp->mmu_gnum = mmu_saved_gnum + 1;
		mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS;

		sfmmu_mmu_kstat_create(mmu_ctxp);

		mmu_ctxs_tbl[info.mmu_idx] = mmu_ctxp;
	} else {
		ASSERT(mmu_ctxp->mmu_idx == info.mmu_idx);
	}

	/*
	 * The mmu_lock is acquired here to prevent races with
	 * the wrap-around code.
	 */
	mutex_enter(&mmu_ctxp->mmu_lock);


	mmu_ctxp->mmu_ncpus++;
	CPUSET_ADD(mmu_ctxp->mmu_cpuset, cp->cpu_id);
	CPU_MMU_IDX(cp) = info.mmu_idx;
	CPU_MMU_CTXP(cp) = mmu_ctxp;

	mutex_exit(&mmu_ctxp->mmu_lock);
}

/*
 * Called to perform MMU context-related cleanup for a CPU.
 */
void
sfmmu_cpu_cleanup(cpu_t *cp)
{
	mmu_ctx_t	*mmu_ctxp;

	ASSERT(MUTEX_HELD(&cpu_lock));

	mmu_ctxp = CPU_MMU_CTXP(cp);
	ASSERT(mmu_ctxp != NULL);

	/*
	 * The mmu_lock is acquired here to prevent races with
	 * the wrap-around code.
	 */
	mutex_enter(&mmu_ctxp->mmu_lock);

	CPU_MMU_CTXP(cp) = NULL;

	CPUSET_DEL(mmu_ctxp->mmu_cpuset, cp->cpu_id);
	if (--mmu_ctxp->mmu_ncpus == 0) {
		mmu_ctxs_tbl[mmu_ctxp->mmu_idx] = NULL;
		mutex_exit(&mmu_ctxp->mmu_lock);
		mutex_destroy(&mmu_ctxp->mmu_lock);

		if (mmu_ctxp->mmu_kstat)
			kstat_delete(mmu_ctxp->mmu_kstat);

		/* mmu_saved_gnum is protected by the cpu_lock. */
		if (mmu_saved_gnum < mmu_ctxp->mmu_gnum)
			mmu_saved_gnum = mmu_ctxp->mmu_gnum;

		kmem_cache_free(mmuctxdom_cache, mmu_ctxp);

		return;
	}

	mutex_exit(&mmu_ctxp->mmu_lock);
}

/*
 * hat_setup() makes an address space context the current active one.
 * In sfmmu this translates to setting the secondary context with the
 * corresponding context.
 */
void
hat_setup(struct hat *sfmmup, int allocflag)
{
	hatlock_t *hatlockp;

	/* Init needs some special treatment. */
	if (allocflag == HAT_INIT) {
		/*
		 * Make sure that we have
		 * 1. a TSB
		 * 2. a valid ctx that doesn't get stolen after this point.
		 */
		hatlockp = sfmmu_hat_enter(sfmmup);

		/*
		 * Swap in the TSB.  hat_init() allocates tsbinfos without
		 * TSBs, but we need one for init, since the kernel does some
		 * special things to set up its stack and needs the TSB to
		 * resolve page faults.
		 */
		sfmmu_tsb_swapin(sfmmup, hatlockp);

		sfmmu_get_ctx(sfmmup);

		sfmmu_hat_exit(hatlockp);
	} else {
		ASSERT(allocflag == HAT_ALLOC);

		hatlockp = sfmmu_hat_enter(sfmmup);
		kpreempt_disable();

		CPUSET_ADD(sfmmup->sfmmu_cpusran, CPU->cpu_id);

		/*
		 * sfmmu_setctx_sec takes <pgsz|cnum> as a parameter,
		 * pagesize bits don't matter in this case since we are passing
		 * INVALID_CONTEXT to it.
		 */
		sfmmu_setctx_sec(INVALID_CONTEXT);
		sfmmu_clear_utsbinfo();

		kpreempt_enable();
		sfmmu_hat_exit(hatlockp);
	}
}

/*
 * Free all the translation resources for the specified address space.
 * Called from as_free when an address space is being destroyed.
 */
void
hat_free_start(struct hat *sfmmup)
{
	ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
	ASSERT(sfmmup != ksfmmup);
	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);

	sfmmup->sfmmu_free = 1;
}

void
hat_free_end(struct hat *sfmmup)
{
	int i;

	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
	if (sfmmup->sfmmu_ismhat) {
		for (i = 0; i < mmu_page_sizes; i++) {
			sfmmup->sfmmu_ttecnt[i] = 0;
			sfmmup->sfmmu_ismttecnt[i] = 0;
		}
	} else {
		/* EMPTY */
		ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0);
		ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0);
		ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0);
		ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0);
		ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
		ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
	}

	if (sfmmup->sfmmu_rmstat) {
		hat_freestat(sfmmup->sfmmu_as, NULL);
	}

	while (sfmmup->sfmmu_tsb != NULL) {
		struct tsb_info *next = sfmmup->sfmmu_tsb->tsb_next;
		sfmmu_tsbinfo_free(sfmmup->sfmmu_tsb);
		sfmmup->sfmmu_tsb = next;
	}
	sfmmu_free_sfmmu(sfmmup);

	kmem_cache_free(sfmmuid_cache, sfmmup);
}

/*
 * Set up any translation structures, for the specified address space,
 * that are needed or preferred when the process is being swapped in.
 */
/* ARGSUSED */
void
hat_swapin(struct hat *hat)
{
	ASSERT(hat->sfmmu_xhat_provider == NULL);
}

/*
 * Free all of the translation resources, for the specified address space,
 * that can be freed while the process is swapped out. Called from as_swapout.
 * Also, free up the ctx that this process was using.
 */
void
hat_swapout(struct hat *sfmmup)
{
	struct hmehash_bucket *hmebp;
	struct hme_blk *hmeblkp;
	struct hme_blk *pr_hblk = NULL;
	struct hme_blk *nx_hblk;
	int i;
	uint64_t hblkpa, prevpa, nx_pa;
	struct hme_blk *list = NULL;
	hatlock_t *hatlockp;
	struct tsb_info *tsbinfop;
	struct free_tsb {
		struct free_tsb *next;
		struct tsb_info *tsbinfop;
	};			/* free list of TSBs */
	struct free_tsb *freelist, *last, *next;

	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
	SFMMU_STAT(sf_swapout);

	/*
	 * There is no way to go from an as to all its translations in sfmmu.
	 * Here is one of the times when we take the big hit and traverse
	 * the hash looking for hme_blks to free up.  Not only do we free up
	 * this as's hme_blks but all those that are free.  We are obviously
	 * swapping because we need memory so let's free up as much
	 * as we can.
	 *
	 * Note that we don't flush TLB/TSB here -- it's not necessary
	 * because:
	 *  1) we free the ctx we're using and throw away the TSB(s);
	 *  2) processes aren't runnable while being swapped out.
	 */
	ASSERT(sfmmup != KHATID);
	for (i = 0; i <= UHMEHASH_SZ; i++) {
		hmebp = &uhme_hash[i];
		SFMMU_HASH_LOCK(hmebp);
		hmeblkp = hmebp->hmeblkp;
		hblkpa = hmebp->hmeh_nextpa;
		prevpa = 0;
		pr_hblk = NULL;
		while (hmeblkp) {

			ASSERT(!hmeblkp->hblk_xhat_bit);

			if ((hmeblkp->hblk_tag.htag_id == sfmmup) &&
			    !hmeblkp->hblk_shw_bit && !hmeblkp->hblk_lckcnt) {
				(void) sfmmu_hblk_unload(sfmmup, hmeblkp,
				    (caddr_t)get_hblk_base(hmeblkp),
				    get_hblk_endaddr(hmeblkp),
				    NULL, HAT_UNLOAD);
			}
			nx_hblk = hmeblkp->hblk_next;
			nx_pa = hmeblkp->hblk_nextpa;
			if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
				ASSERT(!hmeblkp->hblk_lckcnt);
				sfmmu_hblk_hash_rm(hmebp, hmeblkp,
				    prevpa, pr_hblk);
				sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list);
			} else {
				pr_hblk = hmeblkp;
				prevpa = hblkpa;
			}
			hmeblkp = nx_hblk;
			hblkpa = nx_pa;
		}
		SFMMU_HASH_UNLOCK(hmebp);
	}

	sfmmu_hblks_list_purge(&list);

	/*
	 * Now free up the ctx so that others can reuse it.
	 */
	hatlockp = sfmmu_hat_enter(sfmmup);

	sfmmu_invalidate_ctx(sfmmup);

	/*
	 * Free TSBs, but not tsbinfos, and set SWAPPED flag.
	 * If TSBs were never swapped in, just return.
	 * This implies that we don't support partial swapping
	 * of TSBs -- either all are swapped out, or none are.
	 *
	 * We must hold the HAT lock here to prevent racing with another
	 * thread trying to unmap TTEs from the TSB or running the post-
	 * relocator after relocating the TSB's memory.  Unfortunately, we
	 * can't free memory while holding the HAT lock or we could
	 * deadlock, so we build a list of TSBs to be freed after marking
	 * the tsbinfos as swapped out and free them after dropping the
	 * lock.
	 */
	if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
		sfmmu_hat_exit(hatlockp);
		return;
	}

	SFMMU_FLAGS_SET(sfmmup, HAT_SWAPPED);
	last = freelist = NULL;
	for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
	    tsbinfop = tsbinfop->tsb_next) {
		ASSERT((tsbinfop->tsb_flags & TSB_SWAPPED) == 0);

		/*
		 * Cast the TSB into a struct free_tsb and put it on the free
		 * list.
		 */
		if (freelist == NULL) {
			last = freelist = (struct free_tsb *)tsbinfop->tsb_va;
		} else {
			last->next = (struct free_tsb *)tsbinfop->tsb_va;
			last = last->next;
		}
		last->next = NULL;
		last->tsbinfop = tsbinfop;
		tsbinfop->tsb_flags |= TSB_SWAPPED;
		/*
		 * Zero out the TTE to clear the valid bit.
		 * Note we can't use a value like 0xbad because we want to
		 * ensure diagnostic bits are NEVER set on TTEs that might
		 * be loaded.  The intent is to catch any invalid access
		 * to the swapped TSB, such as a thread running with a valid
		 * context without first calling sfmmu_tsb_swapin() to
		 * allocate TSB memory.
1728 */ 1729 tsbinfop->tsb_tte.ll = 0; 1730 } 1731 1732 #ifdef sun4v 1733 if (freelist) 1734 sfmmu_invalidate_tsbinfo(sfmmup); 1735 #endif /* sun4v */ 1736 1737 /* Now we can drop the lock and free the TSB memory. */ 1738 sfmmu_hat_exit(hatlockp); 1739 for (; freelist != NULL; freelist = next) { 1740 next = freelist->next; 1741 sfmmu_tsb_free(freelist->tsbinfop); 1742 } 1743 } 1744 1745 /* 1746 * Duplicate the translations of an as into another newas 1747 */ 1748 /* ARGSUSED */ 1749 int 1750 hat_dup(struct hat *hat, struct hat *newhat, caddr_t addr, size_t len, 1751 uint_t flag) 1752 { 1753 ASSERT(hat->sfmmu_xhat_provider == NULL); 1754 ASSERT((flag == 0) || (flag == HAT_DUP_ALL) || (flag == HAT_DUP_COW)); 1755 1756 if (flag == HAT_DUP_COW) { 1757 panic("hat_dup: HAT_DUP_COW not supported"); 1758 } 1759 return (0); 1760 } 1761 1762 /* 1763 * Set up addr to map to page pp with protection prot. 1764 * As an optimization we also load the TSB with the 1765 * corresponding tte but it is no big deal if the tte gets kicked out. 1766 */ 1767 void 1768 hat_memload(struct hat *hat, caddr_t addr, struct page *pp, 1769 uint_t attr, uint_t flags) 1770 { 1771 tte_t tte; 1772 1773 1774 ASSERT(hat != NULL); 1775 ASSERT(PAGE_LOCKED(pp)); 1776 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 1777 ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG)); 1778 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 1779 1780 if (PP_ISFREE(pp)) { 1781 panic("hat_memload: loading a mapping to free page %p", 1782 (void *)pp); 1783 } 1784 1785 if (hat->sfmmu_xhat_provider) { 1786 XHAT_MEMLOAD(hat, addr, pp, attr, flags); 1787 return; 1788 } 1789 1790 ASSERT((hat == ksfmmup) || 1791 AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock)); 1792 1793 if (flags & ~SFMMU_LOAD_ALLFLAG) 1794 cmn_err(CE_NOTE, "hat_memload: unsupported flags %d", 1795 flags & ~SFMMU_LOAD_ALLFLAG); 1796 1797 if (hat->sfmmu_rmstat) 1798 hat_resvstat(MMU_PAGESIZE, hat->sfmmu_as, addr); 1799 1800 #if defined(SF_ERRATA_57) 1801 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) && 1802 (addr < errata57_limit) && (attr & PROT_EXEC) && 1803 !(flags & HAT_LOAD_SHARE)) { 1804 cmn_err(CE_WARN, "hat_memload: illegal attempt to make user " 1805 " page executable"); 1806 attr &= ~PROT_EXEC; 1807 } 1808 #endif 1809 1810 sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K); 1811 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, flags); 1812 1813 /* 1814 * Check TSB and TLB page sizes. 1815 */ 1816 if ((flags & HAT_LOAD_SHARE) == 0) { 1817 sfmmu_check_page_sizes(hat, 1); 1818 } 1819 } 1820 1821 /* 1822 * hat_devload can be called to map real memory (e.g. 1823 * /dev/kmem) and even though hat_devload will determine pf is 1824 * for memory, it will be unable to get a shared lock on the 1825 * page (because someone else has it exclusively) and will 1826 * pass dp = NULL. If tteload doesn't get a non-NULL 1827 * page pointer it can't cache memory. 
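 *
 * A minimal usage sketch (hypothetical caller, not taken from this file):
 * a driver mapping one uncacheable page of device registers into the
 * kernel address space might do
 *
 *	hat_devload(kas.a_hat, kvaddr, MMU_PAGESIZE, pfn,
 *	    PROT_READ | PROT_WRITE | HAT_NOSYNC,
 *	    HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);
 *
 * where kvaddr and pfn are assumed to have been obtained elsewhere.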
1828 */ 1829 void 1830 hat_devload(struct hat *hat, caddr_t addr, size_t len, pfn_t pfn, 1831 uint_t attr, int flags) 1832 { 1833 tte_t tte; 1834 struct page *pp = NULL; 1835 int use_lgpg = 0; 1836 1837 ASSERT(hat != NULL); 1838 1839 if (hat->sfmmu_xhat_provider) { 1840 XHAT_DEVLOAD(hat, addr, len, pfn, attr, flags); 1841 return; 1842 } 1843 1844 ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG)); 1845 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 1846 ASSERT((hat == ksfmmup) || 1847 AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock)); 1848 if (len == 0) 1849 panic("hat_devload: zero len"); 1850 if (flags & ~SFMMU_LOAD_ALLFLAG) 1851 cmn_err(CE_NOTE, "hat_devload: unsupported flags %d", 1852 flags & ~SFMMU_LOAD_ALLFLAG); 1853 1854 #if defined(SF_ERRATA_57) 1855 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) && 1856 (addr < errata57_limit) && (attr & PROT_EXEC) && 1857 !(flags & HAT_LOAD_SHARE)) { 1858 cmn_err(CE_WARN, "hat_devload: illegal attempt to make user " 1859 " page executable"); 1860 attr &= ~PROT_EXEC; 1861 } 1862 #endif 1863 1864 /* 1865 * If it's a memory page find its pp 1866 */ 1867 if (!(flags & HAT_LOAD_NOCONSIST) && pf_is_memory(pfn)) { 1868 pp = page_numtopp_nolock(pfn); 1869 if (pp == NULL) { 1870 flags |= HAT_LOAD_NOCONSIST; 1871 } else { 1872 if (PP_ISFREE(pp)) { 1873 panic("hat_memload: loading " 1874 "a mapping to free page %p", 1875 (void *)pp); 1876 } 1877 if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) { 1878 panic("hat_memload: loading a mapping " 1879 "to unlocked relocatable page %p", 1880 (void *)pp); 1881 } 1882 ASSERT(len == MMU_PAGESIZE); 1883 } 1884 } 1885 1886 if (hat->sfmmu_rmstat) 1887 hat_resvstat(len, hat->sfmmu_as, addr); 1888 1889 if (flags & HAT_LOAD_NOCONSIST) { 1890 attr |= SFMMU_UNCACHEVTTE; 1891 use_lgpg = 1; 1892 } 1893 if (!pf_is_memory(pfn)) { 1894 attr |= SFMMU_UNCACHEPTTE | HAT_NOSYNC; 1895 use_lgpg = 1; 1896 switch (attr & HAT_ORDER_MASK) { 1897 case HAT_STRICTORDER: 1898 case HAT_UNORDERED_OK: 1899 /* 1900 * we set the side effect bit for all non 1901 * memory mappings unless merging is ok 1902 */ 1903 attr |= SFMMU_SIDEFFECT; 1904 break; 1905 case HAT_MERGING_OK: 1906 case HAT_LOADCACHING_OK: 1907 case HAT_STORECACHING_OK: 1908 break; 1909 default: 1910 panic("hat_devload: bad attr"); 1911 break; 1912 } 1913 } 1914 while (len) { 1915 if (!use_lgpg) { 1916 sfmmu_memtte(&tte, pfn, attr, TTE8K); 1917 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 1918 flags); 1919 len -= MMU_PAGESIZE; 1920 addr += MMU_PAGESIZE; 1921 pfn++; 1922 continue; 1923 } 1924 /* 1925 * try to use large pages, check va/pa alignments 1926 * Note that 32M/256M page sizes are not (yet) supported. 
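 *
 * For each candidate size below (4M, then 512K, then 64K) we require
 * that enough length remains, that both the virtual address and the
 * physical address are aligned to that page size, and that the size
 * has not been turned off via disable_large_pages; otherwise we fall
 * back to an 8K mapping for this chunk.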
1927 */ 1928 if ((len >= MMU_PAGESIZE4M) && 1929 !((uintptr_t)addr & MMU_PAGEOFFSET4M) && 1930 !(disable_large_pages & (1 << TTE4M)) && 1931 !(mmu_ptob(pfn) & MMU_PAGEOFFSET4M)) { 1932 sfmmu_memtte(&tte, pfn, attr, TTE4M); 1933 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 1934 flags); 1935 len -= MMU_PAGESIZE4M; 1936 addr += MMU_PAGESIZE4M; 1937 pfn += MMU_PAGESIZE4M / MMU_PAGESIZE; 1938 } else if ((len >= MMU_PAGESIZE512K) && 1939 !((uintptr_t)addr & MMU_PAGEOFFSET512K) && 1940 !(disable_large_pages & (1 << TTE512K)) && 1941 !(mmu_ptob(pfn) & MMU_PAGEOFFSET512K)) { 1942 sfmmu_memtte(&tte, pfn, attr, TTE512K); 1943 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 1944 flags); 1945 len -= MMU_PAGESIZE512K; 1946 addr += MMU_PAGESIZE512K; 1947 pfn += MMU_PAGESIZE512K / MMU_PAGESIZE; 1948 } else if ((len >= MMU_PAGESIZE64K) && 1949 !((uintptr_t)addr & MMU_PAGEOFFSET64K) && 1950 !(disable_large_pages & (1 << TTE64K)) && 1951 !(mmu_ptob(pfn) & MMU_PAGEOFFSET64K)) { 1952 sfmmu_memtte(&tte, pfn, attr, TTE64K); 1953 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 1954 flags); 1955 len -= MMU_PAGESIZE64K; 1956 addr += MMU_PAGESIZE64K; 1957 pfn += MMU_PAGESIZE64K / MMU_PAGESIZE; 1958 } else { 1959 sfmmu_memtte(&tte, pfn, attr, TTE8K); 1960 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 1961 flags); 1962 len -= MMU_PAGESIZE; 1963 addr += MMU_PAGESIZE; 1964 pfn++; 1965 } 1966 } 1967 1968 /* 1969 * Check TSB and TLB page sizes. 1970 */ 1971 if ((flags & HAT_LOAD_SHARE) == 0) { 1972 sfmmu_check_page_sizes(hat, 1); 1973 } 1974 } 1975 1976 /* 1977 * Map the largest extent possible out of the page array. The array may NOT 1978 * be in order. The largest possible mapping a page can have 1979 * is specified in the p_szc field. The p_szc field 1980 * cannot change as long as there are any mappings (large or small) 1981 * to any of the pages that make up the large page. (i.e., any 1982 * promotion/demotion of page size is not up to the hat but up to 1983 * the page free list manager.) The array 1984 * should consist of properly aligned, contiguous pages that are 1985 * part of a big page for a large mapping to be created. 1986 */ 1987 void 1988 hat_memload_array(struct hat *hat, caddr_t addr, size_t len, 1989 struct page **pps, uint_t attr, uint_t flags) 1990 { 1991 int ttesz; 1992 size_t mapsz; 1993 pgcnt_t numpg, npgs; 1994 tte_t tte; 1995 page_t *pp; 1996 int large_pages_disable; 1997 1998 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 1999 2000 if (hat->sfmmu_xhat_provider) { 2001 XHAT_MEMLOAD_ARRAY(hat, addr, len, pps, attr, flags); 2002 return; 2003 } 2004 2005 if (hat->sfmmu_rmstat) 2006 hat_resvstat(len, hat->sfmmu_as, addr); 2007 2008 #if defined(SF_ERRATA_57) 2009 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) && 2010 (addr < errata57_limit) && (attr & PROT_EXEC) && 2011 !(flags & HAT_LOAD_SHARE)) { 2012 cmn_err(CE_WARN, "hat_memload_array: illegal attempt to make " 2013 "user page executable"); 2014 attr &= ~PROT_EXEC; 2015 } 2016 #endif 2017 2018 /* Get number of pages */ 2019 npgs = len >> MMU_PAGESHIFT; 2020 2021 if (flags & HAT_LOAD_SHARE) { 2022 large_pages_disable = disable_ism_large_pages; 2023 } else { 2024 large_pages_disable = disable_large_pages; 2025 } 2026 2027 if (npgs < NHMENTS || large_pages_disable == LARGE_PAGES_OFF) { 2028 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs); 2029 return; 2030 } 2031 2032 while (npgs >= NHMENTS) { 2033 pp = *pps; 2034 for (ttesz = pp->p_szc; ttesz != TTE8K; ttesz--) { 2035 /* 2036 * Check if this page size is disabled.
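 * (Bit ttesz of large_pages_disable corresponds to that TTE size; the
 * mask was taken from disable_ism_large_pages for HAT_LOAD_SHARE
 * requests and from disable_large_pages otherwise.)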
2037 */ 2038 if (large_pages_disable & (1 << ttesz)) 2039 continue; 2040 2041 numpg = TTEPAGES(ttesz); 2042 mapsz = numpg << MMU_PAGESHIFT; 2043 if ((npgs >= numpg) && 2044 IS_P2ALIGNED(addr, mapsz) && 2045 IS_P2ALIGNED(pp->p_pagenum, numpg)) { 2046 /* 2047 * At this point we have enough pages and 2048 * we know the virtual address and the pfn 2049 * are properly aligned. We still need 2050 * to check for physical contiguity but since 2051 * it is very likely that this is the case 2052 * we will assume they are so and undo 2053 * the request if necessary. It would 2054 * be great if we could get a hint flag 2055 * like HAT_CONTIG which would tell us 2056 * the pages are contigous for sure. 2057 */ 2058 sfmmu_memtte(&tte, (*pps)->p_pagenum, 2059 attr, ttesz); 2060 if (!sfmmu_tteload_array(hat, &tte, addr, 2061 pps, flags)) { 2062 break; 2063 } 2064 } 2065 } 2066 if (ttesz == TTE8K) { 2067 /* 2068 * We were not able to map array using a large page 2069 * batch a hmeblk or fraction at a time. 2070 */ 2071 numpg = ((uintptr_t)addr >> MMU_PAGESHIFT) 2072 & (NHMENTS-1); 2073 numpg = NHMENTS - numpg; 2074 ASSERT(numpg <= npgs); 2075 mapsz = numpg * MMU_PAGESIZE; 2076 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, 2077 numpg); 2078 } 2079 addr += mapsz; 2080 npgs -= numpg; 2081 pps += numpg; 2082 } 2083 2084 if (npgs) { 2085 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs); 2086 } 2087 2088 /* 2089 * Check TSB and TLB page sizes. 2090 */ 2091 if ((flags & HAT_LOAD_SHARE) == 0) { 2092 sfmmu_check_page_sizes(hat, 1); 2093 } 2094 } 2095 2096 /* 2097 * Function tries to batch 8K pages into the same hme blk. 2098 */ 2099 static void 2100 sfmmu_memload_batchsmall(struct hat *hat, caddr_t vaddr, page_t **pps, 2101 uint_t attr, uint_t flags, pgcnt_t npgs) 2102 { 2103 tte_t tte; 2104 page_t *pp; 2105 struct hmehash_bucket *hmebp; 2106 struct hme_blk *hmeblkp; 2107 int index; 2108 2109 while (npgs) { 2110 /* 2111 * Acquire the hash bucket. 2112 */ 2113 hmebp = sfmmu_tteload_acquire_hashbucket(hat, vaddr, TTE8K); 2114 ASSERT(hmebp); 2115 2116 /* 2117 * Find the hment block. 2118 */ 2119 hmeblkp = sfmmu_tteload_find_hmeblk(hat, hmebp, vaddr, 2120 TTE8K, flags); 2121 ASSERT(hmeblkp); 2122 2123 do { 2124 /* 2125 * Make the tte. 2126 */ 2127 pp = *pps; 2128 sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K); 2129 2130 /* 2131 * Add the translation. 2132 */ 2133 (void) sfmmu_tteload_addentry(hat, hmeblkp, &tte, 2134 vaddr, pps, flags); 2135 2136 /* 2137 * Goto next page. 2138 */ 2139 pps++; 2140 npgs--; 2141 2142 /* 2143 * Goto next address. 2144 */ 2145 vaddr += MMU_PAGESIZE; 2146 2147 /* 2148 * Don't crossover into a different hmentblk. 2149 */ 2150 index = (int)(((uintptr_t)vaddr >> MMU_PAGESHIFT) & 2151 (NHMENTS-1)); 2152 2153 } while (index != 0 && npgs != 0); 2154 2155 /* 2156 * Release the hash bucket. 
2157 */ 2158 2159 sfmmu_tteload_release_hashbucket(hmebp); 2160 } 2161 } 2162 2163 /* 2164 * Construct a tte for a page: 2165 * 2166 * tte_valid = 1 2167 * tte_size2 = size & TTE_SZ2_BITS (Panther and Olympus-C only) 2168 * tte_size = size 2169 * tte_nfo = attr & HAT_NOFAULT 2170 * tte_ie = attr & HAT_STRUCTURE_LE 2171 * tte_hmenum = hmenum 2172 * tte_pahi = pp->p_pagenum >> TTE_PASHIFT; 2173 * tte_palo = pp->p_pagenum & TTE_PALOMASK; 2174 * tte_ref = 1 (optimization) 2175 * tte_wr_perm = attr & PROT_WRITE; 2176 * tte_no_sync = attr & HAT_NOSYNC 2177 * tte_lock = attr & SFMMU_LOCKTTE 2178 * tte_cp = !(attr & SFMMU_UNCACHEPTTE) 2179 * tte_cv = !(attr & SFMMU_UNCACHEVTTE) 2180 * tte_e = attr & SFMMU_SIDEFFECT 2181 * tte_priv = !(attr & PROT_USER) 2182 * tte_hwwr = if nosync is set and it is writable we set the mod bit (opt) 2183 * tte_glb = 0 2184 */ 2185 void 2186 sfmmu_memtte(tte_t *ttep, pfn_t pfn, uint_t attr, int tte_sz) 2187 { 2188 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 2189 2190 ttep->tte_inthi = MAKE_TTE_INTHI(pfn, attr, tte_sz, 0 /* hmenum */); 2191 ttep->tte_intlo = MAKE_TTE_INTLO(pfn, attr, tte_sz, 0 /* hmenum */); 2192 2193 if (TTE_IS_NOSYNC(ttep)) { 2194 TTE_SET_REF(ttep); 2195 if (TTE_IS_WRITABLE(ttep)) { 2196 TTE_SET_MOD(ttep); 2197 } 2198 } 2199 if (TTE_IS_NFO(ttep) && TTE_IS_EXECUTABLE(ttep)) { 2200 panic("sfmmu_memtte: can't set both NFO and EXEC bits"); 2201 } 2202 } 2203 2204 /* 2205 * This function will add a translation to the hme_blk and allocate the 2206 * hme_blk if one does not exist. 2207 * If a page structure is specified then it will add the 2208 * corresponding hment to the mapping list. 2209 * It will also update the hmenum field for the tte. 2210 */ 2211 void 2212 sfmmu_tteload(struct hat *sfmmup, tte_t *ttep, caddr_t vaddr, page_t *pp, 2213 uint_t flags) 2214 { 2215 (void) sfmmu_tteload_array(sfmmup, ttep, vaddr, &pp, flags); 2216 } 2217 2218 /* 2219 * Load (ttep != NULL) or unload (ttep == NULL) one entry in the TSB. 2220 * Assumes that a particular page size may only be resident in one TSB. 2221 */ 2222 static void 2223 sfmmu_mod_tsb(sfmmu_t *sfmmup, caddr_t vaddr, tte_t *ttep, int ttesz) 2224 { 2225 struct tsb_info *tsbinfop = NULL; 2226 uint64_t tag; 2227 struct tsbe *tsbe_addr; 2228 uint64_t tsb_base; 2229 uint_t tsb_size; 2230 int vpshift = MMU_PAGESHIFT; 2231 int phys = 0; 2232 2233 if (sfmmup == ksfmmup) { /* No support for 32/256M ksfmmu pages */ 2234 phys = ktsb_phys; 2235 if (ttesz >= TTE4M) { 2236 #ifndef sun4v 2237 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M)); 2238 #endif 2239 tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base; 2240 tsb_size = ktsb4m_szcode; 2241 } else { 2242 tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base; 2243 tsb_size = ktsb_szcode; 2244 } 2245 } else { 2246 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz); 2247 2248 /* 2249 * If there isn't a TSB for this page size, or the TSB is 2250 * swapped out, there is nothing to do. Note that the latter 2251 * case seems impossible but can occur if hat_pageunload() 2252 * is called on an ISM mapping while the process is swapped 2253 * out. 2254 */ 2255 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED)) 2256 return; 2257 2258 /* 2259 * If another thread is in the middle of relocating a TSB 2260 * we can't unload the entry so set a flag so that the 2261 * TSB will be flushed before it can be accessed by the 2262 * process. 
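 * (Loads are simply skipped in that case, since a missing TSB entry
 * only costs a later miss; for unloads we must record TSB_FLUSH_NEEDED
 * so that stale entries are purged once the relocation completes.)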
2263 */ 2264 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) { 2265 if (ttep == NULL) 2266 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED; 2267 return; 2268 } 2269 #if defined(UTSB_PHYS) 2270 phys = 1; 2271 tsb_base = (uint64_t)tsbinfop->tsb_pa; 2272 #else 2273 tsb_base = (uint64_t)tsbinfop->tsb_va; 2274 #endif 2275 tsb_size = tsbinfop->tsb_szc; 2276 } 2277 if (ttesz >= TTE4M) 2278 vpshift = MMU_PAGESHIFT4M; 2279 2280 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size); 2281 tag = sfmmu_make_tsbtag(vaddr); 2282 2283 if (ttep == NULL) { 2284 sfmmu_unload_tsbe(tsbe_addr, tag, phys); 2285 } else { 2286 if (ttesz >= TTE4M) { 2287 SFMMU_STAT(sf_tsb_load4m); 2288 } else { 2289 SFMMU_STAT(sf_tsb_load8k); 2290 } 2291 2292 sfmmu_load_tsbe(tsbe_addr, tag, ttep, phys); 2293 } 2294 } 2295 2296 /* 2297 * Unmap all entries from [start, end) matching the given page size. 2298 * 2299 * This function is used primarily to unmap replicated 64K or 512K entries 2300 * from the TSB that are inserted using the base page size TSB pointer, but 2301 * it may also be called to unmap a range of addresses from the TSB. 2302 */ 2303 void 2304 sfmmu_unload_tsb_range(sfmmu_t *sfmmup, caddr_t start, caddr_t end, int ttesz) 2305 { 2306 struct tsb_info *tsbinfop; 2307 uint64_t tag; 2308 struct tsbe *tsbe_addr; 2309 caddr_t vaddr; 2310 uint64_t tsb_base; 2311 int vpshift, vpgsz; 2312 uint_t tsb_size; 2313 int phys = 0; 2314 2315 /* 2316 * Assumptions: 2317 * If ttesz == 8K, 64K or 512K, we walk through the range 8K 2318 * at a time shooting down any valid entries we encounter. 2319 * 2320 * If ttesz >= 4M we walk the range 4M at a time shooting 2321 * down any valid mappings we find. 2322 */ 2323 if (sfmmup == ksfmmup) { 2324 phys = ktsb_phys; 2325 if (ttesz >= TTE4M) { 2326 #ifndef sun4v 2327 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M)); 2328 #endif 2329 tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base; 2330 tsb_size = ktsb4m_szcode; 2331 } else { 2332 tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base; 2333 tsb_size = ktsb_szcode; 2334 } 2335 } else { 2336 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz); 2337 2338 /* 2339 * If there isn't a TSB for this page size, or the TSB is 2340 * swapped out, there is nothing to do. Note that the latter 2341 * case seems impossible but can occur if hat_pageunload() 2342 * is called on an ISM mapping while the process is swapped 2343 * out. 2344 */ 2345 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED)) 2346 return; 2347 2348 /* 2349 * If another thread is in the middle of relocating a TSB 2350 * we can't unload the entry so set a flag so that the 2351 * TSB will be flushed before it can be accessed by the 2352 * process. 2353 */ 2354 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) { 2355 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED; 2356 return; 2357 } 2358 #if defined(UTSB_PHYS) 2359 phys = 1; 2360 tsb_base = (uint64_t)tsbinfop->tsb_pa; 2361 #else 2362 tsb_base = (uint64_t)tsbinfop->tsb_va; 2363 #endif 2364 tsb_size = tsbinfop->tsb_szc; 2365 } 2366 if (ttesz >= TTE4M) { 2367 vpshift = MMU_PAGESHIFT4M; 2368 vpgsz = MMU_PAGESIZE4M; 2369 } else { 2370 vpshift = MMU_PAGESHIFT; 2371 vpgsz = MMU_PAGESIZE; 2372 } 2373 2374 for (vaddr = start; vaddr < end; vaddr += vpgsz) { 2375 tag = sfmmu_make_tsbtag(vaddr); 2376 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size); 2377 sfmmu_unload_tsbe(tsbe_addr, tag, phys); 2378 } 2379 } 2380 2381 /* 2382 * Select the optimum TSB size given the number of mappings 2383 * that need to be cached. 
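 * The loop below just walks up the TSB size codes until it finds the
 * smallest one whose expected capacity (SFMMU_RSS_TSBSIZE) covers the
 * mapping count, capping the result at tsb_max_growsize.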
2384 */ 2385 static int 2386 sfmmu_select_tsb_szc(pgcnt_t pgcnt) 2387 { 2388 int szc = 0; 2389 2390 #ifdef DEBUG 2391 if (tsb_grow_stress) { 2392 uint32_t randval = (uint32_t)gettick() >> 4; 2393 return (randval % (tsb_max_growsize + 1)); 2394 } 2395 #endif /* DEBUG */ 2396 2397 while ((szc < tsb_max_growsize) && (pgcnt > SFMMU_RSS_TSBSIZE(szc))) 2398 szc++; 2399 return (szc); 2400 } 2401 2402 /* 2403 * This function will add a translation to the hme_blk and allocate the 2404 * hme_blk if one does not exist. 2405 * If a page structure is specified then it will add the 2406 * corresponding hment to the mapping list. 2407 * It will also update the hmenum field for the tte. 2408 * Furthermore, it attempts to create a large page translation 2409 * for <addr,hat> at page array pps. It assumes addr and first 2410 * pp is correctly aligned. It returns 0 if successful and 1 otherwise. 2411 */ 2412 static int 2413 sfmmu_tteload_array(sfmmu_t *sfmmup, tte_t *ttep, caddr_t vaddr, 2414 page_t **pps, uint_t flags) 2415 { 2416 struct hmehash_bucket *hmebp; 2417 struct hme_blk *hmeblkp; 2418 int ret; 2419 uint_t size; 2420 2421 /* 2422 * Get mapping size. 2423 */ 2424 size = TTE_CSZ(ttep); 2425 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size))); 2426 2427 /* 2428 * Acquire the hash bucket. 2429 */ 2430 hmebp = sfmmu_tteload_acquire_hashbucket(sfmmup, vaddr, size); 2431 ASSERT(hmebp); 2432 2433 /* 2434 * Find the hment block. 2435 */ 2436 hmeblkp = sfmmu_tteload_find_hmeblk(sfmmup, hmebp, vaddr, size, flags); 2437 ASSERT(hmeblkp); 2438 2439 /* 2440 * Add the translation. 2441 */ 2442 ret = sfmmu_tteload_addentry(sfmmup, hmeblkp, ttep, vaddr, pps, flags); 2443 2444 /* 2445 * Release the hash bucket. 2446 */ 2447 sfmmu_tteload_release_hashbucket(hmebp); 2448 2449 return (ret); 2450 } 2451 2452 /* 2453 * Function locks and returns a pointer to the hash bucket for vaddr and size. 2454 */ 2455 static struct hmehash_bucket * 2456 sfmmu_tteload_acquire_hashbucket(sfmmu_t *sfmmup, caddr_t vaddr, int size) 2457 { 2458 struct hmehash_bucket *hmebp; 2459 int hmeshift; 2460 2461 hmeshift = HME_HASH_SHIFT(size); 2462 2463 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift); 2464 2465 SFMMU_HASH_LOCK(hmebp); 2466 2467 return (hmebp); 2468 } 2469 2470 /* 2471 * Function returns a pointer to an hmeblk in the hash bucket, hmebp. If the 2472 * hmeblk doesn't exists for the [sfmmup, vaddr & size] signature, a hmeblk is 2473 * allocated. 2474 */ 2475 static struct hme_blk * 2476 sfmmu_tteload_find_hmeblk(sfmmu_t *sfmmup, struct hmehash_bucket *hmebp, 2477 caddr_t vaddr, uint_t size, uint_t flags) 2478 { 2479 hmeblk_tag hblktag; 2480 int hmeshift; 2481 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL; 2482 uint64_t hblkpa, prevpa; 2483 struct kmem_cache *sfmmu_cache; 2484 uint_t forcefree; 2485 2486 hblktag.htag_id = sfmmup; 2487 hmeshift = HME_HASH_SHIFT(size); 2488 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift); 2489 hblktag.htag_rehash = HME_HASH_REHASH(size); 2490 2491 ttearray_realloc: 2492 2493 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, hblkpa, 2494 pr_hblk, prevpa, &list); 2495 2496 /* 2497 * We block until hblk_reserve_lock is released; it's held by 2498 * the thread, temporarily using hblk_reserve, until hblk_reserve is 2499 * replaced by a hblk from sfmmu8_cache. 
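 * Acquiring and immediately dropping hblk_reserve_lock below is simply
 * a way of waiting for that thread to finish; once it has, we retry
 * the hash search from the top (ttearray_realloc).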
2500 */ 2501 if (hmeblkp == (struct hme_blk *)hblk_reserve && 2502 hblk_reserve_thread != curthread) { 2503 SFMMU_HASH_UNLOCK(hmebp); 2504 mutex_enter(&hblk_reserve_lock); 2505 mutex_exit(&hblk_reserve_lock); 2506 SFMMU_STAT(sf_hblk_reserve_hit); 2507 SFMMU_HASH_LOCK(hmebp); 2508 goto ttearray_realloc; 2509 } 2510 2511 if (hmeblkp == NULL) { 2512 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size, 2513 hblktag, flags); 2514 } else { 2515 /* 2516 * It is possible for 8k and 64k hblks to collide since they 2517 * have the same rehash value. This is because we 2518 * lazily free hblks and 8K/64K blks could be lingering. 2519 * If we find size mismatch we free the block and & try again. 2520 */ 2521 if (get_hblk_ttesz(hmeblkp) != size) { 2522 ASSERT(!hmeblkp->hblk_vcnt); 2523 ASSERT(!hmeblkp->hblk_hmecnt); 2524 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, pr_hblk); 2525 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 2526 goto ttearray_realloc; 2527 } 2528 if (hmeblkp->hblk_shw_bit) { 2529 /* 2530 * if the hblk was previously used as a shadow hblk then 2531 * we will change it to a normal hblk 2532 */ 2533 if (hmeblkp->hblk_shw_mask) { 2534 sfmmu_shadow_hcleanup(sfmmup, hmeblkp, hmebp); 2535 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 2536 goto ttearray_realloc; 2537 } else { 2538 hmeblkp->hblk_shw_bit = 0; 2539 } 2540 } 2541 SFMMU_STAT(sf_hblk_hit); 2542 } 2543 2544 /* 2545 * hat_memload() should never call kmem_cache_free(); see block 2546 * comment showing the stacktrace in sfmmu_hblk_alloc(); 2547 * enqueue each hblk in the list to reserve list if it's created 2548 * from sfmmu8_cache *and* sfmmup == KHATID. 2549 */ 2550 forcefree = (sfmmup == KHATID) ? 1 : 0; 2551 while ((pr_hblk = list) != NULL) { 2552 list = pr_hblk->hblk_next; 2553 sfmmu_cache = get_hblk_cache(pr_hblk); 2554 if ((sfmmu_cache == sfmmu8_cache) && 2555 sfmmu_put_free_hblk(pr_hblk, forcefree)) 2556 continue; 2557 2558 ASSERT(sfmmup != KHATID); 2559 kmem_cache_free(sfmmu_cache, pr_hblk); 2560 } 2561 2562 ASSERT(get_hblk_ttesz(hmeblkp) == size); 2563 ASSERT(!hmeblkp->hblk_shw_bit); 2564 2565 return (hmeblkp); 2566 } 2567 2568 /* 2569 * Function adds a tte entry into the hmeblk. It returns 0 if successful and 1 2570 * otherwise. 2571 */ 2572 static int 2573 sfmmu_tteload_addentry(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, tte_t *ttep, 2574 caddr_t vaddr, page_t **pps, uint_t flags) 2575 { 2576 page_t *pp = *pps; 2577 int hmenum, size, remap; 2578 tte_t tteold, flush_tte; 2579 #ifdef DEBUG 2580 tte_t orig_old; 2581 #endif /* DEBUG */ 2582 struct sf_hment *sfhme; 2583 kmutex_t *pml, *pmtx; 2584 hatlock_t *hatlockp; 2585 2586 /* 2587 * remove this panic when we decide to let user virtual address 2588 * space be >= USERLIMIT. 2589 */ 2590 if (!TTE_IS_PRIVILEGED(ttep) && vaddr >= (caddr_t)USERLIMIT) 2591 panic("user addr %p in kernel space", vaddr); 2592 #if defined(TTE_IS_GLOBAL) 2593 if (TTE_IS_GLOBAL(ttep)) 2594 panic("sfmmu_tteload: creating global tte"); 2595 #endif 2596 2597 #ifdef DEBUG 2598 if (pf_is_memory(sfmmu_ttetopfn(ttep, vaddr)) && 2599 !TTE_IS_PCACHEABLE(ttep) && !sfmmu_allow_nc_trans) 2600 panic("sfmmu_tteload: non cacheable memory tte"); 2601 #endif /* DEBUG */ 2602 2603 if ((flags & HAT_LOAD_SHARE) || !TTE_IS_REF(ttep) || 2604 !TTE_IS_MOD(ttep)) { 2605 /* 2606 * Don't load TSB for dummy as in ISM. Also don't preload 2607 * the TSB if the TTE isn't writable since we're likely to 2608 * fault on it again -- preloading can be fairly expensive. 
2609 */ 2610 flags |= SFMMU_NO_TSBLOAD; 2611 } 2612 2613 size = TTE_CSZ(ttep); 2614 switch (size) { 2615 case TTE8K: 2616 SFMMU_STAT(sf_tteload8k); 2617 break; 2618 case TTE64K: 2619 SFMMU_STAT(sf_tteload64k); 2620 break; 2621 case TTE512K: 2622 SFMMU_STAT(sf_tteload512k); 2623 break; 2624 case TTE4M: 2625 SFMMU_STAT(sf_tteload4m); 2626 break; 2627 case (TTE32M): 2628 SFMMU_STAT(sf_tteload32m); 2629 ASSERT(mmu_page_sizes == max_mmu_page_sizes); 2630 break; 2631 case (TTE256M): 2632 SFMMU_STAT(sf_tteload256m); 2633 ASSERT(mmu_page_sizes == max_mmu_page_sizes); 2634 break; 2635 } 2636 2637 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size))); 2638 2639 HBLKTOHME_IDX(sfhme, hmeblkp, vaddr, hmenum); 2640 2641 /* 2642 * Need to grab mlist lock here so that pageunload 2643 * will not change tte behind us. 2644 */ 2645 if (pp) { 2646 pml = sfmmu_mlist_enter(pp); 2647 } 2648 2649 sfmmu_copytte(&sfhme->hme_tte, &tteold); 2650 /* 2651 * Look for corresponding hment and if valid verify 2652 * pfns are equal. 2653 */ 2654 remap = TTE_IS_VALID(&tteold); 2655 if (remap) { 2656 pfn_t new_pfn, old_pfn; 2657 2658 old_pfn = TTE_TO_PFN(vaddr, &tteold); 2659 new_pfn = TTE_TO_PFN(vaddr, ttep); 2660 2661 if (flags & HAT_LOAD_REMAP) { 2662 /* make sure we are remapping same type of pages */ 2663 if (pf_is_memory(old_pfn) != pf_is_memory(new_pfn)) { 2664 panic("sfmmu_tteload - tte remap io<->memory"); 2665 } 2666 if (old_pfn != new_pfn && 2667 (pp != NULL || sfhme->hme_page != NULL)) { 2668 panic("sfmmu_tteload - tte remap pp != NULL"); 2669 } 2670 } else if (old_pfn != new_pfn) { 2671 panic("sfmmu_tteload - tte remap, hmeblkp 0x%p", 2672 (void *)hmeblkp); 2673 } 2674 ASSERT(TTE_CSZ(&tteold) == TTE_CSZ(ttep)); 2675 } 2676 2677 if (pp) { 2678 if (size == TTE8K) { 2679 #ifdef VAC 2680 /* 2681 * Handle VAC consistency 2682 */ 2683 if (!remap && (cache & CACHE_VAC) && !PP_ISNC(pp)) { 2684 sfmmu_vac_conflict(sfmmup, vaddr, pp); 2685 } 2686 #endif 2687 2688 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) { 2689 pmtx = sfmmu_page_enter(pp); 2690 PP_CLRRO(pp); 2691 sfmmu_page_exit(pmtx); 2692 } else if (!PP_ISMAPPED(pp) && 2693 (!TTE_IS_WRITABLE(ttep)) && !(PP_ISMOD(pp))) { 2694 pmtx = sfmmu_page_enter(pp); 2695 if (!(PP_ISMOD(pp))) { 2696 PP_SETRO(pp); 2697 } 2698 sfmmu_page_exit(pmtx); 2699 } 2700 2701 } else if (sfmmu_pagearray_setup(vaddr, pps, ttep, remap)) { 2702 /* 2703 * sfmmu_pagearray_setup failed so return 2704 */ 2705 sfmmu_mlist_exit(pml); 2706 return (1); 2707 } 2708 } 2709 2710 /* 2711 * Make sure hment is not on a mapping list. 2712 */ 2713 ASSERT(remap || (sfhme->hme_page == NULL)); 2714 2715 /* if it is not a remap then hme->next better be NULL */ 2716 ASSERT((!remap) ? sfhme->hme_next == NULL : 1); 2717 2718 if (flags & HAT_LOAD_LOCK) { 2719 if (((int)hmeblkp->hblk_lckcnt + 1) >= MAX_HBLK_LCKCNT) { 2720 panic("too high lckcnt-hmeblk %p", 2721 (void *)hmeblkp); 2722 } 2723 atomic_add_16(&hmeblkp->hblk_lckcnt, 1); 2724 2725 HBLK_STACK_TRACE(hmeblkp, HBLK_LOCK); 2726 } 2727 2728 #ifdef VAC 2729 if (pp && PP_ISNC(pp)) { 2730 /* 2731 * If the physical page is marked to be uncacheable, like 2732 * by a vac conflict, make sure the new mapping is also 2733 * uncacheable. 
2734 */ 2735 TTE_CLR_VCACHEABLE(ttep); 2736 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR); 2737 } 2738 #endif 2739 ttep->tte_hmenum = hmenum; 2740 2741 #ifdef DEBUG 2742 orig_old = tteold; 2743 #endif /* DEBUG */ 2744 2745 while (sfmmu_modifytte_try(&tteold, ttep, &sfhme->hme_tte) < 0) { 2746 if ((sfmmup == KHATID) && 2747 (flags & (HAT_LOAD_LOCK | HAT_LOAD_REMAP))) { 2748 sfmmu_copytte(&sfhme->hme_tte, &tteold); 2749 } 2750 #ifdef DEBUG 2751 chk_tte(&orig_old, &tteold, ttep, hmeblkp); 2752 #endif /* DEBUG */ 2753 } 2754 2755 if (!TTE_IS_VALID(&tteold)) { 2756 2757 atomic_add_16(&hmeblkp->hblk_vcnt, 1); 2758 atomic_add_long(&sfmmup->sfmmu_ttecnt[size], 1); 2759 2760 /* 2761 * HAT_RELOAD_SHARE has been deprecated with lpg DISM. 2762 */ 2763 2764 if (size > TTE8K && (flags & HAT_LOAD_SHARE) == 0 && 2765 sfmmup != ksfmmup) { 2766 /* 2767 * If this is the first large mapping for the process 2768 * we must force any CPUs running this process to TL=0 2769 * where they will reload the HAT flags from the 2770 * tsbmiss area. This is necessary to make the large 2771 * mappings we are about to load visible to those CPUs; 2772 * otherwise they'll loop forever calling pagefault() 2773 * since we don't search large hash chains by default. 2774 */ 2775 hatlockp = sfmmu_hat_enter(sfmmup); 2776 if (size == TTE512K && 2777 !SFMMU_FLAGS_ISSET(sfmmup, HAT_512K_FLAG)) { 2778 SFMMU_FLAGS_SET(sfmmup, HAT_512K_FLAG); 2779 sfmmu_sync_mmustate(sfmmup); 2780 } else if (size == TTE4M && 2781 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4M_FLAG)) { 2782 SFMMU_FLAGS_SET(sfmmup, HAT_4M_FLAG); 2783 sfmmu_sync_mmustate(sfmmup); 2784 } else if (size == TTE64K && 2785 !SFMMU_FLAGS_ISSET(sfmmup, HAT_64K_FLAG)) { 2786 SFMMU_FLAGS_SET(sfmmup, HAT_64K_FLAG); 2787 /* no sync mmustate; 64K shares 8K hashes */ 2788 } else if (mmu_page_sizes == max_mmu_page_sizes) { 2789 if (size == TTE32M && 2790 !SFMMU_FLAGS_ISSET(sfmmup, HAT_32M_FLAG)) { 2791 SFMMU_FLAGS_SET(sfmmup, HAT_32M_FLAG); 2792 sfmmu_sync_mmustate(sfmmup); 2793 } else if (size == TTE256M && 2794 !SFMMU_FLAGS_ISSET(sfmmup, HAT_256M_FLAG)) { 2795 SFMMU_FLAGS_SET(sfmmup, HAT_256M_FLAG); 2796 sfmmu_sync_mmustate(sfmmup); 2797 } 2798 } 2799 if (size >= TTE4M && (flags & HAT_LOAD_TEXT) && 2800 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) { 2801 SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG); 2802 } 2803 sfmmu_hat_exit(hatlockp); 2804 } 2805 } 2806 ASSERT(TTE_IS_VALID(&sfhme->hme_tte)); 2807 2808 flush_tte.tte_intlo = (tteold.tte_intlo ^ ttep->tte_intlo) & 2809 hw_tte.tte_intlo; 2810 flush_tte.tte_inthi = (tteold.tte_inthi ^ ttep->tte_inthi) & 2811 hw_tte.tte_inthi; 2812 2813 if (remap && (flush_tte.tte_inthi || flush_tte.tte_intlo)) { 2814 /* 2815 * If remap and new tte differs from old tte we need 2816 * to sync the mod bit and flush TLB/TSB. We don't 2817 * need to sync ref bit because we currently always set 2818 * ref bit in tteload. 2819 */ 2820 ASSERT(TTE_IS_REF(ttep)); 2821 if (TTE_IS_MOD(&tteold)) { 2822 sfmmu_ttesync(sfmmup, vaddr, &tteold, pp); 2823 } 2824 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 0); 2825 xt_sync(sfmmup->sfmmu_cpusran); 2826 } 2827 2828 if ((flags & SFMMU_NO_TSBLOAD) == 0) { 2829 /* 2830 * We only preload 8K and 4M mappings into the TSB, since 2831 * 64K and 512K mappings are replicated and hence don't 2832 * have a single, unique TSB entry. Ditto for 32M/256M. 
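 * (Replicated here means such mappings are entered via the base page
 * size TSB pointer and so may occupy several TSB slots, one per 8K
 * virtual page; see sfmmu_unload_tsb_range() above.)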
2833 */ 2834 if (size == TTE8K || size == TTE4M) { 2835 hatlockp = sfmmu_hat_enter(sfmmup); 2836 sfmmu_load_tsb(sfmmup, vaddr, &sfhme->hme_tte, size); 2837 sfmmu_hat_exit(hatlockp); 2838 } 2839 } 2840 if (pp) { 2841 if (!remap) { 2842 HME_ADD(sfhme, pp); 2843 atomic_add_16(&hmeblkp->hblk_hmecnt, 1); 2844 ASSERT(hmeblkp->hblk_hmecnt > 0); 2845 2846 /* 2847 * Cannot ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS) 2848 * see pageunload() for comment. 2849 */ 2850 } 2851 sfmmu_mlist_exit(pml); 2852 } 2853 2854 return (0); 2855 } 2856 /* 2857 * Function unlocks hash bucket. 2858 */ 2859 static void 2860 sfmmu_tteload_release_hashbucket(struct hmehash_bucket *hmebp) 2861 { 2862 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 2863 SFMMU_HASH_UNLOCK(hmebp); 2864 } 2865 2866 /* 2867 * function which checks and sets up page array for a large 2868 * translation. Will set p_vcolor, p_index, p_ro fields. 2869 * Assumes addr and pfnum of first page are properly aligned. 2870 * Will check for physical contiguity. If check fails it return 2871 * non null. 2872 */ 2873 static int 2874 sfmmu_pagearray_setup(caddr_t addr, page_t **pps, tte_t *ttep, int remap) 2875 { 2876 int i, index, ttesz; 2877 pfn_t pfnum; 2878 pgcnt_t npgs; 2879 page_t *pp, *pp1; 2880 kmutex_t *pmtx; 2881 #ifdef VAC 2882 int osz; 2883 int cflags = 0; 2884 int vac_err = 0; 2885 #endif 2886 int newidx = 0; 2887 2888 ttesz = TTE_CSZ(ttep); 2889 2890 ASSERT(ttesz > TTE8K); 2891 2892 npgs = TTEPAGES(ttesz); 2893 index = PAGESZ_TO_INDEX(ttesz); 2894 2895 pfnum = (*pps)->p_pagenum; 2896 ASSERT(IS_P2ALIGNED(pfnum, npgs)); 2897 2898 /* 2899 * Save the first pp so we can do HAT_TMPNC at the end. 2900 */ 2901 pp1 = *pps; 2902 #ifdef VAC 2903 osz = fnd_mapping_sz(pp1); 2904 #endif 2905 2906 for (i = 0; i < npgs; i++, pps++) { 2907 pp = *pps; 2908 ASSERT(PAGE_LOCKED(pp)); 2909 ASSERT(pp->p_szc >= ttesz); 2910 ASSERT(pp->p_szc == pp1->p_szc); 2911 ASSERT(sfmmu_mlist_held(pp)); 2912 2913 /* 2914 * XXX is it possible to maintain P_RO on the root only? 2915 */ 2916 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) { 2917 pmtx = sfmmu_page_enter(pp); 2918 PP_CLRRO(pp); 2919 sfmmu_page_exit(pmtx); 2920 } else if (!PP_ISMAPPED(pp) && !TTE_IS_WRITABLE(ttep) && 2921 !PP_ISMOD(pp)) { 2922 pmtx = sfmmu_page_enter(pp); 2923 if (!(PP_ISMOD(pp))) { 2924 PP_SETRO(pp); 2925 } 2926 sfmmu_page_exit(pmtx); 2927 } 2928 2929 /* 2930 * If this is a remap we skip vac & contiguity checks. 2931 */ 2932 if (remap) 2933 continue; 2934 2935 /* 2936 * set p_vcolor and detect any vac conflicts. 2937 */ 2938 #ifdef VAC 2939 if (vac_err == 0) { 2940 vac_err = sfmmu_vacconflict_array(addr, pp, &cflags); 2941 2942 } 2943 #endif 2944 2945 /* 2946 * Save current index in case we need to undo it. 2947 * Note: "PAGESZ_TO_INDEX(sz) (1 << (sz))" 2948 * "SFMMU_INDEX_SHIFT 6" 2949 * "SFMMU_INDEX_MASK ((1 << SFMMU_INDEX_SHIFT) - 1)" 2950 * "PP_MAPINDEX(p_index) (p_index & SFMMU_INDEX_MASK)" 2951 * 2952 * So: index = PAGESZ_TO_INDEX(ttesz); 2953 * if ttesz == 1 then index = 0x2 2954 * 2 then index = 0x4 2955 * 3 then index = 0x8 2956 * 4 then index = 0x10 2957 * 5 then index = 0x20 2958 * The code below checks if it's a new pagesize (ie, newidx) 2959 * in case we need to take it back out of p_index, 2960 * and then or's the new index into the existing index. 
2961 */ 2962 if ((PP_MAPINDEX(pp) & index) == 0) 2963 newidx = 1; 2964 pp->p_index = (PP_MAPINDEX(pp) | index); 2965 2966 /* 2967 * contiguity check 2968 */ 2969 if (pp->p_pagenum != pfnum) { 2970 /* 2971 * If we fail the contiguity test then 2972 * the only thing we need to fix is the p_index field. 2973 * We might get a few extra flushes but since this 2974 * path is rare that is ok. The p_ro field will 2975 * get automatically fixed on the next tteload to 2976 * the page. NO TNC bit is set yet. 2977 */ 2978 while (i >= 0) { 2979 pp = *pps; 2980 if (newidx) 2981 pp->p_index = (PP_MAPINDEX(pp) & 2982 ~index); 2983 pps--; 2984 i--; 2985 } 2986 return (1); 2987 } 2988 pfnum++; 2989 addr += MMU_PAGESIZE; 2990 } 2991 2992 #ifdef VAC 2993 if (vac_err) { 2994 if (ttesz > osz) { 2995 /* 2996 * There are some smaller mappings that cause vac 2997 * conflicts. Convert all existing small mappings to 2998 * TNC. 2999 */ 3000 SFMMU_STAT_ADD(sf_uncache_conflict, npgs); 3001 sfmmu_page_cache_array(pp1, HAT_TMPNC, CACHE_FLUSH, 3002 npgs); 3003 } else { 3004 /* EMPTY */ 3005 /* 3006 * If there exists a big page mapping, 3007 * that means the whole existing big page 3008 * has the TNC setting already. No need to convert to 3009 * TNC again. 3010 */ 3011 ASSERT(PP_ISTNC(pp1)); 3012 } 3013 } 3014 #endif /* VAC */ 3015 3016 return (0); 3017 } 3018 3019 #ifdef VAC 3020 /* 3021 * Routine that detects vac conflicts for a large page. It also 3022 * sets virtual color for all pp's for this big mapping. 3023 */ 3024 static int 3025 sfmmu_vacconflict_array(caddr_t addr, page_t *pp, int *cflags) 3026 { 3027 int vcolor, ocolor; 3028 3029 ASSERT(sfmmu_mlist_held(pp)); 3030 3031 if (PP_ISNC(pp)) { 3032 return (HAT_TMPNC); 3033 } 3034 3035 vcolor = addr_to_vcolor(addr); 3036 if (PP_NEWPAGE(pp)) { 3037 PP_SET_VCOLOR(pp, vcolor); 3038 return (0); 3039 } 3040 3041 ocolor = PP_GET_VCOLOR(pp); 3042 if (ocolor == vcolor) { 3043 return (0); 3044 } 3045 3046 if (!PP_ISMAPPED(pp)) { 3047 /* 3048 * Previous user of page had a different color 3049 * but since there are no current users 3050 * we just flush the cache and change the color. 3051 * As an optimization for large pages we flush the 3052 * entire cache of that color and set a flag. 3053 */ 3054 SFMMU_STAT(sf_pgcolor_conflict); 3055 if (!CacheColor_IsFlushed(*cflags, ocolor)) { 3056 CacheColor_SetFlushed(*cflags, ocolor); 3057 sfmmu_cache_flushcolor(ocolor, pp->p_pagenum); 3058 } 3059 PP_SET_VCOLOR(pp, vcolor); 3060 return (0); 3061 } 3062 3063 /* 3064 * We got a real conflict with a current mapping. 3065 * Set flags to start uncaching all mappings 3066 * and return failure so we restart looping over 3067 * the pp array from the beginning. 3068 */ 3069 return (HAT_TMPNC); 3070 } 3071 #endif /* VAC */ 3072 3073 /* 3074 * Creates a large page shadow hmeblk for a tte. 3075 * The purpose of this routine is to allow us to do quick unloads because 3076 * the vm layer can easily pass a very large but sparsely populated range.
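 * The shadow hblk lives in the hash for the next larger page size; its
 * hblk_shw_mask records (one bit per vshift value, at most 8) which
 * sub-regions beneath it have had hblks created, so unloading a huge
 * range only has to visit the populated parts.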
3077 */ 3078 static struct hme_blk * 3079 sfmmu_shadow_hcreate(sfmmu_t *sfmmup, caddr_t vaddr, int ttesz, uint_t flags) 3080 { 3081 struct hmehash_bucket *hmebp; 3082 hmeblk_tag hblktag; 3083 int hmeshift, size, vshift; 3084 uint_t shw_mask, newshw_mask; 3085 struct hme_blk *hmeblkp; 3086 3087 ASSERT(sfmmup != KHATID); 3088 if (mmu_page_sizes == max_mmu_page_sizes) { 3089 ASSERT(ttesz < TTE256M); 3090 } else { 3091 ASSERT(ttesz < TTE4M); 3092 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0); 3093 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0); 3094 } 3095 3096 if (ttesz == TTE8K) { 3097 size = TTE512K; 3098 } else { 3099 size = ++ttesz; 3100 } 3101 3102 hblktag.htag_id = sfmmup; 3103 hmeshift = HME_HASH_SHIFT(size); 3104 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift); 3105 hblktag.htag_rehash = HME_HASH_REHASH(size); 3106 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift); 3107 3108 SFMMU_HASH_LOCK(hmebp); 3109 3110 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 3111 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve); 3112 if (hmeblkp == NULL) { 3113 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size, 3114 hblktag, flags); 3115 } 3116 ASSERT(hmeblkp); 3117 if (!hmeblkp->hblk_shw_mask) { 3118 /* 3119 * If this is an unused hblk it was just allocated or could 3120 * potentially be a previous large page hblk so we need to 3121 * set the shadow bit. 3122 */ 3123 hmeblkp->hblk_shw_bit = 1; 3124 } 3125 ASSERT(hmeblkp->hblk_shw_bit == 1); 3126 vshift = vaddr_to_vshift(hblktag, vaddr, size); 3127 ASSERT(vshift < 8); 3128 /* 3129 * Atomically set shw mask bit 3130 */ 3131 do { 3132 shw_mask = hmeblkp->hblk_shw_mask; 3133 newshw_mask = shw_mask | (1 << vshift); 3134 newshw_mask = cas32(&hmeblkp->hblk_shw_mask, shw_mask, 3135 newshw_mask); 3136 } while (newshw_mask != shw_mask); 3137 3138 SFMMU_HASH_UNLOCK(hmebp); 3139 3140 return (hmeblkp); 3141 } 3142 3143 /* 3144 * This routine cleans up a previous shadow hmeblk and changes it to 3145 * a regular hblk. This happens rarely but it is possible 3146 * when a process wants to use large pages and there are hblks still 3147 * lying around from a previous as that used these hmeblks. 3148 * The alternative was to clean up the shadow hblks at unload time 3149 * but since so few user processes actually use large pages, it is 3150 * better to be lazy and clean up at this time.
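 * The conversion is done by walking the range the shadow hblk covers
 * (sfmmu_free_hblks() below) and freeing any child hblks that no longer
 * hold mappings; the hash lock is dropped around that walk and
 * re-acquired before returning.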
3151 */ 3152 static void 3153 sfmmu_shadow_hcleanup(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, 3154 struct hmehash_bucket *hmebp) 3155 { 3156 caddr_t addr, endaddr; 3157 int hashno, size; 3158 3159 ASSERT(hmeblkp->hblk_shw_bit); 3160 3161 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 3162 3163 if (!hmeblkp->hblk_shw_mask) { 3164 hmeblkp->hblk_shw_bit = 0; 3165 return; 3166 } 3167 addr = (caddr_t)get_hblk_base(hmeblkp); 3168 endaddr = get_hblk_endaddr(hmeblkp); 3169 size = get_hblk_ttesz(hmeblkp); 3170 hashno = size - 1; 3171 ASSERT(hashno > 0); 3172 SFMMU_HASH_UNLOCK(hmebp); 3173 3174 sfmmu_free_hblks(sfmmup, addr, endaddr, hashno); 3175 3176 SFMMU_HASH_LOCK(hmebp); 3177 } 3178 3179 static void 3180 sfmmu_free_hblks(sfmmu_t *sfmmup, caddr_t addr, caddr_t endaddr, 3181 int hashno) 3182 { 3183 int hmeshift, shadow = 0; 3184 hmeblk_tag hblktag; 3185 struct hmehash_bucket *hmebp; 3186 struct hme_blk *hmeblkp; 3187 struct hme_blk *nx_hblk, *pr_hblk, *list = NULL; 3188 uint64_t hblkpa, prevpa, nx_pa; 3189 3190 ASSERT(hashno > 0); 3191 hblktag.htag_id = sfmmup; 3192 hblktag.htag_rehash = hashno; 3193 3194 hmeshift = HME_HASH_SHIFT(hashno); 3195 3196 while (addr < endaddr) { 3197 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3198 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 3199 SFMMU_HASH_LOCK(hmebp); 3200 /* inline HME_HASH_SEARCH */ 3201 hmeblkp = hmebp->hmeblkp; 3202 hblkpa = hmebp->hmeh_nextpa; 3203 prevpa = 0; 3204 pr_hblk = NULL; 3205 while (hmeblkp) { 3206 ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp)); 3207 if (HTAGS_EQ(hmeblkp->hblk_tag, hblktag)) { 3208 /* found hme_blk */ 3209 if (hmeblkp->hblk_shw_bit) { 3210 if (hmeblkp->hblk_shw_mask) { 3211 shadow = 1; 3212 sfmmu_shadow_hcleanup(sfmmup, 3213 hmeblkp, hmebp); 3214 break; 3215 } else { 3216 hmeblkp->hblk_shw_bit = 0; 3217 } 3218 } 3219 3220 /* 3221 * Hblk_hmecnt and hblk_vcnt could be non zero 3222 * since hblk_unload() does not gurantee that. 3223 * 3224 * XXX - this could cause tteload() to spin 3225 * where sfmmu_shadow_hcleanup() is called. 3226 */ 3227 } 3228 3229 nx_hblk = hmeblkp->hblk_next; 3230 nx_pa = hmeblkp->hblk_nextpa; 3231 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 3232 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, 3233 pr_hblk); 3234 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 3235 } else { 3236 pr_hblk = hmeblkp; 3237 prevpa = hblkpa; 3238 } 3239 hmeblkp = nx_hblk; 3240 hblkpa = nx_pa; 3241 } 3242 3243 SFMMU_HASH_UNLOCK(hmebp); 3244 3245 if (shadow) { 3246 /* 3247 * We found another shadow hblk so cleaned its 3248 * children. We need to go back and cleanup 3249 * the original hblk so we don't change the 3250 * addr. 3251 */ 3252 shadow = 0; 3253 } else { 3254 addr = (caddr_t)roundup((uintptr_t)addr + 1, 3255 (1 << hmeshift)); 3256 } 3257 } 3258 sfmmu_hblks_list_purge(&list); 3259 } 3260 3261 /* 3262 * Release one hardware address translation lock on the given address range. 3263 */ 3264 void 3265 hat_unlock(struct hat *sfmmup, caddr_t addr, size_t len) 3266 { 3267 struct hmehash_bucket *hmebp; 3268 hmeblk_tag hblktag; 3269 int hmeshift, hashno = 1; 3270 struct hme_blk *hmeblkp, *list = NULL; 3271 caddr_t endaddr; 3272 3273 ASSERT(sfmmup != NULL); 3274 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 3275 3276 ASSERT((sfmmup == ksfmmup) || 3277 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 3278 ASSERT((len & MMU_PAGEOFFSET) == 0); 3279 endaddr = addr + len; 3280 hblktag.htag_id = sfmmup; 3281 3282 /* 3283 * Spitfire supports 4 page sizes. 
3284 * Most pages are expected to be of the smallest page size (8K) and 3285 * these will not need to be rehashed. 64K pages also don't need to be 3286 * rehashed because an hmeblk spans 64K of address space. 512K pages 3287 * might need 1 rehash and and 4M pages might need 2 rehashes. 3288 */ 3289 while (addr < endaddr) { 3290 hmeshift = HME_HASH_SHIFT(hashno); 3291 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3292 hblktag.htag_rehash = hashno; 3293 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 3294 3295 SFMMU_HASH_LOCK(hmebp); 3296 3297 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 3298 if (hmeblkp != NULL) { 3299 /* 3300 * If we encounter a shadow hmeblk then 3301 * we know there are no valid hmeblks mapping 3302 * this address at this size or larger. 3303 * Just increment address by the smallest 3304 * page size. 3305 */ 3306 if (hmeblkp->hblk_shw_bit) { 3307 addr += MMU_PAGESIZE; 3308 } else { 3309 addr = sfmmu_hblk_unlock(hmeblkp, addr, 3310 endaddr); 3311 } 3312 SFMMU_HASH_UNLOCK(hmebp); 3313 hashno = 1; 3314 continue; 3315 } 3316 SFMMU_HASH_UNLOCK(hmebp); 3317 3318 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 3319 /* 3320 * We have traversed the whole list and rehashed 3321 * if necessary without finding the address to unlock 3322 * which should never happen. 3323 */ 3324 panic("sfmmu_unlock: addr not found. " 3325 "addr %p hat %p", (void *)addr, (void *)sfmmup); 3326 } else { 3327 hashno++; 3328 } 3329 } 3330 3331 sfmmu_hblks_list_purge(&list); 3332 } 3333 3334 /* 3335 * Function to unlock a range of addresses in an hmeblk. It returns the 3336 * next address that needs to be unlocked. 3337 * Should be called with the hash lock held. 3338 */ 3339 static caddr_t 3340 sfmmu_hblk_unlock(struct hme_blk *hmeblkp, caddr_t addr, caddr_t endaddr) 3341 { 3342 struct sf_hment *sfhme; 3343 tte_t tteold, ttemod; 3344 int ttesz, ret; 3345 3346 ASSERT(in_hblk_range(hmeblkp, addr)); 3347 ASSERT(hmeblkp->hblk_shw_bit == 0); 3348 3349 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 3350 ttesz = get_hblk_ttesz(hmeblkp); 3351 3352 HBLKTOHME(sfhme, hmeblkp, addr); 3353 while (addr < endaddr) { 3354 readtte: 3355 sfmmu_copytte(&sfhme->hme_tte, &tteold); 3356 if (TTE_IS_VALID(&tteold)) { 3357 3358 ttemod = tteold; 3359 3360 ret = sfmmu_modifytte_try(&tteold, &ttemod, 3361 &sfhme->hme_tte); 3362 3363 if (ret < 0) 3364 goto readtte; 3365 3366 if (hmeblkp->hblk_lckcnt == 0) 3367 panic("zero hblk lckcnt"); 3368 3369 if (((uintptr_t)addr + TTEBYTES(ttesz)) > 3370 (uintptr_t)endaddr) 3371 panic("can't unlock large tte"); 3372 3373 ASSERT(hmeblkp->hblk_lckcnt > 0); 3374 atomic_add_16(&hmeblkp->hblk_lckcnt, -1); 3375 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK); 3376 } else { 3377 panic("sfmmu_hblk_unlock: invalid tte"); 3378 } 3379 addr += TTEBYTES(ttesz); 3380 sfhme++; 3381 } 3382 return (addr); 3383 } 3384 3385 /* 3386 * Physical Address Mapping Framework 3387 * 3388 * General rules: 3389 * 3390 * (1) Applies only to seg_kmem memory pages. To make things easier, 3391 * seg_kpm addresses are also accepted by the routines, but nothing 3392 * is done with them since by definition their PA mappings are static. 3393 * (2) hat_add_callback() may only be called while holding the page lock 3394 * SE_SHARED or SE_EXCL of the underlying page (e.g., as_pagelock()), 3395 * or passing HAC_PAGELOCK flag. 3396 * (3) prehandler() and posthandler() may not call hat_add_callback() or 3397 * hat_delete_callback(), nor should they allocate memory. 
Post quiesce 3398 * callbacks may not sleep or acquire adaptive mutex locks. 3399 * (4) Either prehandler() or posthandler() (but not both) may be specified 3400 * as being NULL. Specifying an errhandler() is optional. 3401 * 3402 * Details of using the framework: 3403 * 3404 * registering a callback (hat_register_callback()) 3405 * 3406 * Pass prehandler, posthandler, errhandler addresses 3407 * as described below. If capture_cpus argument is nonzero, 3408 * suspend callback to the prehandler will occur with CPUs 3409 * captured and executing xc_loop() and CPUs will remain 3410 * captured until after the posthandler suspend callback 3411 * occurs. 3412 * 3413 * adding a callback (hat_add_callback()) 3414 * 3415 * as_pagelock(); 3416 * hat_add_callback(); 3417 * save returned pfn in private data structures or program registers; 3418 * as_pageunlock(); 3419 * 3420 * prehandler() 3421 * 3422 * Stop all accesses by physical address to this memory page. 3423 * Called twice: the first, PRESUSPEND, is a context safe to acquire 3424 * adaptive locks. The second, SUSPEND, is called at high PIL with 3425 * CPUs captured so adaptive locks may NOT be acquired (and all spin 3426 * locks must be XCALL_PIL or higher locks). 3427 * 3428 * May return the following errors: 3429 * EIO: A fatal error has occurred. This will result in panic. 3430 * EAGAIN: The page cannot be suspended. This will fail the 3431 * relocation. 3432 * 0: Success. 3433 * 3434 * posthandler() 3435 * 3436 * Save new pfn in private data structures or program registers; 3437 * not allowed to fail (non-zero return values will result in panic). 3438 * 3439 * errhandler() 3440 * 3441 * called when an error occurs related to the callback. Currently 3442 * the only such error is HAT_CB_ERR_LEAKED which indicates that 3443 * a page is being freed, but there are still outstanding callback(s) 3444 * registered on the page. 3445 * 3446 * removing a callback (hat_delete_callback(); e.g., prior to freeing memory) 3447 * 3448 * stop using physical address 3449 * hat_delete_callback(); 3450 * 3451 */ 3452 3453 /* 3454 * Register a callback class. Each subsystem should do this once and 3455 * cache the id_t returned for use in setting up and tearing down callbacks. 3456 * 3457 * There is no facility for removing callback IDs once they are created; 3458 * the "key" should be unique for each module, so in case a module is unloaded 3459 * and subsequently re-loaded, we can recycle the module's previous entry. 3460 */ 3461 id_t 3462 hat_register_callback(int key, 3463 int (*prehandler)(caddr_t, uint_t, uint_t, void *), 3464 int (*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t), 3465 int (*errhandler)(caddr_t, uint_t, uint_t, void *), 3466 int capture_cpus) 3467 { 3468 id_t id; 3469 3470 /* 3471 * Search the table for a pre-existing callback associated with 3472 * the identifier "key". If one exists, we re-use that entry in 3473 * the table for this instance, otherwise we assign the next 3474 * available table slot. 
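 *
 * A hypothetical caller (illustrative only, not taken from this file)
 * registers once, typically at module load, and caches the result:
 *
 *	mycb_id = hat_register_callback(MYDRV_CB_KEY, my_prehandler,
 *	    my_posthandler, my_errhandler, 0);
 *
 * where MYDRV_CB_KEY is a module-unique integer chosen by the caller,
 * and mycb_id is later passed to hat_add_callback() for each page of
 * interest.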
3475 */ 3476 for (id = 0; id < sfmmu_max_cb_id; id++) { 3477 if (sfmmu_cb_table[id].key == key) 3478 break; 3479 } 3480 3481 if (id == sfmmu_max_cb_id) { 3482 id = sfmmu_cb_nextid++; 3483 if (id >= sfmmu_max_cb_id) 3484 panic("hat_register_callback: out of callback IDs"); 3485 } 3486 3487 ASSERT(prehandler != NULL || posthandler != NULL); 3488 3489 sfmmu_cb_table[id].key = key; 3490 sfmmu_cb_table[id].prehandler = prehandler; 3491 sfmmu_cb_table[id].posthandler = posthandler; 3492 sfmmu_cb_table[id].errhandler = errhandler; 3493 sfmmu_cb_table[id].capture_cpus = capture_cpus; 3494 3495 return (id); 3496 } 3497 3498 #define HAC_COOKIE_NONE (void *)-1 3499 3500 /* 3501 * Add relocation callbacks to the specified addr/len which will be called 3502 * when relocating the associated page. See the description of pre and 3503 * posthandler above for more details. 3504 * 3505 * If HAC_PAGELOCK is included in flags, the underlying memory page is 3506 * locked internally so the caller must be able to deal with the callback 3507 * running even before this function has returned. If HAC_PAGELOCK is not 3508 * set, it is assumed that the underlying memory pages are locked. 3509 * 3510 * Since the caller must track the individual page boundaries anyway, 3511 * we only allow a callback to be added to a single page (large 3512 * or small). Thus [addr, addr + len) MUST be contained within a single 3513 * page. 3514 * 3515 * Registering multiple callbacks on the same [addr, addr+len) is supported, 3516 * _provided_that_ a unique parameter is specified for each callback. 3517 * If multiple callbacks are registered on the same range the callback will 3518 * be invoked with each unique parameter. Registering the same callback with 3519 * the same argument more than once will result in corrupted kernel state. 3520 * 3521 * Returns the pfn of the underlying kernel page in *rpfn 3522 * on success, or PFN_INVALID on failure. 3523 * 3524 * cookiep (if passed) provides storage space for an opaque cookie 3525 * to return later to hat_delete_callback(). This cookie makes the callback 3526 * deletion significantly quicker by avoiding a potentially lengthy hash 3527 * search. 3528 * 3529 * Returns values: 3530 * 0: success 3531 * ENOMEM: memory allocation failure (e.g. flags was passed as HAC_NOSLEEP) 3532 * EINVAL: callback ID is not valid 3533 * ENXIO: ["vaddr", "vaddr" + len) is not mapped in the kernel's address 3534 * space 3535 * ERANGE: ["vaddr", "vaddr" + len) crosses a page boundary 3536 */ 3537 int 3538 hat_add_callback(id_t callback_id, caddr_t vaddr, uint_t len, uint_t flags, 3539 void *pvt, pfn_t *rpfn, void **cookiep) 3540 { 3541 struct hmehash_bucket *hmebp; 3542 hmeblk_tag hblktag; 3543 struct hme_blk *hmeblkp; 3544 int hmeshift, hashno; 3545 caddr_t saddr, eaddr, baseaddr; 3546 struct pa_hment *pahmep; 3547 struct sf_hment *sfhmep, *osfhmep; 3548 kmutex_t *pml; 3549 tte_t tte; 3550 page_t *pp; 3551 vnode_t *vp; 3552 u_offset_t off; 3553 pfn_t pfn; 3554 int kmflags = (flags & HAC_SLEEP)? KM_SLEEP : KM_NOSLEEP; 3555 int locked = 0; 3556 3557 /* 3558 * For KPM mappings, just return the physical address since we 3559 * don't need to register any callbacks. 
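 * (By definition a kpm virtual address encodes the physical address, so
 * the pfn is computed directly with SFMMU_KPM_VTOP and the PA mapping
 * is static; there is nothing for a relocation callback to track.)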
3560 */ 3561 if (IS_KPM_ADDR(vaddr)) { 3562 uint64_t paddr; 3563 SFMMU_KPM_VTOP(vaddr, paddr); 3564 *rpfn = btop(paddr); 3565 if (cookiep != NULL) 3566 *cookiep = HAC_COOKIE_NONE; 3567 return (0); 3568 } 3569 3570 if (callback_id < (id_t)0 || callback_id >= sfmmu_cb_nextid) { 3571 *rpfn = PFN_INVALID; 3572 return (EINVAL); 3573 } 3574 3575 if ((pahmep = kmem_cache_alloc(pa_hment_cache, kmflags)) == NULL) { 3576 *rpfn = PFN_INVALID; 3577 return (ENOMEM); 3578 } 3579 3580 sfhmep = &pahmep->sfment; 3581 3582 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK); 3583 eaddr = saddr + len; 3584 3585 rehash: 3586 /* Find the mapping(s) for this page */ 3587 for (hashno = TTE64K, hmeblkp = NULL; 3588 hmeblkp == NULL && hashno <= mmu_hashcnt; 3589 hashno++) { 3590 hmeshift = HME_HASH_SHIFT(hashno); 3591 hblktag.htag_id = ksfmmup; 3592 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift); 3593 hblktag.htag_rehash = hashno; 3594 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift); 3595 3596 SFMMU_HASH_LOCK(hmebp); 3597 3598 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 3599 3600 if (hmeblkp == NULL) 3601 SFMMU_HASH_UNLOCK(hmebp); 3602 } 3603 3604 if (hmeblkp == NULL) { 3605 kmem_cache_free(pa_hment_cache, pahmep); 3606 *rpfn = PFN_INVALID; 3607 return (ENXIO); 3608 } 3609 3610 HBLKTOHME(osfhmep, hmeblkp, saddr); 3611 sfmmu_copytte(&osfhmep->hme_tte, &tte); 3612 3613 if (!TTE_IS_VALID(&tte)) { 3614 SFMMU_HASH_UNLOCK(hmebp); 3615 kmem_cache_free(pa_hment_cache, pahmep); 3616 *rpfn = PFN_INVALID; 3617 return (ENXIO); 3618 } 3619 3620 /* 3621 * Make sure the boundaries for the callback fall within this 3622 * single mapping. 3623 */ 3624 baseaddr = (caddr_t)get_hblk_base(hmeblkp); 3625 ASSERT(saddr >= baseaddr); 3626 if (eaddr > saddr + TTEBYTES(TTE_CSZ(&tte))) { 3627 SFMMU_HASH_UNLOCK(hmebp); 3628 kmem_cache_free(pa_hment_cache, pahmep); 3629 *rpfn = PFN_INVALID; 3630 return (ERANGE); 3631 } 3632 3633 pfn = sfmmu_ttetopfn(&tte, vaddr); 3634 3635 /* 3636 * The pfn may not have a page_t underneath in which case we 3637 * just return it. This can happen if we are doing I/O to a 3638 * static portion of the kernel's address space, for instance. 3639 */ 3640 pp = osfhmep->hme_page; 3641 if (pp == NULL) { 3642 SFMMU_HASH_UNLOCK(hmebp); 3643 kmem_cache_free(pa_hment_cache, pahmep); 3644 *rpfn = pfn; 3645 if (cookiep) 3646 *cookiep = HAC_COOKIE_NONE; 3647 return (0); 3648 } 3649 ASSERT(pp == PP_PAGEROOT(pp)); 3650 3651 vp = pp->p_vnode; 3652 off = pp->p_offset; 3653 3654 pml = sfmmu_mlist_enter(pp); 3655 3656 if (flags & HAC_PAGELOCK) { 3657 if (!page_trylock(pp, SE_SHARED)) { 3658 /* 3659 * Somebody is holding SE_EXCL lock. Might 3660 * even be hat_page_relocate(). Drop all 3661 * our locks, lookup the page in &kvp, and 3662 * retry. If it doesn't exist in &kvp, then 3663 * we must be dealing with a kernel mapped 3664 * page which doesn't actually belong to 3665 * segkmem so we punt. 3666 */ 3667 sfmmu_mlist_exit(pml); 3668 SFMMU_HASH_UNLOCK(hmebp); 3669 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED); 3670 if (pp == NULL) { 3671 kmem_cache_free(pa_hment_cache, pahmep); 3672 *rpfn = pfn; 3673 if (cookiep) 3674 *cookiep = HAC_COOKIE_NONE; 3675 return (0); 3676 } 3677 page_unlock(pp); 3678 goto rehash; 3679 } 3680 locked = 1; 3681 } 3682 3683 if (!PAGE_LOCKED(pp) && !panicstr) 3684 panic("hat_add_callback: page 0x%p not locked", pp); 3685 3686 if (osfhmep->hme_page != pp || pp->p_vnode != vp || 3687 pp->p_offset != off) { 3688 /* 3689 * The page moved before we got our hands on it. 
Drop 3690 * all the locks and try again. 3691 */ 3692 ASSERT((flags & HAC_PAGELOCK) != 0); 3693 sfmmu_mlist_exit(pml); 3694 SFMMU_HASH_UNLOCK(hmebp); 3695 page_unlock(pp); 3696 locked = 0; 3697 goto rehash; 3698 } 3699 3700 if (vp != &kvp) { 3701 /* 3702 * This is not a segkmem page but another page which 3703 * has been kernel mapped. It had better have at least 3704 * a share lock on it. Return the pfn. 3705 */ 3706 sfmmu_mlist_exit(pml); 3707 SFMMU_HASH_UNLOCK(hmebp); 3708 if (locked) 3709 page_unlock(pp); 3710 kmem_cache_free(pa_hment_cache, pahmep); 3711 ASSERT(PAGE_LOCKED(pp)); 3712 *rpfn = pfn; 3713 if (cookiep) 3714 *cookiep = HAC_COOKIE_NONE; 3715 return (0); 3716 } 3717 3718 /* 3719 * Setup this pa_hment and link its embedded dummy sf_hment into 3720 * the mapping list. 3721 */ 3722 pp->p_share++; 3723 pahmep->cb_id = callback_id; 3724 pahmep->addr = vaddr; 3725 pahmep->len = len; 3726 pahmep->refcnt = 1; 3727 pahmep->flags = 0; 3728 pahmep->pvt = pvt; 3729 3730 sfhmep->hme_tte.ll = 0; 3731 sfhmep->hme_data = pahmep; 3732 sfhmep->hme_prev = osfhmep; 3733 sfhmep->hme_next = osfhmep->hme_next; 3734 3735 if (osfhmep->hme_next) 3736 osfhmep->hme_next->hme_prev = sfhmep; 3737 3738 osfhmep->hme_next = sfhmep; 3739 3740 sfmmu_mlist_exit(pml); 3741 SFMMU_HASH_UNLOCK(hmebp); 3742 3743 if (locked) 3744 page_unlock(pp); 3745 3746 *rpfn = pfn; 3747 if (cookiep) 3748 *cookiep = (void *)pahmep; 3749 3750 return (0); 3751 } 3752 3753 /* 3754 * Remove the relocation callbacks from the specified addr/len. 3755 */ 3756 void 3757 hat_delete_callback(caddr_t vaddr, uint_t len, void *pvt, uint_t flags, 3758 void *cookie) 3759 { 3760 struct hmehash_bucket *hmebp; 3761 hmeblk_tag hblktag; 3762 struct hme_blk *hmeblkp; 3763 int hmeshift, hashno; 3764 caddr_t saddr; 3765 struct pa_hment *pahmep; 3766 struct sf_hment *sfhmep, *osfhmep; 3767 kmutex_t *pml; 3768 tte_t tte; 3769 page_t *pp; 3770 vnode_t *vp; 3771 u_offset_t off; 3772 int locked = 0; 3773 3774 /* 3775 * If the cookie is HAC_COOKIE_NONE then there is no pa_hment to 3776 * remove so just return. 3777 */ 3778 if (cookie == HAC_COOKIE_NONE || IS_KPM_ADDR(vaddr)) 3779 return; 3780 3781 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK); 3782 3783 rehash: 3784 /* Find the mapping(s) for this page */ 3785 for (hashno = TTE64K, hmeblkp = NULL; 3786 hmeblkp == NULL && hashno <= mmu_hashcnt; 3787 hashno++) { 3788 hmeshift = HME_HASH_SHIFT(hashno); 3789 hblktag.htag_id = ksfmmup; 3790 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift); 3791 hblktag.htag_rehash = hashno; 3792 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift); 3793 3794 SFMMU_HASH_LOCK(hmebp); 3795 3796 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 3797 3798 if (hmeblkp == NULL) 3799 SFMMU_HASH_UNLOCK(hmebp); 3800 } 3801 3802 if (hmeblkp == NULL) 3803 return; 3804 3805 HBLKTOHME(osfhmep, hmeblkp, saddr); 3806 3807 sfmmu_copytte(&osfhmep->hme_tte, &tte); 3808 if (!TTE_IS_VALID(&tte)) { 3809 SFMMU_HASH_UNLOCK(hmebp); 3810 return; 3811 } 3812 3813 pp = osfhmep->hme_page; 3814 if (pp == NULL) { 3815 SFMMU_HASH_UNLOCK(hmebp); 3816 ASSERT(cookie == NULL); 3817 return; 3818 } 3819 3820 vp = pp->p_vnode; 3821 off = pp->p_offset; 3822 3823 pml = sfmmu_mlist_enter(pp); 3824 3825 if (flags & HAC_PAGELOCK) { 3826 if (!page_trylock(pp, SE_SHARED)) { 3827 /* 3828 * Somebody is holding SE_EXCL lock. Might 3829 * even be hat_page_relocate(). Drop all 3830 * our locks, lookup the page in &kvp, and 3831 * retry. 
If it doesn't exist in &kvp, then 3832 * we must be dealing with a kernel mapped 3833 * page which doesn't actually belong to 3834 * segkmem so we punt. 3835 */ 3836 sfmmu_mlist_exit(pml); 3837 SFMMU_HASH_UNLOCK(hmebp); 3838 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED); 3839 if (pp == NULL) { 3840 ASSERT(cookie == NULL); 3841 return; 3842 } 3843 page_unlock(pp); 3844 goto rehash; 3845 } 3846 locked = 1; 3847 } 3848 3849 ASSERT(PAGE_LOCKED(pp)); 3850 3851 if (osfhmep->hme_page != pp || pp->p_vnode != vp || 3852 pp->p_offset != off) { 3853 /* 3854 * The page moved before we got our hands on it. Drop 3855 * all the locks and try again. 3856 */ 3857 ASSERT((flags & HAC_PAGELOCK) != 0); 3858 sfmmu_mlist_exit(pml); 3859 SFMMU_HASH_UNLOCK(hmebp); 3860 page_unlock(pp); 3861 locked = 0; 3862 goto rehash; 3863 } 3864 3865 if (vp != &kvp) { 3866 /* 3867 * This is not a segkmem page but another page which 3868 * has been kernel mapped. 3869 */ 3870 sfmmu_mlist_exit(pml); 3871 SFMMU_HASH_UNLOCK(hmebp); 3872 if (locked) 3873 page_unlock(pp); 3874 ASSERT(cookie == NULL); 3875 return; 3876 } 3877 3878 if (cookie != NULL) { 3879 pahmep = (struct pa_hment *)cookie; 3880 sfhmep = &pahmep->sfment; 3881 } else { 3882 for (sfhmep = pp->p_mapping; sfhmep != NULL; 3883 sfhmep = sfhmep->hme_next) { 3884 3885 /* 3886 * skip va<->pa mappings 3887 */ 3888 if (!IS_PAHME(sfhmep)) 3889 continue; 3890 3891 pahmep = sfhmep->hme_data; 3892 ASSERT(pahmep != NULL); 3893 3894 /* 3895 * if pa_hment matches, remove it 3896 */ 3897 if ((pahmep->pvt == pvt) && 3898 (pahmep->addr == vaddr) && 3899 (pahmep->len == len)) { 3900 break; 3901 } 3902 } 3903 } 3904 3905 if (sfhmep == NULL) { 3906 if (!panicstr) { 3907 panic("hat_delete_callback: pa_hment not found, pp %p", 3908 (void *)pp); 3909 } 3910 return; 3911 } 3912 3913 /* 3914 * Note: at this point a valid kernel mapping must still be 3915 * present on this page. 3916 */ 3917 pp->p_share--; 3918 if (pp->p_share <= 0) 3919 panic("hat_delete_callback: zero p_share"); 3920 3921 if (--pahmep->refcnt == 0) { 3922 if (pahmep->flags != 0) 3923 panic("hat_delete_callback: pa_hment is busy"); 3924 3925 /* 3926 * Remove sfhmep from the mapping list for the page. 3927 */ 3928 if (sfhmep->hme_prev) { 3929 sfhmep->hme_prev->hme_next = sfhmep->hme_next; 3930 } else { 3931 pp->p_mapping = sfhmep->hme_next; 3932 } 3933 3934 if (sfhmep->hme_next) 3935 sfhmep->hme_next->hme_prev = sfhmep->hme_prev; 3936 3937 sfmmu_mlist_exit(pml); 3938 SFMMU_HASH_UNLOCK(hmebp); 3939 3940 if (locked) 3941 page_unlock(pp); 3942 3943 kmem_cache_free(pa_hment_cache, pahmep); 3944 return; 3945 } 3946 3947 sfmmu_mlist_exit(pml); 3948 SFMMU_HASH_UNLOCK(hmebp); 3949 if (locked) 3950 page_unlock(pp); 3951 } 3952 3953 /* 3954 * hat_probe returns 1 if the translation for the address 'addr' is 3955 * loaded, zero otherwise. 3956 * 3957 * hat_probe should be used only for advisorary purposes because it may 3958 * occasionally return the wrong value. The implementation must guarantee that 3959 * returning the wrong value is a very rare event. hat_probe is used 3960 * to implement optimizations in the segment drivers. 
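 *
 * [Editorial note, not part of the original source: a sketch of the
 * advisory usage described above; the surrounding logic is hypothetical:
 *
 *	if (hat_probe(as->a_hat, addr) != 0)
 *		return;		already mapped, nothing to do
 *	...otherwise go through the full fault/load path...
 *
 * Since hat_probe() can occasionally return a stale answer, callers may
 * only use it to skip work, never to establish correctness.]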
3961 * 3962 */ 3963 int 3964 hat_probe(struct hat *sfmmup, caddr_t addr) 3965 { 3966 pfn_t pfn; 3967 tte_t tte; 3968 3969 ASSERT(sfmmup != NULL); 3970 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 3971 3972 ASSERT((sfmmup == ksfmmup) || 3973 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 3974 3975 if (sfmmup == ksfmmup) { 3976 while ((pfn = sfmmu_vatopfn(addr, sfmmup, &tte)) 3977 == PFN_SUSPENDED) { 3978 sfmmu_vatopfn_suspended(addr, sfmmup, &tte); 3979 } 3980 } else { 3981 pfn = sfmmu_uvatopfn(addr, sfmmup); 3982 } 3983 3984 if (pfn != PFN_INVALID) 3985 return (1); 3986 else 3987 return (0); 3988 } 3989 3990 ssize_t 3991 hat_getpagesize(struct hat *sfmmup, caddr_t addr) 3992 { 3993 tte_t tte; 3994 3995 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 3996 3997 sfmmu_gettte(sfmmup, addr, &tte); 3998 if (TTE_IS_VALID(&tte)) { 3999 return (TTEBYTES(TTE_CSZ(&tte))); 4000 } 4001 return (-1); 4002 } 4003 4004 static void 4005 sfmmu_gettte(struct hat *sfmmup, caddr_t addr, tte_t *ttep) 4006 { 4007 struct hmehash_bucket *hmebp; 4008 hmeblk_tag hblktag; 4009 int hmeshift, hashno = 1; 4010 struct hme_blk *hmeblkp, *list = NULL; 4011 struct sf_hment *sfhmep; 4012 4013 /* support for ISM */ 4014 ism_map_t *ism_map; 4015 ism_blk_t *ism_blkp; 4016 int i; 4017 sfmmu_t *ism_hatid = NULL; 4018 sfmmu_t *locked_hatid = NULL; 4019 4020 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 4021 4022 ism_blkp = sfmmup->sfmmu_iblk; 4023 if (ism_blkp) { 4024 sfmmu_ismhat_enter(sfmmup, 0); 4025 locked_hatid = sfmmup; 4026 } 4027 while (ism_blkp && ism_hatid == NULL) { 4028 ism_map = ism_blkp->iblk_maps; 4029 for (i = 0; ism_map[i].imap_ismhat && i < ISM_MAP_SLOTS; i++) { 4030 if (addr >= ism_start(ism_map[i]) && 4031 addr < ism_end(ism_map[i])) { 4032 sfmmup = ism_hatid = ism_map[i].imap_ismhat; 4033 addr = (caddr_t)(addr - 4034 ism_start(ism_map[i])); 4035 break; 4036 } 4037 } 4038 ism_blkp = ism_blkp->iblk_next; 4039 } 4040 if (locked_hatid) { 4041 sfmmu_ismhat_exit(locked_hatid, 0); 4042 } 4043 4044 hblktag.htag_id = sfmmup; 4045 ttep->ll = 0; 4046 4047 do { 4048 hmeshift = HME_HASH_SHIFT(hashno); 4049 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 4050 hblktag.htag_rehash = hashno; 4051 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 4052 4053 SFMMU_HASH_LOCK(hmebp); 4054 4055 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 4056 if (hmeblkp != NULL) { 4057 HBLKTOHME(sfhmep, hmeblkp, addr); 4058 sfmmu_copytte(&sfhmep->hme_tte, ttep); 4059 SFMMU_HASH_UNLOCK(hmebp); 4060 break; 4061 } 4062 SFMMU_HASH_UNLOCK(hmebp); 4063 hashno++; 4064 } while (HME_REHASH(sfmmup) && (hashno <= mmu_hashcnt)); 4065 4066 sfmmu_hblks_list_purge(&list); 4067 } 4068 4069 uint_t 4070 hat_getattr(struct hat *sfmmup, caddr_t addr, uint_t *attr) 4071 { 4072 tte_t tte; 4073 4074 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 4075 4076 sfmmu_gettte(sfmmup, addr, &tte); 4077 if (TTE_IS_VALID(&tte)) { 4078 *attr = sfmmu_ptov_attr(&tte); 4079 return (0); 4080 } 4081 *attr = 0; 4082 return ((uint_t)0xffffffff); 4083 } 4084 4085 /* 4086 * Enables more attributes on specified address range (ie. logical OR) 4087 */ 4088 void 4089 hat_setattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr) 4090 { 4091 if (hat->sfmmu_xhat_provider) { 4092 XHAT_SETATTR(hat, addr, len, attr); 4093 return; 4094 } else { 4095 /* 4096 * This must be a CPU HAT. 
If the address space has 4097 * XHATs attached, change attributes for all of them, 4098 * just in case 4099 */ 4100 ASSERT(hat->sfmmu_as != NULL); 4101 if (hat->sfmmu_as->a_xhat != NULL) 4102 xhat_setattr_all(hat->sfmmu_as, addr, len, attr); 4103 } 4104 4105 sfmmu_chgattr(hat, addr, len, attr, SFMMU_SETATTR); 4106 } 4107 4108 /* 4109 * Assigns attributes to the specified address range. All the attributes 4110 * are specified. 4111 */ 4112 void 4113 hat_chgattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr) 4114 { 4115 if (hat->sfmmu_xhat_provider) { 4116 XHAT_CHGATTR(hat, addr, len, attr); 4117 return; 4118 } else { 4119 /* 4120 * This must be a CPU HAT. If the address space has 4121 * XHATs attached, change attributes for all of them, 4122 * just in case 4123 */ 4124 ASSERT(hat->sfmmu_as != NULL); 4125 if (hat->sfmmu_as->a_xhat != NULL) 4126 xhat_chgattr_all(hat->sfmmu_as, addr, len, attr); 4127 } 4128 4129 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CHGATTR); 4130 } 4131 4132 /* 4133 * Remove attributes on the specified address range (ie. loginal NAND) 4134 */ 4135 void 4136 hat_clrattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr) 4137 { 4138 if (hat->sfmmu_xhat_provider) { 4139 XHAT_CLRATTR(hat, addr, len, attr); 4140 return; 4141 } else { 4142 /* 4143 * This must be a CPU HAT. If the address space has 4144 * XHATs attached, change attributes for all of them, 4145 * just in case 4146 */ 4147 ASSERT(hat->sfmmu_as != NULL); 4148 if (hat->sfmmu_as->a_xhat != NULL) 4149 xhat_clrattr_all(hat->sfmmu_as, addr, len, attr); 4150 } 4151 4152 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CLRATTR); 4153 } 4154 4155 /* 4156 * Change attributes on an address range to that specified by attr and mode. 4157 */ 4158 static void 4159 sfmmu_chgattr(struct hat *sfmmup, caddr_t addr, size_t len, uint_t attr, 4160 int mode) 4161 { 4162 struct hmehash_bucket *hmebp; 4163 hmeblk_tag hblktag; 4164 int hmeshift, hashno = 1; 4165 struct hme_blk *hmeblkp, *list = NULL; 4166 caddr_t endaddr; 4167 cpuset_t cpuset; 4168 demap_range_t dmr; 4169 4170 CPUSET_ZERO(cpuset); 4171 4172 ASSERT((sfmmup == ksfmmup) || 4173 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 4174 ASSERT((len & MMU_PAGEOFFSET) == 0); 4175 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0); 4176 4177 if ((attr & PROT_USER) && (mode != SFMMU_CLRATTR) && 4178 ((addr + len) > (caddr_t)USERLIMIT)) { 4179 panic("user addr %p in kernel space", 4180 (void *)addr); 4181 } 4182 4183 endaddr = addr + len; 4184 hblktag.htag_id = sfmmup; 4185 DEMAP_RANGE_INIT(sfmmup, &dmr); 4186 4187 while (addr < endaddr) { 4188 hmeshift = HME_HASH_SHIFT(hashno); 4189 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 4190 hblktag.htag_rehash = hashno; 4191 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 4192 4193 SFMMU_HASH_LOCK(hmebp); 4194 4195 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 4196 if (hmeblkp != NULL) { 4197 /* 4198 * We've encountered a shadow hmeblk so skip the range 4199 * of the next smaller mapping size. 
4200 */ 4201 if (hmeblkp->hblk_shw_bit) { 4202 ASSERT(sfmmup != ksfmmup); 4203 ASSERT(hashno > 1); 4204 addr = (caddr_t)P2END((uintptr_t)addr, 4205 TTEBYTES(hashno - 1)); 4206 } else { 4207 addr = sfmmu_hblk_chgattr(sfmmup, 4208 hmeblkp, addr, endaddr, &dmr, attr, mode); 4209 } 4210 SFMMU_HASH_UNLOCK(hmebp); 4211 hashno = 1; 4212 continue; 4213 } 4214 SFMMU_HASH_UNLOCK(hmebp); 4215 4216 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 4217 /* 4218 * We have traversed the whole list and rehashed 4219 * if necessary without finding the address to chgattr. 4220 * This is ok, so we increment the address by the 4221 * smallest hmeblk range for kernel mappings or for 4222 * user mappings with no large pages, and the largest 4223 * hmeblk range, to account for shadow hmeblks, for 4224 * user mappings with large pages and continue. 4225 */ 4226 if (sfmmup == ksfmmup) 4227 addr = (caddr_t)P2END((uintptr_t)addr, 4228 TTEBYTES(1)); 4229 else 4230 addr = (caddr_t)P2END((uintptr_t)addr, 4231 TTEBYTES(hashno)); 4232 hashno = 1; 4233 } else { 4234 hashno++; 4235 } 4236 } 4237 4238 sfmmu_hblks_list_purge(&list); 4239 DEMAP_RANGE_FLUSH(&dmr); 4240 cpuset = sfmmup->sfmmu_cpusran; 4241 xt_sync(cpuset); 4242 } 4243 4244 /* 4245 * This function changes attributes on a range of addresses in an hmeblk. It 4246 * returns the next address whose attributes need to be changed. 4247 * It should be called with the hash lock held. 4248 * XXX It should be possible to optimize chgattr by not flushing every time but 4249 * on the other hand: 4250 * 1. do one flush crosscall. 4251 * 2. only flush if we are increasing permissions (make sure this will work) 4252 */ 4253 static caddr_t 4254 sfmmu_hblk_chgattr(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 4255 caddr_t endaddr, demap_range_t *dmrp, uint_t attr, int mode) 4256 { 4257 tte_t tte, tteattr, tteflags, ttemod; 4258 struct sf_hment *sfhmep; 4259 int ttesz; 4260 struct page *pp = NULL; 4261 kmutex_t *pml, *pmtx; 4262 int ret; 4263 int use_demap_range; 4264 #if defined(SF_ERRATA_57) 4265 int check_exec; 4266 #endif 4267 4268 ASSERT(in_hblk_range(hmeblkp, addr)); 4269 ASSERT(hmeblkp->hblk_shw_bit == 0); 4270 4271 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 4272 ttesz = get_hblk_ttesz(hmeblkp); 4273 4274 /* 4275 * Flush the current demap region if addresses have been 4276 * skipped or the page size doesn't match. 4277 */ 4278 use_demap_range = (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp)); 4279 if (use_demap_range) { 4280 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 4281 } else { 4282 DEMAP_RANGE_FLUSH(dmrp); 4283 } 4284 4285 tteattr.ll = sfmmu_vtop_attr(attr, mode, &tteflags); 4286 #if defined(SF_ERRATA_57) 4287 check_exec = (sfmmup != ksfmmup) && 4288 AS_TYPE_64BIT(sfmmup->sfmmu_as) && 4289 TTE_IS_EXECUTABLE(&tteattr); 4290 #endif 4291 HBLKTOHME(sfhmep, hmeblkp, addr); 4292 while (addr < endaddr) { 4293 sfmmu_copytte(&sfhmep->hme_tte, &tte); 4294 if (TTE_IS_VALID(&tte)) { 4295 if ((tte.ll & tteflags.ll) == tteattr.ll) { 4296 /* 4297 * if the new attr is the same as the old, 4298 * continue 4299 */ 4300 goto next_addr; 4301 } 4302 if (!TTE_IS_WRITABLE(&tteattr)) { 4303 /* 4304 * make sure we clear the hw modify bit if we are 4305 * removing write protections 4306 */ 4307 tteflags.tte_intlo |= TTE_HWWR_INT; 4308 } 4309 4310 pml = NULL; 4311 pp = sfhmep->hme_page; 4312 if (pp) { 4313 pml = sfmmu_mlist_enter(pp); 4314 } 4315 4316 if (pp != sfhmep->hme_page) { 4317 /* 4318 * tte must have been unloaded.
4319 */ 4320 ASSERT(pml); 4321 sfmmu_mlist_exit(pml); 4322 continue; 4323 } 4324 4325 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 4326 4327 ttemod = tte; 4328 ttemod.ll = (ttemod.ll & ~tteflags.ll) | tteattr.ll; 4329 ASSERT(TTE_TO_TTEPFN(&ttemod) == TTE_TO_TTEPFN(&tte)); 4330 4331 #if defined(SF_ERRATA_57) 4332 if (check_exec && addr < errata57_limit) 4333 ttemod.tte_exec_perm = 0; 4334 #endif 4335 ret = sfmmu_modifytte_try(&tte, &ttemod, 4336 &sfhmep->hme_tte); 4337 4338 if (ret < 0) { 4339 /* tte changed underneath us */ 4340 if (pml) { 4341 sfmmu_mlist_exit(pml); 4342 } 4343 continue; 4344 } 4345 4346 if (tteflags.tte_intlo & TTE_HWWR_INT) { 4347 /* 4348 * need to sync if we are clearing modify bit. 4349 */ 4350 sfmmu_ttesync(sfmmup, addr, &tte, pp); 4351 } 4352 4353 if (pp && PP_ISRO(pp)) { 4354 if (tteattr.tte_intlo & TTE_WRPRM_INT) { 4355 pmtx = sfmmu_page_enter(pp); 4356 PP_CLRRO(pp); 4357 sfmmu_page_exit(pmtx); 4358 } 4359 } 4360 4361 if (ret > 0 && use_demap_range) { 4362 DEMAP_RANGE_MARKPG(dmrp, addr); 4363 } else if (ret > 0) { 4364 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 4365 } 4366 4367 if (pml) { 4368 sfmmu_mlist_exit(pml); 4369 } 4370 } 4371 next_addr: 4372 addr += TTEBYTES(ttesz); 4373 sfhmep++; 4374 DEMAP_RANGE_NEXTPG(dmrp); 4375 } 4376 return (addr); 4377 } 4378 4379 /* 4380 * This routine converts virtual attributes to physical ones. It will 4381 * update the tteflags field with the tte mask corresponding to the attributes 4382 * affected and it returns the new attributes. It will also clear the modify 4383 * bit if we are taking away write permission. This is necessary since the 4384 * modify bit is the hardware permission bit and we need to clear it in order 4385 * to detect write faults. 4386 */ 4387 static uint64_t 4388 sfmmu_vtop_attr(uint_t attr, int mode, tte_t *ttemaskp) 4389 { 4390 tte_t ttevalue; 4391 4392 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 4393 4394 switch (mode) { 4395 case SFMMU_CHGATTR: 4396 /* all attributes specified */ 4397 ttevalue.tte_inthi = MAKE_TTEATTR_INTHI(attr); 4398 ttevalue.tte_intlo = MAKE_TTEATTR_INTLO(attr); 4399 ttemaskp->tte_inthi = TTEINTHI_ATTR; 4400 ttemaskp->tte_intlo = TTEINTLO_ATTR; 4401 break; 4402 case SFMMU_SETATTR: 4403 ASSERT(!(attr & ~HAT_PROT_MASK)); 4404 ttemaskp->ll = 0; 4405 ttevalue.ll = 0; 4406 /* 4407 * a valid tte implies exec and read for sfmmu 4408 * so no need to do anything about them. 4409 * since priviledged access implies user access 4410 * PROT_USER doesn't make sense either. 
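 *
 * [Editorial note, not part of the original source: as a worked example,
 * hat_setattr(hat, addr, len, PROT_WRITE) arrives here with mode ==
 * SFMMU_SETATTR; the code below leaves only TTE_WRPRM_INT set in both
 * *ttemaskp and the returned value, so sfmmu_hblk_chgattr() ORs in just
 * the write-permission bit and leaves every other tte bit unchanged.]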
4411 */ 4412 if (attr & PROT_WRITE) { 4413 ttemaskp->tte_intlo |= TTE_WRPRM_INT; 4414 ttevalue.tte_intlo |= TTE_WRPRM_INT; 4415 } 4416 break; 4417 case SFMMU_CLRATTR: 4418 /* attributes will be nand with current ones */ 4419 if (attr & ~(PROT_WRITE | PROT_USER)) { 4420 panic("sfmmu: attr %x not supported", attr); 4421 } 4422 ttemaskp->ll = 0; 4423 ttevalue.ll = 0; 4424 if (attr & PROT_WRITE) { 4425 /* clear both writable and modify bit */ 4426 ttemaskp->tte_intlo |= TTE_WRPRM_INT | TTE_HWWR_INT; 4427 } 4428 if (attr & PROT_USER) { 4429 ttemaskp->tte_intlo |= TTE_PRIV_INT; 4430 ttevalue.tte_intlo |= TTE_PRIV_INT; 4431 } 4432 break; 4433 default: 4434 panic("sfmmu_vtop_attr: bad mode %x", mode); 4435 } 4436 ASSERT(TTE_TO_TTEPFN(&ttevalue) == 0); 4437 return (ttevalue.ll); 4438 } 4439 4440 static uint_t 4441 sfmmu_ptov_attr(tte_t *ttep) 4442 { 4443 uint_t attr; 4444 4445 ASSERT(TTE_IS_VALID(ttep)); 4446 4447 attr = PROT_READ; 4448 4449 if (TTE_IS_WRITABLE(ttep)) { 4450 attr |= PROT_WRITE; 4451 } 4452 if (TTE_IS_EXECUTABLE(ttep)) { 4453 attr |= PROT_EXEC; 4454 } 4455 if (!TTE_IS_PRIVILEGED(ttep)) { 4456 attr |= PROT_USER; 4457 } 4458 if (TTE_IS_NFO(ttep)) { 4459 attr |= HAT_NOFAULT; 4460 } 4461 if (TTE_IS_NOSYNC(ttep)) { 4462 attr |= HAT_NOSYNC; 4463 } 4464 if (TTE_IS_SIDEFFECT(ttep)) { 4465 attr |= SFMMU_SIDEFFECT; 4466 } 4467 if (!TTE_IS_VCACHEABLE(ttep)) { 4468 attr |= SFMMU_UNCACHEVTTE; 4469 } 4470 if (!TTE_IS_PCACHEABLE(ttep)) { 4471 attr |= SFMMU_UNCACHEPTTE; 4472 } 4473 return (attr); 4474 } 4475 4476 /* 4477 * hat_chgprot is a deprecated hat call. New segment drivers 4478 * should store all attributes and use hat_*attr calls. 4479 * 4480 * Change the protections in the virtual address range 4481 * given to the specified virtual protection. If vprot is ~PROT_WRITE, 4482 * then remove write permission, leaving the other 4483 * permissions unchanged. If vprot is ~PROT_USER, remove user permissions. 4484 * 4485 */ 4486 void 4487 hat_chgprot(struct hat *sfmmup, caddr_t addr, size_t len, uint_t vprot) 4488 { 4489 struct hmehash_bucket *hmebp; 4490 hmeblk_tag hblktag; 4491 int hmeshift, hashno = 1; 4492 struct hme_blk *hmeblkp, *list = NULL; 4493 caddr_t endaddr; 4494 cpuset_t cpuset; 4495 demap_range_t dmr; 4496 4497 ASSERT((len & MMU_PAGEOFFSET) == 0); 4498 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0); 4499 4500 if (sfmmup->sfmmu_xhat_provider) { 4501 XHAT_CHGPROT(sfmmup, addr, len, vprot); 4502 return; 4503 } else { 4504 /* 4505 * This must be a CPU HAT. If the address space has 4506 * XHATs attached, change attributes for all of them, 4507 * just in case 4508 */ 4509 ASSERT(sfmmup->sfmmu_as != NULL); 4510 if (sfmmup->sfmmu_as->a_xhat != NULL) 4511 xhat_chgprot_all(sfmmup->sfmmu_as, addr, len, vprot); 4512 } 4513 4514 CPUSET_ZERO(cpuset); 4515 4516 if ((vprot != (uint_t)~PROT_WRITE) && (vprot & PROT_USER) && 4517 ((addr + len) > (caddr_t)USERLIMIT)) { 4518 panic("user addr %p vprot %x in kernel space", 4519 (void *)addr, vprot); 4520 } 4521 endaddr = addr + len; 4522 hblktag.htag_id = sfmmup; 4523 DEMAP_RANGE_INIT(sfmmup, &dmr); 4524 4525 while (addr < endaddr) { 4526 hmeshift = HME_HASH_SHIFT(hashno); 4527 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 4528 hblktag.htag_rehash = hashno; 4529 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 4530 4531 SFMMU_HASH_LOCK(hmebp); 4532 4533 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 4534 if (hmeblkp != NULL) { 4535 /* 4536 * We've encountered a shadow hmeblk so skip the range 4537 * of the next smaller mapping size. 
4538 */ 4539 if (hmeblkp->hblk_shw_bit) { 4540 ASSERT(sfmmup != ksfmmup); 4541 ASSERT(hashno > 1); 4542 addr = (caddr_t)P2END((uintptr_t)addr, 4543 TTEBYTES(hashno - 1)); 4544 } else { 4545 addr = sfmmu_hblk_chgprot(sfmmup, hmeblkp, 4546 addr, endaddr, &dmr, vprot); 4547 } 4548 SFMMU_HASH_UNLOCK(hmebp); 4549 hashno = 1; 4550 continue; 4551 } 4552 SFMMU_HASH_UNLOCK(hmebp); 4553 4554 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 4555 /* 4556 * We have traversed the whole list and rehashed 4557 * if necessary without finding the address to chgprot. 4558 * This is ok so we increment the address by the 4559 * smallest hmeblk range for kernel mappings and the 4560 * largest hmeblk range, to account for shadow hmeblks, 4561 * for user mappings and continue. 4562 */ 4563 if (sfmmup == ksfmmup) 4564 addr = (caddr_t)P2END((uintptr_t)addr, 4565 TTEBYTES(1)); 4566 else 4567 addr = (caddr_t)P2END((uintptr_t)addr, 4568 TTEBYTES(hashno)); 4569 hashno = 1; 4570 } else { 4571 hashno++; 4572 } 4573 } 4574 4575 sfmmu_hblks_list_purge(&list); 4576 DEMAP_RANGE_FLUSH(&dmr); 4577 cpuset = sfmmup->sfmmu_cpusran; 4578 xt_sync(cpuset); 4579 } 4580 4581 /* 4582 * This function changes protections on a range of addresses in an hmeblk. It 4583 * returns the next address whose protections need to be changed. 4584 * It should be called with the hash lock held. 4585 * XXX It should be possible to optimize chgprot by not flushing every time but 4586 * on the other hand: 4587 * 1. do one flush crosscall. 4588 * 2. only flush if we are increasing permissions (make sure this will work) 4589 */ 4590 static caddr_t 4591 sfmmu_hblk_chgprot(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 4592 caddr_t endaddr, demap_range_t *dmrp, uint_t vprot) 4593 { 4594 uint_t pprot; 4595 tte_t tte, ttemod; 4596 struct sf_hment *sfhmep; 4597 uint_t tteflags; 4598 int ttesz; 4599 struct page *pp = NULL; 4600 kmutex_t *pml, *pmtx; 4601 int ret; 4602 int use_demap_range; 4603 #if defined(SF_ERRATA_57) 4604 int check_exec; 4605 #endif 4606 4607 ASSERT(in_hblk_range(hmeblkp, addr)); 4608 ASSERT(hmeblkp->hblk_shw_bit == 0); 4609 4610 #ifdef DEBUG 4611 if (get_hblk_ttesz(hmeblkp) != TTE8K && 4612 (endaddr < get_hblk_endaddr(hmeblkp))) { 4613 panic("sfmmu_hblk_chgprot: partial chgprot of large page"); 4614 } 4615 #endif /* DEBUG */ 4616 4617 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 4618 ttesz = get_hblk_ttesz(hmeblkp); 4619 4620 pprot = sfmmu_vtop_prot(vprot, &tteflags); 4621 #if defined(SF_ERRATA_57) 4622 check_exec = (sfmmup != ksfmmup) && 4623 AS_TYPE_64BIT(sfmmup->sfmmu_as) && 4624 ((vprot & PROT_EXEC) == PROT_EXEC); 4625 #endif 4626 HBLKTOHME(sfhmep, hmeblkp, addr); 4627 4628 /* 4629 * Flush the current demap region if addresses have been 4630 * skipped or the page size doesn't match. 4631 */ 4632 use_demap_range = (TTEBYTES(ttesz) == MMU_PAGESIZE); 4633 if (use_demap_range) { 4634 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 4635 } else { 4636 DEMAP_RANGE_FLUSH(dmrp); 4637 } 4638 4639 while (addr < endaddr) { 4640 sfmmu_copytte(&sfhmep->hme_tte, &tte); 4641 if (TTE_IS_VALID(&tte)) { 4642 if (TTE_GET_LOFLAGS(&tte, tteflags) == pprot) { 4643 /* 4644 * if the new protection is the same as the old, 4645 * continue 4646 */ 4647 goto next_addr; 4648 } 4649 pml = NULL; 4650 pp = sfhmep->hme_page; 4651 if (pp) { 4652 pml = sfmmu_mlist_enter(pp); 4653 } 4654 if (pp != sfhmep->hme_page) { 4655 /* 4656 * tte must have been unloaded 4657 * underneath us.
Recheck 4658 */ 4659 ASSERT(pml); 4660 sfmmu_mlist_exit(pml); 4661 continue; 4662 } 4663 4664 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 4665 4666 ttemod = tte; 4667 TTE_SET_LOFLAGS(&ttemod, tteflags, pprot); 4668 #if defined(SF_ERRATA_57) 4669 if (check_exec && addr < errata57_limit) 4670 ttemod.tte_exec_perm = 0; 4671 #endif 4672 ret = sfmmu_modifytte_try(&tte, &ttemod, 4673 &sfhmep->hme_tte); 4674 4675 if (ret < 0) { 4676 /* tte changed underneath us */ 4677 if (pml) { 4678 sfmmu_mlist_exit(pml); 4679 } 4680 continue; 4681 } 4682 4683 if (tteflags & TTE_HWWR_INT) { 4684 /* 4685 * need to sync if we are clearing modify bit. 4686 */ 4687 sfmmu_ttesync(sfmmup, addr, &tte, pp); 4688 } 4689 4690 if (pp && PP_ISRO(pp)) { 4691 if (pprot & TTE_WRPRM_INT) { 4692 pmtx = sfmmu_page_enter(pp); 4693 PP_CLRRO(pp); 4694 sfmmu_page_exit(pmtx); 4695 } 4696 } 4697 4698 if (ret > 0 && use_demap_range) { 4699 DEMAP_RANGE_MARKPG(dmrp, addr); 4700 } else if (ret > 0) { 4701 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 4702 } 4703 4704 if (pml) { 4705 sfmmu_mlist_exit(pml); 4706 } 4707 } 4708 next_addr: 4709 addr += TTEBYTES(ttesz); 4710 sfhmep++; 4711 DEMAP_RANGE_NEXTPG(dmrp); 4712 } 4713 return (addr); 4714 } 4715 4716 /* 4717 * This routine is deprecated and should only be used by hat_chgprot. 4718 * The correct routine is sfmmu_vtop_attr. 4719 * This routine converts virtual page protections to physical ones. It will 4720 * update the tteflags field with the tte mask corresponding to the protections 4721 * affected and it returns the new protections. It will also clear the modify 4722 * bit if we are taking away write permission. This is necessary since the 4723 * modify bit is the hardware permission bit and we need to clear it in order 4724 * to detect write faults. 4725 * It accepts the following special protections: 4726 * ~PROT_WRITE = remove write permissions. 4727 * ~PROT_USER = remove user permissions. 
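 *
 * [Editorial note, not part of the original source: worked examples taken
 * from the switch below:
 *	PROT_READ:
 *		*tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT,
 *		returns TTE_PRIV_INT (privileged, read-only, modify cleared)
 *	PROT_USER | PROT_WRITE | PROT_READ:
 *		*tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT,
 *		returns TTE_WRPRM_INT (user-accessible and writable)
 *	(uint_t)~PROT_WRITE:
 *		*tteflagsp = TTE_WRPRM_INT | TTE_HWWR_INT,
 *		returns 0 (write permission and hardware modify bit cleared)
 * The caller sets the bits named in *tteflagsp to the returned value.]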
4728 */ 4729 static uint_t 4730 sfmmu_vtop_prot(uint_t vprot, uint_t *tteflagsp) 4731 { 4732 if (vprot == (uint_t)~PROT_WRITE) { 4733 *tteflagsp = TTE_WRPRM_INT | TTE_HWWR_INT; 4734 return (0); /* will cause wrprm to be cleared */ 4735 } 4736 if (vprot == (uint_t)~PROT_USER) { 4737 *tteflagsp = TTE_PRIV_INT; 4738 return (0); /* will cause privprm to be cleared */ 4739 } 4740 if ((vprot == 0) || (vprot == PROT_USER) || 4741 ((vprot & PROT_ALL) != vprot)) { 4742 panic("sfmmu_vtop_prot -- bad prot %x", vprot); 4743 } 4744 4745 switch (vprot) { 4746 case (PROT_READ): 4747 case (PROT_EXEC): 4748 case (PROT_EXEC | PROT_READ): 4749 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT; 4750 return (TTE_PRIV_INT); /* set prv and clr wrt */ 4751 case (PROT_WRITE): 4752 case (PROT_WRITE | PROT_READ): 4753 case (PROT_EXEC | PROT_WRITE): 4754 case (PROT_EXEC | PROT_WRITE | PROT_READ): 4755 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT; 4756 return (TTE_PRIV_INT | TTE_WRPRM_INT); /* set prv and wrt */ 4757 case (PROT_USER | PROT_READ): 4758 case (PROT_USER | PROT_EXEC): 4759 case (PROT_USER | PROT_EXEC | PROT_READ): 4760 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT; 4761 return (0); /* clr prv and wrt */ 4762 case (PROT_USER | PROT_WRITE): 4763 case (PROT_USER | PROT_WRITE | PROT_READ): 4764 case (PROT_USER | PROT_EXEC | PROT_WRITE): 4765 case (PROT_USER | PROT_EXEC | PROT_WRITE | PROT_READ): 4766 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT; 4767 return (TTE_WRPRM_INT); /* clr prv and set wrt */ 4768 default: 4769 panic("sfmmu_vtop_prot -- bad prot %x", vprot); 4770 } 4771 return (0); 4772 } 4773 4774 /* 4775 * Alternate unload for very large virtual ranges. With a true 64 bit VA, 4776 * the normal algorithm would take too long for a very large VA range with 4777 * few real mappings. This routine just walks thru all HMEs in the global 4778 * hash table to find and remove mappings. 4779 */ 4780 static void 4781 hat_unload_large_virtual( 4782 struct hat *sfmmup, 4783 caddr_t startaddr, 4784 size_t len, 4785 uint_t flags, 4786 hat_callback_t *callback) 4787 { 4788 struct hmehash_bucket *hmebp; 4789 struct hme_blk *hmeblkp; 4790 struct hme_blk *pr_hblk = NULL; 4791 struct hme_blk *nx_hblk; 4792 struct hme_blk *list = NULL; 4793 int i; 4794 uint64_t hblkpa, prevpa, nx_pa; 4795 demap_range_t dmr, *dmrp; 4796 cpuset_t cpuset; 4797 caddr_t endaddr = startaddr + len; 4798 caddr_t sa; 4799 caddr_t ea; 4800 caddr_t cb_sa[MAX_CB_ADDR]; 4801 caddr_t cb_ea[MAX_CB_ADDR]; 4802 int addr_cnt = 0; 4803 int a = 0; 4804 4805 if (sfmmup->sfmmu_free) { 4806 dmrp = NULL; 4807 } else { 4808 dmrp = &dmr; 4809 DEMAP_RANGE_INIT(sfmmup, dmrp); 4810 } 4811 4812 /* 4813 * Loop through all the hash buckets of HME blocks looking for matches. 
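 *
 * [Editorial note, not part of the original source: this walk visits each
 * of the UHMEHASH_SZ + 1 user hash buckets exactly once, regardless of the
 * size of the VA range.  hat_unload_callback() below only selects this
 * path for non-kernel hats whose range would need more 4M-step probes
 * (len >> TTE_PAGE_SHIFT(TTE4M)) than there are buckets.]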
4814 */ 4815 for (i = 0; i <= UHMEHASH_SZ; i++) { 4816 hmebp = &uhme_hash[i]; 4817 SFMMU_HASH_LOCK(hmebp); 4818 hmeblkp = hmebp->hmeblkp; 4819 hblkpa = hmebp->hmeh_nextpa; 4820 prevpa = 0; 4821 pr_hblk = NULL; 4822 while (hmeblkp) { 4823 nx_hblk = hmeblkp->hblk_next; 4824 nx_pa = hmeblkp->hblk_nextpa; 4825 4826 /* 4827 * skip if not this context, if a shadow block or 4828 * if the mapping is not in the requested range 4829 */ 4830 if (hmeblkp->hblk_tag.htag_id != sfmmup || 4831 hmeblkp->hblk_shw_bit || 4832 (sa = (caddr_t)get_hblk_base(hmeblkp)) >= endaddr || 4833 (ea = get_hblk_endaddr(hmeblkp)) <= startaddr) { 4834 pr_hblk = hmeblkp; 4835 prevpa = hblkpa; 4836 goto next_block; 4837 } 4838 4839 /* 4840 * unload if there are any current valid mappings 4841 */ 4842 if (hmeblkp->hblk_vcnt != 0 || 4843 hmeblkp->hblk_hmecnt != 0) 4844 (void) sfmmu_hblk_unload(sfmmup, hmeblkp, 4845 sa, ea, dmrp, flags); 4846 4847 /* 4848 * on unmap we also release the HME block itself, once 4849 * all mappings are gone. 4850 */ 4851 if ((flags & HAT_UNLOAD_UNMAP) != 0 && 4852 !hmeblkp->hblk_vcnt && 4853 !hmeblkp->hblk_hmecnt) { 4854 ASSERT(!hmeblkp->hblk_lckcnt); 4855 sfmmu_hblk_hash_rm(hmebp, hmeblkp, 4856 prevpa, pr_hblk); 4857 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 4858 } else { 4859 pr_hblk = hmeblkp; 4860 prevpa = hblkpa; 4861 } 4862 4863 if (callback == NULL) 4864 goto next_block; 4865 4866 /* 4867 * HME blocks may span more than one page, but we may be 4868 * unmapping only one page, so check for a smaller range 4869 * for the callback 4870 */ 4871 if (sa < startaddr) 4872 sa = startaddr; 4873 if (--ea > endaddr) 4874 ea = endaddr - 1; 4875 4876 cb_sa[addr_cnt] = sa; 4877 cb_ea[addr_cnt] = ea; 4878 if (++addr_cnt == MAX_CB_ADDR) { 4879 if (dmrp != NULL) { 4880 DEMAP_RANGE_FLUSH(dmrp); 4881 cpuset = sfmmup->sfmmu_cpusran; 4882 xt_sync(cpuset); 4883 } 4884 4885 for (a = 0; a < MAX_CB_ADDR; ++a) { 4886 callback->hcb_start_addr = cb_sa[a]; 4887 callback->hcb_end_addr = cb_ea[a]; 4888 callback->hcb_function(callback); 4889 } 4890 addr_cnt = 0; 4891 } 4892 4893 next_block: 4894 hmeblkp = nx_hblk; 4895 hblkpa = nx_pa; 4896 } 4897 SFMMU_HASH_UNLOCK(hmebp); 4898 } 4899 4900 sfmmu_hblks_list_purge(&list); 4901 if (dmrp != NULL) { 4902 DEMAP_RANGE_FLUSH(dmrp); 4903 cpuset = sfmmup->sfmmu_cpusran; 4904 xt_sync(cpuset); 4905 } 4906 4907 for (a = 0; a < addr_cnt; ++a) { 4908 callback->hcb_start_addr = cb_sa[a]; 4909 callback->hcb_end_addr = cb_ea[a]; 4910 callback->hcb_function(callback); 4911 } 4912 4913 /* 4914 * Check TSB and TLB page sizes if the process isn't exiting. 4915 */ 4916 if (!sfmmup->sfmmu_free) 4917 sfmmu_check_page_sizes(sfmmup, 0); 4918 } 4919 4920 /* 4921 * Unload all the mappings in the range [addr..addr+len). addr and len must 4922 * be MMU_PAGESIZE aligned. 
4923 */ 4924 4925 extern struct seg *segkmap; 4926 #define ISSEGKMAP(sfmmup, addr) (sfmmup == ksfmmup && \ 4927 segkmap->s_base <= (addr) && (addr) < (segkmap->s_base + segkmap->s_size)) 4928 4929 4930 void 4931 hat_unload_callback( 4932 struct hat *sfmmup, 4933 caddr_t addr, 4934 size_t len, 4935 uint_t flags, 4936 hat_callback_t *callback) 4937 { 4938 struct hmehash_bucket *hmebp; 4939 hmeblk_tag hblktag; 4940 int hmeshift, hashno, iskernel; 4941 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL; 4942 caddr_t endaddr; 4943 cpuset_t cpuset; 4944 uint64_t hblkpa, prevpa; 4945 int addr_count = 0; 4946 int a; 4947 caddr_t cb_start_addr[MAX_CB_ADDR]; 4948 caddr_t cb_end_addr[MAX_CB_ADDR]; 4949 int issegkmap = ISSEGKMAP(sfmmup, addr); 4950 demap_range_t dmr, *dmrp; 4951 4952 if (sfmmup->sfmmu_xhat_provider) { 4953 XHAT_UNLOAD_CALLBACK(sfmmup, addr, len, flags, callback); 4954 return; 4955 } else { 4956 /* 4957 * This must be a CPU HAT. If the address space has 4958 * XHATs attached, unload the mappings for all of them, 4959 * just in case 4960 */ 4961 ASSERT(sfmmup->sfmmu_as != NULL); 4962 if (sfmmup->sfmmu_as->a_xhat != NULL) 4963 xhat_unload_callback_all(sfmmup->sfmmu_as, addr, 4964 len, flags, callback); 4965 } 4966 4967 ASSERT((sfmmup == ksfmmup) || (flags & HAT_UNLOAD_OTHER) || \ 4968 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 4969 4970 ASSERT(sfmmup != NULL); 4971 ASSERT((len & MMU_PAGEOFFSET) == 0); 4972 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 4973 4974 /* 4975 * Probing through a large VA range (say 63 bits) will be slow, even 4976 * at 4 Meg steps between the probes. So, when the virtual address range 4977 * is very large, search the HME entries for what to unload. 4978 * 4979 * len >> TTE_PAGE_SHIFT(TTE4M) is the # of 4Meg probes we'd need 4980 * 4981 * UHMEHASH_SZ is number of hash buckets to examine 4982 * 4983 */ 4984 if (sfmmup != KHATID && (len >> TTE_PAGE_SHIFT(TTE4M)) > UHMEHASH_SZ) { 4985 hat_unload_large_virtual(sfmmup, addr, len, flags, callback); 4986 return; 4987 } 4988 4989 CPUSET_ZERO(cpuset); 4990 4991 /* 4992 * If the process is exiting, we can save a lot of fuss since 4993 * we'll flush the TLB when we free the ctx anyway. 4994 */ 4995 if (sfmmup->sfmmu_free) 4996 dmrp = NULL; 4997 else 4998 dmrp = &dmr; 4999 5000 DEMAP_RANGE_INIT(sfmmup, dmrp); 5001 endaddr = addr + len; 5002 hblktag.htag_id = sfmmup; 5003 5004 /* 5005 * It is likely for the vm to call unload over a wide range of 5006 * addresses that are actually very sparsely populated by 5007 * translations. In order to speed this up the sfmmu hat supports 5008 * the concept of shadow hmeblks. Dummy large page hmeblks that 5009 * correspond to actual small translations are allocated at tteload 5010 * time and are referred to as shadow hmeblks. Now, during unload 5011 * time, we first check if we have a shadow hmeblk for that 5012 * translation. The absence of one means the corresponding address 5013 * range is empty and can be skipped. 5014 * 5015 * The kernel is an exception to above statement and that is why 5016 * we don't use shadow hmeblks and hash starting from the smallest 5017 * page size. 
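 *
 * [Editorial note, not part of the original source: as a concrete walk
 * through the loop below, consider a user hat on a machine where
 * mmu_page_sizes == max_mmu_page_sizes.  Each address is first probed at
 * the 256M hash level; a shadow hmeblk there means real translations
 * exist at some smaller page size within that region, so hashno is
 * decremented and the same address is rehashed at 32M, then 4M, and so
 * on, down to the level where the real mapping (if any at this address)
 * is found and unloaded.  When no hmeblk at all is found at a level, the
 * region such an hmeblk would have spanned is skipped in one step.]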
5018 */ 5019 if (sfmmup == KHATID) { 5020 iskernel = 1; 5021 hashno = TTE64K; 5022 } else { 5023 iskernel = 0; 5024 if (mmu_page_sizes == max_mmu_page_sizes) { 5025 hashno = TTE256M; 5026 } else { 5027 hashno = TTE4M; 5028 } 5029 } 5030 while (addr < endaddr) { 5031 hmeshift = HME_HASH_SHIFT(hashno); 5032 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 5033 hblktag.htag_rehash = hashno; 5034 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 5035 5036 SFMMU_HASH_LOCK(hmebp); 5037 5038 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, hblkpa, pr_hblk, 5039 prevpa, &list); 5040 if (hmeblkp == NULL) { 5041 /* 5042 * didn't find an hmeblk. skip the appropiate 5043 * address range. 5044 */ 5045 SFMMU_HASH_UNLOCK(hmebp); 5046 if (iskernel) { 5047 if (hashno < mmu_hashcnt) { 5048 hashno++; 5049 continue; 5050 } else { 5051 hashno = TTE64K; 5052 addr = (caddr_t)roundup((uintptr_t)addr 5053 + 1, MMU_PAGESIZE64K); 5054 continue; 5055 } 5056 } 5057 addr = (caddr_t)roundup((uintptr_t)addr + 1, 5058 (1 << hmeshift)); 5059 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 5060 ASSERT(hashno == TTE64K); 5061 continue; 5062 } 5063 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 5064 hashno = TTE512K; 5065 continue; 5066 } 5067 if (mmu_page_sizes == max_mmu_page_sizes) { 5068 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 5069 hashno = TTE4M; 5070 continue; 5071 } 5072 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 5073 hashno = TTE32M; 5074 continue; 5075 } 5076 hashno = TTE256M; 5077 continue; 5078 } else { 5079 hashno = TTE4M; 5080 continue; 5081 } 5082 } 5083 ASSERT(hmeblkp); 5084 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 5085 /* 5086 * If the valid count is zero we can skip the range 5087 * mapped by this hmeblk. 5088 * We free hblks in the case of HAT_UNMAP. HAT_UNMAP 5089 * is used by segment drivers as a hint 5090 * that the mapping resource won't be used any longer. 5091 * The best example of this is during exit(). 5092 */ 5093 addr = (caddr_t)roundup((uintptr_t)addr + 1, 5094 get_hblk_span(hmeblkp)); 5095 if ((flags & HAT_UNLOAD_UNMAP) || 5096 (iskernel && !issegkmap)) { 5097 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, 5098 pr_hblk); 5099 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 5100 } 5101 SFMMU_HASH_UNLOCK(hmebp); 5102 5103 if (iskernel) { 5104 hashno = TTE64K; 5105 continue; 5106 } 5107 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 5108 ASSERT(hashno == TTE64K); 5109 continue; 5110 } 5111 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 5112 hashno = TTE512K; 5113 continue; 5114 } 5115 if (mmu_page_sizes == max_mmu_page_sizes) { 5116 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 5117 hashno = TTE4M; 5118 continue; 5119 } 5120 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 5121 hashno = TTE32M; 5122 continue; 5123 } 5124 hashno = TTE256M; 5125 continue; 5126 } else { 5127 hashno = TTE4M; 5128 continue; 5129 } 5130 } 5131 if (hmeblkp->hblk_shw_bit) { 5132 /* 5133 * If we encounter a shadow hmeblk we know there is 5134 * smaller sized hmeblks mapping the same address space. 5135 * Decrement the hash size and rehash. 5136 */ 5137 ASSERT(sfmmup != KHATID); 5138 hashno--; 5139 SFMMU_HASH_UNLOCK(hmebp); 5140 continue; 5141 } 5142 5143 /* 5144 * track callback address ranges. 
5145 * only start a new range when it's not contiguous 5146 */ 5147 if (callback != NULL) { 5148 if (addr_count > 0 && 5149 addr == cb_end_addr[addr_count - 1]) 5150 --addr_count; 5151 else 5152 cb_start_addr[addr_count] = addr; 5153 } 5154 5155 addr = sfmmu_hblk_unload(sfmmup, hmeblkp, addr, endaddr, 5156 dmrp, flags); 5157 5158 if (callback != NULL) 5159 cb_end_addr[addr_count++] = addr; 5160 5161 if (((flags & HAT_UNLOAD_UNMAP) || (iskernel && !issegkmap)) && 5162 !hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 5163 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, 5164 pr_hblk); 5165 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 5166 } 5167 SFMMU_HASH_UNLOCK(hmebp); 5168 5169 /* 5170 * Notify our caller as to exactly which pages 5171 * have been unloaded. We do these in clumps, 5172 * to minimize the number of xt_sync()s that need to occur. 5173 */ 5174 if (callback != NULL && addr_count == MAX_CB_ADDR) { 5175 DEMAP_RANGE_FLUSH(dmrp); 5176 if (dmrp != NULL) { 5177 cpuset = sfmmup->sfmmu_cpusran; 5178 xt_sync(cpuset); 5179 } 5180 5181 for (a = 0; a < MAX_CB_ADDR; ++a) { 5182 callback->hcb_start_addr = cb_start_addr[a]; 5183 callback->hcb_end_addr = cb_end_addr[a]; 5184 callback->hcb_function(callback); 5185 } 5186 addr_count = 0; 5187 } 5188 if (iskernel) { 5189 hashno = TTE64K; 5190 continue; 5191 } 5192 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 5193 ASSERT(hashno == TTE64K); 5194 continue; 5195 } 5196 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 5197 hashno = TTE512K; 5198 continue; 5199 } 5200 if (mmu_page_sizes == max_mmu_page_sizes) { 5201 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 5202 hashno = TTE4M; 5203 continue; 5204 } 5205 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 5206 hashno = TTE32M; 5207 continue; 5208 } 5209 hashno = TTE256M; 5210 } else { 5211 hashno = TTE4M; 5212 } 5213 } 5214 5215 sfmmu_hblks_list_purge(&list); 5216 DEMAP_RANGE_FLUSH(dmrp); 5217 if (dmrp != NULL) { 5218 cpuset = sfmmup->sfmmu_cpusran; 5219 xt_sync(cpuset); 5220 } 5221 if (callback && addr_count != 0) { 5222 for (a = 0; a < addr_count; ++a) { 5223 callback->hcb_start_addr = cb_start_addr[a]; 5224 callback->hcb_end_addr = cb_end_addr[a]; 5225 callback->hcb_function(callback); 5226 } 5227 } 5228 5229 /* 5230 * Check TSB and TLB page sizes if the process isn't exiting. 5231 */ 5232 if (!sfmmup->sfmmu_free) 5233 sfmmu_check_page_sizes(sfmmup, 0); 5234 } 5235 5236 /* 5237 * Unload all the mappings in the range [addr..addr+len). addr and len must 5238 * be MMU_PAGESIZE aligned. 5239 */ 5240 void 5241 hat_unload(struct hat *sfmmup, caddr_t addr, size_t len, uint_t flags) 5242 { 5243 if (sfmmup->sfmmu_xhat_provider) { 5244 XHAT_UNLOAD(sfmmup, addr, len, flags); 5245 return; 5246 } 5247 hat_unload_callback(sfmmup, addr, len, flags, NULL); 5248 } 5249 5250 5251 /* 5252 * Find the largest mapping size for this page. 5253 */ 5254 int 5255 fnd_mapping_sz(page_t *pp) 5256 { 5257 int sz; 5258 int p_index; 5259 5260 p_index = PP_MAPINDEX(pp); 5261 5262 sz = 0; 5263 p_index >>= 1; /* don't care about 8K bit */ 5264 for (; p_index; p_index >>= 1) { 5265 sz++; 5266 } 5267 5268 return (sz); 5269 } 5270 5271 /* 5272 * This function unloads a range of addresses for an hmeblk. 5273 * It returns the next address to be unloaded. 5274 * It should be called with the hash lock held. 
5275 */ 5276 static caddr_t 5277 sfmmu_hblk_unload(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 5278 caddr_t endaddr, demap_range_t *dmrp, uint_t flags) 5279 { 5280 tte_t tte, ttemod; 5281 struct sf_hment *sfhmep; 5282 int ttesz; 5283 long ttecnt; 5284 page_t *pp; 5285 kmutex_t *pml; 5286 int ret; 5287 int use_demap_range; 5288 5289 ASSERT(in_hblk_range(hmeblkp, addr)); 5290 ASSERT(!hmeblkp->hblk_shw_bit); 5291 #ifdef DEBUG 5292 if (get_hblk_ttesz(hmeblkp) != TTE8K && 5293 (endaddr < get_hblk_endaddr(hmeblkp))) { 5294 panic("sfmmu_hblk_unload: partial unload of large page"); 5295 } 5296 #endif /* DEBUG */ 5297 5298 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 5299 ttesz = get_hblk_ttesz(hmeblkp); 5300 5301 use_demap_range = (do_virtual_coloring && 5302 ((dmrp == NULL) || TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp))); 5303 if (use_demap_range) { 5304 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 5305 } else { 5306 DEMAP_RANGE_FLUSH(dmrp); 5307 } 5308 ttecnt = 0; 5309 HBLKTOHME(sfhmep, hmeblkp, addr); 5310 5311 while (addr < endaddr) { 5312 pml = NULL; 5313 again: 5314 sfmmu_copytte(&sfhmep->hme_tte, &tte); 5315 if (TTE_IS_VALID(&tte)) { 5316 pp = sfhmep->hme_page; 5317 if (pp && pml == NULL) { 5318 pml = sfmmu_mlist_enter(pp); 5319 } 5320 5321 /* 5322 * Verify if hme still points to 'pp' now that 5323 * we have p_mapping lock. 5324 */ 5325 if (sfhmep->hme_page != pp) { 5326 if (pp != NULL && sfhmep->hme_page != NULL) { 5327 if (pml) { 5328 sfmmu_mlist_exit(pml); 5329 } 5330 /* Re-start this iteration. */ 5331 continue; 5332 } 5333 ASSERT((pp != NULL) && 5334 (sfhmep->hme_page == NULL)); 5335 goto tte_unloaded; 5336 } 5337 5338 /* 5339 * This point on we have both HASH and p_mapping 5340 * lock. 5341 */ 5342 ASSERT(pp == sfhmep->hme_page); 5343 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 5344 5345 /* 5346 * We need to loop on modify tte because it is 5347 * possible for pagesync to come along and 5348 * change the software bits beneath us. 5349 * 5350 * Page_unload can also invalidate the tte after 5351 * we read tte outside of p_mapping lock. 5352 */ 5353 ttemod = tte; 5354 5355 TTE_SET_INVALID(&ttemod); 5356 ret = sfmmu_modifytte_try(&tte, &ttemod, 5357 &sfhmep->hme_tte); 5358 5359 if (ret <= 0) { 5360 if (TTE_IS_VALID(&tte)) { 5361 goto again; 5362 } else { 5363 /* 5364 * We read in a valid pte, but it 5365 * is unloaded by page_unload. 5366 * hme_page has become NULL and 5367 * we hold no p_mapping lock. 5368 */ 5369 ASSERT(pp == NULL && pml == NULL); 5370 goto tte_unloaded; 5371 } 5372 } 5373 5374 if (!(flags & HAT_UNLOAD_NOSYNC)) { 5375 sfmmu_ttesync(sfmmup, addr, &tte, pp); 5376 } 5377 5378 /* 5379 * Ok- we invalidated the tte. Do the rest of the job. 5380 */ 5381 ttecnt++; 5382 5383 if (flags & HAT_UNLOAD_UNLOCK) { 5384 ASSERT(hmeblkp->hblk_lckcnt > 0); 5385 atomic_add_16(&hmeblkp->hblk_lckcnt, -1); 5386 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK); 5387 } 5388 5389 /* 5390 * Normally we would need to flush the page 5391 * from the virtual cache at this point in 5392 * order to prevent a potential cache alias 5393 * inconsistency. 5394 * The particular scenario we need to worry 5395 * about is: 5396 * Given: va1 and va2 are two virtual address 5397 * that alias and map the same physical 5398 * address. 5399 * 1. mapping exists from va1 to pa and data 5400 * has been read into the cache. 5401 * 2. unload va1. 5402 * 3. load va2 and modify data using va2. 5403 * 4 unload va2. 5404 * 5. load va1 and reference data. 
Unless we 5405 * flush the data cache when we unload we will 5406 * get stale data. 5407 * Fortunately, page coloring eliminates the 5408 * above scenario by remembering the color a 5409 * physical page was last or is currently 5410 * mapped to. Now, we delay the flush until 5411 * the loading of translations. Only when the 5412 * new translation is of a different color 5413 * are we forced to flush. 5414 */ 5415 if (use_demap_range) { 5416 /* 5417 * Mark this page as needing a demap. 5418 */ 5419 DEMAP_RANGE_MARKPG(dmrp, addr); 5420 } else { 5421 if (do_virtual_coloring) { 5422 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 5423 sfmmup->sfmmu_free, 0); 5424 } else { 5425 pfn_t pfnum; 5426 5427 pfnum = TTE_TO_PFN(addr, &tte); 5428 sfmmu_tlbcache_demap(addr, sfmmup, 5429 hmeblkp, pfnum, sfmmup->sfmmu_free, 5430 FLUSH_NECESSARY_CPUS, 5431 CACHE_FLUSH, 0); 5432 } 5433 } 5434 5435 if (pp) { 5436 /* 5437 * Remove the hment from the mapping list 5438 */ 5439 ASSERT(hmeblkp->hblk_hmecnt > 0); 5440 5441 /* 5442 * Again, we cannot 5443 * ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS); 5444 */ 5445 HME_SUB(sfhmep, pp); 5446 membar_stst(); 5447 atomic_add_16(&hmeblkp->hblk_hmecnt, -1); 5448 } 5449 5450 ASSERT(hmeblkp->hblk_vcnt > 0); 5451 atomic_add_16(&hmeblkp->hblk_vcnt, -1); 5452 5453 ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt || 5454 !hmeblkp->hblk_lckcnt); 5455 5456 #ifdef VAC 5457 if (pp && (pp->p_nrm & (P_KPMC | P_KPMS | P_TNC))) { 5458 if (PP_ISTNC(pp)) { 5459 /* 5460 * If page was temporary 5461 * uncached, try to recache 5462 * it. Note that HME_SUB() was 5463 * called above so p_index and 5464 * mlist had been updated. 5465 */ 5466 conv_tnc(pp, ttesz); 5467 } else if (pp->p_mapping == NULL) { 5468 ASSERT(kpm_enable); 5469 /* 5470 * Page is marked to be in VAC conflict 5471 * to an existing kpm mapping and/or is 5472 * kpm mapped using only the regular 5473 * pagesize. 5474 */ 5475 sfmmu_kpm_hme_unload(pp); 5476 } 5477 } 5478 #endif /* VAC */ 5479 } else if ((pp = sfhmep->hme_page) != NULL) { 5480 /* 5481 * TTE is invalid but the hme 5482 * still exists. let pageunload 5483 * complete its job. 5484 */ 5485 ASSERT(pml == NULL); 5486 pml = sfmmu_mlist_enter(pp); 5487 if (sfhmep->hme_page != NULL) { 5488 sfmmu_mlist_exit(pml); 5489 pml = NULL; 5490 goto again; 5491 } 5492 ASSERT(sfhmep->hme_page == NULL); 5493 } else if (hmeblkp->hblk_hmecnt != 0) { 5494 /* 5495 * pageunload may have not finished decrementing 5496 * hblk_vcnt and hblk_hmecnt. Find page_t if any and 5497 * wait for pageunload to finish. Rely on pageunload 5498 * to decrement hblk_hmecnt after hblk_vcnt. 5499 */ 5500 pfn_t pfn = TTE_TO_TTEPFN(&tte); 5501 ASSERT(pml == NULL); 5502 if (pf_is_memory(pfn)) { 5503 pp = page_numtopp_nolock(pfn); 5504 if (pp != NULL) { 5505 pml = sfmmu_mlist_enter(pp); 5506 sfmmu_mlist_exit(pml); 5507 pml = NULL; 5508 } 5509 } 5510 } 5511 5512 tte_unloaded: 5513 /* 5514 * At this point, the tte we are looking at 5515 * should be unloaded, and hme has been unlinked 5516 * from page too. This is important because in 5517 * pageunload, it does ttesync() then HME_SUB. 5518 * We need to make sure HME_SUB has been completed 5519 * so we know ttesync() has been completed. Otherwise, 5520 * at exit time, after return from hat layer, VM will 5521 * release as structure which hat_setstat() (called 5522 * by ttesync()) needs. 
5523 */ 5524 #ifdef DEBUG 5525 { 5526 tte_t dtte; 5527 5528 ASSERT(sfhmep->hme_page == NULL); 5529 5530 sfmmu_copytte(&sfhmep->hme_tte, &dtte); 5531 ASSERT(!TTE_IS_VALID(&dtte)); 5532 } 5533 #endif 5534 5535 if (pml) { 5536 sfmmu_mlist_exit(pml); 5537 } 5538 5539 addr += TTEBYTES(ttesz); 5540 sfhmep++; 5541 DEMAP_RANGE_NEXTPG(dmrp); 5542 } 5543 if (ttecnt > 0) 5544 atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -ttecnt); 5545 return (addr); 5546 } 5547 5548 /* 5549 * Synchronize all the mappings in the range [addr..addr+len). 5550 * Can be called with clearflag having two states: 5551 * HAT_SYNC_DONTZERO means just return the rm stats 5552 * HAT_SYNC_ZERORM means zero rm bits in the tte and return the stats 5553 */ 5554 void 5555 hat_sync(struct hat *sfmmup, caddr_t addr, size_t len, uint_t clearflag) 5556 { 5557 struct hmehash_bucket *hmebp; 5558 hmeblk_tag hblktag; 5559 int hmeshift, hashno = 1; 5560 struct hme_blk *hmeblkp, *list = NULL; 5561 caddr_t endaddr; 5562 cpuset_t cpuset; 5563 5564 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 5565 ASSERT((sfmmup == ksfmmup) || 5566 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 5567 ASSERT((len & MMU_PAGEOFFSET) == 0); 5568 ASSERT((clearflag == HAT_SYNC_DONTZERO) || 5569 (clearflag == HAT_SYNC_ZERORM)); 5570 5571 CPUSET_ZERO(cpuset); 5572 5573 endaddr = addr + len; 5574 hblktag.htag_id = sfmmup; 5575 /* 5576 * Spitfire supports 4 page sizes. 5577 * Most pages are expected to be of the smallest page 5578 * size (8K) and these will not need to be rehashed. 64K 5579 * pages also don't need to be rehashed because an hmeblk 5580 * spans 64K of address space. 512K pages might need 1 rehash 5581 * and 4M pages 2 rehashes. 5582 */ 5583 while (addr < endaddr) { 5584 hmeshift = HME_HASH_SHIFT(hashno); 5585 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 5586 hblktag.htag_rehash = hashno; 5587 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 5588 5589 SFMMU_HASH_LOCK(hmebp); 5590 5591 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 5592 if (hmeblkp != NULL) { 5593 /* 5594 * We've encountered a shadow hmeblk so skip the range 5595 * of the next smaller mapping size. 5596 */ 5597 if (hmeblkp->hblk_shw_bit) { 5598 ASSERT(sfmmup != ksfmmup); 5599 ASSERT(hashno > 1); 5600 addr = (caddr_t)P2END((uintptr_t)addr, 5601 TTEBYTES(hashno - 1)); 5602 } else { 5603 addr = sfmmu_hblk_sync(sfmmup, hmeblkp, 5604 addr, endaddr, clearflag); 5605 } 5606 SFMMU_HASH_UNLOCK(hmebp); 5607 hashno = 1; 5608 continue; 5609 } 5610 SFMMU_HASH_UNLOCK(hmebp); 5611 5612 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 5613 /* 5614 * We have traversed the whole list and rehashed 5615 * if necessary without finding the address to sync. 5616 * This is ok so we increment the address by the 5617 * smallest hmeblk range for kernel mappings and the 5618 * largest hmeblk range, to account for shadow hmeblks, 5619 * for user mappings and continue.
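 *
 * [Editorial note, not part of the original source: concretely, for the
 * kernel hat the address advances to the next 64K boundary (TTEBYTES(1),
 * i.e. 64K, the span of an 8K-tte hmeblk), while for a user hat it
 * advances to the boundary of the page size currently being probed, e.g.
 * the next 4M boundary when hashno == TTE4M.]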
5620 */ 5621 if (sfmmup == ksfmmup) 5622 addr = (caddr_t)P2END((uintptr_t)addr, 5623 TTEBYTES(1)); 5624 else 5625 addr = (caddr_t)P2END((uintptr_t)addr, 5626 TTEBYTES(hashno)); 5627 hashno = 1; 5628 } else { 5629 hashno++; 5630 } 5631 } 5632 sfmmu_hblks_list_purge(&list); 5633 cpuset = sfmmup->sfmmu_cpusran; 5634 xt_sync(cpuset); 5635 } 5636 5637 static caddr_t 5638 sfmmu_hblk_sync(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 5639 caddr_t endaddr, int clearflag) 5640 { 5641 tte_t tte, ttemod; 5642 struct sf_hment *sfhmep; 5643 int ttesz; 5644 struct page *pp; 5645 kmutex_t *pml; 5646 int ret; 5647 5648 ASSERT(hmeblkp->hblk_shw_bit == 0); 5649 5650 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 5651 5652 ttesz = get_hblk_ttesz(hmeblkp); 5653 HBLKTOHME(sfhmep, hmeblkp, addr); 5654 5655 while (addr < endaddr) { 5656 sfmmu_copytte(&sfhmep->hme_tte, &tte); 5657 if (TTE_IS_VALID(&tte)) { 5658 pml = NULL; 5659 pp = sfhmep->hme_page; 5660 if (pp) { 5661 pml = sfmmu_mlist_enter(pp); 5662 } 5663 if (pp != sfhmep->hme_page) { 5664 /* 5665 * tte most have been unloaded 5666 * underneath us. Recheck 5667 */ 5668 ASSERT(pml); 5669 sfmmu_mlist_exit(pml); 5670 continue; 5671 } 5672 5673 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 5674 5675 if (clearflag == HAT_SYNC_ZERORM) { 5676 ttemod = tte; 5677 TTE_CLR_RM(&ttemod); 5678 ret = sfmmu_modifytte_try(&tte, &ttemod, 5679 &sfhmep->hme_tte); 5680 if (ret < 0) { 5681 if (pml) { 5682 sfmmu_mlist_exit(pml); 5683 } 5684 continue; 5685 } 5686 5687 if (ret > 0) { 5688 sfmmu_tlb_demap(addr, sfmmup, 5689 hmeblkp, 0, 0); 5690 } 5691 } 5692 sfmmu_ttesync(sfmmup, addr, &tte, pp); 5693 if (pml) { 5694 sfmmu_mlist_exit(pml); 5695 } 5696 } 5697 addr += TTEBYTES(ttesz); 5698 sfhmep++; 5699 } 5700 return (addr); 5701 } 5702 5703 /* 5704 * This function will sync a tte to the page struct and it will 5705 * update the hat stats. Currently it allows us to pass a NULL pp 5706 * and we will simply update the stats. We may want to change this 5707 * so we only keep stats for pages backed by pp's. 5708 */ 5709 static void 5710 sfmmu_ttesync(struct hat *sfmmup, caddr_t addr, tte_t *ttep, page_t *pp) 5711 { 5712 uint_t rm = 0; 5713 int sz; 5714 pgcnt_t npgs; 5715 5716 ASSERT(TTE_IS_VALID(ttep)); 5717 5718 if (TTE_IS_NOSYNC(ttep)) { 5719 return; 5720 } 5721 5722 if (TTE_IS_REF(ttep)) { 5723 rm = P_REF; 5724 } 5725 if (TTE_IS_MOD(ttep)) { 5726 rm |= P_MOD; 5727 } 5728 5729 if (rm == 0) { 5730 return; 5731 } 5732 5733 sz = TTE_CSZ(ttep); 5734 if (sfmmup->sfmmu_rmstat) { 5735 int i; 5736 caddr_t vaddr = addr; 5737 5738 for (i = 0; i < TTEPAGES(sz); i++, vaddr += MMU_PAGESIZE) { 5739 hat_setstat(sfmmup->sfmmu_as, vaddr, MMU_PAGESIZE, rm); 5740 } 5741 5742 } 5743 5744 /* 5745 * XXX I want to use cas to update nrm bits but they 5746 * currently belong in common/vm and not in hat where 5747 * they should be. 5748 * The nrm bits are protected by the same mutex as 5749 * the one that protects the page's mapping list. 5750 */ 5751 if (!pp) 5752 return; 5753 ASSERT(sfmmu_mlist_held(pp)); 5754 /* 5755 * If the tte is for a large page, we need to sync all the 5756 * pages covered by the tte. 5757 */ 5758 if (sz != TTE8K) { 5759 ASSERT(pp->p_szc != 0); 5760 pp = PP_GROUPLEADER(pp, sz); 5761 ASSERT(sfmmu_mlist_held(pp)); 5762 } 5763 5764 /* Get number of pages from tte size. 
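 * [Editorial note, not part of the original source: e.g. a 4M tte has
 * TTEPAGES(TTE4M) == 512, so the loop below updates the ref/mod state of
 * all 512 8K constituent pages of the large page.]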
*/ 5765 npgs = TTEPAGES(sz); 5766 5767 do { 5768 ASSERT(pp); 5769 ASSERT(sfmmu_mlist_held(pp)); 5770 if (((rm & P_REF) != 0 && !PP_ISREF(pp)) || 5771 ((rm & P_MOD) != 0 && !PP_ISMOD(pp))) 5772 hat_page_setattr(pp, rm); 5773 5774 /* 5775 * Are we done? If not, we must have a large mapping. 5776 * For large mappings we need to sync the rest of the pages 5777 * covered by this tte; goto the next page. 5778 */ 5779 } while (--npgs > 0 && (pp = PP_PAGENEXT(pp))); 5780 } 5781 5782 /* 5783 * Execute pre-callback handler of each pa_hment linked to pp 5784 * 5785 * Inputs: 5786 * flag: either HAT_PRESUSPEND or HAT_SUSPEND. 5787 * capture_cpus: pointer to return value (below) 5788 * 5789 * Returns: 5790 * Propagates the subsystem callback return values back to the caller; 5791 * returns 0 on success. If capture_cpus is non-NULL, the value returned 5792 * is zero if all of the pa_hments are of a type that do not require 5793 * capturing CPUs prior to suspending the mapping, else it is 1. 5794 */ 5795 static int 5796 hat_pageprocess_precallbacks(struct page *pp, uint_t flag, int *capture_cpus) 5797 { 5798 struct sf_hment *sfhmep; 5799 struct pa_hment *pahmep; 5800 int (*f)(caddr_t, uint_t, uint_t, void *); 5801 int ret; 5802 id_t id; 5803 int locked = 0; 5804 kmutex_t *pml; 5805 5806 ASSERT(PAGE_EXCL(pp)); 5807 if (!sfmmu_mlist_held(pp)) { 5808 pml = sfmmu_mlist_enter(pp); 5809 locked = 1; 5810 } 5811 5812 if (capture_cpus) 5813 *capture_cpus = 0; 5814 5815 top: 5816 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 5817 /* 5818 * skip sf_hments corresponding to VA<->PA mappings; 5819 * for pa_hment's, hme_tte.ll is zero 5820 */ 5821 if (!IS_PAHME(sfhmep)) 5822 continue; 5823 5824 pahmep = sfhmep->hme_data; 5825 ASSERT(pahmep != NULL); 5826 5827 /* 5828 * skip if pre-handler has been called earlier in this loop 5829 */ 5830 if (pahmep->flags & flag) 5831 continue; 5832 5833 id = pahmep->cb_id; 5834 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid); 5835 if (capture_cpus && sfmmu_cb_table[id].capture_cpus != 0) 5836 *capture_cpus = 1; 5837 if ((f = sfmmu_cb_table[id].prehandler) == NULL) { 5838 pahmep->flags |= flag; 5839 continue; 5840 } 5841 5842 /* 5843 * Drop the mapping list lock to avoid locking order issues. 5844 */ 5845 if (locked) 5846 sfmmu_mlist_exit(pml); 5847 5848 ret = f(pahmep->addr, pahmep->len, flag, pahmep->pvt); 5849 if (ret != 0) 5850 return (ret); /* caller must do the cleanup */ 5851 5852 if (locked) { 5853 pml = sfmmu_mlist_enter(pp); 5854 pahmep->flags |= flag; 5855 goto top; 5856 } 5857 5858 pahmep->flags |= flag; 5859 } 5860 5861 if (locked) 5862 sfmmu_mlist_exit(pml); 5863 5864 return (0); 5865 } 5866 5867 /* 5868 * Execute post-callback handler of each pa_hment linked to pp 5869 * 5870 * Same overall assumptions and restrictions apply as for 5871 * hat_pageprocess_precallbacks(). 
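 *
 * [Editorial note, not part of the original source: the constituent-pfn
 * computation below is newpfn = pgpfn | (btop(addr) & pgmask), where
 * pgmask is the number of base pages in the large page minus one.  For a
 * 4M target page, pgmask == 0x1ff, so the low 9 bits of the callback
 * address's page number select which 8K constituent the handler is told
 * about.]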
5872 */ 5873 static void 5874 hat_pageprocess_postcallbacks(struct page *pp, uint_t flag) 5875 { 5876 pfn_t pgpfn = pp->p_pagenum; 5877 pfn_t pgmask = btop(page_get_pagesize(pp->p_szc)) - 1; 5878 pfn_t newpfn; 5879 struct sf_hment *sfhmep; 5880 struct pa_hment *pahmep; 5881 int (*f)(caddr_t, uint_t, uint_t, void *, pfn_t); 5882 id_t id; 5883 int locked = 0; 5884 kmutex_t *pml; 5885 5886 ASSERT(PAGE_EXCL(pp)); 5887 if (!sfmmu_mlist_held(pp)) { 5888 pml = sfmmu_mlist_enter(pp); 5889 locked = 1; 5890 } 5891 5892 top: 5893 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 5894 /* 5895 * skip sf_hments corresponding to VA<->PA mappings; 5896 * for pa_hment's, hme_tte.ll is zero 5897 */ 5898 if (!IS_PAHME(sfhmep)) 5899 continue; 5900 5901 pahmep = sfhmep->hme_data; 5902 ASSERT(pahmep != NULL); 5903 5904 if ((pahmep->flags & flag) == 0) 5905 continue; 5906 5907 pahmep->flags &= ~flag; 5908 5909 id = pahmep->cb_id; 5910 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid); 5911 if ((f = sfmmu_cb_table[id].posthandler) == NULL) 5912 continue; 5913 5914 /* 5915 * Convert the base page PFN into the constituent PFN 5916 * which is needed by the callback handler. 5917 */ 5918 newpfn = pgpfn | (btop((uintptr_t)pahmep->addr) & pgmask); 5919 5920 /* 5921 * Drop the mapping list lock to avoid locking order issues. 5922 */ 5923 if (locked) 5924 sfmmu_mlist_exit(pml); 5925 5926 if (f(pahmep->addr, pahmep->len, flag, pahmep->pvt, newpfn) 5927 != 0) 5928 panic("sfmmu: posthandler failed"); 5929 5930 if (locked) { 5931 pml = sfmmu_mlist_enter(pp); 5932 goto top; 5933 } 5934 } 5935 5936 if (locked) 5937 sfmmu_mlist_exit(pml); 5938 } 5939 5940 /* 5941 * Suspend locked kernel mapping 5942 */ 5943 void 5944 hat_pagesuspend(struct page *pp) 5945 { 5946 struct sf_hment *sfhmep; 5947 sfmmu_t *sfmmup; 5948 tte_t tte, ttemod; 5949 struct hme_blk *hmeblkp; 5950 caddr_t addr; 5951 int index, cons; 5952 cpuset_t cpuset; 5953 5954 ASSERT(PAGE_EXCL(pp)); 5955 ASSERT(sfmmu_mlist_held(pp)); 5956 5957 mutex_enter(&kpr_suspendlock); 5958 5959 /* 5960 * Call into dtrace to tell it we're about to suspend a 5961 * kernel mapping. This prevents us from running into issues 5962 * with probe context trying to touch a suspended page 5963 * in the relocation codepath itself. 5964 */ 5965 if (dtrace_kreloc_init) 5966 (*dtrace_kreloc_init)(); 5967 5968 index = PP_MAPINDEX(pp); 5969 cons = TTE8K; 5970 5971 retry: 5972 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 5973 5974 if (IS_PAHME(sfhmep)) 5975 continue; 5976 5977 if (get_hblk_ttesz(sfmmu_hmetohblk(sfhmep)) != cons) 5978 continue; 5979 5980 /* 5981 * Loop until we successfully set the suspend bit in 5982 * the TTE. 5983 */ 5984 again: 5985 sfmmu_copytte(&sfhmep->hme_tte, &tte); 5986 ASSERT(TTE_IS_VALID(&tte)); 5987 5988 ttemod = tte; 5989 TTE_SET_SUSPEND(&ttemod); 5990 if (sfmmu_modifytte_try(&tte, &ttemod, 5991 &sfhmep->hme_tte) < 0) 5992 goto again; 5993 5994 /* 5995 * Invalidate TSB entry 5996 */ 5997 hmeblkp = sfmmu_hmetohblk(sfhmep); 5998 5999 sfmmup = hblktosfmmu(hmeblkp); 6000 ASSERT(sfmmup == ksfmmup); 6001 6002 addr = tte_to_vaddr(hmeblkp, tte); 6003 6004 /* 6005 * No need to make sure that the TSB for this sfmmu is 6006 * not being relocated since it is ksfmmup and thus it 6007 * will never be relocated. 
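* The TSB entry is simply discarded here; a later miss will find the
* (suspended) tte in the hme hash again, and lookups such as
* hat_getpfnum() retry in their PFN_SUSPENDED path (see
* sfmmu_vatopfn_suspended()) until hat_pagereload() clears the
* suspend bit.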
6008 */ 6009 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp); 6010 6011 /* 6012 * Update xcall stats 6013 */ 6014 cpuset = cpu_ready_set; 6015 CPUSET_DEL(cpuset, CPU->cpu_id); 6016 6017 /* LINTED: constant in conditional context */ 6018 SFMMU_XCALL_STATS(ksfmmup); 6019 6020 /* 6021 * Flush TLB entry on remote CPU's 6022 */ 6023 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, 6024 (uint64_t)ksfmmup); 6025 xt_sync(cpuset); 6026 6027 /* 6028 * Flush TLB entry on local CPU 6029 */ 6030 vtag_flushpage(addr, (uint64_t)ksfmmup); 6031 } 6032 6033 while (index != 0) { 6034 index = index >> 1; 6035 if (index != 0) 6036 cons++; 6037 if (index & 0x1) { 6038 pp = PP_GROUPLEADER(pp, cons); 6039 goto retry; 6040 } 6041 } 6042 } 6043 6044 #ifdef DEBUG 6045 6046 #define N_PRLE 1024 6047 struct prle { 6048 page_t *targ; 6049 page_t *repl; 6050 int status; 6051 int pausecpus; 6052 hrtime_t whence; 6053 }; 6054 6055 static struct prle page_relocate_log[N_PRLE]; 6056 static int prl_entry; 6057 static kmutex_t prl_mutex; 6058 6059 #define PAGE_RELOCATE_LOG(t, r, s, p) \ 6060 mutex_enter(&prl_mutex); \ 6061 page_relocate_log[prl_entry].targ = *(t); \ 6062 page_relocate_log[prl_entry].repl = *(r); \ 6063 page_relocate_log[prl_entry].status = (s); \ 6064 page_relocate_log[prl_entry].pausecpus = (p); \ 6065 page_relocate_log[prl_entry].whence = gethrtime(); \ 6066 prl_entry = (prl_entry == (N_PRLE - 1))? 0 : prl_entry + 1; \ 6067 mutex_exit(&prl_mutex); 6068 6069 #else /* !DEBUG */ 6070 #define PAGE_RELOCATE_LOG(t, r, s, p) 6071 #endif 6072 6073 /* 6074 * Core Kernel Page Relocation Algorithm 6075 * 6076 * Input: 6077 * 6078 * target : constituent pages are SE_EXCL locked. 6079 * replacement: constituent pages are SE_EXCL locked. 6080 * 6081 * Output: 6082 * 6083 * nrelocp: number of pages relocated 6084 */ 6085 int 6086 hat_page_relocate(page_t **target, page_t **replacement, spgcnt_t *nrelocp) 6087 { 6088 page_t *targ, *repl; 6089 page_t *tpp, *rpp; 6090 kmutex_t *low, *high; 6091 spgcnt_t npages, i; 6092 page_t *pl = NULL; 6093 int old_pil; 6094 cpuset_t cpuset; 6095 int cap_cpus; 6096 int ret; 6097 6098 if (hat_kpr_enabled == 0 || !kcage_on || PP_ISNORELOC(*target)) { 6099 PAGE_RELOCATE_LOG(target, replacement, EAGAIN, -1); 6100 return (EAGAIN); 6101 } 6102 6103 mutex_enter(&kpr_mutex); 6104 kreloc_thread = curthread; 6105 6106 targ = *target; 6107 repl = *replacement; 6108 ASSERT(repl != NULL); 6109 ASSERT(targ->p_szc == repl->p_szc); 6110 6111 npages = page_get_pagecnt(targ->p_szc); 6112 6113 /* 6114 * unload VA<->PA mappings that are not locked 6115 */ 6116 tpp = targ; 6117 for (i = 0; i < npages; i++) { 6118 (void) hat_pageunload(tpp, SFMMU_KERNEL_RELOC); 6119 tpp++; 6120 } 6121 6122 /* 6123 * Do "presuspend" callbacks, in a context from which we can still 6124 * block as needed. Note that we don't hold the mapping list lock 6125 * of "targ" at this point due to potential locking order issues; 6126 * we assume that between the hat_pageunload() above and holding 6127 * the SE_EXCL lock that the mapping list *cannot* change at this 6128 * point. 6129 */ 6130 ret = hat_pageprocess_precallbacks(targ, HAT_PRESUSPEND, &cap_cpus); 6131 if (ret != 0) { 6132 /* 6133 * EIO translates to fatal error, for all others cleanup 6134 * and return EAGAIN. 
6135 */ 6136 ASSERT(ret != EIO); 6137 hat_pageprocess_postcallbacks(targ, HAT_POSTUNSUSPEND); 6138 PAGE_RELOCATE_LOG(target, replacement, ret, -1); 6139 kreloc_thread = NULL; 6140 mutex_exit(&kpr_mutex); 6141 return (EAGAIN); 6142 } 6143 6144 /* 6145 * acquire p_mapping list lock for both the target and replacement 6146 * root pages. 6147 * 6148 * low and high refer to the need to grab the mlist locks in a 6149 * specific order in order to prevent race conditions. Thus the 6150 * lower lock must be grabbed before the higher lock. 6151 * 6152 * This will block hat_unload's accessing p_mapping list. Since 6153 * we have SE_EXCL lock, hat_memload and hat_pageunload will be 6154 * blocked. Thus, no one else will be accessing the p_mapping list 6155 * while we suspend and reload the locked mapping below. 6156 */ 6157 tpp = targ; 6158 rpp = repl; 6159 sfmmu_mlist_reloc_enter(tpp, rpp, &low, &high); 6160 6161 kpreempt_disable(); 6162 6163 #ifdef VAC 6164 /* 6165 * If the replacement page is of a different virtual color 6166 * than the page it is replacing, we need to handle the VAC 6167 * consistency for it just as we would if we were setting up 6168 * a new mapping to a page. 6169 */ 6170 if ((tpp->p_szc == 0) && (PP_GET_VCOLOR(rpp) != NO_VCOLOR)) { 6171 if (tpp->p_vcolor != rpp->p_vcolor) { 6172 sfmmu_cache_flushcolor(PP_GET_VCOLOR(rpp), 6173 rpp->p_pagenum); 6174 } 6175 } 6176 #endif 6177 6178 /* 6179 * We raise our PIL to 13 so that we don't get captured by 6180 * another CPU or pinned by an interrupt thread. We can't go to 6181 * PIL 14 since the nexus driver(s) may need to interrupt at 6182 * that level in the case of IOMMU pseudo mappings. 6183 */ 6184 cpuset = cpu_ready_set; 6185 CPUSET_DEL(cpuset, CPU->cpu_id); 6186 if (!cap_cpus || CPUSET_ISNULL(cpuset)) { 6187 old_pil = splr(XCALL_PIL); 6188 } else { 6189 old_pil = -1; 6190 xc_attention(cpuset); 6191 } 6192 ASSERT(getpil() == XCALL_PIL); 6193 6194 /* 6195 * Now do suspend callbacks. In the case of an IOMMU mapping 6196 * this will suspend all DMA activity to the page while it is 6197 * being relocated. Since we are well above LOCK_LEVEL and CPUs 6198 * may be captured at this point we should have acquired any needed 6199 * locks in the presuspend callback. 6200 */ 6201 ret = hat_pageprocess_precallbacks(targ, HAT_SUSPEND, NULL); 6202 if (ret != 0) { 6203 repl = targ; 6204 goto suspend_fail; 6205 } 6206 6207 /* 6208 * Raise the PIL yet again, this time to block all high-level 6209 * interrupts on this CPU. This is necessary to prevent an 6210 * interrupt routine from pinning the thread which holds the 6211 * mapping suspended and then touching the suspended page. 6212 * 6213 * Once the page is suspended we also need to be careful to 6214 * avoid calling any functions which touch any seg_kmem memory 6215 * since that memory may be backed by the very page we are 6216 * relocating in here! 6217 */ 6218 hat_pagesuspend(targ); 6219 6220 /* 6221 * Now that we are confident everybody has stopped using this page, 6222 * copy the page contents. Note we use a physical copy to prevent 6223 * locking issues and to avoid fpRAS because we can't handle it in 6224 * this context. 6225 */ 6226 for (i = 0; i < npages; i++, tpp++, rpp++) { 6227 /* 6228 * Copy the contents of the page. 6229 */ 6230 ppcopy_kernel(tpp, rpp); 6231 } 6232 6233 tpp = targ; 6234 rpp = repl; 6235 for (i = 0; i < npages; i++, tpp++, rpp++) { 6236 /* 6237 * Copy attributes. VAC consistency was handled above, 6238 * if required. 
6239 */ 6240 rpp->p_nrm = tpp->p_nrm; 6241 tpp->p_nrm = 0; 6242 rpp->p_index = tpp->p_index; 6243 tpp->p_index = 0; 6244 #ifdef VAC 6245 rpp->p_vcolor = tpp->p_vcolor; 6246 #endif 6247 } 6248 6249 /* 6250 * First, unsuspend the page, if we set the suspend bit, and transfer 6251 * the mapping list from the target page to the replacement page. 6252 * Next process postcallbacks; since pa_hment's are linked only to the 6253 * p_mapping list of root page, we don't iterate over the constituent 6254 * pages. 6255 */ 6256 hat_pagereload(targ, repl); 6257 6258 suspend_fail: 6259 hat_pageprocess_postcallbacks(repl, HAT_UNSUSPEND); 6260 6261 /* 6262 * Now lower our PIL and release any captured CPUs since we 6263 * are out of the "danger zone". After this it will again be 6264 * safe to acquire adaptive mutex locks, or to drop them... 6265 */ 6266 if (old_pil != -1) { 6267 splx(old_pil); 6268 } else { 6269 xc_dismissed(cpuset); 6270 } 6271 6272 kpreempt_enable(); 6273 6274 sfmmu_mlist_reloc_exit(low, high); 6275 6276 /* 6277 * Postsuspend callbacks should drop any locks held across 6278 * the suspend callbacks. As before, we don't hold the mapping 6279 * list lock at this point.. our assumption is that the mapping 6280 * list still can't change due to our holding SE_EXCL lock and 6281 * there being no unlocked mappings left. Hence the restriction 6282 * on calling context to hat_delete_callback() 6283 */ 6284 hat_pageprocess_postcallbacks(repl, HAT_POSTUNSUSPEND); 6285 if (ret != 0) { 6286 /* 6287 * The second presuspend call failed: we got here through 6288 * the suspend_fail label above. 6289 */ 6290 ASSERT(ret != EIO); 6291 PAGE_RELOCATE_LOG(target, replacement, ret, cap_cpus); 6292 kreloc_thread = NULL; 6293 mutex_exit(&kpr_mutex); 6294 return (EAGAIN); 6295 } 6296 6297 /* 6298 * Now that we're out of the performance critical section we can 6299 * take care of updating the hash table, since we still 6300 * hold all the pages locked SE_EXCL at this point we 6301 * needn't worry about things changing out from under us. 6302 */ 6303 tpp = targ; 6304 rpp = repl; 6305 for (i = 0; i < npages; i++, tpp++, rpp++) { 6306 6307 /* 6308 * replace targ with replacement in page_hash table 6309 */ 6310 targ = tpp; 6311 page_relocate_hash(rpp, targ); 6312 6313 /* 6314 * concatenate target; caller of platform_page_relocate() 6315 * expects target to be concatenated after returning. 6316 */ 6317 ASSERT(targ->p_next == targ); 6318 ASSERT(targ->p_prev == targ); 6319 page_list_concat(&pl, &targ); 6320 } 6321 6322 ASSERT(*target == pl); 6323 *nrelocp = npages; 6324 PAGE_RELOCATE_LOG(target, replacement, 0, cap_cpus); 6325 kreloc_thread = NULL; 6326 mutex_exit(&kpr_mutex); 6327 return (0); 6328 } 6329 6330 /* 6331 * Called when stray pa_hments are found attached to a page which is 6332 * being freed. Notify the subsystem which attached the pa_hment of 6333 * the error if it registered a suitable handler, else panic. 6334 */ 6335 static void 6336 sfmmu_pahment_leaked(struct pa_hment *pahmep) 6337 { 6338 id_t cb_id = pahmep->cb_id; 6339 6340 ASSERT(cb_id >= (id_t)0 && cb_id < sfmmu_cb_nextid); 6341 if (sfmmu_cb_table[cb_id].errhandler != NULL) { 6342 if (sfmmu_cb_table[cb_id].errhandler(pahmep->addr, pahmep->len, 6343 HAT_CB_ERR_LEAKED, pahmep->pvt) == 0) 6344 return; /* non-fatal */ 6345 } 6346 panic("pa_hment leaked: 0x%p", pahmep); 6347 } 6348 6349 /* 6350 * Remove all mappings to page 'pp'. 
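* When forceflag is SFMMU_KERNEL_RELOC (the hat_page_relocate() case),
* locked kernel mappings are deliberately left in place; they are
* suspended and later reloaded by the relocation code rather than
* unloaded here.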
6351 */ 6352 int 6353 hat_pageunload(struct page *pp, uint_t forceflag) 6354 { 6355 struct page *origpp = pp; 6356 struct sf_hment *sfhme, *tmphme; 6357 struct hme_blk *hmeblkp; 6358 kmutex_t *pml; 6359 #ifdef VAC 6360 kmutex_t *pmtx; 6361 #endif 6362 cpuset_t cpuset, tset; 6363 int index, cons; 6364 int xhme_blks; 6365 int pa_hments; 6366 6367 ASSERT(PAGE_EXCL(pp)); 6368 6369 retry_xhat: 6370 tmphme = NULL; 6371 xhme_blks = 0; 6372 pa_hments = 0; 6373 CPUSET_ZERO(cpuset); 6374 6375 pml = sfmmu_mlist_enter(pp); 6376 6377 #ifdef VAC 6378 if (pp->p_kpmref) 6379 sfmmu_kpm_pageunload(pp); 6380 ASSERT(!PP_ISMAPPED_KPM(pp)); 6381 #endif 6382 6383 index = PP_MAPINDEX(pp); 6384 cons = TTE8K; 6385 retry: 6386 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 6387 tmphme = sfhme->hme_next; 6388 6389 if (IS_PAHME(sfhme)) { 6390 ASSERT(sfhme->hme_data != NULL); 6391 pa_hments++; 6392 continue; 6393 } 6394 6395 hmeblkp = sfmmu_hmetohblk(sfhme); 6396 if (hmeblkp->hblk_xhat_bit) { 6397 struct xhat_hme_blk *xblk = 6398 (struct xhat_hme_blk *)hmeblkp; 6399 6400 (void) XHAT_PAGEUNLOAD(xblk->xhat_hme_blk_hat, 6401 pp, forceflag, XBLK2PROVBLK(xblk)); 6402 6403 xhme_blks = 1; 6404 continue; 6405 } 6406 6407 /* 6408 * If there are kernel mappings don't unload them, they will 6409 * be suspended. 6410 */ 6411 if (forceflag == SFMMU_KERNEL_RELOC && hmeblkp->hblk_lckcnt && 6412 hmeblkp->hblk_tag.htag_id == ksfmmup) 6413 continue; 6414 6415 tset = sfmmu_pageunload(pp, sfhme, cons); 6416 CPUSET_OR(cpuset, tset); 6417 } 6418 6419 while (index != 0) { 6420 index = index >> 1; 6421 if (index != 0) 6422 cons++; 6423 if (index & 0x1) { 6424 /* Go to leading page */ 6425 pp = PP_GROUPLEADER(pp, cons); 6426 ASSERT(sfmmu_mlist_held(pp)); 6427 goto retry; 6428 } 6429 } 6430 6431 /* 6432 * cpuset may be empty if the page was only mapped by segkpm, 6433 * in which case we won't actually cross-trap. 6434 */ 6435 xt_sync(cpuset); 6436 6437 /* 6438 * The page should have no mappings at this point, unless 6439 * we were called from hat_page_relocate() in which case we 6440 * leave the locked mappings which will be suspended later. 6441 */ 6442 ASSERT(!PP_ISMAPPED(origpp) || xhme_blks || pa_hments || 6443 (forceflag == SFMMU_KERNEL_RELOC)); 6444 6445 #ifdef VAC 6446 if (PP_ISTNC(pp)) { 6447 if (cons == TTE8K) { 6448 pmtx = sfmmu_page_enter(pp); 6449 PP_CLRTNC(pp); 6450 sfmmu_page_exit(pmtx); 6451 } else { 6452 conv_tnc(pp, cons); 6453 } 6454 } 6455 #endif /* VAC */ 6456 6457 if (pa_hments && forceflag != SFMMU_KERNEL_RELOC) { 6458 /* 6459 * Unlink any pa_hments and free them, calling back 6460 * the responsible subsystem to notify it of the error. 6461 * This can occur in situations such as drivers leaking 6462 * DMA handles: naughty, but common enough that we'd like 6463 * to keep the system running rather than bringing it 6464 * down with an obscure error like "pa_hment leaked" 6465 * which doesn't aid the user in debugging their driver. 6466 */ 6467 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 6468 tmphme = sfhme->hme_next; 6469 if (IS_PAHME(sfhme)) { 6470 struct pa_hment *pahmep = sfhme->hme_data; 6471 sfmmu_pahment_leaked(pahmep); 6472 HME_SUB(sfhme, pp); 6473 kmem_cache_free(pa_hment_cache, pahmep); 6474 } 6475 } 6476 6477 ASSERT(!PP_ISMAPPED(origpp) || xhme_blks); 6478 } 6479 6480 sfmmu_mlist_exit(pml); 6481 6482 /* 6483 * XHAT may not have finished unloading pages 6484 * because some other thread was waiting for 6485 * mlist lock and XHAT_PAGEUNLOAD let it do 6486 * the job. 
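* Hence the retry below: drop the mlist lock and start over from the
* original page until a pass completes with no xhat hme_blks seen.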
6487 */ 6488 if (xhme_blks) { 6489 pp = origpp; 6490 goto retry_xhat; 6491 } 6492 6493 return (0); 6494 } 6495 6496 cpuset_t 6497 sfmmu_pageunload(page_t *pp, struct sf_hment *sfhme, int cons) 6498 { 6499 struct hme_blk *hmeblkp; 6500 sfmmu_t *sfmmup; 6501 tte_t tte, ttemod; 6502 #ifdef DEBUG 6503 tte_t orig_old; 6504 #endif /* DEBUG */ 6505 caddr_t addr; 6506 int ttesz; 6507 int ret; 6508 cpuset_t cpuset; 6509 6510 ASSERT(pp != NULL); 6511 ASSERT(sfmmu_mlist_held(pp)); 6512 ASSERT(pp->p_vnode != &kvp); 6513 6514 CPUSET_ZERO(cpuset); 6515 6516 hmeblkp = sfmmu_hmetohblk(sfhme); 6517 6518 readtte: 6519 sfmmu_copytte(&sfhme->hme_tte, &tte); 6520 if (TTE_IS_VALID(&tte)) { 6521 sfmmup = hblktosfmmu(hmeblkp); 6522 ttesz = get_hblk_ttesz(hmeblkp); 6523 /* 6524 * Only unload mappings of 'cons' size. 6525 */ 6526 if (ttesz != cons) 6527 return (cpuset); 6528 6529 /* 6530 * Note that we have p_mapping lock, but no hash lock here. 6531 * hblk_unload() has to have both hash lock AND p_mapping 6532 * lock before it tries to modify tte. So, the tte could 6533 * not become invalid in the sfmmu_modifytte_try() below. 6534 */ 6535 ttemod = tte; 6536 #ifdef DEBUG 6537 orig_old = tte; 6538 #endif /* DEBUG */ 6539 6540 TTE_SET_INVALID(&ttemod); 6541 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 6542 if (ret < 0) { 6543 #ifdef DEBUG 6544 /* only R/M bits can change. */ 6545 chk_tte(&orig_old, &tte, &ttemod, hmeblkp); 6546 #endif /* DEBUG */ 6547 goto readtte; 6548 } 6549 6550 if (ret == 0) { 6551 panic("pageunload: cas failed?"); 6552 } 6553 6554 addr = tte_to_vaddr(hmeblkp, tte); 6555 6556 sfmmu_ttesync(sfmmup, addr, &tte, pp); 6557 6558 atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -1); 6559 6560 /* 6561 * We need to flush the page from the virtual cache 6562 * in order to prevent a virtual cache alias 6563 * inconsistency. The particular scenario we need 6564 * to worry about is: 6565 * Given: va1 and va2 are two virtual address that 6566 * alias and will map the same physical address. 6567 * 1. mapping exists from va1 to pa and data has 6568 * been read into the cache. 6569 * 2. unload va1. 6570 * 3. load va2 and modify data using va2. 6571 * 4 unload va2. 6572 * 5. load va1 and reference data. Unless we flush 6573 * the data cache when we unload we will get 6574 * stale data. 6575 * This scenario is taken care of by using virtual 6576 * page coloring. 6577 */ 6578 if (sfmmup->sfmmu_ismhat) { 6579 /* 6580 * Flush TSBs, TLBs and caches 6581 * of every process 6582 * sharing this ism segment. 6583 */ 6584 sfmmu_hat_lock_all(); 6585 mutex_enter(&ism_mlist_lock); 6586 kpreempt_disable(); 6587 if (do_virtual_coloring) 6588 sfmmu_ismtlbcache_demap(addr, sfmmup, hmeblkp, 6589 pp->p_pagenum, CACHE_NO_FLUSH); 6590 else 6591 sfmmu_ismtlbcache_demap(addr, sfmmup, hmeblkp, 6592 pp->p_pagenum, CACHE_FLUSH); 6593 kpreempt_enable(); 6594 mutex_exit(&ism_mlist_lock); 6595 sfmmu_hat_unlock_all(); 6596 cpuset = cpu_ready_set; 6597 } else if (do_virtual_coloring) { 6598 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 6599 cpuset = sfmmup->sfmmu_cpusran; 6600 } else { 6601 sfmmu_tlbcache_demap(addr, sfmmup, hmeblkp, 6602 pp->p_pagenum, 0, FLUSH_NECESSARY_CPUS, 6603 CACHE_FLUSH, 0); 6604 cpuset = sfmmup->sfmmu_cpusran; 6605 } 6606 6607 /* 6608 * Hme_sub has to run after ttesync() and a_rss update. 6609 * See hblk_unload(). 
6610 */ 6611 HME_SUB(sfhme, pp); 6612 membar_stst(); 6613 6614 /* 6615 * We can not make ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS) 6616 * since pteload may have done a HME_ADD() right after 6617 * we did the HME_SUB() above. Hmecnt is now maintained 6618 * by cas only. no lock guranteed its value. The only 6619 * gurantee we have is the hmecnt should not be less than 6620 * what it should be so the hblk will not be taken away. 6621 * It's also important that we decremented the hmecnt after 6622 * we are done with hmeblkp so that this hmeblk won't be 6623 * stolen. 6624 */ 6625 ASSERT(hmeblkp->hblk_hmecnt > 0); 6626 ASSERT(hmeblkp->hblk_vcnt > 0); 6627 atomic_add_16(&hmeblkp->hblk_vcnt, -1); 6628 atomic_add_16(&hmeblkp->hblk_hmecnt, -1); 6629 /* 6630 * This is bug 4063182. 6631 * XXX: fixme 6632 * ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt || 6633 * !hmeblkp->hblk_lckcnt); 6634 */ 6635 } else { 6636 panic("invalid tte? pp %p &tte %p", 6637 (void *)pp, (void *)&tte); 6638 } 6639 6640 return (cpuset); 6641 } 6642 6643 /* 6644 * While relocating a kernel page, this function will move the mappings 6645 * from tpp to dpp and modify any associated data with these mappings. 6646 * It also unsuspends the suspended kernel mapping. 6647 */ 6648 static void 6649 hat_pagereload(struct page *tpp, struct page *dpp) 6650 { 6651 struct sf_hment *sfhme; 6652 tte_t tte, ttemod; 6653 int index, cons; 6654 6655 ASSERT(getpil() == PIL_MAX); 6656 ASSERT(sfmmu_mlist_held(tpp)); 6657 ASSERT(sfmmu_mlist_held(dpp)); 6658 6659 index = PP_MAPINDEX(tpp); 6660 cons = TTE8K; 6661 6662 /* Update real mappings to the page */ 6663 retry: 6664 for (sfhme = tpp->p_mapping; sfhme != NULL; sfhme = sfhme->hme_next) { 6665 if (IS_PAHME(sfhme)) 6666 continue; 6667 sfmmu_copytte(&sfhme->hme_tte, &tte); 6668 ttemod = tte; 6669 6670 /* 6671 * replace old pfn with new pfn in TTE 6672 */ 6673 PFN_TO_TTE(ttemod, dpp->p_pagenum); 6674 6675 /* 6676 * clear suspend bit 6677 */ 6678 ASSERT(TTE_IS_SUSPEND(&ttemod)); 6679 TTE_CLR_SUSPEND(&ttemod); 6680 6681 if (sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte) < 0) 6682 panic("hat_pagereload(): sfmmu_modifytte_try() failed"); 6683 6684 /* 6685 * set hme_page point to new page 6686 */ 6687 sfhme->hme_page = dpp; 6688 } 6689 6690 /* 6691 * move p_mapping list from old page to new page 6692 */ 6693 dpp->p_mapping = tpp->p_mapping; 6694 tpp->p_mapping = NULL; 6695 dpp->p_share = tpp->p_share; 6696 tpp->p_share = 0; 6697 6698 while (index != 0) { 6699 index = index >> 1; 6700 if (index != 0) 6701 cons++; 6702 if (index & 0x1) { 6703 tpp = PP_GROUPLEADER(tpp, cons); 6704 dpp = PP_GROUPLEADER(dpp, cons); 6705 goto retry; 6706 } 6707 } 6708 6709 if (dtrace_kreloc_fini) 6710 (*dtrace_kreloc_fini)(); 6711 mutex_exit(&kpr_suspendlock); 6712 } 6713 6714 uint_t 6715 hat_pagesync(struct page *pp, uint_t clearflag) 6716 { 6717 struct sf_hment *sfhme, *tmphme = NULL; 6718 struct hme_blk *hmeblkp; 6719 kmutex_t *pml; 6720 cpuset_t cpuset, tset; 6721 int index, cons; 6722 extern ulong_t po_share; 6723 page_t *save_pp = pp; 6724 6725 CPUSET_ZERO(cpuset); 6726 6727 if (PP_ISRO(pp) && (clearflag & HAT_SYNC_STOPON_MOD)) { 6728 return (PP_GENERIC_ATTR(pp)); 6729 } 6730 6731 if ((clearflag == (HAT_SYNC_STOPON_REF | HAT_SYNC_DONTZERO)) && 6732 PP_ISREF(pp)) { 6733 return (PP_GENERIC_ATTR(pp)); 6734 } 6735 6736 if ((clearflag == (HAT_SYNC_STOPON_MOD | HAT_SYNC_DONTZERO)) && 6737 PP_ISMOD(pp)) { 6738 return (PP_GENERIC_ATTR(pp)); 6739 } 6740 6741 if ((clearflag & HAT_SYNC_STOPON_SHARED) != 0 && 6742 (pp->p_share 
> po_share) && 6743 !(clearflag & HAT_SYNC_ZERORM)) { 6744 if (PP_ISRO(pp)) 6745 hat_page_setattr(pp, P_REF); 6746 return (PP_GENERIC_ATTR(pp)); 6747 } 6748 6749 clearflag &= ~HAT_SYNC_STOPON_SHARED; 6750 pml = sfmmu_mlist_enter(pp); 6751 index = PP_MAPINDEX(pp); 6752 cons = TTE8K; 6753 retry: 6754 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 6755 /* 6756 * We need to save the next hment on the list since 6757 * it is possible for pagesync to remove an invalid hment 6758 * from the list. 6759 */ 6760 tmphme = sfhme->hme_next; 6761 /* 6762 * If we are looking for large mappings and this hme doesn't 6763 * reach the range we are seeking, just ignore its. 6764 */ 6765 hmeblkp = sfmmu_hmetohblk(sfhme); 6766 if (hmeblkp->hblk_xhat_bit) 6767 continue; 6768 6769 if (hme_size(sfhme) < cons) 6770 continue; 6771 tset = sfmmu_pagesync(pp, sfhme, 6772 clearflag & ~HAT_SYNC_STOPON_RM); 6773 CPUSET_OR(cpuset, tset); 6774 /* 6775 * If clearflag is HAT_SYNC_DONTZERO, break out as soon 6776 * as the "ref" or "mod" is set. 6777 */ 6778 if ((clearflag & ~HAT_SYNC_STOPON_RM) == HAT_SYNC_DONTZERO && 6779 ((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp)) || 6780 ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp))) { 6781 index = 0; 6782 break; 6783 } 6784 } 6785 6786 while (index) { 6787 index = index >> 1; 6788 cons++; 6789 if (index & 0x1) { 6790 /* Go to leading page */ 6791 pp = PP_GROUPLEADER(pp, cons); 6792 goto retry; 6793 } 6794 } 6795 6796 xt_sync(cpuset); 6797 sfmmu_mlist_exit(pml); 6798 return (PP_GENERIC_ATTR(save_pp)); 6799 } 6800 6801 /* 6802 * Get all the hardware dependent attributes for a page struct 6803 */ 6804 static cpuset_t 6805 sfmmu_pagesync(struct page *pp, struct sf_hment *sfhme, 6806 uint_t clearflag) 6807 { 6808 caddr_t addr; 6809 tte_t tte, ttemod; 6810 struct hme_blk *hmeblkp; 6811 int ret; 6812 sfmmu_t *sfmmup; 6813 cpuset_t cpuset; 6814 6815 ASSERT(pp != NULL); 6816 ASSERT(sfmmu_mlist_held(pp)); 6817 ASSERT((clearflag == HAT_SYNC_DONTZERO) || 6818 (clearflag == HAT_SYNC_ZERORM)); 6819 6820 SFMMU_STAT(sf_pagesync); 6821 6822 CPUSET_ZERO(cpuset); 6823 6824 sfmmu_pagesync_retry: 6825 6826 sfmmu_copytte(&sfhme->hme_tte, &tte); 6827 if (TTE_IS_VALID(&tte)) { 6828 hmeblkp = sfmmu_hmetohblk(sfhme); 6829 sfmmup = hblktosfmmu(hmeblkp); 6830 addr = tte_to_vaddr(hmeblkp, tte); 6831 if (clearflag == HAT_SYNC_ZERORM) { 6832 ttemod = tte; 6833 TTE_CLR_RM(&ttemod); 6834 ret = sfmmu_modifytte_try(&tte, &ttemod, 6835 &sfhme->hme_tte); 6836 if (ret < 0) { 6837 /* 6838 * cas failed and the new value is not what 6839 * we want. 6840 */ 6841 goto sfmmu_pagesync_retry; 6842 } 6843 6844 if (ret > 0) { 6845 /* we win the cas */ 6846 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 6847 cpuset = sfmmup->sfmmu_cpusran; 6848 } 6849 } 6850 6851 sfmmu_ttesync(sfmmup, addr, &tte, pp); 6852 } 6853 return (cpuset); 6854 } 6855 6856 /* 6857 * Remove write permission from a mappings to a page, so that 6858 * we can detect the next modification of it. This requires modifying 6859 * the TTE then invalidating (demap) any TLB entry using that TTE. 6860 * This code is similar to sfmmu_pagesync(). 
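* Roughly: once the writable and mod bits are cleared and the stale
* TLB entry is demapped, the next store through this mapping takes a
* protection fault, which is how a later modification of the page
* gets noticed.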
6861 */ 6862 static cpuset_t 6863 sfmmu_pageclrwrt(struct page *pp, struct sf_hment *sfhme) 6864 { 6865 caddr_t addr; 6866 tte_t tte; 6867 tte_t ttemod; 6868 struct hme_blk *hmeblkp; 6869 int ret; 6870 sfmmu_t *sfmmup; 6871 cpuset_t cpuset; 6872 6873 ASSERT(pp != NULL); 6874 ASSERT(sfmmu_mlist_held(pp)); 6875 6876 CPUSET_ZERO(cpuset); 6877 SFMMU_STAT(sf_clrwrt); 6878 6879 retry: 6880 6881 sfmmu_copytte(&sfhme->hme_tte, &tte); 6882 if (TTE_IS_VALID(&tte) && TTE_IS_WRITABLE(&tte)) { 6883 hmeblkp = sfmmu_hmetohblk(sfhme); 6884 6885 /* 6886 * xhat mappings should never be to a VMODSORT page. 6887 */ 6888 ASSERT(hmeblkp->hblk_xhat_bit == 0); 6889 6890 sfmmup = hblktosfmmu(hmeblkp); 6891 addr = tte_to_vaddr(hmeblkp, tte); 6892 6893 ttemod = tte; 6894 TTE_CLR_WRT(&ttemod); 6895 TTE_CLR_MOD(&ttemod); 6896 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 6897 6898 /* 6899 * if cas failed and the new value is not what 6900 * we want retry 6901 */ 6902 if (ret < 0) 6903 goto retry; 6904 6905 /* we win the cas */ 6906 if (ret > 0) { 6907 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 6908 cpuset = sfmmup->sfmmu_cpusran; 6909 } 6910 } 6911 6912 return (cpuset); 6913 } 6914 6915 /* 6916 * Walk all mappings of a page, removing write permission and clearing the 6917 * ref/mod bits. This code is similar to hat_pagesync() 6918 */ 6919 static void 6920 hat_page_clrwrt(page_t *pp) 6921 { 6922 struct sf_hment *sfhme; 6923 struct sf_hment *tmphme = NULL; 6924 kmutex_t *pml; 6925 cpuset_t cpuset; 6926 cpuset_t tset; 6927 int index; 6928 int cons; 6929 6930 CPUSET_ZERO(cpuset); 6931 6932 pml = sfmmu_mlist_enter(pp); 6933 index = PP_MAPINDEX(pp); 6934 cons = TTE8K; 6935 retry: 6936 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 6937 tmphme = sfhme->hme_next; 6938 6939 /* 6940 * If we are looking for large mappings and this hme doesn't 6941 * reach the range we are seeking, just ignore its. 6942 */ 6943 6944 if (hme_size(sfhme) < cons) 6945 continue; 6946 6947 tset = sfmmu_pageclrwrt(pp, sfhme); 6948 CPUSET_OR(cpuset, tset); 6949 } 6950 6951 while (index) { 6952 index = index >> 1; 6953 cons++; 6954 if (index & 0x1) { 6955 /* Go to leading page */ 6956 pp = PP_GROUPLEADER(pp, cons); 6957 goto retry; 6958 } 6959 } 6960 6961 xt_sync(cpuset); 6962 sfmmu_mlist_exit(pml); 6963 } 6964 6965 /* 6966 * Set the given REF/MOD/RO bits for the given page. 6967 * For a vnode with a sorted v_pages list, we need to change 6968 * the attributes and the v_pages list together under page_vnode_mutex. 6969 */ 6970 void 6971 hat_page_setattr(page_t *pp, uint_t flag) 6972 { 6973 vnode_t *vp = pp->p_vnode; 6974 page_t **listp; 6975 kmutex_t *pmtx; 6976 kmutex_t *vphm = NULL; 6977 6978 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 6979 6980 /* 6981 * nothing to do if attribute already set 6982 */ 6983 if ((pp->p_nrm & flag) == flag) 6984 return; 6985 6986 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) { 6987 vphm = page_vnode_mutex(vp); 6988 mutex_enter(vphm); 6989 } 6990 6991 pmtx = sfmmu_page_enter(pp); 6992 pp->p_nrm |= flag; 6993 sfmmu_page_exit(pmtx); 6994 6995 if (vphm != NULL) { 6996 /* 6997 * Some File Systems examine v_pages for NULL w/o 6998 * grabbing the vphm mutex. Must not let it become NULL when 6999 * pp is the only page on the list. 
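* The re-insertion below moves a page that is being marked modified to
* the tail of the VMODSORT vnode's v_pages list (hat_page_clrattr()
* puts a cleaned page back at the head), presumably so that clean and
* dirty pages stay grouped at opposite ends of the list for the file
* system's sync code.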
7000 */ 7001 if (pp->p_vpnext != pp) { 7002 page_vpsub(&vp->v_pages, pp); 7003 if (vp->v_pages != NULL) 7004 listp = &vp->v_pages->p_vpprev->p_vpnext; 7005 else 7006 listp = &vp->v_pages; 7007 page_vpadd(listp, pp); 7008 } 7009 mutex_exit(vphm); 7010 } 7011 } 7012 7013 void 7014 hat_page_clrattr(page_t *pp, uint_t flag) 7015 { 7016 vnode_t *vp = pp->p_vnode; 7017 kmutex_t *vphm = NULL; 7018 kmutex_t *pmtx; 7019 7020 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 7021 7022 /* 7023 * For vnode with a sorted v_pages list, we need to change 7024 * the attributes and the v_pages list together under page_vnode_mutex. 7025 */ 7026 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) { 7027 vphm = page_vnode_mutex(vp); 7028 mutex_enter(vphm); 7029 } 7030 7031 pmtx = sfmmu_page_enter(pp); 7032 pp->p_nrm &= ~flag; 7033 sfmmu_page_exit(pmtx); 7034 7035 if (vphm != NULL) { 7036 /* 7037 * Some File Systems examine v_pages for NULL w/o 7038 * grabbing the vphm mutex. Must not let it become NULL when 7039 * pp is the only page on the list. 7040 */ 7041 if (pp->p_vpnext != pp) { 7042 page_vpsub(&vp->v_pages, pp); 7043 page_vpadd(&vp->v_pages, pp); 7044 } 7045 mutex_exit(vphm); 7046 7047 /* 7048 * VMODSORT works by removing write permissions and getting 7049 * a fault when a page is made dirty. At this point 7050 * we need to remove write permission from all mappings 7051 * to this page. 7052 */ 7053 hat_page_clrwrt(pp); 7054 } 7055 } 7056 7057 7058 uint_t 7059 hat_page_getattr(page_t *pp, uint_t flag) 7060 { 7061 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 7062 return ((uint_t)(pp->p_nrm & flag)); 7063 } 7064 7065 /* 7066 * DEBUG kernels: verify that a kernel va<->pa translation 7067 * is safe by checking the underlying page_t is in a page 7068 * relocation-safe state. 7069 */ 7070 #ifdef DEBUG 7071 void 7072 sfmmu_check_kpfn(pfn_t pfn) 7073 { 7074 page_t *pp; 7075 int index, cons; 7076 7077 if (hat_check_vtop == 0) 7078 return; 7079 7080 if (hat_kpr_enabled == 0 || kvseg.s_base == NULL || panicstr) 7081 return; 7082 7083 pp = page_numtopp_nolock(pfn); 7084 if (!pp) 7085 return; 7086 7087 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp)) 7088 return; 7089 7090 /* 7091 * Handed a large kernel page, we dig up the root page since we 7092 * know the root page might have the lock also. 7093 */ 7094 if (pp->p_szc != 0) { 7095 index = PP_MAPINDEX(pp); 7096 cons = TTE8K; 7097 again: 7098 while (index != 0) { 7099 index >>= 1; 7100 if (index != 0) 7101 cons++; 7102 if (index & 0x1) { 7103 pp = PP_GROUPLEADER(pp, cons); 7104 goto again; 7105 } 7106 } 7107 } 7108 7109 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp)) 7110 return; 7111 7112 /* 7113 * Pages need to be locked or allocated "permanent" (either from 7114 * static_arena arena or explicitly setting PG_NORELOC when calling 7115 * page_create_va()) for VA->PA translations to be valid. 7116 */ 7117 if (!PP_ISNORELOC(pp)) 7118 panic("Illegal VA->PA translation, pp 0x%p not permanent", pp); 7119 else 7120 panic("Illegal VA->PA translation, pp 0x%p not locked", pp); 7121 } 7122 #endif /* DEBUG */ 7123 7124 /* 7125 * Returns a page frame number for a given virtual address. 
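* (A typical caller is a driver translating one of its own kernel
* mappings, e.g. pfn = hat_getpfnum(kas.a_hat, kvaddr); this is the
* documented replacement for the obsolete hat_getkpfnum() below.)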
7126 * Returns PFN_INVALID to indicate an invalid mapping 7127 */ 7128 pfn_t 7129 hat_getpfnum(struct hat *hat, caddr_t addr) 7130 { 7131 pfn_t pfn; 7132 tte_t tte; 7133 7134 /* 7135 * We would like to 7136 * ASSERT(AS_LOCK_HELD(as, &as->a_lock)); 7137 * but we can't because the iommu driver will call this 7138 * routine at interrupt time and it can't grab the as lock 7139 * or it will deadlock: A thread could have the as lock 7140 * and be waiting for io. The io can't complete 7141 * because the interrupt thread is blocked trying to grab 7142 * the as lock. 7143 */ 7144 7145 ASSERT(hat->sfmmu_xhat_provider == NULL); 7146 7147 if (hat == ksfmmup) { 7148 if (segkpm && IS_KPM_ADDR(addr)) 7149 return (sfmmu_kpm_vatopfn(addr)); 7150 while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte)) 7151 == PFN_SUSPENDED) { 7152 sfmmu_vatopfn_suspended(addr, ksfmmup, &tte); 7153 } 7154 sfmmu_check_kpfn(pfn); 7155 return (pfn); 7156 } else { 7157 return (sfmmu_uvatopfn(addr, hat)); 7158 } 7159 } 7160 7161 /* 7162 * hat_getkpfnum() is an obsolete DDI routine, and its use is discouraged. 7163 * Use hat_getpfnum(kas.a_hat, ...) instead. 7164 * 7165 * We'd like to return PFN_INVALID if the mappings have underlying page_t's 7166 * but can't right now due to the fact that some software has grown to use 7167 * this interface incorrectly. So for now when the interface is misused, 7168 * return a warning to the user that in the future it won't work in the 7169 * way they're abusing it, and carry on (after disabling page relocation). 7170 */ 7171 pfn_t 7172 hat_getkpfnum(caddr_t addr) 7173 { 7174 pfn_t pfn; 7175 tte_t tte; 7176 int badcaller = 0; 7177 extern int segkmem_reloc; 7178 7179 if (segkpm && IS_KPM_ADDR(addr)) { 7180 badcaller = 1; 7181 pfn = sfmmu_kpm_vatopfn(addr); 7182 } else { 7183 while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte)) 7184 == PFN_SUSPENDED) { 7185 sfmmu_vatopfn_suspended(addr, ksfmmup, &tte); 7186 } 7187 badcaller = pf_is_memory(pfn); 7188 } 7189 7190 if (badcaller) { 7191 /* 7192 * We can't return PFN_INVALID or the caller may panic 7193 * or corrupt the system. The only alternative is to 7194 * disable page relocation at this point for all kernel 7195 * memory. This will impact any callers of page_relocate() 7196 * such as FMA or DR. 7197 * 7198 * RFE: Add junk here to spit out an ereport so the sysadmin 7199 * can be advised that he should upgrade his device driver 7200 * so that this doesn't happen. 7201 */ 7202 hat_getkpfnum_badcall(caller()); 7203 if (hat_kpr_enabled && segkmem_reloc) { 7204 hat_kpr_enabled = 0; 7205 segkmem_reloc = 0; 7206 cmn_err(CE_WARN, "Kernel Page Relocation is DISABLED"); 7207 } 7208 } 7209 return (pfn); 7210 } 7211 7212 pfn_t 7213 sfmmu_uvatopfn(caddr_t vaddr, struct hat *sfmmup) 7214 { 7215 struct hmehash_bucket *hmebp; 7216 hmeblk_tag hblktag; 7217 int hmeshift, hashno = 1; 7218 struct hme_blk *hmeblkp = NULL; 7219 7220 struct sf_hment *sfhmep; 7221 tte_t tte; 7222 pfn_t pfn; 7223 7224 /* support for ISM */ 7225 ism_map_t *ism_map; 7226 ism_blk_t *ism_blkp; 7227 int i; 7228 sfmmu_t *ism_hatid = NULL; 7229 sfmmu_t *locked_hatid = NULL; 7230 7231 7232 ASSERT(sfmmup != ksfmmup); 7233 SFMMU_STAT(sf_user_vtop); 7234 /* 7235 * Set ism_hatid if vaddr falls in a ISM segment. 
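* If it does, the lookup is redirected: sfmmup is switched to the
* shared ISM hat and vaddr is rebased to its offset within the ISM
* segment, so the hash search below runs against the ISM hat's
* mappings.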
7236 */ 7237 ism_blkp = sfmmup->sfmmu_iblk; 7238 if (ism_blkp) { 7239 sfmmu_ismhat_enter(sfmmup, 0); 7240 locked_hatid = sfmmup; 7241 } 7242 while (ism_blkp && ism_hatid == NULL) { 7243 ism_map = ism_blkp->iblk_maps; 7244 for (i = 0; ism_map[i].imap_ismhat && i < ISM_MAP_SLOTS; i++) { 7245 if (vaddr >= ism_start(ism_map[i]) && 7246 vaddr < ism_end(ism_map[i])) { 7247 sfmmup = ism_hatid = ism_map[i].imap_ismhat; 7248 vaddr = (caddr_t)(vaddr - 7249 ism_start(ism_map[i])); 7250 break; 7251 } 7252 } 7253 ism_blkp = ism_blkp->iblk_next; 7254 } 7255 if (locked_hatid) { 7256 sfmmu_ismhat_exit(locked_hatid, 0); 7257 } 7258 7259 hblktag.htag_id = sfmmup; 7260 do { 7261 hmeshift = HME_HASH_SHIFT(hashno); 7262 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift); 7263 hblktag.htag_rehash = hashno; 7264 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift); 7265 7266 SFMMU_HASH_LOCK(hmebp); 7267 7268 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 7269 if (hmeblkp != NULL) { 7270 HBLKTOHME(sfhmep, hmeblkp, vaddr); 7271 sfmmu_copytte(&sfhmep->hme_tte, &tte); 7272 if (TTE_IS_VALID(&tte)) { 7273 pfn = TTE_TO_PFN(vaddr, &tte); 7274 } else { 7275 pfn = PFN_INVALID; 7276 } 7277 SFMMU_HASH_UNLOCK(hmebp); 7278 return (pfn); 7279 } 7280 SFMMU_HASH_UNLOCK(hmebp); 7281 hashno++; 7282 } while (HME_REHASH(sfmmup) && (hashno <= mmu_hashcnt)); 7283 return (PFN_INVALID); 7284 } 7285 7286 7287 /* 7288 * For compatibility with AT&T and later optimizations 7289 */ 7290 /* ARGSUSED */ 7291 void 7292 hat_map(struct hat *hat, caddr_t addr, size_t len, uint_t flags) 7293 { 7294 ASSERT(hat != NULL); 7295 ASSERT(hat->sfmmu_xhat_provider == NULL); 7296 } 7297 7298 /* 7299 * Return the number of mappings to a particular page. 7300 * This number is an approximation of the number of 7301 * people sharing the page. 7302 */ 7303 ulong_t 7304 hat_page_getshare(page_t *pp) 7305 { 7306 page_t *spp = pp; /* start page */ 7307 kmutex_t *pml; 7308 ulong_t cnt; 7309 int index, sz = TTE64K; 7310 7311 /* 7312 * We need to grab the mlist lock to make sure any outstanding 7313 * load/unloads complete. Otherwise we could return zero 7314 * even though the unload(s) hasn't finished yet. 7315 */ 7316 pml = sfmmu_mlist_enter(spp); 7317 cnt = spp->p_share; 7318 7319 #ifdef VAC 7320 if (kpm_enable) 7321 cnt += spp->p_kpmref; 7322 #endif 7323 7324 /* 7325 * If we have any large mappings, we count the number of 7326 * mappings that this large page is part of. 7327 */ 7328 index = PP_MAPINDEX(spp); 7329 index >>= 1; 7330 while (index) { 7331 pp = PP_GROUPLEADER(spp, sz); 7332 if ((index & 0x1) && pp != spp) { 7333 cnt += pp->p_share; 7334 spp = pp; 7335 } 7336 index >>= 1; 7337 sz++; 7338 } 7339 sfmmu_mlist_exit(pml); 7340 return (cnt); 7341 } 7342 7343 /* 7344 * Unload all large mappings to the pp and reset the p_szc field of every 7345 * constituent page according to the remaining mappings. 7346 * 7347 * pp must be locked SE_EXCL. Even though no other constituent pages are 7348 * locked it's legal to unload the large mappings to the pp because all 7349 * constituent pages of large locked mappings have to be locked SE_SHARED. 7350 * This means if we have SE_EXCL lock on one of the constituent pages none of the 7351 * large mappings to pp are locked. 7352 * 7353 * Decrease p_szc field starting from the last constituent page and ending 7354 * with the root page. This method is used because other threads rely on the 7355 * root's p_szc to find the lock to synchronize on.
After a root page_t's p_szc 7356 * is demoted then other threads will succeed in sfmmu_mlspl_enter(). This 7357 * ensures that p_szc changes of the constituent pages appears atomic for all 7358 * threads that use sfmmu_mlspl_enter() to examine p_szc field. 7359 * 7360 * This mechanism is only used for file system pages where it's not always 7361 * possible to get SE_EXCL locks on all constituent pages to demote the size 7362 * code (as is done for anonymous or kernel large pages). 7363 * 7364 * See more comments in front of sfmmu_mlspl_enter(). 7365 */ 7366 void 7367 hat_page_demote(page_t *pp) 7368 { 7369 int index; 7370 int sz; 7371 cpuset_t cpuset; 7372 int sync = 0; 7373 page_t *rootpp; 7374 struct sf_hment *sfhme; 7375 struct sf_hment *tmphme = NULL; 7376 struct hme_blk *hmeblkp; 7377 uint_t pszc; 7378 page_t *lastpp; 7379 cpuset_t tset; 7380 pgcnt_t npgs; 7381 kmutex_t *pml; 7382 kmutex_t *pmtx = NULL; 7383 7384 ASSERT(PAGE_EXCL(pp)); 7385 ASSERT(!PP_ISFREE(pp)); 7386 ASSERT(page_szc_lock_assert(pp)); 7387 pml = sfmmu_mlist_enter(pp); 7388 7389 pszc = pp->p_szc; 7390 if (pszc == 0) { 7391 goto out; 7392 } 7393 7394 index = PP_MAPINDEX(pp) >> 1; 7395 7396 if (index) { 7397 CPUSET_ZERO(cpuset); 7398 sz = TTE64K; 7399 sync = 1; 7400 } 7401 7402 while (index) { 7403 if (!(index & 0x1)) { 7404 index >>= 1; 7405 sz++; 7406 continue; 7407 } 7408 ASSERT(sz <= pszc); 7409 rootpp = PP_GROUPLEADER(pp, sz); 7410 for (sfhme = rootpp->p_mapping; sfhme; sfhme = tmphme) { 7411 tmphme = sfhme->hme_next; 7412 hmeblkp = sfmmu_hmetohblk(sfhme); 7413 if (hme_size(sfhme) != sz) { 7414 continue; 7415 } 7416 if (hmeblkp->hblk_xhat_bit) { 7417 cmn_err(CE_PANIC, 7418 "hat_page_demote: xhat hmeblk"); 7419 } 7420 tset = sfmmu_pageunload(rootpp, sfhme, sz); 7421 CPUSET_OR(cpuset, tset); 7422 } 7423 if (index >>= 1) { 7424 sz++; 7425 } 7426 } 7427 7428 ASSERT(!PP_ISMAPPED_LARGE(pp)); 7429 7430 if (sync) { 7431 xt_sync(cpuset); 7432 #ifdef VAC 7433 if (PP_ISTNC(pp)) { 7434 conv_tnc(rootpp, sz); 7435 } 7436 #endif /* VAC */ 7437 } 7438 7439 pmtx = sfmmu_page_enter(pp); 7440 7441 ASSERT(pp->p_szc == pszc); 7442 rootpp = PP_PAGEROOT(pp); 7443 ASSERT(rootpp->p_szc == pszc); 7444 lastpp = PP_PAGENEXT_N(rootpp, TTEPAGES(pszc) - 1); 7445 7446 while (lastpp != rootpp) { 7447 sz = PP_MAPINDEX(lastpp) ? fnd_mapping_sz(lastpp) : 0; 7448 ASSERT(sz < pszc); 7449 npgs = (sz == 0) ? 1 : TTEPAGES(sz); 7450 ASSERT(P2PHASE(lastpp->p_pagenum, npgs) == npgs - 1); 7451 while (--npgs > 0) { 7452 lastpp->p_szc = (uchar_t)sz; 7453 lastpp = PP_PAGEPREV(lastpp); 7454 } 7455 if (sz) { 7456 /* 7457 * make sure before current root's pszc 7458 * is updated all updates to constituent pages pszc 7459 * fields are globally visible. 7460 */ 7461 membar_producer(); 7462 } 7463 lastpp->p_szc = sz; 7464 ASSERT(IS_P2ALIGNED(lastpp->p_pagenum, TTEPAGES(sz))); 7465 if (lastpp != rootpp) { 7466 lastpp = PP_PAGEPREV(lastpp); 7467 } 7468 } 7469 if (sz == 0) { 7470 /* the loop above doesn't cover this case */ 7471 rootpp->p_szc = 0; 7472 } 7473 out: 7474 ASSERT(pp->p_szc == 0); 7475 if (pmtx != NULL) { 7476 sfmmu_page_exit(pmtx); 7477 } 7478 sfmmu_mlist_exit(pml); 7479 } 7480 7481 /* 7482 * Refresh the HAT ismttecnt[] element for size szc. 7483 * Caller must have set ISM busy flag to prevent mapping 7484 * lists from changing while we're traversing them. 
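* The refreshed value is just the sum of sfmmu_ttecnt[szc] over every
* ISM hat attached to this process; e.g. two DISM segments each backed
* by 100 ttes of this size would leave ismttecnt[szc] at 200.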
7485 */ 7486 pgcnt_t 7487 ism_tsb_entries(sfmmu_t *sfmmup, int szc) 7488 { 7489 ism_blk_t *ism_blkp = sfmmup->sfmmu_iblk; 7490 ism_map_t *ism_map; 7491 pgcnt_t npgs = 0; 7492 int j; 7493 7494 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 7495 for (; ism_blkp != NULL; ism_blkp = ism_blkp->iblk_next) { 7496 ism_map = ism_blkp->iblk_maps; 7497 for (j = 0; ism_map[j].imap_ismhat && j < ISM_MAP_SLOTS; j++) 7498 npgs += ism_map[j].imap_ismhat->sfmmu_ttecnt[szc]; 7499 } 7500 sfmmup->sfmmu_ismttecnt[szc] = npgs; 7501 return (npgs); 7502 } 7503 7504 /* 7505 * Yield the memory claim requirement for an address space. 7506 * 7507 * This is currently implemented as the number of bytes that have active 7508 * hardware translations that have page structures. Therefore, it can 7509 * underestimate the traditional resident set size, eg, if the 7510 * physical page is present and the hardware translation is missing; 7511 * and it can overestimate the rss, eg, if there are active 7512 * translations to a frame buffer with page structs. 7513 * Also, it does not take sharing into account. 7514 * 7515 * Note that we don't acquire locks here since this function is most often 7516 * called from the clock thread. 7517 */ 7518 size_t 7519 hat_get_mapped_size(struct hat *hat) 7520 { 7521 size_t assize = 0; 7522 int i; 7523 7524 if (hat == NULL) 7525 return (0); 7526 7527 ASSERT(hat->sfmmu_xhat_provider == NULL); 7528 7529 for (i = 0; i < mmu_page_sizes; i++) 7530 assize += (pgcnt_t)hat->sfmmu_ttecnt[i] * TTEBYTES(i); 7531 7532 if (hat->sfmmu_iblk == NULL) 7533 return (assize); 7534 7535 for (i = 0; i < mmu_page_sizes; i++) 7536 assize += (pgcnt_t)hat->sfmmu_ismttecnt[i] * TTEBYTES(i); 7537 7538 return (assize); 7539 } 7540 7541 int 7542 hat_stats_enable(struct hat *hat) 7543 { 7544 hatlock_t *hatlockp; 7545 7546 ASSERT(hat->sfmmu_xhat_provider == NULL); 7547 7548 hatlockp = sfmmu_hat_enter(hat); 7549 hat->sfmmu_rmstat++; 7550 sfmmu_hat_exit(hatlockp); 7551 return (1); 7552 } 7553 7554 void 7555 hat_stats_disable(struct hat *hat) 7556 { 7557 hatlock_t *hatlockp; 7558 7559 ASSERT(hat->sfmmu_xhat_provider == NULL); 7560 7561 hatlockp = sfmmu_hat_enter(hat); 7562 hat->sfmmu_rmstat--; 7563 sfmmu_hat_exit(hatlockp); 7564 } 7565 7566 /* 7567 * Routines for entering or removing ourselves from the 7568 * ism_hat's mapping list. 7569 */ 7570 static void 7571 iment_add(struct ism_ment *iment, struct hat *ism_hat) 7572 { 7573 ASSERT(MUTEX_HELD(&ism_mlist_lock)); 7574 7575 iment->iment_prev = NULL; 7576 iment->iment_next = ism_hat->sfmmu_iment; 7577 if (ism_hat->sfmmu_iment) { 7578 ism_hat->sfmmu_iment->iment_prev = iment; 7579 } 7580 ism_hat->sfmmu_iment = iment; 7581 } 7582 7583 static void 7584 iment_sub(struct ism_ment *iment, struct hat *ism_hat) 7585 { 7586 ASSERT(MUTEX_HELD(&ism_mlist_lock)); 7587 7588 if (ism_hat->sfmmu_iment == NULL) { 7589 panic("ism map entry remove - no entries"); 7590 } 7591 7592 if (iment->iment_prev) { 7593 ASSERT(ism_hat->sfmmu_iment != iment); 7594 iment->iment_prev->iment_next = iment->iment_next; 7595 } else { 7596 ASSERT(ism_hat->sfmmu_iment == iment); 7597 ism_hat->sfmmu_iment = iment->iment_next; 7598 } 7599 7600 if (iment->iment_next) { 7601 iment->iment_next->iment_prev = iment->iment_prev; 7602 } 7603 7604 /* 7605 * zero out the entry 7606 */ 7607 iment->iment_next = NULL; 7608 iment->iment_prev = NULL; 7609 iment->iment_hat = NULL; 7610 } 7611 7612 /* 7613 * Hat_share()/unshare() return an (non-zero) error 7614 * when saddr and daddr are not properly aligned. 
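* For example, with ismszc == TTE4M both addr and sptaddr must be 4M
* aligned and len a multiple of 4M; the ISM_ALIGNED() checks below
* enforce this.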
7615 * 7616 * The top level mapping element determines the alignment 7617 * requirement for saddr and daddr, depending on different 7618 * architectures. 7619 * 7620 * When hat_share()/unshare() are not supported, 7621 * HATOP_SHARE()/UNSHARE() return 0 7622 */ 7623 int 7624 hat_share(struct hat *sfmmup, caddr_t addr, 7625 struct hat *ism_hatid, caddr_t sptaddr, size_t len, uint_t ismszc) 7626 { 7627 ism_blk_t *ism_blkp; 7628 ism_blk_t *new_iblk; 7629 ism_map_t *ism_map; 7630 ism_ment_t *ism_ment; 7631 int i, added; 7632 hatlock_t *hatlockp; 7633 int reload_mmu = 0; 7634 uint_t ismshift = page_get_shift(ismszc); 7635 size_t ismpgsz = page_get_pagesize(ismszc); 7636 uint_t ismmask = (uint_t)ismpgsz - 1; 7637 size_t sh_size = ISM_SHIFT(ismshift, len); 7638 ushort_t ismhatflag; 7639 7640 #ifdef DEBUG 7641 caddr_t eaddr = addr + len; 7642 #endif /* DEBUG */ 7643 7644 ASSERT(ism_hatid != NULL && sfmmup != NULL); 7645 ASSERT(sptaddr == ISMID_STARTADDR); 7646 /* 7647 * Check the alignment. 7648 */ 7649 if (!ISM_ALIGNED(ismshift, addr) || !ISM_ALIGNED(ismshift, sptaddr)) 7650 return (EINVAL); 7651 7652 /* 7653 * Check size alignment. 7654 */ 7655 if (!ISM_ALIGNED(ismshift, len)) 7656 return (EINVAL); 7657 7658 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 7659 7660 /* 7661 * Allocate ism_ment for the ism_hat's mapping list, and an 7662 * ism map blk in case we need one. We must do our 7663 * allocations before acquiring locks to prevent a deadlock 7664 * in the kmem allocator on the mapping list lock. 7665 */ 7666 new_iblk = kmem_cache_alloc(ism_blk_cache, KM_SLEEP); 7667 ism_ment = kmem_cache_alloc(ism_ment_cache, KM_SLEEP); 7668 7669 /* 7670 * Serialize ISM mappings with the ISM busy flag, and also the 7671 * trap handlers. 7672 */ 7673 sfmmu_ismhat_enter(sfmmup, 0); 7674 7675 /* 7676 * Allocate an ism map blk if necessary. 7677 */ 7678 if (sfmmup->sfmmu_iblk == NULL) { 7679 sfmmup->sfmmu_iblk = new_iblk; 7680 bzero(new_iblk, sizeof (*new_iblk)); 7681 new_iblk->iblk_nextpa = (uint64_t)-1; 7682 membar_stst(); /* make sure next ptr visible to all CPUs */ 7683 sfmmup->sfmmu_ismblkpa = va_to_pa((caddr_t)new_iblk); 7684 reload_mmu = 1; 7685 new_iblk = NULL; 7686 } 7687 7688 #ifdef DEBUG 7689 /* 7690 * Make sure mapping does not already exist. 7691 */ 7692 ism_blkp = sfmmup->sfmmu_iblk; 7693 while (ism_blkp) { 7694 ism_map = ism_blkp->iblk_maps; 7695 for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) { 7696 if ((addr >= ism_start(ism_map[i]) && 7697 addr < ism_end(ism_map[i])) || 7698 eaddr > ism_start(ism_map[i]) && 7699 eaddr <= ism_end(ism_map[i])) { 7700 panic("sfmmu_share: Already mapped!"); 7701 } 7702 } 7703 ism_blkp = ism_blkp->iblk_next; 7704 } 7705 #endif /* DEBUG */ 7706 7707 ASSERT(ismszc >= TTE4M); 7708 if (ismszc == TTE4M) { 7709 ismhatflag = HAT_4M_FLAG; 7710 } else if (ismszc == TTE32M) { 7711 ismhatflag = HAT_32M_FLAG; 7712 } else if (ismszc == TTE256M) { 7713 ismhatflag = HAT_256M_FLAG; 7714 } 7715 /* 7716 * Add mapping to first available mapping slot. 7717 */ 7718 ism_blkp = sfmmup->sfmmu_iblk; 7719 added = 0; 7720 while (!added) { 7721 ism_map = ism_blkp->iblk_maps; 7722 for (i = 0; i < ISM_MAP_SLOTS; i++) { 7723 if (ism_map[i].imap_ismhat == NULL) { 7724 7725 ism_map[i].imap_ismhat = ism_hatid; 7726 ism_map[i].imap_vb_shift = (ushort_t)ismshift; 7727 ism_map[i].imap_hatflags = ismhatflag; 7728 ism_map[i].imap_sz_mask = ismmask; 7729 /* 7730 * imap_seg is checked in ISM_CHECK to see if 7731 * non-NULL, then other info assumed valid. 
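* This is also why imap_seg is written last, behind the membar_stst()
* below: the tsb miss handler inspects the slot without taking a lock,
* so every other field must be globally visible before imap_seg makes
* the slot appear in use.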
7732 */ 7733 membar_stst(); 7734 ism_map[i].imap_seg = (uintptr_t)addr | sh_size; 7735 ism_map[i].imap_ment = ism_ment; 7736 7737 /* 7738 * Now add ourselves to the ism_hat's 7739 * mapping list. 7740 */ 7741 ism_ment->iment_hat = sfmmup; 7742 ism_ment->iment_base_va = addr; 7743 ism_hatid->sfmmu_ismhat = 1; 7744 ism_hatid->sfmmu_flags = 0; 7745 mutex_enter(&ism_mlist_lock); 7746 iment_add(ism_ment, ism_hatid); 7747 mutex_exit(&ism_mlist_lock); 7748 added = 1; 7749 break; 7750 } 7751 } 7752 if (!added && ism_blkp->iblk_next == NULL) { 7753 ism_blkp->iblk_next = new_iblk; 7754 new_iblk = NULL; 7755 bzero(ism_blkp->iblk_next, 7756 sizeof (*ism_blkp->iblk_next)); 7757 ism_blkp->iblk_next->iblk_nextpa = (uint64_t)-1; 7758 membar_stst(); 7759 ism_blkp->iblk_nextpa = 7760 va_to_pa((caddr_t)ism_blkp->iblk_next); 7761 } 7762 ism_blkp = ism_blkp->iblk_next; 7763 } 7764 7765 /* 7766 * Update our counters for this sfmmup's ism mappings. 7767 */ 7768 for (i = 0; i <= ismszc; i++) { 7769 if (!(disable_ism_large_pages & (1 << i))) 7770 (void) ism_tsb_entries(sfmmup, i); 7771 } 7772 7773 hatlockp = sfmmu_hat_enter(sfmmup); 7774 7775 /* 7776 * For ISM and DISM we do not support 512K pages, so we only 7777 * only search the 4M and 8K/64K hashes for 4 pagesize cpus, and search 7778 * the 256M or 32M, and 4M and 8K/64K hashes for 6 pagesize cpus. 7779 */ 7780 ASSERT((disable_ism_large_pages & (1 << TTE512K)) != 0); 7781 7782 if (ismszc > TTE4M && !SFMMU_FLAGS_ISSET(sfmmup, HAT_4M_FLAG)) 7783 SFMMU_FLAGS_SET(sfmmup, HAT_4M_FLAG); 7784 7785 if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_64K_FLAG)) 7786 SFMMU_FLAGS_SET(sfmmup, HAT_64K_FLAG); 7787 7788 /* 7789 * If we updated the ismblkpa for this HAT or we need 7790 * to start searching the 256M or 32M or 4M hash, we must 7791 * make sure all CPUs running this process reload their 7792 * tsbmiss area. Otherwise they will fail to load the mappings 7793 * in the tsbmiss handler and will loop calling pagefault(). 7794 */ 7795 switch (ismszc) { 7796 case TTE256M: 7797 if (reload_mmu || !SFMMU_FLAGS_ISSET(sfmmup, HAT_256M_FLAG)) { 7798 SFMMU_FLAGS_SET(sfmmup, HAT_256M_FLAG); 7799 sfmmu_sync_mmustate(sfmmup); 7800 } 7801 break; 7802 case TTE32M: 7803 if (reload_mmu || !SFMMU_FLAGS_ISSET(sfmmup, HAT_32M_FLAG)) { 7804 SFMMU_FLAGS_SET(sfmmup, HAT_32M_FLAG); 7805 sfmmu_sync_mmustate(sfmmup); 7806 } 7807 break; 7808 case TTE4M: 7809 if (reload_mmu || !SFMMU_FLAGS_ISSET(sfmmup, HAT_4M_FLAG)) { 7810 SFMMU_FLAGS_SET(sfmmup, HAT_4M_FLAG); 7811 sfmmu_sync_mmustate(sfmmup); 7812 } 7813 break; 7814 default: 7815 break; 7816 } 7817 7818 /* 7819 * Now we can drop the locks. 7820 */ 7821 sfmmu_ismhat_exit(sfmmup, 1); 7822 sfmmu_hat_exit(hatlockp); 7823 7824 /* 7825 * Free up ismblk if we didn't use it. 7826 */ 7827 if (new_iblk != NULL) 7828 kmem_cache_free(ism_blk_cache, new_iblk); 7829 7830 /* 7831 * Check TSB and TLB page sizes. 7832 */ 7833 sfmmu_check_page_sizes(sfmmup, 1); 7834 7835 return (0); 7836 } 7837 7838 /* 7839 * hat_unshare removes exactly one ism_map from 7840 * this process's as. It expects multiple calls 7841 * to hat_unshare for multiple shm segments. 
7842 */ 7843 void 7844 hat_unshare(struct hat *sfmmup, caddr_t addr, size_t len, uint_t ismszc) 7845 { 7846 ism_map_t *ism_map; 7847 ism_ment_t *free_ment = NULL; 7848 ism_blk_t *ism_blkp; 7849 struct hat *ism_hatid; 7850 int found, i; 7851 hatlock_t *hatlockp; 7852 struct tsb_info *tsbinfo; 7853 uint_t ismshift = page_get_shift(ismszc); 7854 size_t sh_size = ISM_SHIFT(ismshift, len); 7855 7856 ASSERT(ISM_ALIGNED(ismshift, addr)); 7857 ASSERT(ISM_ALIGNED(ismshift, len)); 7858 ASSERT(sfmmup != NULL); 7859 ASSERT(sfmmup != ksfmmup); 7860 7861 if (sfmmup->sfmmu_xhat_provider) { 7862 XHAT_UNSHARE(sfmmup, addr, len); 7863 return; 7864 } else { 7865 /* 7866 * This must be a CPU HAT. If the address space has 7867 * XHATs attached, inform all XHATs that ISM segment 7868 * is going away 7869 */ 7870 ASSERT(sfmmup->sfmmu_as != NULL); 7871 if (sfmmup->sfmmu_as->a_xhat != NULL) 7872 xhat_unshare_all(sfmmup->sfmmu_as, addr, len); 7873 } 7874 7875 /* 7876 * Make sure that during the entire time ISM mappings are removed, 7877 * the trap handlers serialize behind us, and that no one else 7878 * can be mucking with ISM mappings. This also lets us get away 7879 * with not doing expensive cross calls to flush the TLB -- we 7880 * just discard the context, flush the entire TSB, and call it 7881 * a day. 7882 */ 7883 sfmmu_ismhat_enter(sfmmup, 0); 7884 7885 /* 7886 * Remove the mapping. 7887 * 7888 * We can't have any holes in the ism map. 7889 * The tsb miss code while searching the ism map will 7890 * stop on an empty map slot. So we must move 7891 * everyone past the hole up 1 if any. 7892 * 7893 * Also empty ism map blks are not freed until the 7894 * process exits. This is to prevent a MT race condition 7895 * between sfmmu_unshare() and sfmmu_tsbmiss_exception(). 7896 */ 7897 found = 0; 7898 ism_blkp = sfmmup->sfmmu_iblk; 7899 while (!found && ism_blkp) { 7900 ism_map = ism_blkp->iblk_maps; 7901 for (i = 0; i < ISM_MAP_SLOTS; i++) { 7902 if (addr == ism_start(ism_map[i]) && 7903 sh_size == (size_t)(ism_size(ism_map[i]))) { 7904 found = 1; 7905 break; 7906 } 7907 } 7908 if (!found) 7909 ism_blkp = ism_blkp->iblk_next; 7910 } 7911 7912 if (found) { 7913 ism_hatid = ism_map[i].imap_ismhat; 7914 ASSERT(ism_hatid != NULL); 7915 ASSERT(ism_hatid->sfmmu_ismhat == 1); 7916 7917 /* 7918 * First remove ourselves from the ism mapping list. 7919 */ 7920 mutex_enter(&ism_mlist_lock); 7921 iment_sub(ism_map[i].imap_ment, ism_hatid); 7922 mutex_exit(&ism_mlist_lock); 7923 free_ment = ism_map[i].imap_ment; 7924 7925 /* 7926 * Now gurantee that any other cpu 7927 * that tries to process an ISM miss 7928 * will go to tl=0. 7929 */ 7930 hatlockp = sfmmu_hat_enter(sfmmup); 7931 7932 sfmmu_invalidate_ctx(sfmmup); 7933 7934 sfmmu_hat_exit(hatlockp); 7935 7936 /* 7937 * We delete the ism map by copying 7938 * the next map over the current one. 7939 * We will take the next one in the maps 7940 * array or from the next ism_blk. 
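* For instance, if a block's slots hold [A, B, C, empty] and B is the
* map being removed, the copy loop leaves [A, C, empty, empty], which
* preserves the no-holes invariant described above.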
7941 */ 7942 while (ism_blkp) { 7943 ism_map = ism_blkp->iblk_maps; 7944 while (i < (ISM_MAP_SLOTS - 1)) { 7945 ism_map[i] = ism_map[i + 1]; 7946 i++; 7947 } 7948 /* i == (ISM_MAP_SLOTS - 1) */ 7949 ism_blkp = ism_blkp->iblk_next; 7950 if (ism_blkp) { 7951 ism_map[i] = ism_blkp->iblk_maps[0]; 7952 i = 0; 7953 } else { 7954 ism_map[i].imap_seg = 0; 7955 ism_map[i].imap_vb_shift = 0; 7956 ism_map[i].imap_hatflags = 0; 7957 ism_map[i].imap_sz_mask = 0; 7958 ism_map[i].imap_ismhat = NULL; 7959 ism_map[i].imap_ment = NULL; 7960 } 7961 } 7962 7963 /* 7964 * Now flush entire TSB for the process, since 7965 * demapping page by page can be too expensive. 7966 * We don't have to flush the TLB here anymore 7967 * since we switch to a new TLB ctx instead. 7968 * Also, there is no need to flush if the process 7969 * is exiting since the TSB will be freed later. 7970 */ 7971 if (!sfmmup->sfmmu_free) { 7972 hatlockp = sfmmu_hat_enter(sfmmup); 7973 for (tsbinfo = sfmmup->sfmmu_tsb; tsbinfo != NULL; 7974 tsbinfo = tsbinfo->tsb_next) { 7975 if (tsbinfo->tsb_flags & TSB_SWAPPED) 7976 continue; 7977 sfmmu_inv_tsb(tsbinfo->tsb_va, 7978 TSB_BYTES(tsbinfo->tsb_szc)); 7979 } 7980 sfmmu_hat_exit(hatlockp); 7981 } 7982 } 7983 7984 /* 7985 * Update our counters for this sfmmup's ism mappings. 7986 */ 7987 for (i = 0; i <= ismszc; i++) { 7988 if (!(disable_ism_large_pages & (1 << i))) 7989 (void) ism_tsb_entries(sfmmup, i); 7990 } 7991 7992 sfmmu_ismhat_exit(sfmmup, 0); 7993 7994 /* 7995 * We must do our freeing here after dropping locks 7996 * to prevent a deadlock in the kmem allocator on the 7997 * mapping list lock. 7998 */ 7999 if (free_ment != NULL) 8000 kmem_cache_free(ism_ment_cache, free_ment); 8001 8002 /* 8003 * Check TSB and TLB page sizes if the process isn't exiting. 8004 */ 8005 if (!sfmmup->sfmmu_free) 8006 sfmmu_check_page_sizes(sfmmup, 0); 8007 } 8008 8009 /* ARGSUSED */ 8010 static int 8011 sfmmu_idcache_constructor(void *buf, void *cdrarg, int kmflags) 8012 { 8013 /* void *buf is sfmmu_t pointer */ 8014 return (0); 8015 } 8016 8017 /* ARGSUSED */ 8018 static void 8019 sfmmu_idcache_destructor(void *buf, void *cdrarg) 8020 { 8021 /* void *buf is sfmmu_t pointer */ 8022 } 8023 8024 /* 8025 * setup kmem hmeblks by bzeroing all members and initializing the nextpa 8026 * field to be the pa of this hmeblk 8027 */ 8028 /* ARGSUSED */ 8029 static int 8030 sfmmu_hblkcache_constructor(void *buf, void *cdrarg, int kmflags) 8031 { 8032 struct hme_blk *hmeblkp; 8033 8034 bzero(buf, (size_t)cdrarg); 8035 hmeblkp = (struct hme_blk *)buf; 8036 hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp); 8037 8038 #ifdef HBLK_TRACE 8039 mutex_init(&hmeblkp->hblk_audit_lock, NULL, MUTEX_DEFAULT, NULL); 8040 #endif /* HBLK_TRACE */ 8041 8042 return (0); 8043 } 8044 8045 /* ARGSUSED */ 8046 static void 8047 sfmmu_hblkcache_destructor(void *buf, void *cdrarg) 8048 { 8049 8050 #ifdef HBLK_TRACE 8051 8052 struct hme_blk *hmeblkp; 8053 8054 hmeblkp = (struct hme_blk *)buf; 8055 mutex_destroy(&hmeblkp->hblk_audit_lock); 8056 8057 #endif /* HBLK_TRACE */ 8058 } 8059 8060 #define SFMMU_CACHE_RECLAIM_SCAN_RATIO 8 8061 static int sfmmu_cache_reclaim_scan_ratio = SFMMU_CACHE_RECLAIM_SCAN_RATIO; 8062 /* 8063 * The kmem allocator will callback into our reclaim routine when the system 8064 * is running low in memory. We traverse the hash and free up all unused but 8065 * still cached hme_blks. We also traverse the free list and free them up 8066 * as well. 
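* Each callback scans only 1/sfmmu_cache_reclaim_scan_ratio (1/8 by
* default) of each hash table, resuming from a per-hash reclaim hand
* that advances on every call, and buckets whose lock is already held
* are skipped via SFMMU_HASH_LOCK_TRYENTER; successive callbacks thus
* sweep the whole table over time without stalling on busy buckets.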
8067 */ 8068 /*ARGSUSED*/ 8069 static void 8070 sfmmu_hblkcache_reclaim(void *cdrarg) 8071 { 8072 int i; 8073 uint64_t hblkpa, prevpa, nx_pa; 8074 struct hmehash_bucket *hmebp; 8075 struct hme_blk *hmeblkp, *nx_hblk, *pr_hblk = NULL; 8076 static struct hmehash_bucket *uhmehash_reclaim_hand; 8077 static struct hmehash_bucket *khmehash_reclaim_hand; 8078 struct hme_blk *list = NULL; 8079 8080 hmebp = uhmehash_reclaim_hand; 8081 if (hmebp == NULL || hmebp > &uhme_hash[UHMEHASH_SZ]) 8082 uhmehash_reclaim_hand = hmebp = uhme_hash; 8083 uhmehash_reclaim_hand += UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; 8084 8085 for (i = UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) { 8086 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) { 8087 hmeblkp = hmebp->hmeblkp; 8088 hblkpa = hmebp->hmeh_nextpa; 8089 prevpa = 0; 8090 pr_hblk = NULL; 8091 while (hmeblkp) { 8092 nx_hblk = hmeblkp->hblk_next; 8093 nx_pa = hmeblkp->hblk_nextpa; 8094 if (!hmeblkp->hblk_vcnt && 8095 !hmeblkp->hblk_hmecnt) { 8096 sfmmu_hblk_hash_rm(hmebp, hmeblkp, 8097 prevpa, pr_hblk); 8098 sfmmu_hblk_free(hmebp, hmeblkp, 8099 hblkpa, &list); 8100 } else { 8101 pr_hblk = hmeblkp; 8102 prevpa = hblkpa; 8103 } 8104 hmeblkp = nx_hblk; 8105 hblkpa = nx_pa; 8106 } 8107 SFMMU_HASH_UNLOCK(hmebp); 8108 } 8109 if (hmebp++ == &uhme_hash[UHMEHASH_SZ]) 8110 hmebp = uhme_hash; 8111 } 8112 8113 hmebp = khmehash_reclaim_hand; 8114 if (hmebp == NULL || hmebp > &khme_hash[KHMEHASH_SZ]) 8115 khmehash_reclaim_hand = hmebp = khme_hash; 8116 khmehash_reclaim_hand += KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; 8117 8118 for (i = KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) { 8119 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) { 8120 hmeblkp = hmebp->hmeblkp; 8121 hblkpa = hmebp->hmeh_nextpa; 8122 prevpa = 0; 8123 pr_hblk = NULL; 8124 while (hmeblkp) { 8125 nx_hblk = hmeblkp->hblk_next; 8126 nx_pa = hmeblkp->hblk_nextpa; 8127 if (!hmeblkp->hblk_vcnt && 8128 !hmeblkp->hblk_hmecnt) { 8129 sfmmu_hblk_hash_rm(hmebp, hmeblkp, 8130 prevpa, pr_hblk); 8131 sfmmu_hblk_free(hmebp, hmeblkp, 8132 hblkpa, &list); 8133 } else { 8134 pr_hblk = hmeblkp; 8135 prevpa = hblkpa; 8136 } 8137 hmeblkp = nx_hblk; 8138 hblkpa = nx_pa; 8139 } 8140 SFMMU_HASH_UNLOCK(hmebp); 8141 } 8142 if (hmebp++ == &khme_hash[KHMEHASH_SZ]) 8143 hmebp = khme_hash; 8144 } 8145 sfmmu_hblks_list_purge(&list); 8146 } 8147 8148 /* 8149 * sfmmu_get_ppvcolor should become a vm_machdep or hatop interface. 8150 * same goes for sfmmu_get_addrvcolor(). 8151 * 8152 * This function will return the virtual color for the specified page. The 8153 * virtual color corresponds to this page current mapping or its last mapping. 8154 * It is used by memory allocators to choose addresses with the correct 8155 * alignment so vac consistency is automatically maintained. If the page 8156 * has no color it returns -1. 8157 */ 8158 /*ARGSUSED*/ 8159 int 8160 sfmmu_get_ppvcolor(struct page *pp) 8161 { 8162 #ifdef VAC 8163 int color; 8164 8165 if (!(cache & CACHE_VAC) || PP_NEWPAGE(pp)) { 8166 return (-1); 8167 } 8168 color = PP_GET_VCOLOR(pp); 8169 ASSERT(color < mmu_btop(shm_alignment)); 8170 return (color); 8171 #else 8172 return (-1); 8173 #endif /* VAC */ 8174 } 8175 8176 /* 8177 * This function will return the desired alignment for vac consistency 8178 * (vac color) given a virtual address. If no vac is present it returns -1. 
8179 */ 8180 /*ARGSUSED*/ 8181 int 8182 sfmmu_get_addrvcolor(caddr_t vaddr) 8183 { 8184 #ifdef VAC 8185 if (cache & CACHE_VAC) { 8186 return (addr_to_vcolor(vaddr)); 8187 } else { 8188 return (-1); 8189 } 8190 #else 8191 return (-1); 8192 #endif /* VAC */ 8193 } 8194 8195 #ifdef VAC 8196 /* 8197 * Check for conflicts. 8198 * A conflict exists if the new and existing mappings do not match in 8199 * their "shm_alignment" fields. If conflicts exist, the existing mappings 8200 * are flushed unless one of them is locked. If one of them is locked, then 8201 * the mappings are flushed and converted to non-cacheable mappings. 8202 */ 8203 static void 8204 sfmmu_vac_conflict(struct hat *hat, caddr_t addr, page_t *pp) 8205 { 8206 struct hat *tmphat; 8207 struct sf_hment *sfhmep, *tmphme = NULL; 8208 struct hme_blk *hmeblkp; 8209 int vcolor; 8210 tte_t tte; 8211 8212 ASSERT(sfmmu_mlist_held(pp)); 8213 ASSERT(!PP_ISNC(pp)); /* page better be cacheable */ 8214 8215 vcolor = addr_to_vcolor(addr); 8216 if (PP_NEWPAGE(pp)) { 8217 PP_SET_VCOLOR(pp, vcolor); 8218 return; 8219 } 8220 8221 if (PP_GET_VCOLOR(pp) == vcolor) { 8222 return; 8223 } 8224 8225 if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) { 8226 /* 8227 * Previous user of page had a different color 8228 * but since there are no current users 8229 * we just flush the cache and change the color. 8230 */ 8231 SFMMU_STAT(sf_pgcolor_conflict); 8232 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp)); 8233 PP_SET_VCOLOR(pp, vcolor); 8234 return; 8235 } 8236 8237 /* 8238 * If we get here we have a vac conflict with a current 8239 * mapping. VAC conflict policy is as follows. 8240 * - The default is to unload the other mappings unless: 8241 * - If we have a large mapping we uncache the page. 8242 * We need to uncache the rest of the large page too. 8243 * - If any of the mappings are locked we uncache the page. 8244 * - If the requested mapping is inconsistent 8245 * with another mapping and that mapping 8246 * is in the same address space we have to 8247 * make it non-cached. The default thing 8248 * to do is unload the inconsistent mapping 8249 * but if they are in the same address space 8250 * we run the risk of unmapping the pc or the 8251 * stack which we will use as we return to the user, 8252 * in which case we can then fault on the thing 8253 * we just unloaded and get into an infinite loop. 8254 */ 8255 if (PP_ISMAPPED_LARGE(pp)) { 8256 int sz; 8257 8258 /* 8259 * Existing mapping is for big pages. We don't unload 8260 * existing big mappings to satisfy new mappings. 8261 * Always convert all mappings to TNC. 8262 */ 8263 sz = fnd_mapping_sz(pp); 8264 pp = PP_GROUPLEADER(pp, sz); 8265 SFMMU_STAT_ADD(sf_uncache_conflict, TTEPAGES(sz)); 8266 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 8267 TTEPAGES(sz)); 8268 8269 return; 8270 } 8271 8272 /* 8273 * Check if any mapping is in the same address space or if it is locked, 8274 * since in that case we need to uncache.
8275 */ 8276 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) { 8277 tmphme = sfhmep->hme_next; 8278 hmeblkp = sfmmu_hmetohblk(sfhmep); 8279 if (hmeblkp->hblk_xhat_bit) 8280 continue; 8281 tmphat = hblktosfmmu(hmeblkp); 8282 sfmmu_copytte(&sfhmep->hme_tte, &tte); 8283 ASSERT(TTE_IS_VALID(&tte)); 8284 if ((tmphat == hat) || hmeblkp->hblk_lckcnt) { 8285 /* 8286 * We have an uncache conflict 8287 */ 8288 SFMMU_STAT(sf_uncache_conflict); 8289 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 1); 8290 return; 8291 } 8292 } 8293 8294 /* 8295 * We have an unload conflict 8296 * We have already checked for LARGE mappings, therefore 8297 * the remaining mapping(s) must be TTE8K. 8298 */ 8299 SFMMU_STAT(sf_unload_conflict); 8300 8301 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) { 8302 tmphme = sfhmep->hme_next; 8303 hmeblkp = sfmmu_hmetohblk(sfhmep); 8304 if (hmeblkp->hblk_xhat_bit) 8305 continue; 8306 (void) sfmmu_pageunload(pp, sfhmep, TTE8K); 8307 } 8308 8309 if (PP_ISMAPPED_KPM(pp)) 8310 sfmmu_kpm_vac_unload(pp, addr); 8311 8312 /* 8313 * Unloads only do TLB flushes so we need to flush the 8314 * cache here. 8315 */ 8316 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp)); 8317 PP_SET_VCOLOR(pp, vcolor); 8318 } 8319 8320 /* 8321 * Whenever a mapping is unloaded and the page is in TNC state, 8322 * we see if the page can be made cacheable again. 'pp' is 8323 * the page that we just unloaded a mapping from, the size 8324 * of mapping that was unloaded is 'ottesz'. 8325 * Remark: 8326 * The recache policy for mpss pages can leave a performance problem 8327 * under the following circumstances: 8328 * . A large page in uncached mode has just been unmapped. 8329 * . All constituent pages are TNC due to a conflicting small mapping. 8330 * . There are many other, non conflicting, small mappings around for 8331 * a lot of the constituent pages. 8332 * . We're called w/ the "old" groupleader page and the old ottesz, 8333 * but this is irrelevant, since we're no more "PP_ISMAPPED_LARGE", so 8334 * we end up w/ TTE8K or npages == 1. 8335 * . We call tst_tnc w/ the old groupleader only, and if there is no 8336 * conflict, we re-cache only this page. 8337 * . All other small mappings are not checked and will be left in TNC mode. 8338 * The problem is not very serious because: 8339 * . mpss is actually only defined for heap and stack, so the probability 8340 * is not very high that a large page mapping exists in parallel to a small 8341 * one (this is possible, but seems to be bad programming style in the 8342 * appl). 8343 * . The problem gets a little bit more serious, when those TNC pages 8344 * have to be mapped into kernel space, e.g. for networking. 8345 * . When VAC alias conflicts occur in applications, this is regarded 8346 * as an application bug. So if kstat's show them, the appl should 8347 * be changed anyway. 8348 */ 8349 void 8350 conv_tnc(page_t *pp, int ottesz) 8351 { 8352 int cursz, dosz; 8353 pgcnt_t curnpgs, dopgs; 8354 pgcnt_t pg64k; 8355 page_t *pp2; 8356 8357 /* 8358 * Determine how big a range we check for TNC and find 8359 * leader page. cursz is the size of the biggest 8360 * mapping that still exist on 'pp'. 
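 * The loop below walks the range in cursz-sized chunks and
 * re-evaluates cursz whenever the remaining count reaches a 64K
 * page boundary.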
8361 */ 8362 if (PP_ISMAPPED_LARGE(pp)) { 8363 cursz = fnd_mapping_sz(pp); 8364 } else { 8365 cursz = TTE8K; 8366 } 8367 8368 if (ottesz >= cursz) { 8369 dosz = ottesz; 8370 pp2 = pp; 8371 } else { 8372 dosz = cursz; 8373 pp2 = PP_GROUPLEADER(pp, dosz); 8374 } 8375 8376 pg64k = TTEPAGES(TTE64K); 8377 dopgs = TTEPAGES(dosz); 8378 8379 ASSERT(dopgs == 1 || ((dopgs & (pg64k - 1)) == 0)); 8380 8381 while (dopgs != 0) { 8382 curnpgs = TTEPAGES(cursz); 8383 if (tst_tnc(pp2, curnpgs)) { 8384 SFMMU_STAT_ADD(sf_recache, curnpgs); 8385 sfmmu_page_cache_array(pp2, HAT_CACHE, CACHE_NO_FLUSH, 8386 curnpgs); 8387 } 8388 8389 ASSERT(dopgs >= curnpgs); 8390 dopgs -= curnpgs; 8391 8392 if (dopgs == 0) { 8393 break; 8394 } 8395 8396 pp2 = PP_PAGENEXT_N(pp2, curnpgs); 8397 if (((dopgs & (pg64k - 1)) == 0) && PP_ISMAPPED_LARGE(pp2)) { 8398 cursz = fnd_mapping_sz(pp2); 8399 } else { 8400 cursz = TTE8K; 8401 } 8402 } 8403 } 8404 8405 /* 8406 * Returns 1 if page(s) can be converted from TNC to cacheable setting, 8407 * returns 0 otherwise. Note that oaddr argument is valid for only 8408 * 8k pages. 8409 */ 8410 int 8411 tst_tnc(page_t *pp, pgcnt_t npages) 8412 { 8413 struct sf_hment *sfhme; 8414 struct hme_blk *hmeblkp; 8415 tte_t tte; 8416 caddr_t vaddr; 8417 int clr_valid = 0; 8418 int color, color1, bcolor; 8419 int i, ncolors; 8420 8421 ASSERT(pp != NULL); 8422 ASSERT(!(cache & CACHE_WRITEBACK)); 8423 8424 if (npages > 1) { 8425 ncolors = CACHE_NUM_COLOR; 8426 } 8427 8428 for (i = 0; i < npages; i++) { 8429 ASSERT(sfmmu_mlist_held(pp)); 8430 ASSERT(PP_ISTNC(pp)); 8431 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR); 8432 8433 if (PP_ISPNC(pp)) { 8434 return (0); 8435 } 8436 8437 clr_valid = 0; 8438 if (PP_ISMAPPED_KPM(pp)) { 8439 caddr_t kpmvaddr; 8440 8441 ASSERT(kpm_enable); 8442 kpmvaddr = hat_kpm_page2va(pp, 1); 8443 ASSERT(!(npages > 1 && IS_KPM_ALIAS_RANGE(kpmvaddr))); 8444 color1 = addr_to_vcolor(kpmvaddr); 8445 clr_valid = 1; 8446 } 8447 8448 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) { 8449 hmeblkp = sfmmu_hmetohblk(sfhme); 8450 if (hmeblkp->hblk_xhat_bit) 8451 continue; 8452 8453 sfmmu_copytte(&sfhme->hme_tte, &tte); 8454 ASSERT(TTE_IS_VALID(&tte)); 8455 8456 vaddr = tte_to_vaddr(hmeblkp, tte); 8457 color = addr_to_vcolor(vaddr); 8458 8459 if (npages > 1) { 8460 /* 8461 * If there is a big mapping, make sure 8462 * 8K mapping is consistent with the big 8463 * mapping. 
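 * That is, the 8K mapping's virtual color must equal the color this
 * constituent page would have within an aligned large mapping,
 * namely (i % ncolors).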
8464 */ 8465 bcolor = i % ncolors; 8466 if (color != bcolor) { 8467 return (0); 8468 } 8469 } 8470 if (!clr_valid) { 8471 clr_valid = 1; 8472 color1 = color; 8473 } 8474 8475 if (color1 != color) { 8476 return (0); 8477 } 8478 } 8479 8480 pp = PP_PAGENEXT(pp); 8481 } 8482 8483 return (1); 8484 } 8485 8486 void 8487 sfmmu_page_cache_array(page_t *pp, int flags, int cache_flush_flag, 8488 pgcnt_t npages) 8489 { 8490 kmutex_t *pmtx; 8491 int i, ncolors, bcolor; 8492 kpm_hlk_t *kpmp; 8493 cpuset_t cpuset; 8494 8495 ASSERT(pp != NULL); 8496 ASSERT(!(cache & CACHE_WRITEBACK)); 8497 8498 kpmp = sfmmu_kpm_kpmp_enter(pp, npages); 8499 pmtx = sfmmu_page_enter(pp); 8500 8501 /* 8502 * Fast path caching single unmapped page 8503 */ 8504 if (npages == 1 && !PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp) && 8505 flags == HAT_CACHE) { 8506 PP_CLRTNC(pp); 8507 PP_CLRPNC(pp); 8508 sfmmu_page_exit(pmtx); 8509 sfmmu_kpm_kpmp_exit(kpmp); 8510 return; 8511 } 8512 8513 /* 8514 * We need to capture all cpus in order to change cacheability 8515 * because we can't allow one cpu to access the same physical 8516 * page using a cacheable and a non-cacheable mapping at the same 8517 * time. Since we may end up walking the ism mapping list 8518 * we have to grab its lock now, since we can't after all the 8519 * cpus have been captured. 8520 */ 8521 sfmmu_hat_lock_all(); 8522 mutex_enter(&ism_mlist_lock); 8523 kpreempt_disable(); 8524 cpuset = cpu_ready_set; 8525 xc_attention(cpuset); 8526 8527 if (npages > 1) { 8528 /* 8529 * Make sure all colors are flushed since the 8530 * sfmmu_page_cache() only flushes one color - 8531 * it does not know about big pages. 8532 */ 8533 ncolors = CACHE_NUM_COLOR; 8534 if (flags & HAT_TMPNC) { 8535 for (i = 0; i < ncolors; i++) { 8536 sfmmu_cache_flushcolor(i, pp->p_pagenum); 8537 } 8538 cache_flush_flag = CACHE_NO_FLUSH; 8539 } 8540 } 8541 8542 for (i = 0; i < npages; i++) { 8543 8544 ASSERT(sfmmu_mlist_held(pp)); 8545 8546 if (!(flags == HAT_TMPNC && PP_ISTNC(pp))) { 8547 8548 if (npages > 1) { 8549 bcolor = i % ncolors; 8550 } else { 8551 bcolor = NO_VCOLOR; 8552 } 8553 8554 sfmmu_page_cache(pp, flags, cache_flush_flag, 8555 bcolor); 8556 } 8557 8558 pp = PP_PAGENEXT(pp); 8559 } 8560 8561 xt_sync(cpuset); 8562 xc_dismissed(cpuset); 8563 mutex_exit(&ism_mlist_lock); 8564 sfmmu_hat_unlock_all(); 8565 sfmmu_page_exit(pmtx); 8566 sfmmu_kpm_kpmp_exit(kpmp); 8567 kpreempt_enable(); 8568 } 8569 8570 /* 8571 * This function changes the virtual cacheability of all mappings to a 8572 * particular page. When changing from uncache to cacheable the mappings will 8573 * only be changed if all of them have the same virtual color. 8574 * We need to flush the cache on all cpus. It is possible that 8575 * a process referenced a page as cacheable but has since exited 8576 * and cleared the mapping list. We still need to flush it but have no 8577 * state, so flushing on all cpus is the only alternative.
8578 */ 8579 static void 8580 sfmmu_page_cache(page_t *pp, int flags, int cache_flush_flag, int bcolor) 8581 { 8582 struct sf_hment *sfhme; 8583 struct hme_blk *hmeblkp; 8584 sfmmu_t *sfmmup; 8585 tte_t tte, ttemod; 8586 caddr_t vaddr; 8587 int ret, color; 8588 pfn_t pfn; 8589 8590 color = bcolor; 8591 pfn = pp->p_pagenum; 8592 8593 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) { 8594 8595 hmeblkp = sfmmu_hmetohblk(sfhme); 8596 8597 if (hmeblkp->hblk_xhat_bit) 8598 continue; 8599 8600 sfmmu_copytte(&sfhme->hme_tte, &tte); 8601 ASSERT(TTE_IS_VALID(&tte)); 8602 vaddr = tte_to_vaddr(hmeblkp, tte); 8603 color = addr_to_vcolor(vaddr); 8604 8605 #ifdef DEBUG 8606 if ((flags & HAT_CACHE) && bcolor != NO_VCOLOR) { 8607 ASSERT(color == bcolor); 8608 } 8609 #endif 8610 8611 ASSERT(flags != HAT_TMPNC || color == PP_GET_VCOLOR(pp)); 8612 8613 ttemod = tte; 8614 if (flags & (HAT_UNCACHE | HAT_TMPNC)) { 8615 TTE_CLR_VCACHEABLE(&ttemod); 8616 } else { /* flags & HAT_CACHE */ 8617 TTE_SET_VCACHEABLE(&ttemod); 8618 } 8619 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 8620 if (ret < 0) { 8621 /* 8622 * Since all cpus are captured modifytte should not 8623 * fail. 8624 */ 8625 panic("sfmmu_page_cache: write to tte failed"); 8626 } 8627 8628 sfmmup = hblktosfmmu(hmeblkp); 8629 if (cache_flush_flag == CACHE_FLUSH) { 8630 /* 8631 * Flush TSBs, TLBs and caches 8632 */ 8633 if (sfmmup->sfmmu_ismhat) { 8634 if (flags & HAT_CACHE) { 8635 SFMMU_STAT(sf_ism_recache); 8636 } else { 8637 SFMMU_STAT(sf_ism_uncache); 8638 } 8639 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp, 8640 pfn, CACHE_FLUSH); 8641 } else { 8642 sfmmu_tlbcache_demap(vaddr, sfmmup, hmeblkp, 8643 pfn, 0, FLUSH_ALL_CPUS, CACHE_FLUSH, 1); 8644 } 8645 8646 /* 8647 * all cache entries belonging to this pfn are 8648 * now flushed. 8649 */ 8650 cache_flush_flag = CACHE_NO_FLUSH; 8651 } else { 8652 8653 /* 8654 * Flush only TSBs and TLBs. 8655 */ 8656 if (sfmmup->sfmmu_ismhat) { 8657 if (flags & HAT_CACHE) { 8658 SFMMU_STAT(sf_ism_recache); 8659 } else { 8660 SFMMU_STAT(sf_ism_uncache); 8661 } 8662 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp, 8663 pfn, CACHE_NO_FLUSH); 8664 } else { 8665 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 1); 8666 } 8667 } 8668 } 8669 8670 if (PP_ISMAPPED_KPM(pp)) 8671 sfmmu_kpm_page_cache(pp, flags, cache_flush_flag); 8672 8673 switch (flags) { 8674 8675 default: 8676 panic("sfmmu_pagecache: unknown flags"); 8677 break; 8678 8679 case HAT_CACHE: 8680 PP_CLRTNC(pp); 8681 PP_CLRPNC(pp); 8682 PP_SET_VCOLOR(pp, color); 8683 break; 8684 8685 case HAT_TMPNC: 8686 PP_SETTNC(pp); 8687 PP_SET_VCOLOR(pp, NO_VCOLOR); 8688 break; 8689 8690 case HAT_UNCACHE: 8691 PP_SETPNC(pp); 8692 PP_CLRTNC(pp); 8693 PP_SET_VCOLOR(pp, NO_VCOLOR); 8694 break; 8695 } 8696 } 8697 #endif /* VAC */ 8698 8699 8700 /* 8701 * Wrapper routine used to return a context. 8702 * 8703 * It's the responsibility of the caller to guarantee that the 8704 * process serializes on calls here by taking the HAT lock for 8705 * the hat. 8706 * 8707 */ 8708 static void 8709 sfmmu_get_ctx(sfmmu_t *sfmmup) 8710 { 8711 mmu_ctx_t *mmu_ctxp; 8712 uint_t pstate_save; 8713 8714 ASSERT(sfmmu_hat_lock_held(sfmmup)); 8715 ASSERT(sfmmup != ksfmmup); 8716 8717 kpreempt_disable(); 8718 8719 mmu_ctxp = CPU_MMU_CTXP(CPU); 8720 ASSERT(mmu_ctxp); 8721 ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms); 8722 ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]); 8723 8724 /* 8725 * Do a wrap-around if cnum reaches the max # cnum supported by a MMU. 
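 * The wrap-around bumps the MMU generation number, invalidates the
 * context of every process running on CPUs in this context domain
 * and flushes the user TLB entries (see sfmmu_ctx_wrap_around()).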
8726 */ 8727 if (mmu_ctxp->mmu_cnum == mmu_ctxp->mmu_nctxs) 8728 sfmmu_ctx_wrap_around(mmu_ctxp); 8729 8730 /* 8731 * Let the MMU set up the page sizes to use for 8732 * this context in the TLB. Don't program 2nd dtlb for ism hat. 8733 */ 8734 if ((&mmu_set_ctx_page_sizes) && (sfmmup->sfmmu_ismhat == 0)) { 8735 mmu_set_ctx_page_sizes(sfmmup); 8736 } 8737 8738 /* 8739 * sfmmu_alloc_ctx and sfmmu_load_mmustate will be performed with 8740 * interrupts disabled to prevent race condition with wrap-around 8741 * ctx invalidatation. In sun4v, ctx invalidation also involves 8742 * a HV call to set the number of TSBs to 0. If interrupts are not 8743 * disabled until after sfmmu_load_mmustate is complete TSBs may 8744 * become assigned to INVALID_CONTEXT. This is not allowed. 8745 */ 8746 pstate_save = sfmmu_disable_intrs(); 8747 8748 sfmmu_alloc_ctx(sfmmup, 1, CPU); 8749 sfmmu_load_mmustate(sfmmup); 8750 8751 sfmmu_enable_intrs(pstate_save); 8752 8753 kpreempt_enable(); 8754 } 8755 8756 /* 8757 * When all cnums are used up in a MMU, cnum will wrap around to the 8758 * next generation and start from 2. 8759 */ 8760 static void 8761 sfmmu_ctx_wrap_around(mmu_ctx_t *mmu_ctxp) 8762 { 8763 8764 /* caller must have disabled the preemption */ 8765 ASSERT(curthread->t_preempt >= 1); 8766 ASSERT(mmu_ctxp != NULL); 8767 8768 /* acquire Per-MMU (PM) spin lock */ 8769 mutex_enter(&mmu_ctxp->mmu_lock); 8770 8771 /* re-check to see if wrap-around is needed */ 8772 if (mmu_ctxp->mmu_cnum < mmu_ctxp->mmu_nctxs) 8773 goto done; 8774 8775 SFMMU_MMU_STAT(mmu_wrap_around); 8776 8777 /* update gnum */ 8778 ASSERT(mmu_ctxp->mmu_gnum != 0); 8779 mmu_ctxp->mmu_gnum++; 8780 if (mmu_ctxp->mmu_gnum == 0 || 8781 mmu_ctxp->mmu_gnum > MAX_SFMMU_GNUM_VAL) { 8782 cmn_err(CE_PANIC, "mmu_gnum of mmu_ctx 0x%p is out of bound.", 8783 (void *)mmu_ctxp); 8784 } 8785 8786 if (mmu_ctxp->mmu_ncpus > 1) { 8787 cpuset_t cpuset; 8788 8789 membar_enter(); /* make sure updated gnum visible */ 8790 8791 SFMMU_XCALL_STATS(NULL); 8792 8793 /* xcall to others on the same MMU to invalidate ctx */ 8794 cpuset = mmu_ctxp->mmu_cpuset; 8795 ASSERT(CPU_IN_SET(cpuset, CPU->cpu_id)); 8796 CPUSET_DEL(cpuset, CPU->cpu_id); 8797 CPUSET_AND(cpuset, cpu_ready_set); 8798 8799 /* 8800 * Pass in INVALID_CONTEXT as the first parameter to 8801 * sfmmu_raise_tsb_exception, which invalidates the context 8802 * of any process running on the CPUs in the MMU. 8803 */ 8804 xt_some(cpuset, sfmmu_raise_tsb_exception, 8805 INVALID_CONTEXT, INVALID_CONTEXT); 8806 xt_sync(cpuset); 8807 8808 SFMMU_MMU_STAT(mmu_tsb_raise_exception); 8809 } 8810 8811 if (sfmmu_getctx_sec() != INVALID_CONTEXT) { 8812 sfmmu_setctx_sec(INVALID_CONTEXT); 8813 sfmmu_clear_utsbinfo(); 8814 } 8815 8816 /* 8817 * No xcall is needed here. For sun4u systems all CPUs in context 8818 * domain share a single physical MMU therefore it's enough to flush 8819 * TLB on local CPU. On sun4v systems we use 1 global context 8820 * domain and flush all remote TLBs in sfmmu_raise_tsb_exception 8821 * handler. Note that vtag_flushall_uctxs() is called 8822 * for Ultra II machine, where the equivalent flushall functionality 8823 * is implemented in SW, and only user ctx TLB entries are flushed. 
8824 */ 8825 if (&vtag_flushall_uctxs != NULL) { 8826 vtag_flushall_uctxs(); 8827 } else { 8828 vtag_flushall(); 8829 } 8830 8831 /* reset mmu cnum, skips cnum 0 and 1 */ 8832 mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS; 8833 8834 done: 8835 mutex_exit(&mmu_ctxp->mmu_lock); 8836 } 8837 8838 8839 /* 8840 * For a multi-threaded process, set the process context to INVALID_CONTEXT 8841 * so that it faults and reloads the MMU state from TL=0. For a single-threaded 8842 * process, we can just load the MMU state directly without having to 8843 * set context invalid. Caller must hold the hat lock since we don't 8844 * acquire it here. 8845 */ 8846 static void 8847 sfmmu_sync_mmustate(sfmmu_t *sfmmup) 8848 { 8849 uint_t cnum; 8850 uint_t pstate_save; 8851 8852 ASSERT(sfmmup != ksfmmup); 8853 ASSERT(sfmmu_hat_lock_held(sfmmup)); 8854 8855 kpreempt_disable(); 8856 8857 /* 8858 * We check whether the passed-in sfmmup is the same as the 8859 * currently running proc. This is to make sure the current proc 8860 * stays single-threaded if it already is. 8861 */ 8862 if ((sfmmup == curthread->t_procp->p_as->a_hat) && 8863 (curthread->t_procp->p_lwpcnt == 1)) { 8864 /* single-thread */ 8865 cnum = sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum; 8866 if (cnum != INVALID_CONTEXT) { 8867 uint_t curcnum; 8868 /* 8869 * Disable interrupts to prevent race condition 8870 * with sfmmu_ctx_wrap_around ctx invalidation. 8871 * In sun4v, ctx invalidation involves setting 8872 * TSB to NULL, hence, interrupts should be disabled 8873 * until after sfmmu_load_mmustate is completed. 8874 */ 8875 pstate_save = sfmmu_disable_intrs(); 8876 curcnum = sfmmu_getctx_sec(); 8877 if (curcnum == cnum) 8878 sfmmu_load_mmustate(sfmmup); 8879 sfmmu_enable_intrs(pstate_save); 8880 ASSERT(curcnum == cnum || curcnum == INVALID_CONTEXT); 8881 } 8882 } else { 8883 /* 8884 * multi-thread 8885 * or when sfmmup is not the same as the curproc. 8886 */ 8887 sfmmu_invalidate_ctx(sfmmup); 8888 } 8889 8890 kpreempt_enable(); 8891 } 8892 8893 8894 /* 8895 * Replace the specified TSB with a new TSB. This function gets called when 8896 * we grow, shrink or swapin a TSB. When swapping in a TSB (TSB_SWAPIN), the 8897 * TSB_FORCEALLOC flag may be used to force allocation of a minimum-sized TSB 8898 * (8K). 8899 * 8900 * Caller must hold the HAT lock, but should assume any tsb_info 8901 * pointers it has are no longer valid after calling this function. 8902 * 8903 * Return values: 8904 * TSB_ALLOCFAIL Failed to allocate a TSB, due to memory constraints 8905 * TSB_LOSTRACE HAT is busy, i.e. another thread is already doing 8906 * something to this tsbinfo/TSB 8907 * TSB_SUCCESS Operation succeeded 8908 */ 8909 static tsb_replace_rc_t 8910 sfmmu_replace_tsb(sfmmu_t *sfmmup, struct tsb_info *old_tsbinfo, uint_t szc, 8911 hatlock_t *hatlockp, uint_t flags) 8912 { 8913 struct tsb_info *new_tsbinfo = NULL; 8914 struct tsb_info *curtsb, *prevtsb; 8915 uint_t tte_sz_mask; 8916 int i; 8917 8918 ASSERT(sfmmup != ksfmmup); 8919 ASSERT(sfmmup->sfmmu_ismhat == 0); 8920 ASSERT(sfmmu_hat_lock_held(sfmmup)); 8921 ASSERT(szc <= tsb_max_growsize); 8922 8923 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_BUSY)) 8924 return (TSB_LOSTRACE); 8925 8926 /* 8927 * Find the tsb_info ahead of this one in the list, and 8928 * also make sure that the tsb_info passed in really 8929 * exists!
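 * prevtsb is remembered here because it is needed later to splice
 * the replacement tsb_info into the list.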
8930 */ 8931 for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb; 8932 curtsb != old_tsbinfo && curtsb != NULL; 8933 prevtsb = curtsb, curtsb = curtsb->tsb_next); 8934 ASSERT(curtsb != NULL); 8935 8936 if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 8937 /* 8938 * The process is swapped out, so just set the new size 8939 * code. When it swaps back in, we'll allocate a new one 8940 * of the new chosen size. 8941 */ 8942 curtsb->tsb_szc = szc; 8943 return (TSB_SUCCESS); 8944 } 8945 SFMMU_FLAGS_SET(sfmmup, HAT_BUSY); 8946 8947 tte_sz_mask = old_tsbinfo->tsb_ttesz_mask; 8948 8949 /* 8950 * All initialization is done inside of sfmmu_tsbinfo_alloc(). 8951 * If we fail to allocate a TSB, exit. 8952 */ 8953 sfmmu_hat_exit(hatlockp); 8954 if (sfmmu_tsbinfo_alloc(&new_tsbinfo, szc, tte_sz_mask, 8955 flags, sfmmup)) { 8956 (void) sfmmu_hat_enter(sfmmup); 8957 if (!(flags & TSB_SWAPIN)) 8958 SFMMU_STAT(sf_tsb_resize_failures); 8959 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 8960 return (TSB_ALLOCFAIL); 8961 } 8962 (void) sfmmu_hat_enter(sfmmup); 8963 8964 /* 8965 * Re-check to make sure somebody else didn't muck with us while we 8966 * didn't hold the HAT lock. If the process swapped out, fine, just 8967 * exit; this can happen if we try to shrink the TSB from the context 8968 * of another process (such as on an ISM unmap), though it is rare. 8969 */ 8970 if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 8971 SFMMU_STAT(sf_tsb_resize_failures); 8972 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 8973 sfmmu_hat_exit(hatlockp); 8974 sfmmu_tsbinfo_free(new_tsbinfo); 8975 (void) sfmmu_hat_enter(sfmmup); 8976 return (TSB_LOSTRACE); 8977 } 8978 8979 #ifdef DEBUG 8980 /* Reverify that the tsb_info still exists.. for debugging only */ 8981 for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb; 8982 curtsb != old_tsbinfo && curtsb != NULL; 8983 prevtsb = curtsb, curtsb = curtsb->tsb_next); 8984 ASSERT(curtsb != NULL); 8985 #endif /* DEBUG */ 8986 8987 /* 8988 * Quiesce any CPUs running this process on their next TLB miss 8989 * so they atomically see the new tsb_info. We temporarily set the 8990 * context to invalid context so new threads that come on processor 8991 * after we do the xcall to cpusran will also serialize behind the 8992 * HAT lock on TLB miss and will see the new TSB. Since this short 8993 * race with a new thread coming on processor is relatively rare, 8994 * this synchronization mechanism should be cheaper than always 8995 * pausing all CPUs for the duration of the setup, which is what 8996 * the old implementation did. This is particuarly true if we are 8997 * copying a huge chunk of memory around during that window. 8998 * 8999 * The memory barriers are to make sure things stay consistent 9000 * with resume() since it does not hold the HAT lock while 9001 * walking the list of tsb_info structures. 9002 */ 9003 if ((flags & TSB_SWAPIN) != TSB_SWAPIN) { 9004 /* The TSB is either growing or shrinking. */ 9005 sfmmu_invalidate_ctx(sfmmup); 9006 } else { 9007 /* 9008 * It is illegal to swap in TSBs from a process other 9009 * than a process being swapped in. This in turn 9010 * implies we do not have a valid MMU context here 9011 * since a process needs one to resolve translation 9012 * misses. 
9013 */ 9014 ASSERT(curthread->t_procp->p_as->a_hat == sfmmup); 9015 } 9016 9017 #ifdef DEBUG 9018 ASSERT(max_mmu_ctxdoms > 0); 9019 9020 /* 9021 * Process should have INVALID_CONTEXT on all MMUs 9022 */ 9023 for (i = 0; i < max_mmu_ctxdoms; i++) { 9024 9025 ASSERT(sfmmup->sfmmu_ctxs[i].cnum == INVALID_CONTEXT); 9026 } 9027 #endif 9028 9029 new_tsbinfo->tsb_next = old_tsbinfo->tsb_next; 9030 membar_stst(); /* strict ordering required */ 9031 if (prevtsb) 9032 prevtsb->tsb_next = new_tsbinfo; 9033 else 9034 sfmmup->sfmmu_tsb = new_tsbinfo; 9035 membar_enter(); /* make sure new TSB globally visible */ 9036 sfmmu_setup_tsbinfo(sfmmup); 9037 9038 /* 9039 * We need to migrate TSB entries from the old TSB to the new TSB 9040 * if tsb_remap_ttes is set and the TSB is growing. 9041 */ 9042 if (tsb_remap_ttes && ((flags & TSB_GROW) == TSB_GROW)) 9043 sfmmu_copy_tsb(old_tsbinfo, new_tsbinfo); 9044 9045 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 9046 9047 /* 9048 * Drop the HAT lock to free our old tsb_info. 9049 */ 9050 sfmmu_hat_exit(hatlockp); 9051 9052 if ((flags & TSB_GROW) == TSB_GROW) { 9053 SFMMU_STAT(sf_tsb_grow); 9054 } else if ((flags & TSB_SHRINK) == TSB_SHRINK) { 9055 SFMMU_STAT(sf_tsb_shrink); 9056 } 9057 9058 sfmmu_tsbinfo_free(old_tsbinfo); 9059 9060 (void) sfmmu_hat_enter(sfmmup); 9061 return (TSB_SUCCESS); 9062 } 9063 9064 /* 9065 * This function will re-program hat pgsz array, and invalidate the 9066 * process' context, forcing the process to switch to another 9067 * context on the next TLB miss, and therefore start using the 9068 * TLB that is reprogrammed for the new page sizes. 9069 */ 9070 void 9071 sfmmu_reprog_pgsz_arr(sfmmu_t *sfmmup, uint8_t *tmp_pgsz) 9072 { 9073 int i; 9074 hatlock_t *hatlockp = NULL; 9075 9076 hatlockp = sfmmu_hat_enter(sfmmup); 9077 /* USIII+-IV+ optimization, requires hat lock */ 9078 if (tmp_pgsz) { 9079 for (i = 0; i < mmu_page_sizes; i++) 9080 sfmmup->sfmmu_pgsz[i] = tmp_pgsz[i]; 9081 } 9082 SFMMU_STAT(sf_tlb_reprog_pgsz); 9083 9084 sfmmu_invalidate_ctx(sfmmup); 9085 9086 sfmmu_hat_exit(hatlockp); 9087 } 9088 9089 /* 9090 * This function assumes that there are either four or six supported page 9091 * sizes and at most two programmable TLBs, so we need to decide which 9092 * page sizes are most important and then tell the MMU layer so it 9093 * can adjust the TLB page sizes accordingly (if supported). 9094 * 9095 * If these assumptions change, this function will need to be 9096 * updated to support whatever the new limits are. 9097 * 9098 * The growing flag is nonzero if we are growing the address space, 9099 * and zero if it is shrinking. This allows us to decide whether 9100 * to grow or shrink our TSB, depending upon available memory 9101 * conditions. 9102 */ 9103 static void 9104 sfmmu_check_page_sizes(sfmmu_t *sfmmup, int growing) 9105 { 9106 uint64_t ttecnt[MMU_PAGE_SIZES]; 9107 uint64_t tte8k_cnt, tte4m_cnt; 9108 uint8_t i; 9109 int sectsb_thresh; 9110 9111 /* 9112 * Kernel threads, processes with small address spaces not using 9113 * large pages, and dummy ISM HATs need not apply. 9114 */ 9115 if (sfmmup == ksfmmup || sfmmup->sfmmu_ismhat != NULL) 9116 return; 9117 9118 if ((sfmmup->sfmmu_flags & HAT_LGPG_FLAGS) == 0 && 9119 sfmmup->sfmmu_ttecnt[TTE8K] <= tsb_rss_factor) 9120 return; 9121 9122 for (i = 0; i < mmu_page_sizes; i++) { 9123 ttecnt[i] = SFMMU_TTE_CNT(sfmmup, i); 9124 } 9125 9126 /* Check pagesizes in use, and possibly reprogram DTLB. 
*/ 9127 if (&mmu_check_page_sizes) 9128 mmu_check_page_sizes(sfmmup, ttecnt); 9129 9130 /* 9131 * Calculate the number of 8k ttes to represent the span of these 9132 * pages. 9133 */ 9134 tte8k_cnt = ttecnt[TTE8K] + 9135 (ttecnt[TTE64K] << (MMU_PAGESHIFT64K - MMU_PAGESHIFT)) + 9136 (ttecnt[TTE512K] << (MMU_PAGESHIFT512K - MMU_PAGESHIFT)); 9137 if (mmu_page_sizes == max_mmu_page_sizes) { 9138 tte4m_cnt = ttecnt[TTE4M] + 9139 (ttecnt[TTE32M] << (MMU_PAGESHIFT32M - MMU_PAGESHIFT4M)) + 9140 (ttecnt[TTE256M] << (MMU_PAGESHIFT256M - MMU_PAGESHIFT4M)); 9141 } else { 9142 tte4m_cnt = ttecnt[TTE4M]; 9143 } 9144 9145 /* 9146 * Inflate TSB sizes by a factor of 2 if this process 9147 * uses 4M text pages to minimize extra conflict misses 9148 * in the first TSB since without counting text pages 9149 * 8K TSB may become too small. 9150 * 9151 * Also double the size of the second TSB to minimize 9152 * extra conflict misses due to competition between 4M text pages 9153 * and data pages. 9154 * 9155 * We need to adjust the second TSB allocation threshold by the 9156 * inflation factor, since there is no point in creating a second 9157 * TSB when we know all the mappings can fit in the I/D TLBs. 9158 */ 9159 sectsb_thresh = tsb_sectsb_threshold; 9160 if (sfmmup->sfmmu_flags & HAT_4MTEXT_FLAG) { 9161 tte8k_cnt <<= 1; 9162 tte4m_cnt <<= 1; 9163 sectsb_thresh <<= 1; 9164 } 9165 9166 /* 9167 * Check to see if our TSB is the right size; we may need to 9168 * grow or shrink it. If the process is small, our work is 9169 * finished at this point. 9170 */ 9171 if (tte8k_cnt <= tsb_rss_factor && tte4m_cnt <= sectsb_thresh) { 9172 return; 9173 } 9174 sfmmu_size_tsb(sfmmup, growing, tte8k_cnt, tte4m_cnt, sectsb_thresh); 9175 } 9176 9177 static void 9178 sfmmu_size_tsb(sfmmu_t *sfmmup, int growing, uint64_t tte8k_cnt, 9179 uint64_t tte4m_cnt, int sectsb_thresh) 9180 { 9181 int tsb_bits; 9182 uint_t tsb_szc; 9183 struct tsb_info *tsbinfop; 9184 hatlock_t *hatlockp = NULL; 9185 9186 hatlockp = sfmmu_hat_enter(sfmmup); 9187 ASSERT(hatlockp != NULL); 9188 tsbinfop = sfmmup->sfmmu_tsb; 9189 ASSERT(tsbinfop != NULL); 9190 9191 /* 9192 * If we're growing, select the size based on RSS. If we're 9193 * shrinking, leave some room so we don't have to turn around and 9194 * grow again immediately. 9195 */ 9196 if (growing) 9197 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt); 9198 else 9199 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt << 1); 9200 9201 if (!growing && (tsb_szc < tsbinfop->tsb_szc) && 9202 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) { 9203 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc, 9204 hatlockp, TSB_SHRINK); 9205 } else if (growing && tsb_szc > tsbinfop->tsb_szc && TSB_OK_GROW()) { 9206 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc, 9207 hatlockp, TSB_GROW); 9208 } 9209 tsbinfop = sfmmup->sfmmu_tsb; 9210 9211 /* 9212 * With the TLB and first TSB out of the way, we need to see if 9213 * we need a second TSB for 4M pages. If we managed to reprogram 9214 * the TLB page sizes above, the process will start using this new 9215 * TSB right away; otherwise, it will start using it on the next 9216 * context switch. Either way, it's no big deal so there's no 9217 * synchronization with the trap handlers here unless we grow the 9218 * TSB (in which case it's required to prevent using the old one 9219 * after it's freed). Note: second tsb is required for 32M/256M 9220 * page sizes. 9221 */ 9222 if (tte4m_cnt > sectsb_thresh) { 9223 /* 9224 * If we're growing, select the size based on RSS. 
If we're 9225 * shrinking, leave some room so we don't have to turn 9226 * around and grow again immediately. 9227 */ 9228 if (growing) 9229 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt); 9230 else 9231 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt << 1); 9232 if (tsbinfop->tsb_next == NULL) { 9233 struct tsb_info *newtsb; 9234 int allocflags = SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)? 9235 0 : TSB_ALLOC; 9236 9237 sfmmu_hat_exit(hatlockp); 9238 9239 /* 9240 * Try to allocate a TSB for 4[32|256]M pages. If we 9241 * can't get the size we want, retry w/a minimum sized 9242 * TSB. If that still didn't work, give up; we can 9243 * still run without one. 9244 */ 9245 tsb_bits = (mmu_page_sizes == max_mmu_page_sizes)? 9246 TSB4M|TSB32M|TSB256M:TSB4M; 9247 if ((sfmmu_tsbinfo_alloc(&newtsb, tsb_szc, tsb_bits, 9248 allocflags, sfmmup) != 0) && 9249 (sfmmu_tsbinfo_alloc(&newtsb, TSB_MIN_SZCODE, 9250 tsb_bits, allocflags, sfmmup) != 0)) { 9251 return; 9252 } 9253 9254 hatlockp = sfmmu_hat_enter(sfmmup); 9255 9256 if (sfmmup->sfmmu_tsb->tsb_next == NULL) { 9257 sfmmup->sfmmu_tsb->tsb_next = newtsb; 9258 SFMMU_STAT(sf_tsb_sectsb_create); 9259 sfmmu_setup_tsbinfo(sfmmup); 9260 sfmmu_hat_exit(hatlockp); 9261 return; 9262 } else { 9263 /* 9264 * It's annoying, but possible for us 9265 * to get here.. we dropped the HAT lock 9266 * because of locking order in the kmem 9267 * allocator, and while we were off getting 9268 * our memory, some other thread decided to 9269 * do us a favor and won the race to get a 9270 * second TSB for this process. Sigh. 9271 */ 9272 sfmmu_hat_exit(hatlockp); 9273 sfmmu_tsbinfo_free(newtsb); 9274 return; 9275 } 9276 } 9277 9278 /* 9279 * We have a second TSB, see if it's big enough. 9280 */ 9281 tsbinfop = tsbinfop->tsb_next; 9282 9283 /* 9284 * Check to see if our second TSB is the right size; 9285 * we may need to grow or shrink it. 9286 * To prevent thrashing (e.g. growing the TSB on a 9287 * subsequent map operation), only try to shrink if 9288 * the TSB reach exceeds twice the virtual address 9289 * space size. 9290 */ 9291 if (!growing && (tsb_szc < tsbinfop->tsb_szc) && 9292 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) { 9293 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, 9294 tsb_szc, hatlockp, TSB_SHRINK); 9295 } else if (growing && tsb_szc > tsbinfop->tsb_szc && 9296 TSB_OK_GROW()) { 9297 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, 9298 tsb_szc, hatlockp, TSB_GROW); 9299 } 9300 } 9301 9302 sfmmu_hat_exit(hatlockp); 9303 } 9304 9305 /* 9306 * Free up a sfmmu 9307 * Since the sfmmu is currently embedded in the hat struct we simply zero 9308 * out our fields and free up the ism map blk list if any. 
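 * Each ism block is unlinked from the list, its iblk_nextpa is
 * poisoned to -1, and it is then returned to ism_blk_cache.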
9309 */ 9310 static void 9311 sfmmu_free_sfmmu(sfmmu_t *sfmmup) 9312 { 9313 ism_blk_t *blkp, *nx_blkp; 9314 #ifdef DEBUG 9315 ism_map_t *map; 9316 int i; 9317 #endif 9318 9319 ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0); 9320 ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0); 9321 ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0); 9322 ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0); 9323 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0); 9324 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0); 9325 9326 sfmmup->sfmmu_free = 0; 9327 sfmmup->sfmmu_ismhat = 0; 9328 9329 blkp = sfmmup->sfmmu_iblk; 9330 sfmmup->sfmmu_iblk = NULL; 9331 9332 while (blkp) { 9333 #ifdef DEBUG 9334 map = blkp->iblk_maps; 9335 for (i = 0; i < ISM_MAP_SLOTS; i++) { 9336 ASSERT(map[i].imap_seg == 0); 9337 ASSERT(map[i].imap_ismhat == NULL); 9338 ASSERT(map[i].imap_ment == NULL); 9339 } 9340 #endif 9341 nx_blkp = blkp->iblk_next; 9342 blkp->iblk_next = NULL; 9343 blkp->iblk_nextpa = (uint64_t)-1; 9344 kmem_cache_free(ism_blk_cache, blkp); 9345 blkp = nx_blkp; 9346 } 9347 } 9348 9349 /* 9350 * Locking primitves accessed by HATLOCK macros 9351 */ 9352 9353 #define SFMMU_SPL_MTX (0x0) 9354 #define SFMMU_ML_MTX (0x1) 9355 9356 #define SFMMU_MLSPL_MTX(type, pg) (((type) == SFMMU_SPL_MTX) ? \ 9357 SPL_HASH(pg) : MLIST_HASH(pg)) 9358 9359 kmutex_t * 9360 sfmmu_page_enter(struct page *pp) 9361 { 9362 return (sfmmu_mlspl_enter(pp, SFMMU_SPL_MTX)); 9363 } 9364 9365 void 9366 sfmmu_page_exit(kmutex_t *spl) 9367 { 9368 mutex_exit(spl); 9369 } 9370 9371 int 9372 sfmmu_page_spl_held(struct page *pp) 9373 { 9374 return (sfmmu_mlspl_held(pp, SFMMU_SPL_MTX)); 9375 } 9376 9377 kmutex_t * 9378 sfmmu_mlist_enter(struct page *pp) 9379 { 9380 return (sfmmu_mlspl_enter(pp, SFMMU_ML_MTX)); 9381 } 9382 9383 void 9384 sfmmu_mlist_exit(kmutex_t *mml) 9385 { 9386 mutex_exit(mml); 9387 } 9388 9389 int 9390 sfmmu_mlist_held(struct page *pp) 9391 { 9392 9393 return (sfmmu_mlspl_held(pp, SFMMU_ML_MTX)); 9394 } 9395 9396 /* 9397 * Common code for sfmmu_mlist_enter() and sfmmu_page_enter(). For 9398 * sfmmu_mlist_enter() case mml_table lock array is used and for 9399 * sfmmu_page_enter() sfmmu_page_lock lock array is used. 9400 * 9401 * The lock is taken on a root page so that it protects an operation on all 9402 * constituent pages of a large page pp belongs to. 9403 * 9404 * The routine takes a lock from the appropriate array. The lock is determined 9405 * by hashing the root page. After taking the lock this routine checks if the 9406 * root page has the same size code that was used to determine the root (i.e 9407 * that root hasn't changed). If root page has the expected p_szc field we 9408 * have the right lock and it's returned to the caller. If root's p_szc 9409 * decreased we release the lock and retry from the beginning. This case can 9410 * happen due to hat_page_demote() decreasing p_szc between our load of p_szc 9411 * value and taking the lock. The number of retries due to p_szc decrease is 9412 * limited by the maximum p_szc value. If p_szc is 0 we return the lock 9413 * determined by hashing pp itself. 9414 * 9415 * If our caller doesn't hold a SE_SHARED or SE_EXCL lock on pp it's also 9416 * possible that p_szc can increase. To increase p_szc a thread has to lock 9417 * all constituent pages EXCL and do hat_pageunload() on all of them. All the 9418 * callers that don't hold a page locked recheck if hmeblk through which pp 9419 * was found still maps this pp. If it doesn't map it anymore returned lock 9420 * is immediately dropped. 
Therefore if sfmmu_mlspl_enter() hits the case of 9421 * p_szc increase after taking the lock it returns this lock without further 9422 * retries because in this case the caller doesn't care about which lock was 9423 * taken. The caller will drop it right away. 9424 * 9425 * After the routine returns it's guaranteed that hat_page_demote() can't 9426 * change p_szc field of any of constituent pages of a large page pp belongs 9427 * to as long as pp was either locked at least SHARED prior to this call or 9428 * the caller finds that hment that pointed to this pp still references this 9429 * pp (this also assumes that the caller holds hme hash bucket lock so that 9430 * the same pp can't be remapped into the same hmeblk after it was unmapped by 9431 * hat_pageunload()). 9432 */ 9433 static kmutex_t * 9434 sfmmu_mlspl_enter(struct page *pp, int type) 9435 { 9436 kmutex_t *mtx; 9437 uint_t prev_rszc = UINT_MAX; 9438 page_t *rootpp; 9439 uint_t szc; 9440 uint_t rszc; 9441 uint_t pszc = pp->p_szc; 9442 9443 ASSERT(pp != NULL); 9444 9445 again: 9446 if (pszc == 0) { 9447 mtx = SFMMU_MLSPL_MTX(type, pp); 9448 mutex_enter(mtx); 9449 return (mtx); 9450 } 9451 9452 /* The lock lives in the root page */ 9453 rootpp = PP_GROUPLEADER(pp, pszc); 9454 mtx = SFMMU_MLSPL_MTX(type, rootpp); 9455 mutex_enter(mtx); 9456 9457 /* 9458 * Return mml in the following 3 cases: 9459 * 9460 * 1) If pp itself is root since if its p_szc decreased before we took 9461 * the lock pp is still the root of smaller szc page. And if its p_szc 9462 * increased it doesn't matter what lock we return (see comment in 9463 * front of this routine). 9464 * 9465 * 2) If pp's not root but rootpp is the root of a rootpp->p_szc size 9466 * large page we have the right lock since any previous potential 9467 * hat_page_demote() is done demoting from greater than current root's 9468 * p_szc because hat_page_demote() changes root's p_szc last. No 9469 * further hat_page_demote() can start or be in progress since it 9470 * would need the same lock we currently hold. 9471 * 9472 * 3) If rootpp's p_szc increased since previous iteration it doesn't 9473 * matter what lock we return (see comment in front of this routine). 9474 */ 9475 if (pp == rootpp || (rszc = rootpp->p_szc) == pszc || 9476 rszc >= prev_rszc) { 9477 return (mtx); 9478 } 9479 9480 /* 9481 * hat_page_demote() could have decreased root's p_szc. 9482 * In this case pp's p_szc must also be smaller than pszc. 9483 * Retry. 9484 */ 9485 if (rszc < pszc) { 9486 szc = pp->p_szc; 9487 if (szc < pszc) { 9488 mutex_exit(mtx); 9489 pszc = szc; 9490 goto again; 9491 } 9492 /* 9493 * pp's p_szc increased after it was decreased. 9494 * page cannot be mapped. Return current lock. The caller 9495 * will drop it right away. 9496 */ 9497 return (mtx); 9498 } 9499 9500 /* 9501 * root's p_szc is greater than pp's p_szc. 9502 * hat_page_demote() is not done with all pages 9503 * yet. Wait for it to complete. 
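 * We wait by taking and immediately dropping the mutex hashed from
 * the larger root, which an in-progress hat_page_demote() needs to
 * hold, and then retry from the top.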
9504 */ 9505 mutex_exit(mtx); 9506 rootpp = PP_GROUPLEADER(rootpp, rszc); 9507 mtx = SFMMU_MLSPL_MTX(type, rootpp); 9508 mutex_enter(mtx); 9509 mutex_exit(mtx); 9510 prev_rszc = rszc; 9511 goto again; 9512 } 9513 9514 static int 9515 sfmmu_mlspl_held(struct page *pp, int type) 9516 { 9517 kmutex_t *mtx; 9518 9519 ASSERT(pp != NULL); 9520 /* The lock lives in the root page */ 9521 pp = PP_PAGEROOT(pp); 9522 ASSERT(pp != NULL); 9523 9524 mtx = SFMMU_MLSPL_MTX(type, pp); 9525 return (MUTEX_HELD(mtx)); 9526 } 9527 9528 static uint_t 9529 sfmmu_get_free_hblk(struct hme_blk **hmeblkpp, uint_t critical) 9530 { 9531 struct hme_blk *hblkp; 9532 9533 if (freehblkp != NULL) { 9534 mutex_enter(&freehblkp_lock); 9535 if (freehblkp != NULL) { 9536 /* 9537 * If the current thread owns hblk_reserve, 9538 * let it succeed even if freehblkcnt is really low. 9539 */ 9540 if (freehblkcnt <= HBLK_RESERVE_MIN && !critical) { 9541 SFMMU_STAT(sf_get_free_throttle); 9542 mutex_exit(&freehblkp_lock); 9543 return (0); 9544 } 9545 freehblkcnt--; 9546 *hmeblkpp = freehblkp; 9547 hblkp = *hmeblkpp; 9548 freehblkp = hblkp->hblk_next; 9549 mutex_exit(&freehblkp_lock); 9550 hblkp->hblk_next = NULL; 9551 SFMMU_STAT(sf_get_free_success); 9552 return (1); 9553 } 9554 mutex_exit(&freehblkp_lock); 9555 } 9556 SFMMU_STAT(sf_get_free_fail); 9557 return (0); 9558 } 9559 9560 static uint_t 9561 sfmmu_put_free_hblk(struct hme_blk *hmeblkp, uint_t critical) 9562 { 9563 struct hme_blk *hblkp; 9564 9565 /* 9566 * If the current thread is mapping into kernel space, 9567 * let it succeed even if freehblkcnt is at its max 9568 * so that it will avoid freeing the hblk to kmem. 9569 * This will prevent stack overflow due to 9570 * possible recursion since kmem_cache_free() 9571 * might require creation of a slab which 9572 * in turn needs an hmeblk to map that slab; 9573 * let's break this vicious chain at the first 9574 * opportunity. 9575 */ 9576 if (freehblkcnt < HBLK_RESERVE_CNT || critical) { 9577 mutex_enter(&freehblkp_lock); 9578 if (freehblkcnt < HBLK_RESERVE_CNT || critical) { 9579 SFMMU_STAT(sf_put_free_success); 9580 freehblkcnt++; 9581 hmeblkp->hblk_next = freehblkp; 9582 freehblkp = hmeblkp; 9583 mutex_exit(&freehblkp_lock); 9584 return (1); 9585 } 9586 mutex_exit(&freehblkp_lock); 9587 } 9588 9589 /* 9590 * Bring down freehblkcnt to HBLK_RESERVE_CNT. We are here 9591 * only if freehblkcnt is at least HBLK_RESERVE_CNT *and* 9592 * we are not in the process of mapping into kernel space.
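 * Excess hblks are popped off the free list one at a time, with the
 * count re-checked under freehblkp_lock on each iteration, and are
 * returned to sfmmu8_cache.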
9593 */ 9594 ASSERT(!critical); 9595 while (freehblkcnt > HBLK_RESERVE_CNT) { 9596 mutex_enter(&freehblkp_lock); 9597 if (freehblkcnt > HBLK_RESERVE_CNT) { 9598 freehblkcnt--; 9599 hblkp = freehblkp; 9600 freehblkp = hblkp->hblk_next; 9601 mutex_exit(&freehblkp_lock); 9602 ASSERT(get_hblk_cache(hblkp) == sfmmu8_cache); 9603 kmem_cache_free(sfmmu8_cache, hblkp); 9604 continue; 9605 } 9606 mutex_exit(&freehblkp_lock); 9607 } 9608 SFMMU_STAT(sf_put_free_fail); 9609 return (0); 9610 } 9611 9612 static void 9613 sfmmu_hblk_swap(struct hme_blk *new) 9614 { 9615 struct hme_blk *old, *hblkp, *prev; 9616 uint64_t hblkpa, prevpa, newpa; 9617 caddr_t base, vaddr, endaddr; 9618 struct hmehash_bucket *hmebp; 9619 struct sf_hment *osfhme, *nsfhme; 9620 page_t *pp; 9621 kmutex_t *pml; 9622 tte_t tte; 9623 9624 #ifdef DEBUG 9625 hmeblk_tag hblktag; 9626 struct hme_blk *found; 9627 #endif 9628 old = HBLK_RESERVE; 9629 9630 /* 9631 * save pa before bcopy clobbers it 9632 */ 9633 newpa = new->hblk_nextpa; 9634 9635 base = (caddr_t)get_hblk_base(old); 9636 endaddr = base + get_hblk_span(old); 9637 9638 /* 9639 * acquire hash bucket lock. 9640 */ 9641 hmebp = sfmmu_tteload_acquire_hashbucket(ksfmmup, base, TTE8K); 9642 9643 /* 9644 * copy contents from old to new 9645 */ 9646 bcopy((void *)old, (void *)new, HME8BLK_SZ); 9647 9648 /* 9649 * add new to hash chain 9650 */ 9651 sfmmu_hblk_hash_add(hmebp, new, newpa); 9652 9653 /* 9654 * search hash chain for hblk_reserve; this needs to be performed 9655 * after adding new, otherwise prevpa and prev won't correspond 9656 * to the hblk which is prior to old in hash chain when we call 9657 * sfmmu_hblk_hash_rm to remove old later. 9658 */ 9659 for (prevpa = 0, prev = NULL, 9660 hblkpa = hmebp->hmeh_nextpa, hblkp = hmebp->hmeblkp; 9661 hblkp != NULL && hblkp != old; 9662 prevpa = hblkpa, prev = hblkp, 9663 hblkpa = hblkp->hblk_nextpa, hblkp = hblkp->hblk_next); 9664 9665 if (hblkp != old) 9666 panic("sfmmu_hblk_swap: hblk_reserve not found"); 9667 9668 /* 9669 * p_mapping list is still pointing to hments in hblk_reserve; 9670 * fix up p_mapping list so that they point to hments in new. 9671 * 9672 * Since all these mappings are created by hblk_reserve_thread 9673 * on the way and it's using at least one of the buffers from each of 9674 * the newly minted slabs, there is no danger of any of these 9675 * mappings getting unloaded by another thread. 9676 * 9677 * tsbmiss could only modify ref/mod bits of hments in old/new. 9678 * Since all of these hments hold mappings established by segkmem 9679 * and mappings in segkmem are setup with HAT_NOSYNC, ref/mod bits 9680 * have no meaning for the mappings in hblk_reserve. hments in 9681 * old and new are identical except for ref/mod bits. 
9682 */ 9683 for (vaddr = base; vaddr < endaddr; vaddr += TTEBYTES(TTE8K)) { 9684 9685 HBLKTOHME(osfhme, old, vaddr); 9686 sfmmu_copytte(&osfhme->hme_tte, &tte); 9687 9688 if (TTE_IS_VALID(&tte)) { 9689 if ((pp = osfhme->hme_page) == NULL) 9690 panic("sfmmu_hblk_swap: page not mapped"); 9691 9692 pml = sfmmu_mlist_enter(pp); 9693 9694 if (pp != osfhme->hme_page) 9695 panic("sfmmu_hblk_swap: mapping changed"); 9696 9697 HBLKTOHME(nsfhme, new, vaddr); 9698 9699 HME_ADD(nsfhme, pp); 9700 HME_SUB(osfhme, pp); 9701 9702 sfmmu_mlist_exit(pml); 9703 } 9704 } 9705 9706 /* 9707 * remove old from hash chain 9708 */ 9709 sfmmu_hblk_hash_rm(hmebp, old, prevpa, prev); 9710 9711 #ifdef DEBUG 9712 9713 hblktag.htag_id = ksfmmup; 9714 hblktag.htag_bspage = HME_HASH_BSPAGE(base, HME_HASH_SHIFT(TTE8K)); 9715 hblktag.htag_rehash = HME_HASH_REHASH(TTE8K); 9716 HME_HASH_FAST_SEARCH(hmebp, hblktag, found); 9717 9718 if (found != new) 9719 panic("sfmmu_hblk_swap: new hblk not found"); 9720 #endif 9721 9722 SFMMU_HASH_UNLOCK(hmebp); 9723 9724 /* 9725 * Reset hblk_reserve 9726 */ 9727 bzero((void *)old, HME8BLK_SZ); 9728 old->hblk_nextpa = va_to_pa((caddr_t)old); 9729 } 9730 9731 /* 9732 * Grab the mlist mutex for both pages passed in. 9733 * 9734 * low and high will be returned as pointers to the mutexes for these pages. 9735 * low refers to the mutex residing in the lower bin of the mlist hash, while 9736 * high refers to the mutex residing in the higher bin of the mlist hash. This 9737 * is due to the locking order restrictions on the same thread grabbing 9738 * multiple mlist mutexes. The low lock must be acquired before the high lock. 9739 * 9740 * If both pages hash to the same mutex, only grab that single mutex, and 9741 * high will be returned as NULL 9742 * If the pages hash to different bins in the hash, grab the lower addressed 9743 * lock first and then the higher addressed lock in order to follow the locking 9744 * rules involved with the same thread grabbing multiple mlist mutexes. 9745 * low and high will both have non-NULL values. 9746 */ 9747 static void 9748 sfmmu_mlist_reloc_enter(struct page *targ, struct page *repl, 9749 kmutex_t **low, kmutex_t **high) 9750 { 9751 kmutex_t *mml_targ, *mml_repl; 9752 9753 /* 9754 * no need to do the dance around szc as in sfmmu_mlist_enter() 9755 * because this routine is only called by hat_page_relocate() and all 9756 * targ and repl pages are already locked EXCL so szc can't change. 
9757 */ 9758 9759 mml_targ = MLIST_HASH(PP_PAGEROOT(targ)); 9760 mml_repl = MLIST_HASH(PP_PAGEROOT(repl)); 9761 9762 if (mml_targ == mml_repl) { 9763 *low = mml_targ; 9764 *high = NULL; 9765 } else { 9766 if (mml_targ < mml_repl) { 9767 *low = mml_targ; 9768 *high = mml_repl; 9769 } else { 9770 *low = mml_repl; 9771 *high = mml_targ; 9772 } 9773 } 9774 9775 mutex_enter(*low); 9776 if (*high) 9777 mutex_enter(*high); 9778 } 9779 9780 static void 9781 sfmmu_mlist_reloc_exit(kmutex_t *low, kmutex_t *high) 9782 { 9783 if (high) 9784 mutex_exit(high); 9785 mutex_exit(low); 9786 } 9787 9788 static hatlock_t * 9789 sfmmu_hat_enter(sfmmu_t *sfmmup) 9790 { 9791 hatlock_t *hatlockp; 9792 9793 if (sfmmup != ksfmmup) { 9794 hatlockp = TSB_HASH(sfmmup); 9795 mutex_enter(HATLOCK_MUTEXP(hatlockp)); 9796 return (hatlockp); 9797 } 9798 return (NULL); 9799 } 9800 9801 static hatlock_t * 9802 sfmmu_hat_tryenter(sfmmu_t *sfmmup) 9803 { 9804 hatlock_t *hatlockp; 9805 9806 if (sfmmup != ksfmmup) { 9807 hatlockp = TSB_HASH(sfmmup); 9808 if (mutex_tryenter(HATLOCK_MUTEXP(hatlockp)) == 0) 9809 return (NULL); 9810 return (hatlockp); 9811 } 9812 return (NULL); 9813 } 9814 9815 static void 9816 sfmmu_hat_exit(hatlock_t *hatlockp) 9817 { 9818 if (hatlockp != NULL) 9819 mutex_exit(HATLOCK_MUTEXP(hatlockp)); 9820 } 9821 9822 static void 9823 sfmmu_hat_lock_all(void) 9824 { 9825 int i; 9826 for (i = 0; i < SFMMU_NUM_LOCK; i++) 9827 mutex_enter(HATLOCK_MUTEXP(&hat_lock[i])); 9828 } 9829 9830 static void 9831 sfmmu_hat_unlock_all(void) 9832 { 9833 int i; 9834 for (i = SFMMU_NUM_LOCK - 1; i >= 0; i--) 9835 mutex_exit(HATLOCK_MUTEXP(&hat_lock[i])); 9836 } 9837 9838 int 9839 sfmmu_hat_lock_held(sfmmu_t *sfmmup) 9840 { 9841 ASSERT(sfmmup != ksfmmup); 9842 return (MUTEX_HELD(HATLOCK_MUTEXP(TSB_HASH(sfmmup)))); 9843 } 9844 9845 /* 9846 * Locking primitives to provide consistency between ISM unmap 9847 * and other operations. Since ISM unmap can take a long time, we 9848 * use HAT_ISMBUSY flag (protected by the hatlock) to avoid creating 9849 * contention on the hatlock buckets while ISM segments are being 9850 * unmapped. The tradeoff is that the flags don't prevent priority 9851 * inversion from occurring, so we must request kernel priority in 9852 * case we have to sleep to keep from getting buried while holding 9853 * the HAT_ISMBUSY flag set, which in turn could block other kernel 9854 * threads from running (for example, in sfmmu_uvatopfn()). 9855 */ 9856 static void 9857 sfmmu_ismhat_enter(sfmmu_t *sfmmup, int hatlock_held) 9858 { 9859 hatlock_t *hatlockp; 9860 9861 THREAD_KPRI_REQUEST(); 9862 if (!hatlock_held) 9863 hatlockp = sfmmu_hat_enter(sfmmup); 9864 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) 9865 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp)); 9866 SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY); 9867 if (!hatlock_held) 9868 sfmmu_hat_exit(hatlockp); 9869 } 9870 9871 static void 9872 sfmmu_ismhat_exit(sfmmu_t *sfmmup, int hatlock_held) 9873 { 9874 hatlock_t *hatlockp; 9875 9876 if (!hatlock_held) 9877 hatlockp = sfmmu_hat_enter(sfmmup); 9878 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 9879 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY); 9880 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 9881 if (!hatlock_held) 9882 sfmmu_hat_exit(hatlockp); 9883 THREAD_KPRI_RELEASE(); 9884 } 9885 9886 /* 9887 * 9888 * Algorithm: 9889 * 9890 * (1) if segkmem is not ready, allocate hblk from an array of pre-alloc'ed 9891 * hblks. 
9892 * 9893 * (2) if we are allocating an hblk for mapping a slab in sfmmu_cache, 9894 * 9895 * (a) try to return an hblk from reserve pool of free hblks; 9896 * (b) if the reserve pool is empty, acquire hblk_reserve_lock 9897 * and return hblk_reserve. 9898 * 9899 * (3) call kmem_cache_alloc() to allocate hblk; 9900 * 9901 * (a) if hblk_reserve_lock is held by the current thread, 9902 * atomically replace hblk_reserve by the hblk that is 9903 * returned by kmem_cache_alloc; release hblk_reserve_lock 9904 * and call kmem_cache_alloc() again. 9905 * (b) if reserve pool is not full, add the hblk that is 9906 * returned by kmem_cache_alloc to reserve pool and 9907 * call kmem_cache_alloc again. 9908 * 9909 */ 9910 static struct hme_blk * 9911 sfmmu_hblk_alloc(sfmmu_t *sfmmup, caddr_t vaddr, 9912 struct hmehash_bucket *hmebp, uint_t size, hmeblk_tag hblktag, 9913 uint_t flags) 9914 { 9915 struct hme_blk *hmeblkp = NULL; 9916 struct hme_blk *newhblkp; 9917 struct hme_blk *shw_hblkp = NULL; 9918 struct kmem_cache *sfmmu_cache = NULL; 9919 uint64_t hblkpa; 9920 ulong_t index; 9921 uint_t owner; /* set to 1 if using hblk_reserve */ 9922 uint_t forcefree; 9923 int sleep; 9924 9925 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 9926 9927 /* 9928 * If segkmem is not created yet, allocate from static hmeblks 9929 * created at the end of startup_modules(). See the block comment 9930 * in startup_modules() describing how we estimate the number of 9931 * static hmeblks that will be needed during re-map. 9932 */ 9933 if (!hblk_alloc_dynamic) { 9934 9935 if (size == TTE8K) { 9936 index = nucleus_hblk8.index; 9937 if (index >= nucleus_hblk8.len) { 9938 /* 9939 * If we panic here, see startup_modules() to 9940 * make sure that we are calculating the 9941 * number of hblk8's that we need correctly. 9942 */ 9943 panic("no nucleus hblk8 to allocate"); 9944 } 9945 hmeblkp = 9946 (struct hme_blk *)&nucleus_hblk8.list[index]; 9947 nucleus_hblk8.index++; 9948 SFMMU_STAT(sf_hblk8_nalloc); 9949 } else { 9950 index = nucleus_hblk1.index; 9951 if (nucleus_hblk1.index >= nucleus_hblk1.len) { 9952 /* 9953 * If we panic here, see startup_modules() 9954 * and H8TOH1; most likely you need to 9955 * update the calculation of the number 9956 * of hblk1's the kernel needs to boot. 9957 */ 9958 panic("no nucleus hblk1 to allocate"); 9959 } 9960 hmeblkp = 9961 (struct hme_blk *)&nucleus_hblk1.list[index]; 9962 nucleus_hblk1.index++; 9963 SFMMU_STAT(sf_hblk1_nalloc); 9964 } 9965 9966 goto hblk_init; 9967 } 9968 9969 SFMMU_HASH_UNLOCK(hmebp); 9970 9971 if (sfmmup != KHATID) { 9972 if (mmu_page_sizes == max_mmu_page_sizes) { 9973 if (size < TTE256M) 9974 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr, 9975 size, flags); 9976 } else { 9977 if (size < TTE4M) 9978 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr, 9979 size, flags); 9980 } 9981 } 9982 9983 fill_hblk: 9984 owner = (hblk_reserve_thread == curthread) ? 1 : 0; 9985 9986 if (owner && size == TTE8K) { 9987 9988 /* 9989 * We are really in a tight spot. We already own 9990 * hblk_reserve and we need another hblk. In anticipation 9991 * of this kind of scenario, we specifically set aside 9992 * HBLK_RESERVE_MIN number of hblks to be used exclusively 9993 * by owner of hblk_reserve. 9994 */ 9995 SFMMU_STAT(sf_hblk_recurse_cnt); 9996 9997 if (!sfmmu_get_free_hblk(&hmeblkp, 1)) 9998 panic("sfmmu_hblk_alloc: reserve list is empty"); 9999 10000 goto hblk_verify; 10001 } 10002 10003 ASSERT(!owner); 10004 10005 if ((flags & HAT_NO_KALLOC) == 0) { 10006 10007 sfmmu_cache = ((size == TTE8K) ? 
sfmmu8_cache : sfmmu1_cache); 10008 sleep = ((sfmmup == KHATID) ? KM_NOSLEEP : KM_SLEEP); 10009 10010 if ((hmeblkp = kmem_cache_alloc(sfmmu_cache, sleep)) == NULL) { 10011 hmeblkp = sfmmu_hblk_steal(size); 10012 } else { 10013 /* 10014 * if we are the owner of hblk_reserve, 10015 * swap hblk_reserve with hmeblkp and 10016 * start a fresh life. Hope things go 10017 * better this time. 10018 */ 10019 if (hblk_reserve_thread == curthread) { 10020 ASSERT(sfmmu_cache == sfmmu8_cache); 10021 sfmmu_hblk_swap(hmeblkp); 10022 hblk_reserve_thread = NULL; 10023 mutex_exit(&hblk_reserve_lock); 10024 goto fill_hblk; 10025 } 10026 /* 10027 * let's donate this hblk to our reserve list if 10028 * we are not mapping kernel range 10029 */ 10030 if (size == TTE8K && sfmmup != KHATID) 10031 if (sfmmu_put_free_hblk(hmeblkp, 0)) 10032 goto fill_hblk; 10033 } 10034 } else { 10035 /* 10036 * We are here to map the slab in sfmmu8_cache; let's 10037 * check if we could tap our reserve list; if successful, 10038 * this will avoid the pain of going thru sfmmu_hblk_swap 10039 */ 10040 SFMMU_STAT(sf_hblk_slab_cnt); 10041 if (!sfmmu_get_free_hblk(&hmeblkp, 0)) { 10042 /* 10043 * let's start hblk_reserve dance 10044 */ 10045 SFMMU_STAT(sf_hblk_reserve_cnt); 10046 owner = 1; 10047 mutex_enter(&hblk_reserve_lock); 10048 hmeblkp = HBLK_RESERVE; 10049 hblk_reserve_thread = curthread; 10050 } 10051 } 10052 10053 hblk_verify: 10054 ASSERT(hmeblkp != NULL); 10055 set_hblk_sz(hmeblkp, size); 10056 ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp)); 10057 SFMMU_HASH_LOCK(hmebp); 10058 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp); 10059 if (newhblkp != NULL) { 10060 SFMMU_HASH_UNLOCK(hmebp); 10061 if (hmeblkp != HBLK_RESERVE) { 10062 /* 10063 * This is really tricky! 10064 * 10065 * vmem_alloc(vmem_seg_arena) 10066 * vmem_alloc(vmem_internal_arena) 10067 * segkmem_alloc(heap_arena) 10068 * vmem_alloc(heap_arena) 10069 * page_create() 10070 * hat_memload() 10071 * kmem_cache_free() 10072 * kmem_cache_alloc() 10073 * kmem_slab_create() 10074 * vmem_alloc(kmem_internal_arena) 10075 * segkmem_alloc(heap_arena) 10076 * vmem_alloc(heap_arena) 10077 * page_create() 10078 * hat_memload() 10079 * kmem_cache_free() 10080 * ... 10081 * 10082 * Thus, hat_memload() could call kmem_cache_free 10083 * for enough number of times that we could easily 10084 * hit the bottom of the stack or run out of reserve 10085 * list of vmem_seg structs. So, we must donate 10086 * this hblk to reserve list if it's allocated 10087 * from sfmmu8_cache *and* mapping kernel range. 10088 * We don't need to worry about freeing hmeblk1's 10089 * to kmem since they don't map any kmem slabs. 10090 * 10091 * Note: When segkmem supports largepages, we must 10092 * free hmeblk1's to reserve list as well. 10093 */ 10094 forcefree = (sfmmup == KHATID) ? 1 : 0; 10095 if (size == TTE8K && 10096 sfmmu_put_free_hblk(hmeblkp, forcefree)) { 10097 goto re_verify; 10098 } 10099 ASSERT(sfmmup != KHATID); 10100 kmem_cache_free(get_hblk_cache(hmeblkp), hmeblkp); 10101 } else { 10102 /* 10103 * Hey! we don't need hblk_reserve any more. 
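 *
 * To spell it out: some other thread has already installed an hblk
 * with this tag while we were off allocating, so the hblk_reserve we
 * are holding is no longer needed and its ownership can be dropped
 * below.  This branch is one arm of the allocate-outside-the-lock
 * pattern used throughout this function, which (with purely
 * illustrative names) looks like:
 *
 *	unlock(bucket);
 *	new = allocate();			(may sleep)
 *	lock(bucket);
 *	if ((found = search(bucket, tag)) != NULL)
 *		recycle_or_free(new);		lost the race, use "found"
 *	else
 *		insert(bucket, new);		won the race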
10104 */ 10105 ASSERT(owner); 10106 hblk_reserve_thread = NULL; 10107 mutex_exit(&hblk_reserve_lock); 10108 owner = 0; 10109 } 10110 re_verify: 10111 /* 10112 * let's check if the goodies are still present 10113 */ 10114 SFMMU_HASH_LOCK(hmebp); 10115 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp); 10116 if (newhblkp != NULL) { 10117 /* 10118 * return newhblkp if it's not hblk_reserve; 10119 * if newhblkp is hblk_reserve, return it 10120 * _only if_ we are the owner of hblk_reserve. 10121 */ 10122 if (newhblkp != HBLK_RESERVE || owner) { 10123 return (newhblkp); 10124 } else { 10125 /* 10126 * we just hit hblk_reserve in the hash and 10127 * we are not the owner of that; 10128 * 10129 * block until hblk_reserve_thread completes 10130 * swapping hblk_reserve and try the dance 10131 * once again. 10132 */ 10133 SFMMU_HASH_UNLOCK(hmebp); 10134 mutex_enter(&hblk_reserve_lock); 10135 mutex_exit(&hblk_reserve_lock); 10136 SFMMU_STAT(sf_hblk_reserve_hit); 10137 goto fill_hblk; 10138 } 10139 } else { 10140 /* 10141 * it's no more! try the dance once again. 10142 */ 10143 SFMMU_HASH_UNLOCK(hmebp); 10144 goto fill_hblk; 10145 } 10146 } 10147 10148 hblk_init: 10149 set_hblk_sz(hmeblkp, size); 10150 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 10151 hmeblkp->hblk_next = (struct hme_blk *)NULL; 10152 hmeblkp->hblk_tag = hblktag; 10153 hmeblkp->hblk_shadow = shw_hblkp; 10154 hblkpa = hmeblkp->hblk_nextpa; 10155 hmeblkp->hblk_nextpa = 0; 10156 10157 ASSERT(get_hblk_ttesz(hmeblkp) == size); 10158 ASSERT(get_hblk_span(hmeblkp) == HMEBLK_SPAN(size)); 10159 ASSERT(hmeblkp->hblk_hmecnt == 0); 10160 ASSERT(hmeblkp->hblk_vcnt == 0); 10161 ASSERT(hmeblkp->hblk_lckcnt == 0); 10162 ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp)); 10163 sfmmu_hblk_hash_add(hmebp, hmeblkp, hblkpa); 10164 return (hmeblkp); 10165 } 10166 10167 /* 10168 * This function performs any cleanup required on the hme_blk 10169 * and returns it to the free list. 10170 */ 10171 /* ARGSUSED */ 10172 static void 10173 sfmmu_hblk_free(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp, 10174 uint64_t hblkpa, struct hme_blk **listp) 10175 { 10176 int shw_size, vshift; 10177 struct hme_blk *shw_hblkp; 10178 uint_t shw_mask, newshw_mask; 10179 uintptr_t vaddr; 10180 int size; 10181 uint_t critical; 10182 10183 ASSERT(hmeblkp); 10184 ASSERT(!hmeblkp->hblk_hmecnt); 10185 ASSERT(!hmeblkp->hblk_vcnt); 10186 ASSERT(!hmeblkp->hblk_lckcnt); 10187 ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp)); 10188 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve); 10189 10190 critical = (hblktosfmmu(hmeblkp) == KHATID) ? 
1 : 0; 10191 10192 size = get_hblk_ttesz(hmeblkp); 10193 shw_hblkp = hmeblkp->hblk_shadow; 10194 if (shw_hblkp) { 10195 ASSERT(hblktosfmmu(hmeblkp) != KHATID); 10196 if (mmu_page_sizes == max_mmu_page_sizes) { 10197 ASSERT(size < TTE256M); 10198 } else { 10199 ASSERT(size < TTE4M); 10200 } 10201 10202 shw_size = get_hblk_ttesz(shw_hblkp); 10203 vaddr = get_hblk_base(hmeblkp); 10204 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size); 10205 ASSERT(vshift < 8); 10206 /* 10207 * Atomically clear shadow mask bit 10208 */ 10209 do { 10210 shw_mask = shw_hblkp->hblk_shw_mask; 10211 ASSERT(shw_mask & (1 << vshift)); 10212 newshw_mask = shw_mask & ~(1 << vshift); 10213 newshw_mask = cas32(&shw_hblkp->hblk_shw_mask, 10214 shw_mask, newshw_mask); 10215 } while (newshw_mask != shw_mask); 10216 hmeblkp->hblk_shadow = NULL; 10217 } 10218 hmeblkp->hblk_next = NULL; 10219 hmeblkp->hblk_nextpa = hblkpa; 10220 hmeblkp->hblk_shw_bit = 0; 10221 10222 if (hmeblkp->hblk_nuc_bit == 0) { 10223 10224 if (size == TTE8K && sfmmu_put_free_hblk(hmeblkp, critical)) 10225 return; 10226 10227 hmeblkp->hblk_next = *listp; 10228 *listp = hmeblkp; 10229 } 10230 } 10231 10232 static void 10233 sfmmu_hblks_list_purge(struct hme_blk **listp) 10234 { 10235 struct hme_blk *hmeblkp; 10236 10237 while ((hmeblkp = *listp) != NULL) { 10238 *listp = hmeblkp->hblk_next; 10239 kmem_cache_free(get_hblk_cache(hmeblkp), hmeblkp); 10240 } 10241 } 10242 10243 #define BUCKETS_TO_SEARCH_BEFORE_UNLOAD 30 10244 10245 static uint_t sfmmu_hblk_steal_twice; 10246 static uint_t sfmmu_hblk_steal_count, sfmmu_hblk_steal_unload_count; 10247 10248 /* 10249 * Steal a hmeblk 10250 * Enough hmeblks were allocated at startup (nucleus hmeblks) and also 10251 * hmeblks were added dynamically. We should never ever not be able to 10252 * find one. Look for an unused/unlocked hmeblk in user hash table. 10253 */ 10254 static struct hme_blk * 10255 sfmmu_hblk_steal(int size) 10256 { 10257 static struct hmehash_bucket *uhmehash_steal_hand = NULL; 10258 struct hmehash_bucket *hmebp; 10259 struct hme_blk *hmeblkp = NULL, *pr_hblk; 10260 uint64_t hblkpa, prevpa; 10261 int i; 10262 10263 for (;;) { 10264 hmebp = (uhmehash_steal_hand == NULL) ? uhme_hash : 10265 uhmehash_steal_hand; 10266 ASSERT(hmebp >= uhme_hash && hmebp <= &uhme_hash[UHMEHASH_SZ]); 10267 10268 for (i = 0; hmeblkp == NULL && i <= UHMEHASH_SZ + 10269 BUCKETS_TO_SEARCH_BEFORE_UNLOAD; i++) { 10270 SFMMU_HASH_LOCK(hmebp); 10271 hmeblkp = hmebp->hmeblkp; 10272 hblkpa = hmebp->hmeh_nextpa; 10273 prevpa = 0; 10274 pr_hblk = NULL; 10275 while (hmeblkp) { 10276 /* 10277 * check if it is a hmeblk that is not locked 10278 * and not shared. skip shadow hmeblks with 10279 * shadow_mask set i.e valid count non zero. 10280 */ 10281 if ((get_hblk_ttesz(hmeblkp) == size) && 10282 (hmeblkp->hblk_shw_bit == 0 || 10283 hmeblkp->hblk_vcnt == 0) && 10284 (hmeblkp->hblk_lckcnt == 0)) { 10285 /* 10286 * there is a high probability that we 10287 * will find a free one. search some 10288 * buckets for a free hmeblk initially 10289 * before unloading a valid hmeblk. 
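 *
 * In other words the steal is two-phase (this only restates the
 * test below):
 *
 *	buckets 0 .. BUCKETS_TO_SEARCH_BEFORE_UNLOAD - 1:
 *		only take hblks that are already completely free,
 *		i.e. hblk_vcnt == 0 && hblk_hmecnt == 0;
 *	buckets BUCKETS_TO_SEARCH_BEFORE_UNLOAD and beyond:
 *		also take a valid, unlocked hblk and unload its
 *		mappings via sfmmu_steal_this_hblk().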
10290 */ 10291 if ((hmeblkp->hblk_vcnt == 0 && 10292 hmeblkp->hblk_hmecnt == 0) || (i >= 10293 BUCKETS_TO_SEARCH_BEFORE_UNLOAD)) { 10294 if (sfmmu_steal_this_hblk(hmebp, 10295 hmeblkp, hblkpa, prevpa, 10296 pr_hblk)) { 10297 /* 10298 * Hblk is unloaded 10299 * successfully 10300 */ 10301 break; 10302 } 10303 } 10304 } 10305 pr_hblk = hmeblkp; 10306 prevpa = hblkpa; 10307 hblkpa = hmeblkp->hblk_nextpa; 10308 hmeblkp = hmeblkp->hblk_next; 10309 } 10310 10311 SFMMU_HASH_UNLOCK(hmebp); 10312 if (hmebp++ == &uhme_hash[UHMEHASH_SZ]) 10313 hmebp = uhme_hash; 10314 } 10315 uhmehash_steal_hand = hmebp; 10316 10317 if (hmeblkp != NULL) 10318 break; 10319 10320 /* 10321 * in the worst case, look for a free one in the kernel 10322 * hash table. 10323 */ 10324 for (i = 0, hmebp = khme_hash; i <= KHMEHASH_SZ; i++) { 10325 SFMMU_HASH_LOCK(hmebp); 10326 hmeblkp = hmebp->hmeblkp; 10327 hblkpa = hmebp->hmeh_nextpa; 10328 prevpa = 0; 10329 pr_hblk = NULL; 10330 while (hmeblkp) { 10331 /* 10332 * check if it is free hmeblk 10333 */ 10334 if ((get_hblk_ttesz(hmeblkp) == size) && 10335 (hmeblkp->hblk_lckcnt == 0) && 10336 (hmeblkp->hblk_vcnt == 0) && 10337 (hmeblkp->hblk_hmecnt == 0)) { 10338 if (sfmmu_steal_this_hblk(hmebp, 10339 hmeblkp, hblkpa, prevpa, pr_hblk)) { 10340 break; 10341 } else { 10342 /* 10343 * Cannot fail since we have 10344 * hash lock. 10345 */ 10346 panic("fail to steal?"); 10347 } 10348 } 10349 10350 pr_hblk = hmeblkp; 10351 prevpa = hblkpa; 10352 hblkpa = hmeblkp->hblk_nextpa; 10353 hmeblkp = hmeblkp->hblk_next; 10354 } 10355 10356 SFMMU_HASH_UNLOCK(hmebp); 10357 if (hmebp++ == &khme_hash[KHMEHASH_SZ]) 10358 hmebp = khme_hash; 10359 } 10360 10361 if (hmeblkp != NULL) 10362 break; 10363 sfmmu_hblk_steal_twice++; 10364 } 10365 return (hmeblkp); 10366 } 10367 10368 /* 10369 * This routine does real work to prepare a hblk to be "stolen" by 10370 * unloading the mappings, updating shadow counts .... 10371 * It returns 1 if the block is ready to be reused (stolen), or 0 10372 * means the block cannot be stolen yet- pageunload is still working 10373 * on this hblk. 10374 */ 10375 static int 10376 sfmmu_steal_this_hblk(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp, 10377 uint64_t hblkpa, uint64_t prevpa, struct hme_blk *pr_hblk) 10378 { 10379 int shw_size, vshift; 10380 struct hme_blk *shw_hblkp; 10381 uintptr_t vaddr; 10382 uint_t shw_mask, newshw_mask; 10383 10384 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 10385 10386 /* 10387 * check if the hmeblk is free, unload if necessary 10388 */ 10389 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 10390 sfmmu_t *sfmmup; 10391 demap_range_t dmr; 10392 10393 sfmmup = hblktosfmmu(hmeblkp); 10394 DEMAP_RANGE_INIT(sfmmup, &dmr); 10395 (void) sfmmu_hblk_unload(sfmmup, hmeblkp, 10396 (caddr_t)get_hblk_base(hmeblkp), 10397 get_hblk_endaddr(hmeblkp), &dmr, HAT_UNLOAD); 10398 DEMAP_RANGE_FLUSH(&dmr); 10399 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 10400 /* 10401 * Pageunload is working on the same hblk. 
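 *
 * That is, the sfmmu_hblk_unload() above could not empty this hblk
 * because a concurrent hat_pageunload() is still in the middle of
 * tearing down mappings that hang off it; rather than wait, report
 * failure and let the caller simply move on to the next candidate.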
10402 */ 10403 return (0); 10404 } 10405 10406 sfmmu_hblk_steal_unload_count++; 10407 } 10408 10409 ASSERT(hmeblkp->hblk_lckcnt == 0); 10410 ASSERT(hmeblkp->hblk_vcnt == 0 && hmeblkp->hblk_hmecnt == 0); 10411 10412 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, pr_hblk); 10413 hmeblkp->hblk_nextpa = hblkpa; 10414 10415 shw_hblkp = hmeblkp->hblk_shadow; 10416 if (shw_hblkp) { 10417 shw_size = get_hblk_ttesz(shw_hblkp); 10418 vaddr = get_hblk_base(hmeblkp); 10419 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size); 10420 ASSERT(vshift < 8); 10421 /* 10422 * Atomically clear shadow mask bit 10423 */ 10424 do { 10425 shw_mask = shw_hblkp->hblk_shw_mask; 10426 ASSERT(shw_mask & (1 << vshift)); 10427 newshw_mask = shw_mask & ~(1 << vshift); 10428 newshw_mask = cas32(&shw_hblkp->hblk_shw_mask, 10429 shw_mask, newshw_mask); 10430 } while (newshw_mask != shw_mask); 10431 hmeblkp->hblk_shadow = NULL; 10432 } 10433 10434 /* 10435 * remove shadow bit if we are stealing an unused shadow hmeblk. 10436 * sfmmu_hblk_alloc needs it that way, will set shadow bit later if 10437 * we are indeed allocating a shadow hmeblk. 10438 */ 10439 hmeblkp->hblk_shw_bit = 0; 10440 10441 sfmmu_hblk_steal_count++; 10442 SFMMU_STAT(sf_steal_count); 10443 10444 return (1); 10445 } 10446 10447 struct hme_blk * 10448 sfmmu_hmetohblk(struct sf_hment *sfhme) 10449 { 10450 struct hme_blk *hmeblkp; 10451 struct sf_hment *sfhme0; 10452 struct hme_blk *hblk_dummy = 0; 10453 10454 /* 10455 * No dummy sf_hments, please. 10456 */ 10457 ASSERT(sfhme->hme_tte.ll != 0); 10458 10459 sfhme0 = sfhme - sfhme->hme_tte.tte_hmenum; 10460 hmeblkp = (struct hme_blk *)((uintptr_t)sfhme0 - 10461 (uintptr_t)&hblk_dummy->hblk_hme[0]); 10462 10463 return (hmeblkp); 10464 } 10465 10466 /* 10467 * On swapin, get appropriately sized TSB(s) and clear the HAT_SWAPPED flag. 10468 * If we can't get appropriately sized TSB(s), try for 8K TSB(s) using 10469 * KM_SLEEP allocation. 10470 * 10471 * Return 0 on success, -1 otherwise. 10472 */ 10473 static void 10474 sfmmu_tsb_swapin(sfmmu_t *sfmmup, hatlock_t *hatlockp) 10475 { 10476 struct tsb_info *tsbinfop, *next; 10477 tsb_replace_rc_t rc; 10478 boolean_t gotfirst = B_FALSE; 10479 10480 ASSERT(sfmmup != ksfmmup); 10481 ASSERT(sfmmu_hat_lock_held(sfmmup)); 10482 10483 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPIN)) { 10484 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp)); 10485 } 10486 10487 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 10488 SFMMU_FLAGS_SET(sfmmup, HAT_SWAPIN); 10489 } else { 10490 return; 10491 } 10492 10493 ASSERT(sfmmup->sfmmu_tsb != NULL); 10494 10495 /* 10496 * Loop over all tsbinfo's replacing them with ones that actually have 10497 * a TSB. If any of the replacements ever fail, bail out of the loop. 10498 */ 10499 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; tsbinfop = next) { 10500 ASSERT(tsbinfop->tsb_flags & TSB_SWAPPED); 10501 next = tsbinfop->tsb_next; 10502 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, tsbinfop->tsb_szc, 10503 hatlockp, TSB_SWAPIN); 10504 if (rc != TSB_SUCCESS) { 10505 break; 10506 } 10507 gotfirst = B_TRUE; 10508 } 10509 10510 switch (rc) { 10511 case TSB_SUCCESS: 10512 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN); 10513 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 10514 return; 10515 case TSB_ALLOCFAIL: 10516 break; 10517 default: 10518 panic("sfmmu_replace_tsb returned unrecognized failure code " 10519 "%d", rc); 10520 } 10521 10522 /* 10523 * In this case, we failed to get one of our TSBs. 
If we failed to 10524 * get the first TSB, get one of minimum size (8KB). Walk the list 10525 * and throw away the tsbinfos, starting where the allocation failed; 10526 * we can get by with just one TSB as long as we don't leave the 10527 * SWAPPED tsbinfo structures lying around. 10528 */ 10529 tsbinfop = sfmmup->sfmmu_tsb; 10530 next = tsbinfop->tsb_next; 10531 tsbinfop->tsb_next = NULL; 10532 10533 sfmmu_hat_exit(hatlockp); 10534 for (tsbinfop = next; tsbinfop != NULL; tsbinfop = next) { 10535 next = tsbinfop->tsb_next; 10536 sfmmu_tsbinfo_free(tsbinfop); 10537 } 10538 hatlockp = sfmmu_hat_enter(sfmmup); 10539 10540 /* 10541 * If we don't have any TSBs, get a single 8K TSB for 8K, 64K and 512K 10542 * pages. 10543 */ 10544 if (!gotfirst) { 10545 tsbinfop = sfmmup->sfmmu_tsb; 10546 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, TSB_MIN_SZCODE, 10547 hatlockp, TSB_SWAPIN | TSB_FORCEALLOC); 10548 ASSERT(rc == TSB_SUCCESS); 10549 } else { 10550 /* update machine specific tsbinfo */ 10551 sfmmu_setup_tsbinfo(sfmmup); 10552 } 10553 10554 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN); 10555 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 10556 } 10557 10558 /* 10559 * Handle exceptions from the low-level tsb miss handlers. 10560 * 10561 * There are many scenarios that could land us here: 10562 * 10563 * If the context is invalid we land here. The context can be invalid 10564 * for 3 reasons: 1) we couldn't allocate a new context and now need to 10565 * perform a wrap around operation in order to allocate a new context. 10566 * 2) Context was invalidated to change pagesize programming; 3) the ISM or 10567 * TSB configuration is changing for this process and we are forced into 10568 * here to do a synchronization operation. If the context is valid we can 10569 * be here from the window trap handler. In this case just call trap to handle 10570 * the fault. 10571 * 10572 * Note that the process will run in INVALID_CONTEXT before 10573 * faulting into here and subsequently loading the MMU registers 10574 * (including the TSB base register) associated with this process. 10575 * For this reason, the trap handlers must all test for 10576 * INVALID_CONTEXT before attempting to access any registers other 10577 * than the context registers. 10578 */ 10579 void 10580 sfmmu_tsbmiss_exception(struct regs *rp, uintptr_t tagaccess, uint_t traptype) 10581 { 10582 sfmmu_t *sfmmup; 10583 uint_t ctxnum; 10584 klwp_id_t lwp; 10585 char lwp_save_state; 10586 hatlock_t *hatlockp; 10587 struct tsb_info *tsbinfop; 10588 10589 SFMMU_STAT(sf_tsb_exceptions); 10590 SFMMU_MMU_STAT(mmu_tsb_exceptions); 10591 sfmmup = astosfmmu(curthread->t_procp->p_as); 10592 ctxnum = tagaccess & TAGACC_CTX_MASK; 10593 10594 ASSERT(sfmmup != ksfmmup && ctxnum != KCONTEXT); 10595 ASSERT(sfmmup->sfmmu_ismhat == 0); 10596 /* 10597 * First, make sure we come out of here with a valid ctx, 10598 * since if we don't get one we'll simply loop on the 10599 * faulting instruction. 10600 * 10601 * If the ISM mappings are changing, the TSB is being relocated, or 10602 * the process is swapped out we serialize behind the controlling 10603 * thread with the sfmmu_flags and sfmmu_tsb_cv condition variable. 10604 * Otherwise we synchronize with the context stealer or the thread 10605 * that required us to change out our MMU registers (such 10606 * as a thread changing out our TSB while we were running) by 10607 * locking the HAT and grabbing the rwlock on the context as a 10608 * reader temporarily.
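 *
 * The serialized path below thus follows the usual hat-lock retry
 * pattern (restated here only as a reading aid for the code that
 * follows):
 *
 *	hatlockp = sfmmu_hat_enter(sfmmup);
 * retry:
 *	if (a TSB has TSB_RELOC_FLAG set, or HAT_ISMBUSY is set)
 *		cv_wait(&sfmmup->sfmmu_tsb_cv, ...), then goto retry;
 *	if (HAT_SWAPPED is set)
 *		sfmmu_tsb_swapin(sfmmup, hatlockp), then goto retry;
 *	sfmmu_get_ctx(sfmmup);
 *	sfmmu_hat_exit(hatlockp);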
10609 */ 10610 ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED) || 10611 ctxnum == INVALID_CONTEXT); 10612 10613 if (ctxnum == INVALID_CONTEXT) { 10614 /* 10615 * Must set lwp state to LWP_SYS before 10616 * trying to acquire any adaptive lock 10617 */ 10618 lwp = ttolwp(curthread); 10619 ASSERT(lwp); 10620 lwp_save_state = lwp->lwp_state; 10621 lwp->lwp_state = LWP_SYS; 10622 10623 hatlockp = sfmmu_hat_enter(sfmmup); 10624 retry: 10625 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; 10626 tsbinfop = tsbinfop->tsb_next) { 10627 if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) { 10628 cv_wait(&sfmmup->sfmmu_tsb_cv, 10629 HATLOCK_MUTEXP(hatlockp)); 10630 goto retry; 10631 } 10632 } 10633 10634 /* 10635 * Wait for ISM maps to be updated. 10636 */ 10637 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) { 10638 cv_wait(&sfmmup->sfmmu_tsb_cv, 10639 HATLOCK_MUTEXP(hatlockp)); 10640 goto retry; 10641 } 10642 10643 /* 10644 * If we're swapping in, get TSB(s). Note that we must do 10645 * this before we get a ctx or load the MMU state. Once 10646 * we swap in we have to recheck to make sure the TSB(s) and 10647 * ISM mappings didn't change while we slept. 10648 */ 10649 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 10650 sfmmu_tsb_swapin(sfmmup, hatlockp); 10651 goto retry; 10652 } 10653 10654 sfmmu_get_ctx(sfmmup); 10655 10656 sfmmu_hat_exit(hatlockp); 10657 /* 10658 * Must restore lwp_state if not calling 10659 * trap() for further processing. Restore 10660 * it anyway. 10661 */ 10662 lwp->lwp_state = lwp_save_state; 10663 if (sfmmup->sfmmu_ttecnt[TTE8K] != 0 || 10664 sfmmup->sfmmu_ttecnt[TTE64K] != 0 || 10665 sfmmup->sfmmu_ttecnt[TTE512K] != 0 || 10666 sfmmup->sfmmu_ttecnt[TTE4M] != 0 || 10667 sfmmup->sfmmu_ttecnt[TTE32M] != 0 || 10668 sfmmup->sfmmu_ttecnt[TTE256M] != 0) { 10669 return; 10670 } 10671 if (traptype == T_DATA_PROT) { 10672 traptype = T_DATA_MMU_MISS; 10673 } 10674 } 10675 trap(rp, (caddr_t)tagaccess, traptype, 0); 10676 } 10677 10678 /* 10679 * sfmmu_vatopfn_suspended is called from GET_TTE when TL=0 and 10680 * TTE_SUSPENDED bit set in tte we block on aquiring a page lock 10681 * rather than spinning to avoid send mondo timeouts with 10682 * interrupts enabled. When the lock is acquired it is immediately 10683 * released and we return back to sfmmu_vatopfn just after 10684 * the GET_TTE call. 10685 */ 10686 void 10687 sfmmu_vatopfn_suspended(caddr_t vaddr, sfmmu_t *sfmmu, tte_t *ttep) 10688 { 10689 struct page **pp; 10690 10691 (void) as_pagelock(sfmmu->sfmmu_as, &pp, vaddr, TTE_CSZ(ttep), S_WRITE); 10692 as_pageunlock(sfmmu->sfmmu_as, pp, vaddr, TTE_CSZ(ttep), S_WRITE); 10693 } 10694 10695 /* 10696 * sfmmu_tsbmiss_suspended is called from GET_TTE when TL>0 and 10697 * TTE_SUSPENDED bit set in tte. We do this so that we can handle 10698 * cross traps which cannot be handled while spinning in the 10699 * trap handlers. Simply enter and exit the kpr_suspendlock spin 10700 * mutex, which is held by the holder of the suspend bit, and then 10701 * retry the trapped instruction after unwinding. 10702 */ 10703 /*ARGSUSED*/ 10704 void 10705 sfmmu_tsbmiss_suspended(struct regs *rp, uintptr_t tagacc, uint_t traptype) 10706 { 10707 ASSERT(curthread != kreloc_thread); 10708 mutex_enter(&kpr_suspendlock); 10709 mutex_exit(&kpr_suspendlock); 10710 } 10711 10712 /* 10713 * Special routine to flush out ism mappings- TSBs, TLBs and D-caches. 10714 * This routine may be called with all cpu's captured. Therefore, the 10715 * caller is responsible for holding all locks and disabling kernel 10716 * preemption. 
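 *
 * As a purely illustrative sketch (compiled out below, and not part
 * of the original code), one shape a caller could take is shown here;
 * flush_one_ism_page() is a hypothetical name, while ism_mlist_lock,
 * kpreempt_disable() and CACHE_FLUSH are the real symbols used in
 * this file:
 */
#ifdef notdef	/* illustrative sketch only */
static void
flush_one_ism_page(caddr_t addr, sfmmu_t *ism_sfmmup, struct hme_blk *hmeblkp,
    pfn_t pfnum)
{
	kpreempt_disable();		/* stay on this cpu for the xcalls */
	mutex_enter(&ism_mlist_lock);	/* the single ism mapping list lock */
	sfmmu_ismtlbcache_demap(addr, ism_sfmmup, hmeblkp, pfnum, CACHE_FLUSH);
	mutex_exit(&ism_mlist_lock);
	kpreempt_enable();
}
#endif	/* notdef */
/*
 * End of illustrative sketch.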
10717 */ 10718 /* ARGSUSED */ 10719 static void 10720 sfmmu_ismtlbcache_demap(caddr_t addr, sfmmu_t *ism_sfmmup, 10721 struct hme_blk *hmeblkp, pfn_t pfnum, int cache_flush_flag) 10722 { 10723 cpuset_t cpuset; 10724 caddr_t va; 10725 ism_ment_t *ment; 10726 sfmmu_t *sfmmup; 10727 #ifdef VAC 10728 int vcolor; 10729 #endif 10730 int ttesz; 10731 10732 /* 10733 * Walk the ism_hat's mapping list and flush the page 10734 * from every hat sharing this ism_hat. This routine 10735 * may be called while all cpu's have been captured. 10736 * Therefore we can't attempt to grab any locks. For now 10737 * this means we will protect the ism mapping list under 10738 * a single lock which will be grabbed by the caller. 10739 * If hat_share/unshare scalibility becomes a performance 10740 * problem then we may need to re-think ism mapping list locking. 10741 */ 10742 ASSERT(ism_sfmmup->sfmmu_ismhat); 10743 ASSERT(MUTEX_HELD(&ism_mlist_lock)); 10744 addr = addr - ISMID_STARTADDR; 10745 for (ment = ism_sfmmup->sfmmu_iment; ment; ment = ment->iment_next) { 10746 10747 sfmmup = ment->iment_hat; 10748 10749 va = ment->iment_base_va; 10750 va = (caddr_t)((uintptr_t)va + (uintptr_t)addr); 10751 10752 /* 10753 * Flush TSB of ISM mappings. 10754 */ 10755 ttesz = get_hblk_ttesz(hmeblkp); 10756 if (ttesz == TTE8K || ttesz == TTE4M) { 10757 sfmmu_unload_tsb(sfmmup, va, ttesz); 10758 } else { 10759 caddr_t sva = va; 10760 caddr_t eva; 10761 ASSERT(addr == (caddr_t)get_hblk_base(hmeblkp)); 10762 eva = sva + get_hblk_span(hmeblkp); 10763 sfmmu_unload_tsb_range(sfmmup, sva, eva, ttesz); 10764 } 10765 10766 cpuset = sfmmup->sfmmu_cpusran; 10767 CPUSET_AND(cpuset, cpu_ready_set); 10768 CPUSET_DEL(cpuset, CPU->cpu_id); 10769 10770 SFMMU_XCALL_STATS(sfmmup); 10771 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)va, 10772 (uint64_t)sfmmup); 10773 10774 vtag_flushpage(va, (uint64_t)sfmmup); 10775 10776 #ifdef VAC 10777 /* 10778 * Flush D$ 10779 * When flushing D$ we must flush all 10780 * cpu's. See sfmmu_cache_flush(). 10781 */ 10782 if (cache_flush_flag == CACHE_FLUSH) { 10783 cpuset = cpu_ready_set; 10784 CPUSET_DEL(cpuset, CPU->cpu_id); 10785 10786 SFMMU_XCALL_STATS(sfmmup); 10787 vcolor = addr_to_vcolor(va); 10788 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor); 10789 vac_flushpage(pfnum, vcolor); 10790 } 10791 #endif /* VAC */ 10792 } 10793 } 10794 10795 /* 10796 * Demaps the TSB, CPU caches, and flushes all TLBs on all CPUs of 10797 * a particular virtual address and ctx. If noflush is set we do not 10798 * flush the TLB/TSB. This function may or may not be called with the 10799 * HAT lock held. 10800 */ 10801 static void 10802 sfmmu_tlbcache_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp, 10803 pfn_t pfnum, int tlb_noflush, int cpu_flag, int cache_flush_flag, 10804 int hat_lock_held) 10805 { 10806 #ifdef VAC 10807 int vcolor; 10808 #endif 10809 cpuset_t cpuset; 10810 hatlock_t *hatlockp; 10811 10812 #if defined(lint) && !defined(VAC) 10813 pfnum = pfnum; 10814 cpu_flag = cpu_flag; 10815 cache_flush_flag = cache_flush_flag; 10816 #endif 10817 /* 10818 * There is no longer a need to protect against ctx being 10819 * stolen here since we don't store the ctx in the TSB anymore. 10820 */ 10821 #ifdef VAC 10822 vcolor = addr_to_vcolor(addr); 10823 #endif 10824 10825 /* 10826 * We must hold the hat lock during the flush of TLB, 10827 * to avoid a race with sfmmu_invalidate_ctx(), where 10828 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT, 10829 * causing TLB demap routine to skip flush on that MMU. 
10830 * If the context on a MMU has already been set to 10831 * INVALID_CONTEXT, we just get an extra flush on 10832 * that MMU. 10833 */ 10834 if (!hat_lock_held && !tlb_noflush) 10835 hatlockp = sfmmu_hat_enter(sfmmup); 10836 10837 kpreempt_disable(); 10838 if (!tlb_noflush) { 10839 /* 10840 * Flush the TSB and TLB. 10841 */ 10842 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp); 10843 10844 cpuset = sfmmup->sfmmu_cpusran; 10845 CPUSET_AND(cpuset, cpu_ready_set); 10846 CPUSET_DEL(cpuset, CPU->cpu_id); 10847 10848 SFMMU_XCALL_STATS(sfmmup); 10849 10850 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, 10851 (uint64_t)sfmmup); 10852 10853 vtag_flushpage(addr, (uint64_t)sfmmup); 10854 } 10855 10856 if (!hat_lock_held && !tlb_noflush) 10857 sfmmu_hat_exit(hatlockp); 10858 10859 #ifdef VAC 10860 /* 10861 * Flush the D$ 10862 * 10863 * Even if the ctx is stolen, we need to flush the 10864 * cache. Our ctx stealer only flushes the TLBs. 10865 */ 10866 if (cache_flush_flag == CACHE_FLUSH) { 10867 if (cpu_flag & FLUSH_ALL_CPUS) { 10868 cpuset = cpu_ready_set; 10869 } else { 10870 cpuset = sfmmup->sfmmu_cpusran; 10871 CPUSET_AND(cpuset, cpu_ready_set); 10872 } 10873 CPUSET_DEL(cpuset, CPU->cpu_id); 10874 SFMMU_XCALL_STATS(sfmmup); 10875 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor); 10876 vac_flushpage(pfnum, vcolor); 10877 } 10878 #endif /* VAC */ 10879 kpreempt_enable(); 10880 } 10881 10882 /* 10883 * Demaps the TSB and flushes all TLBs on all cpus for a particular virtual 10884 * address and ctx. If noflush is set we do not currently do anything. 10885 * This function may or may not be called with the HAT lock held. 10886 */ 10887 static void 10888 sfmmu_tlb_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp, 10889 int tlb_noflush, int hat_lock_held) 10890 { 10891 cpuset_t cpuset; 10892 hatlock_t *hatlockp; 10893 10894 /* 10895 * If the process is exiting we have nothing to do. 10896 */ 10897 if (tlb_noflush) 10898 return; 10899 10900 /* 10901 * Flush TSB. 10902 */ 10903 if (!hat_lock_held) 10904 hatlockp = sfmmu_hat_enter(sfmmup); 10905 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp); 10906 10907 kpreempt_disable(); 10908 10909 cpuset = sfmmup->sfmmu_cpusran; 10910 CPUSET_AND(cpuset, cpu_ready_set); 10911 CPUSET_DEL(cpuset, CPU->cpu_id); 10912 10913 SFMMU_XCALL_STATS(sfmmup); 10914 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, (uint64_t)sfmmup); 10915 10916 vtag_flushpage(addr, (uint64_t)sfmmup); 10917 10918 if (!hat_lock_held) 10919 sfmmu_hat_exit(hatlockp); 10920 10921 kpreempt_enable(); 10922 10923 } 10924 10925 /* 10926 * Special case of sfmmu_tlb_demap for MMU_PAGESIZE hblks. Use the xcall 10927 * call handler that can flush a range of pages to save on xcalls. 10928 */ 10929 static int sfmmu_xcall_save; 10930 10931 static void 10932 sfmmu_tlb_range_demap(demap_range_t *dmrp) 10933 { 10934 sfmmu_t *sfmmup = dmrp->dmr_sfmmup; 10935 hatlock_t *hatlockp; 10936 cpuset_t cpuset; 10937 uint64_t sfmmu_pgcnt; 10938 pgcnt_t pgcnt = 0; 10939 int pgunload = 0; 10940 int dirtypg = 0; 10941 caddr_t addr = dmrp->dmr_addr; 10942 caddr_t eaddr; 10943 uint64_t bitvec = dmrp->dmr_bitvec; 10944 10945 ASSERT(bitvec & 1); 10946 10947 /* 10948 * Flush TSB and calculate number of pages to flush. 10949 */ 10950 while (bitvec != 0) { 10951 dirtypg = 0; 10952 /* 10953 * Find the first page to flush and then count how many 10954 * pages there are after it that also need to be flushed. 10955 * This way the number of TSB flushes is minimized. 
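 *
 * Worked example (purely illustrative): if dmr_bitvec were 0x1d
 * (binary 11101) the code below would issue two TSB flushes, not
 * five: one sfmmu_unload_tsb_range() for the single dirty page at
 * dmr_addr, and, after skipping the one clean page, a second call
 * covering the run of three dirty pages that follows.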
10956 */ 10957 while ((bitvec & 1) == 0) { 10958 pgcnt++; 10959 addr += MMU_PAGESIZE; 10960 bitvec >>= 1; 10961 } 10962 while (bitvec & 1) { 10963 dirtypg++; 10964 bitvec >>= 1; 10965 } 10966 eaddr = addr + ptob(dirtypg); 10967 hatlockp = sfmmu_hat_enter(sfmmup); 10968 sfmmu_unload_tsb_range(sfmmup, addr, eaddr, TTE8K); 10969 sfmmu_hat_exit(hatlockp); 10970 pgunload += dirtypg; 10971 addr = eaddr; 10972 pgcnt += dirtypg; 10973 } 10974 10975 ASSERT((pgcnt<<MMU_PAGESHIFT) <= dmrp->dmr_endaddr - dmrp->dmr_addr); 10976 if (sfmmup->sfmmu_free == 0) { 10977 addr = dmrp->dmr_addr; 10978 bitvec = dmrp->dmr_bitvec; 10979 10980 /* 10981 * make sure it has SFMMU_PGCNT_SHIFT bits only, 10982 * as it will be used to pack argument for xt_some 10983 */ 10984 ASSERT((pgcnt > 0) && 10985 (pgcnt <= (1 << SFMMU_PGCNT_SHIFT))); 10986 10987 /* 10988 * Encode pgcnt as (pgcnt -1 ), and pass (pgcnt - 1) in 10989 * the low 6 bits of sfmmup. This is doable since pgcnt 10990 * always >= 1. 10991 */ 10992 ASSERT(!((uint64_t)sfmmup & SFMMU_PGCNT_MASK)); 10993 sfmmu_pgcnt = (uint64_t)sfmmup | 10994 ((pgcnt - 1) & SFMMU_PGCNT_MASK); 10995 10996 /* 10997 * We must hold the hat lock during the flush of TLB, 10998 * to avoid a race with sfmmu_invalidate_ctx(), where 10999 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT, 11000 * causing TLB demap routine to skip flush on that MMU. 11001 * If the context on a MMU has already been set to 11002 * INVALID_CONTEXT, we just get an extra flush on 11003 * that MMU. 11004 */ 11005 hatlockp = sfmmu_hat_enter(sfmmup); 11006 kpreempt_disable(); 11007 11008 cpuset = sfmmup->sfmmu_cpusran; 11009 CPUSET_AND(cpuset, cpu_ready_set); 11010 CPUSET_DEL(cpuset, CPU->cpu_id); 11011 11012 SFMMU_XCALL_STATS(sfmmup); 11013 xt_some(cpuset, vtag_flush_pgcnt_tl1, (uint64_t)addr, 11014 sfmmu_pgcnt); 11015 11016 for (; bitvec != 0; bitvec >>= 1) { 11017 if (bitvec & 1) 11018 vtag_flushpage(addr, (uint64_t)sfmmup); 11019 addr += MMU_PAGESIZE; 11020 } 11021 kpreempt_enable(); 11022 sfmmu_hat_exit(hatlockp); 11023 11024 sfmmu_xcall_save += (pgunload-1); 11025 } 11026 dmrp->dmr_bitvec = 0; 11027 } 11028 11029 /* 11030 * In cases where we need to synchronize with TLB/TSB miss trap 11031 * handlers, _and_ need to flush the TLB, it's a lot easier to 11032 * throw away the context from the process than to do a 11033 * special song and dance to keep things consistent for the 11034 * handlers. 11035 * 11036 * Since the process suddenly ends up without a context and our caller 11037 * holds the hat lock, threads that fault after this function is called 11038 * will pile up on the lock. We can then do whatever we need to 11039 * atomically from the context of the caller. The first blocked thread 11040 * to resume executing will get the process a new context, and the 11041 * process will resume executing. 11042 * 11043 * One added advantage of this approach is that on MMUs that 11044 * support a "flush all" operation, we will delay the flush until 11045 * cnum wrap-around, and then flush the TLB one time. This 11046 * is rather rare, so it's a lot less expensive than making 8000 11047 * x-calls to flush the TLB 8000 times. 11048 * 11049 * A per-process (PP) lock is used to synchronize ctx allocations in 11050 * resume() and ctx invalidations here. 
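 *
 * As a reading aid, the function below boils down to:
 *
 *	1. grab sfmmu_ctx_lock (the PP lock) with interrupts disabled
 *	   and set sfmmu_ctxs[i].cnum = INVALID_CONTEXT for every
 *	   context domain;
 *	2. cross-call the other cpus this hat has run on so they raise
 *	   a tsb exception and notice the invalidation;
 *	3. if the local cpu is currently using one of the invalidated
 *	   contexts, switch its secondary context to INVALID_CONTEXT
 *	   and clear the user tsbinfo as well.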
11051 */ 11052 static void 11053 sfmmu_invalidate_ctx(sfmmu_t *sfmmup) 11054 { 11055 cpuset_t cpuset; 11056 int cnum, currcnum; 11057 mmu_ctx_t *mmu_ctxp; 11058 int i; 11059 uint_t pstate_save; 11060 11061 SFMMU_STAT(sf_ctx_inv); 11062 11063 ASSERT(sfmmu_hat_lock_held(sfmmup)); 11064 ASSERT(sfmmup != ksfmmup); 11065 11066 kpreempt_disable(); 11067 11068 mmu_ctxp = CPU_MMU_CTXP(CPU); 11069 ASSERT(mmu_ctxp); 11070 ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms); 11071 ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]); 11072 11073 currcnum = sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum; 11074 11075 pstate_save = sfmmu_disable_intrs(); 11076 11077 lock_set(&sfmmup->sfmmu_ctx_lock); /* acquire PP lock */ 11078 /* set HAT cnum invalid across all context domains. */ 11079 for (i = 0; i < max_mmu_ctxdoms; i++) { 11080 11081 cnum = sfmmup->sfmmu_ctxs[i].cnum; 11082 if (cnum == INVALID_CONTEXT) { 11083 continue; 11084 } 11085 11086 sfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT; 11087 } 11088 membar_enter(); /* make sure globally visible to all CPUs */ 11089 lock_clear(&sfmmup->sfmmu_ctx_lock); /* release PP lock */ 11090 11091 sfmmu_enable_intrs(pstate_save); 11092 11093 cpuset = sfmmup->sfmmu_cpusran; 11094 CPUSET_DEL(cpuset, CPU->cpu_id); 11095 CPUSET_AND(cpuset, cpu_ready_set); 11096 if (!CPUSET_ISNULL(cpuset)) { 11097 SFMMU_XCALL_STATS(sfmmup); 11098 xt_some(cpuset, sfmmu_raise_tsb_exception, 11099 (uint64_t)sfmmup, INVALID_CONTEXT); 11100 xt_sync(cpuset); 11101 SFMMU_STAT(sf_tsb_raise_exception); 11102 SFMMU_MMU_STAT(mmu_tsb_raise_exception); 11103 } 11104 11105 /* 11106 * If the hat to-be-invalidated is the same as the current 11107 * process on local CPU we need to invalidate 11108 * this CPU context as well. 11109 */ 11110 if ((sfmmu_getctx_sec() == currcnum) && 11111 (currcnum != INVALID_CONTEXT)) { 11112 sfmmu_setctx_sec(INVALID_CONTEXT); 11113 sfmmu_clear_utsbinfo(); 11114 } 11115 11116 kpreempt_enable(); 11117 11118 /* 11119 * we hold the hat lock, so nobody should allocate a context 11120 * for us yet 11121 */ 11122 ASSERT(sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum == INVALID_CONTEXT); 11123 } 11124 11125 #ifdef VAC 11126 /* 11127 * We need to flush the cache in all cpus. It is possible that 11128 * a process referenced a page as cacheable but has sinced exited 11129 * and cleared the mapping list. We still to flush it but have no 11130 * state so all cpus is the only alternative. 11131 */ 11132 void 11133 sfmmu_cache_flush(pfn_t pfnum, int vcolor) 11134 { 11135 cpuset_t cpuset; 11136 11137 kpreempt_disable(); 11138 cpuset = cpu_ready_set; 11139 CPUSET_DEL(cpuset, CPU->cpu_id); 11140 SFMMU_XCALL_STATS(NULL); /* account to any ctx */ 11141 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor); 11142 xt_sync(cpuset); 11143 vac_flushpage(pfnum, vcolor); 11144 kpreempt_enable(); 11145 } 11146 11147 void 11148 sfmmu_cache_flushcolor(int vcolor, pfn_t pfnum) 11149 { 11150 cpuset_t cpuset; 11151 11152 ASSERT(vcolor >= 0); 11153 11154 kpreempt_disable(); 11155 cpuset = cpu_ready_set; 11156 CPUSET_DEL(cpuset, CPU->cpu_id); 11157 SFMMU_XCALL_STATS(NULL); /* account to any ctx */ 11158 xt_some(cpuset, vac_flushcolor_tl1, vcolor, pfnum); 11159 xt_sync(cpuset); 11160 vac_flushcolor(vcolor, pfnum); 11161 kpreempt_enable(); 11162 } 11163 #endif /* VAC */ 11164 11165 /* 11166 * We need to prevent processes from accessing the TSB using a cached physical 11167 * address. 
It's alright if they try to access the TSB via virtual address 11168 * since they will just fault on that virtual address once the mapping has 11169 * been suspended. 11170 */ 11171 #pragma weak sendmondo_in_recover 11172 11173 /* ARGSUSED */ 11174 static int 11175 sfmmu_tsb_pre_relocator(caddr_t va, uint_t tsbsz, uint_t flags, void *tsbinfo) 11176 { 11177 hatlock_t *hatlockp; 11178 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo; 11179 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu; 11180 extern uint32_t sendmondo_in_recover; 11181 11182 if (flags != HAT_PRESUSPEND) 11183 return (0); 11184 11185 hatlockp = sfmmu_hat_enter(sfmmup); 11186 11187 tsbinfop->tsb_flags |= TSB_RELOC_FLAG; 11188 11189 /* 11190 * For Cheetah+ Erratum 25: 11191 * Wait for any active recovery to finish. We can't risk 11192 * relocating the TSB of the thread running mondo_recover_proc() 11193 * since, if we did that, we would deadlock. The scenario we are 11194 * trying to avoid is as follows: 11195 * 11196 * THIS CPU RECOVER CPU 11197 * -------- ----------- 11198 * Begins recovery, walking through TSB 11199 * hat_pagesuspend() TSB TTE 11200 * TLB miss on TSB TTE, spins at TL1 11201 * xt_sync() 11202 * send_mondo_timeout() 11203 * mondo_recover_proc() 11204 * ((deadlocked)) 11205 * 11206 * The second half of the workaround is that mondo_recover_proc() 11207 * checks to see if the tsb_info has the RELOC flag set, and if it 11208 * does, it skips over that TSB without ever touching tsbinfop->tsb_va 11209 * and hence avoiding the TLB miss that could result in a deadlock. 11210 */ 11211 if (&sendmondo_in_recover) { 11212 membar_enter(); /* make sure RELOC flag visible */ 11213 while (sendmondo_in_recover) { 11214 drv_usecwait(1); 11215 membar_consumer(); 11216 } 11217 } 11218 11219 sfmmu_invalidate_ctx(sfmmup); 11220 sfmmu_hat_exit(hatlockp); 11221 11222 return (0); 11223 } 11224 11225 /* ARGSUSED */ 11226 static int 11227 sfmmu_tsb_post_relocator(caddr_t va, uint_t tsbsz, uint_t flags, 11228 void *tsbinfo, pfn_t newpfn) 11229 { 11230 hatlock_t *hatlockp; 11231 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo; 11232 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu; 11233 11234 if (flags != HAT_POSTUNSUSPEND) 11235 return (0); 11236 11237 hatlockp = sfmmu_hat_enter(sfmmup); 11238 11239 SFMMU_STAT(sf_tsb_reloc); 11240 11241 /* 11242 * The process may have swapped out while we were relocating one 11243 * of its TSBs. If so, don't bother doing the setup since the 11244 * process can't be using the memory anymore. 11245 */ 11246 if ((tsbinfop->tsb_flags & TSB_SWAPPED) == 0) { 11247 ASSERT(va == tsbinfop->tsb_va); 11248 sfmmu_tsbinfo_setup_phys(tsbinfop, newpfn); 11249 sfmmu_setup_tsbinfo(sfmmup); 11250 11251 if (tsbinfop->tsb_flags & TSB_FLUSH_NEEDED) { 11252 sfmmu_inv_tsb(tsbinfop->tsb_va, 11253 TSB_BYTES(tsbinfop->tsb_szc)); 11254 tsbinfop->tsb_flags &= ~TSB_FLUSH_NEEDED; 11255 } 11256 } 11257 11258 membar_exit(); 11259 tsbinfop->tsb_flags &= ~TSB_RELOC_FLAG; 11260 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 11261 11262 sfmmu_hat_exit(hatlockp); 11263 11264 return (0); 11265 } 11266 11267 /* 11268 * Allocate and initialize a tsb_info structure. Note that we may or may not 11269 * allocate a TSB here, depending on the flags passed in. 
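 *
 * A compiled-out usage sketch follows (illustrative only, not part of
 * the original code).  TSB_MIN_SZCODE, TSB_ALLOC, TSB_FORCEALLOC and
 * the TSB8K|TSB64K|TSB512K mask are the real values used elsewhere in
 * this file; example_alloc_min_tsb() is a hypothetical name:
 */
#ifdef notdef	/* illustrative sketch only */
static int
example_alloc_min_tsb(sfmmu_t *sfmmup)
{
	struct tsb_info *newtsb;

	/*
	 * Ask for the smallest TSB, covering 8K/64K/512K translations.
	 * TSB_FORCEALLOC means the call may sleep but will not fail.
	 */
	return (sfmmu_tsbinfo_alloc(&newtsb, TSB_MIN_SZCODE,
	    TSB8K|TSB64K|TSB512K, TSB_ALLOC | TSB_FORCEALLOC, sfmmup));
}
#endif	/* notdef */
/*
 * End of illustrative sketch.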
11270 */ 11271 static int 11272 sfmmu_tsbinfo_alloc(struct tsb_info **tsbinfopp, int tsb_szc, int tte_sz_mask, 11273 uint_t flags, sfmmu_t *sfmmup) 11274 { 11275 int err; 11276 11277 *tsbinfopp = (struct tsb_info *)kmem_cache_alloc( 11278 sfmmu_tsbinfo_cache, KM_SLEEP); 11279 11280 if ((err = sfmmu_init_tsbinfo(*tsbinfopp, tte_sz_mask, 11281 tsb_szc, flags, sfmmup)) != 0) { 11282 kmem_cache_free(sfmmu_tsbinfo_cache, *tsbinfopp); 11283 SFMMU_STAT(sf_tsb_allocfail); 11284 *tsbinfopp = NULL; 11285 return (err); 11286 } 11287 SFMMU_STAT(sf_tsb_alloc); 11288 11289 /* 11290 * Bump the TSB size counters for this TSB size. 11291 */ 11292 (*(((int *)&sfmmu_tsbsize_stat) + tsb_szc))++; 11293 return (0); 11294 } 11295 11296 static void 11297 sfmmu_tsb_free(struct tsb_info *tsbinfo) 11298 { 11299 caddr_t tsbva = tsbinfo->tsb_va; 11300 uint_t tsb_size = TSB_BYTES(tsbinfo->tsb_szc); 11301 struct kmem_cache *kmem_cachep = tsbinfo->tsb_cache; 11302 vmem_t *vmp = tsbinfo->tsb_vmp; 11303 11304 /* 11305 * If we allocated this TSB from relocatable kernel memory, then we 11306 * need to uninstall the callback handler. 11307 */ 11308 if (tsbinfo->tsb_cache != sfmmu_tsb8k_cache) { 11309 uintptr_t slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT; 11310 caddr_t slab_vaddr = (caddr_t)((uintptr_t)tsbva & slab_mask); 11311 page_t **ppl; 11312 int ret; 11313 11314 ret = as_pagelock(&kas, &ppl, slab_vaddr, PAGESIZE, S_WRITE); 11315 ASSERT(ret == 0); 11316 hat_delete_callback(tsbva, (uint_t)tsb_size, (void *)tsbinfo, 11317 0, NULL); 11318 as_pageunlock(&kas, ppl, slab_vaddr, PAGESIZE, S_WRITE); 11319 } 11320 11321 if (kmem_cachep != NULL) { 11322 kmem_cache_free(kmem_cachep, tsbva); 11323 } else { 11324 vmem_xfree(vmp, (void *)tsbva, tsb_size); 11325 } 11326 tsbinfo->tsb_va = (caddr_t)0xbad00bad; 11327 atomic_add_64(&tsb_alloc_bytes, -(int64_t)tsb_size); 11328 } 11329 11330 static void 11331 sfmmu_tsbinfo_free(struct tsb_info *tsbinfo) 11332 { 11333 if ((tsbinfo->tsb_flags & TSB_SWAPPED) == 0) { 11334 sfmmu_tsb_free(tsbinfo); 11335 } 11336 kmem_cache_free(sfmmu_tsbinfo_cache, tsbinfo); 11337 11338 } 11339 11340 /* 11341 * Setup all the references to physical memory for this tsbinfo. 11342 * The underlying page(s) must be locked. 11343 */ 11344 static void 11345 sfmmu_tsbinfo_setup_phys(struct tsb_info *tsbinfo, pfn_t pfn) 11346 { 11347 ASSERT(pfn != PFN_INVALID); 11348 ASSERT(pfn == va_to_pfn(tsbinfo->tsb_va)); 11349 11350 #ifndef sun4v 11351 if (tsbinfo->tsb_szc == 0) { 11352 sfmmu_memtte(&tsbinfo->tsb_tte, pfn, 11353 PROT_WRITE|PROT_READ, TTE8K); 11354 } else { 11355 /* 11356 * Round down PA and use a large mapping; the handlers will 11357 * compute the TSB pointer at the correct offset into the 11358 * big virtual page. NOTE: this assumes all TSBs larger 11359 * than 8K must come from physically contiguous slabs of 11360 * size tsb_slab_size. 
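 *
 * Worked example (numbers purely illustrative): with a 4M slab,
 * tsb_slab_mask covers the low 9 pfn bits, so a TSB whose first
 * pfn is 0x12345 gets its locked tte built at pfn
 * (0x12345 & ~0x1ff) == 0x12200, and the miss handlers add the
 * 0x145-page offset back when they form the TSB pointer.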
11361 */ 11362 sfmmu_memtte(&tsbinfo->tsb_tte, pfn & ~tsb_slab_mask, 11363 PROT_WRITE|PROT_READ, tsb_slab_ttesz); 11364 } 11365 tsbinfo->tsb_pa = ptob(pfn); 11366 11367 TTE_SET_LOCKED(&tsbinfo->tsb_tte); /* lock the tte into dtlb */ 11368 TTE_SET_MOD(&tsbinfo->tsb_tte); /* enable writes */ 11369 11370 ASSERT(TTE_IS_PRIVILEGED(&tsbinfo->tsb_tte)); 11371 ASSERT(TTE_IS_LOCKED(&tsbinfo->tsb_tte)); 11372 #else /* sun4v */ 11373 tsbinfo->tsb_pa = ptob(pfn); 11374 #endif /* sun4v */ 11375 } 11376 11377 11378 /* 11379 * Returns zero on success, ENOMEM if over the high water mark, 11380 * or EAGAIN if the caller needs to retry with a smaller TSB 11381 * size (or specify TSB_FORCEALLOC if the allocation can't fail). 11382 * 11383 * This call cannot fail to allocate a TSB if TSB_FORCEALLOC 11384 * is specified and the TSB requested is PAGESIZE, though it 11385 * may sleep waiting for memory if sufficient memory is not 11386 * available. 11387 */ 11388 static int 11389 sfmmu_init_tsbinfo(struct tsb_info *tsbinfo, int tteszmask, 11390 int tsbcode, uint_t flags, sfmmu_t *sfmmup) 11391 { 11392 caddr_t vaddr = NULL; 11393 caddr_t slab_vaddr; 11394 uintptr_t slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT; 11395 int tsbbytes = TSB_BYTES(tsbcode); 11396 int lowmem = 0; 11397 struct kmem_cache *kmem_cachep = NULL; 11398 vmem_t *vmp = NULL; 11399 lgrp_id_t lgrpid = LGRP_NONE; 11400 pfn_t pfn; 11401 uint_t cbflags = HAC_SLEEP; 11402 page_t **pplist; 11403 int ret; 11404 11405 if (flags & (TSB_FORCEALLOC | TSB_SWAPIN | TSB_GROW | TSB_SHRINK)) 11406 flags |= TSB_ALLOC; 11407 11408 ASSERT((flags & TSB_FORCEALLOC) == 0 || tsbcode == TSB_MIN_SZCODE); 11409 11410 tsbinfo->tsb_sfmmu = sfmmup; 11411 11412 /* 11413 * If not allocating a TSB, set up the tsbinfo, set TSB_SWAPPED, and 11414 * return. 11415 */ 11416 if ((flags & TSB_ALLOC) == 0) { 11417 tsbinfo->tsb_szc = tsbcode; 11418 tsbinfo->tsb_ttesz_mask = tteszmask; 11419 tsbinfo->tsb_va = (caddr_t)0xbadbadbeef; 11420 tsbinfo->tsb_pa = -1; 11421 tsbinfo->tsb_tte.ll = 0; 11422 tsbinfo->tsb_next = NULL; 11423 tsbinfo->tsb_flags = TSB_SWAPPED; 11424 tsbinfo->tsb_cache = NULL; 11425 tsbinfo->tsb_vmp = NULL; 11426 return (0); 11427 } 11428 11429 #ifdef DEBUG 11430 /* 11431 * For debugging: 11432 * Randomly force allocation failures every tsb_alloc_mtbf 11433 * tries if TSB_FORCEALLOC is not specified. This will 11434 * return ENOMEM if tsb_alloc_mtbf is odd, or EAGAIN if 11435 * it is even, to allow testing of both failure paths... 11436 */ 11437 if (tsb_alloc_mtbf && ((flags & TSB_FORCEALLOC) == 0) && 11438 (tsb_alloc_count++ == tsb_alloc_mtbf)) { 11439 tsb_alloc_count = 0; 11440 tsb_alloc_fail_mtbf++; 11441 return ((tsb_alloc_mtbf & 1)? ENOMEM : EAGAIN); 11442 } 11443 #endif /* DEBUG */ 11444 11445 /* 11446 * Enforce high water mark if we are not doing a forced allocation 11447 * and are not shrinking a process' TSB. 11448 */ 11449 if ((flags & TSB_SHRINK) == 0 && 11450 (tsbbytes + tsb_alloc_bytes) > tsb_alloc_hiwater) { 11451 if ((flags & TSB_FORCEALLOC) == 0) 11452 return (ENOMEM); 11453 lowmem = 1; 11454 } 11455 11456 /* 11457 * Allocate from the correct location based upon the size of the TSB 11458 * compared to the base page size, and what memory conditions dictate. 11459 * Note we always do nonblocking allocations from the TSB arena since 11460 * we don't want memory fragmentation to cause processes to block 11461 * indefinitely waiting for memory; until the kernel algorithms that 11462 * coalesce large pages are improved this is our best option. 
11463 * 11464 * Algorithm: 11465 * If allocating a "large" TSB (>8K), allocate from the 11466 * appropriate kmem_tsb_default_arena vmem arena 11467 * else if low on memory or the TSB_FORCEALLOC flag is set or 11468 * tsb_forceheap is set 11469 * Allocate from kernel heap via sfmmu_tsb8k_cache with 11470 * KM_SLEEP (never fails) 11471 * else 11472 * Allocate from appropriate sfmmu_tsb_cache with 11473 * KM_NOSLEEP 11474 * endif 11475 */ 11476 if (tsb_lgrp_affinity) 11477 lgrpid = lgrp_home_id(curthread); 11478 if (lgrpid == LGRP_NONE) 11479 lgrpid = 0; /* use lgrp of boot CPU */ 11480 11481 if (tsbbytes > MMU_PAGESIZE) { 11482 vmp = kmem_tsb_default_arena[lgrpid]; 11483 vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes, 0, 0, 11484 NULL, NULL, VM_NOSLEEP); 11485 #ifdef DEBUG 11486 } else if (lowmem || (flags & TSB_FORCEALLOC) || tsb_forceheap) { 11487 #else /* !DEBUG */ 11488 } else if (lowmem || (flags & TSB_FORCEALLOC)) { 11489 #endif /* DEBUG */ 11490 kmem_cachep = sfmmu_tsb8k_cache; 11491 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_SLEEP); 11492 ASSERT(vaddr != NULL); 11493 } else { 11494 kmem_cachep = sfmmu_tsb_cache[lgrpid]; 11495 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_NOSLEEP); 11496 } 11497 11498 tsbinfo->tsb_cache = kmem_cachep; 11499 tsbinfo->tsb_vmp = vmp; 11500 11501 if (vaddr == NULL) { 11502 return (EAGAIN); 11503 } 11504 11505 atomic_add_64(&tsb_alloc_bytes, (int64_t)tsbbytes); 11506 kmem_cachep = tsbinfo->tsb_cache; 11507 11508 /* 11509 * If we are allocating from outside the cage, then we need to 11510 * register a relocation callback handler. Note that for now 11511 * since pseudo mappings always hang off of the slab's root page, 11512 * we need only lock the first 8K of the TSB slab. This is a bit 11513 * hacky but it is good for performance. 11514 */ 11515 if (kmem_cachep != sfmmu_tsb8k_cache) { 11516 slab_vaddr = (caddr_t)((uintptr_t)vaddr & slab_mask); 11517 ret = as_pagelock(&kas, &pplist, slab_vaddr, PAGESIZE, S_WRITE); 11518 ASSERT(ret == 0); 11519 ret = hat_add_callback(sfmmu_tsb_cb_id, vaddr, (uint_t)tsbbytes, 11520 cbflags, (void *)tsbinfo, &pfn, NULL); 11521 11522 /* 11523 * Need to free up resources if we could not successfully 11524 * add the callback function and return an error condition. 11525 */ 11526 if (ret != 0) { 11527 if (kmem_cachep) { 11528 kmem_cache_free(kmem_cachep, vaddr); 11529 } else { 11530 vmem_xfree(vmp, (void *)vaddr, tsbbytes); 11531 } 11532 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE, 11533 S_WRITE); 11534 return (EAGAIN); 11535 } 11536 } else { 11537 /* 11538 * Since allocation of 8K TSBs from heap is rare and occurs 11539 * during memory pressure we allocate them from permanent 11540 * memory rather than using callbacks to get the PFN. 
11541 */ 11542 pfn = hat_getpfnum(kas.a_hat, vaddr); 11543 } 11544 11545 tsbinfo->tsb_va = vaddr; 11546 tsbinfo->tsb_szc = tsbcode; 11547 tsbinfo->tsb_ttesz_mask = tteszmask; 11548 tsbinfo->tsb_next = NULL; 11549 tsbinfo->tsb_flags = 0; 11550 11551 sfmmu_tsbinfo_setup_phys(tsbinfo, pfn); 11552 11553 if (kmem_cachep != sfmmu_tsb8k_cache) { 11554 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE, S_WRITE); 11555 } 11556 11557 sfmmu_inv_tsb(vaddr, tsbbytes); 11558 return (0); 11559 } 11560 11561 /* 11562 * Initialize per cpu tsb and per cpu tsbmiss_area 11563 */ 11564 void 11565 sfmmu_init_tsbs(void) 11566 { 11567 int i; 11568 struct tsbmiss *tsbmissp; 11569 struct kpmtsbm *kpmtsbmp; 11570 #ifndef sun4v 11571 extern int dcache_line_mask; 11572 #endif /* sun4v */ 11573 extern uint_t vac_colors; 11574 11575 /* 11576 * Init. tsb miss area. 11577 */ 11578 tsbmissp = tsbmiss_area; 11579 11580 for (i = 0; i < NCPU; tsbmissp++, i++) { 11581 /* 11582 * initialize the tsbmiss area. 11583 * Do this for all possible CPUs as some may be added 11584 * while the system is running. There is no cost to this. 11585 */ 11586 tsbmissp->ksfmmup = ksfmmup; 11587 #ifndef sun4v 11588 tsbmissp->dcache_line_mask = (uint16_t)dcache_line_mask; 11589 #endif /* sun4v */ 11590 tsbmissp->khashstart = 11591 (struct hmehash_bucket *)va_to_pa((caddr_t)khme_hash); 11592 tsbmissp->uhashstart = 11593 (struct hmehash_bucket *)va_to_pa((caddr_t)uhme_hash); 11594 tsbmissp->khashsz = khmehash_num; 11595 tsbmissp->uhashsz = uhmehash_num; 11596 } 11597 11598 sfmmu_tsb_cb_id = hat_register_callback('T'<<16 | 'S' << 8 | 'B', 11599 sfmmu_tsb_pre_relocator, sfmmu_tsb_post_relocator, NULL, 0); 11600 11601 if (kpm_enable == 0) 11602 return; 11603 11604 /* -- Begin KPM specific init -- */ 11605 11606 if (kpm_smallpages) { 11607 /* 11608 * If we're using base pagesize pages for seg_kpm 11609 * mappings, we use the kernel TSB since we can't afford 11610 * to allocate a second huge TSB for these mappings. 11611 */ 11612 kpm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base; 11613 kpm_tsbsz = ktsb_szcode; 11614 kpmsm_tsbbase = kpm_tsbbase; 11615 kpmsm_tsbsz = kpm_tsbsz; 11616 } else { 11617 /* 11618 * In VAC conflict case, just put the entries in the 11619 * kernel 8K indexed TSB for now so we can find them. 11620 * This could really be changed in the future if we feel 11621 * the need... 11622 */ 11623 kpmsm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base; 11624 kpmsm_tsbsz = ktsb_szcode; 11625 kpm_tsbbase = ktsb_phys? ktsb4m_pbase : (uint64_t)ktsb4m_base; 11626 kpm_tsbsz = ktsb4m_szcode; 11627 } 11628 11629 kpmtsbmp = kpmtsbm_area; 11630 for (i = 0; i < NCPU; kpmtsbmp++, i++) { 11631 /* 11632 * Initialize the kpmtsbm area. 11633 * Do this for all possible CPUs as some may be added 11634 * while the system is running. There is no cost to this. 11635 */ 11636 kpmtsbmp->vbase = kpm_vbase; 11637 kpmtsbmp->vend = kpm_vbase + kpm_size * vac_colors; 11638 kpmtsbmp->sz_shift = kpm_size_shift; 11639 kpmtsbmp->kpmp_shift = kpmp_shift; 11640 kpmtsbmp->kpmp2pshft = (uchar_t)kpmp2pshft; 11641 if (kpm_smallpages == 0) { 11642 kpmtsbmp->kpmp_table_sz = kpmp_table_sz; 11643 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_table); 11644 } else { 11645 kpmtsbmp->kpmp_table_sz = kpmp_stable_sz; 11646 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_stable); 11647 } 11648 kpmtsbmp->msegphashpa = va_to_pa(memseg_phash); 11649 kpmtsbmp->flags = KPMTSBM_ENABLE_FLAG; 11650 #ifdef DEBUG 11651 kpmtsbmp->flags |= (kpm_tsbmtl) ? 
KPMTSBM_TLTSBM_FLAG : 0; 11652 #endif /* DEBUG */ 11653 if (ktsb_phys) 11654 kpmtsbmp->flags |= KPMTSBM_TSBPHYS_FLAG; 11655 } 11656 11657 /* -- End KPM specific init -- */ 11658 } 11659 11660 /* Avoid using sfmmu_tsbinfo_alloc() to avoid kmem_alloc - no real reason */ 11661 struct tsb_info ktsb_info[2]; 11662 11663 /* 11664 * Called from hat_kern_setup() to setup the tsb_info for ksfmmup. 11665 */ 11666 void 11667 sfmmu_init_ktsbinfo() 11668 { 11669 ASSERT(ksfmmup != NULL); 11670 ASSERT(ksfmmup->sfmmu_tsb == NULL); 11671 /* 11672 * Allocate tsbinfos for kernel and copy in data 11673 * to make debug easier and sun4v setup easier. 11674 */ 11675 ktsb_info[0].tsb_sfmmu = ksfmmup; 11676 ktsb_info[0].tsb_szc = ktsb_szcode; 11677 ktsb_info[0].tsb_ttesz_mask = TSB8K|TSB64K|TSB512K; 11678 ktsb_info[0].tsb_va = ktsb_base; 11679 ktsb_info[0].tsb_pa = ktsb_pbase; 11680 ktsb_info[0].tsb_flags = 0; 11681 ktsb_info[0].tsb_tte.ll = 0; 11682 ktsb_info[0].tsb_cache = NULL; 11683 11684 ktsb_info[1].tsb_sfmmu = ksfmmup; 11685 ktsb_info[1].tsb_szc = ktsb4m_szcode; 11686 ktsb_info[1].tsb_ttesz_mask = TSB4M; 11687 ktsb_info[1].tsb_va = ktsb4m_base; 11688 ktsb_info[1].tsb_pa = ktsb4m_pbase; 11689 ktsb_info[1].tsb_flags = 0; 11690 ktsb_info[1].tsb_tte.ll = 0; 11691 ktsb_info[1].tsb_cache = NULL; 11692 11693 /* Link them into ksfmmup. */ 11694 ktsb_info[0].tsb_next = &ktsb_info[1]; 11695 ktsb_info[1].tsb_next = NULL; 11696 ksfmmup->sfmmu_tsb = &ktsb_info[0]; 11697 11698 sfmmu_setup_tsbinfo(ksfmmup); 11699 } 11700 11701 /* 11702 * Cache the last value returned from va_to_pa(). If the VA specified 11703 * in the current call to cached_va_to_pa() maps to the same Page (as the 11704 * previous call to cached_va_to_pa()), then compute the PA using 11705 * cached info, else call va_to_pa(). 11706 * 11707 * Note: this function is neither MT-safe nor consistent in the presence 11708 * of multiple, interleaved threads. This function was created to enable 11709 * an optimization used during boot (at a point when there's only one thread 11710 * executing on the "boot CPU", and before startup_vm() has been called). 11711 */ 11712 static uint64_t 11713 cached_va_to_pa(void *vaddr) 11714 { 11715 static uint64_t prev_vaddr_base = 0; 11716 static uint64_t prev_pfn = 0; 11717 11718 if ((((uint64_t)vaddr) & MMU_PAGEMASK) == prev_vaddr_base) { 11719 return (prev_pfn | ((uint64_t)vaddr & MMU_PAGEOFFSET)); 11720 } else { 11721 uint64_t pa = va_to_pa(vaddr); 11722 11723 if (pa != ((uint64_t)-1)) { 11724 /* 11725 * Computed physical address is valid. Cache its 11726 * related info for the next cached_va_to_pa() call. 11727 */ 11728 prev_pfn = pa & MMU_PAGEMASK; 11729 prev_vaddr_base = ((uint64_t)vaddr) & MMU_PAGEMASK; 11730 } 11731 11732 return (pa); 11733 } 11734 } 11735 11736 /* 11737 * Carve up our nucleus hblk region. We may allocate more hblks than 11738 * asked due to rounding errors but we are guaranteed to have at least 11739 * enough space to allocate the requested number of hblk8's and hblk1's. 
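 *
 * Worked example (sizes made up for illustration): with size = 64K,
 * nhblk1 = 4, hme1blk_sz = 0x100 and hme8blk_sz = 0xe0, the code
 * below computes hblk8_bound = 0x10000 - 4*0x100 - 0xe0 = 0xfb20,
 * carves hblk8's from every offset up to and including 0xfb20, and
 * then carves hblk1's out of whatever remains, which is guaranteed
 * to be at least 4 * hme1blk_sz bytes.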
11740 */ 11741 void 11742 sfmmu_init_nucleus_hblks(caddr_t addr, size_t size, int nhblk8, int nhblk1) 11743 { 11744 struct hme_blk *hmeblkp; 11745 size_t hme8blk_sz, hme1blk_sz; 11746 size_t i; 11747 size_t hblk8_bound; 11748 ulong_t j = 0, k = 0; 11749 11750 ASSERT(addr != NULL && size != 0); 11751 11752 /* Need to use proper structure alignment */ 11753 hme8blk_sz = roundup(HME8BLK_SZ, sizeof (int64_t)); 11754 hme1blk_sz = roundup(HME1BLK_SZ, sizeof (int64_t)); 11755 11756 nucleus_hblk8.list = (void *)addr; 11757 nucleus_hblk8.index = 0; 11758 11759 /* 11760 * Use as much memory as possible for hblk8's since we 11761 * expect all bop_alloc'ed memory to be allocated in 8k chunks. 11762 * We need to hold back enough space for the hblk1's which 11763 * we'll allocate next. 11764 */ 11765 hblk8_bound = size - (nhblk1 * hme1blk_sz) - hme8blk_sz; 11766 for (i = 0; i <= hblk8_bound; i += hme8blk_sz, j++) { 11767 hmeblkp = (struct hme_blk *)addr; 11768 addr += hme8blk_sz; 11769 hmeblkp->hblk_nuc_bit = 1; 11770 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp); 11771 } 11772 nucleus_hblk8.len = j; 11773 ASSERT(j >= nhblk8); 11774 SFMMU_STAT_ADD(sf_hblk8_ncreate, j); 11775 11776 nucleus_hblk1.list = (void *)addr; 11777 nucleus_hblk1.index = 0; 11778 for (; i <= (size - hme1blk_sz); i += hme1blk_sz, k++) { 11779 hmeblkp = (struct hme_blk *)addr; 11780 addr += hme1blk_sz; 11781 hmeblkp->hblk_nuc_bit = 1; 11782 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp); 11783 } 11784 ASSERT(k >= nhblk1); 11785 nucleus_hblk1.len = k; 11786 SFMMU_STAT_ADD(sf_hblk1_ncreate, k); 11787 } 11788 11789 /* 11790 * This function is currently not supported on this platform. For what 11791 * it's supposed to do, see hat.c and hat_srmmu.c 11792 */ 11793 /* ARGSUSED */ 11794 faultcode_t 11795 hat_softlock(struct hat *hat, caddr_t addr, size_t *lenp, page_t **ppp, 11796 uint_t flags) 11797 { 11798 ASSERT(hat->sfmmu_xhat_provider == NULL); 11799 return (FC_NOSUPPORT); 11800 } 11801 11802 /* 11803 * Searchs the mapping list of the page for a mapping of the same size. If not 11804 * found the corresponding bit is cleared in the p_index field. When large 11805 * pages are more prevalent in the system, we can maintain the mapping list 11806 * in order and we don't have to traverse the list each time. Just check the 11807 * next and prev entries, and if both are of different size, we clear the bit. 11808 */ 11809 static void 11810 sfmmu_rm_large_mappings(page_t *pp, int ttesz) 11811 { 11812 struct sf_hment *sfhmep; 11813 struct hme_blk *hmeblkp; 11814 int index; 11815 pgcnt_t npgs; 11816 11817 ASSERT(ttesz > TTE8K); 11818 11819 ASSERT(sfmmu_mlist_held(pp)); 11820 11821 ASSERT(PP_ISMAPPED_LARGE(pp)); 11822 11823 /* 11824 * Traverse mapping list looking for another mapping of same size. 11825 * since we only want to clear index field if all mappings of 11826 * that size are gone. 11827 */ 11828 11829 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 11830 hmeblkp = sfmmu_hmetohblk(sfhmep); 11831 if (hmeblkp->hblk_xhat_bit) 11832 continue; 11833 if (hme_size(sfhmep) == ttesz) { 11834 /* 11835 * another mapping of the same size. don't clear index. 11836 */ 11837 return; 11838 } 11839 } 11840 11841 /* 11842 * Clear the p_index bit for large page. 
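 *
 * For instance, when the last 4M mapping of a large page goes away,
 * ttesz is TTE4M, PAGESZ_TO_INDEX(TTE4M) selects the 4M bit of
 * p_index, and the loop below clears that bit in each of the
 * TTEPAGES(TTE4M) constituent page_t's of the large page.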
/*
 * This function is currently not supported on this platform.  For what
 * it's supposed to do, see hat.c and hat_srmmu.c
 */
/* ARGSUSED */
faultcode_t
hat_softlock(struct hat *hat, caddr_t addr, size_t *lenp, page_t **ppp,
	uint_t flags)
{
	ASSERT(hat->sfmmu_xhat_provider == NULL);
	return (FC_NOSUPPORT);
}

/*
 * Searches the mapping list of the page for a mapping of the same size.  If
 * none is found, the corresponding bit is cleared in the p_index field.  When
 * large pages are more prevalent in the system, we can maintain the mapping
 * list in order and we won't have to traverse the list each time; just check
 * the next and prev entries, and if both are of a different size, clear the
 * bit.
 */
static void
sfmmu_rm_large_mappings(page_t *pp, int ttesz)
{
	struct sf_hment *sfhmep;
	struct hme_blk *hmeblkp;
	int	index;
	pgcnt_t	npgs;

	ASSERT(ttesz > TTE8K);

	ASSERT(sfmmu_mlist_held(pp));

	ASSERT(PP_ISMAPPED_LARGE(pp));

	/*
	 * Traverse the mapping list looking for another mapping of the same
	 * size, since we only want to clear the index field if all mappings
	 * of that size are gone.
	 */
	for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
		hmeblkp = sfmmu_hmetohblk(sfhmep);
		if (hmeblkp->hblk_xhat_bit)
			continue;
		if (hme_size(sfhmep) == ttesz) {
			/*
			 * Another mapping of the same size; don't clear
			 * the index.
			 */
			return;
		}
	}

	/*
	 * Clear the p_index bit for the large page.
	 */
	index = PAGESZ_TO_INDEX(ttesz);
	npgs = TTEPAGES(ttesz);
	while (npgs-- > 0) {
		ASSERT(pp->p_index & index);
		pp->p_index &= ~index;
		pp = PP_PAGENEXT(pp);
	}
}

/*
 * Return whether the given feature is supported.
 */
/* ARGSUSED */
int
hat_supported(enum hat_features feature, void *arg)
{
	switch (feature) {
	case HAT_SHARED_PT:
	case HAT_DYNAMIC_ISM_UNMAP:
	case HAT_VMODSORT:
		return (1);
	default:
		return (0);
	}
}

void
hat_enter(struct hat *hat)
{
	hatlock_t *hatlockp;

	if (hat != ksfmmup) {
		hatlockp = TSB_HASH(hat);
		mutex_enter(HATLOCK_MUTEXP(hatlockp));
	}
}

void
hat_exit(struct hat *hat)
{
	hatlock_t *hatlockp;

	if (hat != ksfmmup) {
		hatlockp = TSB_HASH(hat);
		mutex_exit(HATLOCK_MUTEXP(hatlockp));
	}
}

/*ARGSUSED*/
void
hat_reserve(struct as *as, caddr_t addr, size_t len)
{
}

static void
hat_kstat_init(void)
{
	kstat_t *ksp;

	ksp = kstat_create("unix", 0, "sfmmu_global_stat", "hat",
	    KSTAT_TYPE_RAW, sizeof (struct sfmmu_global_stat),
	    KSTAT_FLAG_VIRTUAL);
	if (ksp) {
		ksp->ks_data = (void *) &sfmmu_global_stat;
		kstat_install(ksp);
	}
	ksp = kstat_create("unix", 0, "sfmmu_tsbsize_stat", "hat",
	    KSTAT_TYPE_RAW, sizeof (struct sfmmu_tsbsize_stat),
	    KSTAT_FLAG_VIRTUAL);
	if (ksp) {
		ksp->ks_data = (void *) &sfmmu_tsbsize_stat;
		kstat_install(ksp);
	}
	ksp = kstat_create("unix", 0, "sfmmu_percpu_stat", "hat",
	    KSTAT_TYPE_RAW, sizeof (struct sfmmu_percpu_stat) * NCPU,
	    KSTAT_FLAG_WRITABLE);
	if (ksp) {
		ksp->ks_update = sfmmu_kstat_percpu_update;
		kstat_install(ksp);
	}
}

/* ARGSUSED */
static int
sfmmu_kstat_percpu_update(kstat_t *ksp, int rw)
{
	struct sfmmu_percpu_stat *cpu_kstat = ksp->ks_data;
	struct tsbmiss *tsbm = tsbmiss_area;
	struct kpmtsbm *kpmtsbm = kpmtsbm_area;
	int i;

	ASSERT(cpu_kstat);
	if (rw == KSTAT_READ) {
		for (i = 0; i < NCPU; cpu_kstat++, tsbm++, kpmtsbm++, i++) {
			cpu_kstat->sf_itlb_misses = tsbm->itlb_misses;
			cpu_kstat->sf_dtlb_misses = tsbm->dtlb_misses;
			cpu_kstat->sf_utsb_misses = tsbm->utsb_misses -
			    tsbm->uprot_traps;
			cpu_kstat->sf_ktsb_misses = tsbm->ktsb_misses +
			    kpmtsbm->kpm_tsb_misses - tsbm->kprot_traps;

			if (tsbm->itlb_misses > 0 && tsbm->dtlb_misses > 0) {
				cpu_kstat->sf_tsb_hits =
				    (tsbm->itlb_misses + tsbm->dtlb_misses) -
				    (tsbm->utsb_misses + tsbm->ktsb_misses +
				    kpmtsbm->kpm_tsb_misses);
			} else {
				cpu_kstat->sf_tsb_hits = 0;
			}
			cpu_kstat->sf_umod_faults = tsbm->uprot_traps;
			cpu_kstat->sf_kmod_faults = tsbm->kprot_traps;
		}
	} else {
		/* KSTAT_WRITE is used to clear stats */
		for (i = 0; i < NCPU; tsbm++, kpmtsbm++, i++) {
			tsbm->itlb_misses = 0;
			tsbm->dtlb_misses = 0;
			tsbm->utsb_misses = 0;
			tsbm->ktsb_misses = 0;
			tsbm->uprot_traps = 0;
			tsbm->kprot_traps = 0;
			kpmtsbm->kpm_dtlb_misses = 0;
			kpmtsbm->kpm_tsb_misses = 0;
		}
	}
	return (0);
}
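/*
 * Illustrative sketch (not part of the HAT): the accounting identity used by
 * sfmmu_kstat_percpu_update() above.  Every TLB miss goes through a TSB
 * lookup, so TSB hits can be derived as total TLB misses minus the lookups
 * that also missed the TSB.  All figures below are made up.
 */
#if 0	/* example only - never compiled into the kernel */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t itlb_misses = 700000, dtlb_misses = 2300000;	/* hypothetical */
	uint64_t utsb_misses = 40000, ktsb_misses = 15000;	/* hypothetical */
	uint64_t kpm_tsb_misses = 5000;				/* hypothetical */
	uint64_t tsb_hits;

	/* TLB misses that did not also miss the TSB were satisfied by it */
	tsb_hits = (itlb_misses + dtlb_misses) -
	    (utsb_misses + ktsb_misses + kpm_tsb_misses);
	printf("TSB hits: %llu of %llu TLB misses\n",
	    (unsigned long long)tsb_hits,
	    (unsigned long long)(itlb_misses + dtlb_misses));
	return (0);
}
#endif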
#ifdef DEBUG

tte_t *gorig[NCPU], *gcur[NCPU], *gnew[NCPU];

/*
 * A tte checker.
 * *orig_old is the value we read before the cas.
 * *cur is the value returned by the cas.
 * *new is the desired value when we do the cas.
 *
 * *hmeblkp is currently unused.
 */

/* ARGSUSED */
void
chk_tte(tte_t *orig_old, tte_t *cur, tte_t *new, struct hme_blk *hmeblkp)
{
	pfn_t i, j, k;
	int cpuid = CPU->cpu_id;

	gorig[cpuid] = orig_old;
	gcur[cpuid] = cur;
	gnew[cpuid] = new;

#ifdef lint
	hmeblkp = hmeblkp;
#endif

	if (TTE_IS_VALID(orig_old)) {
		if (TTE_IS_VALID(cur)) {
			i = TTE_TO_TTEPFN(orig_old);
			j = TTE_TO_TTEPFN(cur);
			k = TTE_TO_TTEPFN(new);
			if (i != j) {
				/* remap error? */
				panic("chk_tte: bad pfn, 0x%lx, 0x%lx", i, j);
			}

			if (i != k) {
				/* remap error? */
				panic("chk_tte: bad pfn2, 0x%lx, 0x%lx", i, k);
			}
		} else {
			if (TTE_IS_VALID(new)) {
				panic("chk_tte: invalid cur? ");
			}

			i = TTE_TO_TTEPFN(orig_old);
			k = TTE_TO_TTEPFN(new);
			if (i != k) {
				panic("chk_tte: bad pfn3, 0x%lx, 0x%lx", i, k);
			}
		}
	} else {
		if (TTE_IS_VALID(cur)) {
			j = TTE_TO_TTEPFN(cur);
			if (TTE_IS_VALID(new)) {
				k = TTE_TO_TTEPFN(new);
				if (j != k) {
					panic("chk_tte: bad pfn4, 0x%lx, 0x%lx",
					    j, k);
				}
			} else {
				panic("chk_tte: why here?");
			}
		} else {
			if (!TTE_IS_VALID(new)) {
				panic("chk_tte: why here2 ?");
			}
		}
	}
}

#endif /* DEBUG */

extern void prefetch_tsbe_read(struct tsbe *);
extern void prefetch_tsbe_write(struct tsbe *);

/*
 * We want to prefetch 7 cache lines ahead for our read prefetch.  This gives
 * us optimal performance on Cheetah+.  You can only have 8 outstanding
 * prefetches at any one time, so we opted for 7 read prefetches and 1 write
 * prefetch to make the best use of the prefetch capability.
 */
#define	TSBE_PREFETCH_STRIDE (7)
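/*
 * Illustrative sketch (not part of the HAT): the stride arithmetic behind
 * TSBE_PREFETCH_STRIDE as used by sfmmu_copy_tsb() below.  A TSB entry is 16
 * bytes, so a 64-byte P$ line holds 4 entries; issuing one prefetch per 4
 * entries and stopping 4 * (STRIDE + 1) entries before the end keeps 7 read
 * prefetches in flight without reaching past the TSB.  __builtin_prefetch()
 * is used here only as a portable stand-in for prefetch_tsbe_read().
 */
#if 0	/* example only - never compiled into the kernel */
#include <stddef.h>

#define	EX_STRIDE		7	/* cache lines prefetched ahead */
#define	EX_TSBE_PER_LINE	4	/* 64-byte line / 16-byte entry */

void
example_prefetch_walk(const char *tsb, size_t entries, size_t entry_size)
{
	size_t i;
	size_t ahead = EX_TSBE_PER_LINE * (EX_STRIDE + 1);
	size_t last_prefetch = (entries > ahead) ? entries - ahead : 0;

	for (i = 0; i < entries; i++) {
		/* one read prefetch per cache line, 7 lines ahead */
		if ((i % EX_TSBE_PER_LINE) == 0 && i < last_prefetch)
			__builtin_prefetch(tsb +
			    (i + EX_TSBE_PER_LINE * EX_STRIDE) * entry_size);
		/* ... process entry i here ... */
	}
}
#endif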
void
sfmmu_copy_tsb(struct tsb_info *old_tsbinfo, struct tsb_info *new_tsbinfo)
{
	int old_bytes = TSB_BYTES(old_tsbinfo->tsb_szc);
	int new_bytes = TSB_BYTES(new_tsbinfo->tsb_szc);
	int old_entries = TSB_ENTRIES(old_tsbinfo->tsb_szc);
	int new_entries = TSB_ENTRIES(new_tsbinfo->tsb_szc);
	struct tsbe *old;
	struct tsbe *new;
	struct tsbe *new_base = (struct tsbe *)new_tsbinfo->tsb_va;
	uint64_t va;
	int new_offset;
	int i;
	int vpshift;
	int last_prefetch;

	if (old_bytes == new_bytes) {
		bcopy(old_tsbinfo->tsb_va, new_tsbinfo->tsb_va, new_bytes);
	} else {

		/*
		 * A TSBE is 16 bytes which means there are four TSBE's per
		 * P$ line (64 bytes), thus every 4 TSBE's we prefetch.
		 */
		old = (struct tsbe *)old_tsbinfo->tsb_va;
		last_prefetch = old_entries - (4*(TSBE_PREFETCH_STRIDE+1));
		for (i = 0; i < old_entries; i++, old++) {
			if (((i & (4-1)) == 0) && (i < last_prefetch))
				prefetch_tsbe_read(old);
			if (!old->tte_tag.tag_invalid) {
				/*
				 * We have a valid TTE to remap.  Check the
				 * size.  We won't remap 64K or 512K TTEs
				 * because they span more than one TSB entry
				 * and are indexed using an 8K virt. page.
				 * Ditto for 32M and 256M TTEs.
				 */
				if (TTE_CSZ(&old->tte_data) == TTE64K ||
				    TTE_CSZ(&old->tte_data) == TTE512K)
					continue;
				if (mmu_page_sizes == max_mmu_page_sizes) {
					if (TTE_CSZ(&old->tte_data) == TTE32M ||
					    TTE_CSZ(&old->tte_data) == TTE256M)
						continue;
				}

				/* clear the lower 22 bits of the va */
				va = *(uint64_t *)old << 22;
				/* turn va into a virtual pfn */
				va >>= 22 - TSB_START_SIZE;
				/*
				 * or in bits from the offset in the tsb
				 * to get the real virtual pfn. These
				 * correspond to bits [21:13] in the va
				 */
				vpshift =
				    TTE_BSZS_SHIFT(TTE_CSZ(&old->tte_data)) &
				    0x1ff;
				va |= (i << vpshift);
				va >>= vpshift;
				new_offset = va & (new_entries - 1);
				new = new_base + new_offset;
				prefetch_tsbe_write(new);
				*new = *old;
			}
		}
	}
}

/*
 * unused in sfmmu
 */
void
hat_dump(void)
{
}

/*
 * Called when a thread is exiting and we have switched to the kernel address
 * space.  Perform the same VM initialization resume() uses when switching
 * processes.
 *
 * Note that sfmmu_load_mmustate() is currently a no-op for kernel threads, but
 * we call it anyway in case the semantics change in the future.
 */
/*ARGSUSED*/
void
hat_thread_exit(kthread_t *thd)
{
	uint64_t pgsz_cnum;
	uint_t pstate_save;

	ASSERT(thd->t_procp->p_as == &kas);

	pgsz_cnum = KCONTEXT;
#ifdef sun4u
	pgsz_cnum |= (ksfmmup->sfmmu_cext << CTXREG_EXT_SHIFT);
#endif
	/*
	 * Note that sfmmu_load_mmustate() is currently a no-op for
	 * kernel threads.  We need to disable interrupts here,
	 * simply because otherwise sfmmu_load_mmustate() would panic
	 * if the caller does not disable interrupts.
	 */
	pstate_save = sfmmu_disable_intrs();
	sfmmu_setctx_sec(pgsz_cnum);
	sfmmu_load_mmustate(ksfmmup);
	sfmmu_enable_intrs(pstate_save);
}
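/*
 * Illustrative sketch (not part of the HAT): why sfmmu_copy_tsb() above must
 * recompute each entry's slot rather than copying by index.  A TSB is
 * VA-indexed: an 8K mapping lives at (virtual page number) mod (number of
 * entries), so the same mapping generally lands in a different slot once the
 * TSB changes size.  The helper below is a simplified model of that indexing;
 * the names and the sample VA are made up.
 */
#if 0	/* example only - never compiled into the kernel */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define	EX_PAGESHIFT	13			/* 8K base page size */

/* slot of an 8K mapping in a TSB with 'entries' slots (power of two) */
static size_t
example_tsb_slot(uint64_t va, size_t entries)
{
	return ((va >> EX_PAGESHIFT) & (entries - 1));
}

int
main(void)
{
	uint64_t va = 0x2a513a000ULL;		/* arbitrary 8K-aligned VA */

	printf("slot in 512-entry TSB:  %zu\n", example_tsb_slot(va, 512));
	printf("slot in 8192-entry TSB: %zu\n", example_tsb_slot(va, 8192));
	return (0);
}
#endif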