1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 /* 29 * VM - Hardware Address Translation management for Spitfire MMU. 30 * 31 * This file implements the machine specific hardware translation 32 * needed by the VM system. The machine independent interface is 33 * described in <vm/hat.h> while the machine dependent interface 34 * and data structures are described in <vm/hat_sfmmu.h>. 35 * 36 * The hat layer manages the address translation hardware as a cache 37 * driven by calls from the higher levels in the VM system. 38 */ 39 40 #include <sys/types.h> 41 #include <sys/kstat.h> 42 #include <vm/hat.h> 43 #include <vm/hat_sfmmu.h> 44 #include <vm/page.h> 45 #include <sys/pte.h> 46 #include <sys/systm.h> 47 #include <sys/mman.h> 48 #include <sys/sysmacros.h> 49 #include <sys/machparam.h> 50 #include <sys/vtrace.h> 51 #include <sys/kmem.h> 52 #include <sys/mmu.h> 53 #include <sys/cmn_err.h> 54 #include <sys/cpu.h> 55 #include <sys/cpuvar.h> 56 #include <sys/debug.h> 57 #include <sys/lgrp.h> 58 #include <sys/archsystm.h> 59 #include <sys/machsystm.h> 60 #include <sys/vmsystm.h> 61 #include <vm/as.h> 62 #include <vm/seg.h> 63 #include <vm/seg_kp.h> 64 #include <vm/seg_kmem.h> 65 #include <vm/seg_kpm.h> 66 #include <vm/rm.h> 67 #include <sys/t_lock.h> 68 #include <sys/obpdefs.h> 69 #include <sys/vm_machparam.h> 70 #include <sys/var.h> 71 #include <sys/trap.h> 72 #include <sys/machtrap.h> 73 #include <sys/scb.h> 74 #include <sys/bitmap.h> 75 #include <sys/machlock.h> 76 #include <sys/membar.h> 77 #include <sys/atomic.h> 78 #include <sys/cpu_module.h> 79 #include <sys/prom_debug.h> 80 #include <sys/ksynch.h> 81 #include <sys/mem_config.h> 82 #include <sys/mem_cage.h> 83 #include <sys/dtrace.h> 84 #include <vm/vm_dep.h> 85 #include <vm/xhat_sfmmu.h> 86 #include <sys/fpu/fpusystm.h> 87 #include <vm/mach_kpm.h> 88 89 #if defined(SF_ERRATA_57) 90 extern caddr_t errata57_limit; 91 #endif 92 93 #define HME8BLK_SZ_RND ((roundup(HME8BLK_SZ, sizeof (int64_t))) / \ 94 (sizeof (int64_t))) 95 #define HBLK_RESERVE ((struct hme_blk *)hblk_reserve) 96 97 #define HBLK_RESERVE_CNT 128 98 #define HBLK_RESERVE_MIN 20 99 100 static struct hme_blk *freehblkp; 101 static kmutex_t freehblkp_lock; 102 static int freehblkcnt; 103 104 static int64_t hblk_reserve[HME8BLK_SZ_RND]; 105 static kmutex_t hblk_reserve_lock; 106 static kthread_t *hblk_reserve_thread; 107 108 static nucleus_hblk8_info_t nucleus_hblk8; 109 static nucleus_hblk1_info_t nucleus_hblk1; 110 111 /* 112 * SFMMU specific hat functions 113 */ 114 void hat_pagecachectl(struct page *, int); 115 116 /* flags for 
hat_pagecachectl */ 117 #define HAT_CACHE 0x1 118 #define HAT_UNCACHE 0x2 119 #define HAT_TMPNC 0x4 120 121 /* 122 * Flag to allow the creation of non-cacheable translations 123 * to system memory. It is off by default. At the moment this 124 * flag is used by the ecache error injector. The error injector 125 * will turn it on when creating such a translation then shut it 126 * off when it's finished. 127 */ 128 129 int sfmmu_allow_nc_trans = 0; 130 131 /* 132 * Flag to disable large page support. 133 * value of 1 => disable all large pages. 134 * bits 1, 2, and 3 are to disable 64K, 512K and 4M pages respectively. 135 * 136 * For example, use the value 0x4 to disable 512K pages. 137 * 138 */ 139 #define LARGE_PAGES_OFF 0x1 140 141 /* 142 * WARNING: 512K pages MUST be disabled for ISM/DISM. If not 143 * a process would page fault indefinitely if it tried to 144 * access a 512K page. 145 */ 146 int disable_ism_large_pages = (1 << TTE512K); 147 int disable_large_pages = 0; 148 int disable_auto_large_pages = 0; 149 150 /* 151 * Private sfmmu data structures for hat management 152 */ 153 static struct kmem_cache *sfmmuid_cache; 154 static struct kmem_cache *mmuctxdom_cache; 155 156 /* 157 * Private sfmmu data structures for tsb management 158 */ 159 static struct kmem_cache *sfmmu_tsbinfo_cache; 160 static struct kmem_cache *sfmmu_tsb8k_cache; 161 static struct kmem_cache *sfmmu_tsb_cache[NLGRPS_MAX]; 162 static vmem_t *kmem_tsb_arena; 163 164 /* 165 * sfmmu static variables for hmeblk resource management. 166 */ 167 static vmem_t *hat_memload1_arena; /* HAT translation arena for sfmmu1_cache */ 168 static struct kmem_cache *sfmmu8_cache; 169 static struct kmem_cache *sfmmu1_cache; 170 static struct kmem_cache *pa_hment_cache; 171 172 static kmutex_t ism_mlist_lock; /* mutex for ism mapping list */ 173 /* 174 * private data for ism 175 */ 176 static struct kmem_cache *ism_blk_cache; 177 static struct kmem_cache *ism_ment_cache; 178 #define ISMID_STARTADDR NULL 179 180 /* 181 * Whether to delay TLB flushes and use Cheetah's flush-all support 182 * when removing contexts from the dirty list. 183 */ 184 int delay_tlb_flush; 185 int disable_delay_tlb_flush; 186 187 /* 188 * ``hat_lock'' is a hashed mutex lock for protecting sfmmu TSB lists, 189 * HAT flags, synchronizing TLB/TSB coherency, and context management. 190 * The lock is hashed on the sfmmup since the case where we need to lock 191 * all processes is rare but does occur (e.g. we need to unload a shared 192 * mapping from all processes using the mapping). We have a lot of buckets, 193 * and each slab of sfmmu_t's can use about a quarter of them, giving us 194 * a fairly good distribution without wasting too much space and overhead 195 * when we have to grab them all. 196 */ 197 #define SFMMU_NUM_LOCK 128 /* must be power of two */ 198 hatlock_t hat_lock[SFMMU_NUM_LOCK]; 199 200 /* 201 * Hash algorithm optimized for a small number of slabs. 202 * 7 is (highbit((sizeof sfmmu_t)) - 1) 203 * This hash algorithm is based upon the knowledge that sfmmu_t's come from a 204 * kmem_cache, and thus they will be sequential within that cache. In 205 * addition, each new slab will have a different "color" up to cache_maxcolor 206 * which will skew the hashing for each successive slab which is allocated. 207 * If the size of sfmmu_t changed to a larger size, this algorithm may need 208 * to be revisited. 
209 */ 210 #define TSB_HASH_SHIFT_BITS (7) 211 #define PTR_HASH(x) ((uintptr_t)x >> TSB_HASH_SHIFT_BITS) 212 213 #ifdef DEBUG 214 int tsb_hash_debug = 0; 215 #define TSB_HASH(sfmmup) \ 216 (tsb_hash_debug ? &hat_lock[0] : \ 217 &hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)]) 218 #else /* DEBUG */ 219 #define TSB_HASH(sfmmup) &hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)] 220 #endif /* DEBUG */ 221 222 223 /* sfmmu_replace_tsb() return codes. */ 224 typedef enum tsb_replace_rc { 225 TSB_SUCCESS, 226 TSB_ALLOCFAIL, 227 TSB_LOSTRACE, 228 TSB_ALREADY_SWAPPED, 229 TSB_CANTGROW 230 } tsb_replace_rc_t; 231 232 /* 233 * Flags for TSB allocation routines. 234 */ 235 #define TSB_ALLOC 0x01 236 #define TSB_FORCEALLOC 0x02 237 #define TSB_GROW 0x04 238 #define TSB_SHRINK 0x08 239 #define TSB_SWAPIN 0x10 240 241 /* 242 * Support for HAT callbacks. 243 */ 244 #define SFMMU_MAX_RELOC_CALLBACKS 10 245 int sfmmu_max_cb_id = SFMMU_MAX_RELOC_CALLBACKS; 246 static id_t sfmmu_cb_nextid = 0; 247 static id_t sfmmu_tsb_cb_id; 248 struct sfmmu_callback *sfmmu_cb_table; 249 250 /* 251 * Kernel page relocation is enabled by default for non-caged 252 * kernel pages. This has little effect unless segkmem_reloc is 253 * set, since by default kernel memory comes from inside the 254 * kernel cage. 255 */ 256 int hat_kpr_enabled = 1; 257 258 kmutex_t kpr_mutex; 259 kmutex_t kpr_suspendlock; 260 kthread_t *kreloc_thread; 261 262 /* 263 * Enable VA->PA translation sanity checking on DEBUG kernels. 264 * Disabled by default. This is incompatible with some 265 * drivers (error injector, RSM) so if it breaks you get 266 * to keep both pieces. 267 */ 268 int hat_check_vtop = 0; 269 270 /* 271 * Private sfmmu routines (prototypes) 272 */ 273 static struct hme_blk *sfmmu_shadow_hcreate(sfmmu_t *, caddr_t, int, uint_t); 274 static struct hme_blk *sfmmu_hblk_alloc(sfmmu_t *, caddr_t, 275 struct hmehash_bucket *, uint_t, hmeblk_tag, uint_t); 276 static caddr_t sfmmu_hblk_unload(struct hat *, struct hme_blk *, caddr_t, 277 caddr_t, demap_range_t *, uint_t); 278 static caddr_t sfmmu_hblk_sync(struct hat *, struct hme_blk *, caddr_t, 279 caddr_t, int); 280 static void sfmmu_hblk_free(struct hmehash_bucket *, struct hme_blk *, 281 uint64_t, struct hme_blk **); 282 static void sfmmu_hblks_list_purge(struct hme_blk **); 283 static uint_t sfmmu_get_free_hblk(struct hme_blk **, uint_t); 284 static uint_t sfmmu_put_free_hblk(struct hme_blk *, uint_t); 285 static struct hme_blk *sfmmu_hblk_steal(int); 286 static int sfmmu_steal_this_hblk(struct hmehash_bucket *, 287 struct hme_blk *, uint64_t, uint64_t, 288 struct hme_blk *); 289 static caddr_t sfmmu_hblk_unlock(struct hme_blk *, caddr_t, caddr_t); 290 291 static void sfmmu_memload_batchsmall(struct hat *, caddr_t, page_t **, 292 uint_t, uint_t, pgcnt_t); 293 void sfmmu_tteload(struct hat *, tte_t *, caddr_t, page_t *, 294 uint_t); 295 static int sfmmu_tteload_array(sfmmu_t *, tte_t *, caddr_t, page_t **, 296 uint_t); 297 static struct hmehash_bucket *sfmmu_tteload_acquire_hashbucket(sfmmu_t *, 298 caddr_t, int); 299 static struct hme_blk *sfmmu_tteload_find_hmeblk(sfmmu_t *, 300 struct hmehash_bucket *, caddr_t, uint_t, uint_t); 301 static int sfmmu_tteload_addentry(sfmmu_t *, struct hme_blk *, tte_t *, 302 caddr_t, page_t **, uint_t); 303 static void sfmmu_tteload_release_hashbucket(struct hmehash_bucket *); 304 305 static int sfmmu_pagearray_setup(caddr_t, page_t **, tte_t *, int); 306 pfn_t sfmmu_uvatopfn(caddr_t, sfmmu_t *); 307 void sfmmu_memtte(tte_t *, pfn_t, uint_t, int); 308 
#ifdef VAC 309 static void sfmmu_vac_conflict(struct hat *, caddr_t, page_t *); 310 static int sfmmu_vacconflict_array(caddr_t, page_t *, int *); 311 int tst_tnc(page_t *pp, pgcnt_t); 312 void conv_tnc(page_t *pp, int); 313 #endif 314 315 static void sfmmu_get_ctx(sfmmu_t *); 316 static void sfmmu_free_sfmmu(sfmmu_t *); 317 318 static void sfmmu_gettte(struct hat *, caddr_t, tte_t *); 319 static void sfmmu_ttesync(struct hat *, caddr_t, tte_t *, page_t *); 320 static void sfmmu_chgattr(struct hat *, caddr_t, size_t, uint_t, int); 321 322 cpuset_t sfmmu_pageunload(page_t *, struct sf_hment *, int); 323 static void hat_pagereload(struct page *, struct page *); 324 static cpuset_t sfmmu_pagesync(page_t *, struct sf_hment *, uint_t); 325 #ifdef VAC 326 void sfmmu_page_cache_array(page_t *, int, int, pgcnt_t); 327 static void sfmmu_page_cache(page_t *, int, int, int); 328 #endif 329 330 static void sfmmu_tlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *, 331 pfn_t, int, int, int, int); 332 static void sfmmu_ismtlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *, 333 pfn_t, int); 334 static void sfmmu_tlb_demap(caddr_t, sfmmu_t *, struct hme_blk *, int, int); 335 static void sfmmu_tlb_range_demap(demap_range_t *); 336 static void sfmmu_invalidate_ctx(sfmmu_t *); 337 static void sfmmu_sync_mmustate(sfmmu_t *); 338 339 static void sfmmu_tsbinfo_setup_phys(struct tsb_info *, pfn_t); 340 static int sfmmu_tsbinfo_alloc(struct tsb_info **, int, int, uint_t, 341 sfmmu_t *); 342 static void sfmmu_tsb_free(struct tsb_info *); 343 static void sfmmu_tsbinfo_free(struct tsb_info *); 344 static int sfmmu_init_tsbinfo(struct tsb_info *, int, int, uint_t, 345 sfmmu_t *); 346 347 static void sfmmu_tsb_swapin(sfmmu_t *, hatlock_t *); 348 static int sfmmu_select_tsb_szc(pgcnt_t); 349 static void sfmmu_mod_tsb(sfmmu_t *, caddr_t, tte_t *, int); 350 #define sfmmu_load_tsb(sfmmup, vaddr, tte, szc) \ 351 sfmmu_mod_tsb(sfmmup, vaddr, tte, szc) 352 #define sfmmu_unload_tsb(sfmmup, vaddr, szc) \ 353 sfmmu_mod_tsb(sfmmup, vaddr, NULL, szc) 354 static void sfmmu_copy_tsb(struct tsb_info *, struct tsb_info *); 355 static tsb_replace_rc_t sfmmu_replace_tsb(sfmmu_t *, struct tsb_info *, uint_t, 356 hatlock_t *, uint_t); 357 static void sfmmu_size_tsb(sfmmu_t *, int, uint64_t, uint64_t, int); 358 359 #ifdef VAC 360 void sfmmu_cache_flush(pfn_t, int); 361 void sfmmu_cache_flushcolor(int, pfn_t); 362 #endif 363 static caddr_t sfmmu_hblk_chgattr(sfmmu_t *, struct hme_blk *, caddr_t, 364 caddr_t, demap_range_t *, uint_t, int); 365 366 static uint64_t sfmmu_vtop_attr(uint_t, int mode, tte_t *); 367 static uint_t sfmmu_ptov_attr(tte_t *); 368 static caddr_t sfmmu_hblk_chgprot(sfmmu_t *, struct hme_blk *, caddr_t, 369 caddr_t, demap_range_t *, uint_t); 370 static uint_t sfmmu_vtop_prot(uint_t, uint_t *); 371 static int sfmmu_idcache_constructor(void *, void *, int); 372 static void sfmmu_idcache_destructor(void *, void *); 373 static int sfmmu_hblkcache_constructor(void *, void *, int); 374 static void sfmmu_hblkcache_destructor(void *, void *); 375 static void sfmmu_hblkcache_reclaim(void *); 376 static void sfmmu_shadow_hcleanup(sfmmu_t *, struct hme_blk *, 377 struct hmehash_bucket *); 378 static void sfmmu_free_hblks(sfmmu_t *, caddr_t, caddr_t, int); 379 static void sfmmu_rm_large_mappings(page_t *, int); 380 381 static void hat_lock_init(void); 382 static void hat_kstat_init(void); 383 static int sfmmu_kstat_percpu_update(kstat_t *ksp, int rw); 384 static void sfmmu_check_page_sizes(sfmmu_t *, int); 385 int 
fnd_mapping_sz(page_t *); 386 static void iment_add(struct ism_ment *, struct hat *); 387 static void iment_sub(struct ism_ment *, struct hat *); 388 static pgcnt_t ism_tsb_entries(sfmmu_t *, int szc); 389 extern void sfmmu_setup_tsbinfo(sfmmu_t *); 390 extern void sfmmu_clear_utsbinfo(void); 391 392 static void sfmmu_ctx_wrap_around(mmu_ctx_t *); 393 394 /* kpm globals */ 395 #ifdef DEBUG 396 /* 397 * Enable trap level tsbmiss handling 398 */ 399 int kpm_tsbmtl = 1; 400 401 /* 402 * Flush the TLB on kpm mapout. Note: Xcalls are used (again) for the 403 * required TLB shootdowns in this case, so handle w/ care. Off by default. 404 */ 405 int kpm_tlb_flush; 406 #endif /* DEBUG */ 407 408 static void *sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *, size_t, int); 409 410 #ifdef DEBUG 411 static void sfmmu_check_hblk_flist(); 412 #endif 413 414 /* 415 * Semi-private sfmmu data structures. Some of them are initialize in 416 * startup or in hat_init. Some of them are private but accessed by 417 * assembly code or mach_sfmmu.c 418 */ 419 struct hmehash_bucket *uhme_hash; /* user hmeblk hash table */ 420 struct hmehash_bucket *khme_hash; /* kernel hmeblk hash table */ 421 uint64_t uhme_hash_pa; /* PA of uhme_hash */ 422 uint64_t khme_hash_pa; /* PA of khme_hash */ 423 int uhmehash_num; /* # of buckets in user hash table */ 424 int khmehash_num; /* # of buckets in kernel hash table */ 425 426 uint_t max_mmu_ctxdoms = 0; /* max context domains in the system */ 427 mmu_ctx_t **mmu_ctxs_tbl; /* global array of context domains */ 428 uint64_t mmu_saved_gnum = 0; /* to init incoming MMUs' gnums */ 429 430 #define DEFAULT_NUM_CTXS_PER_MMU 8192 431 static uint_t nctxs = DEFAULT_NUM_CTXS_PER_MMU; 432 433 int cache; /* describes system cache */ 434 435 caddr_t ktsb_base; /* kernel 8k-indexed tsb base address */ 436 uint64_t ktsb_pbase; /* kernel 8k-indexed tsb phys address */ 437 int ktsb_szcode; /* kernel 8k-indexed tsb size code */ 438 int ktsb_sz; /* kernel 8k-indexed tsb size */ 439 440 caddr_t ktsb4m_base; /* kernel 4m-indexed tsb base address */ 441 uint64_t ktsb4m_pbase; /* kernel 4m-indexed tsb phys address */ 442 int ktsb4m_szcode; /* kernel 4m-indexed tsb size code */ 443 int ktsb4m_sz; /* kernel 4m-indexed tsb size */ 444 445 uint64_t kpm_tsbbase; /* kernel seg_kpm 4M TSB base address */ 446 int kpm_tsbsz; /* kernel seg_kpm 4M TSB size code */ 447 uint64_t kpmsm_tsbbase; /* kernel seg_kpm 8K TSB base address */ 448 int kpmsm_tsbsz; /* kernel seg_kpm 8K TSB size code */ 449 450 #ifndef sun4v 451 int utsb_dtlb_ttenum = -1; /* index in TLB for utsb locked TTE */ 452 int utsb4m_dtlb_ttenum = -1; /* index in TLB for 4M TSB TTE */ 453 int dtlb_resv_ttenum; /* index in TLB of first reserved TTE */ 454 caddr_t utsb_vabase; /* reserved kernel virtual memory */ 455 caddr_t utsb4m_vabase; /* for trap handler TSB accesses */ 456 #endif /* sun4v */ 457 uint64_t tsb_alloc_bytes = 0; /* bytes allocated to TSBs */ 458 vmem_t *kmem_tsb_default_arena[NLGRPS_MAX]; /* For dynamic TSBs */ 459 460 /* 461 * Size to use for TSB slabs. Future platforms that support page sizes 462 * larger than 4M may wish to change these values, and provide their own 463 * assembly macros for building and decoding the TSB base register contents. 464 * Note disable_large_pages will override the value set here. 
465 */ 466 uint_t tsb_slab_ttesz = TTE4M; 467 uint_t tsb_slab_size; 468 uint_t tsb_slab_shift; 469 uint_t tsb_slab_mask; /* PFN mask for TTE */ 470 471 /* largest TSB size to grow to, will be smaller on smaller memory systems */ 472 int tsb_max_growsize = UTSB_MAX_SZCODE; 473 474 /* 475 * Tunable parameters dealing with TSB policies. 476 */ 477 478 /* 479 * This undocumented tunable forces all 8K TSBs to be allocated from 480 * the kernel heap rather than from the kmem_tsb_default_arena arenas. 481 */ 482 #ifdef DEBUG 483 int tsb_forceheap = 0; 484 #endif /* DEBUG */ 485 486 /* 487 * Decide whether to use per-lgroup arenas, or one global set of 488 * TSB arenas. The default is not to break up per-lgroup, since 489 * most platforms don't recognize any tangible benefit from it. 490 */ 491 int tsb_lgrp_affinity = 0; 492 493 /* 494 * Used for growing the TSB based on the process RSS. 495 * tsb_rss_factor is based on the smallest TSB, and is 496 * shifted by the TSB size to determine if we need to grow. 497 * The default will grow the TSB if the number of TTEs for 498 * this page size exceeds 75% of the number of TSB entries, 499 * which should _almost_ eliminate all conflict misses 500 * (at the expense of using up lots and lots of memory). 501 */ 502 #define TSB_RSS_FACTOR (TSB_ENTRIES(TSB_MIN_SZCODE) * 0.75) 503 #define SFMMU_RSS_TSBSIZE(tsbszc) (tsb_rss_factor << tsbszc) 504 #define SELECT_TSB_SIZECODE(pgcnt) ( \ 505 (enable_tsb_rss_sizing)? sfmmu_select_tsb_szc(pgcnt) : \ 506 default_tsb_size) 507 #define TSB_OK_SHRINK() \ 508 (tsb_alloc_bytes > tsb_alloc_hiwater || freemem < desfree) 509 #define TSB_OK_GROW() \ 510 (tsb_alloc_bytes < tsb_alloc_hiwater && freemem > desfree) 511 512 int enable_tsb_rss_sizing = 1; 513 int tsb_rss_factor = (int)TSB_RSS_FACTOR; 514 515 /* which TSB size code to use for new address spaces or if rss sizing off */ 516 int default_tsb_size = TSB_8K_SZCODE; 517 518 static uint64_t tsb_alloc_hiwater; /* limit TSB reserved memory */ 519 uint64_t tsb_alloc_hiwater_factor; /* tsb_alloc_hiwater = physmem / this */ 520 #define TSB_ALLOC_HIWATER_FACTOR_DEFAULT 32 521 522 #ifdef DEBUG 523 static int tsb_random_size = 0; /* set to 1 to test random tsb sizes on alloc */ 524 static int tsb_grow_stress = 0; /* if set to 1, keep replacing TSB w/ random */ 525 static int tsb_alloc_mtbf = 0; /* fail allocation every n attempts */ 526 static int tsb_alloc_fail_mtbf = 0; 527 static int tsb_alloc_count = 0; 528 #endif /* DEBUG */ 529 530 /* if set to 1, will remap valid TTEs when growing TSB. */ 531 int tsb_remap_ttes = 1; 532 533 /* 534 * If we have more than this many mappings, allocate a second TSB. 535 * This default is chosen because the I/D fully associative TLBs are 536 * assumed to have at least 8 available entries. Platforms with a 537 * larger fully-associative TLB could probably override the default. 
538 */ 539 int tsb_sectsb_threshold = 8; 540 541 /* 542 * kstat data 543 */ 544 struct sfmmu_global_stat sfmmu_global_stat; 545 struct sfmmu_tsbsize_stat sfmmu_tsbsize_stat; 546 547 /* 548 * Global data 549 */ 550 sfmmu_t *ksfmmup; /* kernel's hat id */ 551 552 #ifdef DEBUG 553 static void chk_tte(tte_t *, tte_t *, tte_t *, struct hme_blk *); 554 #endif 555 556 /* sfmmu locking operations */ 557 static kmutex_t *sfmmu_mlspl_enter(struct page *, int); 558 static int sfmmu_mlspl_held(struct page *, int); 559 560 kmutex_t *sfmmu_page_enter(page_t *); 561 void sfmmu_page_exit(kmutex_t *); 562 int sfmmu_page_spl_held(struct page *); 563 564 /* sfmmu internal locking operations - accessed directly */ 565 static void sfmmu_mlist_reloc_enter(page_t *, page_t *, 566 kmutex_t **, kmutex_t **); 567 static void sfmmu_mlist_reloc_exit(kmutex_t *, kmutex_t *); 568 static hatlock_t * 569 sfmmu_hat_enter(sfmmu_t *); 570 static hatlock_t * 571 sfmmu_hat_tryenter(sfmmu_t *); 572 static void sfmmu_hat_exit(hatlock_t *); 573 static void sfmmu_hat_lock_all(void); 574 static void sfmmu_hat_unlock_all(void); 575 static void sfmmu_ismhat_enter(sfmmu_t *, int); 576 static void sfmmu_ismhat_exit(sfmmu_t *, int); 577 578 /* 579 * Array of mutexes protecting a page's mapping list and p_nrm field. 580 * 581 * The hash function looks complicated, but is made up so that: 582 * 583 * "pp" not shifted, so adjacent pp values will hash to different cache lines 584 * (8 byte alignment * 8 bytes/mutes == 64 byte coherency subblock) 585 * 586 * "pp" >> mml_shift, incorporates more source bits into the hash result 587 * 588 * "& (mml_table_size - 1), should be faster than using remainder "%" 589 * 590 * Hopefully, mml_table, mml_table_size and mml_shift are all in the same 591 * cacheline, since they get declared next to each other below. We'll trust 592 * ld not to do something random. 593 */ 594 #ifdef DEBUG 595 int mlist_hash_debug = 0; 596 #define MLIST_HASH(pp) (mlist_hash_debug ? &mml_table[0] : \ 597 &mml_table[((uintptr_t)(pp) + \ 598 ((uintptr_t)(pp) >> mml_shift)) & (mml_table_sz - 1)]) 599 #else /* !DEBUG */ 600 #define MLIST_HASH(pp) &mml_table[ \ 601 ((uintptr_t)(pp) + ((uintptr_t)(pp) >> mml_shift)) & (mml_table_sz - 1)] 602 #endif /* !DEBUG */ 603 604 kmutex_t *mml_table; 605 uint_t mml_table_sz; /* must be a power of 2 */ 606 uint_t mml_shift; /* log2(mml_table_sz) + 3 for align */ 607 608 kpm_hlk_t *kpmp_table; 609 uint_t kpmp_table_sz; /* must be a power of 2 */ 610 uchar_t kpmp_shift; 611 612 kpm_shlk_t *kpmp_stable; 613 uint_t kpmp_stable_sz; /* must be a power of 2 */ 614 615 /* 616 * SPL_HASH was improved to avoid false cache line sharing 617 */ 618 #define SPL_TABLE_SIZE 128 619 #define SPL_MASK (SPL_TABLE_SIZE - 1) 620 #define SPL_SHIFT 7 /* log2(SPL_TABLE_SIZE) */ 621 622 #define SPL_INDEX(pp) \ 623 ((((uintptr_t)(pp) >> SPL_SHIFT) ^ \ 624 ((uintptr_t)(pp) >> (SPL_SHIFT << 1))) & \ 625 (SPL_TABLE_SIZE - 1)) 626 627 #define SPL_HASH(pp) \ 628 (&sfmmu_page_lock[SPL_INDEX(pp) & SPL_MASK].pad_mutex) 629 630 static pad_mutex_t sfmmu_page_lock[SPL_TABLE_SIZE]; 631 632 633 /* 634 * hat_unload_callback() will group together callbacks in order 635 * to avoid xt_sync() calls. This is the maximum size of the group. 
636 */ 637 #define MAX_CB_ADDR 32 638 639 tte_t hw_tte; 640 static ulong_t sfmmu_dmr_maxbit = DMR_MAXBIT; 641 642 static char *mmu_ctx_kstat_names[] = { 643 "mmu_ctx_tsb_exceptions", 644 "mmu_ctx_tsb_raise_exception", 645 "mmu_ctx_wrap_around", 646 }; 647 648 /* 649 * Wrapper for vmem_xalloc since vmem_create only allows limited 650 * parameters for vm_source_alloc functions. This function allows us 651 * to specify alignment consistent with the size of the object being 652 * allocated. 653 */ 654 static void * 655 sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *vmp, size_t size, int vmflag) 656 { 657 return (vmem_xalloc(vmp, size, size, 0, 0, NULL, NULL, vmflag)); 658 } 659 660 /* Common code for setting tsb_alloc_hiwater. */ 661 #define SFMMU_SET_TSB_ALLOC_HIWATER(pages) tsb_alloc_hiwater = \ 662 ptob(pages) / tsb_alloc_hiwater_factor 663 664 /* 665 * Set tsb_max_growsize to allow at most all of physical memory to be mapped by 666 * a single TSB. physmem is the number of physical pages so we need physmem 8K 667 * TTEs to represent all those physical pages. We round this up by using 668 * 1<<highbit(). To figure out which size code to use, remember that the size 669 * code is just an amount to shift the smallest TSB size to get the size of 670 * this TSB. So we subtract that size, TSB_START_SIZE, from highbit() (or 671 * highbit() - 1) to get the size code for the smallest TSB that can represent 672 * all of physical memory, while erring on the side of too much. 673 * 674 * If the computed size code is less than the current tsb_max_growsize, we set 675 * tsb_max_growsize to the computed size code. In the case where the computed 676 * size code is greater than tsb_max_growsize, we have these restrictions that 677 * apply to increasing tsb_max_growsize: 678 * 1) TSBs can't grow larger than the TSB slab size 679 * 2) TSBs can't grow larger than UTSB_MAX_SZCODE. 680 */ 681 #define SFMMU_SET_TSB_MAX_GROWSIZE(pages) { \ 682 int i, szc; \ 683 \ 684 i = highbit(pages); \ 685 if ((1 << (i - 1)) == (pages)) \ 686 i--; /* 2^n case, round down */ \ 687 szc = i - TSB_START_SIZE; \ 688 if (szc < tsb_max_growsize) \ 689 tsb_max_growsize = szc; \ 690 else if ((szc > tsb_max_growsize) && \ 691 (szc <= tsb_slab_shift - (TSB_START_SIZE + TSB_ENTRY_SHIFT))) \ 692 tsb_max_growsize = MIN(szc, UTSB_MAX_SZCODE); \ 693 } 694 695 /* 696 * Given a pointer to an sfmmu and a TTE size code, return a pointer to the 697 * tsb_info which handles that TTE size. 698 */ 699 #define SFMMU_GET_TSBINFO(tsbinfop, sfmmup, tte_szc) \ 700 (tsbinfop) = (sfmmup)->sfmmu_tsb; \ 701 ASSERT(sfmmu_hat_lock_held(sfmmup)); \ 702 if ((tte_szc) >= TTE4M) \ 703 (tsbinfop) = (tsbinfop)->tsb_next; 704 705 /* 706 * Return the number of mappings present in the HAT 707 * for a particular process and page size. 708 */ 709 #define SFMMU_TTE_CNT(sfmmup, szc) \ 710 (sfmmup)->sfmmu_iblk? \ 711 (sfmmup)->sfmmu_ismttecnt[(szc)] + \ 712 (sfmmup)->sfmmu_ttecnt[(szc)] : \ 713 (sfmmup)->sfmmu_ttecnt[(szc)]; 714 715 /* 716 * Macro to use to unload entries from the TSB. 717 * It has knowledge of which page sizes get replicated in the TSB 718 * and will call the appropriate unload routine for the appropriate size. 
719 */ 720 #define SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp) \ 721 { \ 722 int ttesz = get_hblk_ttesz(hmeblkp); \ 723 if (ttesz == TTE8K || ttesz == TTE4M) { \ 724 sfmmu_unload_tsb(sfmmup, addr, ttesz); \ 725 } else { \ 726 caddr_t sva = (caddr_t)get_hblk_base(hmeblkp); \ 727 caddr_t eva = sva + get_hblk_span(hmeblkp); \ 728 ASSERT(addr >= sva && addr < eva); \ 729 sfmmu_unload_tsb_range(sfmmup, sva, eva, ttesz); \ 730 } \ 731 } 732 733 734 /* Update tsb_alloc_hiwater after memory is configured. */ 735 /*ARGSUSED*/ 736 static void 737 sfmmu_update_tsb_post_add(void *arg, pgcnt_t delta_pages) 738 { 739 /* Assumes physmem has already been updated. */ 740 SFMMU_SET_TSB_ALLOC_HIWATER(physmem); 741 SFMMU_SET_TSB_MAX_GROWSIZE(physmem); 742 } 743 744 /* 745 * Update tsb_alloc_hiwater before memory is deleted. We'll do nothing here 746 * and update tsb_alloc_hiwater and tsb_max_growsize after the memory is 747 * deleted. 748 */ 749 /*ARGSUSED*/ 750 static int 751 sfmmu_update_tsb_pre_del(void *arg, pgcnt_t delta_pages) 752 { 753 return (0); 754 } 755 756 /* Update tsb_alloc_hiwater after memory fails to be unconfigured. */ 757 /*ARGSUSED*/ 758 static void 759 sfmmu_update_tsb_post_del(void *arg, pgcnt_t delta_pages, int cancelled) 760 { 761 /* 762 * Whether the delete was cancelled or not, just go ahead and update 763 * tsb_alloc_hiwater and tsb_max_growsize. 764 */ 765 SFMMU_SET_TSB_ALLOC_HIWATER(physmem); 766 SFMMU_SET_TSB_MAX_GROWSIZE(physmem); 767 } 768 769 static kphysm_setup_vector_t sfmmu_update_tsb_vec = { 770 KPHYSM_SETUP_VECTOR_VERSION, /* version */ 771 sfmmu_update_tsb_post_add, /* post_add */ 772 sfmmu_update_tsb_pre_del, /* pre_del */ 773 sfmmu_update_tsb_post_del /* post_del */ 774 }; 775 776 777 /* 778 * HME_BLK HASH PRIMITIVES 779 */ 780 781 /* 782 * Enter a hme on the mapping list for page pp. 783 * When large pages are more prevalent in the system we might want to 784 * keep the mapping list in ascending order by the hment size. For now, 785 * small pages are more frequent, so don't slow it down. 786 */ 787 #define HME_ADD(hme, pp) \ 788 { \ 789 ASSERT(sfmmu_mlist_held(pp)); \ 790 \ 791 hme->hme_prev = NULL; \ 792 hme->hme_next = pp->p_mapping; \ 793 hme->hme_page = pp; \ 794 if (pp->p_mapping) { \ 795 ((struct sf_hment *)(pp->p_mapping))->hme_prev = hme;\ 796 ASSERT(pp->p_share > 0); \ 797 } else { \ 798 /* EMPTY */ \ 799 ASSERT(pp->p_share == 0); \ 800 } \ 801 pp->p_mapping = hme; \ 802 pp->p_share++; \ 803 } 804 805 /* 806 * Enter a hme on the mapping list for page pp. 807 * If we are unmapping a large translation, we need to make sure that the 808 * change is reflect in the corresponding bit of the p_index field. 809 */ 810 #define HME_SUB(hme, pp) \ 811 { \ 812 ASSERT(sfmmu_mlist_held(pp)); \ 813 ASSERT(hme->hme_page == pp || IS_PAHME(hme)); \ 814 \ 815 if (pp->p_mapping == NULL) { \ 816 panic("hme_remove - no mappings"); \ 817 } \ 818 \ 819 membar_stst(); /* ensure previous stores finish */ \ 820 \ 821 ASSERT(pp->p_share > 0); \ 822 pp->p_share--; \ 823 \ 824 if (hme->hme_prev) { \ 825 ASSERT(pp->p_mapping != hme); \ 826 ASSERT(hme->hme_prev->hme_page == pp || \ 827 IS_PAHME(hme->hme_prev)); \ 828 hme->hme_prev->hme_next = hme->hme_next; \ 829 } else { \ 830 ASSERT(pp->p_mapping == hme); \ 831 pp->p_mapping = hme->hme_next; \ 832 ASSERT((pp->p_mapping == NULL) ? 
\ 833 (pp->p_share == 0) : 1); \ 834 } \ 835 \ 836 if (hme->hme_next) { \ 837 ASSERT(hme->hme_next->hme_page == pp || \ 838 IS_PAHME(hme->hme_next)); \ 839 hme->hme_next->hme_prev = hme->hme_prev; \ 840 } \ 841 \ 842 /* zero out the entry */ \ 843 hme->hme_next = NULL; \ 844 hme->hme_prev = NULL; \ 845 hme->hme_page = NULL; \ 846 \ 847 if (hme_size(hme) > TTE8K) { \ 848 /* remove mappings for remainder of large pg */ \ 849 sfmmu_rm_large_mappings(pp, hme_size(hme)); \ 850 } \ 851 } 852 853 /* 854 * This function returns the hment given the hme_blk and a vaddr. 855 * It assumes addr has already been checked to belong to hme_blk's 856 * range. 857 */ 858 #define HBLKTOHME(hment, hmeblkp, addr) \ 859 { \ 860 int index; \ 861 HBLKTOHME_IDX(hment, hmeblkp, addr, index) \ 862 } 863 864 /* 865 * Version of HBLKTOHME that also returns the index in hmeblkp 866 * of the hment. 867 */ 868 #define HBLKTOHME_IDX(hment, hmeblkp, addr, idx) \ 869 { \ 870 ASSERT(in_hblk_range((hmeblkp), (addr))); \ 871 \ 872 if (get_hblk_ttesz(hmeblkp) == TTE8K) { \ 873 idx = (((uintptr_t)(addr) >> MMU_PAGESHIFT) & (NHMENTS-1)); \ 874 } else \ 875 idx = 0; \ 876 \ 877 (hment) = &(hmeblkp)->hblk_hme[idx]; \ 878 } 879 880 /* 881 * Disable any page sizes not supported by the CPU 882 */ 883 void 884 hat_init_pagesizes() 885 { 886 int i; 887 888 mmu_exported_page_sizes = 0; 889 for (i = TTE8K; i < max_mmu_page_sizes; i++) { 890 extern int disable_text_largepages; 891 extern int disable_initdata_largepages; 892 893 szc_2_userszc[i] = (uint_t)-1; 894 userszc_2_szc[i] = (uint_t)-1; 895 896 if ((mmu_exported_pagesize_mask & (1 << i)) == 0) { 897 disable_large_pages |= (1 << i); 898 disable_ism_large_pages |= (1 << i); 899 disable_text_largepages |= (1 << i); 900 disable_initdata_largepages |= (1 << i); 901 } else { 902 szc_2_userszc[i] = mmu_exported_page_sizes; 903 userszc_2_szc[mmu_exported_page_sizes] = i; 904 mmu_exported_page_sizes++; 905 } 906 } 907 908 disable_auto_large_pages = disable_large_pages; 909 910 /* 911 * Initialize mmu-specific large page sizes. 912 */ 913 if (&mmu_large_pages_disabled) { 914 disable_large_pages |= mmu_large_pages_disabled(HAT_LOAD); 915 disable_ism_large_pages |= 916 mmu_large_pages_disabled(HAT_LOAD_SHARE); 917 disable_auto_large_pages |= 918 mmu_large_pages_disabled(HAT_LOAD_AUTOLPG); 919 } 920 921 } 922 923 /* 924 * Initialize the hardware address translation structures. 925 */ 926 void 927 hat_init(void) 928 { 929 int i; 930 uint_t sz; 931 uint_t maxtsb; 932 size_t size; 933 934 hat_lock_init(); 935 hat_kstat_init(); 936 937 /* 938 * Hardware-only bits in a TTE 939 */ 940 MAKE_TTE_MASK(&hw_tte); 941 942 hat_init_pagesizes(); 943 944 /* Initialize the hash locks */ 945 for (i = 0; i < khmehash_num; i++) { 946 mutex_init(&khme_hash[i].hmehash_mutex, NULL, 947 MUTEX_DEFAULT, NULL); 948 } 949 for (i = 0; i < uhmehash_num; i++) { 950 mutex_init(&uhme_hash[i].hmehash_mutex, NULL, 951 MUTEX_DEFAULT, NULL); 952 } 953 khmehash_num--; /* make sure counter starts from 0 */ 954 uhmehash_num--; /* make sure counter starts from 0 */ 955 956 /* 957 * Allocate context domain structures. 958 * 959 * A platform may choose to modify max_mmu_ctxdoms in 960 * set_platform_defaults(). If a platform does not define 961 * a set_platform_defaults() or does not choose to modify 962 * max_mmu_ctxdoms, it gets one MMU context domain for every CPU. 963 * 964 * For sun4v, there will be one global context domain, this is to 965 * avoid the ldom cpu substitution problem. 
966 * 967 * For all platforms that have CPUs sharing MMUs, this 968 * value must be defined. 969 */ 970 if (max_mmu_ctxdoms == 0) { 971 #ifndef sun4v 972 max_mmu_ctxdoms = max_ncpus; 973 #else /* sun4v */ 974 max_mmu_ctxdoms = 1; 975 #endif /* sun4v */ 976 } 977 978 size = max_mmu_ctxdoms * sizeof (mmu_ctx_t *); 979 mmu_ctxs_tbl = kmem_zalloc(size, KM_SLEEP); 980 981 /* mmu_ctx_t is 64 bytes aligned */ 982 mmuctxdom_cache = kmem_cache_create("mmuctxdom_cache", 983 sizeof (mmu_ctx_t), 64, NULL, NULL, NULL, NULL, NULL, 0); 984 /* 985 * MMU context domain initialization for the Boot CPU. 986 * This needs the context domains array allocated above. 987 */ 988 mutex_enter(&cpu_lock); 989 sfmmu_cpu_init(CPU); 990 mutex_exit(&cpu_lock); 991 992 /* 993 * Intialize ism mapping list lock. 994 */ 995 996 mutex_init(&ism_mlist_lock, NULL, MUTEX_DEFAULT, NULL); 997 998 /* 999 * Each sfmmu structure carries an array of MMU context info 1000 * structures, one per context domain. The size of this array depends 1001 * on the maximum number of context domains. So, the size of the 1002 * sfmmu structure varies per platform. 1003 * 1004 * sfmmu is allocated from static arena, because trap 1005 * handler at TL > 0 is not allowed to touch kernel relocatable 1006 * memory. sfmmu's alignment is changed to 64 bytes from 1007 * default 8 bytes, as the lower 6 bits will be used to pass 1008 * pgcnt to vtag_flush_pgcnt_tl1. 1009 */ 1010 size = sizeof (sfmmu_t) + sizeof (sfmmu_ctx_t) * (max_mmu_ctxdoms - 1); 1011 1012 sfmmuid_cache = kmem_cache_create("sfmmuid_cache", size, 1013 64, sfmmu_idcache_constructor, sfmmu_idcache_destructor, 1014 NULL, NULL, static_arena, 0); 1015 1016 sfmmu_tsbinfo_cache = kmem_cache_create("sfmmu_tsbinfo_cache", 1017 sizeof (struct tsb_info), 0, NULL, NULL, NULL, NULL, NULL, 0); 1018 1019 /* 1020 * Since we only use the tsb8k cache to "borrow" pages for TSBs 1021 * from the heap when low on memory or when TSB_FORCEALLOC is 1022 * specified, don't use magazines to cache them--we want to return 1023 * them to the system as quickly as possible. 1024 */ 1025 sfmmu_tsb8k_cache = kmem_cache_create("sfmmu_tsb8k_cache", 1026 MMU_PAGESIZE, MMU_PAGESIZE, NULL, NULL, NULL, NULL, 1027 static_arena, KMC_NOMAGAZINE); 1028 1029 /* 1030 * Set tsb_alloc_hiwater to 1/tsb_alloc_hiwater_factor of physical 1031 * memory, which corresponds to the old static reserve for TSBs. 1032 * tsb_alloc_hiwater_factor defaults to 32. This caps the amount of 1033 * memory we'll allocate for TSB slabs; beyond this point TSB 1034 * allocations will be taken from the kernel heap (via 1035 * sfmmu_tsb8k_cache) and will be throttled as would any other kmem 1036 * consumer. 1037 */ 1038 if (tsb_alloc_hiwater_factor == 0) { 1039 tsb_alloc_hiwater_factor = TSB_ALLOC_HIWATER_FACTOR_DEFAULT; 1040 } 1041 SFMMU_SET_TSB_ALLOC_HIWATER(physmem); 1042 1043 /* Set tsb_max_growsize. */ 1044 SFMMU_SET_TSB_MAX_GROWSIZE(physmem); 1045 1046 /* 1047 * On smaller memory systems, allocate TSB memory in smaller chunks 1048 * than the default 4M slab size. We also honor disable_large_pages 1049 * here. 1050 * 1051 * The trap handlers need to be patched with the final slab shift, 1052 * since they need to be able to construct the TSB pointer at runtime. 
1053 */ 1054 if (tsb_max_growsize <= TSB_512K_SZCODE) 1055 tsb_slab_ttesz = TTE512K; 1056 1057 for (sz = tsb_slab_ttesz; sz > 0; sz--) { 1058 if (!(disable_large_pages & (1 << sz))) 1059 break; 1060 } 1061 1062 tsb_slab_ttesz = sz; 1063 tsb_slab_shift = MMU_PAGESHIFT + (sz << 1) + sz; 1064 tsb_slab_size = 1 << tsb_slab_shift; 1065 tsb_slab_mask = (1 << (tsb_slab_shift - MMU_PAGESHIFT)) - 1; 1066 1067 maxtsb = tsb_slab_shift - (TSB_START_SIZE + TSB_ENTRY_SHIFT); 1068 if (tsb_max_growsize > maxtsb) 1069 tsb_max_growsize = maxtsb; 1070 1071 /* 1072 * Set up memory callback to update tsb_alloc_hiwater and 1073 * tsb_max_growsize. 1074 */ 1075 i = kphysm_setup_func_register(&sfmmu_update_tsb_vec, (void *) 0); 1076 ASSERT(i == 0); 1077 1078 /* 1079 * kmem_tsb_arena is the source from which large TSB slabs are 1080 * drawn. The quantum of this arena corresponds to the largest 1081 * TSB size we can dynamically allocate for user processes. 1082 * Currently it must also be a supported page size since we 1083 * use exactly one translation entry to map each slab page. 1084 * 1085 * The per-lgroup kmem_tsb_default_arena arenas are the arenas from 1086 * which most TSBs are allocated. Since most TSB allocations are 1087 * typically 8K we have a kmem cache we stack on top of each 1088 * kmem_tsb_default_arena to speed up those allocations. 1089 * 1090 * Note the two-level scheme of arenas is required only 1091 * because vmem_create doesn't allow us to specify alignment 1092 * requirements. If this ever changes the code could be 1093 * simplified to use only one level of arenas. 1094 */ 1095 kmem_tsb_arena = vmem_create("kmem_tsb", NULL, 0, tsb_slab_size, 1096 sfmmu_vmem_xalloc_aligned_wrapper, vmem_xfree, heap_arena, 1097 0, VM_SLEEP); 1098 1099 if (tsb_lgrp_affinity) { 1100 char s[50]; 1101 for (i = 0; i < NLGRPS_MAX; i++) { 1102 (void) sprintf(s, "kmem_tsb_lgrp%d", i); 1103 kmem_tsb_default_arena[i] = 1104 vmem_create(s, NULL, 0, PAGESIZE, 1105 sfmmu_tsb_segkmem_alloc, sfmmu_tsb_segkmem_free, 1106 kmem_tsb_arena, 0, VM_SLEEP | VM_BESTFIT); 1107 (void) sprintf(s, "sfmmu_tsb_lgrp%d_cache", i); 1108 sfmmu_tsb_cache[i] = kmem_cache_create(s, PAGESIZE, 1109 PAGESIZE, NULL, NULL, NULL, NULL, 1110 kmem_tsb_default_arena[i], 0); 1111 } 1112 } else { 1113 kmem_tsb_default_arena[0] = vmem_create("kmem_tsb_default", 1114 NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc, 1115 sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0, 1116 VM_SLEEP | VM_BESTFIT); 1117 1118 sfmmu_tsb_cache[0] = kmem_cache_create("sfmmu_tsb_cache", 1119 PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL, 1120 kmem_tsb_default_arena[0], 0); 1121 } 1122 1123 sfmmu8_cache = kmem_cache_create("sfmmu8_cache", HME8BLK_SZ, 1124 HMEBLK_ALIGN, sfmmu_hblkcache_constructor, 1125 sfmmu_hblkcache_destructor, 1126 sfmmu_hblkcache_reclaim, (void *)HME8BLK_SZ, 1127 hat_memload_arena, KMC_NOHASH); 1128 1129 hat_memload1_arena = vmem_create("hat_memload1", NULL, 0, PAGESIZE, 1130 segkmem_alloc_permanent, segkmem_free, heap_arena, 0, VM_SLEEP); 1131 1132 sfmmu1_cache = kmem_cache_create("sfmmu1_cache", HME1BLK_SZ, 1133 HMEBLK_ALIGN, sfmmu_hblkcache_constructor, 1134 sfmmu_hblkcache_destructor, 1135 NULL, (void *)HME1BLK_SZ, 1136 hat_memload1_arena, KMC_NOHASH); 1137 1138 pa_hment_cache = kmem_cache_create("pa_hment_cache", PAHME_SZ, 1139 0, NULL, NULL, NULL, NULL, static_arena, KMC_NOHASH); 1140 1141 ism_blk_cache = kmem_cache_create("ism_blk_cache", 1142 sizeof (ism_blk_t), ecache_alignsize, NULL, NULL, 1143 NULL, NULL, static_arena, KMC_NOHASH); 1144 1145 ism_ment_cache = 
kmem_cache_create("ism_ment_cache", 1146 sizeof (ism_ment_t), 0, NULL, NULL, 1147 NULL, NULL, NULL, 0); 1148 1149 /* 1150 * We grab the first hat for the kernel, 1151 */ 1152 AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER); 1153 kas.a_hat = hat_alloc(&kas); 1154 AS_LOCK_EXIT(&kas, &kas.a_lock); 1155 1156 /* 1157 * Initialize hblk_reserve. 1158 */ 1159 ((struct hme_blk *)hblk_reserve)->hblk_nextpa = 1160 va_to_pa((caddr_t)hblk_reserve); 1161 1162 #ifndef UTSB_PHYS 1163 /* 1164 * Reserve some kernel virtual address space for the locked TTEs 1165 * that allow us to probe the TSB from TL>0. 1166 */ 1167 utsb_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size, 1168 0, 0, NULL, NULL, VM_SLEEP); 1169 utsb4m_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size, 1170 0, 0, NULL, NULL, VM_SLEEP); 1171 #endif 1172 1173 #ifdef VAC 1174 /* 1175 * The big page VAC handling code assumes VAC 1176 * will not be bigger than the smallest big 1177 * page- which is 64K. 1178 */ 1179 if (TTEPAGES(TTE64K) < CACHE_NUM_COLOR) { 1180 cmn_err(CE_PANIC, "VAC too big!"); 1181 } 1182 #endif 1183 1184 (void) xhat_init(); 1185 1186 uhme_hash_pa = va_to_pa(uhme_hash); 1187 khme_hash_pa = va_to_pa(khme_hash); 1188 1189 /* 1190 * Initialize relocation locks. kpr_suspendlock is held 1191 * at PIL_MAX to prevent interrupts from pinning the holder 1192 * of a suspended TTE which may access it leading to a 1193 * deadlock condition. 1194 */ 1195 mutex_init(&kpr_mutex, NULL, MUTEX_DEFAULT, NULL); 1196 mutex_init(&kpr_suspendlock, NULL, MUTEX_SPIN, (void *)PIL_MAX); 1197 } 1198 1199 /* 1200 * Initialize locking for the hat layer, called early during boot. 1201 */ 1202 static void 1203 hat_lock_init() 1204 { 1205 int i; 1206 1207 /* 1208 * initialize the array of mutexes protecting a page's mapping 1209 * list and p_nrm field. 1210 */ 1211 for (i = 0; i < mml_table_sz; i++) 1212 mutex_init(&mml_table[i], NULL, MUTEX_DEFAULT, NULL); 1213 1214 if (kpm_enable) { 1215 for (i = 0; i < kpmp_table_sz; i++) { 1216 mutex_init(&kpmp_table[i].khl_mutex, NULL, 1217 MUTEX_DEFAULT, NULL); 1218 } 1219 } 1220 1221 /* 1222 * Initialize array of mutex locks that protects sfmmu fields and 1223 * TSB lists. 1224 */ 1225 for (i = 0; i < SFMMU_NUM_LOCK; i++) 1226 mutex_init(HATLOCK_MUTEXP(&hat_lock[i]), NULL, MUTEX_DEFAULT, 1227 NULL); 1228 } 1229 1230 extern caddr_t kmem64_base, kmem64_end; 1231 1232 #define SFMMU_KERNEL_MAXVA \ 1233 (kmem64_base ? (uintptr_t)kmem64_end : (SYSLIMIT)) 1234 1235 /* 1236 * Allocate a hat structure. 1237 * Called when an address space first uses a hat. 1238 */ 1239 struct hat * 1240 hat_alloc(struct as *as) 1241 { 1242 sfmmu_t *sfmmup; 1243 int i; 1244 uint64_t cnum; 1245 extern uint_t get_color_start(struct as *); 1246 1247 ASSERT(AS_WRITE_HELD(as, &as->a_lock)); 1248 sfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP); 1249 sfmmup->sfmmu_as = as; 1250 sfmmup->sfmmu_flags = 0; 1251 LOCK_INIT_CLEAR(&sfmmup->sfmmu_ctx_lock); 1252 1253 if (as == &kas) { 1254 ksfmmup = sfmmup; 1255 sfmmup->sfmmu_cext = 0; 1256 cnum = KCONTEXT; 1257 1258 sfmmup->sfmmu_clrstart = 0; 1259 sfmmup->sfmmu_tsb = NULL; 1260 /* 1261 * hat_kern_setup() will call sfmmu_init_ktsbinfo() 1262 * to setup tsb_info for ksfmmup. 1263 */ 1264 } else { 1265 1266 /* 1267 * Just set to invalid ctx. When it faults, it will 1268 * get a valid ctx. This would avoid the situation 1269 * where we get a ctx, but it gets stolen and then 1270 * we fault when we try to run and so have to get 1271 * another ctx. 
1272 */ 1273 sfmmup->sfmmu_cext = 0; 1274 cnum = INVALID_CONTEXT; 1275 1276 /* initialize original physical page coloring bin */ 1277 sfmmup->sfmmu_clrstart = get_color_start(as); 1278 #ifdef DEBUG 1279 if (tsb_random_size) { 1280 uint32_t randval = (uint32_t)gettick() >> 4; 1281 int size = randval % (tsb_max_growsize + 1); 1282 1283 /* chose a random tsb size for stress testing */ 1284 (void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb, size, 1285 TSB8K|TSB64K|TSB512K, 0, sfmmup); 1286 } else 1287 #endif /* DEBUG */ 1288 (void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb, 1289 default_tsb_size, 1290 TSB8K|TSB64K|TSB512K, 0, sfmmup); 1291 sfmmup->sfmmu_flags = HAT_SWAPPED; 1292 ASSERT(sfmmup->sfmmu_tsb != NULL); 1293 } 1294 1295 ASSERT(max_mmu_ctxdoms > 0); 1296 for (i = 0; i < max_mmu_ctxdoms; i++) { 1297 sfmmup->sfmmu_ctxs[i].cnum = cnum; 1298 sfmmup->sfmmu_ctxs[i].gnum = 0; 1299 } 1300 1301 sfmmu_setup_tsbinfo(sfmmup); 1302 for (i = 0; i < max_mmu_page_sizes; i++) { 1303 sfmmup->sfmmu_ttecnt[i] = 0; 1304 sfmmup->sfmmu_ismttecnt[i] = 0; 1305 sfmmup->sfmmu_pgsz[i] = TTE8K; 1306 } 1307 1308 sfmmup->sfmmu_iblk = NULL; 1309 sfmmup->sfmmu_ismhat = 0; 1310 sfmmup->sfmmu_ismblkpa = (uint64_t)-1; 1311 if (sfmmup == ksfmmup) { 1312 CPUSET_ALL(sfmmup->sfmmu_cpusran); 1313 } else { 1314 CPUSET_ZERO(sfmmup->sfmmu_cpusran); 1315 } 1316 sfmmup->sfmmu_free = 0; 1317 sfmmup->sfmmu_rmstat = 0; 1318 sfmmup->sfmmu_clrbin = sfmmup->sfmmu_clrstart; 1319 sfmmup->sfmmu_xhat_provider = NULL; 1320 cv_init(&sfmmup->sfmmu_tsb_cv, NULL, CV_DEFAULT, NULL); 1321 return (sfmmup); 1322 } 1323 1324 /* 1325 * Create per-MMU context domain kstats for a given MMU ctx. 1326 */ 1327 static void 1328 sfmmu_mmu_kstat_create(mmu_ctx_t *mmu_ctxp) 1329 { 1330 mmu_ctx_stat_t stat; 1331 kstat_t *mmu_kstat; 1332 1333 ASSERT(MUTEX_HELD(&cpu_lock)); 1334 ASSERT(mmu_ctxp->mmu_kstat == NULL); 1335 1336 mmu_kstat = kstat_create("unix", mmu_ctxp->mmu_idx, "mmu_ctx", 1337 "hat", KSTAT_TYPE_NAMED, MMU_CTX_NUM_STATS, KSTAT_FLAG_VIRTUAL); 1338 1339 if (mmu_kstat == NULL) { 1340 cmn_err(CE_WARN, "kstat_create for MMU %d failed", 1341 mmu_ctxp->mmu_idx); 1342 } else { 1343 mmu_kstat->ks_data = mmu_ctxp->mmu_kstat_data; 1344 for (stat = 0; stat < MMU_CTX_NUM_STATS; stat++) 1345 kstat_named_init(&mmu_ctxp->mmu_kstat_data[stat], 1346 mmu_ctx_kstat_names[stat], KSTAT_DATA_INT64); 1347 mmu_ctxp->mmu_kstat = mmu_kstat; 1348 kstat_install(mmu_kstat); 1349 } 1350 } 1351 1352 /* 1353 * plat_cpuid_to_mmu_ctx_info() is a platform interface that returns MMU 1354 * context domain information for a given CPU. If a platform does not 1355 * specify that interface, then the function below is used instead to return 1356 * default information. The defaults are as follows: 1357 * 1358 * - For sun4u systems there's one MMU context domain per CPU. 1359 * This default is used by all sun4u systems except OPL. OPL systems 1360 * provide platform specific interface to map CPU ids to MMU ids 1361 * because on OPL more than 1 CPU shares a single MMU. 1362 * Note that on sun4v, there is one global context domain for 1363 * the entire system. This is to avoid running into potential problem 1364 * with ldom physical cpu substitution feature. 1365 * - The number of MMU context IDs supported on any CPU in the 1366 * system is 8K. 
1367 */ 1368 /*ARGSUSED*/ 1369 static void 1370 sfmmu_cpuid_to_mmu_ctx_info(processorid_t cpuid, mmu_ctx_info_t *infop) 1371 { 1372 infop->mmu_nctxs = nctxs; 1373 #ifndef sun4v 1374 infop->mmu_idx = cpu[cpuid]->cpu_seqid; 1375 #else /* sun4v */ 1376 infop->mmu_idx = 0; 1377 #endif /* sun4v */ 1378 } 1379 1380 /* 1381 * Called during CPU initialization to set the MMU context-related information 1382 * for a CPU. 1383 * 1384 * cpu_lock serializes accesses to mmu_ctxs and mmu_saved_gnum. 1385 */ 1386 void 1387 sfmmu_cpu_init(cpu_t *cp) 1388 { 1389 mmu_ctx_info_t info; 1390 mmu_ctx_t *mmu_ctxp; 1391 1392 ASSERT(MUTEX_HELD(&cpu_lock)); 1393 1394 if (&plat_cpuid_to_mmu_ctx_info == NULL) 1395 sfmmu_cpuid_to_mmu_ctx_info(cp->cpu_id, &info); 1396 else 1397 plat_cpuid_to_mmu_ctx_info(cp->cpu_id, &info); 1398 1399 ASSERT(info.mmu_idx < max_mmu_ctxdoms); 1400 1401 if ((mmu_ctxp = mmu_ctxs_tbl[info.mmu_idx]) == NULL) { 1402 /* Each mmu_ctx is cacheline aligned. */ 1403 mmu_ctxp = kmem_cache_alloc(mmuctxdom_cache, KM_SLEEP); 1404 bzero(mmu_ctxp, sizeof (mmu_ctx_t)); 1405 1406 mutex_init(&mmu_ctxp->mmu_lock, NULL, MUTEX_SPIN, 1407 (void *)ipltospl(DISP_LEVEL)); 1408 mmu_ctxp->mmu_idx = info.mmu_idx; 1409 mmu_ctxp->mmu_nctxs = info.mmu_nctxs; 1410 /* 1411 * Globally for lifetime of a system, 1412 * gnum must always increase. 1413 * mmu_saved_gnum is protected by the cpu_lock. 1414 */ 1415 mmu_ctxp->mmu_gnum = mmu_saved_gnum + 1; 1416 mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS; 1417 1418 sfmmu_mmu_kstat_create(mmu_ctxp); 1419 1420 mmu_ctxs_tbl[info.mmu_idx] = mmu_ctxp; 1421 } else { 1422 ASSERT(mmu_ctxp->mmu_idx == info.mmu_idx); 1423 } 1424 1425 /* 1426 * The mmu_lock is acquired here to prevent races with 1427 * the wrap-around code. 1428 */ 1429 mutex_enter(&mmu_ctxp->mmu_lock); 1430 1431 1432 mmu_ctxp->mmu_ncpus++; 1433 CPUSET_ADD(mmu_ctxp->mmu_cpuset, cp->cpu_id); 1434 CPU_MMU_IDX(cp) = info.mmu_idx; 1435 CPU_MMU_CTXP(cp) = mmu_ctxp; 1436 1437 mutex_exit(&mmu_ctxp->mmu_lock); 1438 } 1439 1440 /* 1441 * Called to perform MMU context-related cleanup for a CPU. 1442 */ 1443 void 1444 sfmmu_cpu_cleanup(cpu_t *cp) 1445 { 1446 mmu_ctx_t *mmu_ctxp; 1447 1448 ASSERT(MUTEX_HELD(&cpu_lock)); 1449 1450 mmu_ctxp = CPU_MMU_CTXP(cp); 1451 ASSERT(mmu_ctxp != NULL); 1452 1453 /* 1454 * The mmu_lock is acquired here to prevent races with 1455 * the wrap-around code. 1456 */ 1457 mutex_enter(&mmu_ctxp->mmu_lock); 1458 1459 CPU_MMU_CTXP(cp) = NULL; 1460 1461 CPUSET_DEL(mmu_ctxp->mmu_cpuset, cp->cpu_id); 1462 if (--mmu_ctxp->mmu_ncpus == 0) { 1463 mmu_ctxs_tbl[mmu_ctxp->mmu_idx] = NULL; 1464 mutex_exit(&mmu_ctxp->mmu_lock); 1465 mutex_destroy(&mmu_ctxp->mmu_lock); 1466 1467 if (mmu_ctxp->mmu_kstat) 1468 kstat_delete(mmu_ctxp->mmu_kstat); 1469 1470 /* mmu_saved_gnum is protected by the cpu_lock. */ 1471 if (mmu_saved_gnum < mmu_ctxp->mmu_gnum) 1472 mmu_saved_gnum = mmu_ctxp->mmu_gnum; 1473 1474 kmem_cache_free(mmuctxdom_cache, mmu_ctxp); 1475 1476 return; 1477 } 1478 1479 mutex_exit(&mmu_ctxp->mmu_lock); 1480 } 1481 1482 /* 1483 * Hat_setup, makes an address space context the current active one. 1484 * In sfmmu this translates to setting the secondary context with the 1485 * corresponding context. 1486 */ 1487 void 1488 hat_setup(struct hat *sfmmup, int allocflag) 1489 { 1490 hatlock_t *hatlockp; 1491 1492 /* Init needs some special treatment. */ 1493 if (allocflag == HAT_INIT) { 1494 /* 1495 * Make sure that we have 1496 * 1. a TSB 1497 * 2. a valid ctx that doesn't get stolen after this point. 
1498 */ 1499 hatlockp = sfmmu_hat_enter(sfmmup); 1500 1501 /* 1502 * Swap in the TSB. hat_init() allocates tsbinfos without 1503 * TSBs, but we need one for init, since the kernel does some 1504 * special things to set up its stack and needs the TSB to 1505 * resolve page faults. 1506 */ 1507 sfmmu_tsb_swapin(sfmmup, hatlockp); 1508 1509 sfmmu_get_ctx(sfmmup); 1510 1511 sfmmu_hat_exit(hatlockp); 1512 } else { 1513 ASSERT(allocflag == HAT_ALLOC); 1514 1515 hatlockp = sfmmu_hat_enter(sfmmup); 1516 kpreempt_disable(); 1517 1518 CPUSET_ADD(sfmmup->sfmmu_cpusran, CPU->cpu_id); 1519 1520 /* 1521 * sfmmu_setctx_sec takes <pgsz|cnum> as a parameter, 1522 * pagesize bits don't matter in this case since we are passing 1523 * INVALID_CONTEXT to it. 1524 */ 1525 sfmmu_setctx_sec(INVALID_CONTEXT); 1526 sfmmu_clear_utsbinfo(); 1527 1528 kpreempt_enable(); 1529 sfmmu_hat_exit(hatlockp); 1530 } 1531 } 1532 1533 /* 1534 * Free all the translation resources for the specified address space. 1535 * Called from as_free when an address space is being destroyed. 1536 */ 1537 void 1538 hat_free_start(struct hat *sfmmup) 1539 { 1540 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 1541 ASSERT(sfmmup != ksfmmup); 1542 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 1543 1544 sfmmup->sfmmu_free = 1; 1545 } 1546 1547 void 1548 hat_free_end(struct hat *sfmmup) 1549 { 1550 int i; 1551 1552 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 1553 if (sfmmup->sfmmu_ismhat) { 1554 for (i = 0; i < mmu_page_sizes; i++) { 1555 sfmmup->sfmmu_ttecnt[i] = 0; 1556 sfmmup->sfmmu_ismttecnt[i] = 0; 1557 } 1558 } else { 1559 /* EMPTY */ 1560 ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0); 1561 ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0); 1562 ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0); 1563 ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0); 1564 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0); 1565 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0); 1566 } 1567 1568 if (sfmmup->sfmmu_rmstat) { 1569 hat_freestat(sfmmup->sfmmu_as, NULL); 1570 } 1571 1572 while (sfmmup->sfmmu_tsb != NULL) { 1573 struct tsb_info *next = sfmmup->sfmmu_tsb->tsb_next; 1574 sfmmu_tsbinfo_free(sfmmup->sfmmu_tsb); 1575 sfmmup->sfmmu_tsb = next; 1576 } 1577 sfmmu_free_sfmmu(sfmmup); 1578 1579 kmem_cache_free(sfmmuid_cache, sfmmup); 1580 } 1581 1582 /* 1583 * Set up any translation structures, for the specified address space, 1584 * that are needed or preferred when the process is being swapped in. 1585 */ 1586 /* ARGSUSED */ 1587 void 1588 hat_swapin(struct hat *hat) 1589 { 1590 ASSERT(hat->sfmmu_xhat_provider == NULL); 1591 } 1592 1593 /* 1594 * Free all of the translation resources, for the specified address space, 1595 * that can be freed while the process is swapped out. Called from as_swapout. 1596 * Also, free up the ctx that this process was using. 1597 */ 1598 void 1599 hat_swapout(struct hat *sfmmup) 1600 { 1601 struct hmehash_bucket *hmebp; 1602 struct hme_blk *hmeblkp; 1603 struct hme_blk *pr_hblk = NULL; 1604 struct hme_blk *nx_hblk; 1605 int i; 1606 uint64_t hblkpa, prevpa, nx_pa; 1607 struct hme_blk *list = NULL; 1608 hatlock_t *hatlockp; 1609 struct tsb_info *tsbinfop; 1610 struct free_tsb { 1611 struct free_tsb *next; 1612 struct tsb_info *tsbinfop; 1613 }; /* free list of TSBs */ 1614 struct free_tsb *freelist, *last, *next; 1615 1616 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 1617 SFMMU_STAT(sf_swapout); 1618 1619 /* 1620 * There is no way to go from an as to all its translations in sfmmu. 
1621 * Here is one of the times when we take the big hit and traverse 1622 * the hash looking for hme_blks to free up. Not only do we free up 1623 * this as hme_blks but all those that are free. We are obviously 1624 * swapping because we need memory so let's free up as much 1625 * as we can. 1626 * 1627 * Note that we don't flush TLB/TSB here -- it's not necessary 1628 * because: 1629 * 1) we free the ctx we're using and throw away the TSB(s); 1630 * 2) processes aren't runnable while being swapped out. 1631 */ 1632 ASSERT(sfmmup != KHATID); 1633 for (i = 0; i <= UHMEHASH_SZ; i++) { 1634 hmebp = &uhme_hash[i]; 1635 SFMMU_HASH_LOCK(hmebp); 1636 hmeblkp = hmebp->hmeblkp; 1637 hblkpa = hmebp->hmeh_nextpa; 1638 prevpa = 0; 1639 pr_hblk = NULL; 1640 while (hmeblkp) { 1641 1642 ASSERT(!hmeblkp->hblk_xhat_bit); 1643 1644 if ((hmeblkp->hblk_tag.htag_id == sfmmup) && 1645 !hmeblkp->hblk_shw_bit && !hmeblkp->hblk_lckcnt) { 1646 (void) sfmmu_hblk_unload(sfmmup, hmeblkp, 1647 (caddr_t)get_hblk_base(hmeblkp), 1648 get_hblk_endaddr(hmeblkp), 1649 NULL, HAT_UNLOAD); 1650 } 1651 nx_hblk = hmeblkp->hblk_next; 1652 nx_pa = hmeblkp->hblk_nextpa; 1653 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 1654 ASSERT(!hmeblkp->hblk_lckcnt); 1655 sfmmu_hblk_hash_rm(hmebp, hmeblkp, 1656 prevpa, pr_hblk); 1657 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 1658 } else { 1659 pr_hblk = hmeblkp; 1660 prevpa = hblkpa; 1661 } 1662 hmeblkp = nx_hblk; 1663 hblkpa = nx_pa; 1664 } 1665 SFMMU_HASH_UNLOCK(hmebp); 1666 } 1667 1668 sfmmu_hblks_list_purge(&list); 1669 1670 /* 1671 * Now free up the ctx so that others can reuse it. 1672 */ 1673 hatlockp = sfmmu_hat_enter(sfmmup); 1674 1675 sfmmu_invalidate_ctx(sfmmup); 1676 1677 /* 1678 * Free TSBs, but not tsbinfos, and set SWAPPED flag. 1679 * If TSBs were never swapped in, just return. 1680 * This implies that we don't support partial swapping 1681 * of TSBs -- either all are swapped out, or none are. 1682 * 1683 * We must hold the HAT lock here to prevent racing with another 1684 * thread trying to unmap TTEs from the TSB or running the post- 1685 * relocator after relocating the TSB's memory. Unfortunately, we 1686 * can't free memory while holding the HAT lock or we could 1687 * deadlock, so we build a list of TSBs to be freed after marking 1688 * the tsbinfos as swapped out and free them after dropping the 1689 * lock. 1690 */ 1691 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 1692 sfmmu_hat_exit(hatlockp); 1693 return; 1694 } 1695 1696 SFMMU_FLAGS_SET(sfmmup, HAT_SWAPPED); 1697 last = freelist = NULL; 1698 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; 1699 tsbinfop = tsbinfop->tsb_next) { 1700 ASSERT((tsbinfop->tsb_flags & TSB_SWAPPED) == 0); 1701 1702 /* 1703 * Cast the TSB into a struct free_tsb and put it on the free 1704 * list. 1705 */ 1706 if (freelist == NULL) { 1707 last = freelist = (struct free_tsb *)tsbinfop->tsb_va; 1708 } else { 1709 last->next = (struct free_tsb *)tsbinfop->tsb_va; 1710 last = last->next; 1711 } 1712 last->next = NULL; 1713 last->tsbinfop = tsbinfop; 1714 tsbinfop->tsb_flags |= TSB_SWAPPED; 1715 /* 1716 * Zero out the TTE to clear the valid bit. 1717 * Note we can't use a value like 0xbad because we want to 1718 * ensure diagnostic bits are NEVER set on TTEs that might 1719 * be loaded. The intent is to catch any invalid access 1720 * to the swapped TSB, such as a thread running with a valid 1721 * context without first calling sfmmu_tsb_swapin() to 1722 * allocate TSB memory. 
1723 */ 1724 tsbinfop->tsb_tte.ll = 0; 1725 } 1726 1727 /* Now we can drop the lock and free the TSB memory. */ 1728 sfmmu_hat_exit(hatlockp); 1729 for (; freelist != NULL; freelist = next) { 1730 next = freelist->next; 1731 sfmmu_tsb_free(freelist->tsbinfop); 1732 } 1733 } 1734 1735 /* 1736 * Duplicate the translations of an as into another newas 1737 */ 1738 /* ARGSUSED */ 1739 int 1740 hat_dup(struct hat *hat, struct hat *newhat, caddr_t addr, size_t len, 1741 uint_t flag) 1742 { 1743 ASSERT(hat->sfmmu_xhat_provider == NULL); 1744 ASSERT((flag == 0) || (flag == HAT_DUP_ALL) || (flag == HAT_DUP_COW)); 1745 1746 if (flag == HAT_DUP_COW) { 1747 panic("hat_dup: HAT_DUP_COW not supported"); 1748 } 1749 return (0); 1750 } 1751 1752 /* 1753 * Set up addr to map to page pp with protection prot. 1754 * As an optimization we also load the TSB with the 1755 * corresponding tte but it is no big deal if the tte gets kicked out. 1756 */ 1757 void 1758 hat_memload(struct hat *hat, caddr_t addr, struct page *pp, 1759 uint_t attr, uint_t flags) 1760 { 1761 tte_t tte; 1762 1763 1764 ASSERT(hat != NULL); 1765 ASSERT(PAGE_LOCKED(pp)); 1766 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 1767 ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG)); 1768 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 1769 1770 if (PP_ISFREE(pp)) { 1771 panic("hat_memload: loading a mapping to free page %p", 1772 (void *)pp); 1773 } 1774 1775 if (hat->sfmmu_xhat_provider) { 1776 XHAT_MEMLOAD(hat, addr, pp, attr, flags); 1777 return; 1778 } 1779 1780 ASSERT((hat == ksfmmup) || 1781 AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock)); 1782 1783 if (flags & ~SFMMU_LOAD_ALLFLAG) 1784 cmn_err(CE_NOTE, "hat_memload: unsupported flags %d", 1785 flags & ~SFMMU_LOAD_ALLFLAG); 1786 1787 if (hat->sfmmu_rmstat) 1788 hat_resvstat(MMU_PAGESIZE, hat->sfmmu_as, addr); 1789 1790 #if defined(SF_ERRATA_57) 1791 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) && 1792 (addr < errata57_limit) && (attr & PROT_EXEC) && 1793 !(flags & HAT_LOAD_SHARE)) { 1794 cmn_err(CE_WARN, "hat_memload: illegal attempt to make user " 1795 " page executable"); 1796 attr &= ~PROT_EXEC; 1797 } 1798 #endif 1799 1800 sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K); 1801 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, flags); 1802 1803 /* 1804 * Check TSB and TLB page sizes. 1805 */ 1806 if ((flags & HAT_LOAD_SHARE) == 0) { 1807 sfmmu_check_page_sizes(hat, 1); 1808 } 1809 } 1810 1811 /* 1812 * hat_devload can be called to map real memory (e.g. 1813 * /dev/kmem) and even though hat_devload will determine pf is 1814 * for memory, it will be unable to get a shared lock on the 1815 * page (because someone else has it exclusively) and will 1816 * pass dp = NULL. If tteload doesn't get a non-NULL 1817 * page pointer it can't cache memory. 
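 *
 * As a rough usage sketch (illustrative only; kvaddr and pfn stand
 * for values the caller already has, and the attr/flag choice is just
 * one plausible combination), a caller mapping a single page of
 * device or otherwise pageless physical memory might do:
 *
 *	hat_devload(kas.a_hat, kvaddr, MMU_PAGESIZE, pfn,
 *	    PROT_READ | PROT_WRITE | HAT_NOSYNC,
 *	    HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);
 *
 * HAT_LOAD_NOCONSIST skips the page_t lookup below, and a pfn that
 * is not memory additionally gets the uncached/side-effect
 * attributes forced on in the code that follows.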
1818 */ 1819 void 1820 hat_devload(struct hat *hat, caddr_t addr, size_t len, pfn_t pfn, 1821 uint_t attr, int flags) 1822 { 1823 tte_t tte; 1824 struct page *pp = NULL; 1825 int use_lgpg = 0; 1826 1827 ASSERT(hat != NULL); 1828 1829 if (hat->sfmmu_xhat_provider) { 1830 XHAT_DEVLOAD(hat, addr, len, pfn, attr, flags); 1831 return; 1832 } 1833 1834 ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG)); 1835 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 1836 ASSERT((hat == ksfmmup) || 1837 AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock)); 1838 if (len == 0) 1839 panic("hat_devload: zero len"); 1840 if (flags & ~SFMMU_LOAD_ALLFLAG) 1841 cmn_err(CE_NOTE, "hat_devload: unsupported flags %d", 1842 flags & ~SFMMU_LOAD_ALLFLAG); 1843 1844 #if defined(SF_ERRATA_57) 1845 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) && 1846 (addr < errata57_limit) && (attr & PROT_EXEC) && 1847 !(flags & HAT_LOAD_SHARE)) { 1848 cmn_err(CE_WARN, "hat_devload: illegal attempt to make user " 1849 " page executable"); 1850 attr &= ~PROT_EXEC; 1851 } 1852 #endif 1853 1854 /* 1855 * If it's a memory page find its pp 1856 */ 1857 if (!(flags & HAT_LOAD_NOCONSIST) && pf_is_memory(pfn)) { 1858 pp = page_numtopp_nolock(pfn); 1859 if (pp == NULL) { 1860 flags |= HAT_LOAD_NOCONSIST; 1861 } else { 1862 if (PP_ISFREE(pp)) { 1863 panic("hat_memload: loading " 1864 "a mapping to free page %p", 1865 (void *)pp); 1866 } 1867 if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) { 1868 panic("hat_memload: loading a mapping " 1869 "to unlocked relocatable page %p", 1870 (void *)pp); 1871 } 1872 ASSERT(len == MMU_PAGESIZE); 1873 } 1874 } 1875 1876 if (hat->sfmmu_rmstat) 1877 hat_resvstat(len, hat->sfmmu_as, addr); 1878 1879 if (flags & HAT_LOAD_NOCONSIST) { 1880 attr |= SFMMU_UNCACHEVTTE; 1881 use_lgpg = 1; 1882 } 1883 if (!pf_is_memory(pfn)) { 1884 attr |= SFMMU_UNCACHEPTTE | HAT_NOSYNC; 1885 use_lgpg = 1; 1886 switch (attr & HAT_ORDER_MASK) { 1887 case HAT_STRICTORDER: 1888 case HAT_UNORDERED_OK: 1889 /* 1890 * we set the side effect bit for all non 1891 * memory mappings unless merging is ok 1892 */ 1893 attr |= SFMMU_SIDEFFECT; 1894 break; 1895 case HAT_MERGING_OK: 1896 case HAT_LOADCACHING_OK: 1897 case HAT_STORECACHING_OK: 1898 break; 1899 default: 1900 panic("hat_devload: bad attr"); 1901 break; 1902 } 1903 } 1904 while (len) { 1905 if (!use_lgpg) { 1906 sfmmu_memtte(&tte, pfn, attr, TTE8K); 1907 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 1908 flags); 1909 len -= MMU_PAGESIZE; 1910 addr += MMU_PAGESIZE; 1911 pfn++; 1912 continue; 1913 } 1914 /* 1915 * try to use large pages, check va/pa alignments 1916 * Note that 32M/256M page sizes are not (yet) supported. 
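 *
 * Each branch below applies the same test, just with a different
 * page size; as a sketch, for a candidate size with byte count pgsz,
 * offset mask pgoff and size code ttesz (placeholder names):
 *
 *	len >= pgsz				enough length remains
 *	((uintptr_t)addr & pgoff) == 0		VA is aligned
 *	(mmu_ptob(pfn) & pgoff) == 0		PA is aligned
 *	(disable_large_pages & (1 << ttesz)) == 0	size not disabled
 *
 * Only if all four hold is the larger TTE built; otherwise we fall
 * through to the next smaller size and ultimately to 8K.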
1917 */
1918 if ((len >= MMU_PAGESIZE4M) &&
1919 !((uintptr_t)addr & MMU_PAGEOFFSET4M) &&
1920 !(disable_large_pages & (1 << TTE4M)) &&
1921 !(mmu_ptob(pfn) & MMU_PAGEOFFSET4M)) {
1922 sfmmu_memtte(&tte, pfn, attr, TTE4M);
1923 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
1924 flags);
1925 len -= MMU_PAGESIZE4M;
1926 addr += MMU_PAGESIZE4M;
1927 pfn += MMU_PAGESIZE4M / MMU_PAGESIZE;
1928 } else if ((len >= MMU_PAGESIZE512K) &&
1929 !((uintptr_t)addr & MMU_PAGEOFFSET512K) &&
1930 !(disable_large_pages & (1 << TTE512K)) &&
1931 !(mmu_ptob(pfn) & MMU_PAGEOFFSET512K)) {
1932 sfmmu_memtte(&tte, pfn, attr, TTE512K);
1933 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
1934 flags);
1935 len -= MMU_PAGESIZE512K;
1936 addr += MMU_PAGESIZE512K;
1937 pfn += MMU_PAGESIZE512K / MMU_PAGESIZE;
1938 } else if ((len >= MMU_PAGESIZE64K) &&
1939 !((uintptr_t)addr & MMU_PAGEOFFSET64K) &&
1940 !(disable_large_pages & (1 << TTE64K)) &&
1941 !(mmu_ptob(pfn) & MMU_PAGEOFFSET64K)) {
1942 sfmmu_memtte(&tte, pfn, attr, TTE64K);
1943 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
1944 flags);
1945 len -= MMU_PAGESIZE64K;
1946 addr += MMU_PAGESIZE64K;
1947 pfn += MMU_PAGESIZE64K / MMU_PAGESIZE;
1948 } else {
1949 sfmmu_memtte(&tte, pfn, attr, TTE8K);
1950 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
1951 flags);
1952 len -= MMU_PAGESIZE;
1953 addr += MMU_PAGESIZE;
1954 pfn++;
1955 }
1956 }
1957
1958 /*
1959 * Check TSB and TLB page sizes.
1960 */
1961 if ((flags & HAT_LOAD_SHARE) == 0) {
1962 sfmmu_check_page_sizes(hat, 1);
1963 }
1964 }
1965
1966 /*
1967 * Map the largest extent possible out of the page array. The array may NOT
1968 * be in order. The largest possible mapping a page can have
1969 * is specified in the p_szc field. The p_szc field
1970 * cannot change as long as there are any mappings (large or small)
1971 * to any of the pages that make up the large page. (i.e. any
1972 * promotion/demotion of page size is not up to the hat but up to
1973 * the page free list manager). The array
1974 * should consist of properly aligned contiguous pages that are
1975 * part of a big page for a large mapping to be created.
1976 */
1977 void
1978 hat_memload_array(struct hat *hat, caddr_t addr, size_t len,
1979 struct page **pps, uint_t attr, uint_t flags)
1980 {
1981 int ttesz;
1982 size_t mapsz;
1983 pgcnt_t numpg, npgs;
1984 tte_t tte;
1985 page_t *pp;
1986 int large_pages_disable;
1987
1988 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
1989
1990 if (hat->sfmmu_xhat_provider) {
1991 XHAT_MEMLOAD_ARRAY(hat, addr, len, pps, attr, flags);
1992 return;
1993 }
1994
1995 if (hat->sfmmu_rmstat)
1996 hat_resvstat(len, hat->sfmmu_as, addr);
1997
1998 #if defined(SF_ERRATA_57)
1999 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
2000 (addr < errata57_limit) && (attr & PROT_EXEC) &&
2001 !(flags & HAT_LOAD_SHARE)) {
2002 cmn_err(CE_WARN, "hat_memload_array: illegal attempt to make "
2003 "user page executable");
2004 attr &= ~PROT_EXEC;
2005 }
2006 #endif
2007
2008 /* Get number of pages */
2009 npgs = len >> MMU_PAGESHIFT;
2010
2011 if (flags & HAT_LOAD_SHARE) {
2012 large_pages_disable = disable_ism_large_pages;
2013 } else {
2014 large_pages_disable = disable_large_pages;
2015 }
2016
2017 if (npgs < NHMENTS || large_pages_disable == LARGE_PAGES_OFF) {
2018 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs);
2019 return;
2020 }
2021
2022 while (npgs >= NHMENTS) {
2023 pp = *pps;
2024 for (ttesz = pp->p_szc; ttesz != TTE8K; ttesz--) {
2025 /*
2026 * Check if this page size is disabled.
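 *
 * (A worked example of the surrounding loop, as a sketch with 8K
 * base pages: TTEPAGES(TTE4M) is 512, so the TTE4M iteration is only
 * attempted when at least 512 pages remain, addr is 4M aligned and
 * the root pfn is 512-page aligned.  If sfmmu_tteload_array() then
 * discovers the pages are not physically contiguous it returns
 * nonzero and we simply drop to the next smaller size that is still
 * enabled, ending in the 8K batching path once ttesz reaches TTE8K.)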
2027 */ 2028 if (large_pages_disable & (1 << ttesz)) 2029 continue; 2030 2031 numpg = TTEPAGES(ttesz); 2032 mapsz = numpg << MMU_PAGESHIFT; 2033 if ((npgs >= numpg) && 2034 IS_P2ALIGNED(addr, mapsz) && 2035 IS_P2ALIGNED(pp->p_pagenum, numpg)) { 2036 /* 2037 * At this point we have enough pages and 2038 * we know the virtual address and the pfn 2039 * are properly aligned. We still need 2040 * to check for physical contiguity but since 2041 * it is very likely that this is the case 2042 * we will assume they are so and undo 2043 * the request if necessary. It would 2044 * be great if we could get a hint flag 2045 * like HAT_CONTIG which would tell us 2046 * the pages are contigous for sure. 2047 */ 2048 sfmmu_memtte(&tte, (*pps)->p_pagenum, 2049 attr, ttesz); 2050 if (!sfmmu_tteload_array(hat, &tte, addr, 2051 pps, flags)) { 2052 break; 2053 } 2054 } 2055 } 2056 if (ttesz == TTE8K) { 2057 /* 2058 * We were not able to map array using a large page 2059 * batch a hmeblk or fraction at a time. 2060 */ 2061 numpg = ((uintptr_t)addr >> MMU_PAGESHIFT) 2062 & (NHMENTS-1); 2063 numpg = NHMENTS - numpg; 2064 ASSERT(numpg <= npgs); 2065 mapsz = numpg * MMU_PAGESIZE; 2066 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, 2067 numpg); 2068 } 2069 addr += mapsz; 2070 npgs -= numpg; 2071 pps += numpg; 2072 } 2073 2074 if (npgs) { 2075 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs); 2076 } 2077 2078 /* 2079 * Check TSB and TLB page sizes. 2080 */ 2081 if ((flags & HAT_LOAD_SHARE) == 0) { 2082 sfmmu_check_page_sizes(hat, 1); 2083 } 2084 } 2085 2086 /* 2087 * Function tries to batch 8K pages into the same hme blk. 2088 */ 2089 static void 2090 sfmmu_memload_batchsmall(struct hat *hat, caddr_t vaddr, page_t **pps, 2091 uint_t attr, uint_t flags, pgcnt_t npgs) 2092 { 2093 tte_t tte; 2094 page_t *pp; 2095 struct hmehash_bucket *hmebp; 2096 struct hme_blk *hmeblkp; 2097 int index; 2098 2099 while (npgs) { 2100 /* 2101 * Acquire the hash bucket. 2102 */ 2103 hmebp = sfmmu_tteload_acquire_hashbucket(hat, vaddr, TTE8K); 2104 ASSERT(hmebp); 2105 2106 /* 2107 * Find the hment block. 2108 */ 2109 hmeblkp = sfmmu_tteload_find_hmeblk(hat, hmebp, vaddr, 2110 TTE8K, flags); 2111 ASSERT(hmeblkp); 2112 2113 do { 2114 /* 2115 * Make the tte. 2116 */ 2117 pp = *pps; 2118 sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K); 2119 2120 /* 2121 * Add the translation. 2122 */ 2123 (void) sfmmu_tteload_addentry(hat, hmeblkp, &tte, 2124 vaddr, pps, flags); 2125 2126 /* 2127 * Goto next page. 2128 */ 2129 pps++; 2130 npgs--; 2131 2132 /* 2133 * Goto next address. 2134 */ 2135 vaddr += MMU_PAGESIZE; 2136 2137 /* 2138 * Don't crossover into a different hmentblk. 2139 */ 2140 index = (int)(((uintptr_t)vaddr >> MMU_PAGESHIFT) & 2141 (NHMENTS-1)); 2142 2143 } while (index != 0 && npgs != 0); 2144 2145 /* 2146 * Release the hash bucket. 
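 *
 * (We drop out of the inner loop above either because npgs was
 * exhausted or because the recomputed index wrapped to 0, i.e.
 * vaddr crossed into the range of the next hme_blk.  As a sketch:
 * an hme_blk covers NHMENTS consecutive 8K pages, so the batch
 * boundary falls every NHMENTS * MMU_PAGESIZE bytes of virtual
 * address, and a caller starting at an unaligned vaddr simply gets
 * a shorter first batch.)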
2147 */ 2148 2149 sfmmu_tteload_release_hashbucket(hmebp); 2150 } 2151 } 2152 2153 /* 2154 * Construct a tte for a page: 2155 * 2156 * tte_valid = 1 2157 * tte_size2 = size & TTE_SZ2_BITS (Panther and Olympus-C only) 2158 * tte_size = size 2159 * tte_nfo = attr & HAT_NOFAULT 2160 * tte_ie = attr & HAT_STRUCTURE_LE 2161 * tte_hmenum = hmenum 2162 * tte_pahi = pp->p_pagenum >> TTE_PASHIFT; 2163 * tte_palo = pp->p_pagenum & TTE_PALOMASK; 2164 * tte_ref = 1 (optimization) 2165 * tte_wr_perm = attr & PROT_WRITE; 2166 * tte_no_sync = attr & HAT_NOSYNC 2167 * tte_lock = attr & SFMMU_LOCKTTE 2168 * tte_cp = !(attr & SFMMU_UNCACHEPTTE) 2169 * tte_cv = !(attr & SFMMU_UNCACHEVTTE) 2170 * tte_e = attr & SFMMU_SIDEFFECT 2171 * tte_priv = !(attr & PROT_USER) 2172 * tte_hwwr = if nosync is set and it is writable we set the mod bit (opt) 2173 * tte_glb = 0 2174 */ 2175 void 2176 sfmmu_memtte(tte_t *ttep, pfn_t pfn, uint_t attr, int tte_sz) 2177 { 2178 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 2179 2180 ttep->tte_inthi = MAKE_TTE_INTHI(pfn, attr, tte_sz, 0 /* hmenum */); 2181 ttep->tte_intlo = MAKE_TTE_INTLO(pfn, attr, tte_sz, 0 /* hmenum */); 2182 2183 if (TTE_IS_NOSYNC(ttep)) { 2184 TTE_SET_REF(ttep); 2185 if (TTE_IS_WRITABLE(ttep)) { 2186 TTE_SET_MOD(ttep); 2187 } 2188 } 2189 if (TTE_IS_NFO(ttep) && TTE_IS_EXECUTABLE(ttep)) { 2190 panic("sfmmu_memtte: can't set both NFO and EXEC bits"); 2191 } 2192 } 2193 2194 /* 2195 * This function will add a translation to the hme_blk and allocate the 2196 * hme_blk if one does not exist. 2197 * If a page structure is specified then it will add the 2198 * corresponding hment to the mapping list. 2199 * It will also update the hmenum field for the tte. 2200 */ 2201 void 2202 sfmmu_tteload(struct hat *sfmmup, tte_t *ttep, caddr_t vaddr, page_t *pp, 2203 uint_t flags) 2204 { 2205 (void) sfmmu_tteload_array(sfmmup, ttep, vaddr, &pp, flags); 2206 } 2207 2208 /* 2209 * Load (ttep != NULL) or unload (ttep == NULL) one entry in the TSB. 2210 * Assumes that a particular page size may only be resident in one TSB. 2211 */ 2212 static void 2213 sfmmu_mod_tsb(sfmmu_t *sfmmup, caddr_t vaddr, tte_t *ttep, int ttesz) 2214 { 2215 struct tsb_info *tsbinfop = NULL; 2216 uint64_t tag; 2217 struct tsbe *tsbe_addr; 2218 uint64_t tsb_base; 2219 uint_t tsb_size; 2220 int vpshift = MMU_PAGESHIFT; 2221 int phys = 0; 2222 2223 if (sfmmup == ksfmmup) { /* No support for 32/256M ksfmmu pages */ 2224 phys = ktsb_phys; 2225 if (ttesz >= TTE4M) { 2226 #ifndef sun4v 2227 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M)); 2228 #endif 2229 tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base; 2230 tsb_size = ktsb4m_szcode; 2231 } else { 2232 tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base; 2233 tsb_size = ktsb_szcode; 2234 } 2235 } else { 2236 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz); 2237 2238 /* 2239 * If there isn't a TSB for this page size, or the TSB is 2240 * swapped out, there is nothing to do. Note that the latter 2241 * case seems impossible but can occur if hat_pageunload() 2242 * is called on an ISM mapping while the process is swapped 2243 * out. 2244 */ 2245 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED)) 2246 return; 2247 2248 /* 2249 * If another thread is in the middle of relocating a TSB 2250 * we can't unload the entry so set a flag so that the 2251 * TSB will be flushed before it can be accessed by the 2252 * process. 
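 *
 * Only the unload case (ttep == NULL) needs that flag: skipping a
 * load here merely costs a later TSB miss, whereas skipping an
 * unload could leave a stale entry behind in the relocated TSB,
 * hence the TSB_FLUSH_NEEDED marking below.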
2253 */ 2254 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) { 2255 if (ttep == NULL) 2256 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED; 2257 return; 2258 } 2259 #if defined(UTSB_PHYS) 2260 phys = 1; 2261 tsb_base = (uint64_t)tsbinfop->tsb_pa; 2262 #else 2263 tsb_base = (uint64_t)tsbinfop->tsb_va; 2264 #endif 2265 tsb_size = tsbinfop->tsb_szc; 2266 } 2267 if (ttesz >= TTE4M) 2268 vpshift = MMU_PAGESHIFT4M; 2269 2270 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size); 2271 tag = sfmmu_make_tsbtag(vaddr); 2272 2273 if (ttep == NULL) { 2274 sfmmu_unload_tsbe(tsbe_addr, tag, phys); 2275 } else { 2276 if (ttesz >= TTE4M) { 2277 SFMMU_STAT(sf_tsb_load4m); 2278 } else { 2279 SFMMU_STAT(sf_tsb_load8k); 2280 } 2281 2282 sfmmu_load_tsbe(tsbe_addr, tag, ttep, phys); 2283 } 2284 } 2285 2286 /* 2287 * Unmap all entries from [start, end) matching the given page size. 2288 * 2289 * This function is used primarily to unmap replicated 64K or 512K entries 2290 * from the TSB that are inserted using the base page size TSB pointer, but 2291 * it may also be called to unmap a range of addresses from the TSB. 2292 */ 2293 void 2294 sfmmu_unload_tsb_range(sfmmu_t *sfmmup, caddr_t start, caddr_t end, int ttesz) 2295 { 2296 struct tsb_info *tsbinfop; 2297 uint64_t tag; 2298 struct tsbe *tsbe_addr; 2299 caddr_t vaddr; 2300 uint64_t tsb_base; 2301 int vpshift, vpgsz; 2302 uint_t tsb_size; 2303 int phys = 0; 2304 2305 /* 2306 * Assumptions: 2307 * If ttesz == 8K, 64K or 512K, we walk through the range 8K 2308 * at a time shooting down any valid entries we encounter. 2309 * 2310 * If ttesz >= 4M we walk the range 4M at a time shooting 2311 * down any valid mappings we find. 2312 */ 2313 if (sfmmup == ksfmmup) { 2314 phys = ktsb_phys; 2315 if (ttesz >= TTE4M) { 2316 #ifndef sun4v 2317 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M)); 2318 #endif 2319 tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base; 2320 tsb_size = ktsb4m_szcode; 2321 } else { 2322 tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base; 2323 tsb_size = ktsb_szcode; 2324 } 2325 } else { 2326 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz); 2327 2328 /* 2329 * If there isn't a TSB for this page size, or the TSB is 2330 * swapped out, there is nothing to do. Note that the latter 2331 * case seems impossible but can occur if hat_pageunload() 2332 * is called on an ISM mapping while the process is swapped 2333 * out. 2334 */ 2335 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED)) 2336 return; 2337 2338 /* 2339 * If another thread is in the middle of relocating a TSB 2340 * we can't unload the entry so set a flag so that the 2341 * TSB will be flushed before it can be accessed by the 2342 * process. 2343 */ 2344 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) { 2345 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED; 2346 return; 2347 } 2348 #if defined(UTSB_PHYS) 2349 phys = 1; 2350 tsb_base = (uint64_t)tsbinfop->tsb_pa; 2351 #else 2352 tsb_base = (uint64_t)tsbinfop->tsb_va; 2353 #endif 2354 tsb_size = tsbinfop->tsb_szc; 2355 } 2356 if (ttesz >= TTE4M) { 2357 vpshift = MMU_PAGESHIFT4M; 2358 vpgsz = MMU_PAGESIZE4M; 2359 } else { 2360 vpshift = MMU_PAGESHIFT; 2361 vpgsz = MMU_PAGESIZE; 2362 } 2363 2364 for (vaddr = start; vaddr < end; vaddr += vpgsz) { 2365 tag = sfmmu_make_tsbtag(vaddr); 2366 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size); 2367 sfmmu_unload_tsbe(tsbe_addr, tag, phys); 2368 } 2369 } 2370 2371 /* 2372 * Select the optimum TSB size given the number of mappings 2373 * that need to be cached. 
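 *
 * In effect (a sketch, reading SFMMU_RSS_TSBSIZE(szc) as "the number
 * of mappings a TSB of size code szc is expected to hold"):
 *
 *	return the smallest szc with pgcnt <= SFMMU_RSS_TSBSIZE(szc),
 *	capped at tsb_max_growsize;
 *
 * so the TSB grows roughly with the resident mapping count.  The
 * DEBUG-only tsb_grow_stress path instead returns a random size code
 * to exercise the TSB grow/shrink machinery.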
2374 */ 2375 static int 2376 sfmmu_select_tsb_szc(pgcnt_t pgcnt) 2377 { 2378 int szc = 0; 2379 2380 #ifdef DEBUG 2381 if (tsb_grow_stress) { 2382 uint32_t randval = (uint32_t)gettick() >> 4; 2383 return (randval % (tsb_max_growsize + 1)); 2384 } 2385 #endif /* DEBUG */ 2386 2387 while ((szc < tsb_max_growsize) && (pgcnt > SFMMU_RSS_TSBSIZE(szc))) 2388 szc++; 2389 return (szc); 2390 } 2391 2392 /* 2393 * This function will add a translation to the hme_blk and allocate the 2394 * hme_blk if one does not exist. 2395 * If a page structure is specified then it will add the 2396 * corresponding hment to the mapping list. 2397 * It will also update the hmenum field for the tte. 2398 * Furthermore, it attempts to create a large page translation 2399 * for <addr,hat> at page array pps. It assumes addr and first 2400 * pp is correctly aligned. It returns 0 if successful and 1 otherwise. 2401 */ 2402 static int 2403 sfmmu_tteload_array(sfmmu_t *sfmmup, tte_t *ttep, caddr_t vaddr, 2404 page_t **pps, uint_t flags) 2405 { 2406 struct hmehash_bucket *hmebp; 2407 struct hme_blk *hmeblkp; 2408 int ret; 2409 uint_t size; 2410 2411 /* 2412 * Get mapping size. 2413 */ 2414 size = TTE_CSZ(ttep); 2415 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size))); 2416 2417 /* 2418 * Acquire the hash bucket. 2419 */ 2420 hmebp = sfmmu_tteload_acquire_hashbucket(sfmmup, vaddr, size); 2421 ASSERT(hmebp); 2422 2423 /* 2424 * Find the hment block. 2425 */ 2426 hmeblkp = sfmmu_tteload_find_hmeblk(sfmmup, hmebp, vaddr, size, flags); 2427 ASSERT(hmeblkp); 2428 2429 /* 2430 * Add the translation. 2431 */ 2432 ret = sfmmu_tteload_addentry(sfmmup, hmeblkp, ttep, vaddr, pps, flags); 2433 2434 /* 2435 * Release the hash bucket. 2436 */ 2437 sfmmu_tteload_release_hashbucket(hmebp); 2438 2439 return (ret); 2440 } 2441 2442 /* 2443 * Function locks and returns a pointer to the hash bucket for vaddr and size. 2444 */ 2445 static struct hmehash_bucket * 2446 sfmmu_tteload_acquire_hashbucket(sfmmu_t *sfmmup, caddr_t vaddr, int size) 2447 { 2448 struct hmehash_bucket *hmebp; 2449 int hmeshift; 2450 2451 hmeshift = HME_HASH_SHIFT(size); 2452 2453 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift); 2454 2455 SFMMU_HASH_LOCK(hmebp); 2456 2457 return (hmebp); 2458 } 2459 2460 /* 2461 * Function returns a pointer to an hmeblk in the hash bucket, hmebp. If the 2462 * hmeblk doesn't exists for the [sfmmup, vaddr & size] signature, a hmeblk is 2463 * allocated. 2464 */ 2465 static struct hme_blk * 2466 sfmmu_tteload_find_hmeblk(sfmmu_t *sfmmup, struct hmehash_bucket *hmebp, 2467 caddr_t vaddr, uint_t size, uint_t flags) 2468 { 2469 hmeblk_tag hblktag; 2470 int hmeshift; 2471 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL; 2472 uint64_t hblkpa, prevpa; 2473 struct kmem_cache *sfmmu_cache; 2474 uint_t forcefree; 2475 2476 hblktag.htag_id = sfmmup; 2477 hmeshift = HME_HASH_SHIFT(size); 2478 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift); 2479 hblktag.htag_rehash = HME_HASH_REHASH(size); 2480 2481 ttearray_realloc: 2482 2483 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, hblkpa, 2484 pr_hblk, prevpa, &list); 2485 2486 /* 2487 * We block until hblk_reserve_lock is released; it's held by 2488 * the thread, temporarily using hblk_reserve, until hblk_reserve is 2489 * replaced by a hblk from sfmmu8_cache. 
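 *
 * The back-to-back mutex_enter()/mutex_exit() below is deliberate:
 * the lock is taken purely as a barrier to wait for the owning
 * thread to finish that replacement; nothing is done while holding
 * it, and we then retake the bucket lock and retry the search from
 * ttearray_realloc.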
2490 */ 2491 if (hmeblkp == (struct hme_blk *)hblk_reserve && 2492 hblk_reserve_thread != curthread) { 2493 SFMMU_HASH_UNLOCK(hmebp); 2494 mutex_enter(&hblk_reserve_lock); 2495 mutex_exit(&hblk_reserve_lock); 2496 SFMMU_STAT(sf_hblk_reserve_hit); 2497 SFMMU_HASH_LOCK(hmebp); 2498 goto ttearray_realloc; 2499 } 2500 2501 if (hmeblkp == NULL) { 2502 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size, 2503 hblktag, flags); 2504 } else { 2505 /* 2506 * It is possible for 8k and 64k hblks to collide since they 2507 * have the same rehash value. This is because we 2508 * lazily free hblks and 8K/64K blks could be lingering. 2509 * If we find size mismatch we free the block and & try again. 2510 */ 2511 if (get_hblk_ttesz(hmeblkp) != size) { 2512 ASSERT(!hmeblkp->hblk_vcnt); 2513 ASSERT(!hmeblkp->hblk_hmecnt); 2514 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, pr_hblk); 2515 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 2516 goto ttearray_realloc; 2517 } 2518 if (hmeblkp->hblk_shw_bit) { 2519 /* 2520 * if the hblk was previously used as a shadow hblk then 2521 * we will change it to a normal hblk 2522 */ 2523 if (hmeblkp->hblk_shw_mask) { 2524 sfmmu_shadow_hcleanup(sfmmup, hmeblkp, hmebp); 2525 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 2526 goto ttearray_realloc; 2527 } else { 2528 hmeblkp->hblk_shw_bit = 0; 2529 } 2530 } 2531 SFMMU_STAT(sf_hblk_hit); 2532 } 2533 2534 /* 2535 * hat_memload() should never call kmem_cache_free(); see block 2536 * comment showing the stacktrace in sfmmu_hblk_alloc(); 2537 * enqueue each hblk in the list to reserve list if it's created 2538 * from sfmmu8_cache *and* sfmmup == KHATID. 2539 */ 2540 forcefree = (sfmmup == KHATID) ? 1 : 0; 2541 while ((pr_hblk = list) != NULL) { 2542 list = pr_hblk->hblk_next; 2543 sfmmu_cache = get_hblk_cache(pr_hblk); 2544 if ((sfmmu_cache == sfmmu8_cache) && 2545 sfmmu_put_free_hblk(pr_hblk, forcefree)) 2546 continue; 2547 2548 ASSERT(sfmmup != KHATID); 2549 kmem_cache_free(sfmmu_cache, pr_hblk); 2550 } 2551 2552 ASSERT(get_hblk_ttesz(hmeblkp) == size); 2553 ASSERT(!hmeblkp->hblk_shw_bit); 2554 2555 return (hmeblkp); 2556 } 2557 2558 /* 2559 * Function adds a tte entry into the hmeblk. It returns 0 if successful and 1 2560 * otherwise. 2561 */ 2562 static int 2563 sfmmu_tteload_addentry(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, tte_t *ttep, 2564 caddr_t vaddr, page_t **pps, uint_t flags) 2565 { 2566 page_t *pp = *pps; 2567 int hmenum, size, remap; 2568 tte_t tteold, flush_tte; 2569 #ifdef DEBUG 2570 tte_t orig_old; 2571 #endif /* DEBUG */ 2572 struct sf_hment *sfhme; 2573 kmutex_t *pml, *pmtx; 2574 hatlock_t *hatlockp; 2575 2576 /* 2577 * remove this panic when we decide to let user virtual address 2578 * space be >= USERLIMIT. 2579 */ 2580 if (!TTE_IS_PRIVILEGED(ttep) && vaddr >= (caddr_t)USERLIMIT) 2581 panic("user addr %p in kernel space", vaddr); 2582 #if defined(TTE_IS_GLOBAL) 2583 if (TTE_IS_GLOBAL(ttep)) 2584 panic("sfmmu_tteload: creating global tte"); 2585 #endif 2586 2587 #ifdef DEBUG 2588 if (pf_is_memory(sfmmu_ttetopfn(ttep, vaddr)) && 2589 !TTE_IS_PCACHEABLE(ttep) && !sfmmu_allow_nc_trans) 2590 panic("sfmmu_tteload: non cacheable memory tte"); 2591 #endif /* DEBUG */ 2592 2593 if ((flags & HAT_LOAD_SHARE) || !TTE_IS_REF(ttep) || 2594 !TTE_IS_MOD(ttep)) { 2595 /* 2596 * Don't load TSB for dummy as in ISM. Also don't preload 2597 * the TSB if the TTE isn't writable since we're likely to 2598 * fault on it again -- preloading can be fairly expensive. 
2599 */ 2600 flags |= SFMMU_NO_TSBLOAD; 2601 } 2602 2603 size = TTE_CSZ(ttep); 2604 switch (size) { 2605 case TTE8K: 2606 SFMMU_STAT(sf_tteload8k); 2607 break; 2608 case TTE64K: 2609 SFMMU_STAT(sf_tteload64k); 2610 break; 2611 case TTE512K: 2612 SFMMU_STAT(sf_tteload512k); 2613 break; 2614 case TTE4M: 2615 SFMMU_STAT(sf_tteload4m); 2616 break; 2617 case (TTE32M): 2618 SFMMU_STAT(sf_tteload32m); 2619 ASSERT(mmu_page_sizes == max_mmu_page_sizes); 2620 break; 2621 case (TTE256M): 2622 SFMMU_STAT(sf_tteload256m); 2623 ASSERT(mmu_page_sizes == max_mmu_page_sizes); 2624 break; 2625 } 2626 2627 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size))); 2628 2629 HBLKTOHME_IDX(sfhme, hmeblkp, vaddr, hmenum); 2630 2631 /* 2632 * Need to grab mlist lock here so that pageunload 2633 * will not change tte behind us. 2634 */ 2635 if (pp) { 2636 pml = sfmmu_mlist_enter(pp); 2637 } 2638 2639 sfmmu_copytte(&sfhme->hme_tte, &tteold); 2640 /* 2641 * Look for corresponding hment and if valid verify 2642 * pfns are equal. 2643 */ 2644 remap = TTE_IS_VALID(&tteold); 2645 if (remap) { 2646 pfn_t new_pfn, old_pfn; 2647 2648 old_pfn = TTE_TO_PFN(vaddr, &tteold); 2649 new_pfn = TTE_TO_PFN(vaddr, ttep); 2650 2651 if (flags & HAT_LOAD_REMAP) { 2652 /* make sure we are remapping same type of pages */ 2653 if (pf_is_memory(old_pfn) != pf_is_memory(new_pfn)) { 2654 panic("sfmmu_tteload - tte remap io<->memory"); 2655 } 2656 if (old_pfn != new_pfn && 2657 (pp != NULL || sfhme->hme_page != NULL)) { 2658 panic("sfmmu_tteload - tte remap pp != NULL"); 2659 } 2660 } else if (old_pfn != new_pfn) { 2661 panic("sfmmu_tteload - tte remap, hmeblkp 0x%p", 2662 (void *)hmeblkp); 2663 } 2664 ASSERT(TTE_CSZ(&tteold) == TTE_CSZ(ttep)); 2665 } 2666 2667 if (pp) { 2668 if (size == TTE8K) { 2669 #ifdef VAC 2670 /* 2671 * Handle VAC consistency 2672 */ 2673 if (!remap && (cache & CACHE_VAC) && !PP_ISNC(pp)) { 2674 sfmmu_vac_conflict(sfmmup, vaddr, pp); 2675 } 2676 #endif 2677 2678 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) { 2679 pmtx = sfmmu_page_enter(pp); 2680 PP_CLRRO(pp); 2681 sfmmu_page_exit(pmtx); 2682 } else if (!PP_ISMAPPED(pp) && 2683 (!TTE_IS_WRITABLE(ttep)) && !(PP_ISMOD(pp))) { 2684 pmtx = sfmmu_page_enter(pp); 2685 if (!(PP_ISMOD(pp))) { 2686 PP_SETRO(pp); 2687 } 2688 sfmmu_page_exit(pmtx); 2689 } 2690 2691 } else if (sfmmu_pagearray_setup(vaddr, pps, ttep, remap)) { 2692 /* 2693 * sfmmu_pagearray_setup failed so return 2694 */ 2695 sfmmu_mlist_exit(pml); 2696 return (1); 2697 } 2698 } 2699 2700 /* 2701 * Make sure hment is not on a mapping list. 2702 */ 2703 ASSERT(remap || (sfhme->hme_page == NULL)); 2704 2705 /* if it is not a remap then hme->next better be NULL */ 2706 ASSERT((!remap) ? sfhme->hme_next == NULL : 1); 2707 2708 if (flags & HAT_LOAD_LOCK) { 2709 if (((int)hmeblkp->hblk_lckcnt + 1) >= MAX_HBLK_LCKCNT) { 2710 panic("too high lckcnt-hmeblk %p", 2711 (void *)hmeblkp); 2712 } 2713 atomic_add_16(&hmeblkp->hblk_lckcnt, 1); 2714 2715 HBLK_STACK_TRACE(hmeblkp, HBLK_LOCK); 2716 } 2717 2718 #ifdef VAC 2719 if (pp && PP_ISNC(pp)) { 2720 /* 2721 * If the physical page is marked to be uncacheable, like 2722 * by a vac conflict, make sure the new mapping is also 2723 * uncacheable. 
2724 */ 2725 TTE_CLR_VCACHEABLE(ttep); 2726 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR); 2727 } 2728 #endif 2729 ttep->tte_hmenum = hmenum; 2730 2731 #ifdef DEBUG 2732 orig_old = tteold; 2733 #endif /* DEBUG */ 2734 2735 while (sfmmu_modifytte_try(&tteold, ttep, &sfhme->hme_tte) < 0) { 2736 if ((sfmmup == KHATID) && 2737 (flags & (HAT_LOAD_LOCK | HAT_LOAD_REMAP))) { 2738 sfmmu_copytte(&sfhme->hme_tte, &tteold); 2739 } 2740 #ifdef DEBUG 2741 chk_tte(&orig_old, &tteold, ttep, hmeblkp); 2742 #endif /* DEBUG */ 2743 } 2744 2745 if (!TTE_IS_VALID(&tteold)) { 2746 2747 atomic_add_16(&hmeblkp->hblk_vcnt, 1); 2748 atomic_add_long(&sfmmup->sfmmu_ttecnt[size], 1); 2749 2750 /* 2751 * HAT_RELOAD_SHARE has been deprecated with lpg DISM. 2752 */ 2753 2754 if (size > TTE8K && (flags & HAT_LOAD_SHARE) == 0 && 2755 sfmmup != ksfmmup) { 2756 /* 2757 * If this is the first large mapping for the process 2758 * we must force any CPUs running this process to TL=0 2759 * where they will reload the HAT flags from the 2760 * tsbmiss area. This is necessary to make the large 2761 * mappings we are about to load visible to those CPUs; 2762 * otherwise they'll loop forever calling pagefault() 2763 * since we don't search large hash chains by default. 2764 */ 2765 hatlockp = sfmmu_hat_enter(sfmmup); 2766 if (size == TTE512K && 2767 !SFMMU_FLAGS_ISSET(sfmmup, HAT_512K_FLAG)) { 2768 SFMMU_FLAGS_SET(sfmmup, HAT_512K_FLAG); 2769 sfmmu_sync_mmustate(sfmmup); 2770 } else if (size == TTE4M && 2771 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4M_FLAG)) { 2772 SFMMU_FLAGS_SET(sfmmup, HAT_4M_FLAG); 2773 sfmmu_sync_mmustate(sfmmup); 2774 } else if (size == TTE64K && 2775 !SFMMU_FLAGS_ISSET(sfmmup, HAT_64K_FLAG)) { 2776 SFMMU_FLAGS_SET(sfmmup, HAT_64K_FLAG); 2777 /* no sync mmustate; 64K shares 8K hashes */ 2778 } else if (mmu_page_sizes == max_mmu_page_sizes) { 2779 if (size == TTE32M && 2780 !SFMMU_FLAGS_ISSET(sfmmup, HAT_32M_FLAG)) { 2781 SFMMU_FLAGS_SET(sfmmup, HAT_32M_FLAG); 2782 sfmmu_sync_mmustate(sfmmup); 2783 } else if (size == TTE256M && 2784 !SFMMU_FLAGS_ISSET(sfmmup, HAT_256M_FLAG)) { 2785 SFMMU_FLAGS_SET(sfmmup, HAT_256M_FLAG); 2786 sfmmu_sync_mmustate(sfmmup); 2787 } 2788 } 2789 if (size >= TTE4M && (flags & HAT_LOAD_TEXT) && 2790 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) { 2791 SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG); 2792 } 2793 sfmmu_hat_exit(hatlockp); 2794 } 2795 } 2796 ASSERT(TTE_IS_VALID(&sfhme->hme_tte)); 2797 2798 flush_tte.tte_intlo = (tteold.tte_intlo ^ ttep->tte_intlo) & 2799 hw_tte.tte_intlo; 2800 flush_tte.tte_inthi = (tteold.tte_inthi ^ ttep->tte_inthi) & 2801 hw_tte.tte_inthi; 2802 2803 if (remap && (flush_tte.tte_inthi || flush_tte.tte_intlo)) { 2804 /* 2805 * If remap and new tte differs from old tte we need 2806 * to sync the mod bit and flush TLB/TSB. We don't 2807 * need to sync ref bit because we currently always set 2808 * ref bit in tteload. 2809 */ 2810 ASSERT(TTE_IS_REF(ttep)); 2811 if (TTE_IS_MOD(&tteold)) { 2812 sfmmu_ttesync(sfmmup, vaddr, &tteold, pp); 2813 } 2814 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 0); 2815 xt_sync(sfmmup->sfmmu_cpusran); 2816 } 2817 2818 if ((flags & SFMMU_NO_TSBLOAD) == 0) { 2819 /* 2820 * We only preload 8K and 4M mappings into the TSB, since 2821 * 64K and 512K mappings are replicated and hence don't 2822 * have a single, unique TSB entry. Ditto for 32M/256M. 
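 *
 * ("Replicated" here means a single 64K or 512K mapping ends up as
 * several TSB entries inserted using the base page size TSB pointer,
 * one per 8K page it covers; see sfmmu_unload_tsb_range() above,
 * which exists largely to shoot those replicated entries down.
 * Preloading all of them here would cost more than it saves.)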
2823 */ 2824 if (size == TTE8K || size == TTE4M) { 2825 hatlockp = sfmmu_hat_enter(sfmmup); 2826 sfmmu_load_tsb(sfmmup, vaddr, &sfhme->hme_tte, size); 2827 sfmmu_hat_exit(hatlockp); 2828 } 2829 } 2830 if (pp) { 2831 if (!remap) { 2832 HME_ADD(sfhme, pp); 2833 atomic_add_16(&hmeblkp->hblk_hmecnt, 1); 2834 ASSERT(hmeblkp->hblk_hmecnt > 0); 2835 2836 /* 2837 * Cannot ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS) 2838 * see pageunload() for comment. 2839 */ 2840 } 2841 sfmmu_mlist_exit(pml); 2842 } 2843 2844 return (0); 2845 } 2846 /* 2847 * Function unlocks hash bucket. 2848 */ 2849 static void 2850 sfmmu_tteload_release_hashbucket(struct hmehash_bucket *hmebp) 2851 { 2852 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 2853 SFMMU_HASH_UNLOCK(hmebp); 2854 } 2855 2856 /* 2857 * function which checks and sets up page array for a large 2858 * translation. Will set p_vcolor, p_index, p_ro fields. 2859 * Assumes addr and pfnum of first page are properly aligned. 2860 * Will check for physical contiguity. If check fails it return 2861 * non null. 2862 */ 2863 static int 2864 sfmmu_pagearray_setup(caddr_t addr, page_t **pps, tte_t *ttep, int remap) 2865 { 2866 int i, index, ttesz; 2867 pfn_t pfnum; 2868 pgcnt_t npgs; 2869 page_t *pp, *pp1; 2870 kmutex_t *pmtx; 2871 #ifdef VAC 2872 int osz; 2873 int cflags = 0; 2874 int vac_err = 0; 2875 #endif 2876 int newidx = 0; 2877 2878 ttesz = TTE_CSZ(ttep); 2879 2880 ASSERT(ttesz > TTE8K); 2881 2882 npgs = TTEPAGES(ttesz); 2883 index = PAGESZ_TO_INDEX(ttesz); 2884 2885 pfnum = (*pps)->p_pagenum; 2886 ASSERT(IS_P2ALIGNED(pfnum, npgs)); 2887 2888 /* 2889 * Save the first pp so we can do HAT_TMPNC at the end. 2890 */ 2891 pp1 = *pps; 2892 #ifdef VAC 2893 osz = fnd_mapping_sz(pp1); 2894 #endif 2895 2896 for (i = 0; i < npgs; i++, pps++) { 2897 pp = *pps; 2898 ASSERT(PAGE_LOCKED(pp)); 2899 ASSERT(pp->p_szc >= ttesz); 2900 ASSERT(pp->p_szc == pp1->p_szc); 2901 ASSERT(sfmmu_mlist_held(pp)); 2902 2903 /* 2904 * XXX is it possible to maintain P_RO on the root only? 2905 */ 2906 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) { 2907 pmtx = sfmmu_page_enter(pp); 2908 PP_CLRRO(pp); 2909 sfmmu_page_exit(pmtx); 2910 } else if (!PP_ISMAPPED(pp) && !TTE_IS_WRITABLE(ttep) && 2911 !PP_ISMOD(pp)) { 2912 pmtx = sfmmu_page_enter(pp); 2913 if (!(PP_ISMOD(pp))) { 2914 PP_SETRO(pp); 2915 } 2916 sfmmu_page_exit(pmtx); 2917 } 2918 2919 /* 2920 * If this is a remap we skip vac & contiguity checks. 2921 */ 2922 if (remap) 2923 continue; 2924 2925 /* 2926 * set p_vcolor and detect any vac conflicts. 2927 */ 2928 #ifdef VAC 2929 if (vac_err == 0) { 2930 vac_err = sfmmu_vacconflict_array(addr, pp, &cflags); 2931 2932 } 2933 #endif 2934 2935 /* 2936 * Save current index in case we need to undo it. 2937 * Note: "PAGESZ_TO_INDEX(sz) (1 << (sz))" 2938 * "SFMMU_INDEX_SHIFT 6" 2939 * "SFMMU_INDEX_MASK ((1 << SFMMU_INDEX_SHIFT) - 1)" 2940 * "PP_MAPINDEX(p_index) (p_index & SFMMU_INDEX_MASK)" 2941 * 2942 * So: index = PAGESZ_TO_INDEX(ttesz); 2943 * if ttesz == 1 then index = 0x2 2944 * 2 then index = 0x4 2945 * 3 then index = 0x8 2946 * 4 then index = 0x10 2947 * 5 then index = 0x20 2948 * The code below checks if it's a new pagesize (ie, newidx) 2949 * in case we need to take it back out of p_index, 2950 * and then or's the new index into the existing index. 
2951 */
2952 if ((PP_MAPINDEX(pp) & index) == 0)
2953 newidx = 1;
2954 pp->p_index = (PP_MAPINDEX(pp) | index);
2955
2956 /*
2957 * contiguity check
2958 */
2959 if (pp->p_pagenum != pfnum) {
2960 /*
2961 * If we fail the contiguity test then
2962 * the only thing we need to fix is the p_index field.
2963 * We might get a few extra flushes but since this
2964 * path is rare that is ok. The p_ro field will
2965 * get automatically fixed on the next tteload to
2966 * the page. NO TNC bit is set yet.
2967 */
2968 while (i >= 0) {
2969 pp = *pps;
2970 if (newidx)
2971 pp->p_index = (PP_MAPINDEX(pp) &
2972 ~index);
2973 pps--;
2974 i--;
2975 }
2976 return (1);
2977 }
2978 pfnum++;
2979 addr += MMU_PAGESIZE;
2980 }
2981
2982 #ifdef VAC
2983 if (vac_err) {
2984 if (ttesz > osz) {
2985 /*
2986 * There are some smaller mappings that cause vac
2987 * conflicts. Convert all existing small mappings to
2988 * TNC.
2989 */
2990 SFMMU_STAT_ADD(sf_uncache_conflict, npgs);
2991 sfmmu_page_cache_array(pp1, HAT_TMPNC, CACHE_FLUSH,
2992 npgs);
2993 } else {
2994 /* EMPTY */
2995 /*
2996 * If there exists a big page mapping,
2997 * that means the whole existing big page
2998 * has the TNC setting already. No need to convert to
2999 * TNC again.
3000 */
3001 ASSERT(PP_ISTNC(pp1));
3002 }
3003 }
3004 #endif /* VAC */
3005
3006 return (0);
3007 }
3008
3009 #ifdef VAC
3010 /*
3011 * Routine that detects vac conflicts for a large page. It also
3012 * sets the virtual color for all pp's of this big mapping.
3013 */
3014 static int
3015 sfmmu_vacconflict_array(caddr_t addr, page_t *pp, int *cflags)
3016 {
3017 int vcolor, ocolor;
3018
3019 ASSERT(sfmmu_mlist_held(pp));
3020
3021 if (PP_ISNC(pp)) {
3022 return (HAT_TMPNC);
3023 }
3024
3025 vcolor = addr_to_vcolor(addr);
3026 if (PP_NEWPAGE(pp)) {
3027 PP_SET_VCOLOR(pp, vcolor);
3028 return (0);
3029 }
3030
3031 ocolor = PP_GET_VCOLOR(pp);
3032 if (ocolor == vcolor) {
3033 return (0);
3034 }
3035
3036 if (!PP_ISMAPPED(pp)) {
3037 /*
3038 * The previous user of the page had a different color
3039 * but since there are no current users
3040 * we just flush the cache and change the color.
3041 * As an optimization for large pages we flush the
3042 * entire cache of that color and set a flag.
3043 */
3044 SFMMU_STAT(sf_pgcolor_conflict);
3045 if (!CacheColor_IsFlushed(*cflags, ocolor)) {
3046 CacheColor_SetFlushed(*cflags, ocolor);
3047 sfmmu_cache_flushcolor(ocolor, pp->p_pagenum);
3048 }
3049 PP_SET_VCOLOR(pp, vcolor);
3050 return (0);
3051 }
3052
3053 /*
3054 * We got a real conflict with a current mapping.
3055 * Set flags to start uncaching all mappings
3056 * and return failure so we restart looping over
3057 * the pp array from the beginning.
3058 */
3059 return (HAT_TMPNC);
3060 }
3061 #endif /* VAC */
3062
3063 /*
3064 * Creates a large page shadow hmeblk for a tte.
3065 * The purpose of this routine is to allow us to do quick unloads because
3066 * the vm layer can easily pass a very large but sparsely populated range.
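 *
 * Roughly, as a sketch of the mechanism: the shadow hmeblk lives in
 * the hash for a larger page size than the mappings it shadows (the
 * "size" picked below), and its hblk_shw_mask records, one bit per
 * sub-region (the vshift computed below, always < 8), which parts of
 * that range have hmeblks beneath them.  An unload that is handed a
 * huge, mostly empty range can therefore probe the hash in large
 * strides and descend only where a shadow (or real) hblk indicates
 * there is actually something to tear down; sfmmu_shadow_hcleanup()
 * performs such a descent when a shadow hblk needs to be converted
 * back into a normal one.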
3067 */ 3068 static struct hme_blk * 3069 sfmmu_shadow_hcreate(sfmmu_t *sfmmup, caddr_t vaddr, int ttesz, uint_t flags) 3070 { 3071 struct hmehash_bucket *hmebp; 3072 hmeblk_tag hblktag; 3073 int hmeshift, size, vshift; 3074 uint_t shw_mask, newshw_mask; 3075 struct hme_blk *hmeblkp; 3076 3077 ASSERT(sfmmup != KHATID); 3078 if (mmu_page_sizes == max_mmu_page_sizes) { 3079 ASSERT(ttesz < TTE256M); 3080 } else { 3081 ASSERT(ttesz < TTE4M); 3082 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0); 3083 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0); 3084 } 3085 3086 if (ttesz == TTE8K) { 3087 size = TTE512K; 3088 } else { 3089 size = ++ttesz; 3090 } 3091 3092 hblktag.htag_id = sfmmup; 3093 hmeshift = HME_HASH_SHIFT(size); 3094 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift); 3095 hblktag.htag_rehash = HME_HASH_REHASH(size); 3096 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift); 3097 3098 SFMMU_HASH_LOCK(hmebp); 3099 3100 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 3101 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve); 3102 if (hmeblkp == NULL) { 3103 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size, 3104 hblktag, flags); 3105 } 3106 ASSERT(hmeblkp); 3107 if (!hmeblkp->hblk_shw_mask) { 3108 /* 3109 * if this is a unused hblk it was just allocated or could 3110 * potentially be a previous large page hblk so we need to 3111 * set the shadow bit. 3112 */ 3113 hmeblkp->hblk_shw_bit = 1; 3114 } 3115 ASSERT(hmeblkp->hblk_shw_bit == 1); 3116 vshift = vaddr_to_vshift(hblktag, vaddr, size); 3117 ASSERT(vshift < 8); 3118 /* 3119 * Atomically set shw mask bit 3120 */ 3121 do { 3122 shw_mask = hmeblkp->hblk_shw_mask; 3123 newshw_mask = shw_mask | (1 << vshift); 3124 newshw_mask = cas32(&hmeblkp->hblk_shw_mask, shw_mask, 3125 newshw_mask); 3126 } while (newshw_mask != shw_mask); 3127 3128 SFMMU_HASH_UNLOCK(hmebp); 3129 3130 return (hmeblkp); 3131 } 3132 3133 /* 3134 * This routine cleanup a previous shadow hmeblk and changes it to 3135 * a regular hblk. This happens rarely but it is possible 3136 * when a process wants to use large pages and there are hblks still 3137 * lying around from the previous as that used these hmeblks. 3138 * The alternative was to cleanup the shadow hblks at unload time 3139 * but since so few user processes actually use large pages, it is 3140 * better to be lazy and cleanup at this time. 
3141 */ 3142 static void 3143 sfmmu_shadow_hcleanup(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, 3144 struct hmehash_bucket *hmebp) 3145 { 3146 caddr_t addr, endaddr; 3147 int hashno, size; 3148 3149 ASSERT(hmeblkp->hblk_shw_bit); 3150 3151 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 3152 3153 if (!hmeblkp->hblk_shw_mask) { 3154 hmeblkp->hblk_shw_bit = 0; 3155 return; 3156 } 3157 addr = (caddr_t)get_hblk_base(hmeblkp); 3158 endaddr = get_hblk_endaddr(hmeblkp); 3159 size = get_hblk_ttesz(hmeblkp); 3160 hashno = size - 1; 3161 ASSERT(hashno > 0); 3162 SFMMU_HASH_UNLOCK(hmebp); 3163 3164 sfmmu_free_hblks(sfmmup, addr, endaddr, hashno); 3165 3166 SFMMU_HASH_LOCK(hmebp); 3167 } 3168 3169 static void 3170 sfmmu_free_hblks(sfmmu_t *sfmmup, caddr_t addr, caddr_t endaddr, 3171 int hashno) 3172 { 3173 int hmeshift, shadow = 0; 3174 hmeblk_tag hblktag; 3175 struct hmehash_bucket *hmebp; 3176 struct hme_blk *hmeblkp; 3177 struct hme_blk *nx_hblk, *pr_hblk, *list = NULL; 3178 uint64_t hblkpa, prevpa, nx_pa; 3179 3180 ASSERT(hashno > 0); 3181 hblktag.htag_id = sfmmup; 3182 hblktag.htag_rehash = hashno; 3183 3184 hmeshift = HME_HASH_SHIFT(hashno); 3185 3186 while (addr < endaddr) { 3187 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3188 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 3189 SFMMU_HASH_LOCK(hmebp); 3190 /* inline HME_HASH_SEARCH */ 3191 hmeblkp = hmebp->hmeblkp; 3192 hblkpa = hmebp->hmeh_nextpa; 3193 prevpa = 0; 3194 pr_hblk = NULL; 3195 while (hmeblkp) { 3196 ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp)); 3197 if (HTAGS_EQ(hmeblkp->hblk_tag, hblktag)) { 3198 /* found hme_blk */ 3199 if (hmeblkp->hblk_shw_bit) { 3200 if (hmeblkp->hblk_shw_mask) { 3201 shadow = 1; 3202 sfmmu_shadow_hcleanup(sfmmup, 3203 hmeblkp, hmebp); 3204 break; 3205 } else { 3206 hmeblkp->hblk_shw_bit = 0; 3207 } 3208 } 3209 3210 /* 3211 * Hblk_hmecnt and hblk_vcnt could be non zero 3212 * since hblk_unload() does not gurantee that. 3213 * 3214 * XXX - this could cause tteload() to spin 3215 * where sfmmu_shadow_hcleanup() is called. 3216 */ 3217 } 3218 3219 nx_hblk = hmeblkp->hblk_next; 3220 nx_pa = hmeblkp->hblk_nextpa; 3221 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 3222 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, 3223 pr_hblk); 3224 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 3225 } else { 3226 pr_hblk = hmeblkp; 3227 prevpa = hblkpa; 3228 } 3229 hmeblkp = nx_hblk; 3230 hblkpa = nx_pa; 3231 } 3232 3233 SFMMU_HASH_UNLOCK(hmebp); 3234 3235 if (shadow) { 3236 /* 3237 * We found another shadow hblk so cleaned its 3238 * children. We need to go back and cleanup 3239 * the original hblk so we don't change the 3240 * addr. 3241 */ 3242 shadow = 0; 3243 } else { 3244 addr = (caddr_t)roundup((uintptr_t)addr + 1, 3245 (1 << hmeshift)); 3246 } 3247 } 3248 sfmmu_hblks_list_purge(&list); 3249 } 3250 3251 /* 3252 * Release one hardware address translation lock on the given address range. 3253 */ 3254 void 3255 hat_unlock(struct hat *sfmmup, caddr_t addr, size_t len) 3256 { 3257 struct hmehash_bucket *hmebp; 3258 hmeblk_tag hblktag; 3259 int hmeshift, hashno = 1; 3260 struct hme_blk *hmeblkp, *list = NULL; 3261 caddr_t endaddr; 3262 3263 ASSERT(sfmmup != NULL); 3264 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 3265 3266 ASSERT((sfmmup == ksfmmup) || 3267 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 3268 ASSERT((len & MMU_PAGEOFFSET) == 0); 3269 endaddr = addr + len; 3270 hblktag.htag_id = sfmmup; 3271 3272 /* 3273 * Spitfire supports 4 page sizes. 
3274 * Most pages are expected to be of the smallest page size (8K) and 3275 * these will not need to be rehashed. 64K pages also don't need to be 3276 * rehashed because an hmeblk spans 64K of address space. 512K pages 3277 * might need 1 rehash and and 4M pages might need 2 rehashes. 3278 */ 3279 while (addr < endaddr) { 3280 hmeshift = HME_HASH_SHIFT(hashno); 3281 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3282 hblktag.htag_rehash = hashno; 3283 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 3284 3285 SFMMU_HASH_LOCK(hmebp); 3286 3287 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 3288 if (hmeblkp != NULL) { 3289 /* 3290 * If we encounter a shadow hmeblk then 3291 * we know there are no valid hmeblks mapping 3292 * this address at this size or larger. 3293 * Just increment address by the smallest 3294 * page size. 3295 */ 3296 if (hmeblkp->hblk_shw_bit) { 3297 addr += MMU_PAGESIZE; 3298 } else { 3299 addr = sfmmu_hblk_unlock(hmeblkp, addr, 3300 endaddr); 3301 } 3302 SFMMU_HASH_UNLOCK(hmebp); 3303 hashno = 1; 3304 continue; 3305 } 3306 SFMMU_HASH_UNLOCK(hmebp); 3307 3308 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 3309 /* 3310 * We have traversed the whole list and rehashed 3311 * if necessary without finding the address to unlock 3312 * which should never happen. 3313 */ 3314 panic("sfmmu_unlock: addr not found. " 3315 "addr %p hat %p", (void *)addr, (void *)sfmmup); 3316 } else { 3317 hashno++; 3318 } 3319 } 3320 3321 sfmmu_hblks_list_purge(&list); 3322 } 3323 3324 /* 3325 * Function to unlock a range of addresses in an hmeblk. It returns the 3326 * next address that needs to be unlocked. 3327 * Should be called with the hash lock held. 3328 */ 3329 static caddr_t 3330 sfmmu_hblk_unlock(struct hme_blk *hmeblkp, caddr_t addr, caddr_t endaddr) 3331 { 3332 struct sf_hment *sfhme; 3333 tte_t tteold, ttemod; 3334 int ttesz, ret; 3335 3336 ASSERT(in_hblk_range(hmeblkp, addr)); 3337 ASSERT(hmeblkp->hblk_shw_bit == 0); 3338 3339 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 3340 ttesz = get_hblk_ttesz(hmeblkp); 3341 3342 HBLKTOHME(sfhme, hmeblkp, addr); 3343 while (addr < endaddr) { 3344 readtte: 3345 sfmmu_copytte(&sfhme->hme_tte, &tteold); 3346 if (TTE_IS_VALID(&tteold)) { 3347 3348 ttemod = tteold; 3349 3350 ret = sfmmu_modifytte_try(&tteold, &ttemod, 3351 &sfhme->hme_tte); 3352 3353 if (ret < 0) 3354 goto readtte; 3355 3356 if (hmeblkp->hblk_lckcnt == 0) 3357 panic("zero hblk lckcnt"); 3358 3359 if (((uintptr_t)addr + TTEBYTES(ttesz)) > 3360 (uintptr_t)endaddr) 3361 panic("can't unlock large tte"); 3362 3363 ASSERT(hmeblkp->hblk_lckcnt > 0); 3364 atomic_add_16(&hmeblkp->hblk_lckcnt, -1); 3365 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK); 3366 } else { 3367 panic("sfmmu_hblk_unlock: invalid tte"); 3368 } 3369 addr += TTEBYTES(ttesz); 3370 sfhme++; 3371 } 3372 return (addr); 3373 } 3374 3375 /* 3376 * Physical Address Mapping Framework 3377 * 3378 * General rules: 3379 * 3380 * (1) Applies only to seg_kmem memory pages. To make things easier, 3381 * seg_kpm addresses are also accepted by the routines, but nothing 3382 * is done with them since by definition their PA mappings are static. 3383 * (2) hat_add_callback() may only be called while holding the page lock 3384 * SE_SHARED or SE_EXCL of the underlying page (e.g., as_pagelock()), 3385 * or passing HAC_PAGELOCK flag. 3386 * (3) prehandler() and posthandler() may not call hat_add_callback() or 3387 * hat_delete_callback(), nor should they allocate memory. 
Post quiesce 3388 * callbacks may not sleep or acquire adaptive mutex locks. 3389 * (4) Either prehandler() or posthandler() (but not both) may be specified 3390 * as being NULL. Specifying an errhandler() is optional. 3391 * 3392 * Details of using the framework: 3393 * 3394 * registering a callback (hat_register_callback()) 3395 * 3396 * Pass prehandler, posthandler, errhandler addresses 3397 * as described below. If capture_cpus argument is nonzero, 3398 * suspend callback to the prehandler will occur with CPUs 3399 * captured and executing xc_loop() and CPUs will remain 3400 * captured until after the posthandler suspend callback 3401 * occurs. 3402 * 3403 * adding a callback (hat_add_callback()) 3404 * 3405 * as_pagelock(); 3406 * hat_add_callback(); 3407 * save returned pfn in private data structures or program registers; 3408 * as_pageunlock(); 3409 * 3410 * prehandler() 3411 * 3412 * Stop all accesses by physical address to this memory page. 3413 * Called twice: the first, PRESUSPEND, is a context safe to acquire 3414 * adaptive locks. The second, SUSPEND, is called at high PIL with 3415 * CPUs captured so adaptive locks may NOT be acquired (and all spin 3416 * locks must be XCALL_PIL or higher locks). 3417 * 3418 * May return the following errors: 3419 * EIO: A fatal error has occurred. This will result in panic. 3420 * EAGAIN: The page cannot be suspended. This will fail the 3421 * relocation. 3422 * 0: Success. 3423 * 3424 * posthandler() 3425 * 3426 * Save new pfn in private data structures or program registers; 3427 * not allowed to fail (non-zero return values will result in panic). 3428 * 3429 * errhandler() 3430 * 3431 * called when an error occurs related to the callback. Currently 3432 * the only such error is HAT_CB_ERR_LEAKED which indicates that 3433 * a page is being freed, but there are still outstanding callback(s) 3434 * registered on the page. 3435 * 3436 * removing a callback (hat_delete_callback(); e.g., prior to freeing memory) 3437 * 3438 * stop using physical address 3439 * hat_delete_callback(); 3440 * 3441 */ 3442 3443 /* 3444 * Register a callback class. Each subsystem should do this once and 3445 * cache the id_t returned for use in setting up and tearing down callbacks. 3446 * 3447 * There is no facility for removing callback IDs once they are created; 3448 * the "key" should be unique for each module, so in case a module is unloaded 3449 * and subsequently re-loaded, we can recycle the module's previous entry. 3450 */ 3451 id_t 3452 hat_register_callback(int key, 3453 int (*prehandler)(caddr_t, uint_t, uint_t, void *), 3454 int (*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t), 3455 int (*errhandler)(caddr_t, uint_t, uint_t, void *), 3456 int capture_cpus) 3457 { 3458 id_t id; 3459 3460 /* 3461 * Search the table for a pre-existing callback associated with 3462 * the identifier "key". If one exists, we re-use that entry in 3463 * the table for this instance, otherwise we assign the next 3464 * available table slot. 
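 *
 * Putting the framework together, a client might use it roughly as
 * follows (my_key, my_pre, my_post, my_err, my_state, my_vaddr and
 * the variables holding the results are purely illustrative names,
 * not anything defined in this file):
 *
 *	id_t	 my_id;
 *	pfn_t	 my_pfn;
 *	void	*my_cookie;
 *
 *	my_id = hat_register_callback(my_key, my_pre, my_post,
 *	    my_err, 0);				(once, at init time)
 *
 *	(void) hat_add_callback(my_id, my_vaddr, MMU_PAGESIZE,
 *	    HAC_SLEEP | HAC_PAGELOCK, my_state, &my_pfn, &my_cookie);
 *	...use my_pfn; my_pre/my_post run if the page is relocated...
 *	hat_delete_callback(my_vaddr, MMU_PAGESIZE, my_state,
 *	    HAC_PAGELOCK, my_cookie);
 *
 * The possible error returns from hat_add_callback() are listed in
 * the block comment that precedes it, further down.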
3465 */ 3466 for (id = 0; id < sfmmu_max_cb_id; id++) { 3467 if (sfmmu_cb_table[id].key == key) 3468 break; 3469 } 3470 3471 if (id == sfmmu_max_cb_id) { 3472 id = sfmmu_cb_nextid++; 3473 if (id >= sfmmu_max_cb_id) 3474 panic("hat_register_callback: out of callback IDs"); 3475 } 3476 3477 ASSERT(prehandler != NULL || posthandler != NULL); 3478 3479 sfmmu_cb_table[id].key = key; 3480 sfmmu_cb_table[id].prehandler = prehandler; 3481 sfmmu_cb_table[id].posthandler = posthandler; 3482 sfmmu_cb_table[id].errhandler = errhandler; 3483 sfmmu_cb_table[id].capture_cpus = capture_cpus; 3484 3485 return (id); 3486 } 3487 3488 #define HAC_COOKIE_NONE (void *)-1 3489 3490 /* 3491 * Add relocation callbacks to the specified addr/len which will be called 3492 * when relocating the associated page. See the description of pre and 3493 * posthandler above for more details. 3494 * 3495 * If HAC_PAGELOCK is included in flags, the underlying memory page is 3496 * locked internally so the caller must be able to deal with the callback 3497 * running even before this function has returned. If HAC_PAGELOCK is not 3498 * set, it is assumed that the underlying memory pages are locked. 3499 * 3500 * Since the caller must track the individual page boundaries anyway, 3501 * we only allow a callback to be added to a single page (large 3502 * or small). Thus [addr, addr + len) MUST be contained within a single 3503 * page. 3504 * 3505 * Registering multiple callbacks on the same [addr, addr+len) is supported, 3506 * _provided_that_ a unique parameter is specified for each callback. 3507 * If multiple callbacks are registered on the same range the callback will 3508 * be invoked with each unique parameter. Registering the same callback with 3509 * the same argument more than once will result in corrupted kernel state. 3510 * 3511 * Returns the pfn of the underlying kernel page in *rpfn 3512 * on success, or PFN_INVALID on failure. 3513 * 3514 * cookiep (if passed) provides storage space for an opaque cookie 3515 * to return later to hat_delete_callback(). This cookie makes the callback 3516 * deletion significantly quicker by avoiding a potentially lengthy hash 3517 * search. 3518 * 3519 * Returns values: 3520 * 0: success 3521 * ENOMEM: memory allocation failure (e.g. flags was passed as HAC_NOSLEEP) 3522 * EINVAL: callback ID is not valid 3523 * ENXIO: ["vaddr", "vaddr" + len) is not mapped in the kernel's address 3524 * space 3525 * ERANGE: ["vaddr", "vaddr" + len) crosses a page boundary 3526 */ 3527 int 3528 hat_add_callback(id_t callback_id, caddr_t vaddr, uint_t len, uint_t flags, 3529 void *pvt, pfn_t *rpfn, void **cookiep) 3530 { 3531 struct hmehash_bucket *hmebp; 3532 hmeblk_tag hblktag; 3533 struct hme_blk *hmeblkp; 3534 int hmeshift, hashno; 3535 caddr_t saddr, eaddr, baseaddr; 3536 struct pa_hment *pahmep; 3537 struct sf_hment *sfhmep, *osfhmep; 3538 kmutex_t *pml; 3539 tte_t tte; 3540 page_t *pp; 3541 vnode_t *vp; 3542 u_offset_t off; 3543 pfn_t pfn; 3544 int kmflags = (flags & HAC_SLEEP)? KM_SLEEP : KM_NOSLEEP; 3545 int locked = 0; 3546 3547 /* 3548 * For KPM mappings, just return the physical address since we 3549 * don't need to register any callbacks. 
3550 */ 3551 if (IS_KPM_ADDR(vaddr)) { 3552 uint64_t paddr; 3553 SFMMU_KPM_VTOP(vaddr, paddr); 3554 *rpfn = btop(paddr); 3555 if (cookiep != NULL) 3556 *cookiep = HAC_COOKIE_NONE; 3557 return (0); 3558 } 3559 3560 if (callback_id < (id_t)0 || callback_id >= sfmmu_cb_nextid) { 3561 *rpfn = PFN_INVALID; 3562 return (EINVAL); 3563 } 3564 3565 if ((pahmep = kmem_cache_alloc(pa_hment_cache, kmflags)) == NULL) { 3566 *rpfn = PFN_INVALID; 3567 return (ENOMEM); 3568 } 3569 3570 sfhmep = &pahmep->sfment; 3571 3572 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK); 3573 eaddr = saddr + len; 3574 3575 rehash: 3576 /* Find the mapping(s) for this page */ 3577 for (hashno = TTE64K, hmeblkp = NULL; 3578 hmeblkp == NULL && hashno <= mmu_hashcnt; 3579 hashno++) { 3580 hmeshift = HME_HASH_SHIFT(hashno); 3581 hblktag.htag_id = ksfmmup; 3582 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift); 3583 hblktag.htag_rehash = hashno; 3584 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift); 3585 3586 SFMMU_HASH_LOCK(hmebp); 3587 3588 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 3589 3590 if (hmeblkp == NULL) 3591 SFMMU_HASH_UNLOCK(hmebp); 3592 } 3593 3594 if (hmeblkp == NULL) { 3595 kmem_cache_free(pa_hment_cache, pahmep); 3596 *rpfn = PFN_INVALID; 3597 return (ENXIO); 3598 } 3599 3600 HBLKTOHME(osfhmep, hmeblkp, saddr); 3601 sfmmu_copytte(&osfhmep->hme_tte, &tte); 3602 3603 if (!TTE_IS_VALID(&tte)) { 3604 SFMMU_HASH_UNLOCK(hmebp); 3605 kmem_cache_free(pa_hment_cache, pahmep); 3606 *rpfn = PFN_INVALID; 3607 return (ENXIO); 3608 } 3609 3610 /* 3611 * Make sure the boundaries for the callback fall within this 3612 * single mapping. 3613 */ 3614 baseaddr = (caddr_t)get_hblk_base(hmeblkp); 3615 ASSERT(saddr >= baseaddr); 3616 if (eaddr > saddr + TTEBYTES(TTE_CSZ(&tte))) { 3617 SFMMU_HASH_UNLOCK(hmebp); 3618 kmem_cache_free(pa_hment_cache, pahmep); 3619 *rpfn = PFN_INVALID; 3620 return (ERANGE); 3621 } 3622 3623 pfn = sfmmu_ttetopfn(&tte, vaddr); 3624 3625 /* 3626 * The pfn may not have a page_t underneath in which case we 3627 * just return it. This can happen if we are doing I/O to a 3628 * static portion of the kernel's address space, for instance. 3629 */ 3630 pp = osfhmep->hme_page; 3631 if (pp == NULL) { 3632 SFMMU_HASH_UNLOCK(hmebp); 3633 kmem_cache_free(pa_hment_cache, pahmep); 3634 *rpfn = pfn; 3635 if (cookiep) 3636 *cookiep = HAC_COOKIE_NONE; 3637 return (0); 3638 } 3639 ASSERT(pp == PP_PAGEROOT(pp)); 3640 3641 vp = pp->p_vnode; 3642 off = pp->p_offset; 3643 3644 pml = sfmmu_mlist_enter(pp); 3645 3646 if (flags & HAC_PAGELOCK) { 3647 if (!page_trylock(pp, SE_SHARED)) { 3648 /* 3649 * Somebody is holding SE_EXCL lock. Might 3650 * even be hat_page_relocate(). Drop all 3651 * our locks, lookup the page in &kvp, and 3652 * retry. If it doesn't exist in &kvp, then 3653 * we must be dealing with a kernel mapped 3654 * page which doesn't actually belong to 3655 * segkmem so we punt. 3656 */ 3657 sfmmu_mlist_exit(pml); 3658 SFMMU_HASH_UNLOCK(hmebp); 3659 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED); 3660 if (pp == NULL) { 3661 kmem_cache_free(pa_hment_cache, pahmep); 3662 *rpfn = pfn; 3663 if (cookiep) 3664 *cookiep = HAC_COOKIE_NONE; 3665 return (0); 3666 } 3667 page_unlock(pp); 3668 goto rehash; 3669 } 3670 locked = 1; 3671 } 3672 3673 if (!PAGE_LOCKED(pp) && !panicstr) 3674 panic("hat_add_callback: page 0x%p not locked", pp); 3675 3676 if (osfhmep->hme_page != pp || pp->p_vnode != vp || 3677 pp->p_offset != off) { 3678 /* 3679 * The page moved before we got our hands on it. 
Drop 3680 * all the locks and try again. 3681 */ 3682 ASSERT((flags & HAC_PAGELOCK) != 0); 3683 sfmmu_mlist_exit(pml); 3684 SFMMU_HASH_UNLOCK(hmebp); 3685 page_unlock(pp); 3686 locked = 0; 3687 goto rehash; 3688 } 3689 3690 if (vp != &kvp) { 3691 /* 3692 * This is not a segkmem page but another page which 3693 * has been kernel mapped. It had better have at least 3694 * a share lock on it. Return the pfn. 3695 */ 3696 sfmmu_mlist_exit(pml); 3697 SFMMU_HASH_UNLOCK(hmebp); 3698 if (locked) 3699 page_unlock(pp); 3700 kmem_cache_free(pa_hment_cache, pahmep); 3701 ASSERT(PAGE_LOCKED(pp)); 3702 *rpfn = pfn; 3703 if (cookiep) 3704 *cookiep = HAC_COOKIE_NONE; 3705 return (0); 3706 } 3707 3708 /* 3709 * Setup this pa_hment and link its embedded dummy sf_hment into 3710 * the mapping list. 3711 */ 3712 pp->p_share++; 3713 pahmep->cb_id = callback_id; 3714 pahmep->addr = vaddr; 3715 pahmep->len = len; 3716 pahmep->refcnt = 1; 3717 pahmep->flags = 0; 3718 pahmep->pvt = pvt; 3719 3720 sfhmep->hme_tte.ll = 0; 3721 sfhmep->hme_data = pahmep; 3722 sfhmep->hme_prev = osfhmep; 3723 sfhmep->hme_next = osfhmep->hme_next; 3724 3725 if (osfhmep->hme_next) 3726 osfhmep->hme_next->hme_prev = sfhmep; 3727 3728 osfhmep->hme_next = sfhmep; 3729 3730 sfmmu_mlist_exit(pml); 3731 SFMMU_HASH_UNLOCK(hmebp); 3732 3733 if (locked) 3734 page_unlock(pp); 3735 3736 *rpfn = pfn; 3737 if (cookiep) 3738 *cookiep = (void *)pahmep; 3739 3740 return (0); 3741 } 3742 3743 /* 3744 * Remove the relocation callbacks from the specified addr/len. 3745 */ 3746 void 3747 hat_delete_callback(caddr_t vaddr, uint_t len, void *pvt, uint_t flags, 3748 void *cookie) 3749 { 3750 struct hmehash_bucket *hmebp; 3751 hmeblk_tag hblktag; 3752 struct hme_blk *hmeblkp; 3753 int hmeshift, hashno; 3754 caddr_t saddr; 3755 struct pa_hment *pahmep; 3756 struct sf_hment *sfhmep, *osfhmep; 3757 kmutex_t *pml; 3758 tte_t tte; 3759 page_t *pp; 3760 vnode_t *vp; 3761 u_offset_t off; 3762 int locked = 0; 3763 3764 /* 3765 * If the cookie is HAC_COOKIE_NONE then there is no pa_hment to 3766 * remove so just return. 3767 */ 3768 if (cookie == HAC_COOKIE_NONE || IS_KPM_ADDR(vaddr)) 3769 return; 3770 3771 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK); 3772 3773 rehash: 3774 /* Find the mapping(s) for this page */ 3775 for (hashno = TTE64K, hmeblkp = NULL; 3776 hmeblkp == NULL && hashno <= mmu_hashcnt; 3777 hashno++) { 3778 hmeshift = HME_HASH_SHIFT(hashno); 3779 hblktag.htag_id = ksfmmup; 3780 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift); 3781 hblktag.htag_rehash = hashno; 3782 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift); 3783 3784 SFMMU_HASH_LOCK(hmebp); 3785 3786 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 3787 3788 if (hmeblkp == NULL) 3789 SFMMU_HASH_UNLOCK(hmebp); 3790 } 3791 3792 if (hmeblkp == NULL) 3793 return; 3794 3795 HBLKTOHME(osfhmep, hmeblkp, saddr); 3796 3797 sfmmu_copytte(&osfhmep->hme_tte, &tte); 3798 if (!TTE_IS_VALID(&tte)) { 3799 SFMMU_HASH_UNLOCK(hmebp); 3800 return; 3801 } 3802 3803 pp = osfhmep->hme_page; 3804 if (pp == NULL) { 3805 SFMMU_HASH_UNLOCK(hmebp); 3806 ASSERT(cookie == NULL); 3807 return; 3808 } 3809 3810 vp = pp->p_vnode; 3811 off = pp->p_offset; 3812 3813 pml = sfmmu_mlist_enter(pp); 3814 3815 if (flags & HAC_PAGELOCK) { 3816 if (!page_trylock(pp, SE_SHARED)) { 3817 /* 3818 * Somebody is holding SE_EXCL lock. Might 3819 * even be hat_page_relocate(). Drop all 3820 * our locks, lookup the page in &kvp, and 3821 * retry. 
If it doesn't exist in &kvp, then 3822 * we must be dealing with a kernel mapped 3823 * page which doesn't actually belong to 3824 * segkmem so we punt. 3825 */ 3826 sfmmu_mlist_exit(pml); 3827 SFMMU_HASH_UNLOCK(hmebp); 3828 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED); 3829 if (pp == NULL) { 3830 ASSERT(cookie == NULL); 3831 return; 3832 } 3833 page_unlock(pp); 3834 goto rehash; 3835 } 3836 locked = 1; 3837 } 3838 3839 ASSERT(PAGE_LOCKED(pp)); 3840 3841 if (osfhmep->hme_page != pp || pp->p_vnode != vp || 3842 pp->p_offset != off) { 3843 /* 3844 * The page moved before we got our hands on it. Drop 3845 * all the locks and try again. 3846 */ 3847 ASSERT((flags & HAC_PAGELOCK) != 0); 3848 sfmmu_mlist_exit(pml); 3849 SFMMU_HASH_UNLOCK(hmebp); 3850 page_unlock(pp); 3851 locked = 0; 3852 goto rehash; 3853 } 3854 3855 if (vp != &kvp) { 3856 /* 3857 * This is not a segkmem page but another page which 3858 * has been kernel mapped. 3859 */ 3860 sfmmu_mlist_exit(pml); 3861 SFMMU_HASH_UNLOCK(hmebp); 3862 if (locked) 3863 page_unlock(pp); 3864 ASSERT(cookie == NULL); 3865 return; 3866 } 3867 3868 if (cookie != NULL) { 3869 pahmep = (struct pa_hment *)cookie; 3870 sfhmep = &pahmep->sfment; 3871 } else { 3872 for (sfhmep = pp->p_mapping; sfhmep != NULL; 3873 sfhmep = sfhmep->hme_next) { 3874 3875 /* 3876 * skip va<->pa mappings 3877 */ 3878 if (!IS_PAHME(sfhmep)) 3879 continue; 3880 3881 pahmep = sfhmep->hme_data; 3882 ASSERT(pahmep != NULL); 3883 3884 /* 3885 * if pa_hment matches, remove it 3886 */ 3887 if ((pahmep->pvt == pvt) && 3888 (pahmep->addr == vaddr) && 3889 (pahmep->len == len)) { 3890 break; 3891 } 3892 } 3893 } 3894 3895 if (sfhmep == NULL) { 3896 if (!panicstr) { 3897 panic("hat_delete_callback: pa_hment not found, pp %p", 3898 (void *)pp); 3899 } 3900 return; 3901 } 3902 3903 /* 3904 * Note: at this point a valid kernel mapping must still be 3905 * present on this page. 3906 */ 3907 pp->p_share--; 3908 if (pp->p_share <= 0) 3909 panic("hat_delete_callback: zero p_share"); 3910 3911 if (--pahmep->refcnt == 0) { 3912 if (pahmep->flags != 0) 3913 panic("hat_delete_callback: pa_hment is busy"); 3914 3915 /* 3916 * Remove sfhmep from the mapping list for the page. 3917 */ 3918 if (sfhmep->hme_prev) { 3919 sfhmep->hme_prev->hme_next = sfhmep->hme_next; 3920 } else { 3921 pp->p_mapping = sfhmep->hme_next; 3922 } 3923 3924 if (sfhmep->hme_next) 3925 sfhmep->hme_next->hme_prev = sfhmep->hme_prev; 3926 3927 sfmmu_mlist_exit(pml); 3928 SFMMU_HASH_UNLOCK(hmebp); 3929 3930 if (locked) 3931 page_unlock(pp); 3932 3933 kmem_cache_free(pa_hment_cache, pahmep); 3934 return; 3935 } 3936 3937 sfmmu_mlist_exit(pml); 3938 SFMMU_HASH_UNLOCK(hmebp); 3939 if (locked) 3940 page_unlock(pp); 3941 } 3942 3943 /* 3944 * hat_probe returns 1 if the translation for the address 'addr' is 3945 * loaded, zero otherwise. 3946 * 3947 * hat_probe should be used only for advisorary purposes because it may 3948 * occasionally return the wrong value. The implementation must guarantee that 3949 * returning the wrong value is a very rare event. hat_probe is used 3950 * to implement optimizations in the segment drivers. 
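 *
 * A hypothetical use (not taken from an actual segment driver; the
 * helper name is made up): a caller that wants a cheap check before
 * taking a more expensive path might do
 *
 *	if (hat_probe(as->a_hat, addr))
 *		return (fast_path(addr));
 *
 * and must still handle the case where the answer was stale, since
 * the result is advisory only.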
3951 * 3952 */ 3953 int 3954 hat_probe(struct hat *sfmmup, caddr_t addr) 3955 { 3956 pfn_t pfn; 3957 tte_t tte; 3958 3959 ASSERT(sfmmup != NULL); 3960 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 3961 3962 ASSERT((sfmmup == ksfmmup) || 3963 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 3964 3965 if (sfmmup == ksfmmup) { 3966 while ((pfn = sfmmu_vatopfn(addr, sfmmup, &tte)) 3967 == PFN_SUSPENDED) { 3968 sfmmu_vatopfn_suspended(addr, sfmmup, &tte); 3969 } 3970 } else { 3971 pfn = sfmmu_uvatopfn(addr, sfmmup); 3972 } 3973 3974 if (pfn != PFN_INVALID) 3975 return (1); 3976 else 3977 return (0); 3978 } 3979 3980 ssize_t 3981 hat_getpagesize(struct hat *sfmmup, caddr_t addr) 3982 { 3983 tte_t tte; 3984 3985 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 3986 3987 sfmmu_gettte(sfmmup, addr, &tte); 3988 if (TTE_IS_VALID(&tte)) { 3989 return (TTEBYTES(TTE_CSZ(&tte))); 3990 } 3991 return (-1); 3992 } 3993 3994 static void 3995 sfmmu_gettte(struct hat *sfmmup, caddr_t addr, tte_t *ttep) 3996 { 3997 struct hmehash_bucket *hmebp; 3998 hmeblk_tag hblktag; 3999 int hmeshift, hashno = 1; 4000 struct hme_blk *hmeblkp, *list = NULL; 4001 struct sf_hment *sfhmep; 4002 4003 /* support for ISM */ 4004 ism_map_t *ism_map; 4005 ism_blk_t *ism_blkp; 4006 int i; 4007 sfmmu_t *ism_hatid = NULL; 4008 sfmmu_t *locked_hatid = NULL; 4009 4010 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 4011 4012 ism_blkp = sfmmup->sfmmu_iblk; 4013 if (ism_blkp) { 4014 sfmmu_ismhat_enter(sfmmup, 0); 4015 locked_hatid = sfmmup; 4016 } 4017 while (ism_blkp && ism_hatid == NULL) { 4018 ism_map = ism_blkp->iblk_maps; 4019 for (i = 0; ism_map[i].imap_ismhat && i < ISM_MAP_SLOTS; i++) { 4020 if (addr >= ism_start(ism_map[i]) && 4021 addr < ism_end(ism_map[i])) { 4022 sfmmup = ism_hatid = ism_map[i].imap_ismhat; 4023 addr = (caddr_t)(addr - 4024 ism_start(ism_map[i])); 4025 break; 4026 } 4027 } 4028 ism_blkp = ism_blkp->iblk_next; 4029 } 4030 if (locked_hatid) { 4031 sfmmu_ismhat_exit(locked_hatid, 0); 4032 } 4033 4034 hblktag.htag_id = sfmmup; 4035 ttep->ll = 0; 4036 4037 do { 4038 hmeshift = HME_HASH_SHIFT(hashno); 4039 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 4040 hblktag.htag_rehash = hashno; 4041 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 4042 4043 SFMMU_HASH_LOCK(hmebp); 4044 4045 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 4046 if (hmeblkp != NULL) { 4047 HBLKTOHME(sfhmep, hmeblkp, addr); 4048 sfmmu_copytte(&sfhmep->hme_tte, ttep); 4049 SFMMU_HASH_UNLOCK(hmebp); 4050 break; 4051 } 4052 SFMMU_HASH_UNLOCK(hmebp); 4053 hashno++; 4054 } while (HME_REHASH(sfmmup) && (hashno <= mmu_hashcnt)); 4055 4056 sfmmu_hblks_list_purge(&list); 4057 } 4058 4059 uint_t 4060 hat_getattr(struct hat *sfmmup, caddr_t addr, uint_t *attr) 4061 { 4062 tte_t tte; 4063 4064 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 4065 4066 sfmmu_gettte(sfmmup, addr, &tte); 4067 if (TTE_IS_VALID(&tte)) { 4068 *attr = sfmmu_ptov_attr(&tte); 4069 return (0); 4070 } 4071 *attr = 0; 4072 return ((uint_t)0xffffffff); 4073 } 4074 4075 /* 4076 * Enables more attributes on specified address range (ie. logical OR) 4077 */ 4078 void 4079 hat_setattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr) 4080 { 4081 if (hat->sfmmu_xhat_provider) { 4082 XHAT_SETATTR(hat, addr, len, attr); 4083 return; 4084 } else { 4085 /* 4086 * This must be a CPU HAT. 
If the address space has 4087 * XHATs attached, change attributes for all of them, 4088 * just in case 4089 */ 4090 ASSERT(hat->sfmmu_as != NULL); 4091 if (hat->sfmmu_as->a_xhat != NULL) 4092 xhat_setattr_all(hat->sfmmu_as, addr, len, attr); 4093 } 4094 4095 sfmmu_chgattr(hat, addr, len, attr, SFMMU_SETATTR); 4096 } 4097 4098 /* 4099 * Assigns attributes to the specified address range. All the attributes 4100 * are specified. 4101 */ 4102 void 4103 hat_chgattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr) 4104 { 4105 if (hat->sfmmu_xhat_provider) { 4106 XHAT_CHGATTR(hat, addr, len, attr); 4107 return; 4108 } else { 4109 /* 4110 * This must be a CPU HAT. If the address space has 4111 * XHATs attached, change attributes for all of them, 4112 * just in case 4113 */ 4114 ASSERT(hat->sfmmu_as != NULL); 4115 if (hat->sfmmu_as->a_xhat != NULL) 4116 xhat_chgattr_all(hat->sfmmu_as, addr, len, attr); 4117 } 4118 4119 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CHGATTR); 4120 } 4121 4122 /* 4123 * Remove attributes on the specified address range (ie. loginal NAND) 4124 */ 4125 void 4126 hat_clrattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr) 4127 { 4128 if (hat->sfmmu_xhat_provider) { 4129 XHAT_CLRATTR(hat, addr, len, attr); 4130 return; 4131 } else { 4132 /* 4133 * This must be a CPU HAT. If the address space has 4134 * XHATs attached, change attributes for all of them, 4135 * just in case 4136 */ 4137 ASSERT(hat->sfmmu_as != NULL); 4138 if (hat->sfmmu_as->a_xhat != NULL) 4139 xhat_clrattr_all(hat->sfmmu_as, addr, len, attr); 4140 } 4141 4142 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CLRATTR); 4143 } 4144 4145 /* 4146 * Change attributes on an address range to that specified by attr and mode. 4147 */ 4148 static void 4149 sfmmu_chgattr(struct hat *sfmmup, caddr_t addr, size_t len, uint_t attr, 4150 int mode) 4151 { 4152 struct hmehash_bucket *hmebp; 4153 hmeblk_tag hblktag; 4154 int hmeshift, hashno = 1; 4155 struct hme_blk *hmeblkp, *list = NULL; 4156 caddr_t endaddr; 4157 cpuset_t cpuset; 4158 demap_range_t dmr; 4159 4160 CPUSET_ZERO(cpuset); 4161 4162 ASSERT((sfmmup == ksfmmup) || 4163 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 4164 ASSERT((len & MMU_PAGEOFFSET) == 0); 4165 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0); 4166 4167 if ((attr & PROT_USER) && (mode != SFMMU_CLRATTR) && 4168 ((addr + len) > (caddr_t)USERLIMIT)) { 4169 panic("user addr %p in kernel space", 4170 (void *)addr); 4171 } 4172 4173 endaddr = addr + len; 4174 hblktag.htag_id = sfmmup; 4175 DEMAP_RANGE_INIT(sfmmup, &dmr); 4176 4177 while (addr < endaddr) { 4178 hmeshift = HME_HASH_SHIFT(hashno); 4179 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 4180 hblktag.htag_rehash = hashno; 4181 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 4182 4183 SFMMU_HASH_LOCK(hmebp); 4184 4185 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 4186 if (hmeblkp != NULL) { 4187 /* 4188 * We've encountered a shadow hmeblk so skip the range 4189 * of the next smaller mapping size. 
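 *
 * Illustrative arithmetic: if the shadow hmeblk was found at the 4M
 * hash level (hashno == TTE4M), the next smaller mapping size is
 * 512K, so the P2END() below advances addr to the next 512K boundary
 * and the loop restarts at the smallest hash size.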
4190 */ 4191 if (hmeblkp->hblk_shw_bit) { 4192 ASSERT(sfmmup != ksfmmup); 4193 ASSERT(hashno > 1); 4194 addr = (caddr_t)P2END((uintptr_t)addr, 4195 TTEBYTES(hashno - 1)); 4196 } else { 4197 addr = sfmmu_hblk_chgattr(sfmmup, 4198 hmeblkp, addr, endaddr, &dmr, attr, mode); 4199 } 4200 SFMMU_HASH_UNLOCK(hmebp); 4201 hashno = 1; 4202 continue; 4203 } 4204 SFMMU_HASH_UNLOCK(hmebp); 4205 4206 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 4207 /* 4208 * We have traversed the whole list and rehashed 4209 * if necessary without finding the address to chgattr. 4210 * This is ok, so we increment the address by the 4211 * smallest hmeblk range for kernel mappings or for 4212 * user mappings with no large pages, and the largest 4213 * hmeblk range, to account for shadow hmeblks, for 4214 * user mappings with large pages and continue. 4215 */ 4216 if (sfmmup == ksfmmup) 4217 addr = (caddr_t)P2END((uintptr_t)addr, 4218 TTEBYTES(1)); 4219 else 4220 addr = (caddr_t)P2END((uintptr_t)addr, 4221 TTEBYTES(hashno)); 4222 hashno = 1; 4223 } else { 4224 hashno++; 4225 } 4226 } 4227 4228 sfmmu_hblks_list_purge(&list); 4229 DEMAP_RANGE_FLUSH(&dmr); 4230 cpuset = sfmmup->sfmmu_cpusran; 4231 xt_sync(cpuset); 4232 } 4233 4234 /* 4235 * This function chgattr on a range of addresses in an hmeblk. It returns the 4236 * next addres that needs to be chgattr. 4237 * It should be called with the hash lock held. 4238 * XXX It should be possible to optimize chgattr by not flushing every time but 4239 * on the other hand: 4240 * 1. do one flush crosscall. 4241 * 2. only flush if we are increasing permissions (make sure this will work) 4242 */ 4243 static caddr_t 4244 sfmmu_hblk_chgattr(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 4245 caddr_t endaddr, demap_range_t *dmrp, uint_t attr, int mode) 4246 { 4247 tte_t tte, tteattr, tteflags, ttemod; 4248 struct sf_hment *sfhmep; 4249 int ttesz; 4250 struct page *pp = NULL; 4251 kmutex_t *pml, *pmtx; 4252 int ret; 4253 int use_demap_range; 4254 #if defined(SF_ERRATA_57) 4255 int check_exec; 4256 #endif 4257 4258 ASSERT(in_hblk_range(hmeblkp, addr)); 4259 ASSERT(hmeblkp->hblk_shw_bit == 0); 4260 4261 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 4262 ttesz = get_hblk_ttesz(hmeblkp); 4263 4264 /* 4265 * Flush the current demap region if addresses have been 4266 * skipped or the page size doesn't match. 4267 */ 4268 use_demap_range = (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp)); 4269 if (use_demap_range) { 4270 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 4271 } else { 4272 DEMAP_RANGE_FLUSH(dmrp); 4273 } 4274 4275 tteattr.ll = sfmmu_vtop_attr(attr, mode, &tteflags); 4276 #if defined(SF_ERRATA_57) 4277 check_exec = (sfmmup != ksfmmup) && 4278 AS_TYPE_64BIT(sfmmup->sfmmu_as) && 4279 TTE_IS_EXECUTABLE(&tteattr); 4280 #endif 4281 HBLKTOHME(sfhmep, hmeblkp, addr); 4282 while (addr < endaddr) { 4283 sfmmu_copytte(&sfhmep->hme_tte, &tte); 4284 if (TTE_IS_VALID(&tte)) { 4285 if ((tte.ll & tteflags.ll) == tteattr.ll) { 4286 /* 4287 * if the new attr is the same as old 4288 * continue 4289 */ 4290 goto next_addr; 4291 } 4292 if (!TTE_IS_WRITABLE(&tteattr)) { 4293 /* 4294 * make sure we clear hw modify bit if we 4295 * removing write protections 4296 */ 4297 tteflags.tte_intlo |= TTE_HWWR_INT; 4298 } 4299 4300 pml = NULL; 4301 pp = sfhmep->hme_page; 4302 if (pp) { 4303 pml = sfmmu_mlist_enter(pp); 4304 } 4305 4306 if (pp != sfhmep->hme_page) { 4307 /* 4308 * tte must have been unloaded. 
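 * (For example, pageunload may have run between our unlocked read
 * of the tte and taking the mapping list lock; the continue below
 * drops the list lock and retries the same address without
 * advancing.)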
4309 */ 4310 ASSERT(pml); 4311 sfmmu_mlist_exit(pml); 4312 continue; 4313 } 4314 4315 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 4316 4317 ttemod = tte; 4318 ttemod.ll = (ttemod.ll & ~tteflags.ll) | tteattr.ll; 4319 ASSERT(TTE_TO_TTEPFN(&ttemod) == TTE_TO_TTEPFN(&tte)); 4320 4321 #if defined(SF_ERRATA_57) 4322 if (check_exec && addr < errata57_limit) 4323 ttemod.tte_exec_perm = 0; 4324 #endif 4325 ret = sfmmu_modifytte_try(&tte, &ttemod, 4326 &sfhmep->hme_tte); 4327 4328 if (ret < 0) { 4329 /* tte changed underneath us */ 4330 if (pml) { 4331 sfmmu_mlist_exit(pml); 4332 } 4333 continue; 4334 } 4335 4336 if (tteflags.tte_intlo & TTE_HWWR_INT) { 4337 /* 4338 * need to sync if we are clearing modify bit. 4339 */ 4340 sfmmu_ttesync(sfmmup, addr, &tte, pp); 4341 } 4342 4343 if (pp && PP_ISRO(pp)) { 4344 if (tteattr.tte_intlo & TTE_WRPRM_INT) { 4345 pmtx = sfmmu_page_enter(pp); 4346 PP_CLRRO(pp); 4347 sfmmu_page_exit(pmtx); 4348 } 4349 } 4350 4351 if (ret > 0 && use_demap_range) { 4352 DEMAP_RANGE_MARKPG(dmrp, addr); 4353 } else if (ret > 0) { 4354 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 4355 } 4356 4357 if (pml) { 4358 sfmmu_mlist_exit(pml); 4359 } 4360 } 4361 next_addr: 4362 addr += TTEBYTES(ttesz); 4363 sfhmep++; 4364 DEMAP_RANGE_NEXTPG(dmrp); 4365 } 4366 return (addr); 4367 } 4368 4369 /* 4370 * This routine converts virtual attributes to physical ones. It will 4371 * update the tteflags field with the tte mask corresponding to the attributes 4372 * affected and it returns the new attributes. It will also clear the modify 4373 * bit if we are taking away write permission. This is necessary since the 4374 * modify bit is the hardware permission bit and we need to clear it in order 4375 * to detect write faults. 4376 */ 4377 static uint64_t 4378 sfmmu_vtop_attr(uint_t attr, int mode, tte_t *ttemaskp) 4379 { 4380 tte_t ttevalue; 4381 4382 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 4383 4384 switch (mode) { 4385 case SFMMU_CHGATTR: 4386 /* all attributes specified */ 4387 ttevalue.tte_inthi = MAKE_TTEATTR_INTHI(attr); 4388 ttevalue.tte_intlo = MAKE_TTEATTR_INTLO(attr); 4389 ttemaskp->tte_inthi = TTEINTHI_ATTR; 4390 ttemaskp->tte_intlo = TTEINTLO_ATTR; 4391 break; 4392 case SFMMU_SETATTR: 4393 ASSERT(!(attr & ~HAT_PROT_MASK)); 4394 ttemaskp->ll = 0; 4395 ttevalue.ll = 0; 4396 /* 4397 * a valid tte implies exec and read for sfmmu 4398 * so no need to do anything about them. 4399 * since priviledged access implies user access 4400 * PROT_USER doesn't make sense either. 
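 *
 * (Illustrative consequence: hat_setattr(hat, addr, len, PROT_WRITE)
 * only turns on TTE_WRPRM_INT in both the returned value and the
 * mask; no other tte bits are affected by SFMMU_SETATTR.)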
4401 */ 4402 if (attr & PROT_WRITE) { 4403 ttemaskp->tte_intlo |= TTE_WRPRM_INT; 4404 ttevalue.tte_intlo |= TTE_WRPRM_INT; 4405 } 4406 break; 4407 case SFMMU_CLRATTR: 4408 /* attributes will be nand with current ones */ 4409 if (attr & ~(PROT_WRITE | PROT_USER)) { 4410 panic("sfmmu: attr %x not supported", attr); 4411 } 4412 ttemaskp->ll = 0; 4413 ttevalue.ll = 0; 4414 if (attr & PROT_WRITE) { 4415 /* clear both writable and modify bit */ 4416 ttemaskp->tte_intlo |= TTE_WRPRM_INT | TTE_HWWR_INT; 4417 } 4418 if (attr & PROT_USER) { 4419 ttemaskp->tte_intlo |= TTE_PRIV_INT; 4420 ttevalue.tte_intlo |= TTE_PRIV_INT; 4421 } 4422 break; 4423 default: 4424 panic("sfmmu_vtop_attr: bad mode %x", mode); 4425 } 4426 ASSERT(TTE_TO_TTEPFN(&ttevalue) == 0); 4427 return (ttevalue.ll); 4428 } 4429 4430 static uint_t 4431 sfmmu_ptov_attr(tte_t *ttep) 4432 { 4433 uint_t attr; 4434 4435 ASSERT(TTE_IS_VALID(ttep)); 4436 4437 attr = PROT_READ; 4438 4439 if (TTE_IS_WRITABLE(ttep)) { 4440 attr |= PROT_WRITE; 4441 } 4442 if (TTE_IS_EXECUTABLE(ttep)) { 4443 attr |= PROT_EXEC; 4444 } 4445 if (!TTE_IS_PRIVILEGED(ttep)) { 4446 attr |= PROT_USER; 4447 } 4448 if (TTE_IS_NFO(ttep)) { 4449 attr |= HAT_NOFAULT; 4450 } 4451 if (TTE_IS_NOSYNC(ttep)) { 4452 attr |= HAT_NOSYNC; 4453 } 4454 if (TTE_IS_SIDEFFECT(ttep)) { 4455 attr |= SFMMU_SIDEFFECT; 4456 } 4457 if (!TTE_IS_VCACHEABLE(ttep)) { 4458 attr |= SFMMU_UNCACHEVTTE; 4459 } 4460 if (!TTE_IS_PCACHEABLE(ttep)) { 4461 attr |= SFMMU_UNCACHEPTTE; 4462 } 4463 return (attr); 4464 } 4465 4466 /* 4467 * hat_chgprot is a deprecated hat call. New segment drivers 4468 * should store all attributes and use hat_*attr calls. 4469 * 4470 * Change the protections in the virtual address range 4471 * given to the specified virtual protection. If vprot is ~PROT_WRITE, 4472 * then remove write permission, leaving the other 4473 * permissions unchanged. If vprot is ~PROT_USER, remove user permissions. 4474 * 4475 */ 4476 void 4477 hat_chgprot(struct hat *sfmmup, caddr_t addr, size_t len, uint_t vprot) 4478 { 4479 struct hmehash_bucket *hmebp; 4480 hmeblk_tag hblktag; 4481 int hmeshift, hashno = 1; 4482 struct hme_blk *hmeblkp, *list = NULL; 4483 caddr_t endaddr; 4484 cpuset_t cpuset; 4485 demap_range_t dmr; 4486 4487 ASSERT((len & MMU_PAGEOFFSET) == 0); 4488 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0); 4489 4490 if (sfmmup->sfmmu_xhat_provider) { 4491 XHAT_CHGPROT(sfmmup, addr, len, vprot); 4492 return; 4493 } else { 4494 /* 4495 * This must be a CPU HAT. If the address space has 4496 * XHATs attached, change attributes for all of them, 4497 * just in case 4498 */ 4499 ASSERT(sfmmup->sfmmu_as != NULL); 4500 if (sfmmup->sfmmu_as->a_xhat != NULL) 4501 xhat_chgprot_all(sfmmup->sfmmu_as, addr, len, vprot); 4502 } 4503 4504 CPUSET_ZERO(cpuset); 4505 4506 if ((vprot != (uint_t)~PROT_WRITE) && (vprot & PROT_USER) && 4507 ((addr + len) > (caddr_t)USERLIMIT)) { 4508 panic("user addr %p vprot %x in kernel space", 4509 (void *)addr, vprot); 4510 } 4511 endaddr = addr + len; 4512 hblktag.htag_id = sfmmup; 4513 DEMAP_RANGE_INIT(sfmmup, &dmr); 4514 4515 while (addr < endaddr) { 4516 hmeshift = HME_HASH_SHIFT(hashno); 4517 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 4518 hblktag.htag_rehash = hashno; 4519 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 4520 4521 SFMMU_HASH_LOCK(hmebp); 4522 4523 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 4524 if (hmeblkp != NULL) { 4525 /* 4526 * We've encountered a shadow hmeblk so skip the range 4527 * of the next smaller mapping size. 
4528 */ 4529 if (hmeblkp->hblk_shw_bit) { 4530 ASSERT(sfmmup != ksfmmup); 4531 ASSERT(hashno > 1); 4532 addr = (caddr_t)P2END((uintptr_t)addr, 4533 TTEBYTES(hashno - 1)); 4534 } else { 4535 addr = sfmmu_hblk_chgprot(sfmmup, hmeblkp, 4536 addr, endaddr, &dmr, vprot); 4537 } 4538 SFMMU_HASH_UNLOCK(hmebp); 4539 hashno = 1; 4540 continue; 4541 } 4542 SFMMU_HASH_UNLOCK(hmebp); 4543 4544 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 4545 /* 4546 * We have traversed the whole list and rehashed 4547 * if necessary without finding the address to chgprot. 4548 * This is ok so we increment the address by the 4549 * smallest hmeblk range for kernel mappings and the 4550 * largest hmeblk range, to account for shadow hmeblks, 4551 * for user mappings and continue. 4552 */ 4553 if (sfmmup == ksfmmup) 4554 addr = (caddr_t)P2END((uintptr_t)addr, 4555 TTEBYTES(1)); 4556 else 4557 addr = (caddr_t)P2END((uintptr_t)addr, 4558 TTEBYTES(hashno)); 4559 hashno = 1; 4560 } else { 4561 hashno++; 4562 } 4563 } 4564 4565 sfmmu_hblks_list_purge(&list); 4566 DEMAP_RANGE_FLUSH(&dmr); 4567 cpuset = sfmmup->sfmmu_cpusran; 4568 xt_sync(cpuset); 4569 } 4570 4571 /* 4572 * This function chgprots a range of addresses in an hmeblk. It returns the 4573 * next addres that needs to be chgprot. 4574 * It should be called with the hash lock held. 4575 * XXX It shold be possible to optimize chgprot by not flushing every time but 4576 * on the other hand: 4577 * 1. do one flush crosscall. 4578 * 2. only flush if we are increasing permissions (make sure this will work) 4579 */ 4580 static caddr_t 4581 sfmmu_hblk_chgprot(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 4582 caddr_t endaddr, demap_range_t *dmrp, uint_t vprot) 4583 { 4584 uint_t pprot; 4585 tte_t tte, ttemod; 4586 struct sf_hment *sfhmep; 4587 uint_t tteflags; 4588 int ttesz; 4589 struct page *pp = NULL; 4590 kmutex_t *pml, *pmtx; 4591 int ret; 4592 int use_demap_range; 4593 #if defined(SF_ERRATA_57) 4594 int check_exec; 4595 #endif 4596 4597 ASSERT(in_hblk_range(hmeblkp, addr)); 4598 ASSERT(hmeblkp->hblk_shw_bit == 0); 4599 4600 #ifdef DEBUG 4601 if (get_hblk_ttesz(hmeblkp) != TTE8K && 4602 (endaddr < get_hblk_endaddr(hmeblkp))) { 4603 panic("sfmmu_hblk_chgprot: partial chgprot of large page"); 4604 } 4605 #endif /* DEBUG */ 4606 4607 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 4608 ttesz = get_hblk_ttesz(hmeblkp); 4609 4610 pprot = sfmmu_vtop_prot(vprot, &tteflags); 4611 #if defined(SF_ERRATA_57) 4612 check_exec = (sfmmup != ksfmmup) && 4613 AS_TYPE_64BIT(sfmmup->sfmmu_as) && 4614 ((vprot & PROT_EXEC) == PROT_EXEC); 4615 #endif 4616 HBLKTOHME(sfhmep, hmeblkp, addr); 4617 4618 /* 4619 * Flush the current demap region if addresses have been 4620 * skipped or the page size doesn't match. 4621 */ 4622 use_demap_range = (TTEBYTES(ttesz) == MMU_PAGESIZE); 4623 if (use_demap_range) { 4624 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 4625 } else { 4626 DEMAP_RANGE_FLUSH(dmrp); 4627 } 4628 4629 while (addr < endaddr) { 4630 sfmmu_copytte(&sfhmep->hme_tte, &tte); 4631 if (TTE_IS_VALID(&tte)) { 4632 if (TTE_GET_LOFLAGS(&tte, tteflags) == pprot) { 4633 /* 4634 * if the new protection is the same as old 4635 * continue 4636 */ 4637 goto next_addr; 4638 } 4639 pml = NULL; 4640 pp = sfhmep->hme_page; 4641 if (pp) { 4642 pml = sfmmu_mlist_enter(pp); 4643 } 4644 if (pp != sfhmep->hme_page) { 4645 /* 4646 * tte most have been unloaded 4647 * underneath us. 
Recheck 4648 */ 4649 ASSERT(pml); 4650 sfmmu_mlist_exit(pml); 4651 continue; 4652 } 4653 4654 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 4655 4656 ttemod = tte; 4657 TTE_SET_LOFLAGS(&ttemod, tteflags, pprot); 4658 #if defined(SF_ERRATA_57) 4659 if (check_exec && addr < errata57_limit) 4660 ttemod.tte_exec_perm = 0; 4661 #endif 4662 ret = sfmmu_modifytte_try(&tte, &ttemod, 4663 &sfhmep->hme_tte); 4664 4665 if (ret < 0) { 4666 /* tte changed underneath us */ 4667 if (pml) { 4668 sfmmu_mlist_exit(pml); 4669 } 4670 continue; 4671 } 4672 4673 if (tteflags & TTE_HWWR_INT) { 4674 /* 4675 * need to sync if we are clearing modify bit. 4676 */ 4677 sfmmu_ttesync(sfmmup, addr, &tte, pp); 4678 } 4679 4680 if (pp && PP_ISRO(pp)) { 4681 if (pprot & TTE_WRPRM_INT) { 4682 pmtx = sfmmu_page_enter(pp); 4683 PP_CLRRO(pp); 4684 sfmmu_page_exit(pmtx); 4685 } 4686 } 4687 4688 if (ret > 0 && use_demap_range) { 4689 DEMAP_RANGE_MARKPG(dmrp, addr); 4690 } else if (ret > 0) { 4691 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 4692 } 4693 4694 if (pml) { 4695 sfmmu_mlist_exit(pml); 4696 } 4697 } 4698 next_addr: 4699 addr += TTEBYTES(ttesz); 4700 sfhmep++; 4701 DEMAP_RANGE_NEXTPG(dmrp); 4702 } 4703 return (addr); 4704 } 4705 4706 /* 4707 * This routine is deprecated and should only be used by hat_chgprot. 4708 * The correct routine is sfmmu_vtop_attr. 4709 * This routine converts virtual page protections to physical ones. It will 4710 * update the tteflags field with the tte mask corresponding to the protections 4711 * affected and it returns the new protections. It will also clear the modify 4712 * bit if we are taking away write permission. This is necessary since the 4713 * modify bit is the hardware permission bit and we need to clear it in order 4714 * to detect write faults. 4715 * It accepts the following special protections: 4716 * ~PROT_WRITE = remove write permissions. 4717 * ~PROT_USER = remove user permissions. 
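 *
 * Hypothetical caller sketch (hat_chgprot is the expected caller;
 * addr and len are illustrative):
 *
 *	hat_chgprot(as->a_hat, addr, len, (uint_t)~PROT_WRITE);
 *
 * removes write permission over [addr, addr + len) while leaving the
 * other permissions alone; both TTE_WRPRM_INT and TTE_HWWR_INT are
 * cleared so that subsequent writes fault and can be detected.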
4718 */ 4719 static uint_t 4720 sfmmu_vtop_prot(uint_t vprot, uint_t *tteflagsp) 4721 { 4722 if (vprot == (uint_t)~PROT_WRITE) { 4723 *tteflagsp = TTE_WRPRM_INT | TTE_HWWR_INT; 4724 return (0); /* will cause wrprm to be cleared */ 4725 } 4726 if (vprot == (uint_t)~PROT_USER) { 4727 *tteflagsp = TTE_PRIV_INT; 4728 return (0); /* will cause privprm to be cleared */ 4729 } 4730 if ((vprot == 0) || (vprot == PROT_USER) || 4731 ((vprot & PROT_ALL) != vprot)) { 4732 panic("sfmmu_vtop_prot -- bad prot %x", vprot); 4733 } 4734 4735 switch (vprot) { 4736 case (PROT_READ): 4737 case (PROT_EXEC): 4738 case (PROT_EXEC | PROT_READ): 4739 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT; 4740 return (TTE_PRIV_INT); /* set prv and clr wrt */ 4741 case (PROT_WRITE): 4742 case (PROT_WRITE | PROT_READ): 4743 case (PROT_EXEC | PROT_WRITE): 4744 case (PROT_EXEC | PROT_WRITE | PROT_READ): 4745 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT; 4746 return (TTE_PRIV_INT | TTE_WRPRM_INT); /* set prv and wrt */ 4747 case (PROT_USER | PROT_READ): 4748 case (PROT_USER | PROT_EXEC): 4749 case (PROT_USER | PROT_EXEC | PROT_READ): 4750 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT; 4751 return (0); /* clr prv and wrt */ 4752 case (PROT_USER | PROT_WRITE): 4753 case (PROT_USER | PROT_WRITE | PROT_READ): 4754 case (PROT_USER | PROT_EXEC | PROT_WRITE): 4755 case (PROT_USER | PROT_EXEC | PROT_WRITE | PROT_READ): 4756 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT; 4757 return (TTE_WRPRM_INT); /* clr prv and set wrt */ 4758 default: 4759 panic("sfmmu_vtop_prot -- bad prot %x", vprot); 4760 } 4761 return (0); 4762 } 4763 4764 /* 4765 * Alternate unload for very large virtual ranges. With a true 64 bit VA, 4766 * the normal algorithm would take too long for a very large VA range with 4767 * few real mappings. This routine just walks thru all HMEs in the global 4768 * hash table to find and remove mappings. 4769 */ 4770 static void 4771 hat_unload_large_virtual( 4772 struct hat *sfmmup, 4773 caddr_t startaddr, 4774 size_t len, 4775 uint_t flags, 4776 hat_callback_t *callback) 4777 { 4778 struct hmehash_bucket *hmebp; 4779 struct hme_blk *hmeblkp; 4780 struct hme_blk *pr_hblk = NULL; 4781 struct hme_blk *nx_hblk; 4782 struct hme_blk *list = NULL; 4783 int i; 4784 uint64_t hblkpa, prevpa, nx_pa; 4785 demap_range_t dmr, *dmrp; 4786 cpuset_t cpuset; 4787 caddr_t endaddr = startaddr + len; 4788 caddr_t sa; 4789 caddr_t ea; 4790 caddr_t cb_sa[MAX_CB_ADDR]; 4791 caddr_t cb_ea[MAX_CB_ADDR]; 4792 int addr_cnt = 0; 4793 int a = 0; 4794 4795 if (sfmmup->sfmmu_free) { 4796 dmrp = NULL; 4797 } else { 4798 dmrp = &dmr; 4799 DEMAP_RANGE_INIT(sfmmup, dmrp); 4800 } 4801 4802 /* 4803 * Loop through all the hash buckets of HME blocks looking for matches. 
4804 */ 4805 for (i = 0; i <= UHMEHASH_SZ; i++) { 4806 hmebp = &uhme_hash[i]; 4807 SFMMU_HASH_LOCK(hmebp); 4808 hmeblkp = hmebp->hmeblkp; 4809 hblkpa = hmebp->hmeh_nextpa; 4810 prevpa = 0; 4811 pr_hblk = NULL; 4812 while (hmeblkp) { 4813 nx_hblk = hmeblkp->hblk_next; 4814 nx_pa = hmeblkp->hblk_nextpa; 4815 4816 /* 4817 * skip if not this context, if a shadow block or 4818 * if the mapping is not in the requested range 4819 */ 4820 if (hmeblkp->hblk_tag.htag_id != sfmmup || 4821 hmeblkp->hblk_shw_bit || 4822 (sa = (caddr_t)get_hblk_base(hmeblkp)) >= endaddr || 4823 (ea = get_hblk_endaddr(hmeblkp)) <= startaddr) { 4824 pr_hblk = hmeblkp; 4825 prevpa = hblkpa; 4826 goto next_block; 4827 } 4828 4829 /* 4830 * unload if there are any current valid mappings 4831 */ 4832 if (hmeblkp->hblk_vcnt != 0 || 4833 hmeblkp->hblk_hmecnt != 0) 4834 (void) sfmmu_hblk_unload(sfmmup, hmeblkp, 4835 sa, ea, dmrp, flags); 4836 4837 /* 4838 * on unmap we also release the HME block itself, once 4839 * all mappings are gone. 4840 */ 4841 if ((flags & HAT_UNLOAD_UNMAP) != 0 && 4842 !hmeblkp->hblk_vcnt && 4843 !hmeblkp->hblk_hmecnt) { 4844 ASSERT(!hmeblkp->hblk_lckcnt); 4845 sfmmu_hblk_hash_rm(hmebp, hmeblkp, 4846 prevpa, pr_hblk); 4847 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 4848 } else { 4849 pr_hblk = hmeblkp; 4850 prevpa = hblkpa; 4851 } 4852 4853 if (callback == NULL) 4854 goto next_block; 4855 4856 /* 4857 * HME blocks may span more than one page, but we may be 4858 * unmapping only one page, so check for a smaller range 4859 * for the callback 4860 */ 4861 if (sa < startaddr) 4862 sa = startaddr; 4863 if (--ea > endaddr) 4864 ea = endaddr - 1; 4865 4866 cb_sa[addr_cnt] = sa; 4867 cb_ea[addr_cnt] = ea; 4868 if (++addr_cnt == MAX_CB_ADDR) { 4869 if (dmrp != NULL) { 4870 DEMAP_RANGE_FLUSH(dmrp); 4871 cpuset = sfmmup->sfmmu_cpusran; 4872 xt_sync(cpuset); 4873 } 4874 4875 for (a = 0; a < MAX_CB_ADDR; ++a) { 4876 callback->hcb_start_addr = cb_sa[a]; 4877 callback->hcb_end_addr = cb_ea[a]; 4878 callback->hcb_function(callback); 4879 } 4880 addr_cnt = 0; 4881 } 4882 4883 next_block: 4884 hmeblkp = nx_hblk; 4885 hblkpa = nx_pa; 4886 } 4887 SFMMU_HASH_UNLOCK(hmebp); 4888 } 4889 4890 sfmmu_hblks_list_purge(&list); 4891 if (dmrp != NULL) { 4892 DEMAP_RANGE_FLUSH(dmrp); 4893 cpuset = sfmmup->sfmmu_cpusran; 4894 xt_sync(cpuset); 4895 } 4896 4897 for (a = 0; a < addr_cnt; ++a) { 4898 callback->hcb_start_addr = cb_sa[a]; 4899 callback->hcb_end_addr = cb_ea[a]; 4900 callback->hcb_function(callback); 4901 } 4902 4903 /* 4904 * Check TSB and TLB page sizes if the process isn't exiting. 4905 */ 4906 if (!sfmmup->sfmmu_free) 4907 sfmmu_check_page_sizes(sfmmup, 0); 4908 } 4909 4910 /* 4911 * Unload all the mappings in the range [addr..addr+len). addr and len must 4912 * be MMU_PAGESIZE aligned. 
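 *
 * Hypothetical usage sketch (my_unload_notify is a made-up function
 * that takes the hat_callback_t pointer; only the fields used by
 * this file are shown):
 *
 *	hat_callback_t cb;
 *
 *	cb.hcb_function = my_unload_notify;
 *	hat_unload_callback(hat, addr, len, HAT_UNLOAD_UNMAP, &cb);
 *
 * For each contiguous chunk actually unloaded, hcb_start_addr and
 * hcb_end_addr are filled in and hcb_function is invoked; passing a
 * NULL callback (as hat_unload() does) skips the notification.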
4913 */ 4914 4915 extern struct seg *segkmap; 4916 #define ISSEGKMAP(sfmmup, addr) (sfmmup == ksfmmup && \ 4917 segkmap->s_base <= (addr) && (addr) < (segkmap->s_base + segkmap->s_size)) 4918 4919 4920 void 4921 hat_unload_callback( 4922 struct hat *sfmmup, 4923 caddr_t addr, 4924 size_t len, 4925 uint_t flags, 4926 hat_callback_t *callback) 4927 { 4928 struct hmehash_bucket *hmebp; 4929 hmeblk_tag hblktag; 4930 int hmeshift, hashno, iskernel; 4931 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL; 4932 caddr_t endaddr; 4933 cpuset_t cpuset; 4934 uint64_t hblkpa, prevpa; 4935 int addr_count = 0; 4936 int a; 4937 caddr_t cb_start_addr[MAX_CB_ADDR]; 4938 caddr_t cb_end_addr[MAX_CB_ADDR]; 4939 int issegkmap = ISSEGKMAP(sfmmup, addr); 4940 demap_range_t dmr, *dmrp; 4941 4942 if (sfmmup->sfmmu_xhat_provider) { 4943 XHAT_UNLOAD_CALLBACK(sfmmup, addr, len, flags, callback); 4944 return; 4945 } else { 4946 /* 4947 * This must be a CPU HAT. If the address space has 4948 * XHATs attached, unload the mappings for all of them, 4949 * just in case 4950 */ 4951 ASSERT(sfmmup->sfmmu_as != NULL); 4952 if (sfmmup->sfmmu_as->a_xhat != NULL) 4953 xhat_unload_callback_all(sfmmup->sfmmu_as, addr, 4954 len, flags, callback); 4955 } 4956 4957 ASSERT((sfmmup == ksfmmup) || (flags & HAT_UNLOAD_OTHER) || \ 4958 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 4959 4960 ASSERT(sfmmup != NULL); 4961 ASSERT((len & MMU_PAGEOFFSET) == 0); 4962 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 4963 4964 /* 4965 * Probing through a large VA range (say 63 bits) will be slow, even 4966 * at 4 Meg steps between the probes. So, when the virtual address range 4967 * is very large, search the HME entries for what to unload. 4968 * 4969 * len >> TTE_PAGE_SHIFT(TTE4M) is the # of 4Meg probes we'd need 4970 * 4971 * UHMEHASH_SZ is number of hash buckets to examine 4972 * 4973 */ 4974 if (sfmmup != KHATID && (len >> TTE_PAGE_SHIFT(TTE4M)) > UHMEHASH_SZ) { 4975 hat_unload_large_virtual(sfmmup, addr, len, flags, callback); 4976 return; 4977 } 4978 4979 CPUSET_ZERO(cpuset); 4980 4981 /* 4982 * If the process is exiting, we can save a lot of fuss since 4983 * we'll flush the TLB when we free the ctx anyway. 4984 */ 4985 if (sfmmup->sfmmu_free) 4986 dmrp = NULL; 4987 else 4988 dmrp = &dmr; 4989 4990 DEMAP_RANGE_INIT(sfmmup, dmrp); 4991 endaddr = addr + len; 4992 hblktag.htag_id = sfmmup; 4993 4994 /* 4995 * It is likely for the vm to call unload over a wide range of 4996 * addresses that are actually very sparsely populated by 4997 * translations. In order to speed this up the sfmmu hat supports 4998 * the concept of shadow hmeblks. Dummy large page hmeblks that 4999 * correspond to actual small translations are allocated at tteload 5000 * time and are referred to as shadow hmeblks. Now, during unload 5001 * time, we first check if we have a shadow hmeblk for that 5002 * translation. The absence of one means the corresponding address 5003 * range is empty and can be skipped. 5004 * 5005 * The kernel is an exception to above statement and that is why 5006 * we don't use shadow hmeblks and hash starting from the smallest 5007 * page size. 
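 *
 * Illustrative consequence: a user unload starts probing at the
 * largest hash size (TTE256M or TTE4M below), so when no hmeblk,
 * shadow or real, exists at that level an entire large-page aligned
 * region is known to be empty and is skipped with a single probe,
 * while the kernel starts at TTE64K and rehashes upward instead.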
5008 */ 5009 if (sfmmup == KHATID) { 5010 iskernel = 1; 5011 hashno = TTE64K; 5012 } else { 5013 iskernel = 0; 5014 if (mmu_page_sizes == max_mmu_page_sizes) { 5015 hashno = TTE256M; 5016 } else { 5017 hashno = TTE4M; 5018 } 5019 } 5020 while (addr < endaddr) { 5021 hmeshift = HME_HASH_SHIFT(hashno); 5022 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 5023 hblktag.htag_rehash = hashno; 5024 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 5025 5026 SFMMU_HASH_LOCK(hmebp); 5027 5028 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, hblkpa, pr_hblk, 5029 prevpa, &list); 5030 if (hmeblkp == NULL) { 5031 /* 5032 * didn't find an hmeblk. skip the appropiate 5033 * address range. 5034 */ 5035 SFMMU_HASH_UNLOCK(hmebp); 5036 if (iskernel) { 5037 if (hashno < mmu_hashcnt) { 5038 hashno++; 5039 continue; 5040 } else { 5041 hashno = TTE64K; 5042 addr = (caddr_t)roundup((uintptr_t)addr 5043 + 1, MMU_PAGESIZE64K); 5044 continue; 5045 } 5046 } 5047 addr = (caddr_t)roundup((uintptr_t)addr + 1, 5048 (1 << hmeshift)); 5049 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 5050 ASSERT(hashno == TTE64K); 5051 continue; 5052 } 5053 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 5054 hashno = TTE512K; 5055 continue; 5056 } 5057 if (mmu_page_sizes == max_mmu_page_sizes) { 5058 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 5059 hashno = TTE4M; 5060 continue; 5061 } 5062 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 5063 hashno = TTE32M; 5064 continue; 5065 } 5066 hashno = TTE256M; 5067 continue; 5068 } else { 5069 hashno = TTE4M; 5070 continue; 5071 } 5072 } 5073 ASSERT(hmeblkp); 5074 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 5075 /* 5076 * If the valid count is zero we can skip the range 5077 * mapped by this hmeblk. 5078 * We free hblks in the case of HAT_UNMAP. HAT_UNMAP 5079 * is used by segment drivers as a hint 5080 * that the mapping resource won't be used any longer. 5081 * The best example of this is during exit(). 5082 */ 5083 addr = (caddr_t)roundup((uintptr_t)addr + 1, 5084 get_hblk_span(hmeblkp)); 5085 if ((flags & HAT_UNLOAD_UNMAP) || 5086 (iskernel && !issegkmap)) { 5087 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, 5088 pr_hblk); 5089 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 5090 } 5091 SFMMU_HASH_UNLOCK(hmebp); 5092 5093 if (iskernel) { 5094 hashno = TTE64K; 5095 continue; 5096 } 5097 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 5098 ASSERT(hashno == TTE64K); 5099 continue; 5100 } 5101 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 5102 hashno = TTE512K; 5103 continue; 5104 } 5105 if (mmu_page_sizes == max_mmu_page_sizes) { 5106 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 5107 hashno = TTE4M; 5108 continue; 5109 } 5110 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 5111 hashno = TTE32M; 5112 continue; 5113 } 5114 hashno = TTE256M; 5115 continue; 5116 } else { 5117 hashno = TTE4M; 5118 continue; 5119 } 5120 } 5121 if (hmeblkp->hblk_shw_bit) { 5122 /* 5123 * If we encounter a shadow hmeblk we know there is 5124 * smaller sized hmeblks mapping the same address space. 5125 * Decrement the hash size and rehash. 5126 */ 5127 ASSERT(sfmmup != KHATID); 5128 hashno--; 5129 SFMMU_HASH_UNLOCK(hmebp); 5130 continue; 5131 } 5132 5133 /* 5134 * track callback address ranges. 
5135 * only start a new range when it's not contiguous 5136 */ 5137 if (callback != NULL) { 5138 if (addr_count > 0 && 5139 addr == cb_end_addr[addr_count - 1]) 5140 --addr_count; 5141 else 5142 cb_start_addr[addr_count] = addr; 5143 } 5144 5145 addr = sfmmu_hblk_unload(sfmmup, hmeblkp, addr, endaddr, 5146 dmrp, flags); 5147 5148 if (callback != NULL) 5149 cb_end_addr[addr_count++] = addr; 5150 5151 if (((flags & HAT_UNLOAD_UNMAP) || (iskernel && !issegkmap)) && 5152 !hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 5153 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, 5154 pr_hblk); 5155 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 5156 } 5157 SFMMU_HASH_UNLOCK(hmebp); 5158 5159 /* 5160 * Notify our caller as to exactly which pages 5161 * have been unloaded. We do these in clumps, 5162 * to minimize the number of xt_sync()s that need to occur. 5163 */ 5164 if (callback != NULL && addr_count == MAX_CB_ADDR) { 5165 DEMAP_RANGE_FLUSH(dmrp); 5166 if (dmrp != NULL) { 5167 cpuset = sfmmup->sfmmu_cpusran; 5168 xt_sync(cpuset); 5169 } 5170 5171 for (a = 0; a < MAX_CB_ADDR; ++a) { 5172 callback->hcb_start_addr = cb_start_addr[a]; 5173 callback->hcb_end_addr = cb_end_addr[a]; 5174 callback->hcb_function(callback); 5175 } 5176 addr_count = 0; 5177 } 5178 if (iskernel) { 5179 hashno = TTE64K; 5180 continue; 5181 } 5182 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 5183 ASSERT(hashno == TTE64K); 5184 continue; 5185 } 5186 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 5187 hashno = TTE512K; 5188 continue; 5189 } 5190 if (mmu_page_sizes == max_mmu_page_sizes) { 5191 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 5192 hashno = TTE4M; 5193 continue; 5194 } 5195 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 5196 hashno = TTE32M; 5197 continue; 5198 } 5199 hashno = TTE256M; 5200 } else { 5201 hashno = TTE4M; 5202 } 5203 } 5204 5205 sfmmu_hblks_list_purge(&list); 5206 DEMAP_RANGE_FLUSH(dmrp); 5207 if (dmrp != NULL) { 5208 cpuset = sfmmup->sfmmu_cpusran; 5209 xt_sync(cpuset); 5210 } 5211 if (callback && addr_count != 0) { 5212 for (a = 0; a < addr_count; ++a) { 5213 callback->hcb_start_addr = cb_start_addr[a]; 5214 callback->hcb_end_addr = cb_end_addr[a]; 5215 callback->hcb_function(callback); 5216 } 5217 } 5218 5219 /* 5220 * Check TSB and TLB page sizes if the process isn't exiting. 5221 */ 5222 if (!sfmmup->sfmmu_free) 5223 sfmmu_check_page_sizes(sfmmup, 0); 5224 } 5225 5226 /* 5227 * Unload all the mappings in the range [addr..addr+len). addr and len must 5228 * be MMU_PAGESIZE aligned. 5229 */ 5230 void 5231 hat_unload(struct hat *sfmmup, caddr_t addr, size_t len, uint_t flags) 5232 { 5233 if (sfmmup->sfmmu_xhat_provider) { 5234 XHAT_UNLOAD(sfmmup, addr, len, flags); 5235 return; 5236 } 5237 hat_unload_callback(sfmmup, addr, len, flags, NULL); 5238 } 5239 5240 5241 /* 5242 * Find the largest mapping size for this page. 5243 */ 5244 int 5245 fnd_mapping_sz(page_t *pp) 5246 { 5247 int sz; 5248 int p_index; 5249 5250 p_index = PP_MAPINDEX(pp); 5251 5252 sz = 0; 5253 p_index >>= 1; /* don't care about 8K bit */ 5254 for (; p_index; p_index >>= 1) { 5255 sz++; 5256 } 5257 5258 return (sz); 5259 } 5260 5261 /* 5262 * This function unloads a range of addresses for an hmeblk. 5263 * It returns the next address to be unloaded. 5264 * It should be called with the hash lock held. 
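 *
 * Sketch of the caller pattern used by hat_unload_callback() above
 * (abbreviated; error and bookkeeping paths omitted):
 *
 *	SFMMU_HASH_LOCK(hmebp);
 *	...
 *	addr = sfmmu_hblk_unload(sfmmup, hmeblkp, addr, endaddr,
 *	    dmrp, flags);
 *	...
 *	SFMMU_HASH_UNLOCK(hmebp);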
5265 */ 5266 static caddr_t 5267 sfmmu_hblk_unload(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 5268 caddr_t endaddr, demap_range_t *dmrp, uint_t flags) 5269 { 5270 tte_t tte, ttemod; 5271 struct sf_hment *sfhmep; 5272 int ttesz; 5273 long ttecnt; 5274 page_t *pp; 5275 kmutex_t *pml; 5276 int ret; 5277 int use_demap_range; 5278 5279 ASSERT(in_hblk_range(hmeblkp, addr)); 5280 ASSERT(!hmeblkp->hblk_shw_bit); 5281 #ifdef DEBUG 5282 if (get_hblk_ttesz(hmeblkp) != TTE8K && 5283 (endaddr < get_hblk_endaddr(hmeblkp))) { 5284 panic("sfmmu_hblk_unload: partial unload of large page"); 5285 } 5286 #endif /* DEBUG */ 5287 5288 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 5289 ttesz = get_hblk_ttesz(hmeblkp); 5290 5291 use_demap_range = (do_virtual_coloring && 5292 ((dmrp == NULL) || TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp))); 5293 if (use_demap_range) { 5294 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 5295 } else { 5296 DEMAP_RANGE_FLUSH(dmrp); 5297 } 5298 ttecnt = 0; 5299 HBLKTOHME(sfhmep, hmeblkp, addr); 5300 5301 while (addr < endaddr) { 5302 pml = NULL; 5303 again: 5304 sfmmu_copytte(&sfhmep->hme_tte, &tte); 5305 if (TTE_IS_VALID(&tte)) { 5306 pp = sfhmep->hme_page; 5307 if (pp && pml == NULL) { 5308 pml = sfmmu_mlist_enter(pp); 5309 } 5310 5311 /* 5312 * Verify if hme still points to 'pp' now that 5313 * we have p_mapping lock. 5314 */ 5315 if (sfhmep->hme_page != pp) { 5316 if (pp != NULL && sfhmep->hme_page != NULL) { 5317 if (pml) { 5318 sfmmu_mlist_exit(pml); 5319 } 5320 /* Re-start this iteration. */ 5321 continue; 5322 } 5323 ASSERT((pp != NULL) && 5324 (sfhmep->hme_page == NULL)); 5325 goto tte_unloaded; 5326 } 5327 5328 /* 5329 * This point on we have both HASH and p_mapping 5330 * lock. 5331 */ 5332 ASSERT(pp == sfhmep->hme_page); 5333 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 5334 5335 /* 5336 * We need to loop on modify tte because it is 5337 * possible for pagesync to come along and 5338 * change the software bits beneath us. 5339 * 5340 * Page_unload can also invalidate the tte after 5341 * we read tte outside of p_mapping lock. 5342 */ 5343 ttemod = tte; 5344 5345 TTE_SET_INVALID(&ttemod); 5346 ret = sfmmu_modifytte_try(&tte, &ttemod, 5347 &sfhmep->hme_tte); 5348 5349 if (ret <= 0) { 5350 if (TTE_IS_VALID(&tte)) { 5351 goto again; 5352 } else { 5353 /* 5354 * We read in a valid pte, but it 5355 * is unloaded by page_unload. 5356 * hme_page has become NULL and 5357 * we hold no p_mapping lock. 5358 */ 5359 ASSERT(pp == NULL && pml == NULL); 5360 goto tte_unloaded; 5361 } 5362 } 5363 5364 if (!(flags & HAT_UNLOAD_NOSYNC)) { 5365 sfmmu_ttesync(sfmmup, addr, &tte, pp); 5366 } 5367 5368 /* 5369 * Ok- we invalidated the tte. Do the rest of the job. 5370 */ 5371 ttecnt++; 5372 5373 if (flags & HAT_UNLOAD_UNLOCK) { 5374 ASSERT(hmeblkp->hblk_lckcnt > 0); 5375 atomic_add_16(&hmeblkp->hblk_lckcnt, -1); 5376 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK); 5377 } 5378 5379 /* 5380 * Normally we would need to flush the page 5381 * from the virtual cache at this point in 5382 * order to prevent a potential cache alias 5383 * inconsistency. 5384 * The particular scenario we need to worry 5385 * about is: 5386 * Given: va1 and va2 are two virtual address 5387 * that alias and map the same physical 5388 * address. 5389 * 1. mapping exists from va1 to pa and data 5390 * has been read into the cache. 5391 * 2. unload va1. 5392 * 3. load va2 and modify data using va2. 5393 * 4 unload va2. 5394 * 5. load va1 and reference data. 
Unless we 5395 * flush the data cache when we unload we will 5396 * get stale data. 5397 * Fortunately, page coloring eliminates the 5398 * above scenario by remembering the color a 5399 * physical page was last or is currently 5400 * mapped to. Now, we delay the flush until 5401 * the loading of translations. Only when the 5402 * new translation is of a different color 5403 * are we forced to flush. 5404 */ 5405 if (use_demap_range) { 5406 /* 5407 * Mark this page as needing a demap. 5408 */ 5409 DEMAP_RANGE_MARKPG(dmrp, addr); 5410 } else { 5411 if (do_virtual_coloring) { 5412 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 5413 sfmmup->sfmmu_free, 0); 5414 } else { 5415 pfn_t pfnum; 5416 5417 pfnum = TTE_TO_PFN(addr, &tte); 5418 sfmmu_tlbcache_demap(addr, sfmmup, 5419 hmeblkp, pfnum, sfmmup->sfmmu_free, 5420 FLUSH_NECESSARY_CPUS, 5421 CACHE_FLUSH, 0); 5422 } 5423 } 5424 5425 if (pp) { 5426 /* 5427 * Remove the hment from the mapping list 5428 */ 5429 ASSERT(hmeblkp->hblk_hmecnt > 0); 5430 5431 /* 5432 * Again, we cannot 5433 * ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS); 5434 */ 5435 HME_SUB(sfhmep, pp); 5436 membar_stst(); 5437 atomic_add_16(&hmeblkp->hblk_hmecnt, -1); 5438 } 5439 5440 ASSERT(hmeblkp->hblk_vcnt > 0); 5441 atomic_add_16(&hmeblkp->hblk_vcnt, -1); 5442 5443 ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt || 5444 !hmeblkp->hblk_lckcnt); 5445 5446 #ifdef VAC 5447 if (pp && (pp->p_nrm & (P_KPMC | P_KPMS | P_TNC))) { 5448 if (PP_ISTNC(pp)) { 5449 /* 5450 * If page was temporary 5451 * uncached, try to recache 5452 * it. Note that HME_SUB() was 5453 * called above so p_index and 5454 * mlist had been updated. 5455 */ 5456 conv_tnc(pp, ttesz); 5457 } else if (pp->p_mapping == NULL) { 5458 ASSERT(kpm_enable); 5459 /* 5460 * Page is marked to be in VAC conflict 5461 * to an existing kpm mapping and/or is 5462 * kpm mapped using only the regular 5463 * pagesize. 5464 */ 5465 sfmmu_kpm_hme_unload(pp); 5466 } 5467 } 5468 #endif /* VAC */ 5469 } else if ((pp = sfhmep->hme_page) != NULL) { 5470 /* 5471 * TTE is invalid but the hme 5472 * still exists. let pageunload 5473 * complete its job. 5474 */ 5475 ASSERT(pml == NULL); 5476 pml = sfmmu_mlist_enter(pp); 5477 if (sfhmep->hme_page != NULL) { 5478 sfmmu_mlist_exit(pml); 5479 pml = NULL; 5480 goto again; 5481 } 5482 ASSERT(sfhmep->hme_page == NULL); 5483 } else if (hmeblkp->hblk_hmecnt != 0) { 5484 /* 5485 * pageunload may have not finished decrementing 5486 * hblk_vcnt and hblk_hmecnt. Find page_t if any and 5487 * wait for pageunload to finish. Rely on pageunload 5488 * to decrement hblk_hmecnt after hblk_vcnt. 5489 */ 5490 pfn_t pfn = TTE_TO_TTEPFN(&tte); 5491 ASSERT(pml == NULL); 5492 if (pf_is_memory(pfn)) { 5493 pp = page_numtopp_nolock(pfn); 5494 if (pp != NULL) { 5495 pml = sfmmu_mlist_enter(pp); 5496 sfmmu_mlist_exit(pml); 5497 pml = NULL; 5498 } 5499 } 5500 } 5501 5502 tte_unloaded: 5503 /* 5504 * At this point, the tte we are looking at 5505 * should be unloaded, and hme has been unlinked 5506 * from page too. This is important because in 5507 * pageunload, it does ttesync() then HME_SUB. 5508 * We need to make sure HME_SUB has been completed 5509 * so we know ttesync() has been completed. Otherwise, 5510 * at exit time, after return from hat layer, VM will 5511 * release as structure which hat_setstat() (called 5512 * by ttesync()) needs. 
5513 */ 5514 #ifdef DEBUG 5515 { 5516 tte_t dtte; 5517 5518 ASSERT(sfhmep->hme_page == NULL); 5519 5520 sfmmu_copytte(&sfhmep->hme_tte, &dtte); 5521 ASSERT(!TTE_IS_VALID(&dtte)); 5522 } 5523 #endif 5524 5525 if (pml) { 5526 sfmmu_mlist_exit(pml); 5527 } 5528 5529 addr += TTEBYTES(ttesz); 5530 sfhmep++; 5531 DEMAP_RANGE_NEXTPG(dmrp); 5532 } 5533 if (ttecnt > 0) 5534 atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -ttecnt); 5535 return (addr); 5536 } 5537 5538 /* 5539 * Synchronize all the mappings in the range [addr..addr+len). 5540 * Can be called with clearflag having two states: 5541 * HAT_SYNC_DONTZERO means just return the rm stats 5542 * HAT_SYNC_ZERORM means zero rm bits in the tte and return the stats 5543 */ 5544 void 5545 hat_sync(struct hat *sfmmup, caddr_t addr, size_t len, uint_t clearflag) 5546 { 5547 struct hmehash_bucket *hmebp; 5548 hmeblk_tag hblktag; 5549 int hmeshift, hashno = 1; 5550 struct hme_blk *hmeblkp, *list = NULL; 5551 caddr_t endaddr; 5552 cpuset_t cpuset; 5553 5554 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 5555 ASSERT((sfmmup == ksfmmup) || 5556 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 5557 ASSERT((len & MMU_PAGEOFFSET) == 0); 5558 ASSERT((clearflag == HAT_SYNC_DONTZERO) || 5559 (clearflag == HAT_SYNC_ZERORM)); 5560 5561 CPUSET_ZERO(cpuset); 5562 5563 endaddr = addr + len; 5564 hblktag.htag_id = sfmmup; 5565 /* 5566 * Spitfire supports 4 page sizes. 5567 * Most pages are expected to be of the smallest page 5568 * size (8K) and these will not need to be rehashed. 64K 5569 * pages also don't need to be rehashed because the an hmeblk 5570 * spans 64K of address space. 512K pages might need 1 rehash and 5571 * and 4M pages 2 rehashes. 5572 */ 5573 while (addr < endaddr) { 5574 hmeshift = HME_HASH_SHIFT(hashno); 5575 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 5576 hblktag.htag_rehash = hashno; 5577 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 5578 5579 SFMMU_HASH_LOCK(hmebp); 5580 5581 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 5582 if (hmeblkp != NULL) { 5583 /* 5584 * We've encountered a shadow hmeblk so skip the range 5585 * of the next smaller mapping size. 5586 */ 5587 if (hmeblkp->hblk_shw_bit) { 5588 ASSERT(sfmmup != ksfmmup); 5589 ASSERT(hashno > 1); 5590 addr = (caddr_t)P2END((uintptr_t)addr, 5591 TTEBYTES(hashno - 1)); 5592 } else { 5593 addr = sfmmu_hblk_sync(sfmmup, hmeblkp, 5594 addr, endaddr, clearflag); 5595 } 5596 SFMMU_HASH_UNLOCK(hmebp); 5597 hashno = 1; 5598 continue; 5599 } 5600 SFMMU_HASH_UNLOCK(hmebp); 5601 5602 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 5603 /* 5604 * We have traversed the whole list and rehashed 5605 * if necessary without finding the address to sync. 5606 * This is ok so we increment the address by the 5607 * smallest hmeblk range for kernel mappings and the 5608 * largest hmeblk range, to account for shadow hmeblks, 5609 * for user mappings and continue. 
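 *
 * Illustrative arithmetic (the address is made up): for the kernel,
 * TTEBYTES(1) is 64K, so P2END(0x10072000, 64K) advances addr to
 * 0x10080000, the end of the 64K block containing it; for a user hat
 * the step is TTEBYTES(hashno), the range of the largest hash size
 * probed.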
5610 */ 5611 if (sfmmup == ksfmmup) 5612 addr = (caddr_t)P2END((uintptr_t)addr, 5613 TTEBYTES(1)); 5614 else 5615 addr = (caddr_t)P2END((uintptr_t)addr, 5616 TTEBYTES(hashno)); 5617 hashno = 1; 5618 } else { 5619 hashno++; 5620 } 5621 } 5622 sfmmu_hblks_list_purge(&list); 5623 cpuset = sfmmup->sfmmu_cpusran; 5624 xt_sync(cpuset); 5625 } 5626 5627 static caddr_t 5628 sfmmu_hblk_sync(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 5629 caddr_t endaddr, int clearflag) 5630 { 5631 tte_t tte, ttemod; 5632 struct sf_hment *sfhmep; 5633 int ttesz; 5634 struct page *pp; 5635 kmutex_t *pml; 5636 int ret; 5637 5638 ASSERT(hmeblkp->hblk_shw_bit == 0); 5639 5640 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 5641 5642 ttesz = get_hblk_ttesz(hmeblkp); 5643 HBLKTOHME(sfhmep, hmeblkp, addr); 5644 5645 while (addr < endaddr) { 5646 sfmmu_copytte(&sfhmep->hme_tte, &tte); 5647 if (TTE_IS_VALID(&tte)) { 5648 pml = NULL; 5649 pp = sfhmep->hme_page; 5650 if (pp) { 5651 pml = sfmmu_mlist_enter(pp); 5652 } 5653 if (pp != sfhmep->hme_page) { 5654 /* 5655 * tte most have been unloaded 5656 * underneath us. Recheck 5657 */ 5658 ASSERT(pml); 5659 sfmmu_mlist_exit(pml); 5660 continue; 5661 } 5662 5663 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 5664 5665 if (clearflag == HAT_SYNC_ZERORM) { 5666 ttemod = tte; 5667 TTE_CLR_RM(&ttemod); 5668 ret = sfmmu_modifytte_try(&tte, &ttemod, 5669 &sfhmep->hme_tte); 5670 if (ret < 0) { 5671 if (pml) { 5672 sfmmu_mlist_exit(pml); 5673 } 5674 continue; 5675 } 5676 5677 if (ret > 0) { 5678 sfmmu_tlb_demap(addr, sfmmup, 5679 hmeblkp, 0, 0); 5680 } 5681 } 5682 sfmmu_ttesync(sfmmup, addr, &tte, pp); 5683 if (pml) { 5684 sfmmu_mlist_exit(pml); 5685 } 5686 } 5687 addr += TTEBYTES(ttesz); 5688 sfhmep++; 5689 } 5690 return (addr); 5691 } 5692 5693 /* 5694 * This function will sync a tte to the page struct and it will 5695 * update the hat stats. Currently it allows us to pass a NULL pp 5696 * and we will simply update the stats. We may want to change this 5697 * so we only keep stats for pages backed by pp's. 5698 */ 5699 static void 5700 sfmmu_ttesync(struct hat *sfmmup, caddr_t addr, tte_t *ttep, page_t *pp) 5701 { 5702 uint_t rm = 0; 5703 int sz; 5704 pgcnt_t npgs; 5705 5706 ASSERT(TTE_IS_VALID(ttep)); 5707 5708 if (TTE_IS_NOSYNC(ttep)) { 5709 return; 5710 } 5711 5712 if (TTE_IS_REF(ttep)) { 5713 rm = P_REF; 5714 } 5715 if (TTE_IS_MOD(ttep)) { 5716 rm |= P_MOD; 5717 } 5718 5719 if (rm == 0) { 5720 return; 5721 } 5722 5723 sz = TTE_CSZ(ttep); 5724 if (sfmmup->sfmmu_rmstat) { 5725 int i; 5726 caddr_t vaddr = addr; 5727 5728 for (i = 0; i < TTEPAGES(sz); i++, vaddr += MMU_PAGESIZE) { 5729 hat_setstat(sfmmup->sfmmu_as, vaddr, MMU_PAGESIZE, rm); 5730 } 5731 5732 } 5733 5734 /* 5735 * XXX I want to use cas to update nrm bits but they 5736 * currently belong in common/vm and not in hat where 5737 * they should be. 5738 * The nrm bits are protected by the same mutex as 5739 * the one that protects the page's mapping list. 5740 */ 5741 if (!pp) 5742 return; 5743 ASSERT(sfmmu_mlist_held(pp)); 5744 /* 5745 * If the tte is for a large page, we need to sync all the 5746 * pages covered by the tte. 5747 */ 5748 if (sz != TTE8K) { 5749 ASSERT(pp->p_szc != 0); 5750 pp = PP_GROUPLEADER(pp, sz); 5751 ASSERT(sfmmu_mlist_held(pp)); 5752 } 5753 5754 /* Get number of pages from tte size. 
*/ 5755 npgs = TTEPAGES(sz); 5756 5757 do { 5758 ASSERT(pp); 5759 ASSERT(sfmmu_mlist_held(pp)); 5760 if (((rm & P_REF) != 0 && !PP_ISREF(pp)) || 5761 ((rm & P_MOD) != 0 && !PP_ISMOD(pp))) 5762 hat_page_setattr(pp, rm); 5763 5764 /* 5765 * Are we done? If not, we must have a large mapping. 5766 * For large mappings we need to sync the rest of the pages 5767 * covered by this tte; goto the next page. 5768 */ 5769 } while (--npgs > 0 && (pp = PP_PAGENEXT(pp))); 5770 } 5771 5772 /* 5773 * Execute pre-callback handler of each pa_hment linked to pp 5774 * 5775 * Inputs: 5776 * flag: either HAT_PRESUSPEND or HAT_SUSPEND. 5777 * capture_cpus: pointer to return value (below) 5778 * 5779 * Returns: 5780 * Propagates the subsystem callback return values back to the caller; 5781 * returns 0 on success. If capture_cpus is non-NULL, the value returned 5782 * is zero if all of the pa_hments are of a type that do not require 5783 * capturing CPUs prior to suspending the mapping, else it is 1. 5784 */ 5785 static int 5786 hat_pageprocess_precallbacks(struct page *pp, uint_t flag, int *capture_cpus) 5787 { 5788 struct sf_hment *sfhmep; 5789 struct pa_hment *pahmep; 5790 int (*f)(caddr_t, uint_t, uint_t, void *); 5791 int ret; 5792 id_t id; 5793 int locked = 0; 5794 kmutex_t *pml; 5795 5796 ASSERT(PAGE_EXCL(pp)); 5797 if (!sfmmu_mlist_held(pp)) { 5798 pml = sfmmu_mlist_enter(pp); 5799 locked = 1; 5800 } 5801 5802 if (capture_cpus) 5803 *capture_cpus = 0; 5804 5805 top: 5806 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 5807 /* 5808 * skip sf_hments corresponding to VA<->PA mappings; 5809 * for pa_hment's, hme_tte.ll is zero 5810 */ 5811 if (!IS_PAHME(sfhmep)) 5812 continue; 5813 5814 pahmep = sfhmep->hme_data; 5815 ASSERT(pahmep != NULL); 5816 5817 /* 5818 * skip if pre-handler has been called earlier in this loop 5819 */ 5820 if (pahmep->flags & flag) 5821 continue; 5822 5823 id = pahmep->cb_id; 5824 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid); 5825 if (capture_cpus && sfmmu_cb_table[id].capture_cpus != 0) 5826 *capture_cpus = 1; 5827 if ((f = sfmmu_cb_table[id].prehandler) == NULL) { 5828 pahmep->flags |= flag; 5829 continue; 5830 } 5831 5832 /* 5833 * Drop the mapping list lock to avoid locking order issues. 5834 */ 5835 if (locked) 5836 sfmmu_mlist_exit(pml); 5837 5838 ret = f(pahmep->addr, pahmep->len, flag, pahmep->pvt); 5839 if (ret != 0) 5840 return (ret); /* caller must do the cleanup */ 5841 5842 if (locked) { 5843 pml = sfmmu_mlist_enter(pp); 5844 pahmep->flags |= flag; 5845 goto top; 5846 } 5847 5848 pahmep->flags |= flag; 5849 } 5850 5851 if (locked) 5852 sfmmu_mlist_exit(pml); 5853 5854 return (0); 5855 } 5856 5857 /* 5858 * Execute post-callback handler of each pa_hment linked to pp 5859 * 5860 * Same overall assumptions and restrictions apply as for 5861 * hat_pageprocess_precallbacks(). 
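 *
 * The post-handler receives the constituent pfn computed below as
 * newpfn = pgpfn | (btop(pahmep->addr) & pgmask). Illustrative
 * arithmetic: for a 4M page made up of 8K constituents, pgmask is
 * btop(4M) - 1 = 511, so the low nine bits of the callback address's
 * page number select one of the 512 constituent pages.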
5862 */ 5863 static void 5864 hat_pageprocess_postcallbacks(struct page *pp, uint_t flag) 5865 { 5866 pfn_t pgpfn = pp->p_pagenum; 5867 pfn_t pgmask = btop(page_get_pagesize(pp->p_szc)) - 1; 5868 pfn_t newpfn; 5869 struct sf_hment *sfhmep; 5870 struct pa_hment *pahmep; 5871 int (*f)(caddr_t, uint_t, uint_t, void *, pfn_t); 5872 id_t id; 5873 int locked = 0; 5874 kmutex_t *pml; 5875 5876 ASSERT(PAGE_EXCL(pp)); 5877 if (!sfmmu_mlist_held(pp)) { 5878 pml = sfmmu_mlist_enter(pp); 5879 locked = 1; 5880 } 5881 5882 top: 5883 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 5884 /* 5885 * skip sf_hments corresponding to VA<->PA mappings; 5886 * for pa_hment's, hme_tte.ll is zero 5887 */ 5888 if (!IS_PAHME(sfhmep)) 5889 continue; 5890 5891 pahmep = sfhmep->hme_data; 5892 ASSERT(pahmep != NULL); 5893 5894 if ((pahmep->flags & flag) == 0) 5895 continue; 5896 5897 pahmep->flags &= ~flag; 5898 5899 id = pahmep->cb_id; 5900 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid); 5901 if ((f = sfmmu_cb_table[id].posthandler) == NULL) 5902 continue; 5903 5904 /* 5905 * Convert the base page PFN into the constituent PFN 5906 * which is needed by the callback handler. 5907 */ 5908 newpfn = pgpfn | (btop((uintptr_t)pahmep->addr) & pgmask); 5909 5910 /* 5911 * Drop the mapping list lock to avoid locking order issues. 5912 */ 5913 if (locked) 5914 sfmmu_mlist_exit(pml); 5915 5916 if (f(pahmep->addr, pahmep->len, flag, pahmep->pvt, newpfn) 5917 != 0) 5918 panic("sfmmu: posthandler failed"); 5919 5920 if (locked) { 5921 pml = sfmmu_mlist_enter(pp); 5922 goto top; 5923 } 5924 } 5925 5926 if (locked) 5927 sfmmu_mlist_exit(pml); 5928 } 5929 5930 /* 5931 * Suspend locked kernel mapping 5932 */ 5933 void 5934 hat_pagesuspend(struct page *pp) 5935 { 5936 struct sf_hment *sfhmep; 5937 sfmmu_t *sfmmup; 5938 tte_t tte, ttemod; 5939 struct hme_blk *hmeblkp; 5940 caddr_t addr; 5941 int index, cons; 5942 cpuset_t cpuset; 5943 5944 ASSERT(PAGE_EXCL(pp)); 5945 ASSERT(sfmmu_mlist_held(pp)); 5946 5947 mutex_enter(&kpr_suspendlock); 5948 5949 /* 5950 * Call into dtrace to tell it we're about to suspend a 5951 * kernel mapping. This prevents us from running into issues 5952 * with probe context trying to touch a suspended page 5953 * in the relocation codepath itself. 5954 */ 5955 if (dtrace_kreloc_init) 5956 (*dtrace_kreloc_init)(); 5957 5958 index = PP_MAPINDEX(pp); 5959 cons = TTE8K; 5960 5961 retry: 5962 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 5963 5964 if (IS_PAHME(sfhmep)) 5965 continue; 5966 5967 if (get_hblk_ttesz(sfmmu_hmetohblk(sfhmep)) != cons) 5968 continue; 5969 5970 /* 5971 * Loop until we successfully set the suspend bit in 5972 * the TTE. 5973 */ 5974 again: 5975 sfmmu_copytte(&sfhmep->hme_tte, &tte); 5976 ASSERT(TTE_IS_VALID(&tte)); 5977 5978 ttemod = tte; 5979 TTE_SET_SUSPEND(&ttemod); 5980 if (sfmmu_modifytte_try(&tte, &ttemod, 5981 &sfhmep->hme_tte) < 0) 5982 goto again; 5983 5984 /* 5985 * Invalidate TSB entry 5986 */ 5987 hmeblkp = sfmmu_hmetohblk(sfhmep); 5988 5989 sfmmup = hblktosfmmu(hmeblkp); 5990 ASSERT(sfmmup == ksfmmup); 5991 5992 addr = tte_to_vaddr(hmeblkp, tte); 5993 5994 /* 5995 * No need to make sure that the TSB for this sfmmu is 5996 * not being relocated since it is ksfmmup and thus it 5997 * will never be relocated. 
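 *
 * Once the suspend bit has been set (via the usual sfmmu_copytte /
 * sfmmu_modifytte_try retry loop above), the stale translation is purged:
 * the TSB entry is unloaded below, the TLB entry is demapped on all other
 * ready CPUs with xt_some(), and finally on the local CPU with
 * vtag_flushpage(). This is only a restatement of the steps that follow,
 * for readability.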
5998 */ 5999 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp); 6000 6001 /* 6002 * Update xcall stats 6003 */ 6004 cpuset = cpu_ready_set; 6005 CPUSET_DEL(cpuset, CPU->cpu_id); 6006 6007 /* LINTED: constant in conditional context */ 6008 SFMMU_XCALL_STATS(ksfmmup); 6009 6010 /* 6011 * Flush TLB entry on remote CPU's 6012 */ 6013 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, 6014 (uint64_t)ksfmmup); 6015 xt_sync(cpuset); 6016 6017 /* 6018 * Flush TLB entry on local CPU 6019 */ 6020 vtag_flushpage(addr, (uint64_t)ksfmmup); 6021 } 6022 6023 while (index != 0) { 6024 index = index >> 1; 6025 if (index != 0) 6026 cons++; 6027 if (index & 0x1) { 6028 pp = PP_GROUPLEADER(pp, cons); 6029 goto retry; 6030 } 6031 } 6032 } 6033 6034 #ifdef DEBUG 6035 6036 #define N_PRLE 1024 6037 struct prle { 6038 page_t *targ; 6039 page_t *repl; 6040 int status; 6041 int pausecpus; 6042 hrtime_t whence; 6043 }; 6044 6045 static struct prle page_relocate_log[N_PRLE]; 6046 static int prl_entry; 6047 static kmutex_t prl_mutex; 6048 6049 #define PAGE_RELOCATE_LOG(t, r, s, p) \ 6050 mutex_enter(&prl_mutex); \ 6051 page_relocate_log[prl_entry].targ = *(t); \ 6052 page_relocate_log[prl_entry].repl = *(r); \ 6053 page_relocate_log[prl_entry].status = (s); \ 6054 page_relocate_log[prl_entry].pausecpus = (p); \ 6055 page_relocate_log[prl_entry].whence = gethrtime(); \ 6056 prl_entry = (prl_entry == (N_PRLE - 1))? 0 : prl_entry + 1; \ 6057 mutex_exit(&prl_mutex); 6058 6059 #else /* !DEBUG */ 6060 #define PAGE_RELOCATE_LOG(t, r, s, p) 6061 #endif 6062 6063 /* 6064 * Core Kernel Page Relocation Algorithm 6065 * 6066 * Input: 6067 * 6068 * target : constituent pages are SE_EXCL locked. 6069 * replacement: constituent pages are SE_EXCL locked. 6070 * 6071 * Output: 6072 * 6073 * nrelocp: number of pages relocated 6074 */ 6075 int 6076 hat_page_relocate(page_t **target, page_t **replacement, spgcnt_t *nrelocp) 6077 { 6078 page_t *targ, *repl; 6079 page_t *tpp, *rpp; 6080 kmutex_t *low, *high; 6081 spgcnt_t npages, i; 6082 page_t *pl = NULL; 6083 int old_pil; 6084 cpuset_t cpuset; 6085 int cap_cpus; 6086 int ret; 6087 6088 if (hat_kpr_enabled == 0 || !kcage_on || PP_ISNORELOC(*target)) { 6089 PAGE_RELOCATE_LOG(target, replacement, EAGAIN, -1); 6090 return (EAGAIN); 6091 } 6092 6093 mutex_enter(&kpr_mutex); 6094 kreloc_thread = curthread; 6095 6096 targ = *target; 6097 repl = *replacement; 6098 ASSERT(repl != NULL); 6099 ASSERT(targ->p_szc == repl->p_szc); 6100 6101 npages = page_get_pagecnt(targ->p_szc); 6102 6103 /* 6104 * unload VA<->PA mappings that are not locked 6105 */ 6106 tpp = targ; 6107 for (i = 0; i < npages; i++) { 6108 (void) hat_pageunload(tpp, SFMMU_KERNEL_RELOC); 6109 tpp++; 6110 } 6111 6112 /* 6113 * Do "presuspend" callbacks, in a context from which we can still 6114 * block as needed. Note that we don't hold the mapping list lock 6115 * of "targ" at this point due to potential locking order issues; 6116 * we assume that between the hat_pageunload() above and holding 6117 * the SE_EXCL lock that the mapping list *cannot* change at this 6118 * point. 6119 */ 6120 ret = hat_pageprocess_precallbacks(targ, HAT_PRESUSPEND, &cap_cpus); 6121 if (ret != 0) { 6122 /* 6123 * EIO translates to fatal error, for all others cleanup 6124 * and return EAGAIN. 
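 * The cleanup below runs the HAT_POSTUNSUSPEND callbacks on the target
 * page, logs the failure, drops kpr_mutex and hands EAGAIN back to the
 * caller, which is then free to retry or abandon the relocation.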
6125 */ 6126 ASSERT(ret != EIO); 6127 hat_pageprocess_postcallbacks(targ, HAT_POSTUNSUSPEND); 6128 PAGE_RELOCATE_LOG(target, replacement, ret, -1); 6129 kreloc_thread = NULL; 6130 mutex_exit(&kpr_mutex); 6131 return (EAGAIN); 6132 } 6133 6134 /* 6135 * acquire p_mapping list lock for both the target and replacement 6136 * root pages. 6137 * 6138 * low and high refer to the need to grab the mlist locks in a 6139 * specific order in order to prevent race conditions. Thus the 6140 * lower lock must be grabbed before the higher lock. 6141 * 6142 * This will block hat_unload's accessing p_mapping list. Since 6143 * we have SE_EXCL lock, hat_memload and hat_pageunload will be 6144 * blocked. Thus, no one else will be accessing the p_mapping list 6145 * while we suspend and reload the locked mapping below. 6146 */ 6147 tpp = targ; 6148 rpp = repl; 6149 sfmmu_mlist_reloc_enter(tpp, rpp, &low, &high); 6150 6151 kpreempt_disable(); 6152 6153 #ifdef VAC 6154 /* 6155 * If the replacement page is of a different virtual color 6156 * than the page it is replacing, we need to handle the VAC 6157 * consistency for it just as we would if we were setting up 6158 * a new mapping to a page. 6159 */ 6160 if ((tpp->p_szc == 0) && (PP_GET_VCOLOR(rpp) != NO_VCOLOR)) { 6161 if (tpp->p_vcolor != rpp->p_vcolor) { 6162 sfmmu_cache_flushcolor(PP_GET_VCOLOR(rpp), 6163 rpp->p_pagenum); 6164 } 6165 } 6166 #endif 6167 6168 /* 6169 * We raise our PIL to 13 so that we don't get captured by 6170 * another CPU or pinned by an interrupt thread. We can't go to 6171 * PIL 14 since the nexus driver(s) may need to interrupt at 6172 * that level in the case of IOMMU pseudo mappings. 6173 */ 6174 cpuset = cpu_ready_set; 6175 CPUSET_DEL(cpuset, CPU->cpu_id); 6176 if (!cap_cpus || CPUSET_ISNULL(cpuset)) { 6177 old_pil = splr(XCALL_PIL); 6178 } else { 6179 old_pil = -1; 6180 xc_attention(cpuset); 6181 } 6182 ASSERT(getpil() == XCALL_PIL); 6183 6184 /* 6185 * Now do suspend callbacks. In the case of an IOMMU mapping 6186 * this will suspend all DMA activity to the page while it is 6187 * being relocated. Since we are well above LOCK_LEVEL and CPUs 6188 * may be captured at this point we should have acquired any needed 6189 * locks in the presuspend callback. 6190 */ 6191 ret = hat_pageprocess_precallbacks(targ, HAT_SUSPEND, NULL); 6192 if (ret != 0) { 6193 repl = targ; 6194 goto suspend_fail; 6195 } 6196 6197 /* 6198 * Raise the PIL yet again, this time to block all high-level 6199 * interrupts on this CPU. This is necessary to prevent an 6200 * interrupt routine from pinning the thread which holds the 6201 * mapping suspended and then touching the suspended page. 6202 * 6203 * Once the page is suspended we also need to be careful to 6204 * avoid calling any functions which touch any seg_kmem memory 6205 * since that memory may be backed by the very page we are 6206 * relocating in here! 6207 */ 6208 hat_pagesuspend(targ); 6209 6210 /* 6211 * Now that we are confident everybody has stopped using this page, 6212 * copy the page contents. Note we use a physical copy to prevent 6213 * locking issues and to avoid fpRAS because we can't handle it in 6214 * this context. 6215 */ 6216 for (i = 0; i < npages; i++, tpp++, rpp++) { 6217 /* 6218 * Copy the contents of the page. 6219 */ 6220 ppcopy_kernel(tpp, rpp); 6221 } 6222 6223 tpp = targ; 6224 rpp = repl; 6225 for (i = 0; i < npages; i++, tpp++, rpp++) { 6226 /* 6227 * Copy attributes. VAC consistency was handled above, 6228 * if required. 
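 * Concretely, the fields moved below are the ref/mod/ro bits (p_nrm) and
 * the large-page mapping index (p_index), both cleared on the target as
 * they are handed over, plus the virtual color (p_vcolor) on VAC
 * machines.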
6229 */ 6230 rpp->p_nrm = tpp->p_nrm; 6231 tpp->p_nrm = 0; 6232 rpp->p_index = tpp->p_index; 6233 tpp->p_index = 0; 6234 #ifdef VAC 6235 rpp->p_vcolor = tpp->p_vcolor; 6236 #endif 6237 } 6238 6239 /* 6240 * First, unsuspend the page, if we set the suspend bit, and transfer 6241 * the mapping list from the target page to the replacement page. 6242 * Next process postcallbacks; since pa_hment's are linked only to the 6243 * p_mapping list of root page, we don't iterate over the constituent 6244 * pages. 6245 */ 6246 hat_pagereload(targ, repl); 6247 6248 suspend_fail: 6249 hat_pageprocess_postcallbacks(repl, HAT_UNSUSPEND); 6250 6251 /* 6252 * Now lower our PIL and release any captured CPUs since we 6253 * are out of the "danger zone". After this it will again be 6254 * safe to acquire adaptive mutex locks, or to drop them... 6255 */ 6256 if (old_pil != -1) { 6257 splx(old_pil); 6258 } else { 6259 xc_dismissed(cpuset); 6260 } 6261 6262 kpreempt_enable(); 6263 6264 sfmmu_mlist_reloc_exit(low, high); 6265 6266 /* 6267 * Postsuspend callbacks should drop any locks held across 6268 * the suspend callbacks. As before, we don't hold the mapping 6269 * list lock at this point.. our assumption is that the mapping 6270 * list still can't change due to our holding SE_EXCL lock and 6271 * there being no unlocked mappings left. Hence the restriction 6272 * on calling context to hat_delete_callback() 6273 */ 6274 hat_pageprocess_postcallbacks(repl, HAT_POSTUNSUSPEND); 6275 if (ret != 0) { 6276 /* 6277 * The second presuspend call failed: we got here through 6278 * the suspend_fail label above. 6279 */ 6280 ASSERT(ret != EIO); 6281 PAGE_RELOCATE_LOG(target, replacement, ret, cap_cpus); 6282 kreloc_thread = NULL; 6283 mutex_exit(&kpr_mutex); 6284 return (EAGAIN); 6285 } 6286 6287 /* 6288 * Now that we're out of the performance critical section we can 6289 * take care of updating the hash table, since we still 6290 * hold all the pages locked SE_EXCL at this point we 6291 * needn't worry about things changing out from under us. 6292 */ 6293 tpp = targ; 6294 rpp = repl; 6295 for (i = 0; i < npages; i++, tpp++, rpp++) { 6296 6297 /* 6298 * replace targ with replacement in page_hash table 6299 */ 6300 targ = tpp; 6301 page_relocate_hash(rpp, targ); 6302 6303 /* 6304 * concatenate target; caller of platform_page_relocate() 6305 * expects target to be concatenated after returning. 6306 */ 6307 ASSERT(targ->p_next == targ); 6308 ASSERT(targ->p_prev == targ); 6309 page_list_concat(&pl, &targ); 6310 } 6311 6312 ASSERT(*target == pl); 6313 *nrelocp = npages; 6314 PAGE_RELOCATE_LOG(target, replacement, 0, cap_cpus); 6315 kreloc_thread = NULL; 6316 mutex_exit(&kpr_mutex); 6317 return (0); 6318 } 6319 6320 /* 6321 * Called when stray pa_hments are found attached to a page which is 6322 * being freed. Notify the subsystem which attached the pa_hment of 6323 * the error if it registered a suitable handler, else panic. 6324 */ 6325 static void 6326 sfmmu_pahment_leaked(struct pa_hment *pahmep) 6327 { 6328 id_t cb_id = pahmep->cb_id; 6329 6330 ASSERT(cb_id >= (id_t)0 && cb_id < sfmmu_cb_nextid); 6331 if (sfmmu_cb_table[cb_id].errhandler != NULL) { 6332 if (sfmmu_cb_table[cb_id].errhandler(pahmep->addr, pahmep->len, 6333 HAT_CB_ERR_LEAKED, pahmep->pvt) == 0) 6334 return; /* non-fatal */ 6335 } 6336 panic("pa_hment leaked: 0x%p", pahmep); 6337 } 6338 6339 /* 6340 * Remove all mappings to page 'pp'. 
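 *
 * In outline (restating the code that follows): every sf_hment on
 * pp->p_mapping is visited; pa_hments are only counted here, xhat
 * mappings are delegated to XHAT_PAGEUNLOAD(), locked kernel mappings
 * are left alone when forceflag is SFMMU_KERNEL_RELOC (the relocation
 * code suspends them instead), and everything else is torn down with
 * sfmmu_pageunload(). Larger mapping sizes are then handled by walking
 * PP_MAPINDEX up to each group leader, and a single xt_sync() at the
 * end waits for all of the demap cross-calls.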
6341 */ 6342 int 6343 hat_pageunload(struct page *pp, uint_t forceflag) 6344 { 6345 struct page *origpp = pp; 6346 struct sf_hment *sfhme, *tmphme; 6347 struct hme_blk *hmeblkp; 6348 kmutex_t *pml; 6349 #ifdef VAC 6350 kmutex_t *pmtx; 6351 #endif 6352 cpuset_t cpuset, tset; 6353 int index, cons; 6354 int xhme_blks; 6355 int pa_hments; 6356 6357 ASSERT(PAGE_EXCL(pp)); 6358 6359 retry_xhat: 6360 tmphme = NULL; 6361 xhme_blks = 0; 6362 pa_hments = 0; 6363 CPUSET_ZERO(cpuset); 6364 6365 pml = sfmmu_mlist_enter(pp); 6366 6367 #ifdef VAC 6368 if (pp->p_kpmref) 6369 sfmmu_kpm_pageunload(pp); 6370 ASSERT(!PP_ISMAPPED_KPM(pp)); 6371 #endif 6372 6373 index = PP_MAPINDEX(pp); 6374 cons = TTE8K; 6375 retry: 6376 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 6377 tmphme = sfhme->hme_next; 6378 6379 if (IS_PAHME(sfhme)) { 6380 ASSERT(sfhme->hme_data != NULL); 6381 pa_hments++; 6382 continue; 6383 } 6384 6385 hmeblkp = sfmmu_hmetohblk(sfhme); 6386 if (hmeblkp->hblk_xhat_bit) { 6387 struct xhat_hme_blk *xblk = 6388 (struct xhat_hme_blk *)hmeblkp; 6389 6390 (void) XHAT_PAGEUNLOAD(xblk->xhat_hme_blk_hat, 6391 pp, forceflag, XBLK2PROVBLK(xblk)); 6392 6393 xhme_blks = 1; 6394 continue; 6395 } 6396 6397 /* 6398 * If there are kernel mappings don't unload them, they will 6399 * be suspended. 6400 */ 6401 if (forceflag == SFMMU_KERNEL_RELOC && hmeblkp->hblk_lckcnt && 6402 hmeblkp->hblk_tag.htag_id == ksfmmup) 6403 continue; 6404 6405 tset = sfmmu_pageunload(pp, sfhme, cons); 6406 CPUSET_OR(cpuset, tset); 6407 } 6408 6409 while (index != 0) { 6410 index = index >> 1; 6411 if (index != 0) 6412 cons++; 6413 if (index & 0x1) { 6414 /* Go to leading page */ 6415 pp = PP_GROUPLEADER(pp, cons); 6416 ASSERT(sfmmu_mlist_held(pp)); 6417 goto retry; 6418 } 6419 } 6420 6421 /* 6422 * cpuset may be empty if the page was only mapped by segkpm, 6423 * in which case we won't actually cross-trap. 6424 */ 6425 xt_sync(cpuset); 6426 6427 /* 6428 * The page should have no mappings at this point, unless 6429 * we were called from hat_page_relocate() in which case we 6430 * leave the locked mappings which will be suspended later. 6431 */ 6432 ASSERT(!PP_ISMAPPED(origpp) || xhme_blks || pa_hments || 6433 (forceflag == SFMMU_KERNEL_RELOC)); 6434 6435 #ifdef VAC 6436 if (PP_ISTNC(pp)) { 6437 if (cons == TTE8K) { 6438 pmtx = sfmmu_page_enter(pp); 6439 PP_CLRTNC(pp); 6440 sfmmu_page_exit(pmtx); 6441 } else { 6442 conv_tnc(pp, cons); 6443 } 6444 } 6445 #endif /* VAC */ 6446 6447 if (pa_hments && forceflag != SFMMU_KERNEL_RELOC) { 6448 /* 6449 * Unlink any pa_hments and free them, calling back 6450 * the responsible subsystem to notify it of the error. 6451 * This can occur in situations such as drivers leaking 6452 * DMA handles: naughty, but common enough that we'd like 6453 * to keep the system running rather than bringing it 6454 * down with an obscure error like "pa_hment leaked" 6455 * which doesn't aid the user in debugging their driver. 6456 */ 6457 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 6458 tmphme = sfhme->hme_next; 6459 if (IS_PAHME(sfhme)) { 6460 struct pa_hment *pahmep = sfhme->hme_data; 6461 sfmmu_pahment_leaked(pahmep); 6462 HME_SUB(sfhme, pp); 6463 kmem_cache_free(pa_hment_cache, pahmep); 6464 } 6465 } 6466 6467 ASSERT(!PP_ISMAPPED(origpp) || xhme_blks); 6468 } 6469 6470 sfmmu_mlist_exit(pml); 6471 6472 /* 6473 * XHAT may not have finished unloading pages 6474 * because some other thread was waiting for 6475 * mlist lock and XHAT_PAGEUNLOAD let it do 6476 * the job. 
6477 */ 6478 if (xhme_blks) { 6479 pp = origpp; 6480 goto retry_xhat; 6481 } 6482 6483 return (0); 6484 } 6485 6486 cpuset_t 6487 sfmmu_pageunload(page_t *pp, struct sf_hment *sfhme, int cons) 6488 { 6489 struct hme_blk *hmeblkp; 6490 sfmmu_t *sfmmup; 6491 tte_t tte, ttemod; 6492 #ifdef DEBUG 6493 tte_t orig_old; 6494 #endif /* DEBUG */ 6495 caddr_t addr; 6496 int ttesz; 6497 int ret; 6498 cpuset_t cpuset; 6499 6500 ASSERT(pp != NULL); 6501 ASSERT(sfmmu_mlist_held(pp)); 6502 ASSERT(pp->p_vnode != &kvp); 6503 6504 CPUSET_ZERO(cpuset); 6505 6506 hmeblkp = sfmmu_hmetohblk(sfhme); 6507 6508 readtte: 6509 sfmmu_copytte(&sfhme->hme_tte, &tte); 6510 if (TTE_IS_VALID(&tte)) { 6511 sfmmup = hblktosfmmu(hmeblkp); 6512 ttesz = get_hblk_ttesz(hmeblkp); 6513 /* 6514 * Only unload mappings of 'cons' size. 6515 */ 6516 if (ttesz != cons) 6517 return (cpuset); 6518 6519 /* 6520 * Note that we have p_mapping lock, but no hash lock here. 6521 * hblk_unload() has to have both hash lock AND p_mapping 6522 * lock before it tries to modify tte. So, the tte could 6523 * not become invalid in the sfmmu_modifytte_try() below. 6524 */ 6525 ttemod = tte; 6526 #ifdef DEBUG 6527 orig_old = tte; 6528 #endif /* DEBUG */ 6529 6530 TTE_SET_INVALID(&ttemod); 6531 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 6532 if (ret < 0) { 6533 #ifdef DEBUG 6534 /* only R/M bits can change. */ 6535 chk_tte(&orig_old, &tte, &ttemod, hmeblkp); 6536 #endif /* DEBUG */ 6537 goto readtte; 6538 } 6539 6540 if (ret == 0) { 6541 panic("pageunload: cas failed?"); 6542 } 6543 6544 addr = tte_to_vaddr(hmeblkp, tte); 6545 6546 sfmmu_ttesync(sfmmup, addr, &tte, pp); 6547 6548 atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -1); 6549 6550 /* 6551 * We need to flush the page from the virtual cache 6552 * in order to prevent a virtual cache alias 6553 * inconsistency. The particular scenario we need 6554 * to worry about is: 6555 * Given: va1 and va2 are two virtual address that 6556 * alias and will map the same physical address. 6557 * 1. mapping exists from va1 to pa and data has 6558 * been read into the cache. 6559 * 2. unload va1. 6560 * 3. load va2 and modify data using va2. 6561 * 4 unload va2. 6562 * 5. load va1 and reference data. Unless we flush 6563 * the data cache when we unload we will get 6564 * stale data. 6565 * This scenario is taken care of by using virtual 6566 * page coloring. 6567 */ 6568 if (sfmmup->sfmmu_ismhat) { 6569 /* 6570 * Flush TSBs, TLBs and caches 6571 * of every process 6572 * sharing this ism segment. 6573 */ 6574 sfmmu_hat_lock_all(); 6575 mutex_enter(&ism_mlist_lock); 6576 kpreempt_disable(); 6577 if (do_virtual_coloring) 6578 sfmmu_ismtlbcache_demap(addr, sfmmup, hmeblkp, 6579 pp->p_pagenum, CACHE_NO_FLUSH); 6580 else 6581 sfmmu_ismtlbcache_demap(addr, sfmmup, hmeblkp, 6582 pp->p_pagenum, CACHE_FLUSH); 6583 kpreempt_enable(); 6584 mutex_exit(&ism_mlist_lock); 6585 sfmmu_hat_unlock_all(); 6586 cpuset = cpu_ready_set; 6587 } else if (do_virtual_coloring) { 6588 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 6589 cpuset = sfmmup->sfmmu_cpusran; 6590 } else { 6591 sfmmu_tlbcache_demap(addr, sfmmup, hmeblkp, 6592 pp->p_pagenum, 0, FLUSH_NECESSARY_CPUS, 6593 CACHE_FLUSH, 0); 6594 cpuset = sfmmup->sfmmu_cpusran; 6595 } 6596 6597 /* 6598 * Hme_sub has to run after ttesync() and a_rss update. 6599 * See hblk_unload(). 
6600 */ 6601 HME_SUB(sfhme, pp); 6602 membar_stst(); 6603 6604 /* 6605 * We can not make ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS) 6606 * since pteload may have done a HME_ADD() right after 6607 * we did the HME_SUB() above. Hmecnt is now maintained 6608 * by cas only. no lock guranteed its value. The only 6609 * gurantee we have is the hmecnt should not be less than 6610 * what it should be so the hblk will not be taken away. 6611 * It's also important that we decremented the hmecnt after 6612 * we are done with hmeblkp so that this hmeblk won't be 6613 * stolen. 6614 */ 6615 ASSERT(hmeblkp->hblk_hmecnt > 0); 6616 ASSERT(hmeblkp->hblk_vcnt > 0); 6617 atomic_add_16(&hmeblkp->hblk_vcnt, -1); 6618 atomic_add_16(&hmeblkp->hblk_hmecnt, -1); 6619 /* 6620 * This is bug 4063182. 6621 * XXX: fixme 6622 * ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt || 6623 * !hmeblkp->hblk_lckcnt); 6624 */ 6625 } else { 6626 panic("invalid tte? pp %p &tte %p", 6627 (void *)pp, (void *)&tte); 6628 } 6629 6630 return (cpuset); 6631 } 6632 6633 /* 6634 * While relocating a kernel page, this function will move the mappings 6635 * from tpp to dpp and modify any associated data with these mappings. 6636 * It also unsuspends the suspended kernel mapping. 6637 */ 6638 static void 6639 hat_pagereload(struct page *tpp, struct page *dpp) 6640 { 6641 struct sf_hment *sfhme; 6642 tte_t tte, ttemod; 6643 int index, cons; 6644 6645 ASSERT(getpil() == PIL_MAX); 6646 ASSERT(sfmmu_mlist_held(tpp)); 6647 ASSERT(sfmmu_mlist_held(dpp)); 6648 6649 index = PP_MAPINDEX(tpp); 6650 cons = TTE8K; 6651 6652 /* Update real mappings to the page */ 6653 retry: 6654 for (sfhme = tpp->p_mapping; sfhme != NULL; sfhme = sfhme->hme_next) { 6655 if (IS_PAHME(sfhme)) 6656 continue; 6657 sfmmu_copytte(&sfhme->hme_tte, &tte); 6658 ttemod = tte; 6659 6660 /* 6661 * replace old pfn with new pfn in TTE 6662 */ 6663 PFN_TO_TTE(ttemod, dpp->p_pagenum); 6664 6665 /* 6666 * clear suspend bit 6667 */ 6668 ASSERT(TTE_IS_SUSPEND(&ttemod)); 6669 TTE_CLR_SUSPEND(&ttemod); 6670 6671 if (sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte) < 0) 6672 panic("hat_pagereload(): sfmmu_modifytte_try() failed"); 6673 6674 /* 6675 * set hme_page point to new page 6676 */ 6677 sfhme->hme_page = dpp; 6678 } 6679 6680 /* 6681 * move p_mapping list from old page to new page 6682 */ 6683 dpp->p_mapping = tpp->p_mapping; 6684 tpp->p_mapping = NULL; 6685 dpp->p_share = tpp->p_share; 6686 tpp->p_share = 0; 6687 6688 while (index != 0) { 6689 index = index >> 1; 6690 if (index != 0) 6691 cons++; 6692 if (index & 0x1) { 6693 tpp = PP_GROUPLEADER(tpp, cons); 6694 dpp = PP_GROUPLEADER(dpp, cons); 6695 goto retry; 6696 } 6697 } 6698 6699 if (dtrace_kreloc_fini) 6700 (*dtrace_kreloc_fini)(); 6701 mutex_exit(&kpr_suspendlock); 6702 } 6703 6704 uint_t 6705 hat_pagesync(struct page *pp, uint_t clearflag) 6706 { 6707 struct sf_hment *sfhme, *tmphme = NULL; 6708 struct hme_blk *hmeblkp; 6709 kmutex_t *pml; 6710 cpuset_t cpuset, tset; 6711 int index, cons; 6712 extern ulong_t po_share; 6713 page_t *save_pp = pp; 6714 6715 CPUSET_ZERO(cpuset); 6716 6717 if (PP_ISRO(pp) && (clearflag & HAT_SYNC_STOPON_MOD)) { 6718 return (PP_GENERIC_ATTR(pp)); 6719 } 6720 6721 if ((clearflag == (HAT_SYNC_STOPON_REF | HAT_SYNC_DONTZERO)) && 6722 PP_ISREF(pp)) { 6723 return (PP_GENERIC_ATTR(pp)); 6724 } 6725 6726 if ((clearflag == (HAT_SYNC_STOPON_MOD | HAT_SYNC_DONTZERO)) && 6727 PP_ISMOD(pp)) { 6728 return (PP_GENERIC_ATTR(pp)); 6729 } 6730 6731 if ((clearflag & HAT_SYNC_STOPON_SHARED) != 0 && 6732 (pp->p_share 
> po_share) && 6733 !(clearflag & HAT_SYNC_ZERORM)) { 6734 if (PP_ISRO(pp)) 6735 hat_page_setattr(pp, P_REF); 6736 return (PP_GENERIC_ATTR(pp)); 6737 } 6738 6739 clearflag &= ~HAT_SYNC_STOPON_SHARED; 6740 pml = sfmmu_mlist_enter(pp); 6741 index = PP_MAPINDEX(pp); 6742 cons = TTE8K; 6743 retry: 6744 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 6745 /* 6746 * We need to save the next hment on the list since 6747 * it is possible for pagesync to remove an invalid hment 6748 * from the list. 6749 */ 6750 tmphme = sfhme->hme_next; 6751 /* 6752 * If we are looking for large mappings and this hme doesn't 6753 * reach the range we are seeking, just ignore its. 6754 */ 6755 hmeblkp = sfmmu_hmetohblk(sfhme); 6756 if (hmeblkp->hblk_xhat_bit) 6757 continue; 6758 6759 if (hme_size(sfhme) < cons) 6760 continue; 6761 tset = sfmmu_pagesync(pp, sfhme, 6762 clearflag & ~HAT_SYNC_STOPON_RM); 6763 CPUSET_OR(cpuset, tset); 6764 /* 6765 * If clearflag is HAT_SYNC_DONTZERO, break out as soon 6766 * as the "ref" or "mod" is set. 6767 */ 6768 if ((clearflag & ~HAT_SYNC_STOPON_RM) == HAT_SYNC_DONTZERO && 6769 ((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp)) || 6770 ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp))) { 6771 index = 0; 6772 break; 6773 } 6774 } 6775 6776 while (index) { 6777 index = index >> 1; 6778 cons++; 6779 if (index & 0x1) { 6780 /* Go to leading page */ 6781 pp = PP_GROUPLEADER(pp, cons); 6782 goto retry; 6783 } 6784 } 6785 6786 xt_sync(cpuset); 6787 sfmmu_mlist_exit(pml); 6788 return (PP_GENERIC_ATTR(save_pp)); 6789 } 6790 6791 /* 6792 * Get all the hardware dependent attributes for a page struct 6793 */ 6794 static cpuset_t 6795 sfmmu_pagesync(struct page *pp, struct sf_hment *sfhme, 6796 uint_t clearflag) 6797 { 6798 caddr_t addr; 6799 tte_t tte, ttemod; 6800 struct hme_blk *hmeblkp; 6801 int ret; 6802 sfmmu_t *sfmmup; 6803 cpuset_t cpuset; 6804 6805 ASSERT(pp != NULL); 6806 ASSERT(sfmmu_mlist_held(pp)); 6807 ASSERT((clearflag == HAT_SYNC_DONTZERO) || 6808 (clearflag == HAT_SYNC_ZERORM)); 6809 6810 SFMMU_STAT(sf_pagesync); 6811 6812 CPUSET_ZERO(cpuset); 6813 6814 sfmmu_pagesync_retry: 6815 6816 sfmmu_copytte(&sfhme->hme_tte, &tte); 6817 if (TTE_IS_VALID(&tte)) { 6818 hmeblkp = sfmmu_hmetohblk(sfhme); 6819 sfmmup = hblktosfmmu(hmeblkp); 6820 addr = tte_to_vaddr(hmeblkp, tte); 6821 if (clearflag == HAT_SYNC_ZERORM) { 6822 ttemod = tte; 6823 TTE_CLR_RM(&ttemod); 6824 ret = sfmmu_modifytte_try(&tte, &ttemod, 6825 &sfhme->hme_tte); 6826 if (ret < 0) { 6827 /* 6828 * cas failed and the new value is not what 6829 * we want. 6830 */ 6831 goto sfmmu_pagesync_retry; 6832 } 6833 6834 if (ret > 0) { 6835 /* we win the cas */ 6836 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 6837 cpuset = sfmmup->sfmmu_cpusran; 6838 } 6839 } 6840 6841 sfmmu_ttesync(sfmmup, addr, &tte, pp); 6842 } 6843 return (cpuset); 6844 } 6845 6846 /* 6847 * Remove write permission from a mappings to a page, so that 6848 * we can detect the next modification of it. This requires modifying 6849 * the TTE then invalidating (demap) any TLB entry using that TTE. 6850 * This code is similar to sfmmu_pagesync(). 
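 * Both the writable and modified bits are cleared in a single
 * sfmmu_modifytte_try() attempt, retried if the cas loses, so that the
 * next store through the mapping takes a protection fault and marks the
 * page modified again.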
6851 */ 6852 static cpuset_t 6853 sfmmu_pageclrwrt(struct page *pp, struct sf_hment *sfhme) 6854 { 6855 caddr_t addr; 6856 tte_t tte; 6857 tte_t ttemod; 6858 struct hme_blk *hmeblkp; 6859 int ret; 6860 sfmmu_t *sfmmup; 6861 cpuset_t cpuset; 6862 6863 ASSERT(pp != NULL); 6864 ASSERT(sfmmu_mlist_held(pp)); 6865 6866 CPUSET_ZERO(cpuset); 6867 SFMMU_STAT(sf_clrwrt); 6868 6869 retry: 6870 6871 sfmmu_copytte(&sfhme->hme_tte, &tte); 6872 if (TTE_IS_VALID(&tte) && TTE_IS_WRITABLE(&tte)) { 6873 hmeblkp = sfmmu_hmetohblk(sfhme); 6874 6875 /* 6876 * xhat mappings should never be to a VMODSORT page. 6877 */ 6878 ASSERT(hmeblkp->hblk_xhat_bit == 0); 6879 6880 sfmmup = hblktosfmmu(hmeblkp); 6881 addr = tte_to_vaddr(hmeblkp, tte); 6882 6883 ttemod = tte; 6884 TTE_CLR_WRT(&ttemod); 6885 TTE_CLR_MOD(&ttemod); 6886 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 6887 6888 /* 6889 * if cas failed and the new value is not what 6890 * we want retry 6891 */ 6892 if (ret < 0) 6893 goto retry; 6894 6895 /* we win the cas */ 6896 if (ret > 0) { 6897 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 6898 cpuset = sfmmup->sfmmu_cpusran; 6899 } 6900 } 6901 6902 return (cpuset); 6903 } 6904 6905 /* 6906 * Walk all mappings of a page, removing write permission and clearing the 6907 * ref/mod bits. This code is similar to hat_pagesync() 6908 */ 6909 static void 6910 hat_page_clrwrt(page_t *pp) 6911 { 6912 struct sf_hment *sfhme; 6913 struct sf_hment *tmphme = NULL; 6914 kmutex_t *pml; 6915 cpuset_t cpuset; 6916 cpuset_t tset; 6917 int index; 6918 int cons; 6919 6920 CPUSET_ZERO(cpuset); 6921 6922 pml = sfmmu_mlist_enter(pp); 6923 index = PP_MAPINDEX(pp); 6924 cons = TTE8K; 6925 retry: 6926 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 6927 tmphme = sfhme->hme_next; 6928 6929 /* 6930 * If we are looking for large mappings and this hme doesn't 6931 * reach the range we are seeking, just ignore its. 6932 */ 6933 6934 if (hme_size(sfhme) < cons) 6935 continue; 6936 6937 tset = sfmmu_pageclrwrt(pp, sfhme); 6938 CPUSET_OR(cpuset, tset); 6939 } 6940 6941 while (index) { 6942 index = index >> 1; 6943 cons++; 6944 if (index & 0x1) { 6945 /* Go to leading page */ 6946 pp = PP_GROUPLEADER(pp, cons); 6947 goto retry; 6948 } 6949 } 6950 6951 xt_sync(cpuset); 6952 sfmmu_mlist_exit(pml); 6953 } 6954 6955 /* 6956 * Set the given REF/MOD/RO bits for the given page. 6957 * For a vnode with a sorted v_pages list, we need to change 6958 * the attributes and the v_pages list together under page_vnode_mutex. 6959 */ 6960 void 6961 hat_page_setattr(page_t *pp, uint_t flag) 6962 { 6963 vnode_t *vp = pp->p_vnode; 6964 page_t **listp; 6965 kmutex_t *pmtx; 6966 kmutex_t *vphm = NULL; 6967 6968 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 6969 6970 /* 6971 * nothing to do if attribute already set 6972 */ 6973 if ((pp->p_nrm & flag) == flag) 6974 return; 6975 6976 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) { 6977 vphm = page_vnode_mutex(vp); 6978 mutex_enter(vphm); 6979 } 6980 6981 pmtx = sfmmu_page_enter(pp); 6982 pp->p_nrm |= flag; 6983 sfmmu_page_exit(pmtx); 6984 6985 if (vphm != NULL) { 6986 /* 6987 * Some File Systems examine v_pages for NULL w/o 6988 * grabbing the vphm mutex. Must not let it become NULL when 6989 * pp is the only page on the list. 
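 * When the page is dirtied it is unlinked with page_vpsub() and re-added
 * at the tail of v_pages (the insertion point is the head's p_vpprev);
 * together with hat_page_clrattr() below, which re-inserts pages at the
 * head, this keeps modified pages grouped at one end of the list, which
 * is what IS_VMODSORT() consumers rely on. The p_vpnext != pp test
 * guards the single-page case mentioned above.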
6990 */ 6991 if (pp->p_vpnext != pp) { 6992 page_vpsub(&vp->v_pages, pp); 6993 if (vp->v_pages != NULL) 6994 listp = &vp->v_pages->p_vpprev->p_vpnext; 6995 else 6996 listp = &vp->v_pages; 6997 page_vpadd(listp, pp); 6998 } 6999 mutex_exit(vphm); 7000 } 7001 } 7002 7003 void 7004 hat_page_clrattr(page_t *pp, uint_t flag) 7005 { 7006 vnode_t *vp = pp->p_vnode; 7007 kmutex_t *vphm = NULL; 7008 kmutex_t *pmtx; 7009 7010 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 7011 7012 /* 7013 * For vnode with a sorted v_pages list, we need to change 7014 * the attributes and the v_pages list together under page_vnode_mutex. 7015 */ 7016 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) { 7017 vphm = page_vnode_mutex(vp); 7018 mutex_enter(vphm); 7019 } 7020 7021 pmtx = sfmmu_page_enter(pp); 7022 pp->p_nrm &= ~flag; 7023 sfmmu_page_exit(pmtx); 7024 7025 if (vphm != NULL) { 7026 /* 7027 * Some File Systems examine v_pages for NULL w/o 7028 * grabbing the vphm mutex. Must not let it become NULL when 7029 * pp is the only page on the list. 7030 */ 7031 if (pp->p_vpnext != pp) { 7032 page_vpsub(&vp->v_pages, pp); 7033 page_vpadd(&vp->v_pages, pp); 7034 } 7035 mutex_exit(vphm); 7036 7037 /* 7038 * VMODSORT works by removing write permissions and getting 7039 * a fault when a page is made dirty. At this point 7040 * we need to remove write permission from all mappings 7041 * to this page. 7042 */ 7043 hat_page_clrwrt(pp); 7044 } 7045 } 7046 7047 7048 uint_t 7049 hat_page_getattr(page_t *pp, uint_t flag) 7050 { 7051 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 7052 return ((uint_t)(pp->p_nrm & flag)); 7053 } 7054 7055 /* 7056 * DEBUG kernels: verify that a kernel va<->pa translation 7057 * is safe by checking the underlying page_t is in a page 7058 * relocation-safe state. 7059 */ 7060 #ifdef DEBUG 7061 void 7062 sfmmu_check_kpfn(pfn_t pfn) 7063 { 7064 page_t *pp; 7065 int index, cons; 7066 7067 if (hat_check_vtop == 0) 7068 return; 7069 7070 if (hat_kpr_enabled == 0 || kvseg.s_base == NULL || panicstr) 7071 return; 7072 7073 pp = page_numtopp_nolock(pfn); 7074 if (!pp) 7075 return; 7076 7077 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp)) 7078 return; 7079 7080 /* 7081 * Handed a large kernel page, we dig up the root page since we 7082 * know the root page might have the lock also. 7083 */ 7084 if (pp->p_szc != 0) { 7085 index = PP_MAPINDEX(pp); 7086 cons = TTE8K; 7087 again: 7088 while (index != 0) { 7089 index >>= 1; 7090 if (index != 0) 7091 cons++; 7092 if (index & 0x1) { 7093 pp = PP_GROUPLEADER(pp, cons); 7094 goto again; 7095 } 7096 } 7097 } 7098 7099 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp)) 7100 return; 7101 7102 /* 7103 * Pages need to be locked or allocated "permanent" (either from 7104 * static_arena arena or explicitly setting PG_NORELOC when calling 7105 * page_create_va()) for VA->PA translations to be valid. 7106 */ 7107 if (!PP_ISNORELOC(pp)) 7108 panic("Illegal VA->PA translation, pp 0x%p not permanent", pp); 7109 else 7110 panic("Illegal VA->PA translation, pp 0x%p not locked", pp); 7111 } 7112 #endif /* DEBUG */ 7113 7114 /* 7115 * Returns a page frame number for a given virtual address. 
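 * A typical use, sketched here rather than taken from any caller in this
 * file, is a driver translating a wired-down kernel virtual address:
 *
 *	pfn_t pfn = hat_getpfnum(kas.a_hat, addr);
 *	if (pfn == PFN_INVALID)
 *		return (DDI_FAILURE);
 *
 * where DDI_FAILURE simply stands in for the caller's own error handling.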
7116 * Returns PFN_INVALID to indicate an invalid mapping 7117 */ 7118 pfn_t 7119 hat_getpfnum(struct hat *hat, caddr_t addr) 7120 { 7121 pfn_t pfn; 7122 tte_t tte; 7123 7124 /* 7125 * We would like to 7126 * ASSERT(AS_LOCK_HELD(as, &as->a_lock)); 7127 * but we can't because the iommu driver will call this 7128 * routine at interrupt time and it can't grab the as lock 7129 * or it will deadlock: A thread could have the as lock 7130 * and be waiting for io. The io can't complete 7131 * because the interrupt thread is blocked trying to grab 7132 * the as lock. 7133 */ 7134 7135 ASSERT(hat->sfmmu_xhat_provider == NULL); 7136 7137 if (hat == ksfmmup) { 7138 if (segkpm && IS_KPM_ADDR(addr)) 7139 return (sfmmu_kpm_vatopfn(addr)); 7140 while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte)) 7141 == PFN_SUSPENDED) { 7142 sfmmu_vatopfn_suspended(addr, ksfmmup, &tte); 7143 } 7144 sfmmu_check_kpfn(pfn); 7145 return (pfn); 7146 } else { 7147 return (sfmmu_uvatopfn(addr, hat)); 7148 } 7149 } 7150 7151 /* 7152 * hat_getkpfnum() is an obsolete DDI routine, and its use is discouraged. 7153 * Use hat_getpfnum(kas.a_hat, ...) instead. 7154 * 7155 * We'd like to return PFN_INVALID if the mappings have underlying page_t's 7156 * but can't right now due to the fact that some software has grown to use 7157 * this interface incorrectly. So for now when the interface is misused, 7158 * return a warning to the user that in the future it won't work in the 7159 * way they're abusing it, and carry on (after disabling page relocation). 7160 */ 7161 pfn_t 7162 hat_getkpfnum(caddr_t addr) 7163 { 7164 pfn_t pfn; 7165 tte_t tte; 7166 int badcaller = 0; 7167 extern int segkmem_reloc; 7168 7169 if (segkpm && IS_KPM_ADDR(addr)) { 7170 badcaller = 1; 7171 pfn = sfmmu_kpm_vatopfn(addr); 7172 } else { 7173 while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte)) 7174 == PFN_SUSPENDED) { 7175 sfmmu_vatopfn_suspended(addr, ksfmmup, &tte); 7176 } 7177 badcaller = pf_is_memory(pfn); 7178 } 7179 7180 if (badcaller) { 7181 /* 7182 * We can't return PFN_INVALID or the caller may panic 7183 * or corrupt the system. The only alternative is to 7184 * disable page relocation at this point for all kernel 7185 * memory. This will impact any callers of page_relocate() 7186 * such as FMA or DR. 7187 * 7188 * RFE: Add junk here to spit out an ereport so the sysadmin 7189 * can be advised that he should upgrade his device driver 7190 * so that this doesn't happen. 7191 */ 7192 hat_getkpfnum_badcall(caller()); 7193 if (hat_kpr_enabled && segkmem_reloc) { 7194 hat_kpr_enabled = 0; 7195 segkmem_reloc = 0; 7196 cmn_err(CE_WARN, "Kernel Page Relocation is DISABLED"); 7197 } 7198 } 7199 return (pfn); 7200 } 7201 7202 pfn_t 7203 sfmmu_uvatopfn(caddr_t vaddr, struct hat *sfmmup) 7204 { 7205 struct hmehash_bucket *hmebp; 7206 hmeblk_tag hblktag; 7207 int hmeshift, hashno = 1; 7208 struct hme_blk *hmeblkp = NULL; 7209 7210 struct sf_hment *sfhmep; 7211 tte_t tte; 7212 pfn_t pfn; 7213 7214 /* support for ISM */ 7215 ism_map_t *ism_map; 7216 ism_blk_t *ism_blkp; 7217 int i; 7218 sfmmu_t *ism_hatid = NULL; 7219 sfmmu_t *locked_hatid = NULL; 7220 7221 7222 ASSERT(sfmmup != ksfmmup); 7223 SFMMU_STAT(sf_user_vtop); 7224 /* 7225 * Set ism_hatid if vaddr falls in a ISM segment. 
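 * If it does, the lookup is redirected to the shared ISM hat and vaddr is
 * rebased to an offset within that segment, so the hash walk further down
 * searches the ISM hat's hmeblks rather than this process's.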
7226 */ 7227 ism_blkp = sfmmup->sfmmu_iblk; 7228 if (ism_blkp) { 7229 sfmmu_ismhat_enter(sfmmup, 0); 7230 locked_hatid = sfmmup; 7231 } 7232 while (ism_blkp && ism_hatid == NULL) { 7233 ism_map = ism_blkp->iblk_maps; 7234 for (i = 0; ism_map[i].imap_ismhat && i < ISM_MAP_SLOTS; i++) { 7235 if (vaddr >= ism_start(ism_map[i]) && 7236 vaddr < ism_end(ism_map[i])) { 7237 sfmmup = ism_hatid = ism_map[i].imap_ismhat; 7238 vaddr = (caddr_t)(vaddr - 7239 ism_start(ism_map[i])); 7240 break; 7241 } 7242 } 7243 ism_blkp = ism_blkp->iblk_next; 7244 } 7245 if (locked_hatid) { 7246 sfmmu_ismhat_exit(locked_hatid, 0); 7247 } 7248 7249 hblktag.htag_id = sfmmup; 7250 do { 7251 hmeshift = HME_HASH_SHIFT(hashno); 7252 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift); 7253 hblktag.htag_rehash = hashno; 7254 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift); 7255 7256 SFMMU_HASH_LOCK(hmebp); 7257 7258 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 7259 if (hmeblkp != NULL) { 7260 HBLKTOHME(sfhmep, hmeblkp, vaddr); 7261 sfmmu_copytte(&sfhmep->hme_tte, &tte); 7262 if (TTE_IS_VALID(&tte)) { 7263 pfn = TTE_TO_PFN(vaddr, &tte); 7264 } else { 7265 pfn = PFN_INVALID; 7266 } 7267 SFMMU_HASH_UNLOCK(hmebp); 7268 return (pfn); 7269 } 7270 SFMMU_HASH_UNLOCK(hmebp); 7271 hashno++; 7272 } while (HME_REHASH(sfmmup) && (hashno <= mmu_hashcnt)); 7273 return (PFN_INVALID); 7274 } 7275 7276 7277 /* 7278 * For compatability with AT&T and later optimizations 7279 */ 7280 /* ARGSUSED */ 7281 void 7282 hat_map(struct hat *hat, caddr_t addr, size_t len, uint_t flags) 7283 { 7284 ASSERT(hat != NULL); 7285 ASSERT(hat->sfmmu_xhat_provider == NULL); 7286 } 7287 7288 /* 7289 * Return the number of mappings to a particular page. 7290 * This number is an approximation of the number of 7291 * number of people sharing the page. 7292 */ 7293 ulong_t 7294 hat_page_getshare(page_t *pp) 7295 { 7296 page_t *spp = pp; /* start page */ 7297 kmutex_t *pml; 7298 ulong_t cnt; 7299 int index, sz = TTE64K; 7300 7301 /* 7302 * We need to grab the mlist lock to make sure any outstanding 7303 * load/unloads complete. Otherwise we could return zero 7304 * even though the unload(s) hasn't finished yet. 7305 */ 7306 pml = sfmmu_mlist_enter(spp); 7307 cnt = spp->p_share; 7308 7309 #ifdef VAC 7310 if (kpm_enable) 7311 cnt += spp->p_kpmref; 7312 #endif 7313 7314 /* 7315 * If we have any large mappings, we count the number of 7316 * mappings that this large page is part of. 7317 */ 7318 index = PP_MAPINDEX(spp); 7319 index >>= 1; 7320 while (index) { 7321 pp = PP_GROUPLEADER(spp, sz); 7322 if ((index & 0x1) && pp != spp) { 7323 cnt += pp->p_share; 7324 spp = pp; 7325 } 7326 index >>= 1; 7327 sz++; 7328 } 7329 sfmmu_mlist_exit(pml); 7330 return (cnt); 7331 } 7332 7333 /* 7334 * Unload all large mappings to the pp and reset the p_szc field of every 7335 * constituent page according to the remaining mappings. 7336 * 7337 * pp must be locked SE_EXCL. Even though no other constituent pages are 7338 * locked it's legal to unload the large mappings to the pp because all 7339 * constituent pages of large locked mappings have to be locked SE_SHARED. 7340 * This means if we have SE_EXCL lock on one of constituent pages none of the 7341 * large mappings to pp are locked. 7342 * 7343 * Decrease p_szc field starting from the last constituent page and ending 7344 * with the root page. This method is used because other threads rely on the 7345 * root's p_szc to find the lock to syncronize on. 
After a root page_t's p_szc 7346 * is demoted then other threads will succeed in sfmmu_mlspl_enter(). This 7347 * ensures that p_szc changes of the constituent pages appears atomic for all 7348 * threads that use sfmmu_mlspl_enter() to examine p_szc field. 7349 * 7350 * This mechanism is only used for file system pages where it's not always 7351 * possible to get SE_EXCL locks on all constituent pages to demote the size 7352 * code (as is done for anonymous or kernel large pages). 7353 * 7354 * See more comments in front of sfmmu_mlspl_enter(). 7355 */ 7356 void 7357 hat_page_demote(page_t *pp) 7358 { 7359 int index; 7360 int sz; 7361 cpuset_t cpuset; 7362 int sync = 0; 7363 page_t *rootpp; 7364 struct sf_hment *sfhme; 7365 struct sf_hment *tmphme = NULL; 7366 struct hme_blk *hmeblkp; 7367 uint_t pszc; 7368 page_t *lastpp; 7369 cpuset_t tset; 7370 pgcnt_t npgs; 7371 kmutex_t *pml; 7372 kmutex_t *pmtx = NULL; 7373 7374 ASSERT(PAGE_EXCL(pp)); 7375 ASSERT(!PP_ISFREE(pp)); 7376 ASSERT(page_szc_lock_assert(pp)); 7377 pml = sfmmu_mlist_enter(pp); 7378 7379 pszc = pp->p_szc; 7380 if (pszc == 0) { 7381 goto out; 7382 } 7383 7384 index = PP_MAPINDEX(pp) >> 1; 7385 7386 if (index) { 7387 CPUSET_ZERO(cpuset); 7388 sz = TTE64K; 7389 sync = 1; 7390 } 7391 7392 while (index) { 7393 if (!(index & 0x1)) { 7394 index >>= 1; 7395 sz++; 7396 continue; 7397 } 7398 ASSERT(sz <= pszc); 7399 rootpp = PP_GROUPLEADER(pp, sz); 7400 for (sfhme = rootpp->p_mapping; sfhme; sfhme = tmphme) { 7401 tmphme = sfhme->hme_next; 7402 hmeblkp = sfmmu_hmetohblk(sfhme); 7403 if (hme_size(sfhme) != sz) { 7404 continue; 7405 } 7406 if (hmeblkp->hblk_xhat_bit) { 7407 cmn_err(CE_PANIC, 7408 "hat_page_demote: xhat hmeblk"); 7409 } 7410 tset = sfmmu_pageunload(rootpp, sfhme, sz); 7411 CPUSET_OR(cpuset, tset); 7412 } 7413 if (index >>= 1) { 7414 sz++; 7415 } 7416 } 7417 7418 ASSERT(!PP_ISMAPPED_LARGE(pp)); 7419 7420 if (sync) { 7421 xt_sync(cpuset); 7422 #ifdef VAC 7423 if (PP_ISTNC(pp)) { 7424 conv_tnc(rootpp, sz); 7425 } 7426 #endif /* VAC */ 7427 } 7428 7429 pmtx = sfmmu_page_enter(pp); 7430 7431 ASSERT(pp->p_szc == pszc); 7432 rootpp = PP_PAGEROOT(pp); 7433 ASSERT(rootpp->p_szc == pszc); 7434 lastpp = PP_PAGENEXT_N(rootpp, TTEPAGES(pszc) - 1); 7435 7436 while (lastpp != rootpp) { 7437 sz = PP_MAPINDEX(lastpp) ? fnd_mapping_sz(lastpp) : 0; 7438 ASSERT(sz < pszc); 7439 npgs = (sz == 0) ? 1 : TTEPAGES(sz); 7440 ASSERT(P2PHASE(lastpp->p_pagenum, npgs) == npgs - 1); 7441 while (--npgs > 0) { 7442 lastpp->p_szc = (uchar_t)sz; 7443 lastpp = PP_PAGEPREV(lastpp); 7444 } 7445 if (sz) { 7446 /* 7447 * make sure before current root's pszc 7448 * is updated all updates to constituent pages pszc 7449 * fields are globally visible. 7450 */ 7451 membar_producer(); 7452 } 7453 lastpp->p_szc = sz; 7454 ASSERT(IS_P2ALIGNED(lastpp->p_pagenum, TTEPAGES(sz))); 7455 if (lastpp != rootpp) { 7456 lastpp = PP_PAGEPREV(lastpp); 7457 } 7458 } 7459 if (sz == 0) { 7460 /* the loop above doesn't cover this case */ 7461 rootpp->p_szc = 0; 7462 } 7463 out: 7464 ASSERT(pp->p_szc == 0); 7465 if (pmtx != NULL) { 7466 sfmmu_page_exit(pmtx); 7467 } 7468 sfmmu_mlist_exit(pml); 7469 } 7470 7471 /* 7472 * Refresh the HAT ismttecnt[] element for size szc. 7473 * Caller must have set ISM busy flag to prevent mapping 7474 * lists from changing while we're traversing them. 
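 *
 * The refresh itself is simple: walk every ism_blk hanging off the hat,
 * sum sfmmu_ttecnt[szc] across the mapped ISM hats, cache the total in
 * sfmmu_ismttecnt[szc] and return it.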
7475 */ 7476 pgcnt_t 7477 ism_tsb_entries(sfmmu_t *sfmmup, int szc) 7478 { 7479 ism_blk_t *ism_blkp = sfmmup->sfmmu_iblk; 7480 ism_map_t *ism_map; 7481 pgcnt_t npgs = 0; 7482 int j; 7483 7484 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 7485 for (; ism_blkp != NULL; ism_blkp = ism_blkp->iblk_next) { 7486 ism_map = ism_blkp->iblk_maps; 7487 for (j = 0; ism_map[j].imap_ismhat && j < ISM_MAP_SLOTS; j++) 7488 npgs += ism_map[j].imap_ismhat->sfmmu_ttecnt[szc]; 7489 } 7490 sfmmup->sfmmu_ismttecnt[szc] = npgs; 7491 return (npgs); 7492 } 7493 7494 /* 7495 * Yield the memory claim requirement for an address space. 7496 * 7497 * This is currently implemented as the number of bytes that have active 7498 * hardware translations that have page structures. Therefore, it can 7499 * underestimate the traditional resident set size, eg, if the 7500 * physical page is present and the hardware translation is missing; 7501 * and it can overestimate the rss, eg, if there are active 7502 * translations to a frame buffer with page structs. 7503 * Also, it does not take sharing into account. 7504 * 7505 * Note that we don't acquire locks here since this function is most often 7506 * called from the clock thread. 7507 */ 7508 size_t 7509 hat_get_mapped_size(struct hat *hat) 7510 { 7511 size_t assize = 0; 7512 int i; 7513 7514 if (hat == NULL) 7515 return (0); 7516 7517 ASSERT(hat->sfmmu_xhat_provider == NULL); 7518 7519 for (i = 0; i < mmu_page_sizes; i++) 7520 assize += (pgcnt_t)hat->sfmmu_ttecnt[i] * TTEBYTES(i); 7521 7522 if (hat->sfmmu_iblk == NULL) 7523 return (assize); 7524 7525 for (i = 0; i < mmu_page_sizes; i++) 7526 assize += (pgcnt_t)hat->sfmmu_ismttecnt[i] * TTEBYTES(i); 7527 7528 return (assize); 7529 } 7530 7531 int 7532 hat_stats_enable(struct hat *hat) 7533 { 7534 hatlock_t *hatlockp; 7535 7536 ASSERT(hat->sfmmu_xhat_provider == NULL); 7537 7538 hatlockp = sfmmu_hat_enter(hat); 7539 hat->sfmmu_rmstat++; 7540 sfmmu_hat_exit(hatlockp); 7541 return (1); 7542 } 7543 7544 void 7545 hat_stats_disable(struct hat *hat) 7546 { 7547 hatlock_t *hatlockp; 7548 7549 ASSERT(hat->sfmmu_xhat_provider == NULL); 7550 7551 hatlockp = sfmmu_hat_enter(hat); 7552 hat->sfmmu_rmstat--; 7553 sfmmu_hat_exit(hatlockp); 7554 } 7555 7556 /* 7557 * Routines for entering or removing ourselves from the 7558 * ism_hat's mapping list. 7559 */ 7560 static void 7561 iment_add(struct ism_ment *iment, struct hat *ism_hat) 7562 { 7563 ASSERT(MUTEX_HELD(&ism_mlist_lock)); 7564 7565 iment->iment_prev = NULL; 7566 iment->iment_next = ism_hat->sfmmu_iment; 7567 if (ism_hat->sfmmu_iment) { 7568 ism_hat->sfmmu_iment->iment_prev = iment; 7569 } 7570 ism_hat->sfmmu_iment = iment; 7571 } 7572 7573 static void 7574 iment_sub(struct ism_ment *iment, struct hat *ism_hat) 7575 { 7576 ASSERT(MUTEX_HELD(&ism_mlist_lock)); 7577 7578 if (ism_hat->sfmmu_iment == NULL) { 7579 panic("ism map entry remove - no entries"); 7580 } 7581 7582 if (iment->iment_prev) { 7583 ASSERT(ism_hat->sfmmu_iment != iment); 7584 iment->iment_prev->iment_next = iment->iment_next; 7585 } else { 7586 ASSERT(ism_hat->sfmmu_iment == iment); 7587 ism_hat->sfmmu_iment = iment->iment_next; 7588 } 7589 7590 if (iment->iment_next) { 7591 iment->iment_next->iment_prev = iment->iment_prev; 7592 } 7593 7594 /* 7595 * zero out the entry 7596 */ 7597 iment->iment_next = NULL; 7598 iment->iment_prev = NULL; 7599 iment->iment_hat = NULL; 7600 } 7601 7602 /* 7603 * Hat_share()/unshare() return an (non-zero) error 7604 * when saddr and daddr are not properly aligned. 
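 * (In this implementation that means addr, sptaddr and len must all be
 * aligned to the page size implied by ismszc; see the ISM_ALIGNED()
 * checks in hat_share() and hat_unshare() below.)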
7605 * 7606 * The top level mapping element determines the alignment 7607 * requirement for saddr and daddr, depending on different 7608 * architectures. 7609 * 7610 * When hat_share()/unshare() are not supported, 7611 * HATOP_SHARE()/UNSHARE() return 0 7612 */ 7613 int 7614 hat_share(struct hat *sfmmup, caddr_t addr, 7615 struct hat *ism_hatid, caddr_t sptaddr, size_t len, uint_t ismszc) 7616 { 7617 ism_blk_t *ism_blkp; 7618 ism_blk_t *new_iblk; 7619 ism_map_t *ism_map; 7620 ism_ment_t *ism_ment; 7621 int i, added; 7622 hatlock_t *hatlockp; 7623 int reload_mmu = 0; 7624 uint_t ismshift = page_get_shift(ismszc); 7625 size_t ismpgsz = page_get_pagesize(ismszc); 7626 uint_t ismmask = (uint_t)ismpgsz - 1; 7627 size_t sh_size = ISM_SHIFT(ismshift, len); 7628 ushort_t ismhatflag; 7629 7630 #ifdef DEBUG 7631 caddr_t eaddr = addr + len; 7632 #endif /* DEBUG */ 7633 7634 ASSERT(ism_hatid != NULL && sfmmup != NULL); 7635 ASSERT(sptaddr == ISMID_STARTADDR); 7636 /* 7637 * Check the alignment. 7638 */ 7639 if (!ISM_ALIGNED(ismshift, addr) || !ISM_ALIGNED(ismshift, sptaddr)) 7640 return (EINVAL); 7641 7642 /* 7643 * Check size alignment. 7644 */ 7645 if (!ISM_ALIGNED(ismshift, len)) 7646 return (EINVAL); 7647 7648 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 7649 7650 /* 7651 * Allocate ism_ment for the ism_hat's mapping list, and an 7652 * ism map blk in case we need one. We must do our 7653 * allocations before acquiring locks to prevent a deadlock 7654 * in the kmem allocator on the mapping list lock. 7655 */ 7656 new_iblk = kmem_cache_alloc(ism_blk_cache, KM_SLEEP); 7657 ism_ment = kmem_cache_alloc(ism_ment_cache, KM_SLEEP); 7658 7659 /* 7660 * Serialize ISM mappings with the ISM busy flag, and also the 7661 * trap handlers. 7662 */ 7663 sfmmu_ismhat_enter(sfmmup, 0); 7664 7665 /* 7666 * Allocate an ism map blk if necessary. 7667 */ 7668 if (sfmmup->sfmmu_iblk == NULL) { 7669 sfmmup->sfmmu_iblk = new_iblk; 7670 bzero(new_iblk, sizeof (*new_iblk)); 7671 new_iblk->iblk_nextpa = (uint64_t)-1; 7672 membar_stst(); /* make sure next ptr visible to all CPUs */ 7673 sfmmup->sfmmu_ismblkpa = va_to_pa((caddr_t)new_iblk); 7674 reload_mmu = 1; 7675 new_iblk = NULL; 7676 } 7677 7678 #ifdef DEBUG 7679 /* 7680 * Make sure mapping does not already exist. 7681 */ 7682 ism_blkp = sfmmup->sfmmu_iblk; 7683 while (ism_blkp) { 7684 ism_map = ism_blkp->iblk_maps; 7685 for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) { 7686 if ((addr >= ism_start(ism_map[i]) && 7687 addr < ism_end(ism_map[i])) || 7688 eaddr > ism_start(ism_map[i]) && 7689 eaddr <= ism_end(ism_map[i])) { 7690 panic("sfmmu_share: Already mapped!"); 7691 } 7692 } 7693 ism_blkp = ism_blkp->iblk_next; 7694 } 7695 #endif /* DEBUG */ 7696 7697 ASSERT(ismszc >= TTE4M); 7698 if (ismszc == TTE4M) { 7699 ismhatflag = HAT_4M_FLAG; 7700 } else if (ismszc == TTE32M) { 7701 ismhatflag = HAT_32M_FLAG; 7702 } else if (ismszc == TTE256M) { 7703 ismhatflag = HAT_256M_FLAG; 7704 } 7705 /* 7706 * Add mapping to first available mapping slot. 7707 */ 7708 ism_blkp = sfmmup->sfmmu_iblk; 7709 added = 0; 7710 while (!added) { 7711 ism_map = ism_blkp->iblk_maps; 7712 for (i = 0; i < ISM_MAP_SLOTS; i++) { 7713 if (ism_map[i].imap_ismhat == NULL) { 7714 7715 ism_map[i].imap_ismhat = ism_hatid; 7716 ism_map[i].imap_vb_shift = (ushort_t)ismshift; 7717 ism_map[i].imap_hatflags = ismhatflag; 7718 ism_map[i].imap_sz_mask = ismmask; 7719 /* 7720 * imap_seg is checked in ISM_CHECK to see if 7721 * non-NULL, then other info assumed valid. 
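 * That is why the membar_stst() below sits after the other imap fields
 * are filled in and before the store that makes imap_seg non-zero: a
 * reader (e.g. the tsbmiss handler) that sees a non-zero imap_seg is then
 * guaranteed to also see the imap_ismhat, imap_vb_shift, imap_hatflags
 * and imap_sz_mask values written before the barrier.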
7722 */ 7723 membar_stst(); 7724 ism_map[i].imap_seg = (uintptr_t)addr | sh_size; 7725 ism_map[i].imap_ment = ism_ment; 7726 7727 /* 7728 * Now add ourselves to the ism_hat's 7729 * mapping list. 7730 */ 7731 ism_ment->iment_hat = sfmmup; 7732 ism_ment->iment_base_va = addr; 7733 ism_hatid->sfmmu_ismhat = 1; 7734 ism_hatid->sfmmu_flags = 0; 7735 mutex_enter(&ism_mlist_lock); 7736 iment_add(ism_ment, ism_hatid); 7737 mutex_exit(&ism_mlist_lock); 7738 added = 1; 7739 break; 7740 } 7741 } 7742 if (!added && ism_blkp->iblk_next == NULL) { 7743 ism_blkp->iblk_next = new_iblk; 7744 new_iblk = NULL; 7745 bzero(ism_blkp->iblk_next, 7746 sizeof (*ism_blkp->iblk_next)); 7747 ism_blkp->iblk_next->iblk_nextpa = (uint64_t)-1; 7748 membar_stst(); 7749 ism_blkp->iblk_nextpa = 7750 va_to_pa((caddr_t)ism_blkp->iblk_next); 7751 } 7752 ism_blkp = ism_blkp->iblk_next; 7753 } 7754 7755 /* 7756 * Update our counters for this sfmmup's ism mappings. 7757 */ 7758 for (i = 0; i <= ismszc; i++) { 7759 if (!(disable_ism_large_pages & (1 << i))) 7760 (void) ism_tsb_entries(sfmmup, i); 7761 } 7762 7763 hatlockp = sfmmu_hat_enter(sfmmup); 7764 7765 /* 7766 * For ISM and DISM we do not support 512K pages, so we only 7767 * only search the 4M and 8K/64K hashes for 4 pagesize cpus, and search 7768 * the 256M or 32M, and 4M and 8K/64K hashes for 6 pagesize cpus. 7769 */ 7770 ASSERT((disable_ism_large_pages & (1 << TTE512K)) != 0); 7771 7772 if (ismszc > TTE4M && !SFMMU_FLAGS_ISSET(sfmmup, HAT_4M_FLAG)) 7773 SFMMU_FLAGS_SET(sfmmup, HAT_4M_FLAG); 7774 7775 if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_64K_FLAG)) 7776 SFMMU_FLAGS_SET(sfmmup, HAT_64K_FLAG); 7777 7778 /* 7779 * If we updated the ismblkpa for this HAT or we need 7780 * to start searching the 256M or 32M or 4M hash, we must 7781 * make sure all CPUs running this process reload their 7782 * tsbmiss area. Otherwise they will fail to load the mappings 7783 * in the tsbmiss handler and will loop calling pagefault(). 7784 */ 7785 switch (ismszc) { 7786 case TTE256M: 7787 if (reload_mmu || !SFMMU_FLAGS_ISSET(sfmmup, HAT_256M_FLAG)) { 7788 SFMMU_FLAGS_SET(sfmmup, HAT_256M_FLAG); 7789 sfmmu_sync_mmustate(sfmmup); 7790 } 7791 break; 7792 case TTE32M: 7793 if (reload_mmu || !SFMMU_FLAGS_ISSET(sfmmup, HAT_32M_FLAG)) { 7794 SFMMU_FLAGS_SET(sfmmup, HAT_32M_FLAG); 7795 sfmmu_sync_mmustate(sfmmup); 7796 } 7797 break; 7798 case TTE4M: 7799 if (reload_mmu || !SFMMU_FLAGS_ISSET(sfmmup, HAT_4M_FLAG)) { 7800 SFMMU_FLAGS_SET(sfmmup, HAT_4M_FLAG); 7801 sfmmu_sync_mmustate(sfmmup); 7802 } 7803 break; 7804 default: 7805 break; 7806 } 7807 7808 /* 7809 * Now we can drop the locks. 7810 */ 7811 sfmmu_ismhat_exit(sfmmup, 1); 7812 sfmmu_hat_exit(hatlockp); 7813 7814 /* 7815 * Free up ismblk if we didn't use it. 7816 */ 7817 if (new_iblk != NULL) 7818 kmem_cache_free(ism_blk_cache, new_iblk); 7819 7820 /* 7821 * Check TSB and TLB page sizes. 7822 */ 7823 sfmmu_check_page_sizes(sfmmup, 1); 7824 7825 return (0); 7826 } 7827 7828 /* 7829 * hat_unshare removes exactly one ism_map from 7830 * this process's as. It expects multiple calls 7831 * to hat_unshare for multiple shm segments. 
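 *
 * In outline (restating the code below): the matching slot is located by
 * its (addr, size) pair, this hat is unlinked from the ISM hat's mapping
 * list, the context is invalidated so the trap handlers stop using the
 * stale ISM state, the slot is removed by sliding all later slots down
 * one position (the map may contain no holes), and finally the process's
 * TSBs are invalidated wholesale instead of demapping page by page.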
7832 */ 7833 void 7834 hat_unshare(struct hat *sfmmup, caddr_t addr, size_t len, uint_t ismszc) 7835 { 7836 ism_map_t *ism_map; 7837 ism_ment_t *free_ment = NULL; 7838 ism_blk_t *ism_blkp; 7839 struct hat *ism_hatid; 7840 int found, i; 7841 hatlock_t *hatlockp; 7842 struct tsb_info *tsbinfo; 7843 uint_t ismshift = page_get_shift(ismszc); 7844 size_t sh_size = ISM_SHIFT(ismshift, len); 7845 7846 ASSERT(ISM_ALIGNED(ismshift, addr)); 7847 ASSERT(ISM_ALIGNED(ismshift, len)); 7848 ASSERT(sfmmup != NULL); 7849 ASSERT(sfmmup != ksfmmup); 7850 7851 if (sfmmup->sfmmu_xhat_provider) { 7852 XHAT_UNSHARE(sfmmup, addr, len); 7853 return; 7854 } else { 7855 /* 7856 * This must be a CPU HAT. If the address space has 7857 * XHATs attached, inform all XHATs that ISM segment 7858 * is going away 7859 */ 7860 ASSERT(sfmmup->sfmmu_as != NULL); 7861 if (sfmmup->sfmmu_as->a_xhat != NULL) 7862 xhat_unshare_all(sfmmup->sfmmu_as, addr, len); 7863 } 7864 7865 /* 7866 * Make sure that during the entire time ISM mappings are removed, 7867 * the trap handlers serialize behind us, and that no one else 7868 * can be mucking with ISM mappings. This also lets us get away 7869 * with not doing expensive cross calls to flush the TLB -- we 7870 * just discard the context, flush the entire TSB, and call it 7871 * a day. 7872 */ 7873 sfmmu_ismhat_enter(sfmmup, 0); 7874 7875 /* 7876 * Remove the mapping. 7877 * 7878 * We can't have any holes in the ism map. 7879 * The tsb miss code while searching the ism map will 7880 * stop on an empty map slot. So we must move 7881 * everyone past the hole up 1 if any. 7882 * 7883 * Also empty ism map blks are not freed until the 7884 * process exits. This is to prevent a MT race condition 7885 * between sfmmu_unshare() and sfmmu_tsbmiss_exception(). 7886 */ 7887 found = 0; 7888 ism_blkp = sfmmup->sfmmu_iblk; 7889 while (!found && ism_blkp) { 7890 ism_map = ism_blkp->iblk_maps; 7891 for (i = 0; i < ISM_MAP_SLOTS; i++) { 7892 if (addr == ism_start(ism_map[i]) && 7893 sh_size == (size_t)(ism_size(ism_map[i]))) { 7894 found = 1; 7895 break; 7896 } 7897 } 7898 if (!found) 7899 ism_blkp = ism_blkp->iblk_next; 7900 } 7901 7902 if (found) { 7903 ism_hatid = ism_map[i].imap_ismhat; 7904 ASSERT(ism_hatid != NULL); 7905 ASSERT(ism_hatid->sfmmu_ismhat == 1); 7906 7907 /* 7908 * First remove ourselves from the ism mapping list. 7909 */ 7910 mutex_enter(&ism_mlist_lock); 7911 iment_sub(ism_map[i].imap_ment, ism_hatid); 7912 mutex_exit(&ism_mlist_lock); 7913 free_ment = ism_map[i].imap_ment; 7914 7915 /* 7916 * Now gurantee that any other cpu 7917 * that tries to process an ISM miss 7918 * will go to tl=0. 7919 */ 7920 hatlockp = sfmmu_hat_enter(sfmmup); 7921 7922 sfmmu_invalidate_ctx(sfmmup); 7923 7924 sfmmu_hat_exit(hatlockp); 7925 7926 /* 7927 * We delete the ism map by copying 7928 * the next map over the current one. 7929 * We will take the next one in the maps 7930 * array or from the next ism_blk. 
7931 */ 7932 while (ism_blkp) { 7933 ism_map = ism_blkp->iblk_maps; 7934 while (i < (ISM_MAP_SLOTS - 1)) { 7935 ism_map[i] = ism_map[i + 1]; 7936 i++; 7937 } 7938 /* i == (ISM_MAP_SLOTS - 1) */ 7939 ism_blkp = ism_blkp->iblk_next; 7940 if (ism_blkp) { 7941 ism_map[i] = ism_blkp->iblk_maps[0]; 7942 i = 0; 7943 } else { 7944 ism_map[i].imap_seg = 0; 7945 ism_map[i].imap_vb_shift = 0; 7946 ism_map[i].imap_hatflags = 0; 7947 ism_map[i].imap_sz_mask = 0; 7948 ism_map[i].imap_ismhat = NULL; 7949 ism_map[i].imap_ment = NULL; 7950 } 7951 } 7952 7953 /* 7954 * Now flush entire TSB for the process, since 7955 * demapping page by page can be too expensive. 7956 * We don't have to flush the TLB here anymore 7957 * since we switch to a new TLB ctx instead. 7958 * Also, there is no need to flush if the process 7959 * is exiting since the TSB will be freed later. 7960 */ 7961 if (!sfmmup->sfmmu_free) { 7962 hatlockp = sfmmu_hat_enter(sfmmup); 7963 for (tsbinfo = sfmmup->sfmmu_tsb; tsbinfo != NULL; 7964 tsbinfo = tsbinfo->tsb_next) { 7965 if (tsbinfo->tsb_flags & TSB_SWAPPED) 7966 continue; 7967 sfmmu_inv_tsb(tsbinfo->tsb_va, 7968 TSB_BYTES(tsbinfo->tsb_szc)); 7969 } 7970 sfmmu_hat_exit(hatlockp); 7971 } 7972 } 7973 7974 /* 7975 * Update our counters for this sfmmup's ism mappings. 7976 */ 7977 for (i = 0; i <= ismszc; i++) { 7978 if (!(disable_ism_large_pages & (1 << i))) 7979 (void) ism_tsb_entries(sfmmup, i); 7980 } 7981 7982 sfmmu_ismhat_exit(sfmmup, 0); 7983 7984 /* 7985 * We must do our freeing here after dropping locks 7986 * to prevent a deadlock in the kmem allocator on the 7987 * mapping list lock. 7988 */ 7989 if (free_ment != NULL) 7990 kmem_cache_free(ism_ment_cache, free_ment); 7991 7992 /* 7993 * Check TSB and TLB page sizes if the process isn't exiting. 7994 */ 7995 if (!sfmmup->sfmmu_free) 7996 sfmmu_check_page_sizes(sfmmup, 0); 7997 } 7998 7999 /* ARGSUSED */ 8000 static int 8001 sfmmu_idcache_constructor(void *buf, void *cdrarg, int kmflags) 8002 { 8003 /* void *buf is sfmmu_t pointer */ 8004 return (0); 8005 } 8006 8007 /* ARGSUSED */ 8008 static void 8009 sfmmu_idcache_destructor(void *buf, void *cdrarg) 8010 { 8011 /* void *buf is sfmmu_t pointer */ 8012 } 8013 8014 /* 8015 * setup kmem hmeblks by bzeroing all members and initializing the nextpa 8016 * field to be the pa of this hmeblk 8017 */ 8018 /* ARGSUSED */ 8019 static int 8020 sfmmu_hblkcache_constructor(void *buf, void *cdrarg, int kmflags) 8021 { 8022 struct hme_blk *hmeblkp; 8023 8024 bzero(buf, (size_t)cdrarg); 8025 hmeblkp = (struct hme_blk *)buf; 8026 hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp); 8027 8028 #ifdef HBLK_TRACE 8029 mutex_init(&hmeblkp->hblk_audit_lock, NULL, MUTEX_DEFAULT, NULL); 8030 #endif /* HBLK_TRACE */ 8031 8032 return (0); 8033 } 8034 8035 /* ARGSUSED */ 8036 static void 8037 sfmmu_hblkcache_destructor(void *buf, void *cdrarg) 8038 { 8039 8040 #ifdef HBLK_TRACE 8041 8042 struct hme_blk *hmeblkp; 8043 8044 hmeblkp = (struct hme_blk *)buf; 8045 mutex_destroy(&hmeblkp->hblk_audit_lock); 8046 8047 #endif /* HBLK_TRACE */ 8048 } 8049 8050 #define SFMMU_CACHE_RECLAIM_SCAN_RATIO 8 8051 static int sfmmu_cache_reclaim_scan_ratio = SFMMU_CACHE_RECLAIM_SCAN_RATIO; 8052 /* 8053 * The kmem allocator will callback into our reclaim routine when the system 8054 * is running low in memory. We traverse the hash and free up all unused but 8055 * still cached hme_blks. We also traverse the free list and free them up 8056 * as well. 
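 *
 * The scan is incremental: static "reclaim hand" pointers remember where
 * the previous pass stopped, each callback covers only 1 /
 * sfmmu_cache_reclaim_scan_ratio of the user and kernel hashes, and each
 * bucket lock is taken with a try-enter so reclaim never blocks behind
 * normal hat activity. Hmeblks with no remaining valid or hment
 * references are unhashed and freed, and the accumulated free list is
 * purged at the end.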
8057 */ 8058 /*ARGSUSED*/ 8059 static void 8060 sfmmu_hblkcache_reclaim(void *cdrarg) 8061 { 8062 int i; 8063 uint64_t hblkpa, prevpa, nx_pa; 8064 struct hmehash_bucket *hmebp; 8065 struct hme_blk *hmeblkp, *nx_hblk, *pr_hblk = NULL; 8066 static struct hmehash_bucket *uhmehash_reclaim_hand; 8067 static struct hmehash_bucket *khmehash_reclaim_hand; 8068 struct hme_blk *list = NULL; 8069 8070 hmebp = uhmehash_reclaim_hand; 8071 if (hmebp == NULL || hmebp > &uhme_hash[UHMEHASH_SZ]) 8072 uhmehash_reclaim_hand = hmebp = uhme_hash; 8073 uhmehash_reclaim_hand += UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; 8074 8075 for (i = UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) { 8076 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) { 8077 hmeblkp = hmebp->hmeblkp; 8078 hblkpa = hmebp->hmeh_nextpa; 8079 prevpa = 0; 8080 pr_hblk = NULL; 8081 while (hmeblkp) { 8082 nx_hblk = hmeblkp->hblk_next; 8083 nx_pa = hmeblkp->hblk_nextpa; 8084 if (!hmeblkp->hblk_vcnt && 8085 !hmeblkp->hblk_hmecnt) { 8086 sfmmu_hblk_hash_rm(hmebp, hmeblkp, 8087 prevpa, pr_hblk); 8088 sfmmu_hblk_free(hmebp, hmeblkp, 8089 hblkpa, &list); 8090 } else { 8091 pr_hblk = hmeblkp; 8092 prevpa = hblkpa; 8093 } 8094 hmeblkp = nx_hblk; 8095 hblkpa = nx_pa; 8096 } 8097 SFMMU_HASH_UNLOCK(hmebp); 8098 } 8099 if (hmebp++ == &uhme_hash[UHMEHASH_SZ]) 8100 hmebp = uhme_hash; 8101 } 8102 8103 hmebp = khmehash_reclaim_hand; 8104 if (hmebp == NULL || hmebp > &khme_hash[KHMEHASH_SZ]) 8105 khmehash_reclaim_hand = hmebp = khme_hash; 8106 khmehash_reclaim_hand += KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; 8107 8108 for (i = KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) { 8109 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) { 8110 hmeblkp = hmebp->hmeblkp; 8111 hblkpa = hmebp->hmeh_nextpa; 8112 prevpa = 0; 8113 pr_hblk = NULL; 8114 while (hmeblkp) { 8115 nx_hblk = hmeblkp->hblk_next; 8116 nx_pa = hmeblkp->hblk_nextpa; 8117 if (!hmeblkp->hblk_vcnt && 8118 !hmeblkp->hblk_hmecnt) { 8119 sfmmu_hblk_hash_rm(hmebp, hmeblkp, 8120 prevpa, pr_hblk); 8121 sfmmu_hblk_free(hmebp, hmeblkp, 8122 hblkpa, &list); 8123 } else { 8124 pr_hblk = hmeblkp; 8125 prevpa = hblkpa; 8126 } 8127 hmeblkp = nx_hblk; 8128 hblkpa = nx_pa; 8129 } 8130 SFMMU_HASH_UNLOCK(hmebp); 8131 } 8132 if (hmebp++ == &khme_hash[KHMEHASH_SZ]) 8133 hmebp = khme_hash; 8134 } 8135 sfmmu_hblks_list_purge(&list); 8136 } 8137 8138 /* 8139 * sfmmu_get_ppvcolor should become a vm_machdep or hatop interface. 8140 * same goes for sfmmu_get_addrvcolor(). 8141 * 8142 * This function will return the virtual color for the specified page. The 8143 * virtual color corresponds to this page current mapping or its last mapping. 8144 * It is used by memory allocators to choose addresses with the correct 8145 * alignment so vac consistency is automatically maintained. If the page 8146 * has no color it returns -1. 8147 */ 8148 /*ARGSUSED*/ 8149 int 8150 sfmmu_get_ppvcolor(struct page *pp) 8151 { 8152 #ifdef VAC 8153 int color; 8154 8155 if (!(cache & CACHE_VAC) || PP_NEWPAGE(pp)) { 8156 return (-1); 8157 } 8158 color = PP_GET_VCOLOR(pp); 8159 ASSERT(color < mmu_btop(shm_alignment)); 8160 return (color); 8161 #else 8162 return (-1); 8163 #endif /* VAC */ 8164 } 8165 8166 /* 8167 * This function will return the desired alignment for vac consistency 8168 * (vac color) given a virtual address. If no vac is present it returns -1. 
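 *
 * Conceptually (a sketch only; the authoritative computation is in
 * addr_to_vcolor()), the color of an address is its page number modulo
 * the number of VAC colors, where the number of colors is the VAC span
 * (shm_alignment) divided by the base page size:
 *
 *	ncolors = shm_alignment / MMU_PAGESIZE;
 *	color   = (vaddr >> MMU_PAGESHIFT) & (ncolors - 1);
 *
 * Two mappings of the same physical page are VAC-safe when their colors
 * are equal, i.e. when their virtual addresses are congruent modulo
 * shm_alignment.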
8169 */ 8170 /*ARGSUSED*/ 8171 int 8172 sfmmu_get_addrvcolor(caddr_t vaddr) 8173 { 8174 #ifdef VAC 8175 if (cache & CACHE_VAC) { 8176 return (addr_to_vcolor(vaddr)); 8177 } else { 8178 return (-1); 8179 } 8180 #else 8181 return (-1); 8182 #endif /* VAC */ 8183 } 8184 8185 #ifdef VAC 8186 /* 8187 * Check for conflicts. 8188 * A conflict exists if the new and existing mappings do not match in 8189 * their "shm_alignment" fields. If conflicts exist, the existing mappings 8190 * are flushed unless one of them is locked. If one of them is locked, then 8191 * the mappings are flushed and converted to non-cacheable mappings. 8192 */ 8193 static void 8194 sfmmu_vac_conflict(struct hat *hat, caddr_t addr, page_t *pp) 8195 { 8196 struct hat *tmphat; 8197 struct sf_hment *sfhmep, *tmphme = NULL; 8198 struct hme_blk *hmeblkp; 8199 int vcolor; 8200 tte_t tte; 8201 8202 ASSERT(sfmmu_mlist_held(pp)); 8203 ASSERT(!PP_ISNC(pp)); /* page better be cacheable */ 8204 8205 vcolor = addr_to_vcolor(addr); 8206 if (PP_NEWPAGE(pp)) { 8207 PP_SET_VCOLOR(pp, vcolor); 8208 return; 8209 } 8210 8211 if (PP_GET_VCOLOR(pp) == vcolor) { 8212 return; 8213 } 8214 8215 if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) { 8216 /* 8217 * Previous user of page had a different color 8218 * but since there are no current users 8219 * we just flush the cache and change the color. 8220 */ 8221 SFMMU_STAT(sf_pgcolor_conflict); 8222 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp)); 8223 PP_SET_VCOLOR(pp, vcolor); 8224 return; 8225 } 8226 8227 /* 8228 * If we get here we have a vac conflict with a current 8229 * mapping. VAC conflict policy is as follows. 8230 * - The default is to unload the other mappings unless: 8231 * - If we have a large mapping we uncache the page. 8232 * We need to uncache the rest of the large page too. 8233 * - If any of the mappings are locked we uncache the page. 8234 * - If the requested mapping is inconsistent 8235 * with another mapping and that mapping 8236 * is in the same address space we have to 8237 * make it non-cached. The default thing 8238 * to do is unload the inconsistent mapping 8239 * but if they are in the same address space 8240 * we run the risk of unmapping the pc or the 8241 * stack which we will use as we return to the user, 8242 * in which case we can then fault on the thing 8243 * we just unloaded and get into an infinite loop. 8244 */ 8245 if (PP_ISMAPPED_LARGE(pp)) { 8246 int sz; 8247 8248 /* 8249 * Existing mapping is for big pages. We don't unload 8250 * existing big mappings to satisfy new mappings. 8251 * Always convert all mappings to TNC. 8252 */ 8253 sz = fnd_mapping_sz(pp); 8254 pp = PP_GROUPLEADER(pp, sz); 8255 SFMMU_STAT_ADD(sf_uncache_conflict, TTEPAGES(sz)); 8256 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 8257 TTEPAGES(sz)); 8258 8259 return; 8260 } 8261 8262 /* 8263 * Check if any mapping is in the same address space or is locked, 8264 * since in that case we need to uncache.
8265 */ 8266 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) { 8267 tmphme = sfhmep->hme_next; 8268 hmeblkp = sfmmu_hmetohblk(sfhmep); 8269 if (hmeblkp->hblk_xhat_bit) 8270 continue; 8271 tmphat = hblktosfmmu(hmeblkp); 8272 sfmmu_copytte(&sfhmep->hme_tte, &tte); 8273 ASSERT(TTE_IS_VALID(&tte)); 8274 if ((tmphat == hat) || hmeblkp->hblk_lckcnt) { 8275 /* 8276 * We have an uncache conflict 8277 */ 8278 SFMMU_STAT(sf_uncache_conflict); 8279 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 1); 8280 return; 8281 } 8282 } 8283 8284 /* 8285 * We have an unload conflict 8286 * We have already checked for LARGE mappings, therefore 8287 * the remaining mapping(s) must be TTE8K. 8288 */ 8289 SFMMU_STAT(sf_unload_conflict); 8290 8291 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) { 8292 tmphme = sfhmep->hme_next; 8293 hmeblkp = sfmmu_hmetohblk(sfhmep); 8294 if (hmeblkp->hblk_xhat_bit) 8295 continue; 8296 (void) sfmmu_pageunload(pp, sfhmep, TTE8K); 8297 } 8298 8299 if (PP_ISMAPPED_KPM(pp)) 8300 sfmmu_kpm_vac_unload(pp, addr); 8301 8302 /* 8303 * Unloads only do TLB flushes so we need to flush the 8304 * cache here. 8305 */ 8306 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp)); 8307 PP_SET_VCOLOR(pp, vcolor); 8308 } 8309 8310 /* 8311 * Whenever a mapping is unloaded and the page is in TNC state, 8312 * we see if the page can be made cacheable again. 'pp' is 8313 * the page that we just unloaded a mapping from, the size 8314 * of mapping that was unloaded is 'ottesz'. 8315 * Remark: 8316 * The recache policy for mpss pages can leave a performance problem 8317 * under the following circumstances: 8318 * . A large page in uncached mode has just been unmapped. 8319 * . All constituent pages are TNC due to a conflicting small mapping. 8320 * . There are many other, non conflicting, small mappings around for 8321 * a lot of the constituent pages. 8322 * . We're called w/ the "old" groupleader page and the old ottesz, 8323 * but this is irrelevant, since we're no more "PP_ISMAPPED_LARGE", so 8324 * we end up w/ TTE8K or npages == 1. 8325 * . We call tst_tnc w/ the old groupleader only, and if there is no 8326 * conflict, we re-cache only this page. 8327 * . All other small mappings are not checked and will be left in TNC mode. 8328 * The problem is not very serious because: 8329 * . mpss is actually only defined for heap and stack, so the probability 8330 * is not very high that a large page mapping exists in parallel to a small 8331 * one (this is possible, but seems to be bad programming style in the 8332 * appl). 8333 * . The problem gets a little bit more serious, when those TNC pages 8334 * have to be mapped into kernel space, e.g. for networking. 8335 * . When VAC alias conflicts occur in applications, this is regarded 8336 * as an application bug. So if kstat's show them, the appl should 8337 * be changed anyway. 8338 */ 8339 void 8340 conv_tnc(page_t *pp, int ottesz) 8341 { 8342 int cursz, dosz; 8343 pgcnt_t curnpgs, dopgs; 8344 pgcnt_t pg64k; 8345 page_t *pp2; 8346 8347 /* 8348 * Determine how big a range we check for TNC and find 8349 * leader page. cursz is the size of the biggest 8350 * mapping that still exist on 'pp'. 
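 *
 * A concrete example (illustrative numbers): if a 4M mapping was just
 * unloaded (ottesz == TTE4M) but the largest mapping still present is
 * 64K (cursz == TTE64K), the whole 4M span is checked, in chunks whose
 * size tracks the largest remaining mapping at each step.  Conversely,
 * if an 8K mapping was unloaded while a 512K mapping remains, we back
 * up to the 512K group leader and check that entire 512K range.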
8351 */ 8352 if (PP_ISMAPPED_LARGE(pp)) { 8353 cursz = fnd_mapping_sz(pp); 8354 } else { 8355 cursz = TTE8K; 8356 } 8357 8358 if (ottesz >= cursz) { 8359 dosz = ottesz; 8360 pp2 = pp; 8361 } else { 8362 dosz = cursz; 8363 pp2 = PP_GROUPLEADER(pp, dosz); 8364 } 8365 8366 pg64k = TTEPAGES(TTE64K); 8367 dopgs = TTEPAGES(dosz); 8368 8369 ASSERT(dopgs == 1 || ((dopgs & (pg64k - 1)) == 0)); 8370 8371 while (dopgs != 0) { 8372 curnpgs = TTEPAGES(cursz); 8373 if (tst_tnc(pp2, curnpgs)) { 8374 SFMMU_STAT_ADD(sf_recache, curnpgs); 8375 sfmmu_page_cache_array(pp2, HAT_CACHE, CACHE_NO_FLUSH, 8376 curnpgs); 8377 } 8378 8379 ASSERT(dopgs >= curnpgs); 8380 dopgs -= curnpgs; 8381 8382 if (dopgs == 0) { 8383 break; 8384 } 8385 8386 pp2 = PP_PAGENEXT_N(pp2, curnpgs); 8387 if (((dopgs & (pg64k - 1)) == 0) && PP_ISMAPPED_LARGE(pp2)) { 8388 cursz = fnd_mapping_sz(pp2); 8389 } else { 8390 cursz = TTE8K; 8391 } 8392 } 8393 } 8394 8395 /* 8396 * Returns 1 if page(s) can be converted from TNC to cacheable setting, 8397 * returns 0 otherwise. Note that oaddr argument is valid for only 8398 * 8k pages. 8399 */ 8400 int 8401 tst_tnc(page_t *pp, pgcnt_t npages) 8402 { 8403 struct sf_hment *sfhme; 8404 struct hme_blk *hmeblkp; 8405 tte_t tte; 8406 caddr_t vaddr; 8407 int clr_valid = 0; 8408 int color, color1, bcolor; 8409 int i, ncolors; 8410 8411 ASSERT(pp != NULL); 8412 ASSERT(!(cache & CACHE_WRITEBACK)); 8413 8414 if (npages > 1) { 8415 ncolors = CACHE_NUM_COLOR; 8416 } 8417 8418 for (i = 0; i < npages; i++) { 8419 ASSERT(sfmmu_mlist_held(pp)); 8420 ASSERT(PP_ISTNC(pp)); 8421 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR); 8422 8423 if (PP_ISPNC(pp)) { 8424 return (0); 8425 } 8426 8427 clr_valid = 0; 8428 if (PP_ISMAPPED_KPM(pp)) { 8429 caddr_t kpmvaddr; 8430 8431 ASSERT(kpm_enable); 8432 kpmvaddr = hat_kpm_page2va(pp, 1); 8433 ASSERT(!(npages > 1 && IS_KPM_ALIAS_RANGE(kpmvaddr))); 8434 color1 = addr_to_vcolor(kpmvaddr); 8435 clr_valid = 1; 8436 } 8437 8438 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) { 8439 hmeblkp = sfmmu_hmetohblk(sfhme); 8440 if (hmeblkp->hblk_xhat_bit) 8441 continue; 8442 8443 sfmmu_copytte(&sfhme->hme_tte, &tte); 8444 ASSERT(TTE_IS_VALID(&tte)); 8445 8446 vaddr = tte_to_vaddr(hmeblkp, tte); 8447 color = addr_to_vcolor(vaddr); 8448 8449 if (npages > 1) { 8450 /* 8451 * If there is a big mapping, make sure 8452 * 8K mapping is consistent with the big 8453 * mapping. 
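 *
 * "Consistent" here means the 8K mapping's virtual color must equal
 * the color constituent page i naturally gets inside the big page,
 * i.e. (i % ncolors).  For example (illustrative numbers), with a
 * 64K VAC and 8K pages (8 colors), constituent page 5 of a large page
 * can only be re-cached if every remaining 8K mapping of it has
 * virtual color 5.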
8454 */ 8455 bcolor = i % ncolors; 8456 if (color != bcolor) { 8457 return (0); 8458 } 8459 } 8460 if (!clr_valid) { 8461 clr_valid = 1; 8462 color1 = color; 8463 } 8464 8465 if (color1 != color) { 8466 return (0); 8467 } 8468 } 8469 8470 pp = PP_PAGENEXT(pp); 8471 } 8472 8473 return (1); 8474 } 8475 8476 void 8477 sfmmu_page_cache_array(page_t *pp, int flags, int cache_flush_flag, 8478 pgcnt_t npages) 8479 { 8480 kmutex_t *pmtx; 8481 int i, ncolors, bcolor; 8482 kpm_hlk_t *kpmp; 8483 cpuset_t cpuset; 8484 8485 ASSERT(pp != NULL); 8486 ASSERT(!(cache & CACHE_WRITEBACK)); 8487 8488 kpmp = sfmmu_kpm_kpmp_enter(pp, npages); 8489 pmtx = sfmmu_page_enter(pp); 8490 8491 /* 8492 * Fast path caching single unmapped page 8493 */ 8494 if (npages == 1 && !PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp) && 8495 flags == HAT_CACHE) { 8496 PP_CLRTNC(pp); 8497 PP_CLRPNC(pp); 8498 sfmmu_page_exit(pmtx); 8499 sfmmu_kpm_kpmp_exit(kpmp); 8500 return; 8501 } 8502 8503 /* 8504 * We need to capture all cpus in order to change cacheability 8505 * because we can't allow one cpu to access the same physical 8506 * page using a cacheable and a non-cacheable mapping at the same 8507 * time. Since we may end up walking the ism mapping list 8508 * we have to grab its lock now, since we can't after all the 8509 * cpus have been captured. 8510 */ 8511 sfmmu_hat_lock_all(); 8512 mutex_enter(&ism_mlist_lock); 8513 kpreempt_disable(); 8514 cpuset = cpu_ready_set; 8515 xc_attention(cpuset); 8516 8517 if (npages > 1) { 8518 /* 8519 * Make sure all colors are flushed since 8520 * sfmmu_page_cache() only flushes one color; 8521 * it does not know about big pages. 8522 */ 8523 ncolors = CACHE_NUM_COLOR; 8524 if (flags & HAT_TMPNC) { 8525 for (i = 0; i < ncolors; i++) { 8526 sfmmu_cache_flushcolor(i, pp->p_pagenum); 8527 } 8528 cache_flush_flag = CACHE_NO_FLUSH; 8529 } 8530 } 8531 8532 for (i = 0; i < npages; i++) { 8533 8534 ASSERT(sfmmu_mlist_held(pp)); 8535 8536 if (!(flags == HAT_TMPNC && PP_ISTNC(pp))) { 8537 8538 if (npages > 1) { 8539 bcolor = i % ncolors; 8540 } else { 8541 bcolor = NO_VCOLOR; 8542 } 8543 8544 sfmmu_page_cache(pp, flags, cache_flush_flag, 8545 bcolor); 8546 } 8547 8548 pp = PP_PAGENEXT(pp); 8549 } 8550 8551 xt_sync(cpuset); 8552 xc_dismissed(cpuset); 8553 mutex_exit(&ism_mlist_lock); 8554 sfmmu_hat_unlock_all(); 8555 sfmmu_page_exit(pmtx); 8556 sfmmu_kpm_kpmp_exit(kpmp); 8557 kpreempt_enable(); 8558 } 8559 8560 /* 8561 * This function changes the virtual cacheability of all mappings to a 8562 * particular page. When changing from uncache to cacheable the mappings will 8563 * only be changed if all of them have the same virtual color. 8564 * We need to flush the cache on all cpus. It is possible that 8565 * a process referenced a page as cacheable but has since exited 8566 * and cleared the mapping list. We still need to flush it but have no 8567 * state, so flushing on all cpus is the only alternative.
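 *
 * For reference, the resulting page state for each flag (set by the
 * switch at the end of this function) is:
 *
 *	HAT_CACHE	clear TNC and PNC, record the new virtual color
 *	HAT_TMPNC	set TNC, color becomes NO_VCOLOR
 *	HAT_UNCACHE	set PNC, clear TNC, color becomes NO_VCOLOR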
8568 */ 8569 static void 8570 sfmmu_page_cache(page_t *pp, int flags, int cache_flush_flag, int bcolor) 8571 { 8572 struct sf_hment *sfhme; 8573 struct hme_blk *hmeblkp; 8574 sfmmu_t *sfmmup; 8575 tte_t tte, ttemod; 8576 caddr_t vaddr; 8577 int ret, color; 8578 pfn_t pfn; 8579 8580 color = bcolor; 8581 pfn = pp->p_pagenum; 8582 8583 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) { 8584 8585 hmeblkp = sfmmu_hmetohblk(sfhme); 8586 8587 if (hmeblkp->hblk_xhat_bit) 8588 continue; 8589 8590 sfmmu_copytte(&sfhme->hme_tte, &tte); 8591 ASSERT(TTE_IS_VALID(&tte)); 8592 vaddr = tte_to_vaddr(hmeblkp, tte); 8593 color = addr_to_vcolor(vaddr); 8594 8595 #ifdef DEBUG 8596 if ((flags & HAT_CACHE) && bcolor != NO_VCOLOR) { 8597 ASSERT(color == bcolor); 8598 } 8599 #endif 8600 8601 ASSERT(flags != HAT_TMPNC || color == PP_GET_VCOLOR(pp)); 8602 8603 ttemod = tte; 8604 if (flags & (HAT_UNCACHE | HAT_TMPNC)) { 8605 TTE_CLR_VCACHEABLE(&ttemod); 8606 } else { /* flags & HAT_CACHE */ 8607 TTE_SET_VCACHEABLE(&ttemod); 8608 } 8609 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 8610 if (ret < 0) { 8611 /* 8612 * Since all cpus are captured modifytte should not 8613 * fail. 8614 */ 8615 panic("sfmmu_page_cache: write to tte failed"); 8616 } 8617 8618 sfmmup = hblktosfmmu(hmeblkp); 8619 if (cache_flush_flag == CACHE_FLUSH) { 8620 /* 8621 * Flush TSBs, TLBs and caches 8622 */ 8623 if (sfmmup->sfmmu_ismhat) { 8624 if (flags & HAT_CACHE) { 8625 SFMMU_STAT(sf_ism_recache); 8626 } else { 8627 SFMMU_STAT(sf_ism_uncache); 8628 } 8629 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp, 8630 pfn, CACHE_FLUSH); 8631 } else { 8632 sfmmu_tlbcache_demap(vaddr, sfmmup, hmeblkp, 8633 pfn, 0, FLUSH_ALL_CPUS, CACHE_FLUSH, 1); 8634 } 8635 8636 /* 8637 * all cache entries belonging to this pfn are 8638 * now flushed. 8639 */ 8640 cache_flush_flag = CACHE_NO_FLUSH; 8641 } else { 8642 8643 /* 8644 * Flush only TSBs and TLBs. 8645 */ 8646 if (sfmmup->sfmmu_ismhat) { 8647 if (flags & HAT_CACHE) { 8648 SFMMU_STAT(sf_ism_recache); 8649 } else { 8650 SFMMU_STAT(sf_ism_uncache); 8651 } 8652 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp, 8653 pfn, CACHE_NO_FLUSH); 8654 } else { 8655 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 1); 8656 } 8657 } 8658 } 8659 8660 if (PP_ISMAPPED_KPM(pp)) 8661 sfmmu_kpm_page_cache(pp, flags, cache_flush_flag); 8662 8663 switch (flags) { 8664 8665 default: 8666 panic("sfmmu_pagecache: unknown flags"); 8667 break; 8668 8669 case HAT_CACHE: 8670 PP_CLRTNC(pp); 8671 PP_CLRPNC(pp); 8672 PP_SET_VCOLOR(pp, color); 8673 break; 8674 8675 case HAT_TMPNC: 8676 PP_SETTNC(pp); 8677 PP_SET_VCOLOR(pp, NO_VCOLOR); 8678 break; 8679 8680 case HAT_UNCACHE: 8681 PP_SETPNC(pp); 8682 PP_CLRTNC(pp); 8683 PP_SET_VCOLOR(pp, NO_VCOLOR); 8684 break; 8685 } 8686 } 8687 #endif /* VAC */ 8688 8689 8690 /* 8691 * Wrapper routine used to return a context. 8692 * 8693 * It's the responsibility of the caller to guarantee that the 8694 * process serializes on calls here by taking the HAT lock for 8695 * the hat. 8696 * 8697 */ 8698 static void 8699 sfmmu_get_ctx(sfmmu_t *sfmmup) 8700 { 8701 mmu_ctx_t *mmu_ctxp; 8702 uint_t pstate_save; 8703 8704 ASSERT(sfmmu_hat_lock_held(sfmmup)); 8705 ASSERT(sfmmup != ksfmmup); 8706 8707 kpreempt_disable(); 8708 8709 mmu_ctxp = CPU_MMU_CTXP(CPU); 8710 ASSERT(mmu_ctxp); 8711 ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms); 8712 ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]); 8713 8714 /* 8715 * Do a wrap-around if cnum reaches the max # cnum supported by a MMU. 
8716 */ 8717 if (mmu_ctxp->mmu_cnum == mmu_ctxp->mmu_nctxs) 8718 sfmmu_ctx_wrap_around(mmu_ctxp); 8719 8720 /* 8721 * Let the MMU set up the page sizes to use for 8722 * this context in the TLB. Don't program 2nd dtlb for ism hat. 8723 */ 8724 if ((&mmu_set_ctx_page_sizes) && (sfmmup->sfmmu_ismhat == 0)) { 8725 mmu_set_ctx_page_sizes(sfmmup); 8726 } 8727 8728 /* 8729 * sfmmu_alloc_ctx and sfmmu_load_mmustate will be performed with 8730 * interrupts disabled to prevent race condition with wrap-around 8731 * ctx invalidatation. In sun4v, ctx invalidation also involves 8732 * a HV call to set the number of TSBs to 0. If interrupts are not 8733 * disabled until after sfmmu_load_mmustate is complete TSBs may 8734 * become assigned to INVALID_CONTEXT. This is not allowed. 8735 */ 8736 pstate_save = sfmmu_disable_intrs(); 8737 8738 sfmmu_alloc_ctx(sfmmup, 1, CPU); 8739 sfmmu_load_mmustate(sfmmup); 8740 8741 sfmmu_enable_intrs(pstate_save); 8742 8743 kpreempt_enable(); 8744 } 8745 8746 /* 8747 * When all cnums are used up in a MMU, cnum will wrap around to the 8748 * next generation and start from 2. 8749 */ 8750 static void 8751 sfmmu_ctx_wrap_around(mmu_ctx_t *mmu_ctxp) 8752 { 8753 8754 /* caller must have disabled the preemption */ 8755 ASSERT(curthread->t_preempt >= 1); 8756 ASSERT(mmu_ctxp != NULL); 8757 8758 /* acquire Per-MMU (PM) spin lock */ 8759 mutex_enter(&mmu_ctxp->mmu_lock); 8760 8761 /* re-check to see if wrap-around is needed */ 8762 if (mmu_ctxp->mmu_cnum < mmu_ctxp->mmu_nctxs) 8763 goto done; 8764 8765 SFMMU_MMU_STAT(mmu_wrap_around); 8766 8767 /* update gnum */ 8768 ASSERT(mmu_ctxp->mmu_gnum != 0); 8769 mmu_ctxp->mmu_gnum++; 8770 if (mmu_ctxp->mmu_gnum == 0 || 8771 mmu_ctxp->mmu_gnum > MAX_SFMMU_GNUM_VAL) { 8772 cmn_err(CE_PANIC, "mmu_gnum of mmu_ctx 0x%p is out of bound.", 8773 (void *)mmu_ctxp); 8774 } 8775 8776 if (mmu_ctxp->mmu_ncpus > 1) { 8777 cpuset_t cpuset; 8778 8779 membar_enter(); /* make sure updated gnum visible */ 8780 8781 SFMMU_XCALL_STATS(NULL); 8782 8783 /* xcall to others on the same MMU to invalidate ctx */ 8784 cpuset = mmu_ctxp->mmu_cpuset; 8785 ASSERT(CPU_IN_SET(cpuset, CPU->cpu_id)); 8786 CPUSET_DEL(cpuset, CPU->cpu_id); 8787 CPUSET_AND(cpuset, cpu_ready_set); 8788 8789 /* 8790 * Pass in INVALID_CONTEXT as the first parameter to 8791 * sfmmu_raise_tsb_exception, which invalidates the context 8792 * of any process running on the CPUs in the MMU. 8793 */ 8794 xt_some(cpuset, sfmmu_raise_tsb_exception, 8795 INVALID_CONTEXT, INVALID_CONTEXT); 8796 xt_sync(cpuset); 8797 8798 SFMMU_MMU_STAT(mmu_tsb_raise_exception); 8799 } 8800 8801 if (sfmmu_getctx_sec() != INVALID_CONTEXT) { 8802 sfmmu_setctx_sec(INVALID_CONTEXT); 8803 sfmmu_clear_utsbinfo(); 8804 } 8805 8806 /* 8807 * No xcall is needed here. For sun4u systems all CPUs in context 8808 * domain share a single physical MMU therefore it's enough to flush 8809 * TLB on local CPU. On sun4v systems we use 1 global context 8810 * domain and flush all remote TLBs in sfmmu_raise_tsb_exception 8811 * handler. Note that vtag_flushall_uctxs() is called 8812 * for Ultra II machine, where the equivalent flushall functionality 8813 * is implemented in SW, and only user ctx TLB entries are flushed. 
8814 */ 8815 if (&vtag_flushall_uctxs != NULL) { 8816 vtag_flushall_uctxs(); 8817 } else { 8818 vtag_flushall(); 8819 } 8820 8821 /* reset mmu cnum, skips cnum 0 and 1 */ 8822 mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS; 8823 8824 done: 8825 mutex_exit(&mmu_ctxp->mmu_lock); 8826 } 8827 8828 8829 /* 8830 * For a multi-threaded process, set the process context to INVALID_CONTEXT 8831 * so that it faults and reloads the MMU state from TL=0. For a single-threaded 8832 * process, we can just load the MMU state directly without having to 8833 * set context invalid. Caller must hold the hat lock since we don't 8834 * acquire it here. 8835 */ 8836 static void 8837 sfmmu_sync_mmustate(sfmmu_t *sfmmup) 8838 { 8839 uint_t cnum; 8840 uint_t pstate_save; 8841 8842 ASSERT(sfmmup != ksfmmup); 8843 ASSERT(sfmmu_hat_lock_held(sfmmup)); 8844 8845 kpreempt_disable(); 8846 8847 /* 8848 * We check whether the passed-in sfmmup is the same as the 8849 * currently running proc. This is to make sure the current proc 8850 * stays single-threaded if it already is. 8851 */ 8852 if ((sfmmup == curthread->t_procp->p_as->a_hat) && 8853 (curthread->t_procp->p_lwpcnt == 1)) { 8854 /* single-thread */ 8855 cnum = sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum; 8856 if (cnum != INVALID_CONTEXT) { 8857 uint_t curcnum; 8858 /* 8859 * Disable interrupts to prevent race condition 8860 * with sfmmu_ctx_wrap_around ctx invalidation. 8861 * In sun4v, ctx invalidation involves setting 8862 * TSB to NULL, hence, interrupts should be disabled 8863 * until after sfmmu_load_mmustate is completed. 8864 */ 8865 pstate_save = sfmmu_disable_intrs(); 8866 curcnum = sfmmu_getctx_sec(); 8867 if (curcnum == cnum) 8868 sfmmu_load_mmustate(sfmmup); 8869 sfmmu_enable_intrs(pstate_save); 8870 ASSERT(curcnum == cnum || curcnum == INVALID_CONTEXT); 8871 } 8872 } else { 8873 /* 8874 * multi-thread 8875 * or when sfmmup is not the same as the curproc. 8876 */ 8877 sfmmu_invalidate_ctx(sfmmup); 8878 } 8879 8880 kpreempt_enable(); 8881 } 8882 8883 8884 /* 8885 * Replace the specified TSB with a new TSB. This function gets called when 8886 * we grow, shrink or swapin a TSB. When swapping in a TSB (TSB_SWAPIN), the 8887 * TSB_FORCEALLOC flag may be used to force allocation of a minimum-sized TSB 8888 * (8K). 8889 * 8890 * Caller must hold the HAT lock, but should assume any tsb_info 8891 * pointers it has are no longer valid after calling this function. 8892 * 8893 * Return values: 8894 * TSB_ALLOCFAIL Failed to allocate a TSB, due to memory constraints 8895 * TSB_LOSTRACE HAT is busy, i.e. another thread is already doing 8896 * something to this tsbinfo/TSB 8897 * TSB_SUCCESS Operation succeeded 8898 */ 8899 static tsb_replace_rc_t 8900 sfmmu_replace_tsb(sfmmu_t *sfmmup, struct tsb_info *old_tsbinfo, uint_t szc, 8901 hatlock_t *hatlockp, uint_t flags) 8902 { 8903 struct tsb_info *new_tsbinfo = NULL; 8904 struct tsb_info *curtsb, *prevtsb; 8905 uint_t tte_sz_mask; 8906 int i; 8907 8908 ASSERT(sfmmup != ksfmmup); 8909 ASSERT(sfmmup->sfmmu_ismhat == 0); 8910 ASSERT(sfmmu_hat_lock_held(sfmmup)); 8911 ASSERT(szc <= tsb_max_growsize); 8912 8913 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_BUSY)) 8914 return (TSB_LOSTRACE); 8915 8916 /* 8917 * Find the tsb_info ahead of this one in the list, and 8918 * also make sure that the tsb_info passed in really 8919 * exists!
8920 */ 8921 for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb; 8922 curtsb != old_tsbinfo && curtsb != NULL; 8923 prevtsb = curtsb, curtsb = curtsb->tsb_next); 8924 ASSERT(curtsb != NULL); 8925 8926 if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 8927 /* 8928 * The process is swapped out, so just set the new size 8929 * code. When it swaps back in, we'll allocate a new one 8930 * of the new chosen size. 8931 */ 8932 curtsb->tsb_szc = szc; 8933 return (TSB_SUCCESS); 8934 } 8935 SFMMU_FLAGS_SET(sfmmup, HAT_BUSY); 8936 8937 tte_sz_mask = old_tsbinfo->tsb_ttesz_mask; 8938 8939 /* 8940 * All initialization is done inside of sfmmu_tsbinfo_alloc(). 8941 * If we fail to allocate a TSB, exit. 8942 */ 8943 sfmmu_hat_exit(hatlockp); 8944 if (sfmmu_tsbinfo_alloc(&new_tsbinfo, szc, tte_sz_mask, 8945 flags, sfmmup)) { 8946 (void) sfmmu_hat_enter(sfmmup); 8947 if (!(flags & TSB_SWAPIN)) 8948 SFMMU_STAT(sf_tsb_resize_failures); 8949 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 8950 return (TSB_ALLOCFAIL); 8951 } 8952 (void) sfmmu_hat_enter(sfmmup); 8953 8954 /* 8955 * Re-check to make sure somebody else didn't muck with us while we 8956 * didn't hold the HAT lock. If the process swapped out, fine, just 8957 * exit; this can happen if we try to shrink the TSB from the context 8958 * of another process (such as on an ISM unmap), though it is rare. 8959 */ 8960 if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 8961 SFMMU_STAT(sf_tsb_resize_failures); 8962 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 8963 sfmmu_hat_exit(hatlockp); 8964 sfmmu_tsbinfo_free(new_tsbinfo); 8965 (void) sfmmu_hat_enter(sfmmup); 8966 return (TSB_LOSTRACE); 8967 } 8968 8969 #ifdef DEBUG 8970 /* Reverify that the tsb_info still exists.. for debugging only */ 8971 for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb; 8972 curtsb != old_tsbinfo && curtsb != NULL; 8973 prevtsb = curtsb, curtsb = curtsb->tsb_next); 8974 ASSERT(curtsb != NULL); 8975 #endif /* DEBUG */ 8976 8977 /* 8978 * Quiesce any CPUs running this process on their next TLB miss 8979 * so they atomically see the new tsb_info. We temporarily set the 8980 * context to invalid context so new threads that come on processor 8981 * after we do the xcall to cpusran will also serialize behind the 8982 * HAT lock on TLB miss and will see the new TSB. Since this short 8983 * race with a new thread coming on processor is relatively rare, 8984 * this synchronization mechanism should be cheaper than always 8985 * pausing all CPUs for the duration of the setup, which is what 8986 * the old implementation did. This is particuarly true if we are 8987 * copying a huge chunk of memory around during that window. 8988 * 8989 * The memory barriers are to make sure things stay consistent 8990 * with resume() since it does not hold the HAT lock while 8991 * walking the list of tsb_info structures. 8992 */ 8993 if ((flags & TSB_SWAPIN) != TSB_SWAPIN) { 8994 /* The TSB is either growing or shrinking. */ 8995 sfmmu_invalidate_ctx(sfmmup); 8996 } else { 8997 /* 8998 * It is illegal to swap in TSBs from a process other 8999 * than a process being swapped in. This in turn 9000 * implies we do not have a valid MMU context here 9001 * since a process needs one to resolve translation 9002 * misses. 
9003 */ 9004 ASSERT(curthread->t_procp->p_as->a_hat == sfmmup); 9005 } 9006 9007 #ifdef DEBUG 9008 ASSERT(max_mmu_ctxdoms > 0); 9009 9010 /* 9011 * Process should have INVALID_CONTEXT on all MMUs 9012 */ 9013 for (i = 0; i < max_mmu_ctxdoms; i++) { 9014 9015 ASSERT(sfmmup->sfmmu_ctxs[i].cnum == INVALID_CONTEXT); 9016 } 9017 #endif 9018 9019 new_tsbinfo->tsb_next = old_tsbinfo->tsb_next; 9020 membar_stst(); /* strict ordering required */ 9021 if (prevtsb) 9022 prevtsb->tsb_next = new_tsbinfo; 9023 else 9024 sfmmup->sfmmu_tsb = new_tsbinfo; 9025 membar_enter(); /* make sure new TSB globally visible */ 9026 sfmmu_setup_tsbinfo(sfmmup); 9027 9028 /* 9029 * We need to migrate TSB entries from the old TSB to the new TSB 9030 * if tsb_remap_ttes is set and the TSB is growing. 9031 */ 9032 if (tsb_remap_ttes && ((flags & TSB_GROW) == TSB_GROW)) 9033 sfmmu_copy_tsb(old_tsbinfo, new_tsbinfo); 9034 9035 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 9036 9037 /* 9038 * Drop the HAT lock to free our old tsb_info. 9039 */ 9040 sfmmu_hat_exit(hatlockp); 9041 9042 if ((flags & TSB_GROW) == TSB_GROW) { 9043 SFMMU_STAT(sf_tsb_grow); 9044 } else if ((flags & TSB_SHRINK) == TSB_SHRINK) { 9045 SFMMU_STAT(sf_tsb_shrink); 9046 } 9047 9048 sfmmu_tsbinfo_free(old_tsbinfo); 9049 9050 (void) sfmmu_hat_enter(sfmmup); 9051 return (TSB_SUCCESS); 9052 } 9053 9054 /* 9055 * This function will re-program hat pgsz array, and invalidate the 9056 * process' context, forcing the process to switch to another 9057 * context on the next TLB miss, and therefore start using the 9058 * TLB that is reprogrammed for the new page sizes. 9059 */ 9060 void 9061 sfmmu_reprog_pgsz_arr(sfmmu_t *sfmmup, uint8_t *tmp_pgsz) 9062 { 9063 int i; 9064 hatlock_t *hatlockp = NULL; 9065 9066 hatlockp = sfmmu_hat_enter(sfmmup); 9067 /* USIII+-IV+ optimization, requires hat lock */ 9068 if (tmp_pgsz) { 9069 for (i = 0; i < mmu_page_sizes; i++) 9070 sfmmup->sfmmu_pgsz[i] = tmp_pgsz[i]; 9071 } 9072 SFMMU_STAT(sf_tlb_reprog_pgsz); 9073 9074 sfmmu_invalidate_ctx(sfmmup); 9075 9076 sfmmu_hat_exit(hatlockp); 9077 } 9078 9079 /* 9080 * This function assumes that there are either four or six supported page 9081 * sizes and at most two programmable TLBs, so we need to decide which 9082 * page sizes are most important and then tell the MMU layer so it 9083 * can adjust the TLB page sizes accordingly (if supported). 9084 * 9085 * If these assumptions change, this function will need to be 9086 * updated to support whatever the new limits are. 9087 * 9088 * The growing flag is nonzero if we are growing the address space, 9089 * and zero if it is shrinking. This allows us to decide whether 9090 * to grow or shrink our TSB, depending upon available memory 9091 * conditions. 9092 */ 9093 static void 9094 sfmmu_check_page_sizes(sfmmu_t *sfmmup, int growing) 9095 { 9096 uint64_t ttecnt[MMU_PAGE_SIZES]; 9097 uint64_t tte8k_cnt, tte4m_cnt; 9098 uint8_t i; 9099 int sectsb_thresh; 9100 9101 /* 9102 * Kernel threads, processes with small address spaces not using 9103 * large pages, and dummy ISM HATs need not apply. 9104 */ 9105 if (sfmmup == ksfmmup || sfmmup->sfmmu_ismhat != NULL) 9106 return; 9107 9108 if ((sfmmup->sfmmu_flags & HAT_LGPG_FLAGS) == 0 && 9109 sfmmup->sfmmu_ttecnt[TTE8K] <= tsb_rss_factor) 9110 return; 9111 9112 for (i = 0; i < mmu_page_sizes; i++) { 9113 ttecnt[i] = SFMMU_TTE_CNT(sfmmup, i); 9114 } 9115 9116 /* Check pagesizes in use, and possibly reprogram DTLB. 
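 *
 * The counts computed just below fold every small mapping into an
 * equivalent number of 8K ttes and every large mapping into an
 * equivalent number of 4M ttes.  For example (illustrative): one 512K
 * tte contributes 64 to tte8k_cnt (512K/8K, i.e. 1 << (MMU_PAGESHIFT512K
 * - MMU_PAGESHIFT)), and one 32M tte contributes 8 to tte4m_cnt
 * (32M/4M).  The TSB sizing decisions are then made from these two
 * aggregate counts.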
*/ 9117 if (&mmu_check_page_sizes) 9118 mmu_check_page_sizes(sfmmup, ttecnt); 9119 9120 /* 9121 * Calculate the number of 8k ttes to represent the span of these 9122 * pages. 9123 */ 9124 tte8k_cnt = ttecnt[TTE8K] + 9125 (ttecnt[TTE64K] << (MMU_PAGESHIFT64K - MMU_PAGESHIFT)) + 9126 (ttecnt[TTE512K] << (MMU_PAGESHIFT512K - MMU_PAGESHIFT)); 9127 if (mmu_page_sizes == max_mmu_page_sizes) { 9128 tte4m_cnt = ttecnt[TTE4M] + 9129 (ttecnt[TTE32M] << (MMU_PAGESHIFT32M - MMU_PAGESHIFT4M)) + 9130 (ttecnt[TTE256M] << (MMU_PAGESHIFT256M - MMU_PAGESHIFT4M)); 9131 } else { 9132 tte4m_cnt = ttecnt[TTE4M]; 9133 } 9134 9135 /* 9136 * Inflate TSB sizes by a factor of 2 if this process 9137 * uses 4M text pages to minimize extra conflict misses 9138 * in the first TSB since without counting text pages 9139 * 8K TSB may become too small. 9140 * 9141 * Also double the size of the second TSB to minimize 9142 * extra conflict misses due to competition between 4M text pages 9143 * and data pages. 9144 * 9145 * We need to adjust the second TSB allocation threshold by the 9146 * inflation factor, since there is no point in creating a second 9147 * TSB when we know all the mappings can fit in the I/D TLBs. 9148 */ 9149 sectsb_thresh = tsb_sectsb_threshold; 9150 if (sfmmup->sfmmu_flags & HAT_4MTEXT_FLAG) { 9151 tte8k_cnt <<= 1; 9152 tte4m_cnt <<= 1; 9153 sectsb_thresh <<= 1; 9154 } 9155 9156 /* 9157 * Check to see if our TSB is the right size; we may need to 9158 * grow or shrink it. If the process is small, our work is 9159 * finished at this point. 9160 */ 9161 if (tte8k_cnt <= tsb_rss_factor && tte4m_cnt <= sectsb_thresh) { 9162 return; 9163 } 9164 sfmmu_size_tsb(sfmmup, growing, tte8k_cnt, tte4m_cnt, sectsb_thresh); 9165 } 9166 9167 static void 9168 sfmmu_size_tsb(sfmmu_t *sfmmup, int growing, uint64_t tte8k_cnt, 9169 uint64_t tte4m_cnt, int sectsb_thresh) 9170 { 9171 int tsb_bits; 9172 uint_t tsb_szc; 9173 struct tsb_info *tsbinfop; 9174 hatlock_t *hatlockp = NULL; 9175 9176 hatlockp = sfmmu_hat_enter(sfmmup); 9177 ASSERT(hatlockp != NULL); 9178 tsbinfop = sfmmup->sfmmu_tsb; 9179 ASSERT(tsbinfop != NULL); 9180 9181 /* 9182 * If we're growing, select the size based on RSS. If we're 9183 * shrinking, leave some room so we don't have to turn around and 9184 * grow again immediately. 9185 */ 9186 if (growing) 9187 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt); 9188 else 9189 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt << 1); 9190 9191 if (!growing && (tsb_szc < tsbinfop->tsb_szc) && 9192 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) { 9193 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc, 9194 hatlockp, TSB_SHRINK); 9195 } else if (growing && tsb_szc > tsbinfop->tsb_szc && TSB_OK_GROW()) { 9196 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc, 9197 hatlockp, TSB_GROW); 9198 } 9199 tsbinfop = sfmmup->sfmmu_tsb; 9200 9201 /* 9202 * With the TLB and first TSB out of the way, we need to see if 9203 * we need a second TSB for 4M pages. If we managed to reprogram 9204 * the TLB page sizes above, the process will start using this new 9205 * TSB right away; otherwise, it will start using it on the next 9206 * context switch. Either way, it's no big deal so there's no 9207 * synchronization with the trap handlers here unless we grow the 9208 * TSB (in which case it's required to prevent using the old one 9209 * after it's freed). Note: second tsb is required for 32M/256M 9210 * page sizes. 9211 */ 9212 if (tte4m_cnt > sectsb_thresh) { 9213 /* 9214 * If we're growing, select the size based on RSS. 
If we're 9215 * shrinking, leave some room so we don't have to turn 9216 * around and grow again immediately. 9217 */ 9218 if (growing) 9219 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt); 9220 else 9221 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt << 1); 9222 if (tsbinfop->tsb_next == NULL) { 9223 struct tsb_info *newtsb; 9224 int allocflags = SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)? 9225 0 : TSB_ALLOC; 9226 9227 sfmmu_hat_exit(hatlockp); 9228 9229 /* 9230 * Try to allocate a TSB for 4[32|256]M pages. If we 9231 * can't get the size we want, retry w/a minimum sized 9232 * TSB. If that still didn't work, give up; we can 9233 * still run without one. 9234 */ 9235 tsb_bits = (mmu_page_sizes == max_mmu_page_sizes)? 9236 TSB4M|TSB32M|TSB256M:TSB4M; 9237 if ((sfmmu_tsbinfo_alloc(&newtsb, tsb_szc, tsb_bits, 9238 allocflags, sfmmup) != 0) && 9239 (sfmmu_tsbinfo_alloc(&newtsb, TSB_MIN_SZCODE, 9240 tsb_bits, allocflags, sfmmup) != 0)) { 9241 return; 9242 } 9243 9244 hatlockp = sfmmu_hat_enter(sfmmup); 9245 9246 if (sfmmup->sfmmu_tsb->tsb_next == NULL) { 9247 sfmmup->sfmmu_tsb->tsb_next = newtsb; 9248 SFMMU_STAT(sf_tsb_sectsb_create); 9249 sfmmu_setup_tsbinfo(sfmmup); 9250 sfmmu_hat_exit(hatlockp); 9251 return; 9252 } else { 9253 /* 9254 * It's annoying, but possible for us 9255 * to get here.. we dropped the HAT lock 9256 * because of locking order in the kmem 9257 * allocator, and while we were off getting 9258 * our memory, some other thread decided to 9259 * do us a favor and won the race to get a 9260 * second TSB for this process. Sigh. 9261 */ 9262 sfmmu_hat_exit(hatlockp); 9263 sfmmu_tsbinfo_free(newtsb); 9264 return; 9265 } 9266 } 9267 9268 /* 9269 * We have a second TSB, see if it's big enough. 9270 */ 9271 tsbinfop = tsbinfop->tsb_next; 9272 9273 /* 9274 * Check to see if our second TSB is the right size; 9275 * we may need to grow or shrink it. 9276 * To prevent thrashing (e.g. growing the TSB on a 9277 * subsequent map operation), only try to shrink if 9278 * the TSB reach exceeds twice the virtual address 9279 * space size. 9280 */ 9281 if (!growing && (tsb_szc < tsbinfop->tsb_szc) && 9282 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) { 9283 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, 9284 tsb_szc, hatlockp, TSB_SHRINK); 9285 } else if (growing && tsb_szc > tsbinfop->tsb_szc && 9286 TSB_OK_GROW()) { 9287 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, 9288 tsb_szc, hatlockp, TSB_GROW); 9289 } 9290 } 9291 9292 sfmmu_hat_exit(hatlockp); 9293 } 9294 9295 /* 9296 * Get the preferred page size code for a hat. 9297 * This is only advice, so locking is not done; 9298 * this transitory information could change 9299 * following the call anyway. This interface is 9300 * sun4 private. 
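 *
 * The selection below simply walks from the largest enabled page size
 * down and returns the first size that both fits within the mapping
 * and matches its alignment.  For example (illustrative): an ISM
 * segment of 6M that is 4M-aligned prefers TTE4M, while an 8M segment
 * that is only 512K-aligned falls back to the floor for its map type
 * (TTE4M for ISM, TTE8K otherwise).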
9301 */ 9302 /*ARGSUSED*/ 9303 uint_t 9304 hat_preferred_pgsz(struct hat *hat, caddr_t vaddr, size_t maplen, int maptype) 9305 { 9306 sfmmu_t *sfmmup = (sfmmu_t *)hat; 9307 uint_t szc, maxszc = mmu_page_sizes - 1; 9308 size_t pgsz; 9309 9310 if (maptype == MAPPGSZ_ISM) { 9311 for (szc = maxszc; szc >= TTE4M; szc--) { 9312 if (disable_ism_large_pages & (1 << szc)) 9313 continue; 9314 9315 pgsz = hw_page_array[szc].hp_size; 9316 if ((maplen >= pgsz) && IS_P2ALIGNED(vaddr, pgsz)) 9317 return (szc); 9318 } 9319 return (TTE4M); 9320 } else if (&mmu_preferred_pgsz) { /* USIII+-USIV+ */ 9321 return (mmu_preferred_pgsz(sfmmup, vaddr, maplen)); 9322 } else { /* USIII, USII, Niagara */ 9323 for (szc = maxszc; szc > TTE8K; szc--) { 9324 if (disable_large_pages & (1 << szc)) 9325 continue; 9326 9327 pgsz = hw_page_array[szc].hp_size; 9328 if ((maplen >= pgsz) && IS_P2ALIGNED(vaddr, pgsz)) 9329 return (szc); 9330 } 9331 return (TTE8K); 9332 } 9333 } 9334 9335 /* 9336 * Free up a sfmmu 9337 * Since the sfmmu is currently embedded in the hat struct we simply zero 9338 * out our fields and free up the ism map blk list if any. 9339 */ 9340 static void 9341 sfmmu_free_sfmmu(sfmmu_t *sfmmup) 9342 { 9343 ism_blk_t *blkp, *nx_blkp; 9344 #ifdef DEBUG 9345 ism_map_t *map; 9346 int i; 9347 #endif 9348 9349 ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0); 9350 ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0); 9351 ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0); 9352 ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0); 9353 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0); 9354 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0); 9355 9356 sfmmup->sfmmu_free = 0; 9357 sfmmup->sfmmu_ismhat = 0; 9358 9359 blkp = sfmmup->sfmmu_iblk; 9360 sfmmup->sfmmu_iblk = NULL; 9361 9362 while (blkp) { 9363 #ifdef DEBUG 9364 map = blkp->iblk_maps; 9365 for (i = 0; i < ISM_MAP_SLOTS; i++) { 9366 ASSERT(map[i].imap_seg == 0); 9367 ASSERT(map[i].imap_ismhat == NULL); 9368 ASSERT(map[i].imap_ment == NULL); 9369 } 9370 #endif 9371 nx_blkp = blkp->iblk_next; 9372 blkp->iblk_next = NULL; 9373 blkp->iblk_nextpa = (uint64_t)-1; 9374 kmem_cache_free(ism_blk_cache, blkp); 9375 blkp = nx_blkp; 9376 } 9377 } 9378 9379 /* 9380 * Locking primitves accessed by HATLOCK macros 9381 */ 9382 9383 #define SFMMU_SPL_MTX (0x0) 9384 #define SFMMU_ML_MTX (0x1) 9385 9386 #define SFMMU_MLSPL_MTX(type, pg) (((type) == SFMMU_SPL_MTX) ? \ 9387 SPL_HASH(pg) : MLIST_HASH(pg)) 9388 9389 kmutex_t * 9390 sfmmu_page_enter(struct page *pp) 9391 { 9392 return (sfmmu_mlspl_enter(pp, SFMMU_SPL_MTX)); 9393 } 9394 9395 void 9396 sfmmu_page_exit(kmutex_t *spl) 9397 { 9398 mutex_exit(spl); 9399 } 9400 9401 int 9402 sfmmu_page_spl_held(struct page *pp) 9403 { 9404 return (sfmmu_mlspl_held(pp, SFMMU_SPL_MTX)); 9405 } 9406 9407 kmutex_t * 9408 sfmmu_mlist_enter(struct page *pp) 9409 { 9410 return (sfmmu_mlspl_enter(pp, SFMMU_ML_MTX)); 9411 } 9412 9413 void 9414 sfmmu_mlist_exit(kmutex_t *mml) 9415 { 9416 mutex_exit(mml); 9417 } 9418 9419 int 9420 sfmmu_mlist_held(struct page *pp) 9421 { 9422 9423 return (sfmmu_mlspl_held(pp, SFMMU_ML_MTX)); 9424 } 9425 9426 /* 9427 * Common code for sfmmu_mlist_enter() and sfmmu_page_enter(). For 9428 * sfmmu_mlist_enter() case mml_table lock array is used and for 9429 * sfmmu_page_enter() sfmmu_page_lock lock array is used. 9430 * 9431 * The lock is taken on a root page so that it protects an operation on all 9432 * constituent pages of a large page pp belongs to. 9433 * 9434 * The routine takes a lock from the appropriate array. The lock is determined 9435 * by hashing the root page. 
After taking the lock this routine checks if the 9436 * root page has the same size code that was used to determine the root (i.e 9437 * that root hasn't changed). If root page has the expected p_szc field we 9438 * have the right lock and it's returned to the caller. If root's p_szc 9439 * decreased we release the lock and retry from the beginning. This case can 9440 * happen due to hat_page_demote() decreasing p_szc between our load of p_szc 9441 * value and taking the lock. The number of retries due to p_szc decrease is 9442 * limited by the maximum p_szc value. If p_szc is 0 we return the lock 9443 * determined by hashing pp itself. 9444 * 9445 * If our caller doesn't hold a SE_SHARED or SE_EXCL lock on pp it's also 9446 * possible that p_szc can increase. To increase p_szc a thread has to lock 9447 * all constituent pages EXCL and do hat_pageunload() on all of them. All the 9448 * callers that don't hold a page locked recheck if hmeblk through which pp 9449 * was found still maps this pp. If it doesn't map it anymore returned lock 9450 * is immediately dropped. Therefore if sfmmu_mlspl_enter() hits the case of 9451 * p_szc increase after taking the lock it returns this lock without further 9452 * retries because in this case the caller doesn't care about which lock was 9453 * taken. The caller will drop it right away. 9454 * 9455 * After the routine returns it's guaranteed that hat_page_demote() can't 9456 * change p_szc field of any of constituent pages of a large page pp belongs 9457 * to as long as pp was either locked at least SHARED prior to this call or 9458 * the caller finds that hment that pointed to this pp still references this 9459 * pp (this also assumes that the caller holds hme hash bucket lock so that 9460 * the same pp can't be remapped into the same hmeblk after it was unmapped by 9461 * hat_pageunload()). 9462 */ 9463 static kmutex_t * 9464 sfmmu_mlspl_enter(struct page *pp, int type) 9465 { 9466 kmutex_t *mtx; 9467 uint_t prev_rszc = UINT_MAX; 9468 page_t *rootpp; 9469 uint_t szc; 9470 uint_t rszc; 9471 uint_t pszc = pp->p_szc; 9472 9473 ASSERT(pp != NULL); 9474 9475 again: 9476 if (pszc == 0) { 9477 mtx = SFMMU_MLSPL_MTX(type, pp); 9478 mutex_enter(mtx); 9479 return (mtx); 9480 } 9481 9482 /* The lock lives in the root page */ 9483 rootpp = PP_GROUPLEADER(pp, pszc); 9484 mtx = SFMMU_MLSPL_MTX(type, rootpp); 9485 mutex_enter(mtx); 9486 9487 /* 9488 * Return mml in the following 3 cases: 9489 * 9490 * 1) If pp itself is root since if its p_szc decreased before we took 9491 * the lock pp is still the root of smaller szc page. And if its p_szc 9492 * increased it doesn't matter what lock we return (see comment in 9493 * front of this routine). 9494 * 9495 * 2) If pp's not root but rootpp is the root of a rootpp->p_szc size 9496 * large page we have the right lock since any previous potential 9497 * hat_page_demote() is done demoting from greater than current root's 9498 * p_szc because hat_page_demote() changes root's p_szc last. No 9499 * further hat_page_demote() can start or be in progress since it 9500 * would need the same lock we currently hold. 9501 * 9502 * 3) If rootpp's p_szc increased since previous iteration it doesn't 9503 * matter what lock we return (see comment in front of this routine). 9504 */ 9505 if (pp == rootpp || (rszc = rootpp->p_szc) == pszc || 9506 rszc >= prev_rszc) { 9507 return (mtx); 9508 } 9509 9510 /* 9511 * hat_page_demote() could have decreased root's p_szc. 9512 * In this case pp's p_szc must also be smaller than pszc. 9513 * Retry. 
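 *
 * For example (illustrative szc values): pp was part of a 4M page
 * (pszc == 3) when we sampled p_szc, but hat_page_demote() finished
 * and left rszc == 0.  If pp->p_szc has also dropped we simply retry
 * with the new, smaller pszc; if pp->p_szc did not drop, the page
 * cannot still be mapped through the hment our caller found, so the
 * lock we already hold is returned and the caller will drop it anyway.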
9514 */ 9515 if (rszc < pszc) { 9516 szc = pp->p_szc; 9517 if (szc < pszc) { 9518 mutex_exit(mtx); 9519 pszc = szc; 9520 goto again; 9521 } 9522 /* 9523 * pp's p_szc increased after it was decreased. 9524 * page cannot be mapped. Return current lock. The caller 9525 * will drop it right away. 9526 */ 9527 return (mtx); 9528 } 9529 9530 /* 9531 * root's p_szc is greater than pp's p_szc. 9532 * hat_page_demote() is not done with all pages 9533 * yet. Wait for it to complete. 9534 */ 9535 mutex_exit(mtx); 9536 rootpp = PP_GROUPLEADER(rootpp, rszc); 9537 mtx = SFMMU_MLSPL_MTX(type, rootpp); 9538 mutex_enter(mtx); 9539 mutex_exit(mtx); 9540 prev_rszc = rszc; 9541 goto again; 9542 } 9543 9544 static int 9545 sfmmu_mlspl_held(struct page *pp, int type) 9546 { 9547 kmutex_t *mtx; 9548 9549 ASSERT(pp != NULL); 9550 /* The lock lives in the root page */ 9551 pp = PP_PAGEROOT(pp); 9552 ASSERT(pp != NULL); 9553 9554 mtx = SFMMU_MLSPL_MTX(type, pp); 9555 return (MUTEX_HELD(mtx)); 9556 } 9557 9558 static uint_t 9559 sfmmu_get_free_hblk(struct hme_blk **hmeblkpp, uint_t critical) 9560 { 9561 struct hme_blk *hblkp; 9562 9563 if (freehblkp != NULL) { 9564 mutex_enter(&freehblkp_lock); 9565 if (freehblkp != NULL) { 9566 /* 9567 * If the current thread is owning hblk_reserve, 9568 * let it succede even if freehblkcnt is really low. 9569 */ 9570 if (freehblkcnt <= HBLK_RESERVE_MIN && !critical) { 9571 SFMMU_STAT(sf_get_free_throttle); 9572 mutex_exit(&freehblkp_lock); 9573 return (0); 9574 } 9575 freehblkcnt--; 9576 *hmeblkpp = freehblkp; 9577 hblkp = *hmeblkpp; 9578 freehblkp = hblkp->hblk_next; 9579 mutex_exit(&freehblkp_lock); 9580 hblkp->hblk_next = NULL; 9581 SFMMU_STAT(sf_get_free_success); 9582 return (1); 9583 } 9584 mutex_exit(&freehblkp_lock); 9585 } 9586 SFMMU_STAT(sf_get_free_fail); 9587 return (0); 9588 } 9589 9590 static uint_t 9591 sfmmu_put_free_hblk(struct hme_blk *hmeblkp, uint_t critical) 9592 { 9593 struct hme_blk *hblkp; 9594 9595 /* 9596 * If the current thread is mapping into kernel space, 9597 * let it succede even if freehblkcnt is max 9598 * so that it will avoid freeing it to kmem. 9599 * This will prevent stack overflow due to 9600 * possible recursion since kmem_cache_free() 9601 * might require creation of a slab which 9602 * in turn needs an hmeblk to map that slab; 9603 * let's break this vicious chain at the first 9604 * opportunity. 9605 */ 9606 if (freehblkcnt < HBLK_RESERVE_CNT || critical) { 9607 mutex_enter(&freehblkp_lock); 9608 if (freehblkcnt < HBLK_RESERVE_CNT || critical) { 9609 SFMMU_STAT(sf_put_free_success); 9610 freehblkcnt++; 9611 hmeblkp->hblk_next = freehblkp; 9612 freehblkp = hmeblkp; 9613 mutex_exit(&freehblkp_lock); 9614 return (1); 9615 } 9616 mutex_exit(&freehblkp_lock); 9617 } 9618 9619 /* 9620 * Bring down freehblkcnt to HBLK_RESERVE_CNT. We are here 9621 * only if freehblkcnt is at least HBLK_RESERVE_CNT *and* 9622 * we are not in the process of mapping into kernel space. 
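 *
 * Both the fast checks above and the drain loop below use the same
 * check / lock / re-check idiom: freehblkcnt is read without
 * freehblkp_lock as a cheap hint and is only trusted once it has been
 * re-tested under the lock, roughly:
 *
 *	if (counter looks interesting) {
 *		mutex_enter(&freehblkp_lock);
 *		if (counter is still interesting)
 *			act on the free list;
 *		mutex_exit(&freehblkp_lock);
 *	}
 *
 * Also note that an hblk being drained is unlinked while holding
 * freehblkp_lock but is handed to kmem_cache_free() only after the
 * lock has been dropped.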
9623 */ 9624 ASSERT(!critical); 9625 while (freehblkcnt > HBLK_RESERVE_CNT) { 9626 mutex_enter(&freehblkp_lock); 9627 if (freehblkcnt > HBLK_RESERVE_CNT) { 9628 freehblkcnt--; 9629 hblkp = freehblkp; 9630 freehblkp = hblkp->hblk_next; 9631 mutex_exit(&freehblkp_lock); 9632 ASSERT(get_hblk_cache(hblkp) == sfmmu8_cache); 9633 kmem_cache_free(sfmmu8_cache, hblkp); 9634 continue; 9635 } 9636 mutex_exit(&freehblkp_lock); 9637 } 9638 SFMMU_STAT(sf_put_free_fail); 9639 return (0); 9640 } 9641 9642 static void 9643 sfmmu_hblk_swap(struct hme_blk *new) 9644 { 9645 struct hme_blk *old, *hblkp, *prev; 9646 uint64_t hblkpa, prevpa, newpa; 9647 caddr_t base, vaddr, endaddr; 9648 struct hmehash_bucket *hmebp; 9649 struct sf_hment *osfhme, *nsfhme; 9650 page_t *pp; 9651 kmutex_t *pml; 9652 tte_t tte; 9653 9654 #ifdef DEBUG 9655 hmeblk_tag hblktag; 9656 struct hme_blk *found; 9657 #endif 9658 old = HBLK_RESERVE; 9659 9660 /* 9661 * save pa before bcopy clobbers it 9662 */ 9663 newpa = new->hblk_nextpa; 9664 9665 base = (caddr_t)get_hblk_base(old); 9666 endaddr = base + get_hblk_span(old); 9667 9668 /* 9669 * acquire hash bucket lock. 9670 */ 9671 hmebp = sfmmu_tteload_acquire_hashbucket(ksfmmup, base, TTE8K); 9672 9673 /* 9674 * copy contents from old to new 9675 */ 9676 bcopy((void *)old, (void *)new, HME8BLK_SZ); 9677 9678 /* 9679 * add new to hash chain 9680 */ 9681 sfmmu_hblk_hash_add(hmebp, new, newpa); 9682 9683 /* 9684 * search hash chain for hblk_reserve; this needs to be performed 9685 * after adding new, otherwise prevpa and prev won't correspond 9686 * to the hblk which is prior to old in hash chain when we call 9687 * sfmmu_hblk_hash_rm to remove old later. 9688 */ 9689 for (prevpa = 0, prev = NULL, 9690 hblkpa = hmebp->hmeh_nextpa, hblkp = hmebp->hmeblkp; 9691 hblkp != NULL && hblkp != old; 9692 prevpa = hblkpa, prev = hblkp, 9693 hblkpa = hblkp->hblk_nextpa, hblkp = hblkp->hblk_next); 9694 9695 if (hblkp != old) 9696 panic("sfmmu_hblk_swap: hblk_reserve not found"); 9697 9698 /* 9699 * p_mapping list is still pointing to hments in hblk_reserve; 9700 * fix up p_mapping list so that they point to hments in new. 9701 * 9702 * Since all these mappings are created by hblk_reserve_thread 9703 * on the way and it's using at least one of the buffers from each of 9704 * the newly minted slabs, there is no danger of any of these 9705 * mappings getting unloaded by another thread. 9706 * 9707 * tsbmiss could only modify ref/mod bits of hments in old/new. 9708 * Since all of these hments hold mappings established by segkmem 9709 * and mappings in segkmem are setup with HAT_NOSYNC, ref/mod bits 9710 * have no meaning for the mappings in hblk_reserve. hments in 9711 * old and new are identical except for ref/mod bits. 
9712 */ 9713 for (vaddr = base; vaddr < endaddr; vaddr += TTEBYTES(TTE8K)) { 9714 9715 HBLKTOHME(osfhme, old, vaddr); 9716 sfmmu_copytte(&osfhme->hme_tte, &tte); 9717 9718 if (TTE_IS_VALID(&tte)) { 9719 if ((pp = osfhme->hme_page) == NULL) 9720 panic("sfmmu_hblk_swap: page not mapped"); 9721 9722 pml = sfmmu_mlist_enter(pp); 9723 9724 if (pp != osfhme->hme_page) 9725 panic("sfmmu_hblk_swap: mapping changed"); 9726 9727 HBLKTOHME(nsfhme, new, vaddr); 9728 9729 HME_ADD(nsfhme, pp); 9730 HME_SUB(osfhme, pp); 9731 9732 sfmmu_mlist_exit(pml); 9733 } 9734 } 9735 9736 /* 9737 * remove old from hash chain 9738 */ 9739 sfmmu_hblk_hash_rm(hmebp, old, prevpa, prev); 9740 9741 #ifdef DEBUG 9742 9743 hblktag.htag_id = ksfmmup; 9744 hblktag.htag_bspage = HME_HASH_BSPAGE(base, HME_HASH_SHIFT(TTE8K)); 9745 hblktag.htag_rehash = HME_HASH_REHASH(TTE8K); 9746 HME_HASH_FAST_SEARCH(hmebp, hblktag, found); 9747 9748 if (found != new) 9749 panic("sfmmu_hblk_swap: new hblk not found"); 9750 #endif 9751 9752 SFMMU_HASH_UNLOCK(hmebp); 9753 9754 /* 9755 * Reset hblk_reserve 9756 */ 9757 bzero((void *)old, HME8BLK_SZ); 9758 old->hblk_nextpa = va_to_pa((caddr_t)old); 9759 } 9760 9761 /* 9762 * Grab the mlist mutex for both pages passed in. 9763 * 9764 * low and high will be returned as pointers to the mutexes for these pages. 9765 * low refers to the mutex residing in the lower bin of the mlist hash, while 9766 * high refers to the mutex residing in the higher bin of the mlist hash. This 9767 * is due to the locking order restrictions on the same thread grabbing 9768 * multiple mlist mutexes. The low lock must be acquired before the high lock. 9769 * 9770 * If both pages hash to the same mutex, only grab that single mutex, and 9771 * high will be returned as NULL 9772 * If the pages hash to different bins in the hash, grab the lower addressed 9773 * lock first and then the higher addressed lock in order to follow the locking 9774 * rules involved with the same thread grabbing multiple mlist mutexes. 9775 * low and high will both have non-NULL values. 9776 */ 9777 static void 9778 sfmmu_mlist_reloc_enter(struct page *targ, struct page *repl, 9779 kmutex_t **low, kmutex_t **high) 9780 { 9781 kmutex_t *mml_targ, *mml_repl; 9782 9783 /* 9784 * no need to do the dance around szc as in sfmmu_mlist_enter() 9785 * because this routine is only called by hat_page_relocate() and all 9786 * targ and repl pages are already locked EXCL so szc can't change. 
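 *
 * The ordered acquisition that follows is the usual way to take two
 * mutexes from the same hash without an ABBA deadlock; in generic
 * form (lka and lkb are hypothetical):
 *
 *	first  = (lka <= lkb) ? lka : lkb;
 *	second = (lka <= lkb) ? lkb : lka;
 *	mutex_enter(first);
 *	if (second != first)
 *		mutex_enter(second);
 *
 * sfmmu_mlist_reloc_exit() releases them in the opposite order.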
9787 */ 9788 9789 mml_targ = MLIST_HASH(PP_PAGEROOT(targ)); 9790 mml_repl = MLIST_HASH(PP_PAGEROOT(repl)); 9791 9792 if (mml_targ == mml_repl) { 9793 *low = mml_targ; 9794 *high = NULL; 9795 } else { 9796 if (mml_targ < mml_repl) { 9797 *low = mml_targ; 9798 *high = mml_repl; 9799 } else { 9800 *low = mml_repl; 9801 *high = mml_targ; 9802 } 9803 } 9804 9805 mutex_enter(*low); 9806 if (*high) 9807 mutex_enter(*high); 9808 } 9809 9810 static void 9811 sfmmu_mlist_reloc_exit(kmutex_t *low, kmutex_t *high) 9812 { 9813 if (high) 9814 mutex_exit(high); 9815 mutex_exit(low); 9816 } 9817 9818 static hatlock_t * 9819 sfmmu_hat_enter(sfmmu_t *sfmmup) 9820 { 9821 hatlock_t *hatlockp; 9822 9823 if (sfmmup != ksfmmup) { 9824 hatlockp = TSB_HASH(sfmmup); 9825 mutex_enter(HATLOCK_MUTEXP(hatlockp)); 9826 return (hatlockp); 9827 } 9828 return (NULL); 9829 } 9830 9831 static hatlock_t * 9832 sfmmu_hat_tryenter(sfmmu_t *sfmmup) 9833 { 9834 hatlock_t *hatlockp; 9835 9836 if (sfmmup != ksfmmup) { 9837 hatlockp = TSB_HASH(sfmmup); 9838 if (mutex_tryenter(HATLOCK_MUTEXP(hatlockp)) == 0) 9839 return (NULL); 9840 return (hatlockp); 9841 } 9842 return (NULL); 9843 } 9844 9845 static void 9846 sfmmu_hat_exit(hatlock_t *hatlockp) 9847 { 9848 if (hatlockp != NULL) 9849 mutex_exit(HATLOCK_MUTEXP(hatlockp)); 9850 } 9851 9852 static void 9853 sfmmu_hat_lock_all(void) 9854 { 9855 int i; 9856 for (i = 0; i < SFMMU_NUM_LOCK; i++) 9857 mutex_enter(HATLOCK_MUTEXP(&hat_lock[i])); 9858 } 9859 9860 static void 9861 sfmmu_hat_unlock_all(void) 9862 { 9863 int i; 9864 for (i = SFMMU_NUM_LOCK - 1; i >= 0; i--) 9865 mutex_exit(HATLOCK_MUTEXP(&hat_lock[i])); 9866 } 9867 9868 int 9869 sfmmu_hat_lock_held(sfmmu_t *sfmmup) 9870 { 9871 ASSERT(sfmmup != ksfmmup); 9872 return (MUTEX_HELD(HATLOCK_MUTEXP(TSB_HASH(sfmmup)))); 9873 } 9874 9875 /* 9876 * Locking primitives to provide consistency between ISM unmap 9877 * and other operations. Since ISM unmap can take a long time, we 9878 * use HAT_ISMBUSY flag (protected by the hatlock) to avoid creating 9879 * contention on the hatlock buckets while ISM segments are being 9880 * unmapped. The tradeoff is that the flags don't prevent priority 9881 * inversion from occurring, so we must request kernel priority in 9882 * case we have to sleep to keep from getting buried while holding 9883 * the HAT_ISMBUSY flag set, which in turn could block other kernel 9884 * threads from running (for example, in sfmmu_uvatopfn()). 9885 */ 9886 static void 9887 sfmmu_ismhat_enter(sfmmu_t *sfmmup, int hatlock_held) 9888 { 9889 hatlock_t *hatlockp; 9890 9891 THREAD_KPRI_REQUEST(); 9892 if (!hatlock_held) 9893 hatlockp = sfmmu_hat_enter(sfmmup); 9894 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) 9895 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp)); 9896 SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY); 9897 if (!hatlock_held) 9898 sfmmu_hat_exit(hatlockp); 9899 } 9900 9901 static void 9902 sfmmu_ismhat_exit(sfmmu_t *sfmmup, int hatlock_held) 9903 { 9904 hatlock_t *hatlockp; 9905 9906 if (!hatlock_held) 9907 hatlockp = sfmmu_hat_enter(sfmmup); 9908 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 9909 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY); 9910 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 9911 if (!hatlock_held) 9912 sfmmu_hat_exit(hatlockp); 9913 THREAD_KPRI_RELEASE(); 9914 } 9915 9916 /* 9917 * 9918 * Algorithm: 9919 * 9920 * (1) if segkmem is not ready, allocate hblk from an array of pre-alloc'ed 9921 * hblks. 
9922 * 9923 * (2) if we are allocating an hblk for mapping a slab in sfmmu_cache, 9924 * 9925 * (a) try to return an hblk from reserve pool of free hblks; 9926 * (b) if the reserve pool is empty, acquire hblk_reserve_lock 9927 * and return hblk_reserve. 9928 * 9929 * (3) call kmem_cache_alloc() to allocate hblk; 9930 * 9931 * (a) if hblk_reserve_lock is held by the current thread, 9932 * atomically replace hblk_reserve by the hblk that is 9933 * returned by kmem_cache_alloc; release hblk_reserve_lock 9934 * and call kmem_cache_alloc() again. 9935 * (b) if reserve pool is not full, add the hblk that is 9936 * returned by kmem_cache_alloc to reserve pool and 9937 * call kmem_cache_alloc again. 9938 * 9939 */ 9940 static struct hme_blk * 9941 sfmmu_hblk_alloc(sfmmu_t *sfmmup, caddr_t vaddr, 9942 struct hmehash_bucket *hmebp, uint_t size, hmeblk_tag hblktag, 9943 uint_t flags) 9944 { 9945 struct hme_blk *hmeblkp = NULL; 9946 struct hme_blk *newhblkp; 9947 struct hme_blk *shw_hblkp = NULL; 9948 struct kmem_cache *sfmmu_cache = NULL; 9949 uint64_t hblkpa; 9950 ulong_t index; 9951 uint_t owner; /* set to 1 if using hblk_reserve */ 9952 uint_t forcefree; 9953 int sleep; 9954 9955 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 9956 9957 /* 9958 * If segkmem is not created yet, allocate from static hmeblks 9959 * created at the end of startup_modules(). See the block comment 9960 * in startup_modules() describing how we estimate the number of 9961 * static hmeblks that will be needed during re-map. 9962 */ 9963 if (!hblk_alloc_dynamic) { 9964 9965 if (size == TTE8K) { 9966 index = nucleus_hblk8.index; 9967 if (index >= nucleus_hblk8.len) { 9968 /* 9969 * If we panic here, see startup_modules() to 9970 * make sure that we are calculating the 9971 * number of hblk8's that we need correctly. 9972 */ 9973 panic("no nucleus hblk8 to allocate"); 9974 } 9975 hmeblkp = 9976 (struct hme_blk *)&nucleus_hblk8.list[index]; 9977 nucleus_hblk8.index++; 9978 SFMMU_STAT(sf_hblk8_nalloc); 9979 } else { 9980 index = nucleus_hblk1.index; 9981 if (nucleus_hblk1.index >= nucleus_hblk1.len) { 9982 /* 9983 * If we panic here, see startup_modules() 9984 * and H8TOH1; most likely you need to 9985 * update the calculation of the number 9986 * of hblk1's the kernel needs to boot. 9987 */ 9988 panic("no nucleus hblk1 to allocate"); 9989 } 9990 hmeblkp = 9991 (struct hme_blk *)&nucleus_hblk1.list[index]; 9992 nucleus_hblk1.index++; 9993 SFMMU_STAT(sf_hblk1_nalloc); 9994 } 9995 9996 goto hblk_init; 9997 } 9998 9999 SFMMU_HASH_UNLOCK(hmebp); 10000 10001 if (sfmmup != KHATID) { 10002 if (mmu_page_sizes == max_mmu_page_sizes) { 10003 if (size < TTE256M) 10004 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr, 10005 size, flags); 10006 } else { 10007 if (size < TTE4M) 10008 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr, 10009 size, flags); 10010 } 10011 } 10012 10013 fill_hblk: 10014 owner = (hblk_reserve_thread == curthread) ? 1 : 0; 10015 10016 if (owner && size == TTE8K) { 10017 10018 /* 10019 * We are really in a tight spot. We already own 10020 * hblk_reserve and we need another hblk. In anticipation 10021 * of this kind of scenario, we specifically set aside 10022 * HBLK_RESERVE_MIN number of hblks to be used exclusively 10023 * by owner of hblk_reserve. 
10024 */ 10025 SFMMU_STAT(sf_hblk_recurse_cnt); 10026 10027 if (!sfmmu_get_free_hblk(&hmeblkp, 1)) 10028 panic("sfmmu_hblk_alloc: reserve list is empty"); 10029 10030 goto hblk_verify; 10031 } 10032 10033 ASSERT(!owner); 10034 10035 if ((flags & HAT_NO_KALLOC) == 0) { 10036 10037 sfmmu_cache = ((size == TTE8K) ? sfmmu8_cache : sfmmu1_cache); 10038 sleep = ((sfmmup == KHATID) ? KM_NOSLEEP : KM_SLEEP); 10039 10040 if ((hmeblkp = kmem_cache_alloc(sfmmu_cache, sleep)) == NULL) { 10041 hmeblkp = sfmmu_hblk_steal(size); 10042 } else { 10043 /* 10044 * if we are the owner of hblk_reserve, 10045 * swap hblk_reserve with hmeblkp and 10046 * start a fresh life. Hope things go 10047 * better this time. 10048 */ 10049 if (hblk_reserve_thread == curthread) { 10050 ASSERT(sfmmu_cache == sfmmu8_cache); 10051 sfmmu_hblk_swap(hmeblkp); 10052 hblk_reserve_thread = NULL; 10053 mutex_exit(&hblk_reserve_lock); 10054 goto fill_hblk; 10055 } 10056 /* 10057 * let's donate this hblk to our reserve list if 10058 * we are not mapping kernel range 10059 */ 10060 if (size == TTE8K && sfmmup != KHATID) 10061 if (sfmmu_put_free_hblk(hmeblkp, 0)) 10062 goto fill_hblk; 10063 } 10064 } else { 10065 /* 10066 * We are here to map the slab in sfmmu8_cache; let's 10067 * check if we could tap our reserve list; if successful, 10068 * this will avoid the pain of going thru sfmmu_hblk_swap 10069 */ 10070 SFMMU_STAT(sf_hblk_slab_cnt); 10071 if (!sfmmu_get_free_hblk(&hmeblkp, 0)) { 10072 /* 10073 * let's start hblk_reserve dance 10074 */ 10075 SFMMU_STAT(sf_hblk_reserve_cnt); 10076 owner = 1; 10077 mutex_enter(&hblk_reserve_lock); 10078 hmeblkp = HBLK_RESERVE; 10079 hblk_reserve_thread = curthread; 10080 } 10081 } 10082 10083 hblk_verify: 10084 ASSERT(hmeblkp != NULL); 10085 set_hblk_sz(hmeblkp, size); 10086 ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp)); 10087 SFMMU_HASH_LOCK(hmebp); 10088 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp); 10089 if (newhblkp != NULL) { 10090 SFMMU_HASH_UNLOCK(hmebp); 10091 if (hmeblkp != HBLK_RESERVE) { 10092 /* 10093 * This is really tricky! 10094 * 10095 * vmem_alloc(vmem_seg_arena) 10096 * vmem_alloc(vmem_internal_arena) 10097 * segkmem_alloc(heap_arena) 10098 * vmem_alloc(heap_arena) 10099 * page_create() 10100 * hat_memload() 10101 * kmem_cache_free() 10102 * kmem_cache_alloc() 10103 * kmem_slab_create() 10104 * vmem_alloc(kmem_internal_arena) 10105 * segkmem_alloc(heap_arena) 10106 * vmem_alloc(heap_arena) 10107 * page_create() 10108 * hat_memload() 10109 * kmem_cache_free() 10110 * ... 10111 * 10112 * Thus, hat_memload() could call kmem_cache_free 10113 * for enough number of times that we could easily 10114 * hit the bottom of the stack or run out of reserve 10115 * list of vmem_seg structs. So, we must donate 10116 * this hblk to reserve list if it's allocated 10117 * from sfmmu8_cache *and* mapping kernel range. 10118 * We don't need to worry about freeing hmeblk1's 10119 * to kmem since they don't map any kmem slabs. 10120 * 10121 * Note: When segkmem supports largepages, we must 10122 * free hmeblk1's to reserve list as well. 10123 */ 10124 forcefree = (sfmmup == KHATID) ? 1 : 0; 10125 if (size == TTE8K && 10126 sfmmu_put_free_hblk(hmeblkp, forcefree)) { 10127 goto re_verify; 10128 } 10129 ASSERT(sfmmup != KHATID); 10130 kmem_cache_free(get_hblk_cache(hmeblkp), hmeblkp); 10131 } else { 10132 /* 10133 * Hey! we don't need hblk_reserve any more. 
10134 */ 10135 ASSERT(owner); 10136 hblk_reserve_thread = NULL; 10137 mutex_exit(&hblk_reserve_lock); 10138 owner = 0; 10139 } 10140 re_verify: 10141 /* 10142 * let's check if the goodies are still present 10143 */ 10144 SFMMU_HASH_LOCK(hmebp); 10145 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp); 10146 if (newhblkp != NULL) { 10147 /* 10148 * return newhblkp if it's not hblk_reserve; 10149 * if newhblkp is hblk_reserve, return it 10150 * _only if_ we are the owner of hblk_reserve. 10151 */ 10152 if (newhblkp != HBLK_RESERVE || owner) { 10153 return (newhblkp); 10154 } else { 10155 /* 10156 * we just hit hblk_reserve in the hash and 10157 * we are not the owner of that; 10158 * 10159 * block until hblk_reserve_thread completes 10160 * swapping hblk_reserve and try the dance 10161 * once again. 10162 */ 10163 SFMMU_HASH_UNLOCK(hmebp); 10164 mutex_enter(&hblk_reserve_lock); 10165 mutex_exit(&hblk_reserve_lock); 10166 SFMMU_STAT(sf_hblk_reserve_hit); 10167 goto fill_hblk; 10168 } 10169 } else { 10170 /* 10171 * it's no more! try the dance once again. 10172 */ 10173 SFMMU_HASH_UNLOCK(hmebp); 10174 goto fill_hblk; 10175 } 10176 } 10177 10178 hblk_init: 10179 set_hblk_sz(hmeblkp, size); 10180 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 10181 hmeblkp->hblk_next = (struct hme_blk *)NULL; 10182 hmeblkp->hblk_tag = hblktag; 10183 hmeblkp->hblk_shadow = shw_hblkp; 10184 hblkpa = hmeblkp->hblk_nextpa; 10185 hmeblkp->hblk_nextpa = 0; 10186 10187 ASSERT(get_hblk_ttesz(hmeblkp) == size); 10188 ASSERT(get_hblk_span(hmeblkp) == HMEBLK_SPAN(size)); 10189 ASSERT(hmeblkp->hblk_hmecnt == 0); 10190 ASSERT(hmeblkp->hblk_vcnt == 0); 10191 ASSERT(hmeblkp->hblk_lckcnt == 0); 10192 ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp)); 10193 sfmmu_hblk_hash_add(hmebp, hmeblkp, hblkpa); 10194 return (hmeblkp); 10195 } 10196 10197 /* 10198 * This function performs any cleanup required on the hme_blk 10199 * and returns it to the free list. 10200 */ 10201 /* ARGSUSED */ 10202 static void 10203 sfmmu_hblk_free(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp, 10204 uint64_t hblkpa, struct hme_blk **listp) 10205 { 10206 int shw_size, vshift; 10207 struct hme_blk *shw_hblkp; 10208 uint_t shw_mask, newshw_mask; 10209 uintptr_t vaddr; 10210 int size; 10211 uint_t critical; 10212 10213 ASSERT(hmeblkp); 10214 ASSERT(!hmeblkp->hblk_hmecnt); 10215 ASSERT(!hmeblkp->hblk_vcnt); 10216 ASSERT(!hmeblkp->hblk_lckcnt); 10217 ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp)); 10218 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve); 10219 10220 critical = (hblktosfmmu(hmeblkp) == KHATID) ? 
1 : 0; 10221 10222 size = get_hblk_ttesz(hmeblkp); 10223 shw_hblkp = hmeblkp->hblk_shadow; 10224 if (shw_hblkp) { 10225 ASSERT(hblktosfmmu(hmeblkp) != KHATID); 10226 if (mmu_page_sizes == max_mmu_page_sizes) { 10227 ASSERT(size < TTE256M); 10228 } else { 10229 ASSERT(size < TTE4M); 10230 } 10231 10232 shw_size = get_hblk_ttesz(shw_hblkp); 10233 vaddr = get_hblk_base(hmeblkp); 10234 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size); 10235 ASSERT(vshift < 8); 10236 /* 10237 * Atomically clear shadow mask bit 10238 */ 10239 do { 10240 shw_mask = shw_hblkp->hblk_shw_mask; 10241 ASSERT(shw_mask & (1 << vshift)); 10242 newshw_mask = shw_mask & ~(1 << vshift); 10243 newshw_mask = cas32(&shw_hblkp->hblk_shw_mask, 10244 shw_mask, newshw_mask); 10245 } while (newshw_mask != shw_mask); 10246 hmeblkp->hblk_shadow = NULL; 10247 } 10248 hmeblkp->hblk_next = NULL; 10249 hmeblkp->hblk_nextpa = hblkpa; 10250 hmeblkp->hblk_shw_bit = 0; 10251 10252 if (hmeblkp->hblk_nuc_bit == 0) { 10253 10254 if (size == TTE8K && sfmmu_put_free_hblk(hmeblkp, critical)) 10255 return; 10256 10257 hmeblkp->hblk_next = *listp; 10258 *listp = hmeblkp; 10259 } 10260 } 10261 10262 static void 10263 sfmmu_hblks_list_purge(struct hme_blk **listp) 10264 { 10265 struct hme_blk *hmeblkp; 10266 10267 while ((hmeblkp = *listp) != NULL) { 10268 *listp = hmeblkp->hblk_next; 10269 kmem_cache_free(get_hblk_cache(hmeblkp), hmeblkp); 10270 } 10271 } 10272 10273 #define BUCKETS_TO_SEARCH_BEFORE_UNLOAD 30 10274 10275 static uint_t sfmmu_hblk_steal_twice; 10276 static uint_t sfmmu_hblk_steal_count, sfmmu_hblk_steal_unload_count; 10277 10278 /* 10279 * Steal a hmeblk 10280 * Enough hmeblks were allocated at startup (nucleus hmeblks) and also 10281 * hmeblks were added dynamically. We should never ever not be able to 10282 * find one. Look for an unused/unlocked hmeblk in user hash table. 10283 */ 10284 static struct hme_blk * 10285 sfmmu_hblk_steal(int size) 10286 { 10287 static struct hmehash_bucket *uhmehash_steal_hand = NULL; 10288 struct hmehash_bucket *hmebp; 10289 struct hme_blk *hmeblkp = NULL, *pr_hblk; 10290 uint64_t hblkpa, prevpa; 10291 int i; 10292 10293 for (;;) { 10294 hmebp = (uhmehash_steal_hand == NULL) ? uhme_hash : 10295 uhmehash_steal_hand; 10296 ASSERT(hmebp >= uhme_hash && hmebp <= &uhme_hash[UHMEHASH_SZ]); 10297 10298 for (i = 0; hmeblkp == NULL && i <= UHMEHASH_SZ + 10299 BUCKETS_TO_SEARCH_BEFORE_UNLOAD; i++) { 10300 SFMMU_HASH_LOCK(hmebp); 10301 hmeblkp = hmebp->hmeblkp; 10302 hblkpa = hmebp->hmeh_nextpa; 10303 prevpa = 0; 10304 pr_hblk = NULL; 10305 while (hmeblkp) { 10306 /* 10307 * check if it is a hmeblk that is not locked 10308 * and not shared. skip shadow hmeblks with 10309 * shadow_mask set i.e valid count non zero. 10310 */ 10311 if ((get_hblk_ttesz(hmeblkp) == size) && 10312 (hmeblkp->hblk_shw_bit == 0 || 10313 hmeblkp->hblk_vcnt == 0) && 10314 (hmeblkp->hblk_lckcnt == 0)) { 10315 /* 10316 * there is a high probability that we 10317 * will find a free one. search some 10318 * buckets for a free hmeblk initially 10319 * before unloading a valid hmeblk. 
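 * For example, with BUCKETS_TO_SEARCH_BEFORE_UNLOAD at its current
 * value of 30, the first 30 buckets visited are only scanned for
 * hmeblks that are completely free (hblk_vcnt and hblk_hmecnt both
 * zero); only after that do we unload a still-valid, unlocked hmeblk
 * of the requested size.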
10320 */ 10321 if ((hmeblkp->hblk_vcnt == 0 && 10322 hmeblkp->hblk_hmecnt == 0) || (i >= 10323 BUCKETS_TO_SEARCH_BEFORE_UNLOAD)) { 10324 if (sfmmu_steal_this_hblk(hmebp, 10325 hmeblkp, hblkpa, prevpa, 10326 pr_hblk)) { 10327 /* 10328 * Hblk is unloaded 10329 * successfully 10330 */ 10331 break; 10332 } 10333 } 10334 } 10335 pr_hblk = hmeblkp; 10336 prevpa = hblkpa; 10337 hblkpa = hmeblkp->hblk_nextpa; 10338 hmeblkp = hmeblkp->hblk_next; 10339 } 10340 10341 SFMMU_HASH_UNLOCK(hmebp); 10342 if (hmebp++ == &uhme_hash[UHMEHASH_SZ]) 10343 hmebp = uhme_hash; 10344 } 10345 uhmehash_steal_hand = hmebp; 10346 10347 if (hmeblkp != NULL) 10348 break; 10349 10350 /* 10351 * in the worst case, look for a free one in the kernel 10352 * hash table. 10353 */ 10354 for (i = 0, hmebp = khme_hash; i <= KHMEHASH_SZ; i++) { 10355 SFMMU_HASH_LOCK(hmebp); 10356 hmeblkp = hmebp->hmeblkp; 10357 hblkpa = hmebp->hmeh_nextpa; 10358 prevpa = 0; 10359 pr_hblk = NULL; 10360 while (hmeblkp) { 10361 /* 10362 * check if it is free hmeblk 10363 */ 10364 if ((get_hblk_ttesz(hmeblkp) == size) && 10365 (hmeblkp->hblk_lckcnt == 0) && 10366 (hmeblkp->hblk_vcnt == 0) && 10367 (hmeblkp->hblk_hmecnt == 0)) { 10368 if (sfmmu_steal_this_hblk(hmebp, 10369 hmeblkp, hblkpa, prevpa, pr_hblk)) { 10370 break; 10371 } else { 10372 /* 10373 * Cannot fail since we have 10374 * hash lock. 10375 */ 10376 panic("fail to steal?"); 10377 } 10378 } 10379 10380 pr_hblk = hmeblkp; 10381 prevpa = hblkpa; 10382 hblkpa = hmeblkp->hblk_nextpa; 10383 hmeblkp = hmeblkp->hblk_next; 10384 } 10385 10386 SFMMU_HASH_UNLOCK(hmebp); 10387 if (hmebp++ == &khme_hash[KHMEHASH_SZ]) 10388 hmebp = khme_hash; 10389 } 10390 10391 if (hmeblkp != NULL) 10392 break; 10393 sfmmu_hblk_steal_twice++; 10394 } 10395 return (hmeblkp); 10396 } 10397 10398 /* 10399 * This routine does real work to prepare a hblk to be "stolen" by 10400 * unloading the mappings, updating shadow counts .... 10401 * It returns 1 if the block is ready to be reused (stolen), or 0 10402 * means the block cannot be stolen yet- pageunload is still working 10403 * on this hblk. 10404 */ 10405 static int 10406 sfmmu_steal_this_hblk(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp, 10407 uint64_t hblkpa, uint64_t prevpa, struct hme_blk *pr_hblk) 10408 { 10409 int shw_size, vshift; 10410 struct hme_blk *shw_hblkp; 10411 uintptr_t vaddr; 10412 uint_t shw_mask, newshw_mask; 10413 10414 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 10415 10416 /* 10417 * check if the hmeblk is free, unload if necessary 10418 */ 10419 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 10420 sfmmu_t *sfmmup; 10421 demap_range_t dmr; 10422 10423 sfmmup = hblktosfmmu(hmeblkp); 10424 DEMAP_RANGE_INIT(sfmmup, &dmr); 10425 (void) sfmmu_hblk_unload(sfmmup, hmeblkp, 10426 (caddr_t)get_hblk_base(hmeblkp), 10427 get_hblk_endaddr(hmeblkp), &dmr, HAT_UNLOAD); 10428 DEMAP_RANGE_FLUSH(&dmr); 10429 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 10430 /* 10431 * Pageunload is working on the same hblk. 
10432 */
10433 return (0);
10434 }
10435
10436 sfmmu_hblk_steal_unload_count++;
10437 }
10438
10439 ASSERT(hmeblkp->hblk_lckcnt == 0);
10440 ASSERT(hmeblkp->hblk_vcnt == 0 && hmeblkp->hblk_hmecnt == 0);
10441
10442 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, pr_hblk);
10443 hmeblkp->hblk_nextpa = hblkpa;
10444
10445 shw_hblkp = hmeblkp->hblk_shadow;
10446 if (shw_hblkp) {
10447 shw_size = get_hblk_ttesz(shw_hblkp);
10448 vaddr = get_hblk_base(hmeblkp);
10449 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size);
10450 ASSERT(vshift < 8);
10451 /*
10452 * Atomically clear shadow mask bit
10453 */
10454 do {
10455 shw_mask = shw_hblkp->hblk_shw_mask;
10456 ASSERT(shw_mask & (1 << vshift));
10457 newshw_mask = shw_mask & ~(1 << vshift);
10458 newshw_mask = cas32(&shw_hblkp->hblk_shw_mask,
10459 shw_mask, newshw_mask);
10460 } while (newshw_mask != shw_mask);
10461 hmeblkp->hblk_shadow = NULL;
10462 }
10463
10464 /*
10465 * remove shadow bit if we are stealing an unused shadow hmeblk.
10466 * sfmmu_hblk_alloc needs it that way, will set shadow bit later if
10467 * we are indeed allocating a shadow hmeblk.
10468 */
10469 hmeblkp->hblk_shw_bit = 0;
10470
10471 sfmmu_hblk_steal_count++;
10472 SFMMU_STAT(sf_steal_count);
10473
10474 return (1);
10475 }
10476
10477 struct hme_blk *
10478 sfmmu_hmetohblk(struct sf_hment *sfhme)
10479 {
10480 struct hme_blk *hmeblkp;
10481 struct sf_hment *sfhme0;
10482 struct hme_blk *hblk_dummy = 0;
10483
10484 /*
10485 * No dummy sf_hments, please.
10486 */
10487 ASSERT(sfhme->hme_tte.ll != 0);
10488
10489 sfhme0 = sfhme - sfhme->hme_tte.tte_hmenum;
10490 hmeblkp = (struct hme_blk *)((uintptr_t)sfhme0 -
10491 (uintptr_t)&hblk_dummy->hblk_hme[0]);
10492
10493 return (hmeblkp);
10494 }
10495
10496 /*
10497 * On swapin, get appropriately sized TSB(s) and clear the HAT_SWAPPED flag.
10498 * If we can't get appropriately sized TSB(s), try for 8K TSB(s) using
10499 * KM_SLEEP allocation.
10500 *
10501 * No status is returned; the forced 8K allocation may sleep but cannot fail.
10502 */
10503 static void
10504 sfmmu_tsb_swapin(sfmmu_t *sfmmup, hatlock_t *hatlockp)
10505 {
10506 struct tsb_info *tsbinfop, *next;
10507 tsb_replace_rc_t rc;
10508 boolean_t gotfirst = B_FALSE;
10509
10510 ASSERT(sfmmup != ksfmmup);
10511 ASSERT(sfmmu_hat_lock_held(sfmmup));
10512
10513 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPIN)) {
10514 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp));
10515 }
10516
10517 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
10518 SFMMU_FLAGS_SET(sfmmup, HAT_SWAPIN);
10519 } else {
10520 return;
10521 }
10522
10523 ASSERT(sfmmup->sfmmu_tsb != NULL);
10524
10525 /*
10526 * Loop over all tsbinfo's replacing them with ones that actually have
10527 * a TSB. If any of the replacements ever fail, bail out of the loop.
10528 */
10529 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; tsbinfop = next) {
10530 ASSERT(tsbinfop->tsb_flags & TSB_SWAPPED);
10531 next = tsbinfop->tsb_next;
10532 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, tsbinfop->tsb_szc,
10533 hatlockp, TSB_SWAPIN);
10534 if (rc != TSB_SUCCESS) {
10535 break;
10536 }
10537 gotfirst = B_TRUE;
10538 }
10539
10540 switch (rc) {
10541 case TSB_SUCCESS:
10542 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN);
10543 cv_broadcast(&sfmmup->sfmmu_tsb_cv);
10544 return;
10545 case TSB_ALLOCFAIL:
10546 break;
10547 default:
10548 panic("sfmmu_replace_tsb returned unrecognized failure code "
10549 "%d", rc);
10550 }
10551
10552 /*
10553 * In this case, we failed to get one of our TSBs.
If we failed to
10554 * get the first TSB, get one of minimum size (8KB). Walk the list
10555 * and throw away the tsbinfos, starting where the allocation failed;
10556 * we can get by with just one TSB as long as we don't leave the
10557 * SWAPPED tsbinfo structures lying around.
10558 */
10559 tsbinfop = sfmmup->sfmmu_tsb;
10560 next = tsbinfop->tsb_next;
10561 tsbinfop->tsb_next = NULL;
10562
10563 sfmmu_hat_exit(hatlockp);
10564 for (tsbinfop = next; tsbinfop != NULL; tsbinfop = next) {
10565 next = tsbinfop->tsb_next;
10566 sfmmu_tsbinfo_free(tsbinfop);
10567 }
10568 hatlockp = sfmmu_hat_enter(sfmmup);
10569
10570 /*
10571 * If we don't have any TSBs, get a single 8K TSB for 8K, 64K and 512K
10572 * pages.
10573 */
10574 if (!gotfirst) {
10575 tsbinfop = sfmmup->sfmmu_tsb;
10576 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, TSB_MIN_SZCODE,
10577 hatlockp, TSB_SWAPIN | TSB_FORCEALLOC);
10578 ASSERT(rc == TSB_SUCCESS);
10579 }
10580
10581 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN);
10582 cv_broadcast(&sfmmup->sfmmu_tsb_cv);
10583 }
10584
10585 /*
10586 * Handle exceptions for the low level tsb handlers.
10587 *
10588 * There are many scenarios that could land us here:
10589 *
10590 * If the context is invalid we land here. The context can be invalid
10591 * for 3 reasons: 1) we couldn't allocate a new context and now need to
10592 * perform a wrap-around operation in order to allocate a new context.
10593 * 2) The context was invalidated to change pagesize programming. 3) The ISM
10594 * or TSB configuration is changing for this process and we are forced
10595 * in here to do a synchronization operation. If the context is valid we can
10596 * be here from the window trap handler. In this case just call trap to
10597 * handle the fault.
10598 *
10599 * Note that the process will run in INVALID_CONTEXT before
10600 * faulting into here and subsequently loading the MMU registers
10601 * (including the TSB base register) associated with this process.
10602 * For this reason, the trap handlers must all test for
10603 * INVALID_CONTEXT before attempting to access any registers other
10604 * than the context registers.
10605 */
10606 void
10607 sfmmu_tsbmiss_exception(struct regs *rp, uintptr_t tagaccess, uint_t traptype)
10608 {
10609 sfmmu_t *sfmmup;
10610 uint_t ctxnum;
10611 klwp_id_t lwp;
10612 char lwp_save_state;
10613 hatlock_t *hatlockp;
10614 struct tsb_info *tsbinfop;
10615
10616 SFMMU_STAT(sf_tsb_exceptions);
10617 SFMMU_MMU_STAT(mmu_tsb_exceptions);
10618 sfmmup = astosfmmu(curthread->t_procp->p_as);
10619 ctxnum = tagaccess & TAGACC_CTX_MASK;
10620
10621 ASSERT(sfmmup != ksfmmup && ctxnum != KCONTEXT);
10622 ASSERT(sfmmup->sfmmu_ismhat == 0);
10623 /*
10624 * First, make sure we come out of here with a valid ctx,
10625 * since if we don't get one we'll simply loop on the
10626 * faulting instruction.
10627 *
10628 * If the ISM mappings are changing, the TSB is being relocated, or
10629 * the process is swapped out we serialize behind the controlling
10630 * thread with the sfmmu_flags and sfmmu_tsb_cv condition variable.
10631 * Otherwise we synchronize with the context stealer or the thread
10632 * that required us to change out our MMU registers (such
10633 * as a thread changing out our TSB while we were running) by
10634 * locking the HAT and grabbing the rwlock on the context as a
10635 * reader temporarily.
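 *
 * As a rough sketch of the INVALID_CONTEXT path below (this simply
 * restates the code that follows, it is not a separate interface):
 *
 *	retry:
 *		if any tsbinfo has TSB_RELOC_FLAG set, cv_wait and retry;
 *		if HAT_ISMBUSY is set, cv_wait and retry;
 *		if HAT_SWAPPED is set, sfmmu_tsb_swapin() and retry;
 *		sfmmu_get_ctx(sfmmup);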
10636 */
10637 ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED) ||
10638 ctxnum == INVALID_CONTEXT);
10639
10640 if (ctxnum == INVALID_CONTEXT) {
10641 /*
10642 * Must set lwp state to LWP_SYS before
10643 * trying to acquire any adaptive lock
10644 */
10645 lwp = ttolwp(curthread);
10646 ASSERT(lwp);
10647 lwp_save_state = lwp->lwp_state;
10648 lwp->lwp_state = LWP_SYS;
10649
10650 hatlockp = sfmmu_hat_enter(sfmmup);
10651 retry:
10652 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
10653 tsbinfop = tsbinfop->tsb_next) {
10654 if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) {
10655 cv_wait(&sfmmup->sfmmu_tsb_cv,
10656 HATLOCK_MUTEXP(hatlockp));
10657 goto retry;
10658 }
10659 }
10660
10661 /*
10662 * Wait for ISM maps to be updated.
10663 */
10664 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) {
10665 cv_wait(&sfmmup->sfmmu_tsb_cv,
10666 HATLOCK_MUTEXP(hatlockp));
10667 goto retry;
10668 }
10669
10670 /*
10671 * If we're swapping in, get TSB(s). Note that we must do
10672 * this before we get a ctx or load the MMU state. Once
10673 * we swap in we have to recheck to make sure the TSB(s) and
10674 * ISM mappings didn't change while we slept.
10675 */
10676 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
10677 sfmmu_tsb_swapin(sfmmup, hatlockp);
10678 goto retry;
10679 }
10680
10681 sfmmu_get_ctx(sfmmup);
10682
10683 sfmmu_hat_exit(hatlockp);
10684 /*
10685 * Must restore lwp_state if not calling
10686 * trap() for further processing. Restore
10687 * it anyway.
10688 */
10689 lwp->lwp_state = lwp_save_state;
10690 if (sfmmup->sfmmu_ttecnt[TTE8K] != 0 ||
10691 sfmmup->sfmmu_ttecnt[TTE64K] != 0 ||
10692 sfmmup->sfmmu_ttecnt[TTE512K] != 0 ||
10693 sfmmup->sfmmu_ttecnt[TTE4M] != 0 ||
10694 sfmmup->sfmmu_ttecnt[TTE32M] != 0 ||
10695 sfmmup->sfmmu_ttecnt[TTE256M] != 0) {
10696 return;
10697 }
10698 if (traptype == T_DATA_PROT) {
10699 traptype = T_DATA_MMU_MISS;
10700 }
10701 }
10702 trap(rp, (caddr_t)tagaccess, traptype, 0);
10703 }
10704
10705 /*
10706 * sfmmu_vatopfn_suspended is called from GET_TTE when TL=0 and the
10707 * TTE_SUSPENDED bit is set in the tte. We block on acquiring a page lock
10708 * rather than spinning to avoid send mondo timeouts with
10709 * interrupts enabled. When the lock is acquired it is immediately
10710 * released and we return back to sfmmu_vatopfn just after
10711 * the GET_TTE call.
10712 */
10713 void
10714 sfmmu_vatopfn_suspended(caddr_t vaddr, sfmmu_t *sfmmu, tte_t *ttep)
10715 {
10716 struct page **pp;
10717
10718 (void) as_pagelock(sfmmu->sfmmu_as, &pp, vaddr, TTE_CSZ(ttep), S_WRITE);
10719 as_pageunlock(sfmmu->sfmmu_as, pp, vaddr, TTE_CSZ(ttep), S_WRITE);
10720 }
10721
10722 /*
10723 * sfmmu_tsbmiss_suspended is called from GET_TTE when TL>0 and the
10724 * TTE_SUSPENDED bit is set in the tte. We do this so that we can handle
10725 * cross traps which cannot be handled while spinning in the
10726 * trap handlers. Simply enter and exit the kpr_suspendlock spin
10727 * mutex, which is held by the holder of the suspend bit, and then
10728 * retry the trapped instruction after unwinding.
10729 */
10730 /*ARGSUSED*/
10731 void
10732 sfmmu_tsbmiss_suspended(struct regs *rp, uintptr_t tagacc, uint_t traptype)
10733 {
10734 ASSERT(curthread != kreloc_thread);
10735 mutex_enter(&kpr_suspendlock);
10736 mutex_exit(&kpr_suspendlock);
10737 }
10738
10739 /*
10740 * Special routine to flush out ism mappings - TSBs, TLBs and D-caches.
10741 * This routine may be called with all cpu's captured. Therefore, the
10742 * caller is responsible for holding all locks and disabling kernel
10743 * preemption.
10744 */
10745 /* ARGSUSED */
10746 static void
10747 sfmmu_ismtlbcache_demap(caddr_t addr, sfmmu_t *ism_sfmmup,
10748 struct hme_blk *hmeblkp, pfn_t pfnum, int cache_flush_flag)
10749 {
10750 cpuset_t cpuset;
10751 caddr_t va;
10752 ism_ment_t *ment;
10753 sfmmu_t *sfmmup;
10754 #ifdef VAC
10755 int vcolor;
10756 #endif
10757 int ttesz;
10758
10759 /*
10760 * Walk the ism_hat's mapping list and flush the page
10761 * from every hat sharing this ism_hat. This routine
10762 * may be called while all cpu's have been captured.
10763 * Therefore we can't attempt to grab any locks. For now
10764 * this means we will protect the ism mapping list under
10765 * a single lock which will be grabbed by the caller.
10766 * If hat_share/unshare scalability becomes a performance
10767 * problem then we may need to re-think ism mapping list locking.
10768 */
10769 ASSERT(ism_sfmmup->sfmmu_ismhat);
10770 ASSERT(MUTEX_HELD(&ism_mlist_lock));
10771 addr = addr - ISMID_STARTADDR;
10772 for (ment = ism_sfmmup->sfmmu_iment; ment; ment = ment->iment_next) {
10773
10774 sfmmup = ment->iment_hat;
10775
10776 va = ment->iment_base_va;
10777 va = (caddr_t)((uintptr_t)va + (uintptr_t)addr);
10778
10779 /*
10780 * Flush TSB of ISM mappings.
10781 */
10782 ttesz = get_hblk_ttesz(hmeblkp);
10783 if (ttesz == TTE8K || ttesz == TTE4M) {
10784 sfmmu_unload_tsb(sfmmup, va, ttesz);
10785 } else {
10786 caddr_t sva = va;
10787 caddr_t eva;
10788 ASSERT(addr == (caddr_t)get_hblk_base(hmeblkp));
10789 eva = sva + get_hblk_span(hmeblkp);
10790 sfmmu_unload_tsb_range(sfmmup, sva, eva, ttesz);
10791 }
10792
10793 cpuset = sfmmup->sfmmu_cpusran;
10794 CPUSET_AND(cpuset, cpu_ready_set);
10795 CPUSET_DEL(cpuset, CPU->cpu_id);
10796
10797 SFMMU_XCALL_STATS(sfmmup);
10798 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)va,
10799 (uint64_t)sfmmup);
10800
10801 vtag_flushpage(va, (uint64_t)sfmmup);
10802
10803 #ifdef VAC
10804 /*
10805 * Flush D$
10806 * When flushing D$ we must flush all
10807 * cpu's. See sfmmu_cache_flush().
10808 */
10809 if (cache_flush_flag == CACHE_FLUSH) {
10810 cpuset = cpu_ready_set;
10811 CPUSET_DEL(cpuset, CPU->cpu_id);
10812
10813 SFMMU_XCALL_STATS(sfmmup);
10814 vcolor = addr_to_vcolor(va);
10815 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
10816 vac_flushpage(pfnum, vcolor);
10817 }
10818 #endif /* VAC */
10819 }
10820 }
10821
10822 /*
10823 * Demaps the TSB, CPU caches, and flushes all TLBs on all CPUs of
10824 * a particular virtual address and ctx. If noflush is set we do not
10825 * flush the TLB/TSB. This function may or may not be called with the
10826 * HAT lock held.
10827 */
10828 static void
10829 sfmmu_tlbcache_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
10830 pfn_t pfnum, int tlb_noflush, int cpu_flag, int cache_flush_flag,
10831 int hat_lock_held)
10832 {
10833 #ifdef VAC
10834 int vcolor;
10835 #endif
10836 cpuset_t cpuset;
10837 hatlock_t *hatlockp;
10838
10839 #if defined(lint) && !defined(VAC)
10840 pfnum = pfnum;
10841 cpu_flag = cpu_flag;
10842 cache_flush_flag = cache_flush_flag;
10843 #endif
10844 /*
10845 * There is no longer a need to protect against ctx being
10846 * stolen here since we don't store the ctx in the TSB anymore.
10847 */
10848 #ifdef VAC
10849 vcolor = addr_to_vcolor(addr);
10850 #endif
10851
10852 /*
10853 * We must hold the hat lock during the flush of TLB,
10854 * to avoid a race with sfmmu_invalidate_ctx(), where
10855 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT,
10856 * causing TLB demap routine to skip flush on that MMU.
10857 * If the context on a MMU has already been set to 10858 * INVALID_CONTEXT, we just get an extra flush on 10859 * that MMU. 10860 */ 10861 if (!hat_lock_held && !tlb_noflush) 10862 hatlockp = sfmmu_hat_enter(sfmmup); 10863 10864 kpreempt_disable(); 10865 if (!tlb_noflush) { 10866 /* 10867 * Flush the TSB and TLB. 10868 */ 10869 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp); 10870 10871 cpuset = sfmmup->sfmmu_cpusran; 10872 CPUSET_AND(cpuset, cpu_ready_set); 10873 CPUSET_DEL(cpuset, CPU->cpu_id); 10874 10875 SFMMU_XCALL_STATS(sfmmup); 10876 10877 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, 10878 (uint64_t)sfmmup); 10879 10880 vtag_flushpage(addr, (uint64_t)sfmmup); 10881 } 10882 10883 if (!hat_lock_held && !tlb_noflush) 10884 sfmmu_hat_exit(hatlockp); 10885 10886 #ifdef VAC 10887 /* 10888 * Flush the D$ 10889 * 10890 * Even if the ctx is stolen, we need to flush the 10891 * cache. Our ctx stealer only flushes the TLBs. 10892 */ 10893 if (cache_flush_flag == CACHE_FLUSH) { 10894 if (cpu_flag & FLUSH_ALL_CPUS) { 10895 cpuset = cpu_ready_set; 10896 } else { 10897 cpuset = sfmmup->sfmmu_cpusran; 10898 CPUSET_AND(cpuset, cpu_ready_set); 10899 } 10900 CPUSET_DEL(cpuset, CPU->cpu_id); 10901 SFMMU_XCALL_STATS(sfmmup); 10902 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor); 10903 vac_flushpage(pfnum, vcolor); 10904 } 10905 #endif /* VAC */ 10906 kpreempt_enable(); 10907 } 10908 10909 /* 10910 * Demaps the TSB and flushes all TLBs on all cpus for a particular virtual 10911 * address and ctx. If noflush is set we do not currently do anything. 10912 * This function may or may not be called with the HAT lock held. 10913 */ 10914 static void 10915 sfmmu_tlb_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp, 10916 int tlb_noflush, int hat_lock_held) 10917 { 10918 cpuset_t cpuset; 10919 hatlock_t *hatlockp; 10920 10921 /* 10922 * If the process is exiting we have nothing to do. 10923 */ 10924 if (tlb_noflush) 10925 return; 10926 10927 /* 10928 * Flush TSB. 10929 */ 10930 if (!hat_lock_held) 10931 hatlockp = sfmmu_hat_enter(sfmmup); 10932 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp); 10933 10934 kpreempt_disable(); 10935 10936 cpuset = sfmmup->sfmmu_cpusran; 10937 CPUSET_AND(cpuset, cpu_ready_set); 10938 CPUSET_DEL(cpuset, CPU->cpu_id); 10939 10940 SFMMU_XCALL_STATS(sfmmup); 10941 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, (uint64_t)sfmmup); 10942 10943 vtag_flushpage(addr, (uint64_t)sfmmup); 10944 10945 if (!hat_lock_held) 10946 sfmmu_hat_exit(hatlockp); 10947 10948 kpreempt_enable(); 10949 10950 } 10951 10952 /* 10953 * Special case of sfmmu_tlb_demap for MMU_PAGESIZE hblks. Use the xcall 10954 * call handler that can flush a range of pages to save on xcalls. 10955 */ 10956 static int sfmmu_xcall_save; 10957 10958 static void 10959 sfmmu_tlb_range_demap(demap_range_t *dmrp) 10960 { 10961 sfmmu_t *sfmmup = dmrp->dmr_sfmmup; 10962 hatlock_t *hatlockp; 10963 cpuset_t cpuset; 10964 uint64_t sfmmu_pgcnt; 10965 pgcnt_t pgcnt = 0; 10966 int pgunload = 0; 10967 int dirtypg = 0; 10968 caddr_t addr = dmrp->dmr_addr; 10969 caddr_t eaddr; 10970 uint64_t bitvec = dmrp->dmr_bitvec; 10971 10972 ASSERT(bitvec & 1); 10973 10974 /* 10975 * Flush TSB and calculate number of pages to flush. 10976 */ 10977 while (bitvec != 0) { 10978 dirtypg = 0; 10979 /* 10980 * Find the first page to flush and then count how many 10981 * pages there are after it that also need to be flushed. 10982 * This way the number of TSB flushes is minimized. 
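 * For example (purely illustrative), if dmr_bitvec were 0xb (binary
 * 1011) relative to dmr_addr: the first pass unloads the two dirty
 * pages at [addr, addr + 2 * MMU_PAGESIZE), the zero bit skips one
 * clean page, and the next pass unloads the single dirty page that
 * follows.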
10983 */ 10984 while ((bitvec & 1) == 0) { 10985 pgcnt++; 10986 addr += MMU_PAGESIZE; 10987 bitvec >>= 1; 10988 } 10989 while (bitvec & 1) { 10990 dirtypg++; 10991 bitvec >>= 1; 10992 } 10993 eaddr = addr + ptob(dirtypg); 10994 hatlockp = sfmmu_hat_enter(sfmmup); 10995 sfmmu_unload_tsb_range(sfmmup, addr, eaddr, TTE8K); 10996 sfmmu_hat_exit(hatlockp); 10997 pgunload += dirtypg; 10998 addr = eaddr; 10999 pgcnt += dirtypg; 11000 } 11001 11002 ASSERT((pgcnt<<MMU_PAGESHIFT) <= dmrp->dmr_endaddr - dmrp->dmr_addr); 11003 if (sfmmup->sfmmu_free == 0) { 11004 addr = dmrp->dmr_addr; 11005 bitvec = dmrp->dmr_bitvec; 11006 11007 /* 11008 * make sure it has SFMMU_PGCNT_SHIFT bits only, 11009 * as it will be used to pack argument for xt_some 11010 */ 11011 ASSERT((pgcnt > 0) && 11012 (pgcnt <= (1 << SFMMU_PGCNT_SHIFT))); 11013 11014 /* 11015 * Encode pgcnt as (pgcnt -1 ), and pass (pgcnt - 1) in 11016 * the low 6 bits of sfmmup. This is doable since pgcnt 11017 * always >= 1. 11018 */ 11019 ASSERT(!((uint64_t)sfmmup & SFMMU_PGCNT_MASK)); 11020 sfmmu_pgcnt = (uint64_t)sfmmup | 11021 ((pgcnt - 1) & SFMMU_PGCNT_MASK); 11022 11023 /* 11024 * We must hold the hat lock during the flush of TLB, 11025 * to avoid a race with sfmmu_invalidate_ctx(), where 11026 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT, 11027 * causing TLB demap routine to skip flush on that MMU. 11028 * If the context on a MMU has already been set to 11029 * INVALID_CONTEXT, we just get an extra flush on 11030 * that MMU. 11031 */ 11032 hatlockp = sfmmu_hat_enter(sfmmup); 11033 kpreempt_disable(); 11034 11035 cpuset = sfmmup->sfmmu_cpusran; 11036 CPUSET_AND(cpuset, cpu_ready_set); 11037 CPUSET_DEL(cpuset, CPU->cpu_id); 11038 11039 SFMMU_XCALL_STATS(sfmmup); 11040 xt_some(cpuset, vtag_flush_pgcnt_tl1, (uint64_t)addr, 11041 sfmmu_pgcnt); 11042 11043 for (; bitvec != 0; bitvec >>= 1) { 11044 if (bitvec & 1) 11045 vtag_flushpage(addr, (uint64_t)sfmmup); 11046 addr += MMU_PAGESIZE; 11047 } 11048 kpreempt_enable(); 11049 sfmmu_hat_exit(hatlockp); 11050 11051 sfmmu_xcall_save += (pgunload-1); 11052 } 11053 dmrp->dmr_bitvec = 0; 11054 } 11055 11056 /* 11057 * In cases where we need to synchronize with TLB/TSB miss trap 11058 * handlers, _and_ need to flush the TLB, it's a lot easier to 11059 * throw away the context from the process than to do a 11060 * special song and dance to keep things consistent for the 11061 * handlers. 11062 * 11063 * Since the process suddenly ends up without a context and our caller 11064 * holds the hat lock, threads that fault after this function is called 11065 * will pile up on the lock. We can then do whatever we need to 11066 * atomically from the context of the caller. The first blocked thread 11067 * to resume executing will get the process a new context, and the 11068 * process will resume executing. 11069 * 11070 * One added advantage of this approach is that on MMUs that 11071 * support a "flush all" operation, we will delay the flush until 11072 * cnum wrap-around, and then flush the TLB one time. This 11073 * is rather rare, so it's a lot less expensive than making 8000 11074 * x-calls to flush the TLB 8000 times. 11075 * 11076 * A per-process (PP) lock is used to synchronize ctx allocations in 11077 * resume() and ctx invalidations here. 
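 *
 * The sequence below is roughly:
 *
 *	grab the PP lock (sfmmu_ctx_lock) with interrupts disabled;
 *	set sfmmu_ctxs[i].cnum to INVALID_CONTEXT for every context domain;
 *	membar, then drop the PP lock and re-enable interrupts;
 *	cross-call sfmmu_raise_tsb_exception to the CPUs in sfmmu_cpusran;
 *	if this CPU's secondary context belongs to the hat, set it to
 *	INVALID_CONTEXT and clear the user tsbinfo (sfmmu_clear_utsbinfo()).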
11078 */
11079 static void
11080 sfmmu_invalidate_ctx(sfmmu_t *sfmmup)
11081 {
11082 cpuset_t cpuset;
11083 int cnum, currcnum;
11084 mmu_ctx_t *mmu_ctxp;
11085 int i;
11086 uint_t pstate_save;
11087
11088 SFMMU_STAT(sf_ctx_inv);
11089
11090 ASSERT(sfmmu_hat_lock_held(sfmmup));
11091 ASSERT(sfmmup != ksfmmup);
11092
11093 kpreempt_disable();
11094
11095 mmu_ctxp = CPU_MMU_CTXP(CPU);
11096 ASSERT(mmu_ctxp);
11097 ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms);
11098 ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]);
11099
11100 currcnum = sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum;
11101
11102 pstate_save = sfmmu_disable_intrs();
11103
11104 lock_set(&sfmmup->sfmmu_ctx_lock); /* acquire PP lock */
11105 /* set HAT cnum invalid across all context domains. */
11106 for (i = 0; i < max_mmu_ctxdoms; i++) {
11107
11108 cnum = sfmmup->sfmmu_ctxs[i].cnum;
11109 if (cnum == INVALID_CONTEXT) {
11110 continue;
11111 }
11112
11113 sfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT;
11114 }
11115 membar_enter(); /* make sure globally visible to all CPUs */
11116 lock_clear(&sfmmup->sfmmu_ctx_lock); /* release PP lock */
11117
11118 sfmmu_enable_intrs(pstate_save);
11119
11120 cpuset = sfmmup->sfmmu_cpusran;
11121 CPUSET_DEL(cpuset, CPU->cpu_id);
11122 CPUSET_AND(cpuset, cpu_ready_set);
11123 if (!CPUSET_ISNULL(cpuset)) {
11124 SFMMU_XCALL_STATS(sfmmup);
11125 xt_some(cpuset, sfmmu_raise_tsb_exception,
11126 (uint64_t)sfmmup, INVALID_CONTEXT);
11127 xt_sync(cpuset);
11128 SFMMU_STAT(sf_tsb_raise_exception);
11129 SFMMU_MMU_STAT(mmu_tsb_raise_exception);
11130 }
11131
11132 /*
11133 * If the hat to-be-invalidated is the same as the current
11134 * process on local CPU we need to invalidate
11135 * this CPU context as well.
11136 */
11137 if ((sfmmu_getctx_sec() == currcnum) &&
11138 (currcnum != INVALID_CONTEXT)) {
11139 sfmmu_setctx_sec(INVALID_CONTEXT);
11140 sfmmu_clear_utsbinfo();
11141 }
11142
11143 kpreempt_enable();
11144
11145 /*
11146 * we hold the hat lock, so nobody should allocate a context
11147 * for us yet
11148 */
11149 ASSERT(sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum == INVALID_CONTEXT);
11150 }
11151
11152 #ifdef VAC
11153 /*
11154 * We need to flush the cache on all cpus. It is possible that
11155 * a process referenced a page as cacheable but has since exited
11156 * and cleared the mapping list. We still need to flush it but have no
11157 * state, so flushing on all cpus is the only alternative.
11158 */
11159 void
11160 sfmmu_cache_flush(pfn_t pfnum, int vcolor)
11161 {
11162 cpuset_t cpuset;
11163
11164 kpreempt_disable();
11165 cpuset = cpu_ready_set;
11166 CPUSET_DEL(cpuset, CPU->cpu_id);
11167 SFMMU_XCALL_STATS(NULL); /* account to any ctx */
11168 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
11169 xt_sync(cpuset);
11170 vac_flushpage(pfnum, vcolor);
11171 kpreempt_enable();
11172 }
11173
11174 void
11175 sfmmu_cache_flushcolor(int vcolor, pfn_t pfnum)
11176 {
11177 cpuset_t cpuset;
11178
11179 ASSERT(vcolor >= 0);
11180
11181 kpreempt_disable();
11182 cpuset = cpu_ready_set;
11183 CPUSET_DEL(cpuset, CPU->cpu_id);
11184 SFMMU_XCALL_STATS(NULL); /* account to any ctx */
11185 xt_some(cpuset, vac_flushcolor_tl1, vcolor, pfnum);
11186 xt_sync(cpuset);
11187 vac_flushcolor(vcolor, pfnum);
11188 kpreempt_enable();
11189 }
11190 #endif /* VAC */
11191
11192 /*
11193 * We need to prevent processes from accessing the TSB using a cached physical
11194 * address.
It's alright if they try to access the TSB via virtual address 11195 * since they will just fault on that virtual address once the mapping has 11196 * been suspended. 11197 */ 11198 #pragma weak sendmondo_in_recover 11199 11200 /* ARGSUSED */ 11201 static int 11202 sfmmu_tsb_pre_relocator(caddr_t va, uint_t tsbsz, uint_t flags, void *tsbinfo) 11203 { 11204 hatlock_t *hatlockp; 11205 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo; 11206 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu; 11207 extern uint32_t sendmondo_in_recover; 11208 11209 if (flags != HAT_PRESUSPEND) 11210 return (0); 11211 11212 hatlockp = sfmmu_hat_enter(sfmmup); 11213 11214 tsbinfop->tsb_flags |= TSB_RELOC_FLAG; 11215 11216 /* 11217 * For Cheetah+ Erratum 25: 11218 * Wait for any active recovery to finish. We can't risk 11219 * relocating the TSB of the thread running mondo_recover_proc() 11220 * since, if we did that, we would deadlock. The scenario we are 11221 * trying to avoid is as follows: 11222 * 11223 * THIS CPU RECOVER CPU 11224 * -------- ----------- 11225 * Begins recovery, walking through TSB 11226 * hat_pagesuspend() TSB TTE 11227 * TLB miss on TSB TTE, spins at TL1 11228 * xt_sync() 11229 * send_mondo_timeout() 11230 * mondo_recover_proc() 11231 * ((deadlocked)) 11232 * 11233 * The second half of the workaround is that mondo_recover_proc() 11234 * checks to see if the tsb_info has the RELOC flag set, and if it 11235 * does, it skips over that TSB without ever touching tsbinfop->tsb_va 11236 * and hence avoiding the TLB miss that could result in a deadlock. 11237 */ 11238 if (&sendmondo_in_recover) { 11239 membar_enter(); /* make sure RELOC flag visible */ 11240 while (sendmondo_in_recover) { 11241 drv_usecwait(1); 11242 membar_consumer(); 11243 } 11244 } 11245 11246 sfmmu_invalidate_ctx(sfmmup); 11247 sfmmu_hat_exit(hatlockp); 11248 11249 return (0); 11250 } 11251 11252 /* ARGSUSED */ 11253 static int 11254 sfmmu_tsb_post_relocator(caddr_t va, uint_t tsbsz, uint_t flags, 11255 void *tsbinfo, pfn_t newpfn) 11256 { 11257 hatlock_t *hatlockp; 11258 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo; 11259 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu; 11260 11261 if (flags != HAT_POSTUNSUSPEND) 11262 return (0); 11263 11264 hatlockp = sfmmu_hat_enter(sfmmup); 11265 11266 SFMMU_STAT(sf_tsb_reloc); 11267 11268 /* 11269 * The process may have swapped out while we were relocating one 11270 * of its TSBs. If so, don't bother doing the setup since the 11271 * process can't be using the memory anymore. 11272 */ 11273 if ((tsbinfop->tsb_flags & TSB_SWAPPED) == 0) { 11274 ASSERT(va == tsbinfop->tsb_va); 11275 sfmmu_tsbinfo_setup_phys(tsbinfop, newpfn); 11276 sfmmu_setup_tsbinfo(sfmmup); 11277 11278 if (tsbinfop->tsb_flags & TSB_FLUSH_NEEDED) { 11279 sfmmu_inv_tsb(tsbinfop->tsb_va, 11280 TSB_BYTES(tsbinfop->tsb_szc)); 11281 tsbinfop->tsb_flags &= ~TSB_FLUSH_NEEDED; 11282 } 11283 } 11284 11285 membar_exit(); 11286 tsbinfop->tsb_flags &= ~TSB_RELOC_FLAG; 11287 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 11288 11289 sfmmu_hat_exit(hatlockp); 11290 11291 return (0); 11292 } 11293 11294 /* 11295 * Allocate and initialize a tsb_info structure. Note that we may or may not 11296 * allocate a TSB here, depending on the flags passed in. 
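 *
 * A minimal call sketch (hypothetical caller, shown only to illustrate
 * the interface; it is not lifted from an actual call site):
 *
 *	struct tsb_info *tsbinfop;
 *
 *	if (sfmmu_tsbinfo_alloc(&tsbinfop, TSB_MIN_SZCODE,
 *	    TSB8K|TSB64K|TSB512K, TSB_ALLOC, sfmmup) != 0) {
 *		(retry with a smaller tsb_szc, or with TSB_FORCEALLOC
 *		if the allocation must not fail)
 *	}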
11297 */ 11298 static int 11299 sfmmu_tsbinfo_alloc(struct tsb_info **tsbinfopp, int tsb_szc, int tte_sz_mask, 11300 uint_t flags, sfmmu_t *sfmmup) 11301 { 11302 int err; 11303 11304 *tsbinfopp = (struct tsb_info *)kmem_cache_alloc( 11305 sfmmu_tsbinfo_cache, KM_SLEEP); 11306 11307 if ((err = sfmmu_init_tsbinfo(*tsbinfopp, tte_sz_mask, 11308 tsb_szc, flags, sfmmup)) != 0) { 11309 kmem_cache_free(sfmmu_tsbinfo_cache, *tsbinfopp); 11310 SFMMU_STAT(sf_tsb_allocfail); 11311 *tsbinfopp = NULL; 11312 return (err); 11313 } 11314 SFMMU_STAT(sf_tsb_alloc); 11315 11316 /* 11317 * Bump the TSB size counters for this TSB size. 11318 */ 11319 (*(((int *)&sfmmu_tsbsize_stat) + tsb_szc))++; 11320 return (0); 11321 } 11322 11323 static void 11324 sfmmu_tsb_free(struct tsb_info *tsbinfo) 11325 { 11326 caddr_t tsbva = tsbinfo->tsb_va; 11327 uint_t tsb_size = TSB_BYTES(tsbinfo->tsb_szc); 11328 struct kmem_cache *kmem_cachep = tsbinfo->tsb_cache; 11329 vmem_t *vmp = tsbinfo->tsb_vmp; 11330 11331 /* 11332 * If we allocated this TSB from relocatable kernel memory, then we 11333 * need to uninstall the callback handler. 11334 */ 11335 if (tsbinfo->tsb_cache != sfmmu_tsb8k_cache) { 11336 uintptr_t slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT; 11337 caddr_t slab_vaddr = (caddr_t)((uintptr_t)tsbva & slab_mask); 11338 page_t **ppl; 11339 int ret; 11340 11341 ret = as_pagelock(&kas, &ppl, slab_vaddr, PAGESIZE, S_WRITE); 11342 ASSERT(ret == 0); 11343 hat_delete_callback(tsbva, (uint_t)tsb_size, (void *)tsbinfo, 11344 0, NULL); 11345 as_pageunlock(&kas, ppl, slab_vaddr, PAGESIZE, S_WRITE); 11346 } 11347 11348 if (kmem_cachep != NULL) { 11349 kmem_cache_free(kmem_cachep, tsbva); 11350 } else { 11351 vmem_xfree(vmp, (void *)tsbva, tsb_size); 11352 } 11353 tsbinfo->tsb_va = (caddr_t)0xbad00bad; 11354 atomic_add_64(&tsb_alloc_bytes, -(int64_t)tsb_size); 11355 } 11356 11357 static void 11358 sfmmu_tsbinfo_free(struct tsb_info *tsbinfo) 11359 { 11360 if ((tsbinfo->tsb_flags & TSB_SWAPPED) == 0) { 11361 sfmmu_tsb_free(tsbinfo); 11362 } 11363 kmem_cache_free(sfmmu_tsbinfo_cache, tsbinfo); 11364 11365 } 11366 11367 /* 11368 * Setup all the references to physical memory for this tsbinfo. 11369 * The underlying page(s) must be locked. 11370 */ 11371 static void 11372 sfmmu_tsbinfo_setup_phys(struct tsb_info *tsbinfo, pfn_t pfn) 11373 { 11374 ASSERT(pfn != PFN_INVALID); 11375 ASSERT(pfn == va_to_pfn(tsbinfo->tsb_va)); 11376 11377 #ifndef sun4v 11378 if (tsbinfo->tsb_szc == 0) { 11379 sfmmu_memtte(&tsbinfo->tsb_tte, pfn, 11380 PROT_WRITE|PROT_READ, TTE8K); 11381 } else { 11382 /* 11383 * Round down PA and use a large mapping; the handlers will 11384 * compute the TSB pointer at the correct offset into the 11385 * big virtual page. NOTE: this assumes all TSBs larger 11386 * than 8K must come from physically contiguous slabs of 11387 * size tsb_slab_size. 
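 * Concretely, the sfmmu_memtte() call below builds the TTE from
 * (pfn & ~tsb_slab_mask), i.e. the pfn of the slab base, with size
 * tsb_slab_ttesz; the low-order bits of the TSB's virtual address
 * supply the offset within that slab when the handlers compute the
 * TSB pointer.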
11388 */ 11389 sfmmu_memtte(&tsbinfo->tsb_tte, pfn & ~tsb_slab_mask, 11390 PROT_WRITE|PROT_READ, tsb_slab_ttesz); 11391 } 11392 tsbinfo->tsb_pa = ptob(pfn); 11393 11394 TTE_SET_LOCKED(&tsbinfo->tsb_tte); /* lock the tte into dtlb */ 11395 TTE_SET_MOD(&tsbinfo->tsb_tte); /* enable writes */ 11396 11397 ASSERT(TTE_IS_PRIVILEGED(&tsbinfo->tsb_tte)); 11398 ASSERT(TTE_IS_LOCKED(&tsbinfo->tsb_tte)); 11399 #else /* sun4v */ 11400 tsbinfo->tsb_pa = ptob(pfn); 11401 #endif /* sun4v */ 11402 } 11403 11404 11405 /* 11406 * Returns zero on success, ENOMEM if over the high water mark, 11407 * or EAGAIN if the caller needs to retry with a smaller TSB 11408 * size (or specify TSB_FORCEALLOC if the allocation can't fail). 11409 * 11410 * This call cannot fail to allocate a TSB if TSB_FORCEALLOC 11411 * is specified and the TSB requested is PAGESIZE, though it 11412 * may sleep waiting for memory if sufficient memory is not 11413 * available. 11414 */ 11415 static int 11416 sfmmu_init_tsbinfo(struct tsb_info *tsbinfo, int tteszmask, 11417 int tsbcode, uint_t flags, sfmmu_t *sfmmup) 11418 { 11419 caddr_t vaddr = NULL; 11420 caddr_t slab_vaddr; 11421 uintptr_t slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT; 11422 int tsbbytes = TSB_BYTES(tsbcode); 11423 int lowmem = 0; 11424 struct kmem_cache *kmem_cachep = NULL; 11425 vmem_t *vmp = NULL; 11426 lgrp_id_t lgrpid = LGRP_NONE; 11427 pfn_t pfn; 11428 uint_t cbflags = HAC_SLEEP; 11429 page_t **pplist; 11430 int ret; 11431 11432 if (flags & (TSB_FORCEALLOC | TSB_SWAPIN | TSB_GROW | TSB_SHRINK)) 11433 flags |= TSB_ALLOC; 11434 11435 ASSERT((flags & TSB_FORCEALLOC) == 0 || tsbcode == TSB_MIN_SZCODE); 11436 11437 tsbinfo->tsb_sfmmu = sfmmup; 11438 11439 /* 11440 * If not allocating a TSB, set up the tsbinfo, set TSB_SWAPPED, and 11441 * return. 11442 */ 11443 if ((flags & TSB_ALLOC) == 0) { 11444 tsbinfo->tsb_szc = tsbcode; 11445 tsbinfo->tsb_ttesz_mask = tteszmask; 11446 tsbinfo->tsb_va = (caddr_t)0xbadbadbeef; 11447 tsbinfo->tsb_pa = -1; 11448 tsbinfo->tsb_tte.ll = 0; 11449 tsbinfo->tsb_next = NULL; 11450 tsbinfo->tsb_flags = TSB_SWAPPED; 11451 tsbinfo->tsb_cache = NULL; 11452 tsbinfo->tsb_vmp = NULL; 11453 return (0); 11454 } 11455 11456 #ifdef DEBUG 11457 /* 11458 * For debugging: 11459 * Randomly force allocation failures every tsb_alloc_mtbf 11460 * tries if TSB_FORCEALLOC is not specified. This will 11461 * return ENOMEM if tsb_alloc_mtbf is odd, or EAGAIN if 11462 * it is even, to allow testing of both failure paths... 11463 */ 11464 if (tsb_alloc_mtbf && ((flags & TSB_FORCEALLOC) == 0) && 11465 (tsb_alloc_count++ == tsb_alloc_mtbf)) { 11466 tsb_alloc_count = 0; 11467 tsb_alloc_fail_mtbf++; 11468 return ((tsb_alloc_mtbf & 1)? ENOMEM : EAGAIN); 11469 } 11470 #endif /* DEBUG */ 11471 11472 /* 11473 * Enforce high water mark if we are not doing a forced allocation 11474 * and are not shrinking a process' TSB. 11475 */ 11476 if ((flags & TSB_SHRINK) == 0 && 11477 (tsbbytes + tsb_alloc_bytes) > tsb_alloc_hiwater) { 11478 if ((flags & TSB_FORCEALLOC) == 0) 11479 return (ENOMEM); 11480 lowmem = 1; 11481 } 11482 11483 /* 11484 * Allocate from the correct location based upon the size of the TSB 11485 * compared to the base page size, and what memory conditions dictate. 11486 * Note we always do nonblocking allocations from the TSB arena since 11487 * we don't want memory fragmentation to cause processes to block 11488 * indefinitely waiting for memory; until the kernel algorithms that 11489 * coalesce large pages are improved this is our best option. 
11490 * 11491 * Algorithm: 11492 * If allocating a "large" TSB (>8K), allocate from the 11493 * appropriate kmem_tsb_default_arena vmem arena 11494 * else if low on memory or the TSB_FORCEALLOC flag is set or 11495 * tsb_forceheap is set 11496 * Allocate from kernel heap via sfmmu_tsb8k_cache with 11497 * KM_SLEEP (never fails) 11498 * else 11499 * Allocate from appropriate sfmmu_tsb_cache with 11500 * KM_NOSLEEP 11501 * endif 11502 */ 11503 if (tsb_lgrp_affinity) 11504 lgrpid = lgrp_home_id(curthread); 11505 if (lgrpid == LGRP_NONE) 11506 lgrpid = 0; /* use lgrp of boot CPU */ 11507 11508 if (tsbbytes > MMU_PAGESIZE) { 11509 vmp = kmem_tsb_default_arena[lgrpid]; 11510 vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes, 0, 0, 11511 NULL, NULL, VM_NOSLEEP); 11512 #ifdef DEBUG 11513 } else if (lowmem || (flags & TSB_FORCEALLOC) || tsb_forceheap) { 11514 #else /* !DEBUG */ 11515 } else if (lowmem || (flags & TSB_FORCEALLOC)) { 11516 #endif /* DEBUG */ 11517 kmem_cachep = sfmmu_tsb8k_cache; 11518 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_SLEEP); 11519 ASSERT(vaddr != NULL); 11520 } else { 11521 kmem_cachep = sfmmu_tsb_cache[lgrpid]; 11522 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_NOSLEEP); 11523 } 11524 11525 tsbinfo->tsb_cache = kmem_cachep; 11526 tsbinfo->tsb_vmp = vmp; 11527 11528 if (vaddr == NULL) { 11529 return (EAGAIN); 11530 } 11531 11532 atomic_add_64(&tsb_alloc_bytes, (int64_t)tsbbytes); 11533 kmem_cachep = tsbinfo->tsb_cache; 11534 11535 /* 11536 * If we are allocating from outside the cage, then we need to 11537 * register a relocation callback handler. Note that for now 11538 * since pseudo mappings always hang off of the slab's root page, 11539 * we need only lock the first 8K of the TSB slab. This is a bit 11540 * hacky but it is good for performance. 11541 */ 11542 if (kmem_cachep != sfmmu_tsb8k_cache) { 11543 slab_vaddr = (caddr_t)((uintptr_t)vaddr & slab_mask); 11544 ret = as_pagelock(&kas, &pplist, slab_vaddr, PAGESIZE, S_WRITE); 11545 ASSERT(ret == 0); 11546 ret = hat_add_callback(sfmmu_tsb_cb_id, vaddr, (uint_t)tsbbytes, 11547 cbflags, (void *)tsbinfo, &pfn, NULL); 11548 11549 /* 11550 * Need to free up resources if we could not successfully 11551 * add the callback function and return an error condition. 11552 */ 11553 if (ret != 0) { 11554 if (kmem_cachep) { 11555 kmem_cache_free(kmem_cachep, vaddr); 11556 } else { 11557 vmem_xfree(vmp, (void *)vaddr, tsbbytes); 11558 } 11559 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE, 11560 S_WRITE); 11561 return (EAGAIN); 11562 } 11563 } else { 11564 /* 11565 * Since allocation of 8K TSBs from heap is rare and occurs 11566 * during memory pressure we allocate them from permanent 11567 * memory rather than using callbacks to get the PFN. 
11568 */ 11569 pfn = hat_getpfnum(kas.a_hat, vaddr); 11570 } 11571 11572 tsbinfo->tsb_va = vaddr; 11573 tsbinfo->tsb_szc = tsbcode; 11574 tsbinfo->tsb_ttesz_mask = tteszmask; 11575 tsbinfo->tsb_next = NULL; 11576 tsbinfo->tsb_flags = 0; 11577 11578 sfmmu_tsbinfo_setup_phys(tsbinfo, pfn); 11579 11580 if (kmem_cachep != sfmmu_tsb8k_cache) { 11581 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE, S_WRITE); 11582 } 11583 11584 sfmmu_inv_tsb(vaddr, tsbbytes); 11585 return (0); 11586 } 11587 11588 /* 11589 * Initialize per cpu tsb and per cpu tsbmiss_area 11590 */ 11591 void 11592 sfmmu_init_tsbs(void) 11593 { 11594 int i; 11595 struct tsbmiss *tsbmissp; 11596 struct kpmtsbm *kpmtsbmp; 11597 #ifndef sun4v 11598 extern int dcache_line_mask; 11599 #endif /* sun4v */ 11600 extern uint_t vac_colors; 11601 11602 /* 11603 * Init. tsb miss area. 11604 */ 11605 tsbmissp = tsbmiss_area; 11606 11607 for (i = 0; i < NCPU; tsbmissp++, i++) { 11608 /* 11609 * initialize the tsbmiss area. 11610 * Do this for all possible CPUs as some may be added 11611 * while the system is running. There is no cost to this. 11612 */ 11613 tsbmissp->ksfmmup = ksfmmup; 11614 #ifndef sun4v 11615 tsbmissp->dcache_line_mask = (uint16_t)dcache_line_mask; 11616 #endif /* sun4v */ 11617 tsbmissp->khashstart = 11618 (struct hmehash_bucket *)va_to_pa((caddr_t)khme_hash); 11619 tsbmissp->uhashstart = 11620 (struct hmehash_bucket *)va_to_pa((caddr_t)uhme_hash); 11621 tsbmissp->khashsz = khmehash_num; 11622 tsbmissp->uhashsz = uhmehash_num; 11623 } 11624 11625 sfmmu_tsb_cb_id = hat_register_callback('T'<<16 | 'S' << 8 | 'B', 11626 sfmmu_tsb_pre_relocator, sfmmu_tsb_post_relocator, NULL, 0); 11627 11628 if (kpm_enable == 0) 11629 return; 11630 11631 /* -- Begin KPM specific init -- */ 11632 11633 if (kpm_smallpages) { 11634 /* 11635 * If we're using base pagesize pages for seg_kpm 11636 * mappings, we use the kernel TSB since we can't afford 11637 * to allocate a second huge TSB for these mappings. 11638 */ 11639 kpm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base; 11640 kpm_tsbsz = ktsb_szcode; 11641 kpmsm_tsbbase = kpm_tsbbase; 11642 kpmsm_tsbsz = kpm_tsbsz; 11643 } else { 11644 /* 11645 * In VAC conflict case, just put the entries in the 11646 * kernel 8K indexed TSB for now so we can find them. 11647 * This could really be changed in the future if we feel 11648 * the need... 11649 */ 11650 kpmsm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base; 11651 kpmsm_tsbsz = ktsb_szcode; 11652 kpm_tsbbase = ktsb_phys? ktsb4m_pbase : (uint64_t)ktsb4m_base; 11653 kpm_tsbsz = ktsb4m_szcode; 11654 } 11655 11656 kpmtsbmp = kpmtsbm_area; 11657 for (i = 0; i < NCPU; kpmtsbmp++, i++) { 11658 /* 11659 * Initialize the kpmtsbm area. 11660 * Do this for all possible CPUs as some may be added 11661 * while the system is running. There is no cost to this. 11662 */ 11663 kpmtsbmp->vbase = kpm_vbase; 11664 kpmtsbmp->vend = kpm_vbase + kpm_size * vac_colors; 11665 kpmtsbmp->sz_shift = kpm_size_shift; 11666 kpmtsbmp->kpmp_shift = kpmp_shift; 11667 kpmtsbmp->kpmp2pshft = (uchar_t)kpmp2pshft; 11668 if (kpm_smallpages == 0) { 11669 kpmtsbmp->kpmp_table_sz = kpmp_table_sz; 11670 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_table); 11671 } else { 11672 kpmtsbmp->kpmp_table_sz = kpmp_stable_sz; 11673 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_stable); 11674 } 11675 kpmtsbmp->msegphashpa = va_to_pa(memseg_phash); 11676 kpmtsbmp->flags = KPMTSBM_ENABLE_FLAG; 11677 #ifdef DEBUG 11678 kpmtsbmp->flags |= (kpm_tsbmtl) ? 
KPMTSBM_TLTSBM_FLAG : 0; 11679 #endif /* DEBUG */ 11680 if (ktsb_phys) 11681 kpmtsbmp->flags |= KPMTSBM_TSBPHYS_FLAG; 11682 } 11683 11684 /* -- End KPM specific init -- */ 11685 } 11686 11687 /* Avoid using sfmmu_tsbinfo_alloc() to avoid kmem_alloc - no real reason */ 11688 struct tsb_info ktsb_info[2]; 11689 11690 /* 11691 * Called from hat_kern_setup() to setup the tsb_info for ksfmmup. 11692 */ 11693 void 11694 sfmmu_init_ktsbinfo() 11695 { 11696 ASSERT(ksfmmup != NULL); 11697 ASSERT(ksfmmup->sfmmu_tsb == NULL); 11698 /* 11699 * Allocate tsbinfos for kernel and copy in data 11700 * to make debug easier and sun4v setup easier. 11701 */ 11702 ktsb_info[0].tsb_sfmmu = ksfmmup; 11703 ktsb_info[0].tsb_szc = ktsb_szcode; 11704 ktsb_info[0].tsb_ttesz_mask = TSB8K|TSB64K|TSB512K; 11705 ktsb_info[0].tsb_va = ktsb_base; 11706 ktsb_info[0].tsb_pa = ktsb_pbase; 11707 ktsb_info[0].tsb_flags = 0; 11708 ktsb_info[0].tsb_tte.ll = 0; 11709 ktsb_info[0].tsb_cache = NULL; 11710 11711 ktsb_info[1].tsb_sfmmu = ksfmmup; 11712 ktsb_info[1].tsb_szc = ktsb4m_szcode; 11713 ktsb_info[1].tsb_ttesz_mask = TSB4M; 11714 ktsb_info[1].tsb_va = ktsb4m_base; 11715 ktsb_info[1].tsb_pa = ktsb4m_pbase; 11716 ktsb_info[1].tsb_flags = 0; 11717 ktsb_info[1].tsb_tte.ll = 0; 11718 ktsb_info[1].tsb_cache = NULL; 11719 11720 /* Link them into ksfmmup. */ 11721 ktsb_info[0].tsb_next = &ktsb_info[1]; 11722 ktsb_info[1].tsb_next = NULL; 11723 ksfmmup->sfmmu_tsb = &ktsb_info[0]; 11724 11725 sfmmu_setup_tsbinfo(ksfmmup); 11726 } 11727 11728 /* 11729 * Cache the last value returned from va_to_pa(). If the VA specified 11730 * in the current call to cached_va_to_pa() maps to the same Page (as the 11731 * previous call to cached_va_to_pa()), then compute the PA using 11732 * cached info, else call va_to_pa(). 11733 * 11734 * Note: this function is neither MT-safe nor consistent in the presence 11735 * of multiple, interleaved threads. This function was created to enable 11736 * an optimization used during boot (at a point when there's only one thread 11737 * executing on the "boot CPU", and before startup_vm() has been called). 11738 */ 11739 static uint64_t 11740 cached_va_to_pa(void *vaddr) 11741 { 11742 static uint64_t prev_vaddr_base = 0; 11743 static uint64_t prev_pfn = 0; 11744 11745 if ((((uint64_t)vaddr) & MMU_PAGEMASK) == prev_vaddr_base) { 11746 return (prev_pfn | ((uint64_t)vaddr & MMU_PAGEOFFSET)); 11747 } else { 11748 uint64_t pa = va_to_pa(vaddr); 11749 11750 if (pa != ((uint64_t)-1)) { 11751 /* 11752 * Computed physical address is valid. Cache its 11753 * related info for the next cached_va_to_pa() call. 11754 */ 11755 prev_pfn = pa & MMU_PAGEMASK; 11756 prev_vaddr_base = ((uint64_t)vaddr) & MMU_PAGEMASK; 11757 } 11758 11759 return (pa); 11760 } 11761 } 11762 11763 /* 11764 * Carve up our nucleus hblk region. We may allocate more hblks than 11765 * asked due to rounding errors but we are guaranteed to have at least 11766 * enough space to allocate the requested number of hblk8's and hblk1's. 
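 *
 * Note that the carving loops below translate each hblk with
 * cached_va_to_pa() (above); since consecutive hblks normally fall
 * within the same 8K page, the full va_to_pa() walk is only taken
 * roughly once per page during this boot-time pass.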
11767 */
11768 void
11769 sfmmu_init_nucleus_hblks(caddr_t addr, size_t size, int nhblk8, int nhblk1)
11770 {
11771 struct hme_blk *hmeblkp;
11772 size_t hme8blk_sz, hme1blk_sz;
11773 size_t i;
11774 size_t hblk8_bound;
11775 ulong_t j = 0, k = 0;
11776
11777 ASSERT(addr != NULL && size != 0);
11778
11779 /* Need to use proper structure alignment */
11780 hme8blk_sz = roundup(HME8BLK_SZ, sizeof (int64_t));
11781 hme1blk_sz = roundup(HME1BLK_SZ, sizeof (int64_t));
11782
11783 nucleus_hblk8.list = (void *)addr;
11784 nucleus_hblk8.index = 0;
11785
11786 /*
11787 * Use as much memory as possible for hblk8's since we
11788 * expect all bop_alloc'ed memory to be allocated in 8k chunks.
11789 * We need to hold back enough space for the hblk1's which
11790 * we'll allocate next.
11791 */
11792 hblk8_bound = size - (nhblk1 * hme1blk_sz) - hme8blk_sz;
11793 for (i = 0; i <= hblk8_bound; i += hme8blk_sz, j++) {
11794 hmeblkp = (struct hme_blk *)addr;
11795 addr += hme8blk_sz;
11796 hmeblkp->hblk_nuc_bit = 1;
11797 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp);
11798 }
11799 nucleus_hblk8.len = j;
11800 ASSERT(j >= nhblk8);
11801 SFMMU_STAT_ADD(sf_hblk8_ncreate, j);
11802
11803 nucleus_hblk1.list = (void *)addr;
11804 nucleus_hblk1.index = 0;
11805 for (; i <= (size - hme1blk_sz); i += hme1blk_sz, k++) {
11806 hmeblkp = (struct hme_blk *)addr;
11807 addr += hme1blk_sz;
11808 hmeblkp->hblk_nuc_bit = 1;
11809 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp);
11810 }
11811 ASSERT(k >= nhblk1);
11812 nucleus_hblk1.len = k;
11813 SFMMU_STAT_ADD(sf_hblk1_ncreate, k);
11814 }
11815
11816 /*
11817 * This function is currently not supported on this platform. For what
11818 * it's supposed to do, see hat.c and hat_srmmu.c
11819 */
11820 /* ARGSUSED */
11821 faultcode_t
11822 hat_softlock(struct hat *hat, caddr_t addr, size_t *lenp, page_t **ppp,
11823 uint_t flags)
11824 {
11825 ASSERT(hat->sfmmu_xhat_provider == NULL);
11826 return (FC_NOSUPPORT);
11827 }
11828
11829 /*
11830 * Searches the mapping list of the page for a mapping of the same size. If
11831 * none is found, the corresponding bit is cleared in the p_index field. When
11832 * large pages are more prevalent in the system, we can maintain the mapping
11833 * list in order and we don't have to traverse the list each time. Just check
11834 * the next and prev entries, and if both are of different size, we clear the bit.
11835 */
11836 static void
11837 sfmmu_rm_large_mappings(page_t *pp, int ttesz)
11838 {
11839 struct sf_hment *sfhmep;
11840 struct hme_blk *hmeblkp;
11841 int index;
11842 pgcnt_t npgs;
11843
11844 ASSERT(ttesz > TTE8K);
11845
11846 ASSERT(sfmmu_mlist_held(pp));
11847
11848 ASSERT(PP_ISMAPPED_LARGE(pp));
11849
11850 /*
11851 * Traverse the mapping list looking for another mapping of the same
11852 * size, since we only want to clear the index field if all mappings of
11853 * that size are gone.
11854 */
11855
11856 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
11857 hmeblkp = sfmmu_hmetohblk(sfhmep);
11858 if (hmeblkp->hblk_xhat_bit)
11859 continue;
11860 if (hme_size(sfhmep) == ttesz) {
11861 /*
11862 * another mapping of the same size. don't clear index.
11863 */
11864 return;
11865 }
11866 }
11867
11868 /*
11869 * Clear the p_index bit for large page.
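 * For example, when the last 4M mapping of a page goes away, the
 * loop below clears PAGESZ_TO_INDEX(TTE4M) in the p_index field of
 * each of the TTEPAGES(TTE4M) constituent pages.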
11870 */ 11871 index = PAGESZ_TO_INDEX(ttesz); 11872 npgs = TTEPAGES(ttesz); 11873 while (npgs-- > 0) { 11874 ASSERT(pp->p_index & index); 11875 pp->p_index &= ~index; 11876 pp = PP_PAGENEXT(pp); 11877 } 11878 } 11879 11880 /* 11881 * return supported features 11882 */ 11883 /* ARGSUSED */ 11884 int 11885 hat_supported(enum hat_features feature, void *arg) 11886 { 11887 switch (feature) { 11888 case HAT_SHARED_PT: 11889 case HAT_DYNAMIC_ISM_UNMAP: 11890 case HAT_VMODSORT: 11891 return (1); 11892 default: 11893 return (0); 11894 } 11895 } 11896 11897 void 11898 hat_enter(struct hat *hat) 11899 { 11900 hatlock_t *hatlockp; 11901 11902 if (hat != ksfmmup) { 11903 hatlockp = TSB_HASH(hat); 11904 mutex_enter(HATLOCK_MUTEXP(hatlockp)); 11905 } 11906 } 11907 11908 void 11909 hat_exit(struct hat *hat) 11910 { 11911 hatlock_t *hatlockp; 11912 11913 if (hat != ksfmmup) { 11914 hatlockp = TSB_HASH(hat); 11915 mutex_exit(HATLOCK_MUTEXP(hatlockp)); 11916 } 11917 } 11918 11919 /*ARGSUSED*/ 11920 void 11921 hat_reserve(struct as *as, caddr_t addr, size_t len) 11922 { 11923 } 11924 11925 static void 11926 hat_kstat_init(void) 11927 { 11928 kstat_t *ksp; 11929 11930 ksp = kstat_create("unix", 0, "sfmmu_global_stat", "hat", 11931 KSTAT_TYPE_RAW, sizeof (struct sfmmu_global_stat), 11932 KSTAT_FLAG_VIRTUAL); 11933 if (ksp) { 11934 ksp->ks_data = (void *) &sfmmu_global_stat; 11935 kstat_install(ksp); 11936 } 11937 ksp = kstat_create("unix", 0, "sfmmu_tsbsize_stat", "hat", 11938 KSTAT_TYPE_RAW, sizeof (struct sfmmu_tsbsize_stat), 11939 KSTAT_FLAG_VIRTUAL); 11940 if (ksp) { 11941 ksp->ks_data = (void *) &sfmmu_tsbsize_stat; 11942 kstat_install(ksp); 11943 } 11944 ksp = kstat_create("unix", 0, "sfmmu_percpu_stat", "hat", 11945 KSTAT_TYPE_RAW, sizeof (struct sfmmu_percpu_stat) * NCPU, 11946 KSTAT_FLAG_WRITABLE); 11947 if (ksp) { 11948 ksp->ks_update = sfmmu_kstat_percpu_update; 11949 kstat_install(ksp); 11950 } 11951 } 11952 11953 /* ARGSUSED */ 11954 static int 11955 sfmmu_kstat_percpu_update(kstat_t *ksp, int rw) 11956 { 11957 struct sfmmu_percpu_stat *cpu_kstat = ksp->ks_data; 11958 struct tsbmiss *tsbm = tsbmiss_area; 11959 struct kpmtsbm *kpmtsbm = kpmtsbm_area; 11960 int i; 11961 11962 ASSERT(cpu_kstat); 11963 if (rw == KSTAT_READ) { 11964 for (i = 0; i < NCPU; cpu_kstat++, tsbm++, kpmtsbm++, i++) { 11965 cpu_kstat->sf_itlb_misses = tsbm->itlb_misses; 11966 cpu_kstat->sf_dtlb_misses = tsbm->dtlb_misses; 11967 cpu_kstat->sf_utsb_misses = tsbm->utsb_misses - 11968 tsbm->uprot_traps; 11969 cpu_kstat->sf_ktsb_misses = tsbm->ktsb_misses + 11970 kpmtsbm->kpm_tsb_misses - tsbm->kprot_traps; 11971 11972 if (tsbm->itlb_misses > 0 && tsbm->dtlb_misses > 0) { 11973 cpu_kstat->sf_tsb_hits = 11974 (tsbm->itlb_misses + tsbm->dtlb_misses) - 11975 (tsbm->utsb_misses + tsbm->ktsb_misses + 11976 kpmtsbm->kpm_tsb_misses); 11977 } else { 11978 cpu_kstat->sf_tsb_hits = 0; 11979 } 11980 cpu_kstat->sf_umod_faults = tsbm->uprot_traps; 11981 cpu_kstat->sf_kmod_faults = tsbm->kprot_traps; 11982 } 11983 } else { 11984 /* KSTAT_WRITE is used to clear stats */ 11985 for (i = 0; i < NCPU; tsbm++, kpmtsbm++, i++) { 11986 tsbm->itlb_misses = 0; 11987 tsbm->dtlb_misses = 0; 11988 tsbm->utsb_misses = 0; 11989 tsbm->ktsb_misses = 0; 11990 tsbm->uprot_traps = 0; 11991 tsbm->kprot_traps = 0; 11992 kpmtsbm->kpm_dtlb_misses = 0; 11993 kpmtsbm->kpm_tsb_misses = 0; 11994 } 11995 } 11996 return (0); 11997 } 11998 11999 #ifdef DEBUG 12000 12001 tte_t *gorig[NCPU], *gcur[NCPU], *gnew[NCPU]; 12002 12003 /* 12004 * A tte checker. 
*orig_old is the value we read before cas. 12005 * *cur is the value returned by cas. 12006 * *new is the desired value when we do the cas. 12007 * 12008 * *hmeblkp is currently unused. 12009 */ 12010 12011 /* ARGSUSED */ 12012 void 12013 chk_tte(tte_t *orig_old, tte_t *cur, tte_t *new, struct hme_blk *hmeblkp) 12014 { 12015 pfn_t i, j, k; 12016 int cpuid = CPU->cpu_id; 12017 12018 gorig[cpuid] = orig_old; 12019 gcur[cpuid] = cur; 12020 gnew[cpuid] = new; 12021 12022 #ifdef lint 12023 hmeblkp = hmeblkp; 12024 #endif 12025 12026 if (TTE_IS_VALID(orig_old)) { 12027 if (TTE_IS_VALID(cur)) { 12028 i = TTE_TO_TTEPFN(orig_old); 12029 j = TTE_TO_TTEPFN(cur); 12030 k = TTE_TO_TTEPFN(new); 12031 if (i != j) { 12032 /* remap error? */ 12033 panic("chk_tte: bad pfn, 0x%lx, 0x%lx", i, j); 12034 } 12035 12036 if (i != k) { 12037 /* remap error? */ 12038 panic("chk_tte: bad pfn2, 0x%lx, 0x%lx", i, k); 12039 } 12040 } else { 12041 if (TTE_IS_VALID(new)) { 12042 panic("chk_tte: invalid cur? "); 12043 } 12044 12045 i = TTE_TO_TTEPFN(orig_old); 12046 k = TTE_TO_TTEPFN(new); 12047 if (i != k) { 12048 panic("chk_tte: bad pfn3, 0x%lx, 0x%lx", i, k); 12049 } 12050 } 12051 } else { 12052 if (TTE_IS_VALID(cur)) { 12053 j = TTE_TO_TTEPFN(cur); 12054 if (TTE_IS_VALID(new)) { 12055 k = TTE_TO_TTEPFN(new); 12056 if (j != k) { 12057 panic("chk_tte: bad pfn4, 0x%lx, 0x%lx", 12058 j, k); 12059 } 12060 } else { 12061 panic("chk_tte: why here?"); 12062 } 12063 } else { 12064 if (!TTE_IS_VALID(new)) { 12065 panic("chk_tte: why here2 ?"); 12066 } 12067 } 12068 } 12069 } 12070 12071 #endif /* DEBUG */ 12072 12073 extern void prefetch_tsbe_read(struct tsbe *); 12074 extern void prefetch_tsbe_write(struct tsbe *); 12075 12076 12077 /* 12078 * We want to prefetch 7 cache lines ahead for our read prefetch. This gives 12079 * us optimal performance on Cheetah+. You can only have 8 outstanding 12080 * prefetches at any one time, so we opted for 7 read prefetches and 1 write 12081 * prefetch to make the best use of the prefetch capability. 12082 */ 12083 #define TSBE_PREFETCH_STRIDE (7) 12084 12085 void 12086 sfmmu_copy_tsb(struct tsb_info *old_tsbinfo, struct tsb_info *new_tsbinfo) 12087 { 12088 int old_bytes = TSB_BYTES(old_tsbinfo->tsb_szc); 12089 int new_bytes = TSB_BYTES(new_tsbinfo->tsb_szc); 12090 int old_entries = TSB_ENTRIES(old_tsbinfo->tsb_szc); 12091 int new_entries = TSB_ENTRIES(new_tsbinfo->tsb_szc); 12092 struct tsbe *old; 12093 struct tsbe *new; 12094 struct tsbe *new_base = (struct tsbe *)new_tsbinfo->tsb_va; 12095 uint64_t va; 12096 int new_offset; 12097 int i; 12098 int vpshift; 12099 int last_prefetch; 12100 12101 if (old_bytes == new_bytes) { 12102 bcopy(old_tsbinfo->tsb_va, new_tsbinfo->tsb_va, new_bytes); 12103 } else { 12104 12105 /* 12106 * A TSBE is 16 bytes, which means there are four TSBE's per 12107 * P$ line (64 bytes); thus we issue a prefetch every 4 TSBE's. 12108 */ 12109 old = (struct tsbe *)old_tsbinfo->tsb_va; 12110 last_prefetch = old_entries - (4*(TSBE_PREFETCH_STRIDE+1)); 12111 for (i = 0; i < old_entries; i++, old++) { 12112 if (((i & (4-1)) == 0) && (i < last_prefetch)) 12113 prefetch_tsbe_read(old); 12114 if (!old->tte_tag.tag_invalid) { 12115 /* 12116 * We have a valid TTE to remap. Check the 12117 * size. We won't remap 64K or 512K TTEs 12118 * because they span more than one TSB entry 12119 * and are indexed using an 8K virt. page. 12120 * Ditto for 32M and 256M TTEs.
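 * (For instance, a 64K TTE may have been entered at any of the slots
 * corresponding to its eight constituent 8K pages, so there is no single
 * obvious slot for it in the new TSB; entries skipped here simply take a
 * TSB miss and are reloaded on their next access.)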
12121 */ 12122 if (TTE_CSZ(&old->tte_data) == TTE64K || 12123 TTE_CSZ(&old->tte_data) == TTE512K) 12124 continue; 12125 if (mmu_page_sizes == max_mmu_page_sizes) { 12126 if (TTE_CSZ(&old->tte_data) == TTE32M || 12127 TTE_CSZ(&old->tte_data) == TTE256M) 12128 continue; 12129 } 12130 12131 /* clear the lower 22 bits of the va */ 12132 va = *(uint64_t *)old << 22; 12133 /* turn va into a virtual pfn */ 12134 va >>= 22 - TSB_START_SIZE; 12135 /* 12136 * or in bits from the offset in the tsb 12137 * to get the real virtual pfn. These 12138 * correspond to bits [21:13] in the va 12139 */ 12140 vpshift = 12141 TTE_BSZS_SHIFT(TTE_CSZ(&old->tte_data)) & 12142 0x1ff; 12143 va |= (i << vpshift); 12144 va >>= vpshift; 12145 new_offset = va & (new_entries - 1); 12146 new = new_base + new_offset; 12147 prefetch_tsbe_write(new); 12148 *new = *old; 12149 } 12150 } 12151 } 12152 } 12153 12154 /* 12155 * unused in sfmmu 12156 */ 12157 void 12158 hat_dump(void) 12159 { 12160 } 12161 12162 /* 12163 * Called when a thread is exiting and we have switched to the kernel address 12164 * space. Perform the same VM initialization resume() uses when switching 12165 * processes. 12166 * 12167 * Note that sfmmu_load_mmustate() is currently a no-op for kernel threads, but 12168 * we call it anyway in case the semantics change in the future. 12169 */ 12170 /*ARGSUSED*/ 12171 void 12172 hat_thread_exit(kthread_t *thd) 12173 { 12174 uint64_t pgsz_cnum; 12175 uint_t pstate_save; 12176 12177 ASSERT(thd->t_procp->p_as == &kas); 12178 12179 pgsz_cnum = KCONTEXT; 12180 #ifdef sun4u 12181 pgsz_cnum |= (ksfmmup->sfmmu_cext << CTXREG_EXT_SHIFT); 12182 #endif 12183 /* 12184 * Note that sfmmu_load_mmustate() is currently a no-op for 12185 * kernel threads. We need to disable interrupts here, 12186 * simply because otherwise sfmmu_load_mmustate() would panic 12187 * if the caller does not disable interrupts. 12188 */ 12189 pstate_save = sfmmu_disable_intrs(); 12190 sfmmu_setctx_sec(pgsz_cnum); 12191 sfmmu_load_mmustate(ksfmmup); 12192 sfmmu_enable_intrs(pstate_save); 12193 } 12194
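/*
 * Illustrative only: the hat kstats installed by hat_kstat_init() above can
 * be read from user level with libkstat(3LIB). The stand-alone sketch below
 * is not part of this file; it merely looks up the raw
 * "unix:0:sfmmu_global_stat" kstat created above and reports its size, since
 * the layout of the raw data is private to the kernel. Build with -lkstat.
 * Note that writing the "sfmmu_percpu_stat" kstat (created with
 * KSTAT_FLAG_WRITABLE) clears the per-CPU counters, as
 * sfmmu_kstat_percpu_update() shows.
 */
#include <kstat.h>
#include <stdio.h>

int
main(void)
{
	kstat_ctl_t *kc;
	kstat_t *ksp;

	if ((kc = kstat_open()) == NULL) {
		perror("kstat_open");
		return (1);
	}
	/* Module "unix", instance 0, name "sfmmu_global_stat", as above. */
	ksp = kstat_lookup(kc, "unix", 0, "sfmmu_global_stat");
	if (ksp == NULL || kstat_read(kc, ksp, NULL) == -1) {
		perror("sfmmu_global_stat");
		(void) kstat_close(kc);
		return (1);
	}
	/* KSTAT_TYPE_RAW: ks_data is a binary image of the kernel struct. */
	(void) printf("sfmmu_global_stat: %lu bytes of raw data\n",
	    (unsigned long)ksp->ks_data_size);
	(void) kstat_close(kc);
	return (0);
}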