/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * VM - Hardware Address Translation management for Spitfire MMU.
 *
 * This file implements the machine specific hardware translation
 * needed by the VM system.  The machine independent interface is
 * described in <vm/hat.h> while the machine dependent interface
 * and data structures are described in <vm/hat_sfmmu.h>.
 *
 * The hat layer manages the address translation hardware as a cache
 * driven by calls from the higher levels in the VM system.
 */

#include <sys/types.h>
#include <sys/kstat.h>
#include <vm/hat.h>
#include <vm/hat_sfmmu.h>
#include <vm/page.h>
#include <sys/pte.h>
#include <sys/systm.h>
#include <sys/mman.h>
#include <sys/sysmacros.h>
#include <sys/machparam.h>
#include <sys/vtrace.h>
#include <sys/kmem.h>
#include <sys/mmu.h>
#include <sys/cmn_err.h>
#include <sys/cpu.h>
#include <sys/cpuvar.h>
#include <sys/debug.h>
#include <sys/lgrp.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/vmsystm.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_kp.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kpm.h>
#include <vm/rm.h>
#include <sys/t_lock.h>
#include <sys/obpdefs.h>
#include <sys/vm_machparam.h>
#include <sys/var.h>
#include <sys/trap.h>
#include <sys/machtrap.h>
#include <sys/scb.h>
#include <sys/bitmap.h>
#include <sys/machlock.h>
#include <sys/membar.h>
#include <sys/atomic.h>
#include <sys/cpu_module.h>
#include <sys/prom_debug.h>
#include <sys/ksynch.h>
#include <sys/mem_config.h>
#include <sys/mem_cage.h>
#include <sys/dtrace.h>
#include <vm/vm_dep.h>
#include <vm/xhat_sfmmu.h>
#include <sys/fpu/fpusystm.h>
#include <vm/mach_kpm.h>

#if defined(SF_ERRATA_57)
extern caddr_t errata57_limit;
#endif

#define	HME8BLK_SZ_RND		((roundup(HME8BLK_SZ, sizeof (int64_t))) /  \
				(sizeof (int64_t)))
#define	HBLK_RESERVE		((struct hme_blk *)hblk_reserve)

#define	HBLK_RESERVE_CNT	128
#define	HBLK_RESERVE_MIN	20

static struct hme_blk *freehblkp;
static kmutex_t freehblkp_lock;
static int freehblkcnt;

static int64_t hblk_reserve[HME8BLK_SZ_RND];
static kmutex_t hblk_reserve_lock;
static kthread_t *hblk_reserve_thread;

static nucleus_hblk8_info_t nucleus_hblk8;
static nucleus_hblk1_info_t nucleus_hblk1;

/*
 * SFMMU specific hat functions
 */
void	hat_pagecachectl(struct page *, int);
/* flags for hat_pagecachectl */
#define	HAT_CACHE	0x1
#define	HAT_UNCACHE	0x2
#define	HAT_TMPNC	0x4

/*
 * Flag to allow the creation of non-cacheable translations
 * to system memory.  It is off by default.  At the moment this
 * flag is used by the ecache error injector.  The error injector
 * will turn it on when creating such a translation then shut it
 * off when it's finished.
 */

int	sfmmu_allow_nc_trans = 0;

/*
 * Flag to disable large page support.
 *	value of 1 => disable all large pages.
 *	bits 1, 2, and 3 are to disable 64K, 512K and 4M pages respectively.
 *
 * For example, use the value 0x4 to disable 512K pages.
 *
 */
#define	LARGE_PAGES_OFF		0x1

/*
 * The disable_large_pages and disable_ism_large_pages variables control
 * hat_memload_array and the page sizes to be used by ISM and the kernel.
 *
 * The disable_auto_data_large_pages and disable_auto_text_large_pages
 * variables are only used to control which OOB pages to use at upper VM
 * segment creation time, and are set in hat_init_pagesizes and used in
 * the map_pgsz* routines.  Their values may come from platform or CPU
 * specific code to disable page sizes that should not be used.
 *
 * WARNING: 512K pages are currently not supported for ISM/DISM.
 */
uint_t	disable_large_pages = 0;
uint_t	disable_ism_large_pages = (1 << TTE512K);
uint_t	disable_auto_data_large_pages = 0;
uint_t	disable_auto_text_large_pages = 0;

/*
 * Private sfmmu data structures for hat management
 */
static struct kmem_cache *sfmmuid_cache;
static struct kmem_cache *mmuctxdom_cache;

/*
 * Private sfmmu data structures for tsb management
 */
static struct kmem_cache *sfmmu_tsbinfo_cache;
static struct kmem_cache *sfmmu_tsb8k_cache;
static struct kmem_cache *sfmmu_tsb_cache[NLGRPS_MAX];
static vmem_t *kmem_tsb_arena;

/*
 * sfmmu static variables for hmeblk resource management.
 */
static vmem_t *hat_memload1_arena; /* HAT translation arena for sfmmu1_cache */
static struct kmem_cache *sfmmu8_cache;
static struct kmem_cache *sfmmu1_cache;
static struct kmem_cache *pa_hment_cache;

static kmutex_t	ism_mlist_lock;	/* mutex for ism mapping list */
/*
 * private data for ism
 */
static struct kmem_cache *ism_blk_cache;
static struct kmem_cache *ism_ment_cache;
#define	ISMID_STARTADDR	NULL

/*
 * Whether to delay TLB flushes and use Cheetah's flush-all support
 * when removing contexts from the dirty list.
 */
int delay_tlb_flush;
int disable_delay_tlb_flush;

/*
 * ``hat_lock'' is a hashed mutex lock for protecting sfmmu TSB lists,
 * HAT flags, synchronizing TLB/TSB coherency, and context management.
 * The lock is hashed on the sfmmup since the case where we need to lock
 * all processes is rare but does occur (e.g. we need to unload a shared
 * mapping from all processes using the mapping).  We have a lot of buckets,
 * and each slab of sfmmu_t's can use about a quarter of them, giving us
 * a fairly good distribution without wasting too much space and overhead
 * when we have to grab them all.
 */
#define	SFMMU_NUM_LOCK	128		/* must be power of two */
hatlock_t	hat_lock[SFMMU_NUM_LOCK];

/*
 * Hash algorithm optimized for a small number of slabs.
 *  7 is (highbit((sizeof sfmmu_t)) - 1)
 * This hash algorithm is based upon the knowledge that sfmmu_t's come from a
 * kmem_cache, and thus they will be sequential within that cache.  In
 * addition, each new slab will have a different "color" up to cache_maxcolor
 * which will skew the hashing for each successive slab which is allocated.
 * If the size of sfmmu_t changed to a larger size, this algorithm may need
 * to be revisited.
 */
#define	TSB_HASH_SHIFT_BITS (7)
#define	PTR_HASH(x) ((uintptr_t)x >> TSB_HASH_SHIFT_BITS)

#ifdef DEBUG
int tsb_hash_debug = 0;
#define	TSB_HASH(sfmmup)	\
	(tsb_hash_debug ? &hat_lock[0] : \
	&hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)])
#else	/* DEBUG */
#define	TSB_HASH(sfmmup)	&hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)]
#endif	/* DEBUG */
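
/*
 * Illustrative example (not part of the original code, addresses and
 * object size assumed): with TSB_HASH_SHIFT_BITS == 7 and
 * SFMMU_NUM_LOCK == 128, two sfmmu_t's allocated 0x80 bytes apart in the
 * same slab, say at 0x30001234000 and 0x30001234080, hash as
 *
 *	PTR_HASH(0x30001234000) & 127 == (0x30001234000 >> 7) & 127 == 0
 *	PTR_HASH(0x30001234080) & 127 == (0x30001234080 >> 7) & 127 == 1
 *
 * so consecutive objects from one slab land on different hat_lock
 * buckets, which is the distribution property described above.
 */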

/* sfmmu_replace_tsb() return codes. */
typedef enum tsb_replace_rc {
	TSB_SUCCESS,
	TSB_ALLOCFAIL,
	TSB_LOSTRACE,
	TSB_ALREADY_SWAPPED,
	TSB_CANTGROW
} tsb_replace_rc_t;

/*
 * Flags for TSB allocation routines.
 */
#define	TSB_ALLOC	0x01
#define	TSB_FORCEALLOC	0x02
#define	TSB_GROW	0x04
#define	TSB_SHRINK	0x08
#define	TSB_SWAPIN	0x10

/*
 * Support for HAT callbacks.
 */
#define	SFMMU_MAX_RELOC_CALLBACKS	10
int sfmmu_max_cb_id = SFMMU_MAX_RELOC_CALLBACKS;
static id_t sfmmu_cb_nextid = 0;
static id_t sfmmu_tsb_cb_id;
struct sfmmu_callback *sfmmu_cb_table;

/*
 * Kernel page relocation is enabled by default for non-caged
 * kernel pages.  This has little effect unless segkmem_reloc is
 * set, since by default kernel memory comes from inside the
 * kernel cage.
 */
int hat_kpr_enabled = 1;

kmutex_t	kpr_mutex;
kmutex_t	kpr_suspendlock;
kthread_t	*kreloc_thread;

/*
 * Enable VA->PA translation sanity checking on DEBUG kernels.
 * Disabled by default.  This is incompatible with some
 * drivers (error injector, RSM) so if it breaks you get
 * to keep both pieces.
 */
int hat_check_vtop = 0;

/*
 * Private sfmmu routines (prototypes)
 */
static struct hme_blk *sfmmu_shadow_hcreate(sfmmu_t *, caddr_t, int, uint_t);
static struct	hme_blk *sfmmu_hblk_alloc(sfmmu_t *, caddr_t,
			struct hmehash_bucket *, uint_t, hmeblk_tag, uint_t);
static caddr_t	sfmmu_hblk_unload(struct hat *, struct hme_blk *, caddr_t,
			caddr_t, demap_range_t *, uint_t);
static caddr_t	sfmmu_hblk_sync(struct hat *, struct hme_blk *, caddr_t,
			caddr_t, int);
static void	sfmmu_hblk_free(struct hmehash_bucket *, struct hme_blk *,
			uint64_t, struct hme_blk **);
static void	sfmmu_hblks_list_purge(struct hme_blk **);
static uint_t	sfmmu_get_free_hblk(struct hme_blk **, uint_t);
static uint_t	sfmmu_put_free_hblk(struct hme_blk *, uint_t);
static struct hme_blk *sfmmu_hblk_steal(int);
static int	sfmmu_steal_this_hblk(struct hmehash_bucket *,
			struct hme_blk *, uint64_t, uint64_t,
			struct hme_blk *);
static caddr_t	sfmmu_hblk_unlock(struct hme_blk *, caddr_t, caddr_t);

static void	sfmmu_memload_batchsmall(struct hat *, caddr_t, page_t **,
			uint_t, uint_t, pgcnt_t);
void		sfmmu_tteload(struct hat *, tte_t *, caddr_t, page_t *,
			uint_t);
static int	sfmmu_tteload_array(sfmmu_t *, tte_t *, caddr_t, page_t **,
			uint_t);
static struct hmehash_bucket *sfmmu_tteload_acquire_hashbucket(sfmmu_t *,
			caddr_t, int);
static struct hme_blk *sfmmu_tteload_find_hmeblk(sfmmu_t *,
			struct hmehash_bucket *, caddr_t, uint_t, uint_t);
static int	sfmmu_tteload_addentry(sfmmu_t *, struct hme_blk *, tte_t *,
			caddr_t, page_t **, uint_t);
static void	sfmmu_tteload_release_hashbucket(struct hmehash_bucket *);

static int	sfmmu_pagearray_setup(caddr_t, page_t **, tte_t *, int);
pfn_t		sfmmu_uvatopfn(caddr_t, sfmmu_t *);
void		sfmmu_memtte(tte_t *, pfn_t, uint_t, int);
#ifdef VAC
static void	sfmmu_vac_conflict(struct hat *, caddr_t, page_t *);
static int	sfmmu_vacconflict_array(caddr_t, page_t *, int *);
int	tst_tnc(page_t *pp, pgcnt_t);
void	conv_tnc(page_t *pp, int);
#endif

static void	sfmmu_get_ctx(sfmmu_t *);
static void	sfmmu_free_sfmmu(sfmmu_t *);

static void	sfmmu_gettte(struct hat *, caddr_t, tte_t *);
static void	sfmmu_ttesync(struct hat *, caddr_t, tte_t *, page_t *);
static void	sfmmu_chgattr(struct hat *, caddr_t, size_t, uint_t, int);

cpuset_t	sfmmu_pageunload(page_t *, struct sf_hment *, int);
static void	hat_pagereload(struct page *, struct page *);
static cpuset_t	sfmmu_pagesync(page_t *, struct sf_hment *, uint_t);
#ifdef VAC
void	sfmmu_page_cache_array(page_t *, int, int, pgcnt_t);
static void	sfmmu_page_cache(page_t *, int, int, int);
#endif

static void	sfmmu_tlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *,
			pfn_t, int, int, int, int);
static void	sfmmu_ismtlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *,
			pfn_t, int);
static void	sfmmu_tlb_demap(caddr_t, sfmmu_t *, struct hme_blk *, int, int);
static void	sfmmu_tlb_range_demap(demap_range_t *);
static void	sfmmu_invalidate_ctx(sfmmu_t *);
static void	sfmmu_sync_mmustate(sfmmu_t *);

static void	sfmmu_tsbinfo_setup_phys(struct tsb_info *, pfn_t);
static int	sfmmu_tsbinfo_alloc(struct tsb_info **, int, int, uint_t,
			sfmmu_t *);
static void	sfmmu_tsb_free(struct tsb_info *);
static void	sfmmu_tsbinfo_free(struct tsb_info *);
static int	sfmmu_init_tsbinfo(struct tsb_info *, int, int, uint_t,
			sfmmu_t *);

static void	sfmmu_tsb_swapin(sfmmu_t *, hatlock_t *);
static int	sfmmu_select_tsb_szc(pgcnt_t);
static void	sfmmu_mod_tsb(sfmmu_t *, caddr_t, tte_t *, int);
#define		sfmmu_load_tsb(sfmmup, vaddr, tte, szc)	\
	sfmmu_mod_tsb(sfmmup, vaddr, tte, szc)
#define		sfmmu_unload_tsb(sfmmup, vaddr, szc)	\
	sfmmu_mod_tsb(sfmmup, vaddr, NULL, szc)
static void	sfmmu_copy_tsb(struct tsb_info *, struct tsb_info *);
static tsb_replace_rc_t sfmmu_replace_tsb(sfmmu_t *, struct tsb_info *, uint_t,
    hatlock_t *, uint_t);
static void	sfmmu_size_tsb(sfmmu_t *, int, uint64_t, uint64_t, int);

#ifdef VAC
void	sfmmu_cache_flush(pfn_t, int);
void	sfmmu_cache_flushcolor(int, pfn_t);
#endif
static caddr_t	sfmmu_hblk_chgattr(sfmmu_t *, struct hme_blk *, caddr_t,
			caddr_t, demap_range_t *, uint_t, int);

static uint64_t	sfmmu_vtop_attr(uint_t, int mode, tte_t *);
static uint_t	sfmmu_ptov_attr(tte_t *);
static caddr_t	sfmmu_hblk_chgprot(sfmmu_t *, struct hme_blk *, caddr_t,
			caddr_t, demap_range_t *, uint_t);
static uint_t	sfmmu_vtop_prot(uint_t, uint_t *);
static int	sfmmu_idcache_constructor(void *, void *, int);
static void	sfmmu_idcache_destructor(void *, void *);
static int	sfmmu_hblkcache_constructor(void *, void *, int);
static void	sfmmu_hblkcache_destructor(void *, void *);
static void	sfmmu_hblkcache_reclaim(void *);
static void	sfmmu_shadow_hcleanup(sfmmu_t *, struct hme_blk *,
			struct hmehash_bucket *);
static void	sfmmu_free_hblks(sfmmu_t *, caddr_t, caddr_t, int);
static void	sfmmu_rm_large_mappings(page_t *, int);

static void	hat_lock_init(void);
static void	hat_kstat_init(void);
static int	sfmmu_kstat_percpu_update(kstat_t *ksp, int rw);
static void	sfmmu_check_page_sizes(sfmmu_t *, int);
int	fnd_mapping_sz(page_t *);
static void	iment_add(struct ism_ment *, struct hat *);
static void	iment_sub(struct ism_ment *, struct hat *);
static pgcnt_t	ism_tsb_entries(sfmmu_t *, int szc);
extern void	sfmmu_setup_tsbinfo(sfmmu_t *);
#ifdef sun4v
extern void	sfmmu_invalidate_tsbinfo(sfmmu_t *);
#endif /* sun4v */
extern void	sfmmu_clear_utsbinfo(void);

static void	sfmmu_ctx_wrap_around(mmu_ctx_t *);

/* kpm globals */
#ifdef	DEBUG
/*
 * Enable trap level tsbmiss handling
 */
int	kpm_tsbmtl = 1;

/*
 * Flush the TLB on kpm mapout.  Note: Xcalls are used (again) for the
 * required TLB shootdowns in this case, so handle w/ care.  Off by default.
 */
int	kpm_tlb_flush;
#endif	/* DEBUG */

static void	*sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *, size_t, int);

#ifdef DEBUG
static void	sfmmu_check_hblk_flist();
#endif
/*
 * Semi-private sfmmu data structures.  Some of them are initialized in
 * startup or in hat_init.  Some of them are private but accessed by
 * assembly code or mach_sfmmu.c
 */
struct hmehash_bucket *uhme_hash;	/* user hmeblk hash table */
struct hmehash_bucket *khme_hash;	/* kernel hmeblk hash table */
uint64_t	uhme_hash_pa;		/* PA of uhme_hash */
uint64_t	khme_hash_pa;		/* PA of khme_hash */
int		uhmehash_num;		/* # of buckets in user hash table */
int		khmehash_num;		/* # of buckets in kernel hash table */

uint_t		max_mmu_ctxdoms = 0;	/* max context domains in the system */
mmu_ctx_t	**mmu_ctxs_tbl;		/* global array of context domains */
uint64_t	mmu_saved_gnum = 0;	/* to init incoming MMUs' gnums */

#define	DEFAULT_NUM_CTXS_PER_MMU 8192
static uint_t	nctxs = DEFAULT_NUM_CTXS_PER_MMU;

int		cache;			/* describes system cache */

caddr_t		ktsb_base;	/* kernel 8k-indexed tsb base address */
uint64_t	ktsb_pbase;	/* kernel 8k-indexed tsb phys address */
int		ktsb_szcode;	/* kernel 8k-indexed tsb size code */
int		ktsb_sz;	/* kernel 8k-indexed tsb size */

caddr_t		ktsb4m_base;	/* kernel 4m-indexed tsb base address */
uint64_t	ktsb4m_pbase;	/* kernel 4m-indexed tsb phys address */
int		ktsb4m_szcode;	/* kernel 4m-indexed tsb size code */
int		ktsb4m_sz;	/* kernel 4m-indexed tsb size */

uint64_t	kpm_tsbbase;	/* kernel seg_kpm 4M TSB base address */
int		kpm_tsbsz;	/* kernel seg_kpm 4M TSB size code */
uint64_t	kpmsm_tsbbase;	/* kernel seg_kpm 8K TSB base address */
int		kpmsm_tsbsz;	/* kernel seg_kpm 8K TSB size code */

#ifndef sun4v
int		utsb_dtlb_ttenum = -1;	/* index in TLB for utsb locked TTE */
int		utsb4m_dtlb_ttenum = -1; /* index in TLB for 4M TSB TTE */
int		dtlb_resv_ttenum;	/* index in TLB of first reserved TTE */
caddr_t		utsb_vabase;		/* reserved kernel virtual memory */
caddr_t		utsb4m_vabase;		/* for trap handler TSB accesses */
#endif /* sun4v */
uint64_t	tsb_alloc_bytes = 0;	/* bytes allocated to TSBs */
vmem_t		*kmem_tsb_default_arena[NLGRPS_MAX];	/* For dynamic TSBs */

/*
 * Size to use for TSB slabs.  Future platforms that support page sizes
 * larger than 4M may wish to change these values, and provide their own
 * assembly macros for building and decoding the TSB base register contents.
 * Note disable_large_pages will override the value set here.
 */
uint_t	tsb_slab_ttesz = TTE4M;
uint_t	tsb_slab_size;
uint_t	tsb_slab_shift;
uint_t	tsb_slab_mask;	/* PFN mask for TTE */

/* largest TSB size to grow to, will be smaller on smaller memory systems */
int	tsb_max_growsize = UTSB_MAX_SZCODE;

/*
 * Tunable parameters dealing with TSB policies.
 */

/*
 * This undocumented tunable forces all 8K TSBs to be allocated from
 * the kernel heap rather than from the kmem_tsb_default_arena arenas.
 */
#ifdef	DEBUG
int	tsb_forceheap = 0;
#endif	/* DEBUG */

/*
 * Decide whether to use per-lgroup arenas, or one global set of
 * TSB arenas.  The default is not to break up per-lgroup, since
 * most platforms don't recognize any tangible benefit from it.
 */
int	tsb_lgrp_affinity = 0;

/*
 * Used for growing the TSB based on the process RSS.
 * tsb_rss_factor is based on the smallest TSB, and is
 * shifted by the TSB size to determine if we need to grow.
 * The default will grow the TSB if the number of TTEs for
 * this page size exceeds 75% of the number of TSB entries,
 * which should _almost_ eliminate all conflict misses
 * (at the expense of using up lots and lots of memory).
 */
#define	TSB_RSS_FACTOR		(TSB_ENTRIES(TSB_MIN_SZCODE) * 0.75)
#define	SFMMU_RSS_TSBSIZE(tsbszc)	(tsb_rss_factor << tsbszc)
#define	SELECT_TSB_SIZECODE(pgcnt) ( \
	(enable_tsb_rss_sizing)? sfmmu_select_tsb_szc(pgcnt) : \
	default_tsb_size)
#define	TSB_OK_SHRINK()	\
	(tsb_alloc_bytes > tsb_alloc_hiwater || freemem < desfree)
#define	TSB_OK_GROW()	\
	(tsb_alloc_bytes < tsb_alloc_hiwater && freemem > desfree)
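
/*
 * Illustrative example (not from the original source, assuming the
 * smallest TSB holds 512 entries): tsb_rss_factor then defaults to
 * 512 * 0.75 == 384.  SFMMU_RSS_TSBSIZE(tsbszc) scales that threshold
 * with the candidate size code, so a process whose resident TTE count
 * for a page size first exceeds 384 is offered the next larger TSB,
 * one exceeding 768 the size after that, and so on -- i.e. the TSB is
 * grown whenever roughly 75% of its entries would be in use, subject
 * to the TSB_OK_GROW() memory checks above.
 */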

int	enable_tsb_rss_sizing = 1;
int	tsb_rss_factor	= (int)TSB_RSS_FACTOR;

/* which TSB size code to use for new address spaces or if rss sizing off */
int default_tsb_size = TSB_8K_SZCODE;

static uint64_t tsb_alloc_hiwater; /* limit TSB reserved memory */
uint64_t tsb_alloc_hiwater_factor; /* tsb_alloc_hiwater = physmem / this */
#define	TSB_ALLOC_HIWATER_FACTOR_DEFAULT	32

#ifdef DEBUG
static int tsb_random_size = 0;	/* set to 1 to test random tsb sizes on alloc */
static int tsb_grow_stress = 0;	/* if set to 1, keep replacing TSB w/ random */
static int tsb_alloc_mtbf = 0;	/* fail allocation every n attempts */
static int tsb_alloc_fail_mtbf = 0;
static int tsb_alloc_count = 0;
#endif /* DEBUG */

/* if set to 1, will remap valid TTEs when growing TSB. */
int tsb_remap_ttes = 1;

/*
 * If we have more than this many mappings, allocate a second TSB.
 * This default is chosen because the I/D fully associative TLBs are
 * assumed to have at least 8 available entries. Platforms with a
 * larger fully-associative TLB could probably override the default.
 */
int tsb_sectsb_threshold = 8;

/*
 * kstat data
 */
struct sfmmu_global_stat sfmmu_global_stat;
struct sfmmu_tsbsize_stat sfmmu_tsbsize_stat;

/*
 * Global data
 */
sfmmu_t		*ksfmmup;		/* kernel's hat id */

#ifdef DEBUG
static void	chk_tte(tte_t *, tte_t *, tte_t *, struct hme_blk *);
#endif

/* sfmmu locking operations */
static kmutex_t *sfmmu_mlspl_enter(struct page *, int);
static int	sfmmu_mlspl_held(struct page *, int);

kmutex_t *sfmmu_page_enter(page_t *);
void	sfmmu_page_exit(kmutex_t *);
int	sfmmu_page_spl_held(struct page *);

/* sfmmu internal locking operations - accessed directly */
static void	sfmmu_mlist_reloc_enter(page_t *, page_t *,
				kmutex_t **, kmutex_t **);
static void	sfmmu_mlist_reloc_exit(kmutex_t *, kmutex_t *);
static hatlock_t *
		sfmmu_hat_enter(sfmmu_t *);
static hatlock_t *
		sfmmu_hat_tryenter(sfmmu_t *);
static void	sfmmu_hat_exit(hatlock_t *);
static void	sfmmu_hat_lock_all(void);
static void	sfmmu_hat_unlock_all(void);
static void	sfmmu_ismhat_enter(sfmmu_t *, int);
static void	sfmmu_ismhat_exit(sfmmu_t *, int);

/*
 * Array of mutexes protecting a page's mapping list and p_nrm field.
 *
 * The hash function looks complicated, but is made up so that:
 *
 * "pp" not shifted, so adjacent pp values will hash to different cache lines
 * (8 byte alignment * 8 bytes/mutex == 64 byte coherency subblock)
 *
 * "pp" >> mml_shift, incorporates more source bits into the hash result
 *
 * "& (mml_table_size - 1)", should be faster than using remainder "%"
 *
 * Hopefully, mml_table, mml_table_size and mml_shift are all in the same
 * cacheline, since they get declared next to each other below.  We'll trust
 * ld not to do something random.
 */
#ifdef	DEBUG
int mlist_hash_debug = 0;
#define	MLIST_HASH(pp)	(mlist_hash_debug ? &mml_table[0] : \
	&mml_table[((uintptr_t)(pp) + \
	((uintptr_t)(pp) >> mml_shift)) & (mml_table_sz - 1)])
#else	/* !DEBUG */
#define	MLIST_HASH(pp)   &mml_table[ \
	((uintptr_t)(pp) + ((uintptr_t)(pp) >> mml_shift)) & (mml_table_sz - 1)]
#endif	/* !DEBUG */

kmutex_t		*mml_table;
uint_t			mml_table_sz;	/* must be a power of 2 */
uint_t			mml_shift;	/* log2(mml_table_sz) + 3 for align */
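
/*
 * Illustrative example (not from the original source, sizes assumed):
 * with mml_table_sz == 64 and mml_shift == 9, a page_t at address pp
 * selects bucket ((pp + (pp >> 9)) & 63).  Because pp itself is not
 * shifted, page_t's that are 64 bytes apart pick different buckets (and
 * therefore different mutex cache lines), while the pp >> mml_shift term
 * folds higher address bits into the index so that pages from different
 * slabs do not all collide on the same low-order pattern.
 */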

kpm_hlk_t	*kpmp_table;
uint_t		kpmp_table_sz;	/* must be a power of 2 */
uchar_t		kpmp_shift;

kpm_shlk_t	*kpmp_stable;
uint_t		kpmp_stable_sz;	/* must be a power of 2 */

/*
 * SPL_HASH was improved to avoid false cache line sharing
 */
#define	SPL_TABLE_SIZE	128
#define	SPL_MASK	(SPL_TABLE_SIZE - 1)
#define	SPL_SHIFT	7		/* log2(SPL_TABLE_SIZE) */

#define	SPL_INDEX(pp) \
	((((uintptr_t)(pp) >> SPL_SHIFT) ^ \
	((uintptr_t)(pp) >> (SPL_SHIFT << 1))) & \
	(SPL_TABLE_SIZE - 1))

#define	SPL_HASH(pp) \
	(&sfmmu_page_lock[SPL_INDEX(pp) & SPL_MASK].pad_mutex)

static	pad_mutex_t	sfmmu_page_lock[SPL_TABLE_SIZE];


/*
 * hat_unload_callback() will group together callbacks in order
 * to avoid xt_sync() calls.  This is the maximum size of the group.
 */
#define	MAX_CB_ADDR	32

tte_t	hw_tte;
static ulong_t sfmmu_dmr_maxbit = DMR_MAXBIT;

static char	*mmu_ctx_kstat_names[] = {
	"mmu_ctx_tsb_exceptions",
	"mmu_ctx_tsb_raise_exception",
	"mmu_ctx_wrap_around",
};

/*
 * Wrapper for vmem_xalloc since vmem_create only allows limited
 * parameters for vm_source_alloc functions.  This function allows us
 * to specify alignment consistent with the size of the object being
 * allocated.
 */
static void *
sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *vmp, size_t size, int vmflag)
{
	return (vmem_xalloc(vmp, size, size, 0, 0, NULL, NULL, vmflag));
}

/* Common code for setting tsb_alloc_hiwater. */
#define	SFMMU_SET_TSB_ALLOC_HIWATER(pages)	tsb_alloc_hiwater = \
	ptob(pages) / tsb_alloc_hiwater_factor

/*
 * Set tsb_max_growsize to allow at most all of physical memory to be mapped by
 * a single TSB.  physmem is the number of physical pages so we need physmem 8K
 * TTEs to represent all those physical pages.  We round this up by using
 * 1<<highbit().  To figure out which size code to use, remember that the size
 * code is just an amount to shift the smallest TSB size to get the size of
 * this TSB.  So we subtract that size, TSB_START_SIZE, from highbit() (or
 * highbit() - 1) to get the size code for the smallest TSB that can represent
 * all of physical memory, while erring on the side of too much.
 *
 * If the computed size code is less than the current tsb_max_growsize, we set
 * tsb_max_growsize to the computed size code.  In the case where the computed
 * size code is greater than tsb_max_growsize, we have these restrictions that
 * apply to increasing tsb_max_growsize:
 *	1) TSBs can't grow larger than the TSB slab size
 *	2) TSBs can't grow larger than UTSB_MAX_SZCODE.
 */
#define	SFMMU_SET_TSB_MAX_GROWSIZE(pages) {				\
	int	i, szc;							\
									\
	i = highbit(pages);						\
	if ((1 << (i - 1)) == (pages))					\
		i--;		/* 2^n case, round down */		\
	szc = i - TSB_START_SIZE;					\
	if (szc < tsb_max_growsize)					\
		tsb_max_growsize = szc;					\
	else if ((szc > tsb_max_growsize) &&				\
	    (szc <= tsb_slab_shift - (TSB_START_SIZE + TSB_ENTRY_SHIFT))) \
		tsb_max_growsize = MIN(szc, UTSB_MAX_SZCODE);		\
}
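
/*
 * Worked example (not part of the original code, values assumed for
 * illustration): on a machine with physmem == 0x200000 8K pages (16GB)
 * and tsb_alloc_hiwater_factor == 32,
 *
 *	SFMMU_SET_TSB_ALLOC_HIWATER(physmem)
 *
 * caps TSB memory at ptob(0x200000) / 32 == 512MB, and
 *
 *	SFMMU_SET_TSB_MAX_GROWSIZE(physmem)
 *
 * computes i = highbit(0x200000) - 1 == 21 (exact power of two, so it is
 * rounded down), giving size code 21 - TSB_START_SIZE: the smallest TSB
 * with at least one entry per physical 8K page, subject to the slab-size
 * and UTSB_MAX_SZCODE clamps above.
 */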

/*
 * Given a pointer to an sfmmu and a TTE size code, return a pointer to the
 * tsb_info which handles that TTE size.
 */
#define	SFMMU_GET_TSBINFO(tsbinfop, sfmmup, tte_szc)	\
	(tsbinfop) = (sfmmup)->sfmmu_tsb;		\
	ASSERT(sfmmu_hat_lock_held(sfmmup));		\
	if ((tte_szc) >= TTE4M)				\
		(tsbinfop) = (tsbinfop)->tsb_next;

/*
 * Return the number of mappings present in the HAT
 * for a particular process and page size.
 */
#define	SFMMU_TTE_CNT(sfmmup, szc)				\
	(sfmmup)->sfmmu_iblk?					\
	    (sfmmup)->sfmmu_ismttecnt[(szc)] +			\
	    (sfmmup)->sfmmu_ttecnt[(szc)] :			\
	    (sfmmup)->sfmmu_ttecnt[(szc)];

/*
 * Macro to use to unload entries from the TSB.
 * It has knowledge of which page sizes get replicated in the TSB
 * and will call the appropriate unload routine for the appropriate size.
 */
#define	SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp)				\
{									\
	int ttesz = get_hblk_ttesz(hmeblkp);				\
	if (ttesz == TTE8K || ttesz == TTE4M) {				\
		sfmmu_unload_tsb(sfmmup, addr, ttesz);			\
	} else {							\
		caddr_t sva = (caddr_t)get_hblk_base(hmeblkp);		\
		caddr_t eva = sva + get_hblk_span(hmeblkp);		\
		ASSERT(addr >= sva && addr < eva);			\
		sfmmu_unload_tsb_range(sfmmup, sva, eva, ttesz);	\
	}								\
}


/* Update tsb_alloc_hiwater after memory is configured. */
/*ARGSUSED*/
static void
sfmmu_update_tsb_post_add(void *arg, pgcnt_t delta_pages)
{
	/* Assumes physmem has already been updated. */
	SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
	SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
}

/*
 * Update tsb_alloc_hiwater before memory is deleted.  We'll do nothing here
 * and update tsb_alloc_hiwater and tsb_max_growsize after the memory is
 * deleted.
 */
/*ARGSUSED*/
static int
sfmmu_update_tsb_pre_del(void *arg, pgcnt_t delta_pages)
{
	return (0);
}

/* Update tsb_alloc_hiwater after memory fails to be unconfigured. */
/*ARGSUSED*/
static void
sfmmu_update_tsb_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
{
	/*
	 * Whether the delete was cancelled or not, just go ahead and update
	 * tsb_alloc_hiwater and tsb_max_growsize.
	 */
	SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
	SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
}

static kphysm_setup_vector_t sfmmu_update_tsb_vec = {
	KPHYSM_SETUP_VECTOR_VERSION,	/* version */
	sfmmu_update_tsb_post_add,	/* post_add */
	sfmmu_update_tsb_pre_del,	/* pre_del */
	sfmmu_update_tsb_post_del	/* post_del */
};
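
/*
 * This callback vector is registered with the physical memory
 * configuration (DR) framework in hat_init() below, i.e.:
 *
 *	i = kphysm_setup_func_register(&sfmmu_update_tsb_vec, (void *)0);
 *	ASSERT(i == 0);
 *
 * so that tsb_alloc_hiwater and tsb_max_growsize track physmem as
 * memory is dynamically added or removed.
 */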

/*
 * HME_BLK HASH PRIMITIVES
 */

/*
 * Enter a hme on the mapping list for page pp.
 * When large pages are more prevalent in the system we might want to
 * keep the mapping list in ascending order by the hment size. For now,
 * small pages are more frequent, so don't slow it down.
 */
#define	HME_ADD(hme, pp)					\
{								\
	ASSERT(sfmmu_mlist_held(pp));				\
								\
	hme->hme_prev = NULL;					\
	hme->hme_next = pp->p_mapping;				\
	hme->hme_page = pp;					\
	if (pp->p_mapping) {					\
		((struct sf_hment *)(pp->p_mapping))->hme_prev = hme;\
		ASSERT(pp->p_share > 0);			\
	} else	{						\
		/* EMPTY */					\
		ASSERT(pp->p_share == 0);			\
	}							\
	pp->p_mapping = hme;					\
	pp->p_share++;						\
}

/*
 * Remove a hme from the mapping list for page pp.
 * If we are unmapping a large translation, we need to make sure that the
 * change is reflected in the corresponding bit of the p_index field.
 */
#define	HME_SUB(hme, pp)					\
{								\
	ASSERT(sfmmu_mlist_held(pp));				\
	ASSERT(hme->hme_page == pp || IS_PAHME(hme));		\
								\
	if (pp->p_mapping == NULL) {				\
		panic("hme_remove - no mappings");		\
	}							\
								\
	membar_stst();	/* ensure previous stores finish */	\
								\
	ASSERT(pp->p_share > 0);				\
	pp->p_share--;						\
								\
	if (hme->hme_prev) {					\
		ASSERT(pp->p_mapping != hme);			\
		ASSERT(hme->hme_prev->hme_page == pp ||		\
			IS_PAHME(hme->hme_prev));		\
		hme->hme_prev->hme_next = hme->hme_next;	\
	} else {						\
		ASSERT(pp->p_mapping == hme);			\
		pp->p_mapping = hme->hme_next;			\
		ASSERT((pp->p_mapping == NULL) ?		\
			(pp->p_share == 0) : 1);		\
	}							\
								\
	if (hme->hme_next) {					\
		ASSERT(hme->hme_next->hme_page == pp ||		\
			IS_PAHME(hme->hme_next));		\
		hme->hme_next->hme_prev = hme->hme_prev;	\
	}							\
								\
	/* zero out the entry */				\
	hme->hme_next = NULL;					\
	hme->hme_prev = NULL;					\
	hme->hme_page = NULL;					\
								\
	if (hme_size(hme) > TTE8K) {				\
		/* remove mappings for remainder of large pg */	\
		sfmmu_rm_large_mappings(pp, hme_size(hme));	\
	}							\
}

/*
 * This function returns the hment given the hme_blk and a vaddr.
 * It assumes addr has already been checked to belong to hme_blk's
 * range.
 */
#define	HBLKTOHME(hment, hmeblkp, addr)					\
{									\
	int index;							\
	HBLKTOHME_IDX(hment, hmeblkp, addr, index)			\
}

/*
 * Version of HBLKTOHME that also returns the index in hmeblkp
 * of the hment.
 */
#define	HBLKTOHME_IDX(hment, hmeblkp, addr, idx)			\
{									\
	ASSERT(in_hblk_range((hmeblkp), (addr)));			\
									\
	if (get_hblk_ttesz(hmeblkp) == TTE8K) {				\
		idx = (((uintptr_t)(addr) >> MMU_PAGESHIFT) & (NHMENTS-1)); \
	} else								\
		idx = 0;						\
									\
	(hment) = &(hmeblkp)->hblk_hme[idx];				\
}
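
/*
 * Worked example (not from the original source, assuming NHMENTS == 8
 * and MMU_PAGESHIFT == 13): an 8K hme_blk covers NHMENTS consecutive 8K
 * pages starting at a 64K-aligned base, so an address 0x6000 bytes past
 * that base yields
 *
 *	idx = (0x6000 >> MMU_PAGESHIFT) & (NHMENTS - 1) == 3
 *
 * i.e. the fourth hment in hblk_hme[].  For larger page sizes the block
 * maps a single hment and idx is 0.
 */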

/*
 * Disable any page sizes not supported by the CPU
 */
void
hat_init_pagesizes()
{
	int		i;

	mmu_exported_page_sizes = 0;
	for (i = TTE8K; i < max_mmu_page_sizes; i++) {

		szc_2_userszc[i] = (uint_t)-1;
		userszc_2_szc[i] = (uint_t)-1;

		if ((mmu_exported_pagesize_mask & (1 << i)) == 0) {
			disable_large_pages |= (1 << i);
		} else {
			szc_2_userszc[i] = mmu_exported_page_sizes;
			userszc_2_szc[mmu_exported_page_sizes] = i;
			mmu_exported_page_sizes++;
		}
	}

	disable_ism_large_pages |= disable_large_pages;
	disable_auto_data_large_pages = disable_large_pages;
	disable_auto_text_large_pages = disable_large_pages;

	/*
	 * Initialize mmu-specific large page sizes.
	 */
	if (&mmu_large_pages_disabled) {
		disable_large_pages |= mmu_large_pages_disabled(HAT_LOAD);
		disable_ism_large_pages |=
		    mmu_large_pages_disabled(HAT_LOAD_SHARE);
		disable_auto_data_large_pages |=
		    mmu_large_pages_disabled(HAT_AUTO_DATA);
		disable_auto_text_large_pages |=
		    mmu_large_pages_disabled(HAT_AUTO_TEXT);
	}
}

/*
 * Initialize the hardware address translation structures.
 */
void
hat_init(void)
{
	int		i;
	uint_t		sz;
	uint_t		maxtsb;
	size_t		size;

	hat_lock_init();
	hat_kstat_init();

	/*
	 * Hardware-only bits in a TTE
	 */
	MAKE_TTE_MASK(&hw_tte);

	hat_init_pagesizes();

	/* Initialize the hash locks */
	for (i = 0; i < khmehash_num; i++) {
		mutex_init(&khme_hash[i].hmehash_mutex, NULL,
		    MUTEX_DEFAULT, NULL);
	}
	for (i = 0; i < uhmehash_num; i++) {
		mutex_init(&uhme_hash[i].hmehash_mutex, NULL,
		    MUTEX_DEFAULT, NULL);
	}
	khmehash_num--;		/* make sure counter starts from 0 */
	uhmehash_num--;		/* make sure counter starts from 0 */

	/*
	 * Allocate context domain structures.
	 *
	 * A platform may choose to modify max_mmu_ctxdoms in
	 * set_platform_defaults().  If a platform does not define
	 * a set_platform_defaults() or does not choose to modify
	 * max_mmu_ctxdoms, it gets one MMU context domain for every CPU.
	 *
	 * For sun4v, there will be one global context domain, this is to
	 * avoid the ldom cpu substitution problem.
	 *
	 * For all platforms that have CPUs sharing MMUs, this
	 * value must be defined.
	 */
	if (max_mmu_ctxdoms == 0) {
#ifndef sun4v
		max_mmu_ctxdoms = max_ncpus;
#else /* sun4v */
		max_mmu_ctxdoms = 1;
#endif /* sun4v */
	}

	size = max_mmu_ctxdoms * sizeof (mmu_ctx_t *);
	mmu_ctxs_tbl = kmem_zalloc(size, KM_SLEEP);

	/* mmu_ctx_t is 64 bytes aligned */
	mmuctxdom_cache = kmem_cache_create("mmuctxdom_cache",
	    sizeof (mmu_ctx_t), 64, NULL, NULL, NULL, NULL, NULL, 0);
	/*
	 * MMU context domain initialization for the Boot CPU.
	 * This needs the context domains array allocated above.
	 */
	mutex_enter(&cpu_lock);
	sfmmu_cpu_init(CPU);
	mutex_exit(&cpu_lock);

	/*
	 * Initialize ism mapping list lock.
	 */

	mutex_init(&ism_mlist_lock, NULL, MUTEX_DEFAULT, NULL);

	/*
	 * Each sfmmu structure carries an array of MMU context info
	 * structures, one per context domain. The size of this array depends
	 * on the maximum number of context domains. So, the size of the
	 * sfmmu structure varies per platform.
	 *
	 * sfmmu is allocated from static arena, because trap
	 * handler at TL > 0 is not allowed to touch kernel relocatable
	 * memory. sfmmu's alignment is changed to 64 bytes from
	 * default 8 bytes, as the lower 6 bits will be used to pass
	 * pgcnt to vtag_flush_pgcnt_tl1.
	 */
	size = sizeof (sfmmu_t) + sizeof (sfmmu_ctx_t) * (max_mmu_ctxdoms - 1);

	sfmmuid_cache = kmem_cache_create("sfmmuid_cache", size,
	    64, sfmmu_idcache_constructor, sfmmu_idcache_destructor,
	    NULL, NULL, static_arena, 0);

	sfmmu_tsbinfo_cache = kmem_cache_create("sfmmu_tsbinfo_cache",
	    sizeof (struct tsb_info), 0, NULL, NULL, NULL, NULL, NULL, 0);

	/*
	 * Since we only use the tsb8k cache to "borrow" pages for TSBs
	 * from the heap when low on memory or when TSB_FORCEALLOC is
	 * specified, don't use magazines to cache them--we want to return
	 * them to the system as quickly as possible.
	 */
	sfmmu_tsb8k_cache = kmem_cache_create("sfmmu_tsb8k_cache",
	    MMU_PAGESIZE, MMU_PAGESIZE, NULL, NULL, NULL, NULL,
	    static_arena, KMC_NOMAGAZINE);

	/*
	 * Set tsb_alloc_hiwater to 1/tsb_alloc_hiwater_factor of physical
	 * memory, which corresponds to the old static reserve for TSBs.
	 * tsb_alloc_hiwater_factor defaults to 32.  This caps the amount of
	 * memory we'll allocate for TSB slabs; beyond this point TSB
	 * allocations will be taken from the kernel heap (via
	 * sfmmu_tsb8k_cache) and will be throttled as would any other kmem
	 * consumer.
	 */
	if (tsb_alloc_hiwater_factor == 0) {
		tsb_alloc_hiwater_factor = TSB_ALLOC_HIWATER_FACTOR_DEFAULT;
	}
	SFMMU_SET_TSB_ALLOC_HIWATER(physmem);

	/* Set tsb_max_growsize. */
	SFMMU_SET_TSB_MAX_GROWSIZE(physmem);

	/*
	 * On smaller memory systems, allocate TSB memory in smaller chunks
	 * than the default 4M slab size. We also honor disable_large_pages
	 * here.
	 *
	 * The trap handlers need to be patched with the final slab shift,
	 * since they need to be able to construct the TSB pointer at runtime.
	 */
	if (tsb_max_growsize <= TSB_512K_SZCODE)
		tsb_slab_ttesz = TTE512K;

	for (sz = tsb_slab_ttesz; sz > 0; sz--) {
		if (!(disable_large_pages & (1 << sz)))
			break;
	}

	tsb_slab_ttesz = sz;
	tsb_slab_shift = MMU_PAGESHIFT + (sz << 1) + sz;
	tsb_slab_size = 1 << tsb_slab_shift;
	tsb_slab_mask = (1 << (tsb_slab_shift - MMU_PAGESHIFT)) - 1;

	maxtsb = tsb_slab_shift - (TSB_START_SIZE + TSB_ENTRY_SHIFT);
	if (tsb_max_growsize > maxtsb)
		tsb_max_growsize = maxtsb;
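
	/*
	 * Worked example (illustrative, not from the original source): if
	 * the chosen slab page size is TTE4M (sz == 3), then
	 *
	 *	tsb_slab_shift = MMU_PAGESHIFT + (3 << 1) + 3 = 13 + 9 = 22
	 *
	 * so tsb_slab_size is 4M and tsb_slab_mask covers the 512 8K pages
	 * in a slab.  If 512K slabs are selected instead (sz == 2), the
	 * shift is 19 and the largest dynamically grown TSB shrinks
	 * accordingly via the maxtsb clamp above.
	 */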

	/*
	 * Set up memory callback to update tsb_alloc_hiwater and
	 * tsb_max_growsize.
	 */
	i = kphysm_setup_func_register(&sfmmu_update_tsb_vec, (void *) 0);
	ASSERT(i == 0);

	/*
	 * kmem_tsb_arena is the source from which large TSB slabs are
	 * drawn.  The quantum of this arena corresponds to the largest
	 * TSB size we can dynamically allocate for user processes.
	 * Currently it must also be a supported page size since we
	 * use exactly one translation entry to map each slab page.
	 *
	 * The per-lgroup kmem_tsb_default_arena arenas are the arenas from
	 * which most TSBs are allocated.  Since most TSB allocations are
	 * typically 8K we have a kmem cache we stack on top of each
	 * kmem_tsb_default_arena to speed up those allocations.
	 *
	 * Note the two-level scheme of arenas is required only
	 * because vmem_create doesn't allow us to specify alignment
	 * requirements.  If this ever changes the code could be
	 * simplified to use only one level of arenas.
	 */
	kmem_tsb_arena = vmem_create("kmem_tsb", NULL, 0, tsb_slab_size,
	    sfmmu_vmem_xalloc_aligned_wrapper, vmem_xfree, heap_arena,
	    0, VM_SLEEP);

	if (tsb_lgrp_affinity) {
		char s[50];
		for (i = 0; i < NLGRPS_MAX; i++) {
			(void) sprintf(s, "kmem_tsb_lgrp%d", i);
			kmem_tsb_default_arena[i] =
			    vmem_create(s, NULL, 0, PAGESIZE,
			    sfmmu_tsb_segkmem_alloc, sfmmu_tsb_segkmem_free,
			    kmem_tsb_arena, 0, VM_SLEEP | VM_BESTFIT);
			(void) sprintf(s, "sfmmu_tsb_lgrp%d_cache", i);
			sfmmu_tsb_cache[i] = kmem_cache_create(s, PAGESIZE,
			    PAGESIZE, NULL, NULL, NULL, NULL,
			    kmem_tsb_default_arena[i], 0);
		}
	} else {
		kmem_tsb_default_arena[0] = vmem_create("kmem_tsb_default",
		    NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc,
		    sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0,
		    VM_SLEEP | VM_BESTFIT);

		sfmmu_tsb_cache[0] = kmem_cache_create("sfmmu_tsb_cache",
		    PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL,
		    kmem_tsb_default_arena[0], 0);
	}

	sfmmu8_cache = kmem_cache_create("sfmmu8_cache", HME8BLK_SZ,
	    HMEBLK_ALIGN, sfmmu_hblkcache_constructor,
	    sfmmu_hblkcache_destructor,
	    sfmmu_hblkcache_reclaim, (void *)HME8BLK_SZ,
	    hat_memload_arena, KMC_NOHASH);

	hat_memload1_arena = vmem_create("hat_memload1", NULL, 0, PAGESIZE,
	    segkmem_alloc_permanent, segkmem_free, heap_arena, 0, VM_SLEEP);

	sfmmu1_cache = kmem_cache_create("sfmmu1_cache", HME1BLK_SZ,
	    HMEBLK_ALIGN, sfmmu_hblkcache_constructor,
	    sfmmu_hblkcache_destructor,
	    NULL, (void *)HME1BLK_SZ,
	    hat_memload1_arena, KMC_NOHASH);

	pa_hment_cache = kmem_cache_create("pa_hment_cache", PAHME_SZ,
	    0, NULL, NULL, NULL, NULL, static_arena, KMC_NOHASH);

	ism_blk_cache = kmem_cache_create("ism_blk_cache",
	    sizeof (ism_blk_t), ecache_alignsize, NULL, NULL,
	    NULL, NULL, static_arena, KMC_NOHASH);

	ism_ment_cache = kmem_cache_create("ism_ment_cache",
	    sizeof (ism_ment_t), 0, NULL, NULL,
	    NULL, NULL, NULL, 0);

	/*
	 * We grab the first hat for the kernel.
	 */
	AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER);
	kas.a_hat = hat_alloc(&kas);
	AS_LOCK_EXIT(&kas, &kas.a_lock);

	/*
	 * Initialize hblk_reserve.
	 */
	((struct hme_blk *)hblk_reserve)->hblk_nextpa =
	    va_to_pa((caddr_t)hblk_reserve);

#ifndef UTSB_PHYS
	/*
	 * Reserve some kernel virtual address space for the locked TTEs
	 * that allow us to probe the TSB from TL>0.
	 */
	utsb_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size,
	    0, 0, NULL, NULL, VM_SLEEP);
	utsb4m_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size,
	    0, 0, NULL, NULL, VM_SLEEP);
#endif

#ifdef VAC
	/*
	 * The big page VAC handling code assumes VAC
	 * will not be bigger than the smallest big
	 * page- which is 64K.
	 */
	if (TTEPAGES(TTE64K) < CACHE_NUM_COLOR) {
		cmn_err(CE_PANIC, "VAC too big!");
	}
#endif

	(void) xhat_init();

	uhme_hash_pa = va_to_pa(uhme_hash);
	khme_hash_pa = va_to_pa(khme_hash);

	/*
	 * Initialize relocation locks. kpr_suspendlock is held
	 * at PIL_MAX to prevent interrupts from pinning the holder
	 * of a suspended TTE which may access it leading to a
	 * deadlock condition.
	 */
	mutex_init(&kpr_mutex, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&kpr_suspendlock, NULL, MUTEX_SPIN, (void *)PIL_MAX);
}

/*
 * Initialize locking for the hat layer, called early during boot.
 */
static void
hat_lock_init()
{
	int i;

	/*
	 * initialize the array of mutexes protecting a page's mapping
	 * list and p_nrm field.
	 */
	for (i = 0; i < mml_table_sz; i++)
		mutex_init(&mml_table[i], NULL, MUTEX_DEFAULT, NULL);

	if (kpm_enable) {
		for (i = 0; i < kpmp_table_sz; i++) {
			mutex_init(&kpmp_table[i].khl_mutex, NULL,
			    MUTEX_DEFAULT, NULL);
		}
	}

	/*
	 * Initialize array of mutex locks that protects sfmmu fields and
	 * TSB lists.
	 */
	for (i = 0; i < SFMMU_NUM_LOCK; i++)
		mutex_init(HATLOCK_MUTEXP(&hat_lock[i]), NULL, MUTEX_DEFAULT,
		    NULL);
}

extern caddr_t kmem64_base, kmem64_end;

#define	SFMMU_KERNEL_MAXVA \
	(kmem64_base ? (uintptr_t)kmem64_end : (SYSLIMIT))

/*
 * Allocate a hat structure.
 * Called when an address space first uses a hat.
 */
struct hat *
hat_alloc(struct as *as)
{
	sfmmu_t *sfmmup;
	int i;
	uint64_t cnum;
	extern uint_t get_color_start(struct as *);

	ASSERT(AS_WRITE_HELD(as, &as->a_lock));
	sfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP);
	sfmmup->sfmmu_as = as;
	sfmmup->sfmmu_flags = 0;
	LOCK_INIT_CLEAR(&sfmmup->sfmmu_ctx_lock);

	if (as == &kas) {
		ksfmmup = sfmmup;
		sfmmup->sfmmu_cext = 0;
		cnum = KCONTEXT;

		sfmmup->sfmmu_clrstart = 0;
		sfmmup->sfmmu_tsb = NULL;
		/*
		 * hat_kern_setup() will call sfmmu_init_ktsbinfo()
		 * to setup tsb_info for ksfmmup.
		 */
	} else {

		/*
		 * Just set to invalid ctx. When it faults, it will
		 * get a valid ctx. This would avoid the situation
		 * where we get a ctx, but it gets stolen and then
		 * we fault when we try to run and so have to get
		 * another ctx.
		 */
		sfmmup->sfmmu_cext = 0;
		cnum = INVALID_CONTEXT;

		/* initialize original physical page coloring bin */
		sfmmup->sfmmu_clrstart = get_color_start(as);
#ifdef DEBUG
		if (tsb_random_size) {
			uint32_t randval = (uint32_t)gettick() >> 4;
			int size = randval % (tsb_max_growsize + 1);

			/* choose a random tsb size for stress testing */
			(void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb, size,
			    TSB8K|TSB64K|TSB512K, 0, sfmmup);
		} else
#endif /* DEBUG */
			(void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb,
			    default_tsb_size,
			    TSB8K|TSB64K|TSB512K, 0, sfmmup);
		sfmmup->sfmmu_flags = HAT_SWAPPED;
		ASSERT(sfmmup->sfmmu_tsb != NULL);
	}

	ASSERT(max_mmu_ctxdoms > 0);
	for (i = 0; i < max_mmu_ctxdoms; i++) {
		sfmmup->sfmmu_ctxs[i].cnum = cnum;
		sfmmup->sfmmu_ctxs[i].gnum = 0;
	}

	sfmmu_setup_tsbinfo(sfmmup);
	for (i = 0; i < max_mmu_page_sizes; i++) {
		sfmmup->sfmmu_ttecnt[i] = 0;
		sfmmup->sfmmu_ismttecnt[i] = 0;
		sfmmup->sfmmu_pgsz[i] = TTE8K;
	}

	sfmmup->sfmmu_iblk = NULL;
	sfmmup->sfmmu_ismhat = 0;
	sfmmup->sfmmu_ismblkpa = (uint64_t)-1;
	if (sfmmup == ksfmmup) {
		CPUSET_ALL(sfmmup->sfmmu_cpusran);
	} else {
		CPUSET_ZERO(sfmmup->sfmmu_cpusran);
	}
	sfmmup->sfmmu_free = 0;
	sfmmup->sfmmu_rmstat = 0;
	sfmmup->sfmmu_clrbin = sfmmup->sfmmu_clrstart;
	sfmmup->sfmmu_xhat_provider = NULL;
	cv_init(&sfmmup->sfmmu_tsb_cv, NULL, CV_DEFAULT, NULL);
	return (sfmmup);
}

/*
 * Create per-MMU context domain kstats for a given MMU ctx.
 */
static void
sfmmu_mmu_kstat_create(mmu_ctx_t *mmu_ctxp)
{
	mmu_ctx_stat_t	stat;
	kstat_t		*mmu_kstat;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(mmu_ctxp->mmu_kstat == NULL);

	mmu_kstat = kstat_create("unix", mmu_ctxp->mmu_idx, "mmu_ctx",
	    "hat", KSTAT_TYPE_NAMED, MMU_CTX_NUM_STATS, KSTAT_FLAG_VIRTUAL);

	if (mmu_kstat == NULL) {
		cmn_err(CE_WARN, "kstat_create for MMU %d failed",
		    mmu_ctxp->mmu_idx);
	} else {
		mmu_kstat->ks_data = mmu_ctxp->mmu_kstat_data;
		for (stat = 0; stat < MMU_CTX_NUM_STATS; stat++)
			kstat_named_init(&mmu_ctxp->mmu_kstat_data[stat],
			    mmu_ctx_kstat_names[stat], KSTAT_DATA_INT64);
		mmu_ctxp->mmu_kstat = mmu_kstat;
		kstat_install(mmu_kstat);
	}
}

/*
 * plat_cpuid_to_mmu_ctx_info() is a platform interface that returns MMU
 * context domain information for a given CPU. If a platform does not
 * specify that interface, then the function below is used instead to return
 * default information. The defaults are as follows:
 *
 *	- For sun4u systems there's one MMU context domain per CPU.
 *	  This default is used by all sun4u systems except OPL. OPL systems
 *	  provide platform specific interface to map CPU ids to MMU ids
 *	  because on OPL more than 1 CPU shares a single MMU.
 *	  Note that on sun4v, there is one global context domain for
 *	  the entire system. This is to avoid running into potential problem
 *	  with ldom physical cpu substitution feature.
 *	- The number of MMU context IDs supported on any CPU in the
 *	  system is 8K.
 */
/*ARGSUSED*/
static void
sfmmu_cpuid_to_mmu_ctx_info(processorid_t cpuid, mmu_ctx_info_t *infop)
{
	infop->mmu_nctxs = nctxs;
#ifndef sun4v
	infop->mmu_idx = cpu[cpuid]->cpu_seqid;
#else /* sun4v */
	infop->mmu_idx = 0;
#endif /* sun4v */
}

/*
 * Called during CPU initialization to set the MMU context-related information
 * for a CPU.
 *
 * cpu_lock serializes accesses to mmu_ctxs and mmu_saved_gnum.
 */
void
sfmmu_cpu_init(cpu_t *cp)
{
	mmu_ctx_info_t	info;
	mmu_ctx_t	*mmu_ctxp;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (&plat_cpuid_to_mmu_ctx_info == NULL)
		sfmmu_cpuid_to_mmu_ctx_info(cp->cpu_id, &info);
	else
		plat_cpuid_to_mmu_ctx_info(cp->cpu_id, &info);

	ASSERT(info.mmu_idx < max_mmu_ctxdoms);

	if ((mmu_ctxp = mmu_ctxs_tbl[info.mmu_idx]) == NULL) {
		/* Each mmu_ctx is cacheline aligned. */
		mmu_ctxp = kmem_cache_alloc(mmuctxdom_cache, KM_SLEEP);
		bzero(mmu_ctxp, sizeof (mmu_ctx_t));

		mutex_init(&mmu_ctxp->mmu_lock, NULL, MUTEX_SPIN,
		    (void *)ipltospl(DISP_LEVEL));
		mmu_ctxp->mmu_idx = info.mmu_idx;
		mmu_ctxp->mmu_nctxs = info.mmu_nctxs;
		/*
		 * Globally for lifetime of a system,
		 * gnum must always increase.
		 * mmu_saved_gnum is protected by the cpu_lock.
		 */
		mmu_ctxp->mmu_gnum = mmu_saved_gnum + 1;
		mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS;

		sfmmu_mmu_kstat_create(mmu_ctxp);

		mmu_ctxs_tbl[info.mmu_idx] = mmu_ctxp;
	} else {
		ASSERT(mmu_ctxp->mmu_idx == info.mmu_idx);
	}

	/*
	 * The mmu_lock is acquired here to prevent races with
	 * the wrap-around code.
	 */
	mutex_enter(&mmu_ctxp->mmu_lock);


	mmu_ctxp->mmu_ncpus++;
	CPUSET_ADD(mmu_ctxp->mmu_cpuset, cp->cpu_id);
	CPU_MMU_IDX(cp) = info.mmu_idx;
	CPU_MMU_CTXP(cp) = mmu_ctxp;

	mutex_exit(&mmu_ctxp->mmu_lock);
}

/*
 * Called to perform MMU context-related cleanup for a CPU.
 */
void
sfmmu_cpu_cleanup(cpu_t *cp)
{
	mmu_ctx_t	*mmu_ctxp;

	ASSERT(MUTEX_HELD(&cpu_lock));

	mmu_ctxp = CPU_MMU_CTXP(cp);
	ASSERT(mmu_ctxp != NULL);

	/*
	 * The mmu_lock is acquired here to prevent races with
	 * the wrap-around code.
	 */
	mutex_enter(&mmu_ctxp->mmu_lock);

	CPU_MMU_CTXP(cp) = NULL;

	CPUSET_DEL(mmu_ctxp->mmu_cpuset, cp->cpu_id);
	if (--mmu_ctxp->mmu_ncpus == 0) {
		mmu_ctxs_tbl[mmu_ctxp->mmu_idx] = NULL;
		mutex_exit(&mmu_ctxp->mmu_lock);
		mutex_destroy(&mmu_ctxp->mmu_lock);

		if (mmu_ctxp->mmu_kstat)
			kstat_delete(mmu_ctxp->mmu_kstat);

		/* mmu_saved_gnum is protected by the cpu_lock. */
		if (mmu_saved_gnum < mmu_ctxp->mmu_gnum)
			mmu_saved_gnum = mmu_ctxp->mmu_gnum;

		kmem_cache_free(mmuctxdom_cache, mmu_ctxp);

		return;
	}

	mutex_exit(&mmu_ctxp->mmu_lock);
}

/*
 * hat_setup() makes an address space context the current active one.
 * In sfmmu this translates to setting the secondary context with the
 * corresponding context.
 */
void
hat_setup(struct hat *sfmmup, int allocflag)
{
	hatlock_t *hatlockp;

	/* Init needs some special treatment. */
	if (allocflag == HAT_INIT) {
		/*
		 * Make sure that we have
		 * 1. a TSB
		 * 2. a valid ctx that doesn't get stolen after this point.
		 */
		hatlockp = sfmmu_hat_enter(sfmmup);

		/*
		 * Swap in the TSB.  hat_init() allocates tsbinfos without
		 * TSBs, but we need one for init, since the kernel does some
		 * special things to set up its stack and needs the TSB to
		 * resolve page faults.
		 */
		sfmmu_tsb_swapin(sfmmup, hatlockp);

		sfmmu_get_ctx(sfmmup);

		sfmmu_hat_exit(hatlockp);
	} else {
		ASSERT(allocflag == HAT_ALLOC);

		hatlockp = sfmmu_hat_enter(sfmmup);
		kpreempt_disable();

		CPUSET_ADD(sfmmup->sfmmu_cpusran, CPU->cpu_id);

		/*
		 * sfmmu_setctx_sec takes <pgsz|cnum> as a parameter,
		 * pagesize bits don't matter in this case since we are passing
		 * INVALID_CONTEXT to it.
		 */
		sfmmu_setctx_sec(INVALID_CONTEXT);
		sfmmu_clear_utsbinfo();

		kpreempt_enable();
		sfmmu_hat_exit(hatlockp);
	}
}

/*
 * Free all the translation resources for the specified address space.
 * Called from as_free when an address space is being destroyed.
 */
void
hat_free_start(struct hat *sfmmup)
{
	ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
	ASSERT(sfmmup != ksfmmup);
	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);

	sfmmup->sfmmu_free = 1;
}

void
hat_free_end(struct hat *sfmmup)
{
	int i;

	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
	if (sfmmup->sfmmu_ismhat) {
		for (i = 0; i < mmu_page_sizes; i++) {
			sfmmup->sfmmu_ttecnt[i] = 0;
			sfmmup->sfmmu_ismttecnt[i] = 0;
		}
	} else {
		/* EMPTY */
		ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0);
		ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0);
		ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0);
		ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0);
		ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
		ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
	}

	if (sfmmup->sfmmu_rmstat) {
		hat_freestat(sfmmup->sfmmu_as, NULL);
	}

	while (sfmmup->sfmmu_tsb != NULL) {
		struct tsb_info *next = sfmmup->sfmmu_tsb->tsb_next;
		sfmmu_tsbinfo_free(sfmmup->sfmmu_tsb);
		sfmmup->sfmmu_tsb = next;
	}
	sfmmu_free_sfmmu(sfmmup);

	kmem_cache_free(sfmmuid_cache, sfmmup);
}

/*
 * Set up any translation structures, for the specified address space,
 * that are needed or preferred when the process is being swapped in.
 */
/* ARGSUSED */
void
hat_swapin(struct hat *hat)
{
	ASSERT(hat->sfmmu_xhat_provider == NULL);
}

/*
 * Free all of the translation resources, for the specified address space,
 * that can be freed while the process is swapped out. Called from as_swapout.
 * Also, free up the ctx that this process was using.
 */
void
hat_swapout(struct hat *sfmmup)
{
	struct hmehash_bucket *hmebp;
	struct hme_blk *hmeblkp;
	struct hme_blk *pr_hblk = NULL;
	struct hme_blk *nx_hblk;
	int i;
	uint64_t hblkpa, prevpa, nx_pa;
	struct hme_blk *list = NULL;
	hatlock_t *hatlockp;
	struct tsb_info *tsbinfop;
	struct free_tsb {
		struct free_tsb *next;
		struct tsb_info *tsbinfop;
	};			/* free list of TSBs */
	struct free_tsb *freelist, *last, *next;

	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
	SFMMU_STAT(sf_swapout);

	/*
	 * There is no way to go from an as to all its translations in sfmmu.
	 * Here is one of the times when we take the big hit and traverse
	 * the hash looking for hme_blks to free up.  Not only do we free up
	 * this as's hme_blks but all those that are free.  We are obviously
	 * swapping because we need memory so let's free up as much
	 * as we can.
	 *
	 * Note that we don't flush TLB/TSB here -- it's not necessary
	 * because:
	 *  1) we free the ctx we're using and throw away the TSB(s);
	 *  2) processes aren't runnable while being swapped out.
	 */
	ASSERT(sfmmup != KHATID);
	for (i = 0; i <= UHMEHASH_SZ; i++) {
		hmebp = &uhme_hash[i];
		SFMMU_HASH_LOCK(hmebp);
		hmeblkp = hmebp->hmeblkp;
		hblkpa = hmebp->hmeh_nextpa;
		prevpa = 0;
		pr_hblk = NULL;
		while (hmeblkp) {

			ASSERT(!hmeblkp->hblk_xhat_bit);

			if ((hmeblkp->hblk_tag.htag_id == sfmmup) &&
			    !hmeblkp->hblk_shw_bit && !hmeblkp->hblk_lckcnt) {
				(void) sfmmu_hblk_unload(sfmmup, hmeblkp,
				    (caddr_t)get_hblk_base(hmeblkp),
				    get_hblk_endaddr(hmeblkp),
				    NULL, HAT_UNLOAD);
			}
			nx_hblk = hmeblkp->hblk_next;
			nx_pa = hmeblkp->hblk_nextpa;
			if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
				ASSERT(!hmeblkp->hblk_lckcnt);
				sfmmu_hblk_hash_rm(hmebp, hmeblkp,
				    prevpa, pr_hblk);
				sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list);
			} else {
				pr_hblk = hmeblkp;
				prevpa = hblkpa;
			}
			hmeblkp = nx_hblk;
			hblkpa = nx_pa;
		}
		SFMMU_HASH_UNLOCK(hmebp);
	}

	sfmmu_hblks_list_purge(&list);

	/*
	 * Now free up the ctx so that others can reuse it.
	 */
	hatlockp = sfmmu_hat_enter(sfmmup);

	sfmmu_invalidate_ctx(sfmmup);

	/*
	 * Free TSBs, but not tsbinfos, and set SWAPPED flag.
	 * If TSBs were never swapped in, just return.
	 * This implies that we don't support partial swapping
	 * of TSBs -- either all are swapped out, or none are.
	 *
	 * We must hold the HAT lock here to prevent racing with another
	 * thread trying to unmap TTEs from the TSB or running the post-
	 * relocator after relocating the TSB's memory.  Unfortunately, we
	 * can't free memory while holding the HAT lock or we could
	 * deadlock, so we build a list of TSBs to be freed after marking
	 * the tsbinfos as swapped out and free them after dropping the
	 * lock.
	 */
	if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
		sfmmu_hat_exit(hatlockp);
		return;
	}

	SFMMU_FLAGS_SET(sfmmup, HAT_SWAPPED);
	last = freelist = NULL;
	for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
	    tsbinfop = tsbinfop->tsb_next) {
		ASSERT((tsbinfop->tsb_flags & TSB_SWAPPED) == 0);

		/*
		 * Cast the TSB into a struct free_tsb and put it on the free
		 * list.
		 */
		if (freelist == NULL) {
			last = freelist = (struct free_tsb *)tsbinfop->tsb_va;
		} else {
			last->next = (struct free_tsb *)tsbinfop->tsb_va;
			last = last->next;
		}
		last->next = NULL;
		last->tsbinfop = tsbinfop;
		tsbinfop->tsb_flags |= TSB_SWAPPED;
		/*
		 * Zero out the TTE to clear the valid bit.
		 * Note we can't use a value like 0xbad because we want to
		 * ensure diagnostic bits are NEVER set on TTEs that might
		 * be loaded.  The intent is to catch any invalid access
		 * to the swapped TSB, such as a thread running with a valid
		 * context without first calling sfmmu_tsb_swapin() to
		 * allocate TSB memory.
1732 */ 1733 tsbinfop->tsb_tte.ll = 0; 1734 } 1735 1736 #ifdef sun4v 1737 if (freelist) 1738 sfmmu_invalidate_tsbinfo(sfmmup); 1739 #endif /* sun4v */ 1740 1741 /* Now we can drop the lock and free the TSB memory. */ 1742 sfmmu_hat_exit(hatlockp); 1743 for (; freelist != NULL; freelist = next) { 1744 next = freelist->next; 1745 sfmmu_tsb_free(freelist->tsbinfop); 1746 } 1747 } 1748 1749 /* 1750 * Duplicate the translations of an as into another newas 1751 */ 1752 /* ARGSUSED */ 1753 int 1754 hat_dup(struct hat *hat, struct hat *newhat, caddr_t addr, size_t len, 1755 uint_t flag) 1756 { 1757 ASSERT(hat->sfmmu_xhat_provider == NULL); 1758 ASSERT((flag == 0) || (flag == HAT_DUP_ALL) || (flag == HAT_DUP_COW)); 1759 1760 if (flag == HAT_DUP_COW) { 1761 panic("hat_dup: HAT_DUP_COW not supported"); 1762 } 1763 return (0); 1764 } 1765 1766 /* 1767 * Set up addr to map to page pp with protection prot. 1768 * As an optimization we also load the TSB with the 1769 * corresponding tte but it is no big deal if the tte gets kicked out. 1770 */ 1771 void 1772 hat_memload(struct hat *hat, caddr_t addr, struct page *pp, 1773 uint_t attr, uint_t flags) 1774 { 1775 tte_t tte; 1776 1777 1778 ASSERT(hat != NULL); 1779 ASSERT(PAGE_LOCKED(pp)); 1780 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 1781 ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG)); 1782 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 1783 1784 if (PP_ISFREE(pp)) { 1785 panic("hat_memload: loading a mapping to free page %p", 1786 (void *)pp); 1787 } 1788 1789 if (hat->sfmmu_xhat_provider) { 1790 XHAT_MEMLOAD(hat, addr, pp, attr, flags); 1791 return; 1792 } 1793 1794 ASSERT((hat == ksfmmup) || 1795 AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock)); 1796 1797 if (flags & ~SFMMU_LOAD_ALLFLAG) 1798 cmn_err(CE_NOTE, "hat_memload: unsupported flags %d", 1799 flags & ~SFMMU_LOAD_ALLFLAG); 1800 1801 if (hat->sfmmu_rmstat) 1802 hat_resvstat(MMU_PAGESIZE, hat->sfmmu_as, addr); 1803 1804 #if defined(SF_ERRATA_57) 1805 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) && 1806 (addr < errata57_limit) && (attr & PROT_EXEC) && 1807 !(flags & HAT_LOAD_SHARE)) { 1808 cmn_err(CE_WARN, "hat_memload: illegal attempt to make user " 1809 " page executable"); 1810 attr &= ~PROT_EXEC; 1811 } 1812 #endif 1813 1814 sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K); 1815 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, flags); 1816 1817 /* 1818 * Check TSB and TLB page sizes. 1819 */ 1820 if ((flags & HAT_LOAD_SHARE) == 0) { 1821 sfmmu_check_page_sizes(hat, 1); 1822 } 1823 } 1824 1825 /* 1826 * hat_devload can be called to map real memory (e.g. 1827 * /dev/kmem) and even though hat_devload will determine pf is 1828 * for memory, it will be unable to get a shared lock on the 1829 * page (because someone else has it exclusively) and will 1830 * pass dp = NULL. If tteload doesn't get a non-NULL 1831 * page pointer it can't cache memory. 
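 *
 * As a purely illustrative sketch (kvaddr and pfn are hypothetical
 * names, not taken from any particular caller), a driver mapping a
 * single locked device page might do something like:
 *
 *	hat_devload(kas.a_hat, kvaddr, MMU_PAGESIZE, pfn,
 *	    PROT_READ | PROT_WRITE, HAT_LOAD_LOCK);
 *
 * The attr and flags values follow the same rules that the asserts
 * below enforce (SFMMU_LOAD_ALLATTR / SFMMU_LOAD_ALLFLAG).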
1832 */ 1833 void 1834 hat_devload(struct hat *hat, caddr_t addr, size_t len, pfn_t pfn, 1835 uint_t attr, int flags) 1836 { 1837 tte_t tte; 1838 struct page *pp = NULL; 1839 int use_lgpg = 0; 1840 1841 ASSERT(hat != NULL); 1842 1843 if (hat->sfmmu_xhat_provider) { 1844 XHAT_DEVLOAD(hat, addr, len, pfn, attr, flags); 1845 return; 1846 } 1847 1848 ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG)); 1849 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 1850 ASSERT((hat == ksfmmup) || 1851 AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock)); 1852 if (len == 0) 1853 panic("hat_devload: zero len"); 1854 if (flags & ~SFMMU_LOAD_ALLFLAG) 1855 cmn_err(CE_NOTE, "hat_devload: unsupported flags %d", 1856 flags & ~SFMMU_LOAD_ALLFLAG); 1857 1858 #if defined(SF_ERRATA_57) 1859 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) && 1860 (addr < errata57_limit) && (attr & PROT_EXEC) && 1861 !(flags & HAT_LOAD_SHARE)) { 1862 cmn_err(CE_WARN, "hat_devload: illegal attempt to make user " 1863 " page executable"); 1864 attr &= ~PROT_EXEC; 1865 } 1866 #endif 1867 1868 /* 1869 * If it's a memory page find its pp 1870 */ 1871 if (!(flags & HAT_LOAD_NOCONSIST) && pf_is_memory(pfn)) { 1872 pp = page_numtopp_nolock(pfn); 1873 if (pp == NULL) { 1874 flags |= HAT_LOAD_NOCONSIST; 1875 } else { 1876 if (PP_ISFREE(pp)) { 1877 panic("hat_memload: loading " 1878 "a mapping to free page %p", 1879 (void *)pp); 1880 } 1881 if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) { 1882 panic("hat_memload: loading a mapping " 1883 "to unlocked relocatable page %p", 1884 (void *)pp); 1885 } 1886 ASSERT(len == MMU_PAGESIZE); 1887 } 1888 } 1889 1890 if (hat->sfmmu_rmstat) 1891 hat_resvstat(len, hat->sfmmu_as, addr); 1892 1893 if (flags & HAT_LOAD_NOCONSIST) { 1894 attr |= SFMMU_UNCACHEVTTE; 1895 use_lgpg = 1; 1896 } 1897 if (!pf_is_memory(pfn)) { 1898 attr |= SFMMU_UNCACHEPTTE | HAT_NOSYNC; 1899 use_lgpg = 1; 1900 switch (attr & HAT_ORDER_MASK) { 1901 case HAT_STRICTORDER: 1902 case HAT_UNORDERED_OK: 1903 /* 1904 * we set the side effect bit for all non 1905 * memory mappings unless merging is ok 1906 */ 1907 attr |= SFMMU_SIDEFFECT; 1908 break; 1909 case HAT_MERGING_OK: 1910 case HAT_LOADCACHING_OK: 1911 case HAT_STORECACHING_OK: 1912 break; 1913 default: 1914 panic("hat_devload: bad attr"); 1915 break; 1916 } 1917 } 1918 while (len) { 1919 if (!use_lgpg) { 1920 sfmmu_memtte(&tte, pfn, attr, TTE8K); 1921 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 1922 flags); 1923 len -= MMU_PAGESIZE; 1924 addr += MMU_PAGESIZE; 1925 pfn++; 1926 continue; 1927 } 1928 /* 1929 * try to use large pages, check va/pa alignments 1930 * Note that 32M/256M page sizes are not (yet) supported. 
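 * For each candidate size the checks below require that the remaining
 * len covers the whole page, that the virtual address is aligned to
 * that page size, that the size is not disabled in disable_large_pages,
 * and that the physical address (mmu_ptob(pfn)) is equally aligned;
 * otherwise we fall back to the next smaller size, down to 8K.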
1931 */ 1932 if ((len >= MMU_PAGESIZE4M) && 1933 !((uintptr_t)addr & MMU_PAGEOFFSET4M) && 1934 !(disable_large_pages & (1 << TTE4M)) && 1935 !(mmu_ptob(pfn) & MMU_PAGEOFFSET4M)) { 1936 sfmmu_memtte(&tte, pfn, attr, TTE4M); 1937 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 1938 flags); 1939 len -= MMU_PAGESIZE4M; 1940 addr += MMU_PAGESIZE4M; 1941 pfn += MMU_PAGESIZE4M / MMU_PAGESIZE; 1942 } else if ((len >= MMU_PAGESIZE512K) && 1943 !((uintptr_t)addr & MMU_PAGEOFFSET512K) && 1944 !(disable_large_pages & (1 << TTE512K)) && 1945 !(mmu_ptob(pfn) & MMU_PAGEOFFSET512K)) { 1946 sfmmu_memtte(&tte, pfn, attr, TTE512K); 1947 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 1948 flags); 1949 len -= MMU_PAGESIZE512K; 1950 addr += MMU_PAGESIZE512K; 1951 pfn += MMU_PAGESIZE512K / MMU_PAGESIZE; 1952 } else if ((len >= MMU_PAGESIZE64K) && 1953 !((uintptr_t)addr & MMU_PAGEOFFSET64K) && 1954 !(disable_large_pages & (1 << TTE64K)) && 1955 !(mmu_ptob(pfn) & MMU_PAGEOFFSET64K)) { 1956 sfmmu_memtte(&tte, pfn, attr, TTE64K); 1957 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 1958 flags); 1959 len -= MMU_PAGESIZE64K; 1960 addr += MMU_PAGESIZE64K; 1961 pfn += MMU_PAGESIZE64K / MMU_PAGESIZE; 1962 } else { 1963 sfmmu_memtte(&tte, pfn, attr, TTE8K); 1964 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 1965 flags); 1966 len -= MMU_PAGESIZE; 1967 addr += MMU_PAGESIZE; 1968 pfn++; 1969 } 1970 } 1971 1972 /* 1973 * Check TSB and TLB page sizes. 1974 */ 1975 if ((flags & HAT_LOAD_SHARE) == 0) { 1976 sfmmu_check_page_sizes(hat, 1); 1977 } 1978 } 1979 1980 /* 1981 * Map the largest extent possible out of the page array. The array may NOT 1982 * be in order. The largest possible mapping a page can have 1983 * is specified in the p_szc field. The p_szc field 1984 * cannot change as long as there are any mappings (large or small) 1985 * to any of the pages that make up the large page. (i.e. any 1986 * promotion/demotion of page size is not up to the hat but up to 1987 * the page free list manager). The array 1988 * should consist of properly aligned contiguous pages that are 1989 * part of a big page for a large mapping to be created. 1990 */ 1991 void 1992 hat_memload_array(struct hat *hat, caddr_t addr, size_t len, 1993 struct page **pps, uint_t attr, uint_t flags) 1994 { 1995 int ttesz; 1996 size_t mapsz; 1997 pgcnt_t numpg, npgs; 1998 tte_t tte; 1999 page_t *pp; 2000 uint_t large_pages_disable; 2001 2002 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 2003 2004 if (hat->sfmmu_xhat_provider) { 2005 XHAT_MEMLOAD_ARRAY(hat, addr, len, pps, attr, flags); 2006 return; 2007 } 2008 2009 if (hat->sfmmu_rmstat) 2010 hat_resvstat(len, hat->sfmmu_as, addr); 2011 2012 #if defined(SF_ERRATA_57) 2013 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) && 2014 (addr < errata57_limit) && (attr & PROT_EXEC) && 2015 !(flags & HAT_LOAD_SHARE)) { 2016 cmn_err(CE_WARN, "hat_memload_array: illegal attempt to make " 2017 "user page executable"); 2018 attr &= ~PROT_EXEC; 2019 } 2020 #endif 2021 2022 /* Get number of pages */ 2023 npgs = len >> MMU_PAGESHIFT; 2024 2025 if (flags & HAT_LOAD_SHARE) { 2026 large_pages_disable = disable_ism_large_pages; 2027 } else { 2028 large_pages_disable = disable_large_pages; 2029 } 2030 2031 if (npgs < NHMENTS || large_pages_disable == LARGE_PAGES_OFF) { 2032 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs); 2033 return; 2034 } 2035 2036 while (npgs >= NHMENTS) { 2037 pp = *pps; 2038 for (ttesz = pp->p_szc; ttesz != TTE8K; ttesz--) { 2039 /* 2040 * Check if this page size is disabled.
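 * (large_pages_disable carries one bit per TTE size code, so a set
 * (1 << ttesz) bit simply causes that candidate size to be skipped
 * here.)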
2041 */ 2042 if (large_pages_disable & (1 << ttesz)) 2043 continue; 2044 2045 numpg = TTEPAGES(ttesz); 2046 mapsz = numpg << MMU_PAGESHIFT; 2047 if ((npgs >= numpg) && 2048 IS_P2ALIGNED(addr, mapsz) && 2049 IS_P2ALIGNED(pp->p_pagenum, numpg)) { 2050 /* 2051 * At this point we have enough pages and 2052 * we know the virtual address and the pfn 2053 * are properly aligned. We still need 2054 * to check for physical contiguity but since 2055 * it is very likely that this is the case 2056 * we will assume they are so and undo 2057 * the request if necessary. It would 2058 * be great if we could get a hint flag 2059 * like HAT_CONTIG which would tell us 2060 * the pages are contiguous for sure. 2061 */ 2062 sfmmu_memtte(&tte, (*pps)->p_pagenum, 2063 attr, ttesz); 2064 if (!sfmmu_tteload_array(hat, &tte, addr, 2065 pps, flags)) { 2066 break; 2067 } 2068 } 2069 } 2070 if (ttesz == TTE8K) { 2071 /* 2072 * We were not able to map the array using a large page; 2073 * batch an hmeblk, or a fraction of one, at a time. 2074 */ 2075 numpg = ((uintptr_t)addr >> MMU_PAGESHIFT) 2076 & (NHMENTS-1); 2077 numpg = NHMENTS - numpg; 2078 ASSERT(numpg <= npgs); 2079 mapsz = numpg * MMU_PAGESIZE; 2080 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, 2081 numpg); 2082 } 2083 addr += mapsz; 2084 npgs -= numpg; 2085 pps += numpg; 2086 } 2087 2088 if (npgs) { 2089 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs); 2090 } 2091 2092 /* 2093 * Check TSB and TLB page sizes. 2094 */ 2095 if ((flags & HAT_LOAD_SHARE) == 0) { 2096 sfmmu_check_page_sizes(hat, 1); 2097 } 2098 } 2099 2100 /* 2101 * Function tries to batch 8K pages into the same hme blk. 2102 */ 2103 static void 2104 sfmmu_memload_batchsmall(struct hat *hat, caddr_t vaddr, page_t **pps, 2105 uint_t attr, uint_t flags, pgcnt_t npgs) 2106 { 2107 tte_t tte; 2108 page_t *pp; 2109 struct hmehash_bucket *hmebp; 2110 struct hme_blk *hmeblkp; 2111 int index; 2112 2113 while (npgs) { 2114 /* 2115 * Acquire the hash bucket. 2116 */ 2117 hmebp = sfmmu_tteload_acquire_hashbucket(hat, vaddr, TTE8K); 2118 ASSERT(hmebp); 2119 2120 /* 2121 * Find the hment block. 2122 */ 2123 hmeblkp = sfmmu_tteload_find_hmeblk(hat, hmebp, vaddr, 2124 TTE8K, flags); 2125 ASSERT(hmeblkp); 2126 2127 do { 2128 /* 2129 * Make the tte. 2130 */ 2131 pp = *pps; 2132 sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K); 2133 2134 /* 2135 * Add the translation. 2136 */ 2137 (void) sfmmu_tteload_addentry(hat, hmeblkp, &tte, 2138 vaddr, pps, flags); 2139 2140 /* 2141 * Go to the next page. 2142 */ 2143 pps++; 2144 npgs--; 2145 2146 /* 2147 * Go to the next address. 2148 */ 2149 vaddr += MMU_PAGESIZE; 2150 2151 /* 2152 * Don't cross over into a different hmeblk. 2153 */ 2154 index = (int)(((uintptr_t)vaddr >> MMU_PAGESHIFT) & 2155 (NHMENTS-1)); 2156 2157 } while (index != 0 && npgs != 0); 2158 2159 /* 2160 * Release the hash bucket.
2161 */ 2162 2163 sfmmu_tteload_release_hashbucket(hmebp); 2164 } 2165 } 2166 2167 /* 2168 * Construct a tte for a page: 2169 * 2170 * tte_valid = 1 2171 * tte_size2 = size & TTE_SZ2_BITS (Panther and Olympus-C only) 2172 * tte_size = size 2173 * tte_nfo = attr & HAT_NOFAULT 2174 * tte_ie = attr & HAT_STRUCTURE_LE 2175 * tte_hmenum = hmenum 2176 * tte_pahi = pp->p_pagenum >> TTE_PASHIFT; 2177 * tte_palo = pp->p_pagenum & TTE_PALOMASK; 2178 * tte_ref = 1 (optimization) 2179 * tte_wr_perm = attr & PROT_WRITE; 2180 * tte_no_sync = attr & HAT_NOSYNC 2181 * tte_lock = attr & SFMMU_LOCKTTE 2182 * tte_cp = !(attr & SFMMU_UNCACHEPTTE) 2183 * tte_cv = !(attr & SFMMU_UNCACHEVTTE) 2184 * tte_e = attr & SFMMU_SIDEFFECT 2185 * tte_priv = !(attr & PROT_USER) 2186 * tte_hwwr = if nosync is set and it is writable we set the mod bit (opt) 2187 * tte_glb = 0 2188 */ 2189 void 2190 sfmmu_memtte(tte_t *ttep, pfn_t pfn, uint_t attr, int tte_sz) 2191 { 2192 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 2193 2194 ttep->tte_inthi = MAKE_TTE_INTHI(pfn, attr, tte_sz, 0 /* hmenum */); 2195 ttep->tte_intlo = MAKE_TTE_INTLO(pfn, attr, tte_sz, 0 /* hmenum */); 2196 2197 if (TTE_IS_NOSYNC(ttep)) { 2198 TTE_SET_REF(ttep); 2199 if (TTE_IS_WRITABLE(ttep)) { 2200 TTE_SET_MOD(ttep); 2201 } 2202 } 2203 if (TTE_IS_NFO(ttep) && TTE_IS_EXECUTABLE(ttep)) { 2204 panic("sfmmu_memtte: can't set both NFO and EXEC bits"); 2205 } 2206 } 2207 2208 /* 2209 * This function will add a translation to the hme_blk and allocate the 2210 * hme_blk if one does not exist. 2211 * If a page structure is specified then it will add the 2212 * corresponding hment to the mapping list. 2213 * It will also update the hmenum field for the tte. 2214 */ 2215 void 2216 sfmmu_tteload(struct hat *sfmmup, tte_t *ttep, caddr_t vaddr, page_t *pp, 2217 uint_t flags) 2218 { 2219 (void) sfmmu_tteload_array(sfmmup, ttep, vaddr, &pp, flags); 2220 } 2221 2222 /* 2223 * Load (ttep != NULL) or unload (ttep == NULL) one entry in the TSB. 2224 * Assumes that a particular page size may only be resident in one TSB. 2225 */ 2226 static void 2227 sfmmu_mod_tsb(sfmmu_t *sfmmup, caddr_t vaddr, tte_t *ttep, int ttesz) 2228 { 2229 struct tsb_info *tsbinfop = NULL; 2230 uint64_t tag; 2231 struct tsbe *tsbe_addr; 2232 uint64_t tsb_base; 2233 uint_t tsb_size; 2234 int vpshift = MMU_PAGESHIFT; 2235 int phys = 0; 2236 2237 if (sfmmup == ksfmmup) { /* No support for 32/256M ksfmmu pages */ 2238 phys = ktsb_phys; 2239 if (ttesz >= TTE4M) { 2240 #ifndef sun4v 2241 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M)); 2242 #endif 2243 tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base; 2244 tsb_size = ktsb4m_szcode; 2245 } else { 2246 tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base; 2247 tsb_size = ktsb_szcode; 2248 } 2249 } else { 2250 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz); 2251 2252 /* 2253 * If there isn't a TSB for this page size, or the TSB is 2254 * swapped out, there is nothing to do. Note that the latter 2255 * case seems impossible but can occur if hat_pageunload() 2256 * is called on an ISM mapping while the process is swapped 2257 * out. 2258 */ 2259 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED)) 2260 return; 2261 2262 /* 2263 * If another thread is in the middle of relocating a TSB 2264 * we can't unload the entry so set a flag so that the 2265 * TSB will be flushed before it can be accessed by the 2266 * process. 
2267 */ 2268 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) { 2269 if (ttep == NULL) 2270 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED; 2271 return; 2272 } 2273 #if defined(UTSB_PHYS) 2274 phys = 1; 2275 tsb_base = (uint64_t)tsbinfop->tsb_pa; 2276 #else 2277 tsb_base = (uint64_t)tsbinfop->tsb_va; 2278 #endif 2279 tsb_size = tsbinfop->tsb_szc; 2280 } 2281 if (ttesz >= TTE4M) 2282 vpshift = MMU_PAGESHIFT4M; 2283 2284 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size); 2285 tag = sfmmu_make_tsbtag(vaddr); 2286 2287 if (ttep == NULL) { 2288 sfmmu_unload_tsbe(tsbe_addr, tag, phys); 2289 } else { 2290 if (ttesz >= TTE4M) { 2291 SFMMU_STAT(sf_tsb_load4m); 2292 } else { 2293 SFMMU_STAT(sf_tsb_load8k); 2294 } 2295 2296 sfmmu_load_tsbe(tsbe_addr, tag, ttep, phys); 2297 } 2298 } 2299 2300 /* 2301 * Unmap all entries from [start, end) matching the given page size. 2302 * 2303 * This function is used primarily to unmap replicated 64K or 512K entries 2304 * from the TSB that are inserted using the base page size TSB pointer, but 2305 * it may also be called to unmap a range of addresses from the TSB. 2306 */ 2307 void 2308 sfmmu_unload_tsb_range(sfmmu_t *sfmmup, caddr_t start, caddr_t end, int ttesz) 2309 { 2310 struct tsb_info *tsbinfop; 2311 uint64_t tag; 2312 struct tsbe *tsbe_addr; 2313 caddr_t vaddr; 2314 uint64_t tsb_base; 2315 int vpshift, vpgsz; 2316 uint_t tsb_size; 2317 int phys = 0; 2318 2319 /* 2320 * Assumptions: 2321 * If ttesz == 8K, 64K or 512K, we walk through the range 8K 2322 * at a time shooting down any valid entries we encounter. 2323 * 2324 * If ttesz >= 4M we walk the range 4M at a time shooting 2325 * down any valid mappings we find. 2326 */ 2327 if (sfmmup == ksfmmup) { 2328 phys = ktsb_phys; 2329 if (ttesz >= TTE4M) { 2330 #ifndef sun4v 2331 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M)); 2332 #endif 2333 tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base; 2334 tsb_size = ktsb4m_szcode; 2335 } else { 2336 tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base; 2337 tsb_size = ktsb_szcode; 2338 } 2339 } else { 2340 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz); 2341 2342 /* 2343 * If there isn't a TSB for this page size, or the TSB is 2344 * swapped out, there is nothing to do. Note that the latter 2345 * case seems impossible but can occur if hat_pageunload() 2346 * is called on an ISM mapping while the process is swapped 2347 * out. 2348 */ 2349 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED)) 2350 return; 2351 2352 /* 2353 * If another thread is in the middle of relocating a TSB 2354 * we can't unload the entry so set a flag so that the 2355 * TSB will be flushed before it can be accessed by the 2356 * process. 2357 */ 2358 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) { 2359 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED; 2360 return; 2361 } 2362 #if defined(UTSB_PHYS) 2363 phys = 1; 2364 tsb_base = (uint64_t)tsbinfop->tsb_pa; 2365 #else 2366 tsb_base = (uint64_t)tsbinfop->tsb_va; 2367 #endif 2368 tsb_size = tsbinfop->tsb_szc; 2369 } 2370 if (ttesz >= TTE4M) { 2371 vpshift = MMU_PAGESHIFT4M; 2372 vpgsz = MMU_PAGESIZE4M; 2373 } else { 2374 vpshift = MMU_PAGESHIFT; 2375 vpgsz = MMU_PAGESIZE; 2376 } 2377 2378 for (vaddr = start; vaddr < end; vaddr += vpgsz) { 2379 tag = sfmmu_make_tsbtag(vaddr); 2380 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size); 2381 sfmmu_unload_tsbe(tsbe_addr, tag, phys); 2382 } 2383 } 2384 2385 /* 2386 * Select the optimum TSB size given the number of mappings 2387 * that need to be cached. 
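 * The loop below returns the smallest size code szc for which pgcnt
 * fits within SFMMU_RSS_TSBSIZE(szc), capped at tsb_max_growsize;
 * under DEBUG, tsb_grow_stress instead picks a pseudo-random size to
 * exercise the TSB resizing paths.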
2388 */ 2389 static int 2390 sfmmu_select_tsb_szc(pgcnt_t pgcnt) 2391 { 2392 int szc = 0; 2393 2394 #ifdef DEBUG 2395 if (tsb_grow_stress) { 2396 uint32_t randval = (uint32_t)gettick() >> 4; 2397 return (randval % (tsb_max_growsize + 1)); 2398 } 2399 #endif /* DEBUG */ 2400 2401 while ((szc < tsb_max_growsize) && (pgcnt > SFMMU_RSS_TSBSIZE(szc))) 2402 szc++; 2403 return (szc); 2404 } 2405 2406 /* 2407 * This function will add a translation to the hme_blk and allocate the 2408 * hme_blk if one does not exist. 2409 * If a page structure is specified then it will add the 2410 * corresponding hment to the mapping list. 2411 * It will also update the hmenum field for the tte. 2412 * Furthermore, it attempts to create a large page translation 2413 * for <addr,hat> at page array pps. It assumes addr and the first 2414 * pp are correctly aligned. It returns 0 if successful and 1 otherwise. 2415 */ 2416 static int 2417 sfmmu_tteload_array(sfmmu_t *sfmmup, tte_t *ttep, caddr_t vaddr, 2418 page_t **pps, uint_t flags) 2419 { 2420 struct hmehash_bucket *hmebp; 2421 struct hme_blk *hmeblkp; 2422 int ret; 2423 uint_t size; 2424 2425 /* 2426 * Get mapping size. 2427 */ 2428 size = TTE_CSZ(ttep); 2429 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size))); 2430 2431 /* 2432 * Acquire the hash bucket. 2433 */ 2434 hmebp = sfmmu_tteload_acquire_hashbucket(sfmmup, vaddr, size); 2435 ASSERT(hmebp); 2436 2437 /* 2438 * Find the hment block. 2439 */ 2440 hmeblkp = sfmmu_tteload_find_hmeblk(sfmmup, hmebp, vaddr, size, flags); 2441 ASSERT(hmeblkp); 2442 2443 /* 2444 * Add the translation. 2445 */ 2446 ret = sfmmu_tteload_addentry(sfmmup, hmeblkp, ttep, vaddr, pps, flags); 2447 2448 /* 2449 * Release the hash bucket. 2450 */ 2451 sfmmu_tteload_release_hashbucket(hmebp); 2452 2453 return (ret); 2454 } 2455 2456 /* 2457 * Function locks and returns a pointer to the hash bucket for vaddr and size. 2458 */ 2459 static struct hmehash_bucket * 2460 sfmmu_tteload_acquire_hashbucket(sfmmu_t *sfmmup, caddr_t vaddr, int size) 2461 { 2462 struct hmehash_bucket *hmebp; 2463 int hmeshift; 2464 2465 hmeshift = HME_HASH_SHIFT(size); 2466 2467 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift); 2468 2469 SFMMU_HASH_LOCK(hmebp); 2470 2471 return (hmebp); 2472 } 2473 2474 /* 2475 * Function returns a pointer to an hmeblk in the hash bucket, hmebp. If the 2476 * hmeblk doesn't exist for the [sfmmup, vaddr & size] signature, a hmeblk is 2477 * allocated. 2478 */ 2479 static struct hme_blk * 2480 sfmmu_tteload_find_hmeblk(sfmmu_t *sfmmup, struct hmehash_bucket *hmebp, 2481 caddr_t vaddr, uint_t size, uint_t flags) 2482 { 2483 hmeblk_tag hblktag; 2484 int hmeshift; 2485 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL; 2486 uint64_t hblkpa, prevpa; 2487 struct kmem_cache *sfmmu_cache; 2488 uint_t forcefree; 2489 2490 hblktag.htag_id = sfmmup; 2491 hmeshift = HME_HASH_SHIFT(size); 2492 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift); 2493 hblktag.htag_rehash = HME_HASH_REHASH(size); 2494 2495 ttearray_realloc: 2496 2497 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, hblkpa, 2498 pr_hblk, prevpa, &list); 2499 2500 /* 2501 * We block until hblk_reserve_lock is released; it's held by 2502 * the thread temporarily using hblk_reserve until hblk_reserve is 2503 * replaced by a hblk from sfmmu8_cache.
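 * Entering and immediately exiting hblk_reserve_lock below is simply
 * a way of waiting for that thread to finish; once the lock is
 * released we retake the hash lock and retry the search from
 * ttearray_realloc.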
2504 */ 2505 if (hmeblkp == (struct hme_blk *)hblk_reserve && 2506 hblk_reserve_thread != curthread) { 2507 SFMMU_HASH_UNLOCK(hmebp); 2508 mutex_enter(&hblk_reserve_lock); 2509 mutex_exit(&hblk_reserve_lock); 2510 SFMMU_STAT(sf_hblk_reserve_hit); 2511 SFMMU_HASH_LOCK(hmebp); 2512 goto ttearray_realloc; 2513 } 2514 2515 if (hmeblkp == NULL) { 2516 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size, 2517 hblktag, flags); 2518 } else { 2519 /* 2520 * It is possible for 8k and 64k hblks to collide since they 2521 * have the same rehash value. This is because we 2522 * lazily free hblks and 8K/64K blks could be lingering. 2523 * If we find size mismatch we free the block and & try again. 2524 */ 2525 if (get_hblk_ttesz(hmeblkp) != size) { 2526 ASSERT(!hmeblkp->hblk_vcnt); 2527 ASSERT(!hmeblkp->hblk_hmecnt); 2528 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, pr_hblk); 2529 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 2530 goto ttearray_realloc; 2531 } 2532 if (hmeblkp->hblk_shw_bit) { 2533 /* 2534 * if the hblk was previously used as a shadow hblk then 2535 * we will change it to a normal hblk 2536 */ 2537 if (hmeblkp->hblk_shw_mask) { 2538 sfmmu_shadow_hcleanup(sfmmup, hmeblkp, hmebp); 2539 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 2540 goto ttearray_realloc; 2541 } else { 2542 hmeblkp->hblk_shw_bit = 0; 2543 } 2544 } 2545 SFMMU_STAT(sf_hblk_hit); 2546 } 2547 2548 /* 2549 * hat_memload() should never call kmem_cache_free(); see block 2550 * comment showing the stacktrace in sfmmu_hblk_alloc(); 2551 * enqueue each hblk in the list to reserve list if it's created 2552 * from sfmmu8_cache *and* sfmmup == KHATID. 2553 */ 2554 forcefree = (sfmmup == KHATID) ? 1 : 0; 2555 while ((pr_hblk = list) != NULL) { 2556 list = pr_hblk->hblk_next; 2557 sfmmu_cache = get_hblk_cache(pr_hblk); 2558 if ((sfmmu_cache == sfmmu8_cache) && 2559 sfmmu_put_free_hblk(pr_hblk, forcefree)) 2560 continue; 2561 2562 ASSERT(sfmmup != KHATID); 2563 kmem_cache_free(sfmmu_cache, pr_hblk); 2564 } 2565 2566 ASSERT(get_hblk_ttesz(hmeblkp) == size); 2567 ASSERT(!hmeblkp->hblk_shw_bit); 2568 2569 return (hmeblkp); 2570 } 2571 2572 /* 2573 * Function adds a tte entry into the hmeblk. It returns 0 if successful and 1 2574 * otherwise. 2575 */ 2576 static int 2577 sfmmu_tteload_addentry(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, tte_t *ttep, 2578 caddr_t vaddr, page_t **pps, uint_t flags) 2579 { 2580 page_t *pp = *pps; 2581 int hmenum, size, remap; 2582 tte_t tteold, flush_tte; 2583 #ifdef DEBUG 2584 tte_t orig_old; 2585 #endif /* DEBUG */ 2586 struct sf_hment *sfhme; 2587 kmutex_t *pml, *pmtx; 2588 hatlock_t *hatlockp; 2589 2590 /* 2591 * remove this panic when we decide to let user virtual address 2592 * space be >= USERLIMIT. 2593 */ 2594 if (!TTE_IS_PRIVILEGED(ttep) && vaddr >= (caddr_t)USERLIMIT) 2595 panic("user addr %p in kernel space", vaddr); 2596 #if defined(TTE_IS_GLOBAL) 2597 if (TTE_IS_GLOBAL(ttep)) 2598 panic("sfmmu_tteload: creating global tte"); 2599 #endif 2600 2601 #ifdef DEBUG 2602 if (pf_is_memory(sfmmu_ttetopfn(ttep, vaddr)) && 2603 !TTE_IS_PCACHEABLE(ttep) && !sfmmu_allow_nc_trans) 2604 panic("sfmmu_tteload: non cacheable memory tte"); 2605 #endif /* DEBUG */ 2606 2607 if ((flags & HAT_LOAD_SHARE) || !TTE_IS_REF(ttep) || 2608 !TTE_IS_MOD(ttep)) { 2609 /* 2610 * Don't load TSB for dummy as in ISM. Also don't preload 2611 * the TSB if the TTE isn't writable since we're likely to 2612 * fault on it again -- preloading can be fairly expensive. 
2613 */ 2614 flags |= SFMMU_NO_TSBLOAD; 2615 } 2616 2617 size = TTE_CSZ(ttep); 2618 switch (size) { 2619 case TTE8K: 2620 SFMMU_STAT(sf_tteload8k); 2621 break; 2622 case TTE64K: 2623 SFMMU_STAT(sf_tteload64k); 2624 break; 2625 case TTE512K: 2626 SFMMU_STAT(sf_tteload512k); 2627 break; 2628 case TTE4M: 2629 SFMMU_STAT(sf_tteload4m); 2630 break; 2631 case (TTE32M): 2632 SFMMU_STAT(sf_tteload32m); 2633 ASSERT(mmu_page_sizes == max_mmu_page_sizes); 2634 break; 2635 case (TTE256M): 2636 SFMMU_STAT(sf_tteload256m); 2637 ASSERT(mmu_page_sizes == max_mmu_page_sizes); 2638 break; 2639 } 2640 2641 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size))); 2642 2643 HBLKTOHME_IDX(sfhme, hmeblkp, vaddr, hmenum); 2644 2645 /* 2646 * Need to grab mlist lock here so that pageunload 2647 * will not change tte behind us. 2648 */ 2649 if (pp) { 2650 pml = sfmmu_mlist_enter(pp); 2651 } 2652 2653 sfmmu_copytte(&sfhme->hme_tte, &tteold); 2654 /* 2655 * Look for corresponding hment and if valid verify 2656 * pfns are equal. 2657 */ 2658 remap = TTE_IS_VALID(&tteold); 2659 if (remap) { 2660 pfn_t new_pfn, old_pfn; 2661 2662 old_pfn = TTE_TO_PFN(vaddr, &tteold); 2663 new_pfn = TTE_TO_PFN(vaddr, ttep); 2664 2665 if (flags & HAT_LOAD_REMAP) { 2666 /* make sure we are remapping same type of pages */ 2667 if (pf_is_memory(old_pfn) != pf_is_memory(new_pfn)) { 2668 panic("sfmmu_tteload - tte remap io<->memory"); 2669 } 2670 if (old_pfn != new_pfn && 2671 (pp != NULL || sfhme->hme_page != NULL)) { 2672 panic("sfmmu_tteload - tte remap pp != NULL"); 2673 } 2674 } else if (old_pfn != new_pfn) { 2675 panic("sfmmu_tteload - tte remap, hmeblkp 0x%p", 2676 (void *)hmeblkp); 2677 } 2678 ASSERT(TTE_CSZ(&tteold) == TTE_CSZ(ttep)); 2679 } 2680 2681 if (pp) { 2682 if (size == TTE8K) { 2683 #ifdef VAC 2684 /* 2685 * Handle VAC consistency 2686 */ 2687 if (!remap && (cache & CACHE_VAC) && !PP_ISNC(pp)) { 2688 sfmmu_vac_conflict(sfmmup, vaddr, pp); 2689 } 2690 #endif 2691 2692 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) { 2693 pmtx = sfmmu_page_enter(pp); 2694 PP_CLRRO(pp); 2695 sfmmu_page_exit(pmtx); 2696 } else if (!PP_ISMAPPED(pp) && 2697 (!TTE_IS_WRITABLE(ttep)) && !(PP_ISMOD(pp))) { 2698 pmtx = sfmmu_page_enter(pp); 2699 if (!(PP_ISMOD(pp))) { 2700 PP_SETRO(pp); 2701 } 2702 sfmmu_page_exit(pmtx); 2703 } 2704 2705 } else if (sfmmu_pagearray_setup(vaddr, pps, ttep, remap)) { 2706 /* 2707 * sfmmu_pagearray_setup failed so return 2708 */ 2709 sfmmu_mlist_exit(pml); 2710 return (1); 2711 } 2712 } 2713 2714 /* 2715 * Make sure hment is not on a mapping list. 2716 */ 2717 ASSERT(remap || (sfhme->hme_page == NULL)); 2718 2719 /* if it is not a remap then hme->next better be NULL */ 2720 ASSERT((!remap) ? sfhme->hme_next == NULL : 1); 2721 2722 if (flags & HAT_LOAD_LOCK) { 2723 if (((int)hmeblkp->hblk_lckcnt + 1) >= MAX_HBLK_LCKCNT) { 2724 panic("too high lckcnt-hmeblk %p", 2725 (void *)hmeblkp); 2726 } 2727 atomic_add_16(&hmeblkp->hblk_lckcnt, 1); 2728 2729 HBLK_STACK_TRACE(hmeblkp, HBLK_LOCK); 2730 } 2731 2732 #ifdef VAC 2733 if (pp && PP_ISNC(pp)) { 2734 /* 2735 * If the physical page is marked to be uncacheable, like 2736 * by a vac conflict, make sure the new mapping is also 2737 * uncacheable. 
2738 */ 2739 TTE_CLR_VCACHEABLE(ttep); 2740 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR); 2741 } 2742 #endif 2743 ttep->tte_hmenum = hmenum; 2744 2745 #ifdef DEBUG 2746 orig_old = tteold; 2747 #endif /* DEBUG */ 2748 2749 while (sfmmu_modifytte_try(&tteold, ttep, &sfhme->hme_tte) < 0) { 2750 if ((sfmmup == KHATID) && 2751 (flags & (HAT_LOAD_LOCK | HAT_LOAD_REMAP))) { 2752 sfmmu_copytte(&sfhme->hme_tte, &tteold); 2753 } 2754 #ifdef DEBUG 2755 chk_tte(&orig_old, &tteold, ttep, hmeblkp); 2756 #endif /* DEBUG */ 2757 } 2758 2759 if (!TTE_IS_VALID(&tteold)) { 2760 2761 atomic_add_16(&hmeblkp->hblk_vcnt, 1); 2762 atomic_add_long(&sfmmup->sfmmu_ttecnt[size], 1); 2763 2764 /* 2765 * HAT_RELOAD_SHARE has been deprecated with lpg DISM. 2766 */ 2767 2768 if (size > TTE8K && (flags & HAT_LOAD_SHARE) == 0 && 2769 sfmmup != ksfmmup) { 2770 /* 2771 * If this is the first large mapping for the process 2772 * we must force any CPUs running this process to TL=0 2773 * where they will reload the HAT flags from the 2774 * tsbmiss area. This is necessary to make the large 2775 * mappings we are about to load visible to those CPUs; 2776 * otherwise they'll loop forever calling pagefault() 2777 * since we don't search large hash chains by default. 2778 */ 2779 hatlockp = sfmmu_hat_enter(sfmmup); 2780 if (size == TTE512K && 2781 !SFMMU_FLAGS_ISSET(sfmmup, HAT_512K_FLAG)) { 2782 SFMMU_FLAGS_SET(sfmmup, HAT_512K_FLAG); 2783 sfmmu_sync_mmustate(sfmmup); 2784 } else if (size == TTE4M && 2785 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4M_FLAG)) { 2786 SFMMU_FLAGS_SET(sfmmup, HAT_4M_FLAG); 2787 sfmmu_sync_mmustate(sfmmup); 2788 } else if (size == TTE64K && 2789 !SFMMU_FLAGS_ISSET(sfmmup, HAT_64K_FLAG)) { 2790 SFMMU_FLAGS_SET(sfmmup, HAT_64K_FLAG); 2791 /* no sync mmustate; 64K shares 8K hashes */ 2792 } else if (mmu_page_sizes == max_mmu_page_sizes) { 2793 if (size == TTE32M && 2794 !SFMMU_FLAGS_ISSET(sfmmup, HAT_32M_FLAG)) { 2795 SFMMU_FLAGS_SET(sfmmup, HAT_32M_FLAG); 2796 sfmmu_sync_mmustate(sfmmup); 2797 } else if (size == TTE256M && 2798 !SFMMU_FLAGS_ISSET(sfmmup, HAT_256M_FLAG)) { 2799 SFMMU_FLAGS_SET(sfmmup, HAT_256M_FLAG); 2800 sfmmu_sync_mmustate(sfmmup); 2801 } 2802 } 2803 if (size >= TTE4M && (flags & HAT_LOAD_TEXT) && 2804 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) { 2805 SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG); 2806 } 2807 sfmmu_hat_exit(hatlockp); 2808 } 2809 } 2810 ASSERT(TTE_IS_VALID(&sfhme->hme_tte)); 2811 2812 flush_tte.tte_intlo = (tteold.tte_intlo ^ ttep->tte_intlo) & 2813 hw_tte.tte_intlo; 2814 flush_tte.tte_inthi = (tteold.tte_inthi ^ ttep->tte_inthi) & 2815 hw_tte.tte_inthi; 2816 2817 if (remap && (flush_tte.tte_inthi || flush_tte.tte_intlo)) { 2818 /* 2819 * If remap and new tte differs from old tte we need 2820 * to sync the mod bit and flush TLB/TSB. We don't 2821 * need to sync ref bit because we currently always set 2822 * ref bit in tteload. 2823 */ 2824 ASSERT(TTE_IS_REF(ttep)); 2825 if (TTE_IS_MOD(&tteold)) { 2826 sfmmu_ttesync(sfmmup, vaddr, &tteold, pp); 2827 } 2828 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 0); 2829 xt_sync(sfmmup->sfmmu_cpusran); 2830 } 2831 2832 if ((flags & SFMMU_NO_TSBLOAD) == 0) { 2833 /* 2834 * We only preload 8K and 4M mappings into the TSB, since 2835 * 64K and 512K mappings are replicated and hence don't 2836 * have a single, unique TSB entry. Ditto for 32M/256M. 
2837 */ 2838 if (size == TTE8K || size == TTE4M) { 2839 hatlockp = sfmmu_hat_enter(sfmmup); 2840 sfmmu_load_tsb(sfmmup, vaddr, &sfhme->hme_tte, size); 2841 sfmmu_hat_exit(hatlockp); 2842 } 2843 } 2844 if (pp) { 2845 if (!remap) { 2846 HME_ADD(sfhme, pp); 2847 atomic_add_16(&hmeblkp->hblk_hmecnt, 1); 2848 ASSERT(hmeblkp->hblk_hmecnt > 0); 2849 2850 /* 2851 * Cannot ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS) 2852 * see pageunload() for comment. 2853 */ 2854 } 2855 sfmmu_mlist_exit(pml); 2856 } 2857 2858 return (0); 2859 } 2860 /* 2861 * Function unlocks hash bucket. 2862 */ 2863 static void 2864 sfmmu_tteload_release_hashbucket(struct hmehash_bucket *hmebp) 2865 { 2866 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 2867 SFMMU_HASH_UNLOCK(hmebp); 2868 } 2869 2870 /* 2871 * Function that checks and sets up the page array for a large 2872 * translation. Will set p_vcolor, p_index, p_ro fields. 2873 * Assumes addr and pfnum of first page are properly aligned. 2874 * Will check for physical contiguity. If the check fails it returns 2875 * nonzero. 2876 */ 2877 static int 2878 sfmmu_pagearray_setup(caddr_t addr, page_t **pps, tte_t *ttep, int remap) 2879 { 2880 int i, index, ttesz; 2881 pfn_t pfnum; 2882 pgcnt_t npgs; 2883 page_t *pp, *pp1; 2884 kmutex_t *pmtx; 2885 #ifdef VAC 2886 int osz; 2887 int cflags = 0; 2888 int vac_err = 0; 2889 #endif 2890 int newidx = 0; 2891 2892 ttesz = TTE_CSZ(ttep); 2893 2894 ASSERT(ttesz > TTE8K); 2895 2896 npgs = TTEPAGES(ttesz); 2897 index = PAGESZ_TO_INDEX(ttesz); 2898 2899 pfnum = (*pps)->p_pagenum; 2900 ASSERT(IS_P2ALIGNED(pfnum, npgs)); 2901 2902 /* 2903 * Save the first pp so we can do HAT_TMPNC at the end. 2904 */ 2905 pp1 = *pps; 2906 #ifdef VAC 2907 osz = fnd_mapping_sz(pp1); 2908 #endif 2909 2910 for (i = 0; i < npgs; i++, pps++) { 2911 pp = *pps; 2912 ASSERT(PAGE_LOCKED(pp)); 2913 ASSERT(pp->p_szc >= ttesz); 2914 ASSERT(pp->p_szc == pp1->p_szc); 2915 ASSERT(sfmmu_mlist_held(pp)); 2916 2917 /* 2918 * XXX is it possible to maintain P_RO on the root only? 2919 */ 2920 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) { 2921 pmtx = sfmmu_page_enter(pp); 2922 PP_CLRRO(pp); 2923 sfmmu_page_exit(pmtx); 2924 } else if (!PP_ISMAPPED(pp) && !TTE_IS_WRITABLE(ttep) && 2925 !PP_ISMOD(pp)) { 2926 pmtx = sfmmu_page_enter(pp); 2927 if (!(PP_ISMOD(pp))) { 2928 PP_SETRO(pp); 2929 } 2930 sfmmu_page_exit(pmtx); 2931 } 2932 2933 /* 2934 * If this is a remap we skip vac & contiguity checks. 2935 */ 2936 if (remap) 2937 continue; 2938 2939 /* 2940 * set p_vcolor and detect any vac conflicts. 2941 */ 2942 #ifdef VAC 2943 if (vac_err == 0) { 2944 vac_err = sfmmu_vacconflict_array(addr, pp, &cflags); 2945 2946 } 2947 #endif 2948 2949 /* 2950 * Save current index in case we need to undo it. 2951 * Note: "PAGESZ_TO_INDEX(sz) (1 << (sz))" 2952 * "SFMMU_INDEX_SHIFT 6" 2953 * "SFMMU_INDEX_MASK ((1 << SFMMU_INDEX_SHIFT) - 1)" 2954 * "PP_MAPINDEX(p_index) (p_index & SFMMU_INDEX_MASK)" 2955 * 2956 * So: index = PAGESZ_TO_INDEX(ttesz); 2957 * if ttesz == 1 then index = 0x2 2958 * 2 then index = 0x4 2959 * 3 then index = 0x8 2960 * 4 then index = 0x10 2961 * 5 then index = 0x20 2962 * The code below checks if it's a new pagesize (i.e., newidx) 2963 * in case we need to take it back out of p_index, 2964 * and then or's the new index into the existing index.
2965 */ 2966 if ((PP_MAPINDEX(pp) & index) == 0) 2967 newidx = 1; 2968 pp->p_index = (PP_MAPINDEX(pp) | index); 2969 2970 /* 2971 * contiguity check 2972 */ 2973 if (pp->p_pagenum != pfnum) { 2974 /* 2975 * If we fail the contiguity test then 2976 * the only thing we need to fix is the p_index field. 2977 * We might get a few extra flushes but since this 2978 * path is rare that is ok. The p_ro field will 2979 * get automatically fixed on the next tteload to 2980 * the page. NO TNC bit is set yet. 2981 */ 2982 while (i >= 0) { 2983 pp = *pps; 2984 if (newidx) 2985 pp->p_index = (PP_MAPINDEX(pp) & 2986 ~index); 2987 pps--; 2988 i--; 2989 } 2990 return (1); 2991 } 2992 pfnum++; 2993 addr += MMU_PAGESIZE; 2994 } 2995 2996 #ifdef VAC 2997 if (vac_err) { 2998 if (ttesz > osz) { 2999 /* 3000 * There are some smaller mappings that cause vac 3001 * conflicts. Convert all existing small mappings to 3002 * TNC. 3003 */ 3004 SFMMU_STAT_ADD(sf_uncache_conflict, npgs); 3005 sfmmu_page_cache_array(pp1, HAT_TMPNC, CACHE_FLUSH, 3006 npgs); 3007 } else { 3008 /* EMPTY */ 3009 /* 3010 * If there exists a big page mapping, 3011 * that means the whole existing big page 3012 * has the TNC setting already. No need to convert to 3013 * TNC again. 3014 */ 3015 ASSERT(PP_ISTNC(pp1)); 3016 } 3017 } 3018 #endif /* VAC */ 3019 3020 return (0); 3021 } 3022 3023 #ifdef VAC 3024 /* 3025 * Routine that checks vac consistency for a large page. It also 3026 * sets the virtual color for all pp's for this big mapping. 3027 */ 3028 static int 3029 sfmmu_vacconflict_array(caddr_t addr, page_t *pp, int *cflags) 3030 { 3031 int vcolor, ocolor; 3032 3033 ASSERT(sfmmu_mlist_held(pp)); 3034 3035 if (PP_ISNC(pp)) { 3036 return (HAT_TMPNC); 3037 } 3038 3039 vcolor = addr_to_vcolor(addr); 3040 if (PP_NEWPAGE(pp)) { 3041 PP_SET_VCOLOR(pp, vcolor); 3042 return (0); 3043 } 3044 3045 ocolor = PP_GET_VCOLOR(pp); 3046 if (ocolor == vcolor) { 3047 return (0); 3048 } 3049 3050 if (!PP_ISMAPPED(pp)) { 3051 /* 3052 * The previous user of the page had a different color 3053 * but since there are no current users 3054 * we just flush the cache and change the color. 3055 * As an optimization for large pages we flush the 3056 * entire cache of that color and set a flag. 3057 */ 3058 SFMMU_STAT(sf_pgcolor_conflict); 3059 if (!CacheColor_IsFlushed(*cflags, ocolor)) { 3060 CacheColor_SetFlushed(*cflags, ocolor); 3061 sfmmu_cache_flushcolor(ocolor, pp->p_pagenum); 3062 } 3063 PP_SET_VCOLOR(pp, vcolor); 3064 return (0); 3065 } 3066 3067 /* 3068 * We got a real conflict with a current mapping. 3069 * Set flags to start uncaching all mappings 3070 * and return failure so we restart looping over 3071 * the pp array from the beginning. 3072 */ 3073 return (HAT_TMPNC); 3074 } 3075 #endif /* VAC */ 3076 3077 /* 3078 * Creates a large page shadow hmeblk for a tte. 3079 * The purpose of this routine is to allow us to do quick unloads because 3080 * the vm layer can easily pass a very large but sparsely populated range.
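 * The shadow hmeblk lives at the next larger mapping size in the
 * hash; each bit set in its hblk_shw_mask (via vshift below) records
 * that the corresponding smaller-size region underneath may still
 * contain hmeblks, which lets the unload path skip the empty portions
 * of a large, sparse range.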
3081 */ 3082 static struct hme_blk * 3083 sfmmu_shadow_hcreate(sfmmu_t *sfmmup, caddr_t vaddr, int ttesz, uint_t flags) 3084 { 3085 struct hmehash_bucket *hmebp; 3086 hmeblk_tag hblktag; 3087 int hmeshift, size, vshift; 3088 uint_t shw_mask, newshw_mask; 3089 struct hme_blk *hmeblkp; 3090 3091 ASSERT(sfmmup != KHATID); 3092 if (mmu_page_sizes == max_mmu_page_sizes) { 3093 ASSERT(ttesz < TTE256M); 3094 } else { 3095 ASSERT(ttesz < TTE4M); 3096 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0); 3097 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0); 3098 } 3099 3100 if (ttesz == TTE8K) { 3101 size = TTE512K; 3102 } else { 3103 size = ++ttesz; 3104 } 3105 3106 hblktag.htag_id = sfmmup; 3107 hmeshift = HME_HASH_SHIFT(size); 3108 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift); 3109 hblktag.htag_rehash = HME_HASH_REHASH(size); 3110 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift); 3111 3112 SFMMU_HASH_LOCK(hmebp); 3113 3114 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 3115 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve); 3116 if (hmeblkp == NULL) { 3117 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size, 3118 hblktag, flags); 3119 } 3120 ASSERT(hmeblkp); 3121 if (!hmeblkp->hblk_shw_mask) { 3122 /* 3123 * If this is an unused hblk it was either just allocated or 3124 * could potentially be a previous large page hblk, so we need 3125 * to set the shadow bit. 3126 */ 3127 hmeblkp->hblk_shw_bit = 1; 3128 } 3129 ASSERT(hmeblkp->hblk_shw_bit == 1); 3130 vshift = vaddr_to_vshift(hblktag, vaddr, size); 3131 ASSERT(vshift < 8); 3132 /* 3133 * Atomically set shw mask bit 3134 */ 3135 do { 3136 shw_mask = hmeblkp->hblk_shw_mask; 3137 newshw_mask = shw_mask | (1 << vshift); 3138 newshw_mask = cas32(&hmeblkp->hblk_shw_mask, shw_mask, 3139 newshw_mask); 3140 } while (newshw_mask != shw_mask); 3141 3142 SFMMU_HASH_UNLOCK(hmebp); 3143 3144 return (hmeblkp); 3145 } 3146 3147 /* 3148 * This routine cleans up a previous shadow hmeblk and changes it to 3149 * a regular hblk. This happens rarely but it is possible 3150 * when a process wants to use large pages and there are hblks still 3151 * lying around from the previous as that used these hmeblks. 3152 * The alternative was to clean up the shadow hblks at unload time 3153 * but since so few user processes actually use large pages, it is 3154 * better to be lazy and clean up at this time.
3155 */ 3156 static void 3157 sfmmu_shadow_hcleanup(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, 3158 struct hmehash_bucket *hmebp) 3159 { 3160 caddr_t addr, endaddr; 3161 int hashno, size; 3162 3163 ASSERT(hmeblkp->hblk_shw_bit); 3164 3165 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 3166 3167 if (!hmeblkp->hblk_shw_mask) { 3168 hmeblkp->hblk_shw_bit = 0; 3169 return; 3170 } 3171 addr = (caddr_t)get_hblk_base(hmeblkp); 3172 endaddr = get_hblk_endaddr(hmeblkp); 3173 size = get_hblk_ttesz(hmeblkp); 3174 hashno = size - 1; 3175 ASSERT(hashno > 0); 3176 SFMMU_HASH_UNLOCK(hmebp); 3177 3178 sfmmu_free_hblks(sfmmup, addr, endaddr, hashno); 3179 3180 SFMMU_HASH_LOCK(hmebp); 3181 } 3182 3183 static void 3184 sfmmu_free_hblks(sfmmu_t *sfmmup, caddr_t addr, caddr_t endaddr, 3185 int hashno) 3186 { 3187 int hmeshift, shadow = 0; 3188 hmeblk_tag hblktag; 3189 struct hmehash_bucket *hmebp; 3190 struct hme_blk *hmeblkp; 3191 struct hme_blk *nx_hblk, *pr_hblk, *list = NULL; 3192 uint64_t hblkpa, prevpa, nx_pa; 3193 3194 ASSERT(hashno > 0); 3195 hblktag.htag_id = sfmmup; 3196 hblktag.htag_rehash = hashno; 3197 3198 hmeshift = HME_HASH_SHIFT(hashno); 3199 3200 while (addr < endaddr) { 3201 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3202 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 3203 SFMMU_HASH_LOCK(hmebp); 3204 /* inline HME_HASH_SEARCH */ 3205 hmeblkp = hmebp->hmeblkp; 3206 hblkpa = hmebp->hmeh_nextpa; 3207 prevpa = 0; 3208 pr_hblk = NULL; 3209 while (hmeblkp) { 3210 ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp)); 3211 if (HTAGS_EQ(hmeblkp->hblk_tag, hblktag)) { 3212 /* found hme_blk */ 3213 if (hmeblkp->hblk_shw_bit) { 3214 if (hmeblkp->hblk_shw_mask) { 3215 shadow = 1; 3216 sfmmu_shadow_hcleanup(sfmmup, 3217 hmeblkp, hmebp); 3218 break; 3219 } else { 3220 hmeblkp->hblk_shw_bit = 0; 3221 } 3222 } 3223 3224 /* 3225 * hblk_hmecnt and hblk_vcnt could be non-zero 3226 * since hblk_unload() does not guarantee that. 3227 * 3228 * XXX - this could cause tteload() to spin 3229 * where sfmmu_shadow_hcleanup() is called. 3230 */ 3231 } 3232 3233 nx_hblk = hmeblkp->hblk_next; 3234 nx_pa = hmeblkp->hblk_nextpa; 3235 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 3236 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, 3237 pr_hblk); 3238 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 3239 } else { 3240 pr_hblk = hmeblkp; 3241 prevpa = hblkpa; 3242 } 3243 hmeblkp = nx_hblk; 3244 hblkpa = nx_pa; 3245 } 3246 3247 SFMMU_HASH_UNLOCK(hmebp); 3248 3249 if (shadow) { 3250 /* 3251 * We found another shadow hblk, so we cleaned up its 3252 * children. We need to go back and clean up 3253 * the original hblk so we don't change the 3254 * addr. 3255 */ 3256 shadow = 0; 3257 } else { 3258 addr = (caddr_t)roundup((uintptr_t)addr + 1, 3259 (1 << hmeshift)); 3260 } 3261 } 3262 sfmmu_hblks_list_purge(&list); 3263 } 3264 3265 /* 3266 * Release one hardware address translation lock on the given address range. 3267 */ 3268 void 3269 hat_unlock(struct hat *sfmmup, caddr_t addr, size_t len) 3270 { 3271 struct hmehash_bucket *hmebp; 3272 hmeblk_tag hblktag; 3273 int hmeshift, hashno = 1; 3274 struct hme_blk *hmeblkp, *list = NULL; 3275 caddr_t endaddr; 3276 3277 ASSERT(sfmmup != NULL); 3278 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 3279 3280 ASSERT((sfmmup == ksfmmup) || 3281 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 3282 ASSERT((len & MMU_PAGEOFFSET) == 0); 3283 endaddr = addr + len; 3284 hblktag.htag_id = sfmmup; 3285 3286 /* 3287 * Spitfire supports 4 page sizes.
3288 * Most pages are expected to be of the smallest page size (8K) and 3289 * these will not need to be rehashed. 64K pages also don't need to be 3290 * rehashed because an hmeblk spans 64K of address space. 512K pages 3291 * might need 1 rehash and 4M pages might need 2 rehashes. 3292 */ 3293 while (addr < endaddr) { 3294 hmeshift = HME_HASH_SHIFT(hashno); 3295 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3296 hblktag.htag_rehash = hashno; 3297 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 3298 3299 SFMMU_HASH_LOCK(hmebp); 3300 3301 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 3302 if (hmeblkp != NULL) { 3303 /* 3304 * If we encounter a shadow hmeblk then 3305 * we know there are no valid hmeblks mapping 3306 * this address at this size or larger. 3307 * Just increment address by the smallest 3308 * page size. 3309 */ 3310 if (hmeblkp->hblk_shw_bit) { 3311 addr += MMU_PAGESIZE; 3312 } else { 3313 addr = sfmmu_hblk_unlock(hmeblkp, addr, 3314 endaddr); 3315 } 3316 SFMMU_HASH_UNLOCK(hmebp); 3317 hashno = 1; 3318 continue; 3319 } 3320 SFMMU_HASH_UNLOCK(hmebp); 3321 3322 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 3323 /* 3324 * We have traversed the whole list and rehashed 3325 * if necessary without finding the address to unlock 3326 * which should never happen. 3327 */ 3328 panic("sfmmu_unlock: addr not found. " 3329 "addr %p hat %p", (void *)addr, (void *)sfmmup); 3330 } else { 3331 hashno++; 3332 } 3333 } 3334 3335 sfmmu_hblks_list_purge(&list); 3336 } 3337 3338 /* 3339 * Function to unlock a range of addresses in an hmeblk. It returns the 3340 * next address that needs to be unlocked. 3341 * Should be called with the hash lock held. 3342 */ 3343 static caddr_t 3344 sfmmu_hblk_unlock(struct hme_blk *hmeblkp, caddr_t addr, caddr_t endaddr) 3345 { 3346 struct sf_hment *sfhme; 3347 tte_t tteold, ttemod; 3348 int ttesz, ret; 3349 3350 ASSERT(in_hblk_range(hmeblkp, addr)); 3351 ASSERT(hmeblkp->hblk_shw_bit == 0); 3352 3353 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 3354 ttesz = get_hblk_ttesz(hmeblkp); 3355 3356 HBLKTOHME(sfhme, hmeblkp, addr); 3357 while (addr < endaddr) { 3358 readtte: 3359 sfmmu_copytte(&sfhme->hme_tte, &tteold); 3360 if (TTE_IS_VALID(&tteold)) { 3361 3362 ttemod = tteold; 3363 3364 ret = sfmmu_modifytte_try(&tteold, &ttemod, 3365 &sfhme->hme_tte); 3366 3367 if (ret < 0) 3368 goto readtte; 3369 3370 if (hmeblkp->hblk_lckcnt == 0) 3371 panic("zero hblk lckcnt"); 3372 3373 if (((uintptr_t)addr + TTEBYTES(ttesz)) > 3374 (uintptr_t)endaddr) 3375 panic("can't unlock large tte"); 3376 3377 ASSERT(hmeblkp->hblk_lckcnt > 0); 3378 atomic_add_16(&hmeblkp->hblk_lckcnt, -1); 3379 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK); 3380 } else { 3381 panic("sfmmu_hblk_unlock: invalid tte"); 3382 } 3383 addr += TTEBYTES(ttesz); 3384 sfhme++; 3385 } 3386 return (addr); 3387 } 3388 3389 /* 3390 * Physical Address Mapping Framework 3391 * 3392 * General rules: 3393 * 3394 * (1) Applies only to seg_kmem memory pages. To make things easier, 3395 * seg_kpm addresses are also accepted by the routines, but nothing 3396 * is done with them since by definition their PA mappings are static. 3397 * (2) hat_add_callback() may only be called while holding the page lock 3398 * SE_SHARED or SE_EXCL of the underlying page (e.g., as_pagelock()), 3399 * or passing HAC_PAGELOCK flag. 3400 * (3) prehandler() and posthandler() may not call hat_add_callback() or 3401 * hat_delete_callback(), nor should they allocate memory.
Post quiesce 3402 * callbacks may not sleep or acquire adaptive mutex locks. 3403 * (4) Either prehandler() or posthandler() (but not both) may be specified 3404 * as being NULL. Specifying an errhandler() is optional. 3405 * 3406 * Details of using the framework: 3407 * 3408 * registering a callback (hat_register_callback()) 3409 * 3410 * Pass prehandler, posthandler, errhandler addresses 3411 * as described below. If capture_cpus argument is nonzero, 3412 * suspend callback to the prehandler will occur with CPUs 3413 * captured and executing xc_loop() and CPUs will remain 3414 * captured until after the posthandler suspend callback 3415 * occurs. 3416 * 3417 * adding a callback (hat_add_callback()) 3418 * 3419 * as_pagelock(); 3420 * hat_add_callback(); 3421 * save returned pfn in private data structures or program registers; 3422 * as_pageunlock(); 3423 * 3424 * prehandler() 3425 * 3426 * Stop all accesses by physical address to this memory page. 3427 * Called twice: the first, PRESUSPEND, is a context safe to acquire 3428 * adaptive locks. The second, SUSPEND, is called at high PIL with 3429 * CPUs captured so adaptive locks may NOT be acquired (and all spin 3430 * locks must be XCALL_PIL or higher locks). 3431 * 3432 * May return the following errors: 3433 * EIO: A fatal error has occurred. This will result in panic. 3434 * EAGAIN: The page cannot be suspended. This will fail the 3435 * relocation. 3436 * 0: Success. 3437 * 3438 * posthandler() 3439 * 3440 * Save new pfn in private data structures or program registers; 3441 * not allowed to fail (non-zero return values will result in panic). 3442 * 3443 * errhandler() 3444 * 3445 * called when an error occurs related to the callback. Currently 3446 * the only such error is HAT_CB_ERR_LEAKED which indicates that 3447 * a page is being freed, but there are still outstanding callback(s) 3448 * registered on the page. 3449 * 3450 * removing a callback (hat_delete_callback(); e.g., prior to freeing memory) 3451 * 3452 * stop using physical address 3453 * hat_delete_callback(); 3454 * 3455 */ 3456 3457 /* 3458 * Register a callback class. Each subsystem should do this once and 3459 * cache the id_t returned for use in setting up and tearing down callbacks. 3460 * 3461 * There is no facility for removing callback IDs once they are created; 3462 * the "key" should be unique for each module, so in case a module is unloaded 3463 * and subsequently re-loaded, we can recycle the module's previous entry. 3464 */ 3465 id_t 3466 hat_register_callback(int key, 3467 int (*prehandler)(caddr_t, uint_t, uint_t, void *), 3468 int (*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t), 3469 int (*errhandler)(caddr_t, uint_t, uint_t, void *), 3470 int capture_cpus) 3471 { 3472 id_t id; 3473 3474 /* 3475 * Search the table for a pre-existing callback associated with 3476 * the identifier "key". If one exists, we re-use that entry in 3477 * the table for this instance, otherwise we assign the next 3478 * available table slot. 
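 *
 * As a purely illustrative sketch (all names are hypothetical), a
 * subsystem typically registers once, early in its life, and caches
 * the returned id:
 *
 *	mymod_cbid = hat_register_callback(MYMOD_KEY,
 *	    mymod_presuspend, mymod_postsuspend, NULL, 0);
 *
 * and later passes mymod_cbid to hat_add_callback() for each address
 * range it needs to track.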
3479 */ 3480 for (id = 0; id < sfmmu_max_cb_id; id++) { 3481 if (sfmmu_cb_table[id].key == key) 3482 break; 3483 } 3484 3485 if (id == sfmmu_max_cb_id) { 3486 id = sfmmu_cb_nextid++; 3487 if (id >= sfmmu_max_cb_id) 3488 panic("hat_register_callback: out of callback IDs"); 3489 } 3490 3491 ASSERT(prehandler != NULL || posthandler != NULL); 3492 3493 sfmmu_cb_table[id].key = key; 3494 sfmmu_cb_table[id].prehandler = prehandler; 3495 sfmmu_cb_table[id].posthandler = posthandler; 3496 sfmmu_cb_table[id].errhandler = errhandler; 3497 sfmmu_cb_table[id].capture_cpus = capture_cpus; 3498 3499 return (id); 3500 } 3501 3502 #define HAC_COOKIE_NONE (void *)-1 3503 3504 /* 3505 * Add relocation callbacks to the specified addr/len which will be called 3506 * when relocating the associated page. See the description of pre and 3507 * posthandler above for more details. 3508 * 3509 * If HAC_PAGELOCK is included in flags, the underlying memory page is 3510 * locked internally so the caller must be able to deal with the callback 3511 * running even before this function has returned. If HAC_PAGELOCK is not 3512 * set, it is assumed that the underlying memory pages are locked. 3513 * 3514 * Since the caller must track the individual page boundaries anyway, 3515 * we only allow a callback to be added to a single page (large 3516 * or small). Thus [addr, addr + len) MUST be contained within a single 3517 * page. 3518 * 3519 * Registering multiple callbacks on the same [addr, addr+len) is supported, 3520 * _provided_that_ a unique parameter is specified for each callback. 3521 * If multiple callbacks are registered on the same range the callback will 3522 * be invoked with each unique parameter. Registering the same callback with 3523 * the same argument more than once will result in corrupted kernel state. 3524 * 3525 * Returns the pfn of the underlying kernel page in *rpfn 3526 * on success, or PFN_INVALID on failure. 3527 * 3528 * cookiep (if passed) provides storage space for an opaque cookie 3529 * to return later to hat_delete_callback(). This cookie makes the callback 3530 * deletion significantly quicker by avoiding a potentially lengthy hash 3531 * search. 3532 * 3533 * Returns values: 3534 * 0: success 3535 * ENOMEM: memory allocation failure (e.g. flags was passed as HAC_NOSLEEP) 3536 * EINVAL: callback ID is not valid 3537 * ENXIO: ["vaddr", "vaddr" + len) is not mapped in the kernel's address 3538 * space 3539 * ERANGE: ["vaddr", "vaddr" + len) crosses a page boundary 3540 */ 3541 int 3542 hat_add_callback(id_t callback_id, caddr_t vaddr, uint_t len, uint_t flags, 3543 void *pvt, pfn_t *rpfn, void **cookiep) 3544 { 3545 struct hmehash_bucket *hmebp; 3546 hmeblk_tag hblktag; 3547 struct hme_blk *hmeblkp; 3548 int hmeshift, hashno; 3549 caddr_t saddr, eaddr, baseaddr; 3550 struct pa_hment *pahmep; 3551 struct sf_hment *sfhmep, *osfhmep; 3552 kmutex_t *pml; 3553 tte_t tte; 3554 page_t *pp; 3555 vnode_t *vp; 3556 u_offset_t off; 3557 pfn_t pfn; 3558 int kmflags = (flags & HAC_SLEEP)? KM_SLEEP : KM_NOSLEEP; 3559 int locked = 0; 3560 3561 /* 3562 * For KPM mappings, just return the physical address since we 3563 * don't need to register any callbacks. 
3564 */ 3565 if (IS_KPM_ADDR(vaddr)) { 3566 uint64_t paddr; 3567 SFMMU_KPM_VTOP(vaddr, paddr); 3568 *rpfn = btop(paddr); 3569 if (cookiep != NULL) 3570 *cookiep = HAC_COOKIE_NONE; 3571 return (0); 3572 } 3573 3574 if (callback_id < (id_t)0 || callback_id >= sfmmu_cb_nextid) { 3575 *rpfn = PFN_INVALID; 3576 return (EINVAL); 3577 } 3578 3579 if ((pahmep = kmem_cache_alloc(pa_hment_cache, kmflags)) == NULL) { 3580 *rpfn = PFN_INVALID; 3581 return (ENOMEM); 3582 } 3583 3584 sfhmep = &pahmep->sfment; 3585 3586 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK); 3587 eaddr = saddr + len; 3588 3589 rehash: 3590 /* Find the mapping(s) for this page */ 3591 for (hashno = TTE64K, hmeblkp = NULL; 3592 hmeblkp == NULL && hashno <= mmu_hashcnt; 3593 hashno++) { 3594 hmeshift = HME_HASH_SHIFT(hashno); 3595 hblktag.htag_id = ksfmmup; 3596 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift); 3597 hblktag.htag_rehash = hashno; 3598 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift); 3599 3600 SFMMU_HASH_LOCK(hmebp); 3601 3602 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 3603 3604 if (hmeblkp == NULL) 3605 SFMMU_HASH_UNLOCK(hmebp); 3606 } 3607 3608 if (hmeblkp == NULL) { 3609 kmem_cache_free(pa_hment_cache, pahmep); 3610 *rpfn = PFN_INVALID; 3611 return (ENXIO); 3612 } 3613 3614 HBLKTOHME(osfhmep, hmeblkp, saddr); 3615 sfmmu_copytte(&osfhmep->hme_tte, &tte); 3616 3617 if (!TTE_IS_VALID(&tte)) { 3618 SFMMU_HASH_UNLOCK(hmebp); 3619 kmem_cache_free(pa_hment_cache, pahmep); 3620 *rpfn = PFN_INVALID; 3621 return (ENXIO); 3622 } 3623 3624 /* 3625 * Make sure the boundaries for the callback fall within this 3626 * single mapping. 3627 */ 3628 baseaddr = (caddr_t)get_hblk_base(hmeblkp); 3629 ASSERT(saddr >= baseaddr); 3630 if (eaddr > saddr + TTEBYTES(TTE_CSZ(&tte))) { 3631 SFMMU_HASH_UNLOCK(hmebp); 3632 kmem_cache_free(pa_hment_cache, pahmep); 3633 *rpfn = PFN_INVALID; 3634 return (ERANGE); 3635 } 3636 3637 pfn = sfmmu_ttetopfn(&tte, vaddr); 3638 3639 /* 3640 * The pfn may not have a page_t underneath in which case we 3641 * just return it. This can happen if we are doing I/O to a 3642 * static portion of the kernel's address space, for instance. 3643 */ 3644 pp = osfhmep->hme_page; 3645 if (pp == NULL) { 3646 SFMMU_HASH_UNLOCK(hmebp); 3647 kmem_cache_free(pa_hment_cache, pahmep); 3648 *rpfn = pfn; 3649 if (cookiep) 3650 *cookiep = HAC_COOKIE_NONE; 3651 return (0); 3652 } 3653 ASSERT(pp == PP_PAGEROOT(pp)); 3654 3655 vp = pp->p_vnode; 3656 off = pp->p_offset; 3657 3658 pml = sfmmu_mlist_enter(pp); 3659 3660 if (flags & HAC_PAGELOCK) { 3661 if (!page_trylock(pp, SE_SHARED)) { 3662 /* 3663 * Somebody is holding SE_EXCL lock. Might 3664 * even be hat_page_relocate(). Drop all 3665 * our locks, lookup the page in &kvp, and 3666 * retry. If it doesn't exist in &kvp, then 3667 * we must be dealing with a kernel mapped 3668 * page which doesn't actually belong to 3669 * segkmem so we punt. 3670 */ 3671 sfmmu_mlist_exit(pml); 3672 SFMMU_HASH_UNLOCK(hmebp); 3673 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED); 3674 if (pp == NULL) { 3675 kmem_cache_free(pa_hment_cache, pahmep); 3676 *rpfn = pfn; 3677 if (cookiep) 3678 *cookiep = HAC_COOKIE_NONE; 3679 return (0); 3680 } 3681 page_unlock(pp); 3682 goto rehash; 3683 } 3684 locked = 1; 3685 } 3686 3687 if (!PAGE_LOCKED(pp) && !panicstr) 3688 panic("hat_add_callback: page 0x%p not locked", pp); 3689 3690 if (osfhmep->hme_page != pp || pp->p_vnode != vp || 3691 pp->p_offset != off) { 3692 /* 3693 * The page moved before we got our hands on it. 
Drop 3694 * all the locks and try again. 3695 */ 3696 ASSERT((flags & HAC_PAGELOCK) != 0); 3697 sfmmu_mlist_exit(pml); 3698 SFMMU_HASH_UNLOCK(hmebp); 3699 page_unlock(pp); 3700 locked = 0; 3701 goto rehash; 3702 } 3703 3704 if (vp != &kvp) { 3705 /* 3706 * This is not a segkmem page but another page which 3707 * has been kernel mapped. It had better have at least 3708 * a share lock on it. Return the pfn. 3709 */ 3710 sfmmu_mlist_exit(pml); 3711 SFMMU_HASH_UNLOCK(hmebp); 3712 if (locked) 3713 page_unlock(pp); 3714 kmem_cache_free(pa_hment_cache, pahmep); 3715 ASSERT(PAGE_LOCKED(pp)); 3716 *rpfn = pfn; 3717 if (cookiep) 3718 *cookiep = HAC_COOKIE_NONE; 3719 return (0); 3720 } 3721 3722 /* 3723 * Setup this pa_hment and link its embedded dummy sf_hment into 3724 * the mapping list. 3725 */ 3726 pp->p_share++; 3727 pahmep->cb_id = callback_id; 3728 pahmep->addr = vaddr; 3729 pahmep->len = len; 3730 pahmep->refcnt = 1; 3731 pahmep->flags = 0; 3732 pahmep->pvt = pvt; 3733 3734 sfhmep->hme_tte.ll = 0; 3735 sfhmep->hme_data = pahmep; 3736 sfhmep->hme_prev = osfhmep; 3737 sfhmep->hme_next = osfhmep->hme_next; 3738 3739 if (osfhmep->hme_next) 3740 osfhmep->hme_next->hme_prev = sfhmep; 3741 3742 osfhmep->hme_next = sfhmep; 3743 3744 sfmmu_mlist_exit(pml); 3745 SFMMU_HASH_UNLOCK(hmebp); 3746 3747 if (locked) 3748 page_unlock(pp); 3749 3750 *rpfn = pfn; 3751 if (cookiep) 3752 *cookiep = (void *)pahmep; 3753 3754 return (0); 3755 } 3756 3757 /* 3758 * Remove the relocation callbacks from the specified addr/len. 3759 */ 3760 void 3761 hat_delete_callback(caddr_t vaddr, uint_t len, void *pvt, uint_t flags, 3762 void *cookie) 3763 { 3764 struct hmehash_bucket *hmebp; 3765 hmeblk_tag hblktag; 3766 struct hme_blk *hmeblkp; 3767 int hmeshift, hashno; 3768 caddr_t saddr; 3769 struct pa_hment *pahmep; 3770 struct sf_hment *sfhmep, *osfhmep; 3771 kmutex_t *pml; 3772 tte_t tte; 3773 page_t *pp; 3774 vnode_t *vp; 3775 u_offset_t off; 3776 int locked = 0; 3777 3778 /* 3779 * If the cookie is HAC_COOKIE_NONE then there is no pa_hment to 3780 * remove so just return. 3781 */ 3782 if (cookie == HAC_COOKIE_NONE || IS_KPM_ADDR(vaddr)) 3783 return; 3784 3785 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK); 3786 3787 rehash: 3788 /* Find the mapping(s) for this page */ 3789 for (hashno = TTE64K, hmeblkp = NULL; 3790 hmeblkp == NULL && hashno <= mmu_hashcnt; 3791 hashno++) { 3792 hmeshift = HME_HASH_SHIFT(hashno); 3793 hblktag.htag_id = ksfmmup; 3794 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift); 3795 hblktag.htag_rehash = hashno; 3796 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift); 3797 3798 SFMMU_HASH_LOCK(hmebp); 3799 3800 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 3801 3802 if (hmeblkp == NULL) 3803 SFMMU_HASH_UNLOCK(hmebp); 3804 } 3805 3806 if (hmeblkp == NULL) 3807 return; 3808 3809 HBLKTOHME(osfhmep, hmeblkp, saddr); 3810 3811 sfmmu_copytte(&osfhmep->hme_tte, &tte); 3812 if (!TTE_IS_VALID(&tte)) { 3813 SFMMU_HASH_UNLOCK(hmebp); 3814 return; 3815 } 3816 3817 pp = osfhmep->hme_page; 3818 if (pp == NULL) { 3819 SFMMU_HASH_UNLOCK(hmebp); 3820 ASSERT(cookie == NULL); 3821 return; 3822 } 3823 3824 vp = pp->p_vnode; 3825 off = pp->p_offset; 3826 3827 pml = sfmmu_mlist_enter(pp); 3828 3829 if (flags & HAC_PAGELOCK) { 3830 if (!page_trylock(pp, SE_SHARED)) { 3831 /* 3832 * Somebody is holding SE_EXCL lock. Might 3833 * even be hat_page_relocate(). Drop all 3834 * our locks, lookup the page in &kvp, and 3835 * retry. 
If it doesn't exist in &kvp, then 3836 * we must be dealing with a kernel mapped 3837 * page which doesn't actually belong to 3838 * segkmem so we punt. 3839 */ 3840 sfmmu_mlist_exit(pml); 3841 SFMMU_HASH_UNLOCK(hmebp); 3842 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED); 3843 if (pp == NULL) { 3844 ASSERT(cookie == NULL); 3845 return; 3846 } 3847 page_unlock(pp); 3848 goto rehash; 3849 } 3850 locked = 1; 3851 } 3852 3853 ASSERT(PAGE_LOCKED(pp)); 3854 3855 if (osfhmep->hme_page != pp || pp->p_vnode != vp || 3856 pp->p_offset != off) { 3857 /* 3858 * The page moved before we got our hands on it. Drop 3859 * all the locks and try again. 3860 */ 3861 ASSERT((flags & HAC_PAGELOCK) != 0); 3862 sfmmu_mlist_exit(pml); 3863 SFMMU_HASH_UNLOCK(hmebp); 3864 page_unlock(pp); 3865 locked = 0; 3866 goto rehash; 3867 } 3868 3869 if (vp != &kvp) { 3870 /* 3871 * This is not a segkmem page but another page which 3872 * has been kernel mapped. 3873 */ 3874 sfmmu_mlist_exit(pml); 3875 SFMMU_HASH_UNLOCK(hmebp); 3876 if (locked) 3877 page_unlock(pp); 3878 ASSERT(cookie == NULL); 3879 return; 3880 } 3881 3882 if (cookie != NULL) { 3883 pahmep = (struct pa_hment *)cookie; 3884 sfhmep = &pahmep->sfment; 3885 } else { 3886 for (sfhmep = pp->p_mapping; sfhmep != NULL; 3887 sfhmep = sfhmep->hme_next) { 3888 3889 /* 3890 * skip va<->pa mappings 3891 */ 3892 if (!IS_PAHME(sfhmep)) 3893 continue; 3894 3895 pahmep = sfhmep->hme_data; 3896 ASSERT(pahmep != NULL); 3897 3898 /* 3899 * if pa_hment matches, remove it 3900 */ 3901 if ((pahmep->pvt == pvt) && 3902 (pahmep->addr == vaddr) && 3903 (pahmep->len == len)) { 3904 break; 3905 } 3906 } 3907 } 3908 3909 if (sfhmep == NULL) { 3910 if (!panicstr) { 3911 panic("hat_delete_callback: pa_hment not found, pp %p", 3912 (void *)pp); 3913 } 3914 return; 3915 } 3916 3917 /* 3918 * Note: at this point a valid kernel mapping must still be 3919 * present on this page. 3920 */ 3921 pp->p_share--; 3922 if (pp->p_share <= 0) 3923 panic("hat_delete_callback: zero p_share"); 3924 3925 if (--pahmep->refcnt == 0) { 3926 if (pahmep->flags != 0) 3927 panic("hat_delete_callback: pa_hment is busy"); 3928 3929 /* 3930 * Remove sfhmep from the mapping list for the page. 3931 */ 3932 if (sfhmep->hme_prev) { 3933 sfhmep->hme_prev->hme_next = sfhmep->hme_next; 3934 } else { 3935 pp->p_mapping = sfhmep->hme_next; 3936 } 3937 3938 if (sfhmep->hme_next) 3939 sfhmep->hme_next->hme_prev = sfhmep->hme_prev; 3940 3941 sfmmu_mlist_exit(pml); 3942 SFMMU_HASH_UNLOCK(hmebp); 3943 3944 if (locked) 3945 page_unlock(pp); 3946 3947 kmem_cache_free(pa_hment_cache, pahmep); 3948 return; 3949 } 3950 3951 sfmmu_mlist_exit(pml); 3952 SFMMU_HASH_UNLOCK(hmebp); 3953 if (locked) 3954 page_unlock(pp); 3955 } 3956 3957 /* 3958 * hat_probe returns 1 if the translation for the address 'addr' is 3959 * loaded, zero otherwise. 3960 * 3961 * hat_probe should be used only for advisorary purposes because it may 3962 * occasionally return the wrong value. The implementation must guarantee that 3963 * returning the wrong value is a very rare event. hat_probe is used 3964 * to implement optimizations in the segment drivers. 
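 *
 * Illustrative sketch (not part of the original comment): a segment
 * driver might use hat_probe() to take a fast path only when the
 * translation is already loaded, e.g.
 *
 *	if (hat_probe(as->a_hat, addr))
 *		fast_path(seg, addr);	-- fast_path() is hypothetical
 *
 * where "as" is the address space owning the segment.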
3965 * 3966 */ 3967 int 3968 hat_probe(struct hat *sfmmup, caddr_t addr) 3969 { 3970 pfn_t pfn; 3971 tte_t tte; 3972 3973 ASSERT(sfmmup != NULL); 3974 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 3975 3976 ASSERT((sfmmup == ksfmmup) || 3977 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 3978 3979 if (sfmmup == ksfmmup) { 3980 while ((pfn = sfmmu_vatopfn(addr, sfmmup, &tte)) 3981 == PFN_SUSPENDED) { 3982 sfmmu_vatopfn_suspended(addr, sfmmup, &tte); 3983 } 3984 } else { 3985 pfn = sfmmu_uvatopfn(addr, sfmmup); 3986 } 3987 3988 if (pfn != PFN_INVALID) 3989 return (1); 3990 else 3991 return (0); 3992 } 3993 3994 ssize_t 3995 hat_getpagesize(struct hat *sfmmup, caddr_t addr) 3996 { 3997 tte_t tte; 3998 3999 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 4000 4001 sfmmu_gettte(sfmmup, addr, &tte); 4002 if (TTE_IS_VALID(&tte)) { 4003 return (TTEBYTES(TTE_CSZ(&tte))); 4004 } 4005 return (-1); 4006 } 4007 4008 static void 4009 sfmmu_gettte(struct hat *sfmmup, caddr_t addr, tte_t *ttep) 4010 { 4011 struct hmehash_bucket *hmebp; 4012 hmeblk_tag hblktag; 4013 int hmeshift, hashno = 1; 4014 struct hme_blk *hmeblkp, *list = NULL; 4015 struct sf_hment *sfhmep; 4016 4017 /* support for ISM */ 4018 ism_map_t *ism_map; 4019 ism_blk_t *ism_blkp; 4020 int i; 4021 sfmmu_t *ism_hatid = NULL; 4022 sfmmu_t *locked_hatid = NULL; 4023 4024 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 4025 4026 ism_blkp = sfmmup->sfmmu_iblk; 4027 if (ism_blkp) { 4028 sfmmu_ismhat_enter(sfmmup, 0); 4029 locked_hatid = sfmmup; 4030 } 4031 while (ism_blkp && ism_hatid == NULL) { 4032 ism_map = ism_blkp->iblk_maps; 4033 for (i = 0; ism_map[i].imap_ismhat && i < ISM_MAP_SLOTS; i++) { 4034 if (addr >= ism_start(ism_map[i]) && 4035 addr < ism_end(ism_map[i])) { 4036 sfmmup = ism_hatid = ism_map[i].imap_ismhat; 4037 addr = (caddr_t)(addr - 4038 ism_start(ism_map[i])); 4039 break; 4040 } 4041 } 4042 ism_blkp = ism_blkp->iblk_next; 4043 } 4044 if (locked_hatid) { 4045 sfmmu_ismhat_exit(locked_hatid, 0); 4046 } 4047 4048 hblktag.htag_id = sfmmup; 4049 ttep->ll = 0; 4050 4051 do { 4052 hmeshift = HME_HASH_SHIFT(hashno); 4053 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 4054 hblktag.htag_rehash = hashno; 4055 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 4056 4057 SFMMU_HASH_LOCK(hmebp); 4058 4059 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 4060 if (hmeblkp != NULL) { 4061 HBLKTOHME(sfhmep, hmeblkp, addr); 4062 sfmmu_copytte(&sfhmep->hme_tte, ttep); 4063 SFMMU_HASH_UNLOCK(hmebp); 4064 break; 4065 } 4066 SFMMU_HASH_UNLOCK(hmebp); 4067 hashno++; 4068 } while (HME_REHASH(sfmmup) && (hashno <= mmu_hashcnt)); 4069 4070 sfmmu_hblks_list_purge(&list); 4071 } 4072 4073 uint_t 4074 hat_getattr(struct hat *sfmmup, caddr_t addr, uint_t *attr) 4075 { 4076 tte_t tte; 4077 4078 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 4079 4080 sfmmu_gettte(sfmmup, addr, &tte); 4081 if (TTE_IS_VALID(&tte)) { 4082 *attr = sfmmu_ptov_attr(&tte); 4083 return (0); 4084 } 4085 *attr = 0; 4086 return ((uint_t)0xffffffff); 4087 } 4088 4089 /* 4090 * Enables more attributes on specified address range (ie. logical OR) 4091 */ 4092 void 4093 hat_setattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr) 4094 { 4095 if (hat->sfmmu_xhat_provider) { 4096 XHAT_SETATTR(hat, addr, len, attr); 4097 return; 4098 } else { 4099 /* 4100 * This must be a CPU HAT. 
If the address space has 4101 * XHATs attached, change attributes for all of them, 4102 * just in case 4103 */ 4104 ASSERT(hat->sfmmu_as != NULL); 4105 if (hat->sfmmu_as->a_xhat != NULL) 4106 xhat_setattr_all(hat->sfmmu_as, addr, len, attr); 4107 } 4108 4109 sfmmu_chgattr(hat, addr, len, attr, SFMMU_SETATTR); 4110 } 4111 4112 /* 4113 * Assigns attributes to the specified address range. All the attributes 4114 * are specified. 4115 */ 4116 void 4117 hat_chgattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr) 4118 { 4119 if (hat->sfmmu_xhat_provider) { 4120 XHAT_CHGATTR(hat, addr, len, attr); 4121 return; 4122 } else { 4123 /* 4124 * This must be a CPU HAT. If the address space has 4125 * XHATs attached, change attributes for all of them, 4126 * just in case 4127 */ 4128 ASSERT(hat->sfmmu_as != NULL); 4129 if (hat->sfmmu_as->a_xhat != NULL) 4130 xhat_chgattr_all(hat->sfmmu_as, addr, len, attr); 4131 } 4132 4133 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CHGATTR); 4134 } 4135 4136 /* 4137 * Remove attributes on the specified address range (ie. loginal NAND) 4138 */ 4139 void 4140 hat_clrattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr) 4141 { 4142 if (hat->sfmmu_xhat_provider) { 4143 XHAT_CLRATTR(hat, addr, len, attr); 4144 return; 4145 } else { 4146 /* 4147 * This must be a CPU HAT. If the address space has 4148 * XHATs attached, change attributes for all of them, 4149 * just in case 4150 */ 4151 ASSERT(hat->sfmmu_as != NULL); 4152 if (hat->sfmmu_as->a_xhat != NULL) 4153 xhat_clrattr_all(hat->sfmmu_as, addr, len, attr); 4154 } 4155 4156 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CLRATTR); 4157 } 4158 4159 /* 4160 * Change attributes on an address range to that specified by attr and mode. 4161 */ 4162 static void 4163 sfmmu_chgattr(struct hat *sfmmup, caddr_t addr, size_t len, uint_t attr, 4164 int mode) 4165 { 4166 struct hmehash_bucket *hmebp; 4167 hmeblk_tag hblktag; 4168 int hmeshift, hashno = 1; 4169 struct hme_blk *hmeblkp, *list = NULL; 4170 caddr_t endaddr; 4171 cpuset_t cpuset; 4172 demap_range_t dmr; 4173 4174 CPUSET_ZERO(cpuset); 4175 4176 ASSERT((sfmmup == ksfmmup) || 4177 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 4178 ASSERT((len & MMU_PAGEOFFSET) == 0); 4179 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0); 4180 4181 if ((attr & PROT_USER) && (mode != SFMMU_CLRATTR) && 4182 ((addr + len) > (caddr_t)USERLIMIT)) { 4183 panic("user addr %p in kernel space", 4184 (void *)addr); 4185 } 4186 4187 endaddr = addr + len; 4188 hblktag.htag_id = sfmmup; 4189 DEMAP_RANGE_INIT(sfmmup, &dmr); 4190 4191 while (addr < endaddr) { 4192 hmeshift = HME_HASH_SHIFT(hashno); 4193 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 4194 hblktag.htag_rehash = hashno; 4195 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 4196 4197 SFMMU_HASH_LOCK(hmebp); 4198 4199 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 4200 if (hmeblkp != NULL) { 4201 /* 4202 * We've encountered a shadow hmeblk so skip the range 4203 * of the next smaller mapping size. 
4204 */ 4205 if (hmeblkp->hblk_shw_bit) { 4206 ASSERT(sfmmup != ksfmmup); 4207 ASSERT(hashno > 1); 4208 addr = (caddr_t)P2END((uintptr_t)addr, 4209 TTEBYTES(hashno - 1)); 4210 } else { 4211 addr = sfmmu_hblk_chgattr(sfmmup, 4212 hmeblkp, addr, endaddr, &dmr, attr, mode); 4213 } 4214 SFMMU_HASH_UNLOCK(hmebp); 4215 hashno = 1; 4216 continue; 4217 } 4218 SFMMU_HASH_UNLOCK(hmebp); 4219 4220 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 4221 /* 4222 * We have traversed the whole list and rehashed 4223 * if necessary without finding the address to chgattr. 4224 * This is ok, so we increment the address by the 4225 * smallest hmeblk range for kernel mappings or for 4226 * user mappings with no large pages, and the largest 4227 * hmeblk range, to account for shadow hmeblks, for 4228 * user mappings with large pages and continue. 4229 */ 4230 if (sfmmup == ksfmmup) 4231 addr = (caddr_t)P2END((uintptr_t)addr, 4232 TTEBYTES(1)); 4233 else 4234 addr = (caddr_t)P2END((uintptr_t)addr, 4235 TTEBYTES(hashno)); 4236 hashno = 1; 4237 } else { 4238 hashno++; 4239 } 4240 } 4241 4242 sfmmu_hblks_list_purge(&list); 4243 DEMAP_RANGE_FLUSH(&dmr); 4244 cpuset = sfmmup->sfmmu_cpusran; 4245 xt_sync(cpuset); 4246 } 4247 4248 /* 4249 * This function chgattr on a range of addresses in an hmeblk. It returns the 4250 * next addres that needs to be chgattr. 4251 * It should be called with the hash lock held. 4252 * XXX It should be possible to optimize chgattr by not flushing every time but 4253 * on the other hand: 4254 * 1. do one flush crosscall. 4255 * 2. only flush if we are increasing permissions (make sure this will work) 4256 */ 4257 static caddr_t 4258 sfmmu_hblk_chgattr(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 4259 caddr_t endaddr, demap_range_t *dmrp, uint_t attr, int mode) 4260 { 4261 tte_t tte, tteattr, tteflags, ttemod; 4262 struct sf_hment *sfhmep; 4263 int ttesz; 4264 struct page *pp = NULL; 4265 kmutex_t *pml, *pmtx; 4266 int ret; 4267 int use_demap_range; 4268 #if defined(SF_ERRATA_57) 4269 int check_exec; 4270 #endif 4271 4272 ASSERT(in_hblk_range(hmeblkp, addr)); 4273 ASSERT(hmeblkp->hblk_shw_bit == 0); 4274 4275 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 4276 ttesz = get_hblk_ttesz(hmeblkp); 4277 4278 /* 4279 * Flush the current demap region if addresses have been 4280 * skipped or the page size doesn't match. 4281 */ 4282 use_demap_range = (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp)); 4283 if (use_demap_range) { 4284 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 4285 } else { 4286 DEMAP_RANGE_FLUSH(dmrp); 4287 } 4288 4289 tteattr.ll = sfmmu_vtop_attr(attr, mode, &tteflags); 4290 #if defined(SF_ERRATA_57) 4291 check_exec = (sfmmup != ksfmmup) && 4292 AS_TYPE_64BIT(sfmmup->sfmmu_as) && 4293 TTE_IS_EXECUTABLE(&tteattr); 4294 #endif 4295 HBLKTOHME(sfhmep, hmeblkp, addr); 4296 while (addr < endaddr) { 4297 sfmmu_copytte(&sfhmep->hme_tte, &tte); 4298 if (TTE_IS_VALID(&tte)) { 4299 if ((tte.ll & tteflags.ll) == tteattr.ll) { 4300 /* 4301 * if the new attr is the same as old 4302 * continue 4303 */ 4304 goto next_addr; 4305 } 4306 if (!TTE_IS_WRITABLE(&tteattr)) { 4307 /* 4308 * make sure we clear hw modify bit if we 4309 * removing write protections 4310 */ 4311 tteflags.tte_intlo |= TTE_HWWR_INT; 4312 } 4313 4314 pml = NULL; 4315 pp = sfhmep->hme_page; 4316 if (pp) { 4317 pml = sfmmu_mlist_enter(pp); 4318 } 4319 4320 if (pp != sfhmep->hme_page) { 4321 /* 4322 * tte must have been unloaded. 
4323 */ 4324 ASSERT(pml); 4325 sfmmu_mlist_exit(pml); 4326 continue; 4327 } 4328 4329 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 4330 4331 ttemod = tte; 4332 ttemod.ll = (ttemod.ll & ~tteflags.ll) | tteattr.ll; 4333 ASSERT(TTE_TO_TTEPFN(&ttemod) == TTE_TO_TTEPFN(&tte)); 4334 4335 #if defined(SF_ERRATA_57) 4336 if (check_exec && addr < errata57_limit) 4337 ttemod.tte_exec_perm = 0; 4338 #endif 4339 ret = sfmmu_modifytte_try(&tte, &ttemod, 4340 &sfhmep->hme_tte); 4341 4342 if (ret < 0) { 4343 /* tte changed underneath us */ 4344 if (pml) { 4345 sfmmu_mlist_exit(pml); 4346 } 4347 continue; 4348 } 4349 4350 if (tteflags.tte_intlo & TTE_HWWR_INT) { 4351 /* 4352 * need to sync if we are clearing modify bit. 4353 */ 4354 sfmmu_ttesync(sfmmup, addr, &tte, pp); 4355 } 4356 4357 if (pp && PP_ISRO(pp)) { 4358 if (tteattr.tte_intlo & TTE_WRPRM_INT) { 4359 pmtx = sfmmu_page_enter(pp); 4360 PP_CLRRO(pp); 4361 sfmmu_page_exit(pmtx); 4362 } 4363 } 4364 4365 if (ret > 0 && use_demap_range) { 4366 DEMAP_RANGE_MARKPG(dmrp, addr); 4367 } else if (ret > 0) { 4368 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 4369 } 4370 4371 if (pml) { 4372 sfmmu_mlist_exit(pml); 4373 } 4374 } 4375 next_addr: 4376 addr += TTEBYTES(ttesz); 4377 sfhmep++; 4378 DEMAP_RANGE_NEXTPG(dmrp); 4379 } 4380 return (addr); 4381 } 4382 4383 /* 4384 * This routine converts virtual attributes to physical ones. It will 4385 * update the tteflags field with the tte mask corresponding to the attributes 4386 * affected and it returns the new attributes. It will also clear the modify 4387 * bit if we are taking away write permission. This is necessary since the 4388 * modify bit is the hardware permission bit and we need to clear it in order 4389 * to detect write faults. 4390 */ 4391 static uint64_t 4392 sfmmu_vtop_attr(uint_t attr, int mode, tte_t *ttemaskp) 4393 { 4394 tte_t ttevalue; 4395 4396 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 4397 4398 switch (mode) { 4399 case SFMMU_CHGATTR: 4400 /* all attributes specified */ 4401 ttevalue.tte_inthi = MAKE_TTEATTR_INTHI(attr); 4402 ttevalue.tte_intlo = MAKE_TTEATTR_INTLO(attr); 4403 ttemaskp->tte_inthi = TTEINTHI_ATTR; 4404 ttemaskp->tte_intlo = TTEINTLO_ATTR; 4405 break; 4406 case SFMMU_SETATTR: 4407 ASSERT(!(attr & ~HAT_PROT_MASK)); 4408 ttemaskp->ll = 0; 4409 ttevalue.ll = 0; 4410 /* 4411 * a valid tte implies exec and read for sfmmu 4412 * so no need to do anything about them. 4413 * since priviledged access implies user access 4414 * PROT_USER doesn't make sense either. 
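 * Net effect: an SFMMU_SETATTR request can only ever turn on the
 * writable bit (TTE_WRPRM_INT) in the mask and value computed below.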
4415 */ 4416 if (attr & PROT_WRITE) { 4417 ttemaskp->tte_intlo |= TTE_WRPRM_INT; 4418 ttevalue.tte_intlo |= TTE_WRPRM_INT; 4419 } 4420 break; 4421 case SFMMU_CLRATTR: 4422 /* attributes will be nand with current ones */ 4423 if (attr & ~(PROT_WRITE | PROT_USER)) { 4424 panic("sfmmu: attr %x not supported", attr); 4425 } 4426 ttemaskp->ll = 0; 4427 ttevalue.ll = 0; 4428 if (attr & PROT_WRITE) { 4429 /* clear both writable and modify bit */ 4430 ttemaskp->tte_intlo |= TTE_WRPRM_INT | TTE_HWWR_INT; 4431 } 4432 if (attr & PROT_USER) { 4433 ttemaskp->tte_intlo |= TTE_PRIV_INT; 4434 ttevalue.tte_intlo |= TTE_PRIV_INT; 4435 } 4436 break; 4437 default: 4438 panic("sfmmu_vtop_attr: bad mode %x", mode); 4439 } 4440 ASSERT(TTE_TO_TTEPFN(&ttevalue) == 0); 4441 return (ttevalue.ll); 4442 } 4443 4444 static uint_t 4445 sfmmu_ptov_attr(tte_t *ttep) 4446 { 4447 uint_t attr; 4448 4449 ASSERT(TTE_IS_VALID(ttep)); 4450 4451 attr = PROT_READ; 4452 4453 if (TTE_IS_WRITABLE(ttep)) { 4454 attr |= PROT_WRITE; 4455 } 4456 if (TTE_IS_EXECUTABLE(ttep)) { 4457 attr |= PROT_EXEC; 4458 } 4459 if (!TTE_IS_PRIVILEGED(ttep)) { 4460 attr |= PROT_USER; 4461 } 4462 if (TTE_IS_NFO(ttep)) { 4463 attr |= HAT_NOFAULT; 4464 } 4465 if (TTE_IS_NOSYNC(ttep)) { 4466 attr |= HAT_NOSYNC; 4467 } 4468 if (TTE_IS_SIDEFFECT(ttep)) { 4469 attr |= SFMMU_SIDEFFECT; 4470 } 4471 if (!TTE_IS_VCACHEABLE(ttep)) { 4472 attr |= SFMMU_UNCACHEVTTE; 4473 } 4474 if (!TTE_IS_PCACHEABLE(ttep)) { 4475 attr |= SFMMU_UNCACHEPTTE; 4476 } 4477 return (attr); 4478 } 4479 4480 /* 4481 * hat_chgprot is a deprecated hat call. New segment drivers 4482 * should store all attributes and use hat_*attr calls. 4483 * 4484 * Change the protections in the virtual address range 4485 * given to the specified virtual protection. If vprot is ~PROT_WRITE, 4486 * then remove write permission, leaving the other 4487 * permissions unchanged. If vprot is ~PROT_USER, remove user permissions. 4488 * 4489 */ 4490 void 4491 hat_chgprot(struct hat *sfmmup, caddr_t addr, size_t len, uint_t vprot) 4492 { 4493 struct hmehash_bucket *hmebp; 4494 hmeblk_tag hblktag; 4495 int hmeshift, hashno = 1; 4496 struct hme_blk *hmeblkp, *list = NULL; 4497 caddr_t endaddr; 4498 cpuset_t cpuset; 4499 demap_range_t dmr; 4500 4501 ASSERT((len & MMU_PAGEOFFSET) == 0); 4502 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0); 4503 4504 if (sfmmup->sfmmu_xhat_provider) { 4505 XHAT_CHGPROT(sfmmup, addr, len, vprot); 4506 return; 4507 } else { 4508 /* 4509 * This must be a CPU HAT. If the address space has 4510 * XHATs attached, change attributes for all of them, 4511 * just in case 4512 */ 4513 ASSERT(sfmmup->sfmmu_as != NULL); 4514 if (sfmmup->sfmmu_as->a_xhat != NULL) 4515 xhat_chgprot_all(sfmmup->sfmmu_as, addr, len, vprot); 4516 } 4517 4518 CPUSET_ZERO(cpuset); 4519 4520 if ((vprot != (uint_t)~PROT_WRITE) && (vprot & PROT_USER) && 4521 ((addr + len) > (caddr_t)USERLIMIT)) { 4522 panic("user addr %p vprot %x in kernel space", 4523 (void *)addr, vprot); 4524 } 4525 endaddr = addr + len; 4526 hblktag.htag_id = sfmmup; 4527 DEMAP_RANGE_INIT(sfmmup, &dmr); 4528 4529 while (addr < endaddr) { 4530 hmeshift = HME_HASH_SHIFT(hashno); 4531 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 4532 hblktag.htag_rehash = hashno; 4533 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 4534 4535 SFMMU_HASH_LOCK(hmebp); 4536 4537 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 4538 if (hmeblkp != NULL) { 4539 /* 4540 * We've encountered a shadow hmeblk so skip the range 4541 * of the next smaller mapping size. 
4542 */ 4543 if (hmeblkp->hblk_shw_bit) { 4544 ASSERT(sfmmup != ksfmmup); 4545 ASSERT(hashno > 1); 4546 addr = (caddr_t)P2END((uintptr_t)addr, 4547 TTEBYTES(hashno - 1)); 4548 } else { 4549 addr = sfmmu_hblk_chgprot(sfmmup, hmeblkp, 4550 addr, endaddr, &dmr, vprot); 4551 } 4552 SFMMU_HASH_UNLOCK(hmebp); 4553 hashno = 1; 4554 continue; 4555 } 4556 SFMMU_HASH_UNLOCK(hmebp); 4557 4558 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 4559 /* 4560 * We have traversed the whole list and rehashed 4561 * if necessary without finding the address to chgprot. 4562 * This is ok so we increment the address by the 4563 * smallest hmeblk range for kernel mappings and the 4564 * largest hmeblk range, to account for shadow hmeblks, 4565 * for user mappings and continue. 4566 */ 4567 if (sfmmup == ksfmmup) 4568 addr = (caddr_t)P2END((uintptr_t)addr, 4569 TTEBYTES(1)); 4570 else 4571 addr = (caddr_t)P2END((uintptr_t)addr, 4572 TTEBYTES(hashno)); 4573 hashno = 1; 4574 } else { 4575 hashno++; 4576 } 4577 } 4578 4579 sfmmu_hblks_list_purge(&list); 4580 DEMAP_RANGE_FLUSH(&dmr); 4581 cpuset = sfmmup->sfmmu_cpusran; 4582 xt_sync(cpuset); 4583 } 4584 4585 /* 4586 * This function chgprots a range of addresses in an hmeblk. It returns the 4587 * next addres that needs to be chgprot. 4588 * It should be called with the hash lock held. 4589 * XXX It shold be possible to optimize chgprot by not flushing every time but 4590 * on the other hand: 4591 * 1. do one flush crosscall. 4592 * 2. only flush if we are increasing permissions (make sure this will work) 4593 */ 4594 static caddr_t 4595 sfmmu_hblk_chgprot(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 4596 caddr_t endaddr, demap_range_t *dmrp, uint_t vprot) 4597 { 4598 uint_t pprot; 4599 tte_t tte, ttemod; 4600 struct sf_hment *sfhmep; 4601 uint_t tteflags; 4602 int ttesz; 4603 struct page *pp = NULL; 4604 kmutex_t *pml, *pmtx; 4605 int ret; 4606 int use_demap_range; 4607 #if defined(SF_ERRATA_57) 4608 int check_exec; 4609 #endif 4610 4611 ASSERT(in_hblk_range(hmeblkp, addr)); 4612 ASSERT(hmeblkp->hblk_shw_bit == 0); 4613 4614 #ifdef DEBUG 4615 if (get_hblk_ttesz(hmeblkp) != TTE8K && 4616 (endaddr < get_hblk_endaddr(hmeblkp))) { 4617 panic("sfmmu_hblk_chgprot: partial chgprot of large page"); 4618 } 4619 #endif /* DEBUG */ 4620 4621 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 4622 ttesz = get_hblk_ttesz(hmeblkp); 4623 4624 pprot = sfmmu_vtop_prot(vprot, &tteflags); 4625 #if defined(SF_ERRATA_57) 4626 check_exec = (sfmmup != ksfmmup) && 4627 AS_TYPE_64BIT(sfmmup->sfmmu_as) && 4628 ((vprot & PROT_EXEC) == PROT_EXEC); 4629 #endif 4630 HBLKTOHME(sfhmep, hmeblkp, addr); 4631 4632 /* 4633 * Flush the current demap region if addresses have been 4634 * skipped or the page size doesn't match. 4635 */ 4636 use_demap_range = (TTEBYTES(ttesz) == MMU_PAGESIZE); 4637 if (use_demap_range) { 4638 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 4639 } else { 4640 DEMAP_RANGE_FLUSH(dmrp); 4641 } 4642 4643 while (addr < endaddr) { 4644 sfmmu_copytte(&sfhmep->hme_tte, &tte); 4645 if (TTE_IS_VALID(&tte)) { 4646 if (TTE_GET_LOFLAGS(&tte, tteflags) == pprot) { 4647 /* 4648 * if the new protection is the same as old 4649 * continue 4650 */ 4651 goto next_addr; 4652 } 4653 pml = NULL; 4654 pp = sfhmep->hme_page; 4655 if (pp) { 4656 pml = sfmmu_mlist_enter(pp); 4657 } 4658 if (pp != sfhmep->hme_page) { 4659 /* 4660 * tte most have been unloaded 4661 * underneath us. 
Recheck 4662 */ 4663 ASSERT(pml); 4664 sfmmu_mlist_exit(pml); 4665 continue; 4666 } 4667 4668 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 4669 4670 ttemod = tte; 4671 TTE_SET_LOFLAGS(&ttemod, tteflags, pprot); 4672 #if defined(SF_ERRATA_57) 4673 if (check_exec && addr < errata57_limit) 4674 ttemod.tte_exec_perm = 0; 4675 #endif 4676 ret = sfmmu_modifytte_try(&tte, &ttemod, 4677 &sfhmep->hme_tte); 4678 4679 if (ret < 0) { 4680 /* tte changed underneath us */ 4681 if (pml) { 4682 sfmmu_mlist_exit(pml); 4683 } 4684 continue; 4685 } 4686 4687 if (tteflags & TTE_HWWR_INT) { 4688 /* 4689 * need to sync if we are clearing modify bit. 4690 */ 4691 sfmmu_ttesync(sfmmup, addr, &tte, pp); 4692 } 4693 4694 if (pp && PP_ISRO(pp)) { 4695 if (pprot & TTE_WRPRM_INT) { 4696 pmtx = sfmmu_page_enter(pp); 4697 PP_CLRRO(pp); 4698 sfmmu_page_exit(pmtx); 4699 } 4700 } 4701 4702 if (ret > 0 && use_demap_range) { 4703 DEMAP_RANGE_MARKPG(dmrp, addr); 4704 } else if (ret > 0) { 4705 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 4706 } 4707 4708 if (pml) { 4709 sfmmu_mlist_exit(pml); 4710 } 4711 } 4712 next_addr: 4713 addr += TTEBYTES(ttesz); 4714 sfhmep++; 4715 DEMAP_RANGE_NEXTPG(dmrp); 4716 } 4717 return (addr); 4718 } 4719 4720 /* 4721 * This routine is deprecated and should only be used by hat_chgprot. 4722 * The correct routine is sfmmu_vtop_attr. 4723 * This routine converts virtual page protections to physical ones. It will 4724 * update the tteflags field with the tte mask corresponding to the protections 4725 * affected and it returns the new protections. It will also clear the modify 4726 * bit if we are taking away write permission. This is necessary since the 4727 * modify bit is the hardware permission bit and we need to clear it in order 4728 * to detect write faults. 4729 * It accepts the following special protections: 4730 * ~PROT_WRITE = remove write permissions. 4731 * ~PROT_USER = remove user permissions. 
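 *
 * Worked examples, taken directly from the switch below:
 *	vprot = PROT_READ | PROT_WRITE (privileged) sets *tteflagsp to
 *	TTE_PRIV_INT | TTE_WRPRM_INT and returns both bits, making the
 *	tte privileged and writable.
 *	vprot = PROT_USER | PROT_READ additionally masks TTE_HWWR_INT and
 *	returns 0, clearing the privileged and writable bits (and the
 *	hardware modify bit).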
4732 */ 4733 static uint_t 4734 sfmmu_vtop_prot(uint_t vprot, uint_t *tteflagsp) 4735 { 4736 if (vprot == (uint_t)~PROT_WRITE) { 4737 *tteflagsp = TTE_WRPRM_INT | TTE_HWWR_INT; 4738 return (0); /* will cause wrprm to be cleared */ 4739 } 4740 if (vprot == (uint_t)~PROT_USER) { 4741 *tteflagsp = TTE_PRIV_INT; 4742 return (0); /* will cause privprm to be cleared */ 4743 } 4744 if ((vprot == 0) || (vprot == PROT_USER) || 4745 ((vprot & PROT_ALL) != vprot)) { 4746 panic("sfmmu_vtop_prot -- bad prot %x", vprot); 4747 } 4748 4749 switch (vprot) { 4750 case (PROT_READ): 4751 case (PROT_EXEC): 4752 case (PROT_EXEC | PROT_READ): 4753 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT; 4754 return (TTE_PRIV_INT); /* set prv and clr wrt */ 4755 case (PROT_WRITE): 4756 case (PROT_WRITE | PROT_READ): 4757 case (PROT_EXEC | PROT_WRITE): 4758 case (PROT_EXEC | PROT_WRITE | PROT_READ): 4759 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT; 4760 return (TTE_PRIV_INT | TTE_WRPRM_INT); /* set prv and wrt */ 4761 case (PROT_USER | PROT_READ): 4762 case (PROT_USER | PROT_EXEC): 4763 case (PROT_USER | PROT_EXEC | PROT_READ): 4764 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT; 4765 return (0); /* clr prv and wrt */ 4766 case (PROT_USER | PROT_WRITE): 4767 case (PROT_USER | PROT_WRITE | PROT_READ): 4768 case (PROT_USER | PROT_EXEC | PROT_WRITE): 4769 case (PROT_USER | PROT_EXEC | PROT_WRITE | PROT_READ): 4770 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT; 4771 return (TTE_WRPRM_INT); /* clr prv and set wrt */ 4772 default: 4773 panic("sfmmu_vtop_prot -- bad prot %x", vprot); 4774 } 4775 return (0); 4776 } 4777 4778 /* 4779 * Alternate unload for very large virtual ranges. With a true 64 bit VA, 4780 * the normal algorithm would take too long for a very large VA range with 4781 * few real mappings. This routine just walks thru all HMEs in the global 4782 * hash table to find and remove mappings. 4783 */ 4784 static void 4785 hat_unload_large_virtual( 4786 struct hat *sfmmup, 4787 caddr_t startaddr, 4788 size_t len, 4789 uint_t flags, 4790 hat_callback_t *callback) 4791 { 4792 struct hmehash_bucket *hmebp; 4793 struct hme_blk *hmeblkp; 4794 struct hme_blk *pr_hblk = NULL; 4795 struct hme_blk *nx_hblk; 4796 struct hme_blk *list = NULL; 4797 int i; 4798 uint64_t hblkpa, prevpa, nx_pa; 4799 demap_range_t dmr, *dmrp; 4800 cpuset_t cpuset; 4801 caddr_t endaddr = startaddr + len; 4802 caddr_t sa; 4803 caddr_t ea; 4804 caddr_t cb_sa[MAX_CB_ADDR]; 4805 caddr_t cb_ea[MAX_CB_ADDR]; 4806 int addr_cnt = 0; 4807 int a = 0; 4808 4809 if (sfmmup->sfmmu_free) { 4810 dmrp = NULL; 4811 } else { 4812 dmrp = &dmr; 4813 DEMAP_RANGE_INIT(sfmmup, dmrp); 4814 } 4815 4816 /* 4817 * Loop through all the hash buckets of HME blocks looking for matches. 
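 * Note that every user hash bucket (0 .. UHMEHASH_SZ) is visited no
 * matter how sparse the range is; hat_unload_callback() falls back to
 * this routine only when the number of 4M probes the normal algorithm
 * would need exceeds UHMEHASH_SZ.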
4818 */ 4819 for (i = 0; i <= UHMEHASH_SZ; i++) { 4820 hmebp = &uhme_hash[i]; 4821 SFMMU_HASH_LOCK(hmebp); 4822 hmeblkp = hmebp->hmeblkp; 4823 hblkpa = hmebp->hmeh_nextpa; 4824 prevpa = 0; 4825 pr_hblk = NULL; 4826 while (hmeblkp) { 4827 nx_hblk = hmeblkp->hblk_next; 4828 nx_pa = hmeblkp->hblk_nextpa; 4829 4830 /* 4831 * skip if not this context, if a shadow block or 4832 * if the mapping is not in the requested range 4833 */ 4834 if (hmeblkp->hblk_tag.htag_id != sfmmup || 4835 hmeblkp->hblk_shw_bit || 4836 (sa = (caddr_t)get_hblk_base(hmeblkp)) >= endaddr || 4837 (ea = get_hblk_endaddr(hmeblkp)) <= startaddr) { 4838 pr_hblk = hmeblkp; 4839 prevpa = hblkpa; 4840 goto next_block; 4841 } 4842 4843 /* 4844 * unload if there are any current valid mappings 4845 */ 4846 if (hmeblkp->hblk_vcnt != 0 || 4847 hmeblkp->hblk_hmecnt != 0) 4848 (void) sfmmu_hblk_unload(sfmmup, hmeblkp, 4849 sa, ea, dmrp, flags); 4850 4851 /* 4852 * on unmap we also release the HME block itself, once 4853 * all mappings are gone. 4854 */ 4855 if ((flags & HAT_UNLOAD_UNMAP) != 0 && 4856 !hmeblkp->hblk_vcnt && 4857 !hmeblkp->hblk_hmecnt) { 4858 ASSERT(!hmeblkp->hblk_lckcnt); 4859 sfmmu_hblk_hash_rm(hmebp, hmeblkp, 4860 prevpa, pr_hblk); 4861 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 4862 } else { 4863 pr_hblk = hmeblkp; 4864 prevpa = hblkpa; 4865 } 4866 4867 if (callback == NULL) 4868 goto next_block; 4869 4870 /* 4871 * HME blocks may span more than one page, but we may be 4872 * unmapping only one page, so check for a smaller range 4873 * for the callback 4874 */ 4875 if (sa < startaddr) 4876 sa = startaddr; 4877 if (--ea > endaddr) 4878 ea = endaddr - 1; 4879 4880 cb_sa[addr_cnt] = sa; 4881 cb_ea[addr_cnt] = ea; 4882 if (++addr_cnt == MAX_CB_ADDR) { 4883 if (dmrp != NULL) { 4884 DEMAP_RANGE_FLUSH(dmrp); 4885 cpuset = sfmmup->sfmmu_cpusran; 4886 xt_sync(cpuset); 4887 } 4888 4889 for (a = 0; a < MAX_CB_ADDR; ++a) { 4890 callback->hcb_start_addr = cb_sa[a]; 4891 callback->hcb_end_addr = cb_ea[a]; 4892 callback->hcb_function(callback); 4893 } 4894 addr_cnt = 0; 4895 } 4896 4897 next_block: 4898 hmeblkp = nx_hblk; 4899 hblkpa = nx_pa; 4900 } 4901 SFMMU_HASH_UNLOCK(hmebp); 4902 } 4903 4904 sfmmu_hblks_list_purge(&list); 4905 if (dmrp != NULL) { 4906 DEMAP_RANGE_FLUSH(dmrp); 4907 cpuset = sfmmup->sfmmu_cpusran; 4908 xt_sync(cpuset); 4909 } 4910 4911 for (a = 0; a < addr_cnt; ++a) { 4912 callback->hcb_start_addr = cb_sa[a]; 4913 callback->hcb_end_addr = cb_ea[a]; 4914 callback->hcb_function(callback); 4915 } 4916 4917 /* 4918 * Check TSB and TLB page sizes if the process isn't exiting. 4919 */ 4920 if (!sfmmup->sfmmu_free) 4921 sfmmu_check_page_sizes(sfmmup, 0); 4922 } 4923 4924 /* 4925 * Unload all the mappings in the range [addr..addr+len). addr and len must 4926 * be MMU_PAGESIZE aligned. 
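 *
 * Illustrative sketch of callback use (not part of the original
 * comment; my_unload_notify is a hypothetical handler):
 *
 *	hat_callback_t cb;
 *
 *	cb.hcb_function = my_unload_notify;
 *	hat_unload_callback(as->a_hat, addr, len, HAT_UNLOAD_UNMAP, &cb);
 *
 * The handler is invoked as cb.hcb_function(&cb) with hcb_start_addr
 * and hcb_end_addr filled in for each contiguous range actually
 * unloaded.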
4927 */ 4928 4929 extern struct seg *segkmap; 4930 #define ISSEGKMAP(sfmmup, addr) (sfmmup == ksfmmup && \ 4931 segkmap->s_base <= (addr) && (addr) < (segkmap->s_base + segkmap->s_size)) 4932 4933 4934 void 4935 hat_unload_callback( 4936 struct hat *sfmmup, 4937 caddr_t addr, 4938 size_t len, 4939 uint_t flags, 4940 hat_callback_t *callback) 4941 { 4942 struct hmehash_bucket *hmebp; 4943 hmeblk_tag hblktag; 4944 int hmeshift, hashno, iskernel; 4945 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL; 4946 caddr_t endaddr; 4947 cpuset_t cpuset; 4948 uint64_t hblkpa, prevpa; 4949 int addr_count = 0; 4950 int a; 4951 caddr_t cb_start_addr[MAX_CB_ADDR]; 4952 caddr_t cb_end_addr[MAX_CB_ADDR]; 4953 int issegkmap = ISSEGKMAP(sfmmup, addr); 4954 demap_range_t dmr, *dmrp; 4955 4956 if (sfmmup->sfmmu_xhat_provider) { 4957 XHAT_UNLOAD_CALLBACK(sfmmup, addr, len, flags, callback); 4958 return; 4959 } else { 4960 /* 4961 * This must be a CPU HAT. If the address space has 4962 * XHATs attached, unload the mappings for all of them, 4963 * just in case 4964 */ 4965 ASSERT(sfmmup->sfmmu_as != NULL); 4966 if (sfmmup->sfmmu_as->a_xhat != NULL) 4967 xhat_unload_callback_all(sfmmup->sfmmu_as, addr, 4968 len, flags, callback); 4969 } 4970 4971 ASSERT((sfmmup == ksfmmup) || (flags & HAT_UNLOAD_OTHER) || \ 4972 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 4973 4974 ASSERT(sfmmup != NULL); 4975 ASSERT((len & MMU_PAGEOFFSET) == 0); 4976 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 4977 4978 /* 4979 * Probing through a large VA range (say 63 bits) will be slow, even 4980 * at 4 Meg steps between the probes. So, when the virtual address range 4981 * is very large, search the HME entries for what to unload. 4982 * 4983 * len >> TTE_PAGE_SHIFT(TTE4M) is the # of 4Meg probes we'd need 4984 * 4985 * UHMEHASH_SZ is number of hash buckets to examine 4986 * 4987 */ 4988 if (sfmmup != KHATID && (len >> TTE_PAGE_SHIFT(TTE4M)) > UHMEHASH_SZ) { 4989 hat_unload_large_virtual(sfmmup, addr, len, flags, callback); 4990 return; 4991 } 4992 4993 CPUSET_ZERO(cpuset); 4994 4995 /* 4996 * If the process is exiting, we can save a lot of fuss since 4997 * we'll flush the TLB when we free the ctx anyway. 4998 */ 4999 if (sfmmup->sfmmu_free) 5000 dmrp = NULL; 5001 else 5002 dmrp = &dmr; 5003 5004 DEMAP_RANGE_INIT(sfmmup, dmrp); 5005 endaddr = addr + len; 5006 hblktag.htag_id = sfmmup; 5007 5008 /* 5009 * It is likely for the vm to call unload over a wide range of 5010 * addresses that are actually very sparsely populated by 5011 * translations. In order to speed this up the sfmmu hat supports 5012 * the concept of shadow hmeblks. Dummy large page hmeblks that 5013 * correspond to actual small translations are allocated at tteload 5014 * time and are referred to as shadow hmeblks. Now, during unload 5015 * time, we first check if we have a shadow hmeblk for that 5016 * translation. The absence of one means the corresponding address 5017 * range is empty and can be skipped. 5018 * 5019 * The kernel is an exception to above statement and that is why 5020 * we don't use shadow hmeblks and hash starting from the smallest 5021 * page size. 
5022 */ 5023 if (sfmmup == KHATID) { 5024 iskernel = 1; 5025 hashno = TTE64K; 5026 } else { 5027 iskernel = 0; 5028 if (mmu_page_sizes == max_mmu_page_sizes) { 5029 hashno = TTE256M; 5030 } else { 5031 hashno = TTE4M; 5032 } 5033 } 5034 while (addr < endaddr) { 5035 hmeshift = HME_HASH_SHIFT(hashno); 5036 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 5037 hblktag.htag_rehash = hashno; 5038 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 5039 5040 SFMMU_HASH_LOCK(hmebp); 5041 5042 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, hblkpa, pr_hblk, 5043 prevpa, &list); 5044 if (hmeblkp == NULL) { 5045 /* 5046 * didn't find an hmeblk. skip the appropiate 5047 * address range. 5048 */ 5049 SFMMU_HASH_UNLOCK(hmebp); 5050 if (iskernel) { 5051 if (hashno < mmu_hashcnt) { 5052 hashno++; 5053 continue; 5054 } else { 5055 hashno = TTE64K; 5056 addr = (caddr_t)roundup((uintptr_t)addr 5057 + 1, MMU_PAGESIZE64K); 5058 continue; 5059 } 5060 } 5061 addr = (caddr_t)roundup((uintptr_t)addr + 1, 5062 (1 << hmeshift)); 5063 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 5064 ASSERT(hashno == TTE64K); 5065 continue; 5066 } 5067 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 5068 hashno = TTE512K; 5069 continue; 5070 } 5071 if (mmu_page_sizes == max_mmu_page_sizes) { 5072 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 5073 hashno = TTE4M; 5074 continue; 5075 } 5076 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 5077 hashno = TTE32M; 5078 continue; 5079 } 5080 hashno = TTE256M; 5081 continue; 5082 } else { 5083 hashno = TTE4M; 5084 continue; 5085 } 5086 } 5087 ASSERT(hmeblkp); 5088 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 5089 /* 5090 * If the valid count is zero we can skip the range 5091 * mapped by this hmeblk. 5092 * We free hblks in the case of HAT_UNMAP. HAT_UNMAP 5093 * is used by segment drivers as a hint 5094 * that the mapping resource won't be used any longer. 5095 * The best example of this is during exit(). 5096 */ 5097 addr = (caddr_t)roundup((uintptr_t)addr + 1, 5098 get_hblk_span(hmeblkp)); 5099 if ((flags & HAT_UNLOAD_UNMAP) || 5100 (iskernel && !issegkmap)) { 5101 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, 5102 pr_hblk); 5103 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 5104 } 5105 SFMMU_HASH_UNLOCK(hmebp); 5106 5107 if (iskernel) { 5108 hashno = TTE64K; 5109 continue; 5110 } 5111 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 5112 ASSERT(hashno == TTE64K); 5113 continue; 5114 } 5115 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 5116 hashno = TTE512K; 5117 continue; 5118 } 5119 if (mmu_page_sizes == max_mmu_page_sizes) { 5120 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 5121 hashno = TTE4M; 5122 continue; 5123 } 5124 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 5125 hashno = TTE32M; 5126 continue; 5127 } 5128 hashno = TTE256M; 5129 continue; 5130 } else { 5131 hashno = TTE4M; 5132 continue; 5133 } 5134 } 5135 if (hmeblkp->hblk_shw_bit) { 5136 /* 5137 * If we encounter a shadow hmeblk we know there is 5138 * smaller sized hmeblks mapping the same address space. 5139 * Decrement the hash size and rehash. 5140 */ 5141 ASSERT(sfmmup != KHATID); 5142 hashno--; 5143 SFMMU_HASH_UNLOCK(hmebp); 5144 continue; 5145 } 5146 5147 /* 5148 * track callback address ranges. 
5149 * only start a new range when it's not contiguous 5150 */ 5151 if (callback != NULL) { 5152 if (addr_count > 0 && 5153 addr == cb_end_addr[addr_count - 1]) 5154 --addr_count; 5155 else 5156 cb_start_addr[addr_count] = addr; 5157 } 5158 5159 addr = sfmmu_hblk_unload(sfmmup, hmeblkp, addr, endaddr, 5160 dmrp, flags); 5161 5162 if (callback != NULL) 5163 cb_end_addr[addr_count++] = addr; 5164 5165 if (((flags & HAT_UNLOAD_UNMAP) || (iskernel && !issegkmap)) && 5166 !hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 5167 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, 5168 pr_hblk); 5169 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 5170 } 5171 SFMMU_HASH_UNLOCK(hmebp); 5172 5173 /* 5174 * Notify our caller as to exactly which pages 5175 * have been unloaded. We do these in clumps, 5176 * to minimize the number of xt_sync()s that need to occur. 5177 */ 5178 if (callback != NULL && addr_count == MAX_CB_ADDR) { 5179 DEMAP_RANGE_FLUSH(dmrp); 5180 if (dmrp != NULL) { 5181 cpuset = sfmmup->sfmmu_cpusran; 5182 xt_sync(cpuset); 5183 } 5184 5185 for (a = 0; a < MAX_CB_ADDR; ++a) { 5186 callback->hcb_start_addr = cb_start_addr[a]; 5187 callback->hcb_end_addr = cb_end_addr[a]; 5188 callback->hcb_function(callback); 5189 } 5190 addr_count = 0; 5191 } 5192 if (iskernel) { 5193 hashno = TTE64K; 5194 continue; 5195 } 5196 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 5197 ASSERT(hashno == TTE64K); 5198 continue; 5199 } 5200 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 5201 hashno = TTE512K; 5202 continue; 5203 } 5204 if (mmu_page_sizes == max_mmu_page_sizes) { 5205 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 5206 hashno = TTE4M; 5207 continue; 5208 } 5209 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 5210 hashno = TTE32M; 5211 continue; 5212 } 5213 hashno = TTE256M; 5214 } else { 5215 hashno = TTE4M; 5216 } 5217 } 5218 5219 sfmmu_hblks_list_purge(&list); 5220 DEMAP_RANGE_FLUSH(dmrp); 5221 if (dmrp != NULL) { 5222 cpuset = sfmmup->sfmmu_cpusran; 5223 xt_sync(cpuset); 5224 } 5225 if (callback && addr_count != 0) { 5226 for (a = 0; a < addr_count; ++a) { 5227 callback->hcb_start_addr = cb_start_addr[a]; 5228 callback->hcb_end_addr = cb_end_addr[a]; 5229 callback->hcb_function(callback); 5230 } 5231 } 5232 5233 /* 5234 * Check TSB and TLB page sizes if the process isn't exiting. 5235 */ 5236 if (!sfmmup->sfmmu_free) 5237 sfmmu_check_page_sizes(sfmmup, 0); 5238 } 5239 5240 /* 5241 * Unload all the mappings in the range [addr..addr+len). addr and len must 5242 * be MMU_PAGESIZE aligned. 5243 */ 5244 void 5245 hat_unload(struct hat *sfmmup, caddr_t addr, size_t len, uint_t flags) 5246 { 5247 if (sfmmup->sfmmu_xhat_provider) { 5248 XHAT_UNLOAD(sfmmup, addr, len, flags); 5249 return; 5250 } 5251 hat_unload_callback(sfmmup, addr, len, flags, NULL); 5252 } 5253 5254 5255 /* 5256 * Find the largest mapping size for this page. 5257 */ 5258 int 5259 fnd_mapping_sz(page_t *pp) 5260 { 5261 int sz; 5262 int p_index; 5263 5264 p_index = PP_MAPINDEX(pp); 5265 5266 sz = 0; 5267 p_index >>= 1; /* don't care about 8K bit */ 5268 for (; p_index; p_index >>= 1) { 5269 sz++; 5270 } 5271 5272 return (sz); 5273 } 5274 5275 /* 5276 * This function unloads a range of addresses for an hmeblk. 5277 * It returns the next address to be unloaded. 5278 * It should be called with the hash lock held. 
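 * On return, sfmmu_ttecnt[] for this hmeblk's page size has been
 * decremented by the number of ttes actually invalidated.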
5279 */ 5280 static caddr_t 5281 sfmmu_hblk_unload(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 5282 caddr_t endaddr, demap_range_t *dmrp, uint_t flags) 5283 { 5284 tte_t tte, ttemod; 5285 struct sf_hment *sfhmep; 5286 int ttesz; 5287 long ttecnt; 5288 page_t *pp; 5289 kmutex_t *pml; 5290 int ret; 5291 int use_demap_range; 5292 5293 ASSERT(in_hblk_range(hmeblkp, addr)); 5294 ASSERT(!hmeblkp->hblk_shw_bit); 5295 #ifdef DEBUG 5296 if (get_hblk_ttesz(hmeblkp) != TTE8K && 5297 (endaddr < get_hblk_endaddr(hmeblkp))) { 5298 panic("sfmmu_hblk_unload: partial unload of large page"); 5299 } 5300 #endif /* DEBUG */ 5301 5302 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 5303 ttesz = get_hblk_ttesz(hmeblkp); 5304 5305 use_demap_range = (do_virtual_coloring && 5306 ((dmrp == NULL) || TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp))); 5307 if (use_demap_range) { 5308 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 5309 } else { 5310 DEMAP_RANGE_FLUSH(dmrp); 5311 } 5312 ttecnt = 0; 5313 HBLKTOHME(sfhmep, hmeblkp, addr); 5314 5315 while (addr < endaddr) { 5316 pml = NULL; 5317 again: 5318 sfmmu_copytte(&sfhmep->hme_tte, &tte); 5319 if (TTE_IS_VALID(&tte)) { 5320 pp = sfhmep->hme_page; 5321 if (pp && pml == NULL) { 5322 pml = sfmmu_mlist_enter(pp); 5323 } 5324 5325 /* 5326 * Verify if hme still points to 'pp' now that 5327 * we have p_mapping lock. 5328 */ 5329 if (sfhmep->hme_page != pp) { 5330 if (pp != NULL && sfhmep->hme_page != NULL) { 5331 if (pml) { 5332 sfmmu_mlist_exit(pml); 5333 } 5334 /* Re-start this iteration. */ 5335 continue; 5336 } 5337 ASSERT((pp != NULL) && 5338 (sfhmep->hme_page == NULL)); 5339 goto tte_unloaded; 5340 } 5341 5342 /* 5343 * This point on we have both HASH and p_mapping 5344 * lock. 5345 */ 5346 ASSERT(pp == sfhmep->hme_page); 5347 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 5348 5349 /* 5350 * We need to loop on modify tte because it is 5351 * possible for pagesync to come along and 5352 * change the software bits beneath us. 5353 * 5354 * Page_unload can also invalidate the tte after 5355 * we read tte outside of p_mapping lock. 5356 */ 5357 ttemod = tte; 5358 5359 TTE_SET_INVALID(&ttemod); 5360 ret = sfmmu_modifytte_try(&tte, &ttemod, 5361 &sfhmep->hme_tte); 5362 5363 if (ret <= 0) { 5364 if (TTE_IS_VALID(&tte)) { 5365 goto again; 5366 } else { 5367 /* 5368 * We read in a valid pte, but it 5369 * is unloaded by page_unload. 5370 * hme_page has become NULL and 5371 * we hold no p_mapping lock. 5372 */ 5373 ASSERT(pp == NULL && pml == NULL); 5374 goto tte_unloaded; 5375 } 5376 } 5377 5378 if (!(flags & HAT_UNLOAD_NOSYNC)) { 5379 sfmmu_ttesync(sfmmup, addr, &tte, pp); 5380 } 5381 5382 /* 5383 * Ok- we invalidated the tte. Do the rest of the job. 5384 */ 5385 ttecnt++; 5386 5387 if (flags & HAT_UNLOAD_UNLOCK) { 5388 ASSERT(hmeblkp->hblk_lckcnt > 0); 5389 atomic_add_16(&hmeblkp->hblk_lckcnt, -1); 5390 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK); 5391 } 5392 5393 /* 5394 * Normally we would need to flush the page 5395 * from the virtual cache at this point in 5396 * order to prevent a potential cache alias 5397 * inconsistency. 5398 * The particular scenario we need to worry 5399 * about is: 5400 * Given: va1 and va2 are two virtual address 5401 * that alias and map the same physical 5402 * address. 5403 * 1. mapping exists from va1 to pa and data 5404 * has been read into the cache. 5405 * 2. unload va1. 5406 * 3. load va2 and modify data using va2. 5407 * 4 unload va2. 5408 * 5. load va1 and reference data. 
Unless we 5409 * flush the data cache when we unload we will 5410 * get stale data. 5411 * Fortunately, page coloring eliminates the 5412 * above scenario by remembering the color a 5413 * physical page was last or is currently 5414 * mapped to. Now, we delay the flush until 5415 * the loading of translations. Only when the 5416 * new translation is of a different color 5417 * are we forced to flush. 5418 */ 5419 if (use_demap_range) { 5420 /* 5421 * Mark this page as needing a demap. 5422 */ 5423 DEMAP_RANGE_MARKPG(dmrp, addr); 5424 } else { 5425 if (do_virtual_coloring) { 5426 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 5427 sfmmup->sfmmu_free, 0); 5428 } else { 5429 pfn_t pfnum; 5430 5431 pfnum = TTE_TO_PFN(addr, &tte); 5432 sfmmu_tlbcache_demap(addr, sfmmup, 5433 hmeblkp, pfnum, sfmmup->sfmmu_free, 5434 FLUSH_NECESSARY_CPUS, 5435 CACHE_FLUSH, 0); 5436 } 5437 } 5438 5439 if (pp) { 5440 /* 5441 * Remove the hment from the mapping list 5442 */ 5443 ASSERT(hmeblkp->hblk_hmecnt > 0); 5444 5445 /* 5446 * Again, we cannot 5447 * ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS); 5448 */ 5449 HME_SUB(sfhmep, pp); 5450 membar_stst(); 5451 atomic_add_16(&hmeblkp->hblk_hmecnt, -1); 5452 } 5453 5454 ASSERT(hmeblkp->hblk_vcnt > 0); 5455 atomic_add_16(&hmeblkp->hblk_vcnt, -1); 5456 5457 ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt || 5458 !hmeblkp->hblk_lckcnt); 5459 5460 #ifdef VAC 5461 if (pp && (pp->p_nrm & (P_KPMC | P_KPMS | P_TNC))) { 5462 if (PP_ISTNC(pp)) { 5463 /* 5464 * If page was temporary 5465 * uncached, try to recache 5466 * it. Note that HME_SUB() was 5467 * called above so p_index and 5468 * mlist had been updated. 5469 */ 5470 conv_tnc(pp, ttesz); 5471 } else if (pp->p_mapping == NULL) { 5472 ASSERT(kpm_enable); 5473 /* 5474 * Page is marked to be in VAC conflict 5475 * to an existing kpm mapping and/or is 5476 * kpm mapped using only the regular 5477 * pagesize. 5478 */ 5479 sfmmu_kpm_hme_unload(pp); 5480 } 5481 } 5482 #endif /* VAC */ 5483 } else if ((pp = sfhmep->hme_page) != NULL) { 5484 /* 5485 * TTE is invalid but the hme 5486 * still exists. let pageunload 5487 * complete its job. 5488 */ 5489 ASSERT(pml == NULL); 5490 pml = sfmmu_mlist_enter(pp); 5491 if (sfhmep->hme_page != NULL) { 5492 sfmmu_mlist_exit(pml); 5493 pml = NULL; 5494 goto again; 5495 } 5496 ASSERT(sfhmep->hme_page == NULL); 5497 } else if (hmeblkp->hblk_hmecnt != 0) { 5498 /* 5499 * pageunload may have not finished decrementing 5500 * hblk_vcnt and hblk_hmecnt. Find page_t if any and 5501 * wait for pageunload to finish. Rely on pageunload 5502 * to decrement hblk_hmecnt after hblk_vcnt. 5503 */ 5504 pfn_t pfn = TTE_TO_TTEPFN(&tte); 5505 ASSERT(pml == NULL); 5506 if (pf_is_memory(pfn)) { 5507 pp = page_numtopp_nolock(pfn); 5508 if (pp != NULL) { 5509 pml = sfmmu_mlist_enter(pp); 5510 sfmmu_mlist_exit(pml); 5511 pml = NULL; 5512 } 5513 } 5514 } 5515 5516 tte_unloaded: 5517 /* 5518 * At this point, the tte we are looking at 5519 * should be unloaded, and hme has been unlinked 5520 * from page too. This is important because in 5521 * pageunload, it does ttesync() then HME_SUB. 5522 * We need to make sure HME_SUB has been completed 5523 * so we know ttesync() has been completed. Otherwise, 5524 * at exit time, after return from hat layer, VM will 5525 * release as structure which hat_setstat() (called 5526 * by ttesync()) needs. 
5527 */ 5528 #ifdef DEBUG 5529 { 5530 tte_t dtte; 5531 5532 ASSERT(sfhmep->hme_page == NULL); 5533 5534 sfmmu_copytte(&sfhmep->hme_tte, &dtte); 5535 ASSERT(!TTE_IS_VALID(&dtte)); 5536 } 5537 #endif 5538 5539 if (pml) { 5540 sfmmu_mlist_exit(pml); 5541 } 5542 5543 addr += TTEBYTES(ttesz); 5544 sfhmep++; 5545 DEMAP_RANGE_NEXTPG(dmrp); 5546 } 5547 if (ttecnt > 0) 5548 atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -ttecnt); 5549 return (addr); 5550 } 5551 5552 /* 5553 * Synchronize all the mappings in the range [addr..addr+len). 5554 * Can be called with clearflag having two states: 5555 * HAT_SYNC_DONTZERO means just return the rm stats 5556 * HAT_SYNC_ZERORM means zero rm bits in the tte and return the stats 5557 */ 5558 void 5559 hat_sync(struct hat *sfmmup, caddr_t addr, size_t len, uint_t clearflag) 5560 { 5561 struct hmehash_bucket *hmebp; 5562 hmeblk_tag hblktag; 5563 int hmeshift, hashno = 1; 5564 struct hme_blk *hmeblkp, *list = NULL; 5565 caddr_t endaddr; 5566 cpuset_t cpuset; 5567 5568 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 5569 ASSERT((sfmmup == ksfmmup) || 5570 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 5571 ASSERT((len & MMU_PAGEOFFSET) == 0); 5572 ASSERT((clearflag == HAT_SYNC_DONTZERO) || 5573 (clearflag == HAT_SYNC_ZERORM)); 5574 5575 CPUSET_ZERO(cpuset); 5576 5577 endaddr = addr + len; 5578 hblktag.htag_id = sfmmup; 5579 /* 5580 * Spitfire supports 4 page sizes. 5581 * Most pages are expected to be of the smallest page 5582 * size (8K) and these will not need to be rehashed. 64K 5583 * pages also don't need to be rehashed because the an hmeblk 5584 * spans 64K of address space. 512K pages might need 1 rehash and 5585 * and 4M pages 2 rehashes. 5586 */ 5587 while (addr < endaddr) { 5588 hmeshift = HME_HASH_SHIFT(hashno); 5589 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 5590 hblktag.htag_rehash = hashno; 5591 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 5592 5593 SFMMU_HASH_LOCK(hmebp); 5594 5595 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 5596 if (hmeblkp != NULL) { 5597 /* 5598 * We've encountered a shadow hmeblk so skip the range 5599 * of the next smaller mapping size. 5600 */ 5601 if (hmeblkp->hblk_shw_bit) { 5602 ASSERT(sfmmup != ksfmmup); 5603 ASSERT(hashno > 1); 5604 addr = (caddr_t)P2END((uintptr_t)addr, 5605 TTEBYTES(hashno - 1)); 5606 } else { 5607 addr = sfmmu_hblk_sync(sfmmup, hmeblkp, 5608 addr, endaddr, clearflag); 5609 } 5610 SFMMU_HASH_UNLOCK(hmebp); 5611 hashno = 1; 5612 continue; 5613 } 5614 SFMMU_HASH_UNLOCK(hmebp); 5615 5616 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 5617 /* 5618 * We have traversed the whole list and rehashed 5619 * if necessary without finding the address to sync. 5620 * This is ok so we increment the address by the 5621 * smallest hmeblk range for kernel mappings and the 5622 * largest hmeblk range, to account for shadow hmeblks, 5623 * for user mappings and continue. 
5624 */ 5625 if (sfmmup == ksfmmup) 5626 addr = (caddr_t)P2END((uintptr_t)addr, 5627 TTEBYTES(1)); 5628 else 5629 addr = (caddr_t)P2END((uintptr_t)addr, 5630 TTEBYTES(hashno)); 5631 hashno = 1; 5632 } else { 5633 hashno++; 5634 } 5635 } 5636 sfmmu_hblks_list_purge(&list); 5637 cpuset = sfmmup->sfmmu_cpusran; 5638 xt_sync(cpuset); 5639 } 5640 5641 static caddr_t 5642 sfmmu_hblk_sync(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 5643 caddr_t endaddr, int clearflag) 5644 { 5645 tte_t tte, ttemod; 5646 struct sf_hment *sfhmep; 5647 int ttesz; 5648 struct page *pp; 5649 kmutex_t *pml; 5650 int ret; 5651 5652 ASSERT(hmeblkp->hblk_shw_bit == 0); 5653 5654 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 5655 5656 ttesz = get_hblk_ttesz(hmeblkp); 5657 HBLKTOHME(sfhmep, hmeblkp, addr); 5658 5659 while (addr < endaddr) { 5660 sfmmu_copytte(&sfhmep->hme_tte, &tte); 5661 if (TTE_IS_VALID(&tte)) { 5662 pml = NULL; 5663 pp = sfhmep->hme_page; 5664 if (pp) { 5665 pml = sfmmu_mlist_enter(pp); 5666 } 5667 if (pp != sfhmep->hme_page) { 5668 /* 5669 * tte most have been unloaded 5670 * underneath us. Recheck 5671 */ 5672 ASSERT(pml); 5673 sfmmu_mlist_exit(pml); 5674 continue; 5675 } 5676 5677 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 5678 5679 if (clearflag == HAT_SYNC_ZERORM) { 5680 ttemod = tte; 5681 TTE_CLR_RM(&ttemod); 5682 ret = sfmmu_modifytte_try(&tte, &ttemod, 5683 &sfhmep->hme_tte); 5684 if (ret < 0) { 5685 if (pml) { 5686 sfmmu_mlist_exit(pml); 5687 } 5688 continue; 5689 } 5690 5691 if (ret > 0) { 5692 sfmmu_tlb_demap(addr, sfmmup, 5693 hmeblkp, 0, 0); 5694 } 5695 } 5696 sfmmu_ttesync(sfmmup, addr, &tte, pp); 5697 if (pml) { 5698 sfmmu_mlist_exit(pml); 5699 } 5700 } 5701 addr += TTEBYTES(ttesz); 5702 sfhmep++; 5703 } 5704 return (addr); 5705 } 5706 5707 /* 5708 * This function will sync a tte to the page struct and it will 5709 * update the hat stats. Currently it allows us to pass a NULL pp 5710 * and we will simply update the stats. We may want to change this 5711 * so we only keep stats for pages backed by pp's. 5712 */ 5713 static void 5714 sfmmu_ttesync(struct hat *sfmmup, caddr_t addr, tte_t *ttep, page_t *pp) 5715 { 5716 uint_t rm = 0; 5717 int sz; 5718 pgcnt_t npgs; 5719 5720 ASSERT(TTE_IS_VALID(ttep)); 5721 5722 if (TTE_IS_NOSYNC(ttep)) { 5723 return; 5724 } 5725 5726 if (TTE_IS_REF(ttep)) { 5727 rm = P_REF; 5728 } 5729 if (TTE_IS_MOD(ttep)) { 5730 rm |= P_MOD; 5731 } 5732 5733 if (rm == 0) { 5734 return; 5735 } 5736 5737 sz = TTE_CSZ(ttep); 5738 if (sfmmup->sfmmu_rmstat) { 5739 int i; 5740 caddr_t vaddr = addr; 5741 5742 for (i = 0; i < TTEPAGES(sz); i++, vaddr += MMU_PAGESIZE) { 5743 hat_setstat(sfmmup->sfmmu_as, vaddr, MMU_PAGESIZE, rm); 5744 } 5745 5746 } 5747 5748 /* 5749 * XXX I want to use cas to update nrm bits but they 5750 * currently belong in common/vm and not in hat where 5751 * they should be. 5752 * The nrm bits are protected by the same mutex as 5753 * the one that protects the page's mapping list. 5754 */ 5755 if (!pp) 5756 return; 5757 ASSERT(sfmmu_mlist_held(pp)); 5758 /* 5759 * If the tte is for a large page, we need to sync all the 5760 * pages covered by the tte. 5761 */ 5762 if (sz != TTE8K) { 5763 ASSERT(pp->p_szc != 0); 5764 pp = PP_GROUPLEADER(pp, sz); 5765 ASSERT(sfmmu_mlist_held(pp)); 5766 } 5767 5768 /* Get number of pages from tte size. 
*/ 5769 npgs = TTEPAGES(sz); 5770 5771 do { 5772 ASSERT(pp); 5773 ASSERT(sfmmu_mlist_held(pp)); 5774 if (((rm & P_REF) != 0 && !PP_ISREF(pp)) || 5775 ((rm & P_MOD) != 0 && !PP_ISMOD(pp))) 5776 hat_page_setattr(pp, rm); 5777 5778 /* 5779 * Are we done? If not, we must have a large mapping. 5780 * For large mappings we need to sync the rest of the pages 5781 * covered by this tte; goto the next page. 5782 */ 5783 } while (--npgs > 0 && (pp = PP_PAGENEXT(pp))); 5784 } 5785 5786 /* 5787 * Execute pre-callback handler of each pa_hment linked to pp 5788 * 5789 * Inputs: 5790 * flag: either HAT_PRESUSPEND or HAT_SUSPEND. 5791 * capture_cpus: pointer to return value (below) 5792 * 5793 * Returns: 5794 * Propagates the subsystem callback return values back to the caller; 5795 * returns 0 on success. If capture_cpus is non-NULL, the value returned 5796 * is zero if all of the pa_hments are of a type that do not require 5797 * capturing CPUs prior to suspending the mapping, else it is 1. 5798 */ 5799 static int 5800 hat_pageprocess_precallbacks(struct page *pp, uint_t flag, int *capture_cpus) 5801 { 5802 struct sf_hment *sfhmep; 5803 struct pa_hment *pahmep; 5804 int (*f)(caddr_t, uint_t, uint_t, void *); 5805 int ret; 5806 id_t id; 5807 int locked = 0; 5808 kmutex_t *pml; 5809 5810 ASSERT(PAGE_EXCL(pp)); 5811 if (!sfmmu_mlist_held(pp)) { 5812 pml = sfmmu_mlist_enter(pp); 5813 locked = 1; 5814 } 5815 5816 if (capture_cpus) 5817 *capture_cpus = 0; 5818 5819 top: 5820 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 5821 /* 5822 * skip sf_hments corresponding to VA<->PA mappings; 5823 * for pa_hment's, hme_tte.ll is zero 5824 */ 5825 if (!IS_PAHME(sfhmep)) 5826 continue; 5827 5828 pahmep = sfhmep->hme_data; 5829 ASSERT(pahmep != NULL); 5830 5831 /* 5832 * skip if pre-handler has been called earlier in this loop 5833 */ 5834 if (pahmep->flags & flag) 5835 continue; 5836 5837 id = pahmep->cb_id; 5838 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid); 5839 if (capture_cpus && sfmmu_cb_table[id].capture_cpus != 0) 5840 *capture_cpus = 1; 5841 if ((f = sfmmu_cb_table[id].prehandler) == NULL) { 5842 pahmep->flags |= flag; 5843 continue; 5844 } 5845 5846 /* 5847 * Drop the mapping list lock to avoid locking order issues. 5848 */ 5849 if (locked) 5850 sfmmu_mlist_exit(pml); 5851 5852 ret = f(pahmep->addr, pahmep->len, flag, pahmep->pvt); 5853 if (ret != 0) 5854 return (ret); /* caller must do the cleanup */ 5855 5856 if (locked) { 5857 pml = sfmmu_mlist_enter(pp); 5858 pahmep->flags |= flag; 5859 goto top; 5860 } 5861 5862 pahmep->flags |= flag; 5863 } 5864 5865 if (locked) 5866 sfmmu_mlist_exit(pml); 5867 5868 return (0); 5869 } 5870 5871 /* 5872 * Execute post-callback handler of each pa_hment linked to pp 5873 * 5874 * Same overall assumptions and restrictions apply as for 5875 * hat_pageprocess_precallbacks(). 
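 *
 * Each matching posthandler is invoked as
 *	f(pahmep->addr, pahmep->len, flag, pahmep->pvt, newpfn)
 * where newpfn is the constituent pfn the callback address now maps to;
 * a non-zero return from the handler is fatal (panic).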
5876 */ 5877 static void 5878 hat_pageprocess_postcallbacks(struct page *pp, uint_t flag) 5879 { 5880 pfn_t pgpfn = pp->p_pagenum; 5881 pfn_t pgmask = btop(page_get_pagesize(pp->p_szc)) - 1; 5882 pfn_t newpfn; 5883 struct sf_hment *sfhmep; 5884 struct pa_hment *pahmep; 5885 int (*f)(caddr_t, uint_t, uint_t, void *, pfn_t); 5886 id_t id; 5887 int locked = 0; 5888 kmutex_t *pml; 5889 5890 ASSERT(PAGE_EXCL(pp)); 5891 if (!sfmmu_mlist_held(pp)) { 5892 pml = sfmmu_mlist_enter(pp); 5893 locked = 1; 5894 } 5895 5896 top: 5897 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 5898 /* 5899 * skip sf_hments corresponding to VA<->PA mappings; 5900 * for pa_hment's, hme_tte.ll is zero 5901 */ 5902 if (!IS_PAHME(sfhmep)) 5903 continue; 5904 5905 pahmep = sfhmep->hme_data; 5906 ASSERT(pahmep != NULL); 5907 5908 if ((pahmep->flags & flag) == 0) 5909 continue; 5910 5911 pahmep->flags &= ~flag; 5912 5913 id = pahmep->cb_id; 5914 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid); 5915 if ((f = sfmmu_cb_table[id].posthandler) == NULL) 5916 continue; 5917 5918 /* 5919 * Convert the base page PFN into the constituent PFN 5920 * which is needed by the callback handler. 5921 */ 5922 newpfn = pgpfn | (btop((uintptr_t)pahmep->addr) & pgmask); 5923 5924 /* 5925 * Drop the mapping list lock to avoid locking order issues. 5926 */ 5927 if (locked) 5928 sfmmu_mlist_exit(pml); 5929 5930 if (f(pahmep->addr, pahmep->len, flag, pahmep->pvt, newpfn) 5931 != 0) 5932 panic("sfmmu: posthandler failed"); 5933 5934 if (locked) { 5935 pml = sfmmu_mlist_enter(pp); 5936 goto top; 5937 } 5938 } 5939 5940 if (locked) 5941 sfmmu_mlist_exit(pml); 5942 } 5943 5944 /* 5945 * Suspend locked kernel mapping 5946 */ 5947 void 5948 hat_pagesuspend(struct page *pp) 5949 { 5950 struct sf_hment *sfhmep; 5951 sfmmu_t *sfmmup; 5952 tte_t tte, ttemod; 5953 struct hme_blk *hmeblkp; 5954 caddr_t addr; 5955 int index, cons; 5956 cpuset_t cpuset; 5957 5958 ASSERT(PAGE_EXCL(pp)); 5959 ASSERT(sfmmu_mlist_held(pp)); 5960 5961 mutex_enter(&kpr_suspendlock); 5962 5963 /* 5964 * Call into dtrace to tell it we're about to suspend a 5965 * kernel mapping. This prevents us from running into issues 5966 * with probe context trying to touch a suspended page 5967 * in the relocation codepath itself. 5968 */ 5969 if (dtrace_kreloc_init) 5970 (*dtrace_kreloc_init)(); 5971 5972 index = PP_MAPINDEX(pp); 5973 cons = TTE8K; 5974 5975 retry: 5976 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 5977 5978 if (IS_PAHME(sfhmep)) 5979 continue; 5980 5981 if (get_hblk_ttesz(sfmmu_hmetohblk(sfhmep)) != cons) 5982 continue; 5983 5984 /* 5985 * Loop until we successfully set the suspend bit in 5986 * the TTE. 5987 */ 5988 again: 5989 sfmmu_copytte(&sfhmep->hme_tte, &tte); 5990 ASSERT(TTE_IS_VALID(&tte)); 5991 5992 ttemod = tte; 5993 TTE_SET_SUSPEND(&ttemod); 5994 if (sfmmu_modifytte_try(&tte, &ttemod, 5995 &sfhmep->hme_tte) < 0) 5996 goto again; 5997 5998 /* 5999 * Invalidate TSB entry 6000 */ 6001 hmeblkp = sfmmu_hmetohblk(sfhmep); 6002 6003 sfmmup = hblktosfmmu(hmeblkp); 6004 ASSERT(sfmmup == ksfmmup); 6005 6006 addr = tte_to_vaddr(hmeblkp, tte); 6007 6008 /* 6009 * No need to make sure that the TSB for this sfmmu is 6010 * not being relocated since it is ksfmmup and thus it 6011 * will never be relocated. 
6012 */ 6013 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp); 6014 6015 /* 6016 * Update xcall stats 6017 */ 6018 cpuset = cpu_ready_set; 6019 CPUSET_DEL(cpuset, CPU->cpu_id); 6020 6021 /* LINTED: constant in conditional context */ 6022 SFMMU_XCALL_STATS(ksfmmup); 6023 6024 /* 6025 * Flush TLB entry on remote CPU's 6026 */ 6027 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, 6028 (uint64_t)ksfmmup); 6029 xt_sync(cpuset); 6030 6031 /* 6032 * Flush TLB entry on local CPU 6033 */ 6034 vtag_flushpage(addr, (uint64_t)ksfmmup); 6035 } 6036 6037 while (index != 0) { 6038 index = index >> 1; 6039 if (index != 0) 6040 cons++; 6041 if (index & 0x1) { 6042 pp = PP_GROUPLEADER(pp, cons); 6043 goto retry; 6044 } 6045 } 6046 } 6047 6048 #ifdef DEBUG 6049 6050 #define N_PRLE 1024 6051 struct prle { 6052 page_t *targ; 6053 page_t *repl; 6054 int status; 6055 int pausecpus; 6056 hrtime_t whence; 6057 }; 6058 6059 static struct prle page_relocate_log[N_PRLE]; 6060 static int prl_entry; 6061 static kmutex_t prl_mutex; 6062 6063 #define PAGE_RELOCATE_LOG(t, r, s, p) \ 6064 mutex_enter(&prl_mutex); \ 6065 page_relocate_log[prl_entry].targ = *(t); \ 6066 page_relocate_log[prl_entry].repl = *(r); \ 6067 page_relocate_log[prl_entry].status = (s); \ 6068 page_relocate_log[prl_entry].pausecpus = (p); \ 6069 page_relocate_log[prl_entry].whence = gethrtime(); \ 6070 prl_entry = (prl_entry == (N_PRLE - 1))? 0 : prl_entry + 1; \ 6071 mutex_exit(&prl_mutex); 6072 6073 #else /* !DEBUG */ 6074 #define PAGE_RELOCATE_LOG(t, r, s, p) 6075 #endif 6076 6077 /* 6078 * Core Kernel Page Relocation Algorithm 6079 * 6080 * Input: 6081 * 6082 * target : constituent pages are SE_EXCL locked. 6083 * replacement: constituent pages are SE_EXCL locked. 6084 * 6085 * Output: 6086 * 6087 * nrelocp: number of pages relocated 6088 */ 6089 int 6090 hat_page_relocate(page_t **target, page_t **replacement, spgcnt_t *nrelocp) 6091 { 6092 page_t *targ, *repl; 6093 page_t *tpp, *rpp; 6094 kmutex_t *low, *high; 6095 spgcnt_t npages, i; 6096 page_t *pl = NULL; 6097 int old_pil; 6098 cpuset_t cpuset; 6099 int cap_cpus; 6100 int ret; 6101 6102 if (hat_kpr_enabled == 0 || !kcage_on || PP_ISNORELOC(*target)) { 6103 PAGE_RELOCATE_LOG(target, replacement, EAGAIN, -1); 6104 return (EAGAIN); 6105 } 6106 6107 mutex_enter(&kpr_mutex); 6108 kreloc_thread = curthread; 6109 6110 targ = *target; 6111 repl = *replacement; 6112 ASSERT(repl != NULL); 6113 ASSERT(targ->p_szc == repl->p_szc); 6114 6115 npages = page_get_pagecnt(targ->p_szc); 6116 6117 /* 6118 * unload VA<->PA mappings that are not locked 6119 */ 6120 tpp = targ; 6121 for (i = 0; i < npages; i++) { 6122 (void) hat_pageunload(tpp, SFMMU_KERNEL_RELOC); 6123 tpp++; 6124 } 6125 6126 /* 6127 * Do "presuspend" callbacks, in a context from which we can still 6128 * block as needed. Note that we don't hold the mapping list lock 6129 * of "targ" at this point due to potential locking order issues; 6130 * we assume that between the hat_pageunload() above and holding 6131 * the SE_EXCL lock that the mapping list *cannot* change at this 6132 * point. 6133 */ 6134 ret = hat_pageprocess_precallbacks(targ, HAT_PRESUSPEND, &cap_cpus); 6135 if (ret != 0) { 6136 /* 6137 * EIO translates to fatal error, for all others cleanup 6138 * and return EAGAIN. 
6139 */ 6140 ASSERT(ret != EIO); 6141 hat_pageprocess_postcallbacks(targ, HAT_POSTUNSUSPEND); 6142 PAGE_RELOCATE_LOG(target, replacement, ret, -1); 6143 kreloc_thread = NULL; 6144 mutex_exit(&kpr_mutex); 6145 return (EAGAIN); 6146 } 6147 6148 /* 6149 * acquire p_mapping list lock for both the target and replacement 6150 * root pages. 6151 * 6152 * low and high refer to the need to grab the mlist locks in a 6153 * specific order in order to prevent race conditions. Thus the 6154 * lower lock must be grabbed before the higher lock. 6155 * 6156 * This will block hat_unload's accessing p_mapping list. Since 6157 * we have SE_EXCL lock, hat_memload and hat_pageunload will be 6158 * blocked. Thus, no one else will be accessing the p_mapping list 6159 * while we suspend and reload the locked mapping below. 6160 */ 6161 tpp = targ; 6162 rpp = repl; 6163 sfmmu_mlist_reloc_enter(tpp, rpp, &low, &high); 6164 6165 kpreempt_disable(); 6166 6167 #ifdef VAC 6168 /* 6169 * If the replacement page is of a different virtual color 6170 * than the page it is replacing, we need to handle the VAC 6171 * consistency for it just as we would if we were setting up 6172 * a new mapping to a page. 6173 */ 6174 if ((tpp->p_szc == 0) && (PP_GET_VCOLOR(rpp) != NO_VCOLOR)) { 6175 if (tpp->p_vcolor != rpp->p_vcolor) { 6176 sfmmu_cache_flushcolor(PP_GET_VCOLOR(rpp), 6177 rpp->p_pagenum); 6178 } 6179 } 6180 #endif 6181 6182 /* 6183 * We raise our PIL to 13 so that we don't get captured by 6184 * another CPU or pinned by an interrupt thread. We can't go to 6185 * PIL 14 since the nexus driver(s) may need to interrupt at 6186 * that level in the case of IOMMU pseudo mappings. 6187 */ 6188 cpuset = cpu_ready_set; 6189 CPUSET_DEL(cpuset, CPU->cpu_id); 6190 if (!cap_cpus || CPUSET_ISNULL(cpuset)) { 6191 old_pil = splr(XCALL_PIL); 6192 } else { 6193 old_pil = -1; 6194 xc_attention(cpuset); 6195 } 6196 ASSERT(getpil() == XCALL_PIL); 6197 6198 /* 6199 * Now do suspend callbacks. In the case of an IOMMU mapping 6200 * this will suspend all DMA activity to the page while it is 6201 * being relocated. Since we are well above LOCK_LEVEL and CPUs 6202 * may be captured at this point we should have acquired any needed 6203 * locks in the presuspend callback. 6204 */ 6205 ret = hat_pageprocess_precallbacks(targ, HAT_SUSPEND, NULL); 6206 if (ret != 0) { 6207 repl = targ; 6208 goto suspend_fail; 6209 } 6210 6211 /* 6212 * Raise the PIL yet again, this time to block all high-level 6213 * interrupts on this CPU. This is necessary to prevent an 6214 * interrupt routine from pinning the thread which holds the 6215 * mapping suspended and then touching the suspended page. 6216 * 6217 * Once the page is suspended we also need to be careful to 6218 * avoid calling any functions which touch any seg_kmem memory 6219 * since that memory may be backed by the very page we are 6220 * relocating in here! 6221 */ 6222 hat_pagesuspend(targ); 6223 6224 /* 6225 * Now that we are confident everybody has stopped using this page, 6226 * copy the page contents. Note we use a physical copy to prevent 6227 * locking issues and to avoid fpRAS because we can't handle it in 6228 * this context. 6229 */ 6230 for (i = 0; i < npages; i++, tpp++, rpp++) { 6231 /* 6232 * Copy the contents of the page. 6233 */ 6234 ppcopy_kernel(tpp, rpp); 6235 } 6236 6237 tpp = targ; 6238 rpp = repl; 6239 for (i = 0; i < npages; i++, tpp++, rpp++) { 6240 /* 6241 * Copy attributes. VAC consistency was handled above, 6242 * if required. 
6243 */ 6244 rpp->p_nrm = tpp->p_nrm; 6245 tpp->p_nrm = 0; 6246 rpp->p_index = tpp->p_index; 6247 tpp->p_index = 0; 6248 #ifdef VAC 6249 rpp->p_vcolor = tpp->p_vcolor; 6250 #endif 6251 } 6252 6253 /* 6254 * First, unsuspend the page, if we set the suspend bit, and transfer 6255 * the mapping list from the target page to the replacement page. 6256 * Next process postcallbacks; since pa_hment's are linked only to the 6257 * p_mapping list of root page, we don't iterate over the constituent 6258 * pages. 6259 */ 6260 hat_pagereload(targ, repl); 6261 6262 suspend_fail: 6263 hat_pageprocess_postcallbacks(repl, HAT_UNSUSPEND); 6264 6265 /* 6266 * Now lower our PIL and release any captured CPUs since we 6267 * are out of the "danger zone". After this it will again be 6268 * safe to acquire adaptive mutex locks, or to drop them... 6269 */ 6270 if (old_pil != -1) { 6271 splx(old_pil); 6272 } else { 6273 xc_dismissed(cpuset); 6274 } 6275 6276 kpreempt_enable(); 6277 6278 sfmmu_mlist_reloc_exit(low, high); 6279 6280 /* 6281 * Postsuspend callbacks should drop any locks held across 6282 * the suspend callbacks. As before, we don't hold the mapping 6283 * list lock at this point.. our assumption is that the mapping 6284 * list still can't change due to our holding SE_EXCL lock and 6285 * there being no unlocked mappings left. Hence the restriction 6286 * on calling context to hat_delete_callback() 6287 */ 6288 hat_pageprocess_postcallbacks(repl, HAT_POSTUNSUSPEND); 6289 if (ret != 0) { 6290 /* 6291 * The second presuspend call failed: we got here through 6292 * the suspend_fail label above. 6293 */ 6294 ASSERT(ret != EIO); 6295 PAGE_RELOCATE_LOG(target, replacement, ret, cap_cpus); 6296 kreloc_thread = NULL; 6297 mutex_exit(&kpr_mutex); 6298 return (EAGAIN); 6299 } 6300 6301 /* 6302 * Now that we're out of the performance critical section we can 6303 * take care of updating the hash table, since we still 6304 * hold all the pages locked SE_EXCL at this point we 6305 * needn't worry about things changing out from under us. 6306 */ 6307 tpp = targ; 6308 rpp = repl; 6309 for (i = 0; i < npages; i++, tpp++, rpp++) { 6310 6311 /* 6312 * replace targ with replacement in page_hash table 6313 */ 6314 targ = tpp; 6315 page_relocate_hash(rpp, targ); 6316 6317 /* 6318 * concatenate target; caller of platform_page_relocate() 6319 * expects target to be concatenated after returning. 6320 */ 6321 ASSERT(targ->p_next == targ); 6322 ASSERT(targ->p_prev == targ); 6323 page_list_concat(&pl, &targ); 6324 } 6325 6326 ASSERT(*target == pl); 6327 *nrelocp = npages; 6328 PAGE_RELOCATE_LOG(target, replacement, 0, cap_cpus); 6329 kreloc_thread = NULL; 6330 mutex_exit(&kpr_mutex); 6331 return (0); 6332 } 6333 6334 /* 6335 * Called when stray pa_hments are found attached to a page which is 6336 * being freed. Notify the subsystem which attached the pa_hment of 6337 * the error if it registered a suitable handler, else panic. 6338 */ 6339 static void 6340 sfmmu_pahment_leaked(struct pa_hment *pahmep) 6341 { 6342 id_t cb_id = pahmep->cb_id; 6343 6344 ASSERT(cb_id >= (id_t)0 && cb_id < sfmmu_cb_nextid); 6345 if (sfmmu_cb_table[cb_id].errhandler != NULL) { 6346 if (sfmmu_cb_table[cb_id].errhandler(pahmep->addr, pahmep->len, 6347 HAT_CB_ERR_LEAKED, pahmep->pvt) == 0) 6348 return; /* non-fatal */ 6349 } 6350 panic("pa_hment leaked: 0x%p", pahmep); 6351 } 6352 6353 /* 6354 * Remove all mappings to page 'pp'. 
6355 */ 6356 int 6357 hat_pageunload(struct page *pp, uint_t forceflag) 6358 { 6359 struct page *origpp = pp; 6360 struct sf_hment *sfhme, *tmphme; 6361 struct hme_blk *hmeblkp; 6362 kmutex_t *pml; 6363 #ifdef VAC 6364 kmutex_t *pmtx; 6365 #endif 6366 cpuset_t cpuset, tset; 6367 int index, cons; 6368 int xhme_blks; 6369 int pa_hments; 6370 6371 ASSERT(PAGE_EXCL(pp)); 6372 6373 retry_xhat: 6374 tmphme = NULL; 6375 xhme_blks = 0; 6376 pa_hments = 0; 6377 CPUSET_ZERO(cpuset); 6378 6379 pml = sfmmu_mlist_enter(pp); 6380 6381 #ifdef VAC 6382 if (pp->p_kpmref) 6383 sfmmu_kpm_pageunload(pp); 6384 ASSERT(!PP_ISMAPPED_KPM(pp)); 6385 #endif 6386 6387 index = PP_MAPINDEX(pp); 6388 cons = TTE8K; 6389 retry: 6390 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 6391 tmphme = sfhme->hme_next; 6392 6393 if (IS_PAHME(sfhme)) { 6394 ASSERT(sfhme->hme_data != NULL); 6395 pa_hments++; 6396 continue; 6397 } 6398 6399 hmeblkp = sfmmu_hmetohblk(sfhme); 6400 if (hmeblkp->hblk_xhat_bit) { 6401 struct xhat_hme_blk *xblk = 6402 (struct xhat_hme_blk *)hmeblkp; 6403 6404 (void) XHAT_PAGEUNLOAD(xblk->xhat_hme_blk_hat, 6405 pp, forceflag, XBLK2PROVBLK(xblk)); 6406 6407 xhme_blks = 1; 6408 continue; 6409 } 6410 6411 /* 6412 * If there are kernel mappings don't unload them, they will 6413 * be suspended. 6414 */ 6415 if (forceflag == SFMMU_KERNEL_RELOC && hmeblkp->hblk_lckcnt && 6416 hmeblkp->hblk_tag.htag_id == ksfmmup) 6417 continue; 6418 6419 tset = sfmmu_pageunload(pp, sfhme, cons); 6420 CPUSET_OR(cpuset, tset); 6421 } 6422 6423 while (index != 0) { 6424 index = index >> 1; 6425 if (index != 0) 6426 cons++; 6427 if (index & 0x1) { 6428 /* Go to leading page */ 6429 pp = PP_GROUPLEADER(pp, cons); 6430 ASSERT(sfmmu_mlist_held(pp)); 6431 goto retry; 6432 } 6433 } 6434 6435 /* 6436 * cpuset may be empty if the page was only mapped by segkpm, 6437 * in which case we won't actually cross-trap. 6438 */ 6439 xt_sync(cpuset); 6440 6441 /* 6442 * The page should have no mappings at this point, unless 6443 * we were called from hat_page_relocate() in which case we 6444 * leave the locked mappings which will be suspended later. 6445 */ 6446 ASSERT(!PP_ISMAPPED(origpp) || xhme_blks || pa_hments || 6447 (forceflag == SFMMU_KERNEL_RELOC)); 6448 6449 #ifdef VAC 6450 if (PP_ISTNC(pp)) { 6451 if (cons == TTE8K) { 6452 pmtx = sfmmu_page_enter(pp); 6453 PP_CLRTNC(pp); 6454 sfmmu_page_exit(pmtx); 6455 } else { 6456 conv_tnc(pp, cons); 6457 } 6458 } 6459 #endif /* VAC */ 6460 6461 if (pa_hments && forceflag != SFMMU_KERNEL_RELOC) { 6462 /* 6463 * Unlink any pa_hments and free them, calling back 6464 * the responsible subsystem to notify it of the error. 6465 * This can occur in situations such as drivers leaking 6466 * DMA handles: naughty, but common enough that we'd like 6467 * to keep the system running rather than bringing it 6468 * down with an obscure error like "pa_hment leaked" 6469 * which doesn't aid the user in debugging their driver. 6470 */ 6471 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 6472 tmphme = sfhme->hme_next; 6473 if (IS_PAHME(sfhme)) { 6474 struct pa_hment *pahmep = sfhme->hme_data; 6475 sfmmu_pahment_leaked(pahmep); 6476 HME_SUB(sfhme, pp); 6477 kmem_cache_free(pa_hment_cache, pahmep); 6478 } 6479 } 6480 6481 ASSERT(!PP_ISMAPPED(origpp) || xhme_blks); 6482 } 6483 6484 sfmmu_mlist_exit(pml); 6485 6486 /* 6487 * XHAT may not have finished unloading pages 6488 * because some other thread was waiting for 6489 * mlist lock and XHAT_PAGEUNLOAD let it do 6490 * the job. 
6491 */ 6492 if (xhme_blks) { 6493 pp = origpp; 6494 goto retry_xhat; 6495 } 6496 6497 return (0); 6498 } 6499 6500 cpuset_t 6501 sfmmu_pageunload(page_t *pp, struct sf_hment *sfhme, int cons) 6502 { 6503 struct hme_blk *hmeblkp; 6504 sfmmu_t *sfmmup; 6505 tte_t tte, ttemod; 6506 #ifdef DEBUG 6507 tte_t orig_old; 6508 #endif /* DEBUG */ 6509 caddr_t addr; 6510 int ttesz; 6511 int ret; 6512 cpuset_t cpuset; 6513 6514 ASSERT(pp != NULL); 6515 ASSERT(sfmmu_mlist_held(pp)); 6516 ASSERT(pp->p_vnode != &kvp); 6517 6518 CPUSET_ZERO(cpuset); 6519 6520 hmeblkp = sfmmu_hmetohblk(sfhme); 6521 6522 readtte: 6523 sfmmu_copytte(&sfhme->hme_tte, &tte); 6524 if (TTE_IS_VALID(&tte)) { 6525 sfmmup = hblktosfmmu(hmeblkp); 6526 ttesz = get_hblk_ttesz(hmeblkp); 6527 /* 6528 * Only unload mappings of 'cons' size. 6529 */ 6530 if (ttesz != cons) 6531 return (cpuset); 6532 6533 /* 6534 * Note that we have p_mapping lock, but no hash lock here. 6535 * hblk_unload() has to have both hash lock AND p_mapping 6536 * lock before it tries to modify tte. So, the tte could 6537 * not become invalid in the sfmmu_modifytte_try() below. 6538 */ 6539 ttemod = tte; 6540 #ifdef DEBUG 6541 orig_old = tte; 6542 #endif /* DEBUG */ 6543 6544 TTE_SET_INVALID(&ttemod); 6545 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 6546 if (ret < 0) { 6547 #ifdef DEBUG 6548 /* only R/M bits can change. */ 6549 chk_tte(&orig_old, &tte, &ttemod, hmeblkp); 6550 #endif /* DEBUG */ 6551 goto readtte; 6552 } 6553 6554 if (ret == 0) { 6555 panic("pageunload: cas failed?"); 6556 } 6557 6558 addr = tte_to_vaddr(hmeblkp, tte); 6559 6560 sfmmu_ttesync(sfmmup, addr, &tte, pp); 6561 6562 atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -1); 6563 6564 /* 6565 * We need to flush the page from the virtual cache 6566 * in order to prevent a virtual cache alias 6567 * inconsistency. The particular scenario we need 6568 * to worry about is: 6569 * Given: va1 and va2 are two virtual address that 6570 * alias and will map the same physical address. 6571 * 1. mapping exists from va1 to pa and data has 6572 * been read into the cache. 6573 * 2. unload va1. 6574 * 3. load va2 and modify data using va2. 6575 * 4 unload va2. 6576 * 5. load va1 and reference data. Unless we flush 6577 * the data cache when we unload we will get 6578 * stale data. 6579 * This scenario is taken care of by using virtual 6580 * page coloring. 6581 */ 6582 if (sfmmup->sfmmu_ismhat) { 6583 /* 6584 * Flush TSBs, TLBs and caches 6585 * of every process 6586 * sharing this ism segment. 6587 */ 6588 sfmmu_hat_lock_all(); 6589 mutex_enter(&ism_mlist_lock); 6590 kpreempt_disable(); 6591 if (do_virtual_coloring) 6592 sfmmu_ismtlbcache_demap(addr, sfmmup, hmeblkp, 6593 pp->p_pagenum, CACHE_NO_FLUSH); 6594 else 6595 sfmmu_ismtlbcache_demap(addr, sfmmup, hmeblkp, 6596 pp->p_pagenum, CACHE_FLUSH); 6597 kpreempt_enable(); 6598 mutex_exit(&ism_mlist_lock); 6599 sfmmu_hat_unlock_all(); 6600 cpuset = cpu_ready_set; 6601 } else if (do_virtual_coloring) { 6602 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 6603 cpuset = sfmmup->sfmmu_cpusran; 6604 } else { 6605 sfmmu_tlbcache_demap(addr, sfmmup, hmeblkp, 6606 pp->p_pagenum, 0, FLUSH_NECESSARY_CPUS, 6607 CACHE_FLUSH, 0); 6608 cpuset = sfmmup->sfmmu_cpusran; 6609 } 6610 6611 /* 6612 * Hme_sub has to run after ttesync() and a_rss update. 6613 * See hblk_unload(). 
6614 */ 6615 HME_SUB(sfhme, pp); 6616 membar_stst(); 6617 6618 /* 6619 * We can not make ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS) 6620 * since pteload may have done a HME_ADD() right after 6621 * we did the HME_SUB() above. Hmecnt is now maintained 6622 * by cas only. no lock guranteed its value. The only 6623 * gurantee we have is the hmecnt should not be less than 6624 * what it should be so the hblk will not be taken away. 6625 * It's also important that we decremented the hmecnt after 6626 * we are done with hmeblkp so that this hmeblk won't be 6627 * stolen. 6628 */ 6629 ASSERT(hmeblkp->hblk_hmecnt > 0); 6630 ASSERT(hmeblkp->hblk_vcnt > 0); 6631 atomic_add_16(&hmeblkp->hblk_vcnt, -1); 6632 atomic_add_16(&hmeblkp->hblk_hmecnt, -1); 6633 /* 6634 * This is bug 4063182. 6635 * XXX: fixme 6636 * ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt || 6637 * !hmeblkp->hblk_lckcnt); 6638 */ 6639 } else { 6640 panic("invalid tte? pp %p &tte %p", 6641 (void *)pp, (void *)&tte); 6642 } 6643 6644 return (cpuset); 6645 } 6646 6647 /* 6648 * While relocating a kernel page, this function will move the mappings 6649 * from tpp to dpp and modify any associated data with these mappings. 6650 * It also unsuspends the suspended kernel mapping. 6651 */ 6652 static void 6653 hat_pagereload(struct page *tpp, struct page *dpp) 6654 { 6655 struct sf_hment *sfhme; 6656 tte_t tte, ttemod; 6657 int index, cons; 6658 6659 ASSERT(getpil() == PIL_MAX); 6660 ASSERT(sfmmu_mlist_held(tpp)); 6661 ASSERT(sfmmu_mlist_held(dpp)); 6662 6663 index = PP_MAPINDEX(tpp); 6664 cons = TTE8K; 6665 6666 /* Update real mappings to the page */ 6667 retry: 6668 for (sfhme = tpp->p_mapping; sfhme != NULL; sfhme = sfhme->hme_next) { 6669 if (IS_PAHME(sfhme)) 6670 continue; 6671 sfmmu_copytte(&sfhme->hme_tte, &tte); 6672 ttemod = tte; 6673 6674 /* 6675 * replace old pfn with new pfn in TTE 6676 */ 6677 PFN_TO_TTE(ttemod, dpp->p_pagenum); 6678 6679 /* 6680 * clear suspend bit 6681 */ 6682 ASSERT(TTE_IS_SUSPEND(&ttemod)); 6683 TTE_CLR_SUSPEND(&ttemod); 6684 6685 if (sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte) < 0) 6686 panic("hat_pagereload(): sfmmu_modifytte_try() failed"); 6687 6688 /* 6689 * set hme_page point to new page 6690 */ 6691 sfhme->hme_page = dpp; 6692 } 6693 6694 /* 6695 * move p_mapping list from old page to new page 6696 */ 6697 dpp->p_mapping = tpp->p_mapping; 6698 tpp->p_mapping = NULL; 6699 dpp->p_share = tpp->p_share; 6700 tpp->p_share = 0; 6701 6702 while (index != 0) { 6703 index = index >> 1; 6704 if (index != 0) 6705 cons++; 6706 if (index & 0x1) { 6707 tpp = PP_GROUPLEADER(tpp, cons); 6708 dpp = PP_GROUPLEADER(dpp, cons); 6709 goto retry; 6710 } 6711 } 6712 6713 if (dtrace_kreloc_fini) 6714 (*dtrace_kreloc_fini)(); 6715 mutex_exit(&kpr_suspendlock); 6716 } 6717 6718 uint_t 6719 hat_pagesync(struct page *pp, uint_t clearflag) 6720 { 6721 struct sf_hment *sfhme, *tmphme = NULL; 6722 struct hme_blk *hmeblkp; 6723 kmutex_t *pml; 6724 cpuset_t cpuset, tset; 6725 int index, cons; 6726 extern ulong_t po_share; 6727 page_t *save_pp = pp; 6728 6729 CPUSET_ZERO(cpuset); 6730 6731 if (PP_ISRO(pp) && (clearflag & HAT_SYNC_STOPON_MOD)) { 6732 return (PP_GENERIC_ATTR(pp)); 6733 } 6734 6735 if ((clearflag == (HAT_SYNC_STOPON_REF | HAT_SYNC_DONTZERO)) && 6736 PP_ISREF(pp)) { 6737 return (PP_GENERIC_ATTR(pp)); 6738 } 6739 6740 if ((clearflag == (HAT_SYNC_STOPON_MOD | HAT_SYNC_DONTZERO)) && 6741 PP_ISMOD(pp)) { 6742 return (PP_GENERIC_ATTR(pp)); 6743 } 6744 6745 if ((clearflag & HAT_SYNC_STOPON_SHARED) != 0 && 6746 (pp->p_share 
> po_share) && 6747 !(clearflag & HAT_SYNC_ZERORM)) { 6748 if (PP_ISRO(pp)) 6749 hat_page_setattr(pp, P_REF); 6750 return (PP_GENERIC_ATTR(pp)); 6751 } 6752 6753 clearflag &= ~HAT_SYNC_STOPON_SHARED; 6754 pml = sfmmu_mlist_enter(pp); 6755 index = PP_MAPINDEX(pp); 6756 cons = TTE8K; 6757 retry: 6758 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 6759 /* 6760 * We need to save the next hment on the list since 6761 * it is possible for pagesync to remove an invalid hment 6762 * from the list. 6763 */ 6764 tmphme = sfhme->hme_next; 6765 /* 6766 * If we are looking for large mappings and this hme doesn't 6767 * reach the range we are seeking, just ignore its. 6768 */ 6769 hmeblkp = sfmmu_hmetohblk(sfhme); 6770 if (hmeblkp->hblk_xhat_bit) 6771 continue; 6772 6773 if (hme_size(sfhme) < cons) 6774 continue; 6775 tset = sfmmu_pagesync(pp, sfhme, 6776 clearflag & ~HAT_SYNC_STOPON_RM); 6777 CPUSET_OR(cpuset, tset); 6778 /* 6779 * If clearflag is HAT_SYNC_DONTZERO, break out as soon 6780 * as the "ref" or "mod" is set. 6781 */ 6782 if ((clearflag & ~HAT_SYNC_STOPON_RM) == HAT_SYNC_DONTZERO && 6783 ((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp)) || 6784 ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp))) { 6785 index = 0; 6786 break; 6787 } 6788 } 6789 6790 while (index) { 6791 index = index >> 1; 6792 cons++; 6793 if (index & 0x1) { 6794 /* Go to leading page */ 6795 pp = PP_GROUPLEADER(pp, cons); 6796 goto retry; 6797 } 6798 } 6799 6800 xt_sync(cpuset); 6801 sfmmu_mlist_exit(pml); 6802 return (PP_GENERIC_ATTR(save_pp)); 6803 } 6804 6805 /* 6806 * Get all the hardware dependent attributes for a page struct 6807 */ 6808 static cpuset_t 6809 sfmmu_pagesync(struct page *pp, struct sf_hment *sfhme, 6810 uint_t clearflag) 6811 { 6812 caddr_t addr; 6813 tte_t tte, ttemod; 6814 struct hme_blk *hmeblkp; 6815 int ret; 6816 sfmmu_t *sfmmup; 6817 cpuset_t cpuset; 6818 6819 ASSERT(pp != NULL); 6820 ASSERT(sfmmu_mlist_held(pp)); 6821 ASSERT((clearflag == HAT_SYNC_DONTZERO) || 6822 (clearflag == HAT_SYNC_ZERORM)); 6823 6824 SFMMU_STAT(sf_pagesync); 6825 6826 CPUSET_ZERO(cpuset); 6827 6828 sfmmu_pagesync_retry: 6829 6830 sfmmu_copytte(&sfhme->hme_tte, &tte); 6831 if (TTE_IS_VALID(&tte)) { 6832 hmeblkp = sfmmu_hmetohblk(sfhme); 6833 sfmmup = hblktosfmmu(hmeblkp); 6834 addr = tte_to_vaddr(hmeblkp, tte); 6835 if (clearflag == HAT_SYNC_ZERORM) { 6836 ttemod = tte; 6837 TTE_CLR_RM(&ttemod); 6838 ret = sfmmu_modifytte_try(&tte, &ttemod, 6839 &sfhme->hme_tte); 6840 if (ret < 0) { 6841 /* 6842 * cas failed and the new value is not what 6843 * we want. 6844 */ 6845 goto sfmmu_pagesync_retry; 6846 } 6847 6848 if (ret > 0) { 6849 /* we win the cas */ 6850 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 6851 cpuset = sfmmup->sfmmu_cpusran; 6852 } 6853 } 6854 6855 sfmmu_ttesync(sfmmup, addr, &tte, pp); 6856 } 6857 return (cpuset); 6858 } 6859 6860 /* 6861 * Remove write permission from a mappings to a page, so that 6862 * we can detect the next modification of it. This requires modifying 6863 * the TTE then invalidating (demap) any TLB entry using that TTE. 6864 * This code is similar to sfmmu_pagesync(). 
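 *
 * The TTE update relies on a compare-and-swap retry loop: read the word,
 * compute the new value, and retry if another CPU changed it in the
 * meantime.  A minimal sketch of that loop, assuming the generic
 * atomic_cas_64() from <sys/atomic.h> rather than the TTE-aware
 * sfmmu_modifytte_try():
 *
 *      static void
 *      example_clear_bits(volatile uint64_t *wp, uint64_t bits)
 *      {
 *              uint64_t old, new;
 *
 *              do {
 *                      old = *wp;
 *                      new = old & ~bits;
 *              } while (atomic_cas_64(wp, old, new) != old);
 *      }
 *
 * sfmmu_modifytte_try() reports a failed exchange with a negative return,
 * which is why the callers here loop back and re-read the TTE before
 * retrying.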
6865 */ 6866 static cpuset_t 6867 sfmmu_pageclrwrt(struct page *pp, struct sf_hment *sfhme) 6868 { 6869 caddr_t addr; 6870 tte_t tte; 6871 tte_t ttemod; 6872 struct hme_blk *hmeblkp; 6873 int ret; 6874 sfmmu_t *sfmmup; 6875 cpuset_t cpuset; 6876 6877 ASSERT(pp != NULL); 6878 ASSERT(sfmmu_mlist_held(pp)); 6879 6880 CPUSET_ZERO(cpuset); 6881 SFMMU_STAT(sf_clrwrt); 6882 6883 retry: 6884 6885 sfmmu_copytte(&sfhme->hme_tte, &tte); 6886 if (TTE_IS_VALID(&tte) && TTE_IS_WRITABLE(&tte)) { 6887 hmeblkp = sfmmu_hmetohblk(sfhme); 6888 6889 /* 6890 * xhat mappings should never be to a VMODSORT page. 6891 */ 6892 ASSERT(hmeblkp->hblk_xhat_bit == 0); 6893 6894 sfmmup = hblktosfmmu(hmeblkp); 6895 addr = tte_to_vaddr(hmeblkp, tte); 6896 6897 ttemod = tte; 6898 TTE_CLR_WRT(&ttemod); 6899 TTE_CLR_MOD(&ttemod); 6900 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 6901 6902 /* 6903 * if cas failed and the new value is not what 6904 * we want retry 6905 */ 6906 if (ret < 0) 6907 goto retry; 6908 6909 /* we win the cas */ 6910 if (ret > 0) { 6911 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 6912 cpuset = sfmmup->sfmmu_cpusran; 6913 } 6914 } 6915 6916 return (cpuset); 6917 } 6918 6919 /* 6920 * Walk all mappings of a page, removing write permission and clearing the 6921 * ref/mod bits. This code is similar to hat_pagesync() 6922 */ 6923 static void 6924 hat_page_clrwrt(page_t *pp) 6925 { 6926 struct sf_hment *sfhme; 6927 struct sf_hment *tmphme = NULL; 6928 kmutex_t *pml; 6929 cpuset_t cpuset; 6930 cpuset_t tset; 6931 int index; 6932 int cons; 6933 6934 CPUSET_ZERO(cpuset); 6935 6936 pml = sfmmu_mlist_enter(pp); 6937 index = PP_MAPINDEX(pp); 6938 cons = TTE8K; 6939 retry: 6940 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 6941 tmphme = sfhme->hme_next; 6942 6943 /* 6944 * If we are looking for large mappings and this hme doesn't 6945 * reach the range we are seeking, just ignore its. 6946 */ 6947 6948 if (hme_size(sfhme) < cons) 6949 continue; 6950 6951 tset = sfmmu_pageclrwrt(pp, sfhme); 6952 CPUSET_OR(cpuset, tset); 6953 } 6954 6955 while (index) { 6956 index = index >> 1; 6957 cons++; 6958 if (index & 0x1) { 6959 /* Go to leading page */ 6960 pp = PP_GROUPLEADER(pp, cons); 6961 goto retry; 6962 } 6963 } 6964 6965 xt_sync(cpuset); 6966 sfmmu_mlist_exit(pml); 6967 } 6968 6969 /* 6970 * Set the given REF/MOD/RO bits for the given page. 6971 * For a vnode with a sorted v_pages list, we need to change 6972 * the attributes and the v_pages list together under page_vnode_mutex. 6973 */ 6974 void 6975 hat_page_setattr(page_t *pp, uint_t flag) 6976 { 6977 vnode_t *vp = pp->p_vnode; 6978 page_t **listp; 6979 kmutex_t *pmtx; 6980 kmutex_t *vphm = NULL; 6981 6982 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 6983 6984 /* 6985 * nothing to do if attribute already set 6986 */ 6987 if ((pp->p_nrm & flag) == flag) 6988 return; 6989 6990 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) { 6991 vphm = page_vnode_mutex(vp); 6992 mutex_enter(vphm); 6993 } 6994 6995 pmtx = sfmmu_page_enter(pp); 6996 pp->p_nrm |= flag; 6997 sfmmu_page_exit(pmtx); 6998 6999 if (vphm != NULL) { 7000 /* 7001 * Some File Systems examine v_pages for NULL w/o 7002 * grabbing the vphm mutex. Must not let it become NULL when 7003 * pp is the only page on the list. 
7004 */ 7005 if (pp->p_vpnext != pp) { 7006 page_vpsub(&vp->v_pages, pp); 7007 if (vp->v_pages != NULL) 7008 listp = &vp->v_pages->p_vpprev->p_vpnext; 7009 else 7010 listp = &vp->v_pages; 7011 page_vpadd(listp, pp); 7012 } 7013 mutex_exit(vphm); 7014 } 7015 } 7016 7017 void 7018 hat_page_clrattr(page_t *pp, uint_t flag) 7019 { 7020 vnode_t *vp = pp->p_vnode; 7021 kmutex_t *pmtx; 7022 7023 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 7024 7025 pmtx = sfmmu_page_enter(pp); 7026 7027 /* 7028 * Caller is expected to hold page's io lock for VMODSORT to work 7029 * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod 7030 * bit is cleared. 7031 * We don't have assert to avoid tripping some existing third party 7032 * code. The dirty page is moved back to top of the v_page list 7033 * after IO is done in pvn_write_done(). 7034 */ 7035 pp->p_nrm &= ~flag; 7036 sfmmu_page_exit(pmtx); 7037 7038 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) { 7039 7040 /* 7041 * VMODSORT works by removing write permissions and getting 7042 * a fault when a page is made dirty. At this point 7043 * we need to remove write permission from all mappings 7044 * to this page. 7045 */ 7046 hat_page_clrwrt(pp); 7047 } 7048 } 7049 7050 uint_t 7051 hat_page_getattr(page_t *pp, uint_t flag) 7052 { 7053 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 7054 return ((uint_t)(pp->p_nrm & flag)); 7055 } 7056 7057 /* 7058 * DEBUG kernels: verify that a kernel va<->pa translation 7059 * is safe by checking the underlying page_t is in a page 7060 * relocation-safe state. 7061 */ 7062 #ifdef DEBUG 7063 void 7064 sfmmu_check_kpfn(pfn_t pfn) 7065 { 7066 page_t *pp; 7067 int index, cons; 7068 7069 if (hat_check_vtop == 0) 7070 return; 7071 7072 if (hat_kpr_enabled == 0 || kvseg.s_base == NULL || panicstr) 7073 return; 7074 7075 pp = page_numtopp_nolock(pfn); 7076 if (!pp) 7077 return; 7078 7079 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp)) 7080 return; 7081 7082 /* 7083 * Handed a large kernel page, we dig up the root page since we 7084 * know the root page might have the lock also. 7085 */ 7086 if (pp->p_szc != 0) { 7087 index = PP_MAPINDEX(pp); 7088 cons = TTE8K; 7089 again: 7090 while (index != 0) { 7091 index >>= 1; 7092 if (index != 0) 7093 cons++; 7094 if (index & 0x1) { 7095 pp = PP_GROUPLEADER(pp, cons); 7096 goto again; 7097 } 7098 } 7099 } 7100 7101 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp)) 7102 return; 7103 7104 /* 7105 * Pages need to be locked or allocated "permanent" (either from 7106 * static_arena arena or explicitly setting PG_NORELOC when calling 7107 * page_create_va()) for VA->PA translations to be valid. 7108 */ 7109 if (!PP_ISNORELOC(pp)) 7110 panic("Illegal VA->PA translation, pp 0x%p not permanent", pp); 7111 else 7112 panic("Illegal VA->PA translation, pp 0x%p not locked", pp); 7113 } 7114 #endif /* DEBUG */ 7115 7116 /* 7117 * Returns a page frame number for a given virtual address. 7118 * Returns PFN_INVALID to indicate an invalid mapping 7119 */ 7120 pfn_t 7121 hat_getpfnum(struct hat *hat, caddr_t addr) 7122 { 7123 pfn_t pfn; 7124 tte_t tte; 7125 7126 /* 7127 * We would like to 7128 * ASSERT(AS_LOCK_HELD(as, &as->a_lock)); 7129 * but we can't because the iommu driver will call this 7130 * routine at interrupt time and it can't grab the as lock 7131 * or it will deadlock: A thread could have the as lock 7132 * and be waiting for io. The io can't complete 7133 * because the interrupt thread is blocked trying to grab 7134 * the as lock. 
7135 */ 7136 7137 ASSERT(hat->sfmmu_xhat_provider == NULL); 7138 7139 if (hat == ksfmmup) { 7140 if (segkpm && IS_KPM_ADDR(addr)) 7141 return (sfmmu_kpm_vatopfn(addr)); 7142 while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte)) 7143 == PFN_SUSPENDED) { 7144 sfmmu_vatopfn_suspended(addr, ksfmmup, &tte); 7145 } 7146 sfmmu_check_kpfn(pfn); 7147 return (pfn); 7148 } else { 7149 return (sfmmu_uvatopfn(addr, hat)); 7150 } 7151 } 7152 7153 /* 7154 * hat_getkpfnum() is an obsolete DDI routine, and its use is discouraged. 7155 * Use hat_getpfnum(kas.a_hat, ...) instead. 7156 * 7157 * We'd like to return PFN_INVALID if the mappings have underlying page_t's 7158 * but can't right now due to the fact that some software has grown to use 7159 * this interface incorrectly. So for now when the interface is misused, 7160 * return a warning to the user that in the future it won't work in the 7161 * way they're abusing it, and carry on (after disabling page relocation). 7162 */ 7163 pfn_t 7164 hat_getkpfnum(caddr_t addr) 7165 { 7166 pfn_t pfn; 7167 tte_t tte; 7168 int badcaller = 0; 7169 extern int segkmem_reloc; 7170 7171 if (segkpm && IS_KPM_ADDR(addr)) { 7172 badcaller = 1; 7173 pfn = sfmmu_kpm_vatopfn(addr); 7174 } else { 7175 while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte)) 7176 == PFN_SUSPENDED) { 7177 sfmmu_vatopfn_suspended(addr, ksfmmup, &tte); 7178 } 7179 badcaller = pf_is_memory(pfn); 7180 } 7181 7182 if (badcaller) { 7183 /* 7184 * We can't return PFN_INVALID or the caller may panic 7185 * or corrupt the system. The only alternative is to 7186 * disable page relocation at this point for all kernel 7187 * memory. This will impact any callers of page_relocate() 7188 * such as FMA or DR. 7189 * 7190 * RFE: Add junk here to spit out an ereport so the sysadmin 7191 * can be advised that he should upgrade his device driver 7192 * so that this doesn't happen. 7193 */ 7194 hat_getkpfnum_badcall(caller()); 7195 if (hat_kpr_enabled && segkmem_reloc) { 7196 hat_kpr_enabled = 0; 7197 segkmem_reloc = 0; 7198 cmn_err(CE_WARN, "Kernel Page Relocation is DISABLED"); 7199 } 7200 } 7201 return (pfn); 7202 } 7203 7204 pfn_t 7205 sfmmu_uvatopfn(caddr_t vaddr, struct hat *sfmmup) 7206 { 7207 struct hmehash_bucket *hmebp; 7208 hmeblk_tag hblktag; 7209 int hmeshift, hashno = 1; 7210 struct hme_blk *hmeblkp = NULL; 7211 7212 struct sf_hment *sfhmep; 7213 tte_t tte; 7214 pfn_t pfn; 7215 7216 /* support for ISM */ 7217 ism_map_t *ism_map; 7218 ism_blk_t *ism_blkp; 7219 int i; 7220 sfmmu_t *ism_hatid = NULL; 7221 sfmmu_t *locked_hatid = NULL; 7222 7223 7224 ASSERT(sfmmup != ksfmmup); 7225 SFMMU_STAT(sf_user_vtop); 7226 /* 7227 * Set ism_hatid if vaddr falls in a ISM segment. 
7228 */ 7229 ism_blkp = sfmmup->sfmmu_iblk; 7230 if (ism_blkp) { 7231 sfmmu_ismhat_enter(sfmmup, 0); 7232 locked_hatid = sfmmup; 7233 } 7234 while (ism_blkp && ism_hatid == NULL) { 7235 ism_map = ism_blkp->iblk_maps; 7236 for (i = 0; ism_map[i].imap_ismhat && i < ISM_MAP_SLOTS; i++) { 7237 if (vaddr >= ism_start(ism_map[i]) && 7238 vaddr < ism_end(ism_map[i])) { 7239 sfmmup = ism_hatid = ism_map[i].imap_ismhat; 7240 vaddr = (caddr_t)(vaddr - 7241 ism_start(ism_map[i])); 7242 break; 7243 } 7244 } 7245 ism_blkp = ism_blkp->iblk_next; 7246 } 7247 if (locked_hatid) { 7248 sfmmu_ismhat_exit(locked_hatid, 0); 7249 } 7250 7251 hblktag.htag_id = sfmmup; 7252 do { 7253 hmeshift = HME_HASH_SHIFT(hashno); 7254 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift); 7255 hblktag.htag_rehash = hashno; 7256 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift); 7257 7258 SFMMU_HASH_LOCK(hmebp); 7259 7260 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 7261 if (hmeblkp != NULL) { 7262 HBLKTOHME(sfhmep, hmeblkp, vaddr); 7263 sfmmu_copytte(&sfhmep->hme_tte, &tte); 7264 if (TTE_IS_VALID(&tte)) { 7265 pfn = TTE_TO_PFN(vaddr, &tte); 7266 } else { 7267 pfn = PFN_INVALID; 7268 } 7269 SFMMU_HASH_UNLOCK(hmebp); 7270 return (pfn); 7271 } 7272 SFMMU_HASH_UNLOCK(hmebp); 7273 hashno++; 7274 } while (HME_REHASH(sfmmup) && (hashno <= mmu_hashcnt)); 7275 return (PFN_INVALID); 7276 } 7277 7278 7279 /* 7280 * For compatability with AT&T and later optimizations 7281 */ 7282 /* ARGSUSED */ 7283 void 7284 hat_map(struct hat *hat, caddr_t addr, size_t len, uint_t flags) 7285 { 7286 ASSERT(hat != NULL); 7287 ASSERT(hat->sfmmu_xhat_provider == NULL); 7288 } 7289 7290 /* 7291 * Return the number of mappings to a particular page. 7292 * This number is an approximation of the number of 7293 * number of people sharing the page. 7294 */ 7295 ulong_t 7296 hat_page_getshare(page_t *pp) 7297 { 7298 page_t *spp = pp; /* start page */ 7299 kmutex_t *pml; 7300 ulong_t cnt; 7301 int index, sz = TTE64K; 7302 7303 /* 7304 * We need to grab the mlist lock to make sure any outstanding 7305 * load/unloads complete. Otherwise we could return zero 7306 * even though the unload(s) hasn't finished yet. 7307 */ 7308 pml = sfmmu_mlist_enter(spp); 7309 cnt = spp->p_share; 7310 7311 #ifdef VAC 7312 if (kpm_enable) 7313 cnt += spp->p_kpmref; 7314 #endif 7315 7316 /* 7317 * If we have any large mappings, we count the number of 7318 * mappings that this large page is part of. 7319 */ 7320 index = PP_MAPINDEX(spp); 7321 index >>= 1; 7322 while (index) { 7323 pp = PP_GROUPLEADER(spp, sz); 7324 if ((index & 0x1) && pp != spp) { 7325 cnt += pp->p_share; 7326 spp = pp; 7327 } 7328 index >>= 1; 7329 sz++; 7330 } 7331 sfmmu_mlist_exit(pml); 7332 return (cnt); 7333 } 7334 7335 /* 7336 * Unload all large mappings to the pp and reset the p_szc field of every 7337 * constituent page according to the remaining mappings. 7338 * 7339 * pp must be locked SE_EXCL. Even though no other constituent pages are 7340 * locked it's legal to unload the large mappings to the pp because all 7341 * constituent pages of large locked mappings have to be locked SE_SHARED. 7342 * This means if we have SE_EXCL lock on one of constituent pages none of the 7343 * large mappings to pp are locked. 7344 * 7345 * Decrease p_szc field starting from the last constituent page and ending 7346 * with the root page. This method is used because other threads rely on the 7347 * root's p_szc to find the lock to syncronize on. 
After a root page_t's p_szc 7348 * is demoted then other threads will succeed in sfmmu_mlspl_enter(). This 7349 * ensures that p_szc changes of the constituent pages appears atomic for all 7350 * threads that use sfmmu_mlspl_enter() to examine p_szc field. 7351 * 7352 * This mechanism is only used for file system pages where it's not always 7353 * possible to get SE_EXCL locks on all constituent pages to demote the size 7354 * code (as is done for anonymous or kernel large pages). 7355 * 7356 * See more comments in front of sfmmu_mlspl_enter(). 7357 */ 7358 void 7359 hat_page_demote(page_t *pp) 7360 { 7361 int index; 7362 int sz; 7363 cpuset_t cpuset; 7364 int sync = 0; 7365 page_t *rootpp; 7366 struct sf_hment *sfhme; 7367 struct sf_hment *tmphme = NULL; 7368 struct hme_blk *hmeblkp; 7369 uint_t pszc; 7370 page_t *lastpp; 7371 cpuset_t tset; 7372 pgcnt_t npgs; 7373 kmutex_t *pml; 7374 kmutex_t *pmtx = NULL; 7375 7376 ASSERT(PAGE_EXCL(pp)); 7377 ASSERT(!PP_ISFREE(pp)); 7378 ASSERT(page_szc_lock_assert(pp)); 7379 pml = sfmmu_mlist_enter(pp); 7380 7381 pszc = pp->p_szc; 7382 if (pszc == 0) { 7383 goto out; 7384 } 7385 7386 index = PP_MAPINDEX(pp) >> 1; 7387 7388 if (index) { 7389 CPUSET_ZERO(cpuset); 7390 sz = TTE64K; 7391 sync = 1; 7392 } 7393 7394 while (index) { 7395 if (!(index & 0x1)) { 7396 index >>= 1; 7397 sz++; 7398 continue; 7399 } 7400 ASSERT(sz <= pszc); 7401 rootpp = PP_GROUPLEADER(pp, sz); 7402 for (sfhme = rootpp->p_mapping; sfhme; sfhme = tmphme) { 7403 tmphme = sfhme->hme_next; 7404 hmeblkp = sfmmu_hmetohblk(sfhme); 7405 if (hme_size(sfhme) != sz) { 7406 continue; 7407 } 7408 if (hmeblkp->hblk_xhat_bit) { 7409 cmn_err(CE_PANIC, 7410 "hat_page_demote: xhat hmeblk"); 7411 } 7412 tset = sfmmu_pageunload(rootpp, sfhme, sz); 7413 CPUSET_OR(cpuset, tset); 7414 } 7415 if (index >>= 1) { 7416 sz++; 7417 } 7418 } 7419 7420 ASSERT(!PP_ISMAPPED_LARGE(pp)); 7421 7422 if (sync) { 7423 xt_sync(cpuset); 7424 #ifdef VAC 7425 if (PP_ISTNC(pp)) { 7426 conv_tnc(rootpp, sz); 7427 } 7428 #endif /* VAC */ 7429 } 7430 7431 pmtx = sfmmu_page_enter(pp); 7432 7433 ASSERT(pp->p_szc == pszc); 7434 rootpp = PP_PAGEROOT(pp); 7435 ASSERT(rootpp->p_szc == pszc); 7436 lastpp = PP_PAGENEXT_N(rootpp, TTEPAGES(pszc) - 1); 7437 7438 while (lastpp != rootpp) { 7439 sz = PP_MAPINDEX(lastpp) ? fnd_mapping_sz(lastpp) : 0; 7440 ASSERT(sz < pszc); 7441 npgs = (sz == 0) ? 1 : TTEPAGES(sz); 7442 ASSERT(P2PHASE(lastpp->p_pagenum, npgs) == npgs - 1); 7443 while (--npgs > 0) { 7444 lastpp->p_szc = (uchar_t)sz; 7445 lastpp = PP_PAGEPREV(lastpp); 7446 } 7447 if (sz) { 7448 /* 7449 * make sure before current root's pszc 7450 * is updated all updates to constituent pages pszc 7451 * fields are globally visible. 7452 */ 7453 membar_producer(); 7454 } 7455 lastpp->p_szc = sz; 7456 ASSERT(IS_P2ALIGNED(lastpp->p_pagenum, TTEPAGES(sz))); 7457 if (lastpp != rootpp) { 7458 lastpp = PP_PAGEPREV(lastpp); 7459 } 7460 } 7461 if (sz == 0) { 7462 /* the loop above doesn't cover this case */ 7463 rootpp->p_szc = 0; 7464 } 7465 out: 7466 ASSERT(pp->p_szc == 0); 7467 if (pmtx != NULL) { 7468 sfmmu_page_exit(pmtx); 7469 } 7470 sfmmu_mlist_exit(pml); 7471 } 7472 7473 /* 7474 * Refresh the HAT ismttecnt[] element for size szc. 7475 * Caller must have set ISM busy flag to prevent mapping 7476 * lists from changing while we're traversing them. 
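 *
 * Callers typically refresh every page size in one pass while the ISM
 * busy flag is held, along the lines of the loop hat_share() and
 * hat_unshare() use (sketch only):
 *
 *      for (i = 0; i < mmu_page_sizes; i++) {
 *              if (!(disable_ism_large_pages & (1 << i)))
 *                      (void) ism_tsb_entries(sfmmup, i);
 *      }
 *
 * hat_share()/hat_unshare() bound the loop by the ISM page-size code
 * instead of mmu_page_sizes; the guard against disabled large page sizes
 * is the same.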
7477 */ 7478 pgcnt_t 7479 ism_tsb_entries(sfmmu_t *sfmmup, int szc) 7480 { 7481 ism_blk_t *ism_blkp = sfmmup->sfmmu_iblk; 7482 ism_map_t *ism_map; 7483 pgcnt_t npgs = 0; 7484 int j; 7485 7486 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 7487 for (; ism_blkp != NULL; ism_blkp = ism_blkp->iblk_next) { 7488 ism_map = ism_blkp->iblk_maps; 7489 for (j = 0; ism_map[j].imap_ismhat && j < ISM_MAP_SLOTS; j++) 7490 npgs += ism_map[j].imap_ismhat->sfmmu_ttecnt[szc]; 7491 } 7492 sfmmup->sfmmu_ismttecnt[szc] = npgs; 7493 return (npgs); 7494 } 7495 7496 /* 7497 * Yield the memory claim requirement for an address space. 7498 * 7499 * This is currently implemented as the number of bytes that have active 7500 * hardware translations that have page structures. Therefore, it can 7501 * underestimate the traditional resident set size, eg, if the 7502 * physical page is present and the hardware translation is missing; 7503 * and it can overestimate the rss, eg, if there are active 7504 * translations to a frame buffer with page structs. 7505 * Also, it does not take sharing into account. 7506 * 7507 * Note that we don't acquire locks here since this function is most often 7508 * called from the clock thread. 7509 */ 7510 size_t 7511 hat_get_mapped_size(struct hat *hat) 7512 { 7513 size_t assize = 0; 7514 int i; 7515 7516 if (hat == NULL) 7517 return (0); 7518 7519 ASSERT(hat->sfmmu_xhat_provider == NULL); 7520 7521 for (i = 0; i < mmu_page_sizes; i++) 7522 assize += (pgcnt_t)hat->sfmmu_ttecnt[i] * TTEBYTES(i); 7523 7524 if (hat->sfmmu_iblk == NULL) 7525 return (assize); 7526 7527 for (i = 0; i < mmu_page_sizes; i++) 7528 assize += (pgcnt_t)hat->sfmmu_ismttecnt[i] * TTEBYTES(i); 7529 7530 return (assize); 7531 } 7532 7533 int 7534 hat_stats_enable(struct hat *hat) 7535 { 7536 hatlock_t *hatlockp; 7537 7538 ASSERT(hat->sfmmu_xhat_provider == NULL); 7539 7540 hatlockp = sfmmu_hat_enter(hat); 7541 hat->sfmmu_rmstat++; 7542 sfmmu_hat_exit(hatlockp); 7543 return (1); 7544 } 7545 7546 void 7547 hat_stats_disable(struct hat *hat) 7548 { 7549 hatlock_t *hatlockp; 7550 7551 ASSERT(hat->sfmmu_xhat_provider == NULL); 7552 7553 hatlockp = sfmmu_hat_enter(hat); 7554 hat->sfmmu_rmstat--; 7555 sfmmu_hat_exit(hatlockp); 7556 } 7557 7558 /* 7559 * Routines for entering or removing ourselves from the 7560 * ism_hat's mapping list. 7561 */ 7562 static void 7563 iment_add(struct ism_ment *iment, struct hat *ism_hat) 7564 { 7565 ASSERT(MUTEX_HELD(&ism_mlist_lock)); 7566 7567 iment->iment_prev = NULL; 7568 iment->iment_next = ism_hat->sfmmu_iment; 7569 if (ism_hat->sfmmu_iment) { 7570 ism_hat->sfmmu_iment->iment_prev = iment; 7571 } 7572 ism_hat->sfmmu_iment = iment; 7573 } 7574 7575 static void 7576 iment_sub(struct ism_ment *iment, struct hat *ism_hat) 7577 { 7578 ASSERT(MUTEX_HELD(&ism_mlist_lock)); 7579 7580 if (ism_hat->sfmmu_iment == NULL) { 7581 panic("ism map entry remove - no entries"); 7582 } 7583 7584 if (iment->iment_prev) { 7585 ASSERT(ism_hat->sfmmu_iment != iment); 7586 iment->iment_prev->iment_next = iment->iment_next; 7587 } else { 7588 ASSERT(ism_hat->sfmmu_iment == iment); 7589 ism_hat->sfmmu_iment = iment->iment_next; 7590 } 7591 7592 if (iment->iment_next) { 7593 iment->iment_next->iment_prev = iment->iment_prev; 7594 } 7595 7596 /* 7597 * zero out the entry 7598 */ 7599 iment->iment_next = NULL; 7600 iment->iment_prev = NULL; 7601 iment->iment_hat = NULL; 7602 } 7603 7604 /* 7605 * Hat_share()/unshare() return an (non-zero) error 7606 * when saddr and daddr are not properly aligned. 
7607 * 7608 * The top level mapping element determines the alignment 7609 * requirement for saddr and daddr, depending on different 7610 * architectures. 7611 * 7612 * When hat_share()/unshare() are not supported, 7613 * HATOP_SHARE()/UNSHARE() return 0 7614 */ 7615 int 7616 hat_share(struct hat *sfmmup, caddr_t addr, 7617 struct hat *ism_hatid, caddr_t sptaddr, size_t len, uint_t ismszc) 7618 { 7619 ism_blk_t *ism_blkp; 7620 ism_blk_t *new_iblk; 7621 ism_map_t *ism_map; 7622 ism_ment_t *ism_ment; 7623 int i, added; 7624 hatlock_t *hatlockp; 7625 int reload_mmu = 0; 7626 uint_t ismshift = page_get_shift(ismszc); 7627 size_t ismpgsz = page_get_pagesize(ismszc); 7628 uint_t ismmask = (uint_t)ismpgsz - 1; 7629 size_t sh_size = ISM_SHIFT(ismshift, len); 7630 ushort_t ismhatflag; 7631 7632 #ifdef DEBUG 7633 caddr_t eaddr = addr + len; 7634 #endif /* DEBUG */ 7635 7636 ASSERT(ism_hatid != NULL && sfmmup != NULL); 7637 ASSERT(sptaddr == ISMID_STARTADDR); 7638 /* 7639 * Check the alignment. 7640 */ 7641 if (!ISM_ALIGNED(ismshift, addr) || !ISM_ALIGNED(ismshift, sptaddr)) 7642 return (EINVAL); 7643 7644 /* 7645 * Check size alignment. 7646 */ 7647 if (!ISM_ALIGNED(ismshift, len)) 7648 return (EINVAL); 7649 7650 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 7651 7652 /* 7653 * Allocate ism_ment for the ism_hat's mapping list, and an 7654 * ism map blk in case we need one. We must do our 7655 * allocations before acquiring locks to prevent a deadlock 7656 * in the kmem allocator on the mapping list lock. 7657 */ 7658 new_iblk = kmem_cache_alloc(ism_blk_cache, KM_SLEEP); 7659 ism_ment = kmem_cache_alloc(ism_ment_cache, KM_SLEEP); 7660 7661 /* 7662 * Serialize ISM mappings with the ISM busy flag, and also the 7663 * trap handlers. 7664 */ 7665 sfmmu_ismhat_enter(sfmmup, 0); 7666 7667 /* 7668 * Allocate an ism map blk if necessary. 7669 */ 7670 if (sfmmup->sfmmu_iblk == NULL) { 7671 sfmmup->sfmmu_iblk = new_iblk; 7672 bzero(new_iblk, sizeof (*new_iblk)); 7673 new_iblk->iblk_nextpa = (uint64_t)-1; 7674 membar_stst(); /* make sure next ptr visible to all CPUs */ 7675 sfmmup->sfmmu_ismblkpa = va_to_pa((caddr_t)new_iblk); 7676 reload_mmu = 1; 7677 new_iblk = NULL; 7678 } 7679 7680 #ifdef DEBUG 7681 /* 7682 * Make sure mapping does not already exist. 7683 */ 7684 ism_blkp = sfmmup->sfmmu_iblk; 7685 while (ism_blkp) { 7686 ism_map = ism_blkp->iblk_maps; 7687 for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) { 7688 if ((addr >= ism_start(ism_map[i]) && 7689 addr < ism_end(ism_map[i])) || 7690 eaddr > ism_start(ism_map[i]) && 7691 eaddr <= ism_end(ism_map[i])) { 7692 panic("sfmmu_share: Already mapped!"); 7693 } 7694 } 7695 ism_blkp = ism_blkp->iblk_next; 7696 } 7697 #endif /* DEBUG */ 7698 7699 ASSERT(ismszc >= TTE4M); 7700 if (ismszc == TTE4M) { 7701 ismhatflag = HAT_4M_FLAG; 7702 } else if (ismszc == TTE32M) { 7703 ismhatflag = HAT_32M_FLAG; 7704 } else if (ismszc == TTE256M) { 7705 ismhatflag = HAT_256M_FLAG; 7706 } 7707 /* 7708 * Add mapping to first available mapping slot. 7709 */ 7710 ism_blkp = sfmmup->sfmmu_iblk; 7711 added = 0; 7712 while (!added) { 7713 ism_map = ism_blkp->iblk_maps; 7714 for (i = 0; i < ISM_MAP_SLOTS; i++) { 7715 if (ism_map[i].imap_ismhat == NULL) { 7716 7717 ism_map[i].imap_ismhat = ism_hatid; 7718 ism_map[i].imap_vb_shift = (ushort_t)ismshift; 7719 ism_map[i].imap_hatflags = ismhatflag; 7720 ism_map[i].imap_sz_mask = ismmask; 7721 /* 7722 * imap_seg is checked in ISM_CHECK to see if 7723 * non-NULL, then other info assumed valid. 
7724 */ 7725 membar_stst(); 7726 ism_map[i].imap_seg = (uintptr_t)addr | sh_size; 7727 ism_map[i].imap_ment = ism_ment; 7728 7729 /* 7730 * Now add ourselves to the ism_hat's 7731 * mapping list. 7732 */ 7733 ism_ment->iment_hat = sfmmup; 7734 ism_ment->iment_base_va = addr; 7735 ism_hatid->sfmmu_ismhat = 1; 7736 ism_hatid->sfmmu_flags = 0; 7737 mutex_enter(&ism_mlist_lock); 7738 iment_add(ism_ment, ism_hatid); 7739 mutex_exit(&ism_mlist_lock); 7740 added = 1; 7741 break; 7742 } 7743 } 7744 if (!added && ism_blkp->iblk_next == NULL) { 7745 ism_blkp->iblk_next = new_iblk; 7746 new_iblk = NULL; 7747 bzero(ism_blkp->iblk_next, 7748 sizeof (*ism_blkp->iblk_next)); 7749 ism_blkp->iblk_next->iblk_nextpa = (uint64_t)-1; 7750 membar_stst(); 7751 ism_blkp->iblk_nextpa = 7752 va_to_pa((caddr_t)ism_blkp->iblk_next); 7753 } 7754 ism_blkp = ism_blkp->iblk_next; 7755 } 7756 7757 /* 7758 * Update our counters for this sfmmup's ism mappings. 7759 */ 7760 for (i = 0; i <= ismszc; i++) { 7761 if (!(disable_ism_large_pages & (1 << i))) 7762 (void) ism_tsb_entries(sfmmup, i); 7763 } 7764 7765 hatlockp = sfmmu_hat_enter(sfmmup); 7766 7767 /* 7768 * For ISM and DISM we do not support 512K pages, so we only 7769 * only search the 4M and 8K/64K hashes for 4 pagesize cpus, and search 7770 * the 256M or 32M, and 4M and 8K/64K hashes for 6 pagesize cpus. 7771 */ 7772 ASSERT((disable_ism_large_pages & (1 << TTE512K)) != 0); 7773 7774 if (ismszc > TTE4M && !SFMMU_FLAGS_ISSET(sfmmup, HAT_4M_FLAG)) 7775 SFMMU_FLAGS_SET(sfmmup, HAT_4M_FLAG); 7776 7777 if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_64K_FLAG)) 7778 SFMMU_FLAGS_SET(sfmmup, HAT_64K_FLAG); 7779 7780 /* 7781 * If we updated the ismblkpa for this HAT or we need 7782 * to start searching the 256M or 32M or 4M hash, we must 7783 * make sure all CPUs running this process reload their 7784 * tsbmiss area. Otherwise they will fail to load the mappings 7785 * in the tsbmiss handler and will loop calling pagefault(). 7786 */ 7787 switch (ismszc) { 7788 case TTE256M: 7789 if (reload_mmu || !SFMMU_FLAGS_ISSET(sfmmup, HAT_256M_FLAG)) { 7790 SFMMU_FLAGS_SET(sfmmup, HAT_256M_FLAG); 7791 sfmmu_sync_mmustate(sfmmup); 7792 } 7793 break; 7794 case TTE32M: 7795 if (reload_mmu || !SFMMU_FLAGS_ISSET(sfmmup, HAT_32M_FLAG)) { 7796 SFMMU_FLAGS_SET(sfmmup, HAT_32M_FLAG); 7797 sfmmu_sync_mmustate(sfmmup); 7798 } 7799 break; 7800 case TTE4M: 7801 if (reload_mmu || !SFMMU_FLAGS_ISSET(sfmmup, HAT_4M_FLAG)) { 7802 SFMMU_FLAGS_SET(sfmmup, HAT_4M_FLAG); 7803 sfmmu_sync_mmustate(sfmmup); 7804 } 7805 break; 7806 default: 7807 break; 7808 } 7809 7810 /* 7811 * Now we can drop the locks. 7812 */ 7813 sfmmu_ismhat_exit(sfmmup, 1); 7814 sfmmu_hat_exit(hatlockp); 7815 7816 /* 7817 * Free up ismblk if we didn't use it. 7818 */ 7819 if (new_iblk != NULL) 7820 kmem_cache_free(ism_blk_cache, new_iblk); 7821 7822 /* 7823 * Check TSB and TLB page sizes. 7824 */ 7825 sfmmu_check_page_sizes(sfmmup, 1); 7826 7827 return (0); 7828 } 7829 7830 /* 7831 * hat_unshare removes exactly one ism_map from 7832 * this process's as. It expects multiple calls 7833 * to hat_unshare for multiple shm segments. 
7834 */ 7835 void 7836 hat_unshare(struct hat *sfmmup, caddr_t addr, size_t len, uint_t ismszc) 7837 { 7838 ism_map_t *ism_map; 7839 ism_ment_t *free_ment = NULL; 7840 ism_blk_t *ism_blkp; 7841 struct hat *ism_hatid; 7842 int found, i; 7843 hatlock_t *hatlockp; 7844 struct tsb_info *tsbinfo; 7845 uint_t ismshift = page_get_shift(ismszc); 7846 size_t sh_size = ISM_SHIFT(ismshift, len); 7847 7848 ASSERT(ISM_ALIGNED(ismshift, addr)); 7849 ASSERT(ISM_ALIGNED(ismshift, len)); 7850 ASSERT(sfmmup != NULL); 7851 ASSERT(sfmmup != ksfmmup); 7852 7853 if (sfmmup->sfmmu_xhat_provider) { 7854 XHAT_UNSHARE(sfmmup, addr, len); 7855 return; 7856 } else { 7857 /* 7858 * This must be a CPU HAT. If the address space has 7859 * XHATs attached, inform all XHATs that ISM segment 7860 * is going away 7861 */ 7862 ASSERT(sfmmup->sfmmu_as != NULL); 7863 if (sfmmup->sfmmu_as->a_xhat != NULL) 7864 xhat_unshare_all(sfmmup->sfmmu_as, addr, len); 7865 } 7866 7867 /* 7868 * Make sure that during the entire time ISM mappings are removed, 7869 * the trap handlers serialize behind us, and that no one else 7870 * can be mucking with ISM mappings. This also lets us get away 7871 * with not doing expensive cross calls to flush the TLB -- we 7872 * just discard the context, flush the entire TSB, and call it 7873 * a day. 7874 */ 7875 sfmmu_ismhat_enter(sfmmup, 0); 7876 7877 /* 7878 * Remove the mapping. 7879 * 7880 * We can't have any holes in the ism map. 7881 * The tsb miss code while searching the ism map will 7882 * stop on an empty map slot. So we must move 7883 * everyone past the hole up 1 if any. 7884 * 7885 * Also empty ism map blks are not freed until the 7886 * process exits. This is to prevent a MT race condition 7887 * between sfmmu_unshare() and sfmmu_tsbmiss_exception(). 7888 */ 7889 found = 0; 7890 ism_blkp = sfmmup->sfmmu_iblk; 7891 while (!found && ism_blkp) { 7892 ism_map = ism_blkp->iblk_maps; 7893 for (i = 0; i < ISM_MAP_SLOTS; i++) { 7894 if (addr == ism_start(ism_map[i]) && 7895 sh_size == (size_t)(ism_size(ism_map[i]))) { 7896 found = 1; 7897 break; 7898 } 7899 } 7900 if (!found) 7901 ism_blkp = ism_blkp->iblk_next; 7902 } 7903 7904 if (found) { 7905 ism_hatid = ism_map[i].imap_ismhat; 7906 ASSERT(ism_hatid != NULL); 7907 ASSERT(ism_hatid->sfmmu_ismhat == 1); 7908 7909 /* 7910 * First remove ourselves from the ism mapping list. 7911 */ 7912 mutex_enter(&ism_mlist_lock); 7913 iment_sub(ism_map[i].imap_ment, ism_hatid); 7914 mutex_exit(&ism_mlist_lock); 7915 free_ment = ism_map[i].imap_ment; 7916 7917 /* 7918 * Now gurantee that any other cpu 7919 * that tries to process an ISM miss 7920 * will go to tl=0. 7921 */ 7922 hatlockp = sfmmu_hat_enter(sfmmup); 7923 7924 sfmmu_invalidate_ctx(sfmmup); 7925 7926 sfmmu_hat_exit(hatlockp); 7927 7928 /* 7929 * We delete the ism map by copying 7930 * the next map over the current one. 7931 * We will take the next one in the maps 7932 * array or from the next ism_blk. 
7933 */ 7934 while (ism_blkp) { 7935 ism_map = ism_blkp->iblk_maps; 7936 while (i < (ISM_MAP_SLOTS - 1)) { 7937 ism_map[i] = ism_map[i + 1]; 7938 i++; 7939 } 7940 /* i == (ISM_MAP_SLOTS - 1) */ 7941 ism_blkp = ism_blkp->iblk_next; 7942 if (ism_blkp) { 7943 ism_map[i] = ism_blkp->iblk_maps[0]; 7944 i = 0; 7945 } else { 7946 ism_map[i].imap_seg = 0; 7947 ism_map[i].imap_vb_shift = 0; 7948 ism_map[i].imap_hatflags = 0; 7949 ism_map[i].imap_sz_mask = 0; 7950 ism_map[i].imap_ismhat = NULL; 7951 ism_map[i].imap_ment = NULL; 7952 } 7953 } 7954 7955 /* 7956 * Now flush entire TSB for the process, since 7957 * demapping page by page can be too expensive. 7958 * We don't have to flush the TLB here anymore 7959 * since we switch to a new TLB ctx instead. 7960 * Also, there is no need to flush if the process 7961 * is exiting since the TSB will be freed later. 7962 */ 7963 if (!sfmmup->sfmmu_free) { 7964 hatlockp = sfmmu_hat_enter(sfmmup); 7965 for (tsbinfo = sfmmup->sfmmu_tsb; tsbinfo != NULL; 7966 tsbinfo = tsbinfo->tsb_next) { 7967 if (tsbinfo->tsb_flags & TSB_SWAPPED) 7968 continue; 7969 sfmmu_inv_tsb(tsbinfo->tsb_va, 7970 TSB_BYTES(tsbinfo->tsb_szc)); 7971 } 7972 sfmmu_hat_exit(hatlockp); 7973 } 7974 } 7975 7976 /* 7977 * Update our counters for this sfmmup's ism mappings. 7978 */ 7979 for (i = 0; i <= ismszc; i++) { 7980 if (!(disable_ism_large_pages & (1 << i))) 7981 (void) ism_tsb_entries(sfmmup, i); 7982 } 7983 7984 sfmmu_ismhat_exit(sfmmup, 0); 7985 7986 /* 7987 * We must do our freeing here after dropping locks 7988 * to prevent a deadlock in the kmem allocator on the 7989 * mapping list lock. 7990 */ 7991 if (free_ment != NULL) 7992 kmem_cache_free(ism_ment_cache, free_ment); 7993 7994 /* 7995 * Check TSB and TLB page sizes if the process isn't exiting. 7996 */ 7997 if (!sfmmup->sfmmu_free) 7998 sfmmu_check_page_sizes(sfmmup, 0); 7999 } 8000 8001 /* ARGSUSED */ 8002 static int 8003 sfmmu_idcache_constructor(void *buf, void *cdrarg, int kmflags) 8004 { 8005 /* void *buf is sfmmu_t pointer */ 8006 return (0); 8007 } 8008 8009 /* ARGSUSED */ 8010 static void 8011 sfmmu_idcache_destructor(void *buf, void *cdrarg) 8012 { 8013 /* void *buf is sfmmu_t pointer */ 8014 } 8015 8016 /* 8017 * setup kmem hmeblks by bzeroing all members and initializing the nextpa 8018 * field to be the pa of this hmeblk 8019 */ 8020 /* ARGSUSED */ 8021 static int 8022 sfmmu_hblkcache_constructor(void *buf, void *cdrarg, int kmflags) 8023 { 8024 struct hme_blk *hmeblkp; 8025 8026 bzero(buf, (size_t)cdrarg); 8027 hmeblkp = (struct hme_blk *)buf; 8028 hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp); 8029 8030 #ifdef HBLK_TRACE 8031 mutex_init(&hmeblkp->hblk_audit_lock, NULL, MUTEX_DEFAULT, NULL); 8032 #endif /* HBLK_TRACE */ 8033 8034 return (0); 8035 } 8036 8037 /* ARGSUSED */ 8038 static void 8039 sfmmu_hblkcache_destructor(void *buf, void *cdrarg) 8040 { 8041 8042 #ifdef HBLK_TRACE 8043 8044 struct hme_blk *hmeblkp; 8045 8046 hmeblkp = (struct hme_blk *)buf; 8047 mutex_destroy(&hmeblkp->hblk_audit_lock); 8048 8049 #endif /* HBLK_TRACE */ 8050 } 8051 8052 #define SFMMU_CACHE_RECLAIM_SCAN_RATIO 8 8053 static int sfmmu_cache_reclaim_scan_ratio = SFMMU_CACHE_RECLAIM_SCAN_RATIO; 8054 /* 8055 * The kmem allocator will callback into our reclaim routine when the system 8056 * is running low in memory. We traverse the hash and free up all unused but 8057 * still cached hme_blks. We also traverse the free list and free them up 8058 * as well. 
8059 */ 8060 /*ARGSUSED*/ 8061 static void 8062 sfmmu_hblkcache_reclaim(void *cdrarg) 8063 { 8064 int i; 8065 uint64_t hblkpa, prevpa, nx_pa; 8066 struct hmehash_bucket *hmebp; 8067 struct hme_blk *hmeblkp, *nx_hblk, *pr_hblk = NULL; 8068 static struct hmehash_bucket *uhmehash_reclaim_hand; 8069 static struct hmehash_bucket *khmehash_reclaim_hand; 8070 struct hme_blk *list = NULL; 8071 8072 hmebp = uhmehash_reclaim_hand; 8073 if (hmebp == NULL || hmebp > &uhme_hash[UHMEHASH_SZ]) 8074 uhmehash_reclaim_hand = hmebp = uhme_hash; 8075 uhmehash_reclaim_hand += UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; 8076 8077 for (i = UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) { 8078 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) { 8079 hmeblkp = hmebp->hmeblkp; 8080 hblkpa = hmebp->hmeh_nextpa; 8081 prevpa = 0; 8082 pr_hblk = NULL; 8083 while (hmeblkp) { 8084 nx_hblk = hmeblkp->hblk_next; 8085 nx_pa = hmeblkp->hblk_nextpa; 8086 if (!hmeblkp->hblk_vcnt && 8087 !hmeblkp->hblk_hmecnt) { 8088 sfmmu_hblk_hash_rm(hmebp, hmeblkp, 8089 prevpa, pr_hblk); 8090 sfmmu_hblk_free(hmebp, hmeblkp, 8091 hblkpa, &list); 8092 } else { 8093 pr_hblk = hmeblkp; 8094 prevpa = hblkpa; 8095 } 8096 hmeblkp = nx_hblk; 8097 hblkpa = nx_pa; 8098 } 8099 SFMMU_HASH_UNLOCK(hmebp); 8100 } 8101 if (hmebp++ == &uhme_hash[UHMEHASH_SZ]) 8102 hmebp = uhme_hash; 8103 } 8104 8105 hmebp = khmehash_reclaim_hand; 8106 if (hmebp == NULL || hmebp > &khme_hash[KHMEHASH_SZ]) 8107 khmehash_reclaim_hand = hmebp = khme_hash; 8108 khmehash_reclaim_hand += KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; 8109 8110 for (i = KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) { 8111 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) { 8112 hmeblkp = hmebp->hmeblkp; 8113 hblkpa = hmebp->hmeh_nextpa; 8114 prevpa = 0; 8115 pr_hblk = NULL; 8116 while (hmeblkp) { 8117 nx_hblk = hmeblkp->hblk_next; 8118 nx_pa = hmeblkp->hblk_nextpa; 8119 if (!hmeblkp->hblk_vcnt && 8120 !hmeblkp->hblk_hmecnt) { 8121 sfmmu_hblk_hash_rm(hmebp, hmeblkp, 8122 prevpa, pr_hblk); 8123 sfmmu_hblk_free(hmebp, hmeblkp, 8124 hblkpa, &list); 8125 } else { 8126 pr_hblk = hmeblkp; 8127 prevpa = hblkpa; 8128 } 8129 hmeblkp = nx_hblk; 8130 hblkpa = nx_pa; 8131 } 8132 SFMMU_HASH_UNLOCK(hmebp); 8133 } 8134 if (hmebp++ == &khme_hash[KHMEHASH_SZ]) 8135 hmebp = khme_hash; 8136 } 8137 sfmmu_hblks_list_purge(&list); 8138 } 8139 8140 /* 8141 * sfmmu_get_ppvcolor should become a vm_machdep or hatop interface. 8142 * same goes for sfmmu_get_addrvcolor(). 8143 * 8144 * This function will return the virtual color for the specified page. The 8145 * virtual color corresponds to this page current mapping or its last mapping. 8146 * It is used by memory allocators to choose addresses with the correct 8147 * alignment so vac consistency is automatically maintained. If the page 8148 * has no color it returns -1. 8149 */ 8150 /*ARGSUSED*/ 8151 int 8152 sfmmu_get_ppvcolor(struct page *pp) 8153 { 8154 #ifdef VAC 8155 int color; 8156 8157 if (!(cache & CACHE_VAC) || PP_NEWPAGE(pp)) { 8158 return (-1); 8159 } 8160 color = PP_GET_VCOLOR(pp); 8161 ASSERT(color < mmu_btop(shm_alignment)); 8162 return (color); 8163 #else 8164 return (-1); 8165 #endif /* VAC */ 8166 } 8167 8168 /* 8169 * This function will return the desired alignment for vac consistency 8170 * (vac color) given a virtual address. If no vac is present it returns -1. 
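 *
 * A typical consumer (hypothetical sketch, not a caller in this
 * file) would use it to place a replacement mapping at the same
 * color:
 *
 *	color = sfmmu_get_addrvcolor(old_vaddr);
 *	if (color != -1)
 *		choose new_vaddr such that
 *		    addr_to_vcolor(new_vaddr) == color;
 *
 * so that the old and new virtual addresses index the same cache
 * lines and no VAC alias flushing is required.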
8171 */ 8172 /*ARGSUSED*/ 8173 int 8174 sfmmu_get_addrvcolor(caddr_t vaddr) 8175 { 8176 #ifdef VAC 8177 if (cache & CACHE_VAC) { 8178 return (addr_to_vcolor(vaddr)); 8179 } else { 8180 return (-1); 8181 } 8182 #else 8183 return (-1); 8184 #endif /* VAC */ 8185 } 8186 8187 #ifdef VAC 8188 /* 8189 * Check for conflicts. 8190 * A conflict exists if the new and existing mappings do not have the same 8191 * virtual color (address modulo shm_alignment). If conflicts exist, the existing 8192 * mappings are flushed unless one of them is locked. If one of them is locked, then 8193 * the mappings are flushed and converted to non-cacheable mappings. 8194 */ 8195 static void 8196 sfmmu_vac_conflict(struct hat *hat, caddr_t addr, page_t *pp) 8197 { 8198 struct hat *tmphat; 8199 struct sf_hment *sfhmep, *tmphme = NULL; 8200 struct hme_blk *hmeblkp; 8201 int vcolor; 8202 tte_t tte; 8203 8204 ASSERT(sfmmu_mlist_held(pp)); 8205 ASSERT(!PP_ISNC(pp)); /* page better be cacheable */ 8206 8207 vcolor = addr_to_vcolor(addr); 8208 if (PP_NEWPAGE(pp)) { 8209 PP_SET_VCOLOR(pp, vcolor); 8210 return; 8211 } 8212 8213 if (PP_GET_VCOLOR(pp) == vcolor) { 8214 return; 8215 } 8216 8217 if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) { 8218 /* 8219 * Previous user of page had a different color 8220 * but since there are no current users 8221 * we just flush the cache and change the color. 8222 */ 8223 SFMMU_STAT(sf_pgcolor_conflict); 8224 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp)); 8225 PP_SET_VCOLOR(pp, vcolor); 8226 return; 8227 } 8228 8229 /* 8230 * If we get here we have a vac conflict with a current 8231 * mapping. VAC conflict policy is as follows. 8232 * - The default is to unload the other mappings unless: 8233 * - If we have a large mapping we uncache the page. 8234 * We need to uncache the rest of the large page too. 8235 * - If any of the mappings are locked we uncache the page. 8236 * - If the requested mapping is inconsistent 8237 * with another mapping and that mapping 8238 * is in the same address space we have to 8239 * make it non-cached. The default thing 8240 * to do is unload the inconsistent mapping 8241 * but if they are in the same address space 8242 * we run the risk of unmapping the pc or the 8243 * stack which we will use as we return to the user, 8244 * in which case we can then fault on the thing 8245 * we just unloaded and get into an infinite loop. 8246 */ 8247 if (PP_ISMAPPED_LARGE(pp)) { 8248 int sz; 8249 8250 /* 8251 * Existing mapping is for big pages. We don't unload 8252 * existing big mappings to satisfy new mappings. 8253 * Always convert all mappings to TNC. 8254 */ 8255 sz = fnd_mapping_sz(pp); 8256 pp = PP_GROUPLEADER(pp, sz); 8257 SFMMU_STAT_ADD(sf_uncache_conflict, TTEPAGES(sz)); 8258 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 8259 TTEPAGES(sz)); 8260 8261 return; 8262 } 8263 8264 /* 8265 * check if any mapping is in the same address space, or is locked, 8266 * since in either case we need to uncache.
8267 */ 8268 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) { 8269 tmphme = sfhmep->hme_next; 8270 hmeblkp = sfmmu_hmetohblk(sfhmep); 8271 if (hmeblkp->hblk_xhat_bit) 8272 continue; 8273 tmphat = hblktosfmmu(hmeblkp); 8274 sfmmu_copytte(&sfhmep->hme_tte, &tte); 8275 ASSERT(TTE_IS_VALID(&tte)); 8276 if ((tmphat == hat) || hmeblkp->hblk_lckcnt) { 8277 /* 8278 * We have an uncache conflict 8279 */ 8280 SFMMU_STAT(sf_uncache_conflict); 8281 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 1); 8282 return; 8283 } 8284 } 8285 8286 /* 8287 * We have an unload conflict 8288 * We have already checked for LARGE mappings, therefore 8289 * the remaining mapping(s) must be TTE8K. 8290 */ 8291 SFMMU_STAT(sf_unload_conflict); 8292 8293 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) { 8294 tmphme = sfhmep->hme_next; 8295 hmeblkp = sfmmu_hmetohblk(sfhmep); 8296 if (hmeblkp->hblk_xhat_bit) 8297 continue; 8298 (void) sfmmu_pageunload(pp, sfhmep, TTE8K); 8299 } 8300 8301 if (PP_ISMAPPED_KPM(pp)) 8302 sfmmu_kpm_vac_unload(pp, addr); 8303 8304 /* 8305 * Unloads only do TLB flushes so we need to flush the 8306 * cache here. 8307 */ 8308 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp)); 8309 PP_SET_VCOLOR(pp, vcolor); 8310 } 8311 8312 /* 8313 * Whenever a mapping is unloaded and the page is in TNC state, 8314 * we see if the page can be made cacheable again. 'pp' is 8315 * the page that we just unloaded a mapping from, the size 8316 * of mapping that was unloaded is 'ottesz'. 8317 * Remark: 8318 * The recache policy for mpss pages can leave a performance problem 8319 * under the following circumstances: 8320 * . A large page in uncached mode has just been unmapped. 8321 * . All constituent pages are TNC due to a conflicting small mapping. 8322 * . There are many other, non conflicting, small mappings around for 8323 * a lot of the constituent pages. 8324 * . We're called w/ the "old" groupleader page and the old ottesz, 8325 * but this is irrelevant, since we're no more "PP_ISMAPPED_LARGE", so 8326 * we end up w/ TTE8K or npages == 1. 8327 * . We call tst_tnc w/ the old groupleader only, and if there is no 8328 * conflict, we re-cache only this page. 8329 * . All other small mappings are not checked and will be left in TNC mode. 8330 * The problem is not very serious because: 8331 * . mpss is actually only defined for heap and stack, so the probability 8332 * is not very high that a large page mapping exists in parallel to a small 8333 * one (this is possible, but seems to be bad programming style in the 8334 * appl). 8335 * . The problem gets a little bit more serious, when those TNC pages 8336 * have to be mapped into kernel space, e.g. for networking. 8337 * . When VAC alias conflicts occur in applications, this is regarded 8338 * as an application bug. So if kstat's show them, the appl should 8339 * be changed anyway. 8340 */ 8341 void 8342 conv_tnc(page_t *pp, int ottesz) 8343 { 8344 int cursz, dosz; 8345 pgcnt_t curnpgs, dopgs; 8346 pgcnt_t pg64k; 8347 page_t *pp2; 8348 8349 /* 8350 * Determine how big a range we check for TNC and find 8351 * leader page. cursz is the size of the biggest 8352 * mapping that still exist on 'pp'. 
8353 */ 8354 if (PP_ISMAPPED_LARGE(pp)) { 8355 cursz = fnd_mapping_sz(pp); 8356 } else { 8357 cursz = TTE8K; 8358 } 8359 8360 if (ottesz >= cursz) { 8361 dosz = ottesz; 8362 pp2 = pp; 8363 } else { 8364 dosz = cursz; 8365 pp2 = PP_GROUPLEADER(pp, dosz); 8366 } 8367 8368 pg64k = TTEPAGES(TTE64K); 8369 dopgs = TTEPAGES(dosz); 8370 8371 ASSERT(dopgs == 1 || ((dopgs & (pg64k - 1)) == 0)); 8372 8373 while (dopgs != 0) { 8374 curnpgs = TTEPAGES(cursz); 8375 if (tst_tnc(pp2, curnpgs)) { 8376 SFMMU_STAT_ADD(sf_recache, curnpgs); 8377 sfmmu_page_cache_array(pp2, HAT_CACHE, CACHE_NO_FLUSH, 8378 curnpgs); 8379 } 8380 8381 ASSERT(dopgs >= curnpgs); 8382 dopgs -= curnpgs; 8383 8384 if (dopgs == 0) { 8385 break; 8386 } 8387 8388 pp2 = PP_PAGENEXT_N(pp2, curnpgs); 8389 if (((dopgs & (pg64k - 1)) == 0) && PP_ISMAPPED_LARGE(pp2)) { 8390 cursz = fnd_mapping_sz(pp2); 8391 } else { 8392 cursz = TTE8K; 8393 } 8394 } 8395 } 8396 8397 /* 8398 * Returns 1 if page(s) can be converted from TNC to cacheable setting, 8399 * returns 0 otherwise. Note that oaddr argument is valid for only 8400 * 8k pages. 8401 */ 8402 int 8403 tst_tnc(page_t *pp, pgcnt_t npages) 8404 { 8405 struct sf_hment *sfhme; 8406 struct hme_blk *hmeblkp; 8407 tte_t tte; 8408 caddr_t vaddr; 8409 int clr_valid = 0; 8410 int color, color1, bcolor; 8411 int i, ncolors; 8412 8413 ASSERT(pp != NULL); 8414 ASSERT(!(cache & CACHE_WRITEBACK)); 8415 8416 if (npages > 1) { 8417 ncolors = CACHE_NUM_COLOR; 8418 } 8419 8420 for (i = 0; i < npages; i++) { 8421 ASSERT(sfmmu_mlist_held(pp)); 8422 ASSERT(PP_ISTNC(pp)); 8423 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR); 8424 8425 if (PP_ISPNC(pp)) { 8426 return (0); 8427 } 8428 8429 clr_valid = 0; 8430 if (PP_ISMAPPED_KPM(pp)) { 8431 caddr_t kpmvaddr; 8432 8433 ASSERT(kpm_enable); 8434 kpmvaddr = hat_kpm_page2va(pp, 1); 8435 ASSERT(!(npages > 1 && IS_KPM_ALIAS_RANGE(kpmvaddr))); 8436 color1 = addr_to_vcolor(kpmvaddr); 8437 clr_valid = 1; 8438 } 8439 8440 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) { 8441 hmeblkp = sfmmu_hmetohblk(sfhme); 8442 if (hmeblkp->hblk_xhat_bit) 8443 continue; 8444 8445 sfmmu_copytte(&sfhme->hme_tte, &tte); 8446 ASSERT(TTE_IS_VALID(&tte)); 8447 8448 vaddr = tte_to_vaddr(hmeblkp, tte); 8449 color = addr_to_vcolor(vaddr); 8450 8451 if (npages > 1) { 8452 /* 8453 * If there is a big mapping, make sure 8454 * 8K mapping is consistent with the big 8455 * mapping. 
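 *
 * The check relies on the fact that a large mapping is aligned at
 * least to the VAC size, so constituent page i of the large page
 * always sits at virtual color (i % ncolors) within that mapping;
 * an 8K mapping of the same page can only be consistent if it was
 * established at that same color, hence the comparison with
 * bcolor below. For example, with a 2-color VAC and a 64K page,
 * the eight constituent pages may only carry 8K mappings at colors
 * 0, 1, 0, 1, 0, 1, 0, 1 respectively.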
8456 */ 8457 bcolor = i % ncolors; 8458 if (color != bcolor) { 8459 return (0); 8460 } 8461 } 8462 if (!clr_valid) { 8463 clr_valid = 1; 8464 color1 = color; 8465 } 8466 8467 if (color1 != color) { 8468 return (0); 8469 } 8470 } 8471 8472 pp = PP_PAGENEXT(pp); 8473 } 8474 8475 return (1); 8476 } 8477 8478 void 8479 sfmmu_page_cache_array(page_t *pp, int flags, int cache_flush_flag, 8480 pgcnt_t npages) 8481 { 8482 kmutex_t *pmtx; 8483 int i, ncolors, bcolor; 8484 kpm_hlk_t *kpmp; 8485 cpuset_t cpuset; 8486 8487 ASSERT(pp != NULL); 8488 ASSERT(!(cache & CACHE_WRITEBACK)); 8489 8490 kpmp = sfmmu_kpm_kpmp_enter(pp, npages); 8491 pmtx = sfmmu_page_enter(pp); 8492 8493 /* 8494 * Fast path caching single unmapped page 8495 */ 8496 if (npages == 1 && !PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp) && 8497 flags == HAT_CACHE) { 8498 PP_CLRTNC(pp); 8499 PP_CLRPNC(pp); 8500 sfmmu_page_exit(pmtx); 8501 sfmmu_kpm_kpmp_exit(kpmp); 8502 return; 8503 } 8504 8505 /* 8506 * We need to capture all cpus in order to change cacheability 8507 * because we can't allow one cpu to access the same physical 8508 * page using a cacheable and a non-cacheable mapping at the same 8509 * time. Since we may end up walking the ism mapping list, we 8510 * have to grab its lock now because we can't do so after all the 8511 * cpus have been captured. 8512 */ 8513 sfmmu_hat_lock_all(); 8514 mutex_enter(&ism_mlist_lock); 8515 kpreempt_disable(); 8516 cpuset = cpu_ready_set; 8517 xc_attention(cpuset); 8518 8519 if (npages > 1) { 8520 /* 8521 * Make sure all colors are flushed since 8522 * sfmmu_page_cache() only flushes one color; 8523 * it does not know about big pages. 8524 */ 8525 ncolors = CACHE_NUM_COLOR; 8526 if (flags & HAT_TMPNC) { 8527 for (i = 0; i < ncolors; i++) { 8528 sfmmu_cache_flushcolor(i, pp->p_pagenum); 8529 } 8530 cache_flush_flag = CACHE_NO_FLUSH; 8531 } 8532 } 8533 8534 for (i = 0; i < npages; i++) { 8535 8536 ASSERT(sfmmu_mlist_held(pp)); 8537 8538 if (!(flags == HAT_TMPNC && PP_ISTNC(pp))) { 8539 8540 if (npages > 1) { 8541 bcolor = i % ncolors; 8542 } else { 8543 bcolor = NO_VCOLOR; 8544 } 8545 8546 sfmmu_page_cache(pp, flags, cache_flush_flag, 8547 bcolor); 8548 } 8549 8550 pp = PP_PAGENEXT(pp); 8551 } 8552 8553 xt_sync(cpuset); 8554 xc_dismissed(cpuset); 8555 mutex_exit(&ism_mlist_lock); 8556 sfmmu_hat_unlock_all(); 8557 sfmmu_page_exit(pmtx); 8558 sfmmu_kpm_kpmp_exit(kpmp); 8559 kpreempt_enable(); 8560 } 8561 8562 /* 8563 * This function changes the virtual cacheability of all mappings to a 8564 * particular page. When changing from uncache to cacheable the mappings will 8565 * only be changed if all of them have the same virtual color. 8566 * We need to flush the cache on all cpus. It is possible that 8567 * a process referenced a page as cacheable but has since exited 8568 * and cleared the mapping list. We still need to flush it, but we have no 8569 * state, so flushing on all cpus is the only alternative.
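 *
 * The flags argument selects the transition (as summarized by the
 * switch at the end of this routine): HAT_CACHE makes the page
 * cacheable again (clears TNC and PNC and records the new color),
 * HAT_TMPNC marks it temporarily non-cacheable (sets TNC), and
 * HAT_UNCACHE marks it non-cacheable (sets PNC) until it is
 * explicitly re-cached.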
8570 */ 8571 static void 8572 sfmmu_page_cache(page_t *pp, int flags, int cache_flush_flag, int bcolor) 8573 { 8574 struct sf_hment *sfhme; 8575 struct hme_blk *hmeblkp; 8576 sfmmu_t *sfmmup; 8577 tte_t tte, ttemod; 8578 caddr_t vaddr; 8579 int ret, color; 8580 pfn_t pfn; 8581 8582 color = bcolor; 8583 pfn = pp->p_pagenum; 8584 8585 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) { 8586 8587 hmeblkp = sfmmu_hmetohblk(sfhme); 8588 8589 if (hmeblkp->hblk_xhat_bit) 8590 continue; 8591 8592 sfmmu_copytte(&sfhme->hme_tte, &tte); 8593 ASSERT(TTE_IS_VALID(&tte)); 8594 vaddr = tte_to_vaddr(hmeblkp, tte); 8595 color = addr_to_vcolor(vaddr); 8596 8597 #ifdef DEBUG 8598 if ((flags & HAT_CACHE) && bcolor != NO_VCOLOR) { 8599 ASSERT(color == bcolor); 8600 } 8601 #endif 8602 8603 ASSERT(flags != HAT_TMPNC || color == PP_GET_VCOLOR(pp)); 8604 8605 ttemod = tte; 8606 if (flags & (HAT_UNCACHE | HAT_TMPNC)) { 8607 TTE_CLR_VCACHEABLE(&ttemod); 8608 } else { /* flags & HAT_CACHE */ 8609 TTE_SET_VCACHEABLE(&ttemod); 8610 } 8611 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 8612 if (ret < 0) { 8613 /* 8614 * Since all cpus are captured modifytte should not 8615 * fail. 8616 */ 8617 panic("sfmmu_page_cache: write to tte failed"); 8618 } 8619 8620 sfmmup = hblktosfmmu(hmeblkp); 8621 if (cache_flush_flag == CACHE_FLUSH) { 8622 /* 8623 * Flush TSBs, TLBs and caches 8624 */ 8625 if (sfmmup->sfmmu_ismhat) { 8626 if (flags & HAT_CACHE) { 8627 SFMMU_STAT(sf_ism_recache); 8628 } else { 8629 SFMMU_STAT(sf_ism_uncache); 8630 } 8631 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp, 8632 pfn, CACHE_FLUSH); 8633 } else { 8634 sfmmu_tlbcache_demap(vaddr, sfmmup, hmeblkp, 8635 pfn, 0, FLUSH_ALL_CPUS, CACHE_FLUSH, 1); 8636 } 8637 8638 /* 8639 * all cache entries belonging to this pfn are 8640 * now flushed. 8641 */ 8642 cache_flush_flag = CACHE_NO_FLUSH; 8643 } else { 8644 8645 /* 8646 * Flush only TSBs and TLBs. 8647 */ 8648 if (sfmmup->sfmmu_ismhat) { 8649 if (flags & HAT_CACHE) { 8650 SFMMU_STAT(sf_ism_recache); 8651 } else { 8652 SFMMU_STAT(sf_ism_uncache); 8653 } 8654 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp, 8655 pfn, CACHE_NO_FLUSH); 8656 } else { 8657 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 1); 8658 } 8659 } 8660 } 8661 8662 if (PP_ISMAPPED_KPM(pp)) 8663 sfmmu_kpm_page_cache(pp, flags, cache_flush_flag); 8664 8665 switch (flags) { 8666 8667 default: 8668 panic("sfmmu_pagecache: unknown flags"); 8669 break; 8670 8671 case HAT_CACHE: 8672 PP_CLRTNC(pp); 8673 PP_CLRPNC(pp); 8674 PP_SET_VCOLOR(pp, color); 8675 break; 8676 8677 case HAT_TMPNC: 8678 PP_SETTNC(pp); 8679 PP_SET_VCOLOR(pp, NO_VCOLOR); 8680 break; 8681 8682 case HAT_UNCACHE: 8683 PP_SETPNC(pp); 8684 PP_CLRTNC(pp); 8685 PP_SET_VCOLOR(pp, NO_VCOLOR); 8686 break; 8687 } 8688 } 8689 #endif /* VAC */ 8690 8691 8692 /* 8693 * Wrapper routine used to return a context. 8694 * 8695 * It's the responsibility of the caller to guarantee that the 8696 * process serializes on calls here by taking the HAT lock for 8697 * the hat. 8698 * 8699 */ 8700 static void 8701 sfmmu_get_ctx(sfmmu_t *sfmmup) 8702 { 8703 mmu_ctx_t *mmu_ctxp; 8704 uint_t pstate_save; 8705 8706 ASSERT(sfmmu_hat_lock_held(sfmmup)); 8707 ASSERT(sfmmup != ksfmmup); 8708 8709 kpreempt_disable(); 8710 8711 mmu_ctxp = CPU_MMU_CTXP(CPU); 8712 ASSERT(mmu_ctxp); 8713 ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms); 8714 ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]); 8715 8716 /* 8717 * Do a wrap-around if cnum reaches the max # cnum supported by a MMU. 
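 *
 * Roughly, each MMU hands out cnums NUM_LOCKED_CTXS .. mmu_nctxs - 1
 * in order (cnums 0 and 1 are reserved); once mmu_cnum reaches
 * mmu_nctxs, the generation number mmu_gnum is bumped, every context
 * on that MMU is invalidated, and allocation restarts at
 * NUM_LOCKED_CTXS. A process whose saved (gnum, cnum) pair no longer
 * matches the MMU's current generation simply takes a TSB miss and
 * is handed a fresh cnum.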
8718 */ 8719 if (mmu_ctxp->mmu_cnum == mmu_ctxp->mmu_nctxs) 8720 sfmmu_ctx_wrap_around(mmu_ctxp); 8721 8722 /* 8723 * Let the MMU set up the page sizes to use for 8724 * this context in the TLB. Don't program 2nd dtlb for ism hat. 8725 */ 8726 if ((&mmu_set_ctx_page_sizes) && (sfmmup->sfmmu_ismhat == 0)) { 8727 mmu_set_ctx_page_sizes(sfmmup); 8728 } 8729 8730 /* 8731 * sfmmu_alloc_ctx and sfmmu_load_mmustate will be performed with 8732 * interrupts disabled to prevent race condition with wrap-around 8733 * ctx invalidatation. In sun4v, ctx invalidation also involves 8734 * a HV call to set the number of TSBs to 0. If interrupts are not 8735 * disabled until after sfmmu_load_mmustate is complete TSBs may 8736 * become assigned to INVALID_CONTEXT. This is not allowed. 8737 */ 8738 pstate_save = sfmmu_disable_intrs(); 8739 8740 sfmmu_alloc_ctx(sfmmup, 1, CPU); 8741 sfmmu_load_mmustate(sfmmup); 8742 8743 sfmmu_enable_intrs(pstate_save); 8744 8745 kpreempt_enable(); 8746 } 8747 8748 /* 8749 * When all cnums are used up in a MMU, cnum will wrap around to the 8750 * next generation and start from 2. 8751 */ 8752 static void 8753 sfmmu_ctx_wrap_around(mmu_ctx_t *mmu_ctxp) 8754 { 8755 8756 /* caller must have disabled the preemption */ 8757 ASSERT(curthread->t_preempt >= 1); 8758 ASSERT(mmu_ctxp != NULL); 8759 8760 /* acquire Per-MMU (PM) spin lock */ 8761 mutex_enter(&mmu_ctxp->mmu_lock); 8762 8763 /* re-check to see if wrap-around is needed */ 8764 if (mmu_ctxp->mmu_cnum < mmu_ctxp->mmu_nctxs) 8765 goto done; 8766 8767 SFMMU_MMU_STAT(mmu_wrap_around); 8768 8769 /* update gnum */ 8770 ASSERT(mmu_ctxp->mmu_gnum != 0); 8771 mmu_ctxp->mmu_gnum++; 8772 if (mmu_ctxp->mmu_gnum == 0 || 8773 mmu_ctxp->mmu_gnum > MAX_SFMMU_GNUM_VAL) { 8774 cmn_err(CE_PANIC, "mmu_gnum of mmu_ctx 0x%p is out of bound.", 8775 (void *)mmu_ctxp); 8776 } 8777 8778 if (mmu_ctxp->mmu_ncpus > 1) { 8779 cpuset_t cpuset; 8780 8781 membar_enter(); /* make sure updated gnum visible */ 8782 8783 SFMMU_XCALL_STATS(NULL); 8784 8785 /* xcall to others on the same MMU to invalidate ctx */ 8786 cpuset = mmu_ctxp->mmu_cpuset; 8787 ASSERT(CPU_IN_SET(cpuset, CPU->cpu_id)); 8788 CPUSET_DEL(cpuset, CPU->cpu_id); 8789 CPUSET_AND(cpuset, cpu_ready_set); 8790 8791 /* 8792 * Pass in INVALID_CONTEXT as the first parameter to 8793 * sfmmu_raise_tsb_exception, which invalidates the context 8794 * of any process running on the CPUs in the MMU. 8795 */ 8796 xt_some(cpuset, sfmmu_raise_tsb_exception, 8797 INVALID_CONTEXT, INVALID_CONTEXT); 8798 xt_sync(cpuset); 8799 8800 SFMMU_MMU_STAT(mmu_tsb_raise_exception); 8801 } 8802 8803 if (sfmmu_getctx_sec() != INVALID_CONTEXT) { 8804 sfmmu_setctx_sec(INVALID_CONTEXT); 8805 sfmmu_clear_utsbinfo(); 8806 } 8807 8808 /* 8809 * No xcall is needed here. For sun4u systems all CPUs in context 8810 * domain share a single physical MMU therefore it's enough to flush 8811 * TLB on local CPU. On sun4v systems we use 1 global context 8812 * domain and flush all remote TLBs in sfmmu_raise_tsb_exception 8813 * handler. Note that vtag_flushall_uctxs() is called 8814 * for Ultra II machine, where the equivalent flushall functionality 8815 * is implemented in SW, and only user ctx TLB entries are flushed. 
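 *
 * (Taking the routine's address, as in the "&vtag_flushall_uctxs"
 * test below, is how this file checks at run time whether an
 * optional, CPU-module-provided routine exists; the same idiom is
 * used with mmu_set_ctx_page_sizes and mmu_check_page_sizes.)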
8816 */ 8817 if (&vtag_flushall_uctxs != NULL) { 8818 vtag_flushall_uctxs(); 8819 } else { 8820 vtag_flushall(); 8821 } 8822 8823 /* reset mmu cnum, skips cnum 0 and 1 */ 8824 mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS; 8825 8826 done: 8827 mutex_exit(&mmu_ctxp->mmu_lock); 8828 } 8829 8830 8831 /* 8832 * For a multi-threaded process, set the process context to INVALID_CONTEXT 8833 * so that it faults and reloads the MMU state from TL=0. For a single-threaded 8834 * process, we can just load the MMU state directly without having to 8835 * set the context invalid. Caller must hold the hat lock since we don't 8836 * acquire it here. 8837 */ 8838 static void 8839 sfmmu_sync_mmustate(sfmmu_t *sfmmup) 8840 { 8841 uint_t cnum; 8842 uint_t pstate_save; 8843 8844 ASSERT(sfmmup != ksfmmup); 8845 ASSERT(sfmmu_hat_lock_held(sfmmup)); 8846 8847 kpreempt_disable(); 8848 8849 /* 8850 * We check whether the passed-in sfmmup is the same as the 8851 * currently running proc. This is to make sure the current proc 8852 * stays single-threaded if it already is. 8853 */ 8854 if ((sfmmup == curthread->t_procp->p_as->a_hat) && 8855 (curthread->t_procp->p_lwpcnt == 1)) { 8856 /* single-thread */ 8857 cnum = sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum; 8858 if (cnum != INVALID_CONTEXT) { 8859 uint_t curcnum; 8860 /* 8861 * Disable interrupts to prevent a race condition 8862 * with sfmmu_ctx_wrap_around ctx invalidation. 8863 * In sun4v, ctx invalidation involves setting 8864 * the TSB to NULL, hence interrupts should be disabled 8865 * until after sfmmu_load_mmustate is completed. 8866 */ 8867 pstate_save = sfmmu_disable_intrs(); 8868 curcnum = sfmmu_getctx_sec(); 8869 if (curcnum == cnum) 8870 sfmmu_load_mmustate(sfmmup); 8871 sfmmu_enable_intrs(pstate_save); 8872 ASSERT(curcnum == cnum || curcnum == INVALID_CONTEXT); 8873 } 8874 } else { 8875 /* 8876 * multi-thread 8877 * or when sfmmup is not the same as the curproc. 8878 */ 8879 sfmmu_invalidate_ctx(sfmmup); 8880 } 8881 8882 kpreempt_enable(); 8883 } 8884 8885 8886 /* 8887 * Replace the specified TSB with a new TSB. This function gets called when 8888 * we grow, shrink or swap in a TSB. When swapping in a TSB (TSB_SWAPIN), the 8889 * TSB_FORCEALLOC flag may be used to force allocation of a minimum-sized TSB 8890 * (8K). 8891 * 8892 * Caller must hold the HAT lock, but should assume any tsb_info 8893 * pointers it has are no longer valid after calling this function. 8894 * 8895 * Return values: 8896 * TSB_ALLOCFAIL Failed to allocate a TSB, due to memory constraints 8897 * TSB_LOSTRACE HAT is busy, i.e. another thread is already doing 8898 * something to this tsbinfo/TSB 8899 * TSB_SUCCESS Operation succeeded 8900 */ 8901 static tsb_replace_rc_t 8902 sfmmu_replace_tsb(sfmmu_t *sfmmup, struct tsb_info *old_tsbinfo, uint_t szc, 8903 hatlock_t *hatlockp, uint_t flags) 8904 { 8905 struct tsb_info *new_tsbinfo = NULL; 8906 struct tsb_info *curtsb, *prevtsb; 8907 uint_t tte_sz_mask; 8908 int i; 8909 8910 ASSERT(sfmmup != ksfmmup); 8911 ASSERT(sfmmup->sfmmu_ismhat == 0); 8912 ASSERT(sfmmu_hat_lock_held(sfmmup)); 8913 ASSERT(szc <= tsb_max_growsize); 8914 8915 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_BUSY)) 8916 return (TSB_LOSTRACE); 8917 8918 /* 8919 * Find the tsb_info ahead of this one in the list, and 8920 * also make sure that the tsb_info passed in really 8921 * exists!
8922 */ 8923 for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb; 8924 curtsb != old_tsbinfo && curtsb != NULL; 8925 prevtsb = curtsb, curtsb = curtsb->tsb_next); 8926 ASSERT(curtsb != NULL); 8927 8928 if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 8929 /* 8930 * The process is swapped out, so just set the new size 8931 * code. When it swaps back in, we'll allocate a new one 8932 * of the new chosen size. 8933 */ 8934 curtsb->tsb_szc = szc; 8935 return (TSB_SUCCESS); 8936 } 8937 SFMMU_FLAGS_SET(sfmmup, HAT_BUSY); 8938 8939 tte_sz_mask = old_tsbinfo->tsb_ttesz_mask; 8940 8941 /* 8942 * All initialization is done inside of sfmmu_tsbinfo_alloc(). 8943 * If we fail to allocate a TSB, exit. 8944 */ 8945 sfmmu_hat_exit(hatlockp); 8946 if (sfmmu_tsbinfo_alloc(&new_tsbinfo, szc, tte_sz_mask, 8947 flags, sfmmup)) { 8948 (void) sfmmu_hat_enter(sfmmup); 8949 if (!(flags & TSB_SWAPIN)) 8950 SFMMU_STAT(sf_tsb_resize_failures); 8951 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 8952 return (TSB_ALLOCFAIL); 8953 } 8954 (void) sfmmu_hat_enter(sfmmup); 8955 8956 /* 8957 * Re-check to make sure somebody else didn't muck with us while we 8958 * didn't hold the HAT lock. If the process swapped out, fine, just 8959 * exit; this can happen if we try to shrink the TSB from the context 8960 * of another process (such as on an ISM unmap), though it is rare. 8961 */ 8962 if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 8963 SFMMU_STAT(sf_tsb_resize_failures); 8964 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 8965 sfmmu_hat_exit(hatlockp); 8966 sfmmu_tsbinfo_free(new_tsbinfo); 8967 (void) sfmmu_hat_enter(sfmmup); 8968 return (TSB_LOSTRACE); 8969 } 8970 8971 #ifdef DEBUG 8972 /* Reverify that the tsb_info still exists.. for debugging only */ 8973 for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb; 8974 curtsb != old_tsbinfo && curtsb != NULL; 8975 prevtsb = curtsb, curtsb = curtsb->tsb_next); 8976 ASSERT(curtsb != NULL); 8977 #endif /* DEBUG */ 8978 8979 /* 8980 * Quiesce any CPUs running this process on their next TLB miss 8981 * so they atomically see the new tsb_info. We temporarily set the 8982 * context to invalid context so new threads that come on processor 8983 * after we do the xcall to cpusran will also serialize behind the 8984 * HAT lock on TLB miss and will see the new TSB. Since this short 8985 * race with a new thread coming on processor is relatively rare, 8986 * this synchronization mechanism should be cheaper than always 8987 * pausing all CPUs for the duration of the setup, which is what 8988 * the old implementation did. This is particuarly true if we are 8989 * copying a huge chunk of memory around during that window. 8990 * 8991 * The memory barriers are to make sure things stay consistent 8992 * with resume() since it does not hold the HAT lock while 8993 * walking the list of tsb_info structures. 8994 */ 8995 if ((flags & TSB_SWAPIN) != TSB_SWAPIN) { 8996 /* The TSB is either growing or shrinking. */ 8997 sfmmu_invalidate_ctx(sfmmup); 8998 } else { 8999 /* 9000 * It is illegal to swap in TSBs from a process other 9001 * than a process being swapped in. This in turn 9002 * implies we do not have a valid MMU context here 9003 * since a process needs one to resolve translation 9004 * misses. 
9005 */ 9006 ASSERT(curthread->t_procp->p_as->a_hat == sfmmup); 9007 } 9008 9009 #ifdef DEBUG 9010 ASSERT(max_mmu_ctxdoms > 0); 9011 9012 /* 9013 * Process should have INVALID_CONTEXT on all MMUs 9014 */ 9015 for (i = 0; i < max_mmu_ctxdoms; i++) { 9016 9017 ASSERT(sfmmup->sfmmu_ctxs[i].cnum == INVALID_CONTEXT); 9018 } 9019 #endif 9020 9021 new_tsbinfo->tsb_next = old_tsbinfo->tsb_next; 9022 membar_stst(); /* strict ordering required */ 9023 if (prevtsb) 9024 prevtsb->tsb_next = new_tsbinfo; 9025 else 9026 sfmmup->sfmmu_tsb = new_tsbinfo; 9027 membar_enter(); /* make sure new TSB globally visible */ 9028 sfmmu_setup_tsbinfo(sfmmup); 9029 9030 /* 9031 * We need to migrate TSB entries from the old TSB to the new TSB 9032 * if tsb_remap_ttes is set and the TSB is growing. 9033 */ 9034 if (tsb_remap_ttes && ((flags & TSB_GROW) == TSB_GROW)) 9035 sfmmu_copy_tsb(old_tsbinfo, new_tsbinfo); 9036 9037 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 9038 9039 /* 9040 * Drop the HAT lock to free our old tsb_info. 9041 */ 9042 sfmmu_hat_exit(hatlockp); 9043 9044 if ((flags & TSB_GROW) == TSB_GROW) { 9045 SFMMU_STAT(sf_tsb_grow); 9046 } else if ((flags & TSB_SHRINK) == TSB_SHRINK) { 9047 SFMMU_STAT(sf_tsb_shrink); 9048 } 9049 9050 sfmmu_tsbinfo_free(old_tsbinfo); 9051 9052 (void) sfmmu_hat_enter(sfmmup); 9053 return (TSB_SUCCESS); 9054 } 9055 9056 /* 9057 * This function will re-program hat pgsz array, and invalidate the 9058 * process' context, forcing the process to switch to another 9059 * context on the next TLB miss, and therefore start using the 9060 * TLB that is reprogrammed for the new page sizes. 9061 */ 9062 void 9063 sfmmu_reprog_pgsz_arr(sfmmu_t *sfmmup, uint8_t *tmp_pgsz) 9064 { 9065 int i; 9066 hatlock_t *hatlockp = NULL; 9067 9068 hatlockp = sfmmu_hat_enter(sfmmup); 9069 /* USIII+-IV+ optimization, requires hat lock */ 9070 if (tmp_pgsz) { 9071 for (i = 0; i < mmu_page_sizes; i++) 9072 sfmmup->sfmmu_pgsz[i] = tmp_pgsz[i]; 9073 } 9074 SFMMU_STAT(sf_tlb_reprog_pgsz); 9075 9076 sfmmu_invalidate_ctx(sfmmup); 9077 9078 sfmmu_hat_exit(hatlockp); 9079 } 9080 9081 /* 9082 * This function assumes that there are either four or six supported page 9083 * sizes and at most two programmable TLBs, so we need to decide which 9084 * page sizes are most important and then tell the MMU layer so it 9085 * can adjust the TLB page sizes accordingly (if supported). 9086 * 9087 * If these assumptions change, this function will need to be 9088 * updated to support whatever the new limits are. 9089 * 9090 * The growing flag is nonzero if we are growing the address space, 9091 * and zero if it is shrinking. This allows us to decide whether 9092 * to grow or shrink our TSB, depending upon available memory 9093 * conditions. 9094 */ 9095 static void 9096 sfmmu_check_page_sizes(sfmmu_t *sfmmup, int growing) 9097 { 9098 uint64_t ttecnt[MMU_PAGE_SIZES]; 9099 uint64_t tte8k_cnt, tte4m_cnt; 9100 uint8_t i; 9101 int sectsb_thresh; 9102 9103 /* 9104 * Kernel threads, processes with small address spaces not using 9105 * large pages, and dummy ISM HATs need not apply. 9106 */ 9107 if (sfmmup == ksfmmup || sfmmup->sfmmu_ismhat != NULL) 9108 return; 9109 9110 if ((sfmmup->sfmmu_flags & HAT_LGPG_FLAGS) == 0 && 9111 sfmmup->sfmmu_ttecnt[TTE8K] <= tsb_rss_factor) 9112 return; 9113 9114 for (i = 0; i < mmu_page_sizes; i++) { 9115 ttecnt[i] = SFMMU_TTE_CNT(sfmmup, i); 9116 } 9117 9118 /* Check pagesizes in use, and possibly reprogram DTLB. 
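 *
 * The counts gathered above are folded into 8K- and 4M-equivalent
 * totals just below. For example, a process with 100 8K pages,
 * 10 64K pages and 2 512K pages has
 * tte8k_cnt = 100 + 10*8 + 2*64 = 308, i.e. the number of 8K TSB
 * entries needed to cover the same span.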
*/ 9119 if (&mmu_check_page_sizes) 9120 mmu_check_page_sizes(sfmmup, ttecnt); 9121 9122 /* 9123 * Calculate the number of 8k ttes to represent the span of these 9124 * pages. 9125 */ 9126 tte8k_cnt = ttecnt[TTE8K] + 9127 (ttecnt[TTE64K] << (MMU_PAGESHIFT64K - MMU_PAGESHIFT)) + 9128 (ttecnt[TTE512K] << (MMU_PAGESHIFT512K - MMU_PAGESHIFT)); 9129 if (mmu_page_sizes == max_mmu_page_sizes) { 9130 tte4m_cnt = ttecnt[TTE4M] + 9131 (ttecnt[TTE32M] << (MMU_PAGESHIFT32M - MMU_PAGESHIFT4M)) + 9132 (ttecnt[TTE256M] << (MMU_PAGESHIFT256M - MMU_PAGESHIFT4M)); 9133 } else { 9134 tte4m_cnt = ttecnt[TTE4M]; 9135 } 9136 9137 /* 9138 * Inflate TSB sizes by a factor of 2 if this process 9139 * uses 4M text pages to minimize extra conflict misses 9140 * in the first TSB since without counting text pages 9141 * 8K TSB may become too small. 9142 * 9143 * Also double the size of the second TSB to minimize 9144 * extra conflict misses due to competition between 4M text pages 9145 * and data pages. 9146 * 9147 * We need to adjust the second TSB allocation threshold by the 9148 * inflation factor, since there is no point in creating a second 9149 * TSB when we know all the mappings can fit in the I/D TLBs. 9150 */ 9151 sectsb_thresh = tsb_sectsb_threshold; 9152 if (sfmmup->sfmmu_flags & HAT_4MTEXT_FLAG) { 9153 tte8k_cnt <<= 1; 9154 tte4m_cnt <<= 1; 9155 sectsb_thresh <<= 1; 9156 } 9157 9158 /* 9159 * Check to see if our TSB is the right size; we may need to 9160 * grow or shrink it. If the process is small, our work is 9161 * finished at this point. 9162 */ 9163 if (tte8k_cnt <= tsb_rss_factor && tte4m_cnt <= sectsb_thresh) { 9164 return; 9165 } 9166 sfmmu_size_tsb(sfmmup, growing, tte8k_cnt, tte4m_cnt, sectsb_thresh); 9167 } 9168 9169 static void 9170 sfmmu_size_tsb(sfmmu_t *sfmmup, int growing, uint64_t tte8k_cnt, 9171 uint64_t tte4m_cnt, int sectsb_thresh) 9172 { 9173 int tsb_bits; 9174 uint_t tsb_szc; 9175 struct tsb_info *tsbinfop; 9176 hatlock_t *hatlockp = NULL; 9177 9178 hatlockp = sfmmu_hat_enter(sfmmup); 9179 ASSERT(hatlockp != NULL); 9180 tsbinfop = sfmmup->sfmmu_tsb; 9181 ASSERT(tsbinfop != NULL); 9182 9183 /* 9184 * If we're growing, select the size based on RSS. If we're 9185 * shrinking, leave some room so we don't have to turn around and 9186 * grow again immediately. 9187 */ 9188 if (growing) 9189 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt); 9190 else 9191 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt << 1); 9192 9193 if (!growing && (tsb_szc < tsbinfop->tsb_szc) && 9194 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) { 9195 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc, 9196 hatlockp, TSB_SHRINK); 9197 } else if (growing && tsb_szc > tsbinfop->tsb_szc && TSB_OK_GROW()) { 9198 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc, 9199 hatlockp, TSB_GROW); 9200 } 9201 tsbinfop = sfmmup->sfmmu_tsb; 9202 9203 /* 9204 * With the TLB and first TSB out of the way, we need to see if 9205 * we need a second TSB for 4M pages. If we managed to reprogram 9206 * the TLB page sizes above, the process will start using this new 9207 * TSB right away; otherwise, it will start using it on the next 9208 * context switch. Either way, it's no big deal so there's no 9209 * synchronization with the trap handlers here unless we grow the 9210 * TSB (in which case it's required to prevent using the old one 9211 * after it's freed). Note: second tsb is required for 32M/256M 9212 * page sizes. 9213 */ 9214 if (tte4m_cnt > sectsb_thresh) { 9215 /* 9216 * If we're growing, select the size based on RSS. 
If we're 9217 * shrinking, leave some room so we don't have to turn 9218 * around and grow again immediately. 9219 */ 9220 if (growing) 9221 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt); 9222 else 9223 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt << 1); 9224 if (tsbinfop->tsb_next == NULL) { 9225 struct tsb_info *newtsb; 9226 int allocflags = SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)? 9227 0 : TSB_ALLOC; 9228 9229 sfmmu_hat_exit(hatlockp); 9230 9231 /* 9232 * Try to allocate a TSB for 4[32|256]M pages. If we 9233 * can't get the size we want, retry w/a minimum sized 9234 * TSB. If that still didn't work, give up; we can 9235 * still run without one. 9236 */ 9237 tsb_bits = (mmu_page_sizes == max_mmu_page_sizes)? 9238 TSB4M|TSB32M|TSB256M:TSB4M; 9239 if ((sfmmu_tsbinfo_alloc(&newtsb, tsb_szc, tsb_bits, 9240 allocflags, sfmmup) != 0) && 9241 (sfmmu_tsbinfo_alloc(&newtsb, TSB_MIN_SZCODE, 9242 tsb_bits, allocflags, sfmmup) != 0)) { 9243 return; 9244 } 9245 9246 hatlockp = sfmmu_hat_enter(sfmmup); 9247 9248 if (sfmmup->sfmmu_tsb->tsb_next == NULL) { 9249 sfmmup->sfmmu_tsb->tsb_next = newtsb; 9250 SFMMU_STAT(sf_tsb_sectsb_create); 9251 sfmmu_setup_tsbinfo(sfmmup); 9252 sfmmu_hat_exit(hatlockp); 9253 return; 9254 } else { 9255 /* 9256 * It's annoying, but possible for us 9257 * to get here.. we dropped the HAT lock 9258 * because of locking order in the kmem 9259 * allocator, and while we were off getting 9260 * our memory, some other thread decided to 9261 * do us a favor and won the race to get a 9262 * second TSB for this process. Sigh. 9263 */ 9264 sfmmu_hat_exit(hatlockp); 9265 sfmmu_tsbinfo_free(newtsb); 9266 return; 9267 } 9268 } 9269 9270 /* 9271 * We have a second TSB, see if it's big enough. 9272 */ 9273 tsbinfop = tsbinfop->tsb_next; 9274 9275 /* 9276 * Check to see if our second TSB is the right size; 9277 * we may need to grow or shrink it. 9278 * To prevent thrashing (e.g. growing the TSB on a 9279 * subsequent map operation), only try to shrink if 9280 * the TSB reach exceeds twice the virtual address 9281 * space size. 9282 */ 9283 if (!growing && (tsb_szc < tsbinfop->tsb_szc) && 9284 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) { 9285 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, 9286 tsb_szc, hatlockp, TSB_SHRINK); 9287 } else if (growing && tsb_szc > tsbinfop->tsb_szc && 9288 TSB_OK_GROW()) { 9289 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, 9290 tsb_szc, hatlockp, TSB_GROW); 9291 } 9292 } 9293 9294 sfmmu_hat_exit(hatlockp); 9295 } 9296 9297 /* 9298 * Free up a sfmmu 9299 * Since the sfmmu is currently embedded in the hat struct we simply zero 9300 * out our fields and free up the ism map blk list if any. 
9301 */ 9302 static void 9303 sfmmu_free_sfmmu(sfmmu_t *sfmmup) 9304 { 9305 ism_blk_t *blkp, *nx_blkp; 9306 #ifdef DEBUG 9307 ism_map_t *map; 9308 int i; 9309 #endif 9310 9311 ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0); 9312 ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0); 9313 ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0); 9314 ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0); 9315 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0); 9316 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0); 9317 9318 sfmmup->sfmmu_free = 0; 9319 sfmmup->sfmmu_ismhat = 0; 9320 9321 blkp = sfmmup->sfmmu_iblk; 9322 sfmmup->sfmmu_iblk = NULL; 9323 9324 while (blkp) { 9325 #ifdef DEBUG 9326 map = blkp->iblk_maps; 9327 for (i = 0; i < ISM_MAP_SLOTS; i++) { 9328 ASSERT(map[i].imap_seg == 0); 9329 ASSERT(map[i].imap_ismhat == NULL); 9330 ASSERT(map[i].imap_ment == NULL); 9331 } 9332 #endif 9333 nx_blkp = blkp->iblk_next; 9334 blkp->iblk_next = NULL; 9335 blkp->iblk_nextpa = (uint64_t)-1; 9336 kmem_cache_free(ism_blk_cache, blkp); 9337 blkp = nx_blkp; 9338 } 9339 } 9340 9341 /* 9342 * Locking primitves accessed by HATLOCK macros 9343 */ 9344 9345 #define SFMMU_SPL_MTX (0x0) 9346 #define SFMMU_ML_MTX (0x1) 9347 9348 #define SFMMU_MLSPL_MTX(type, pg) (((type) == SFMMU_SPL_MTX) ? \ 9349 SPL_HASH(pg) : MLIST_HASH(pg)) 9350 9351 kmutex_t * 9352 sfmmu_page_enter(struct page *pp) 9353 { 9354 return (sfmmu_mlspl_enter(pp, SFMMU_SPL_MTX)); 9355 } 9356 9357 void 9358 sfmmu_page_exit(kmutex_t *spl) 9359 { 9360 mutex_exit(spl); 9361 } 9362 9363 int 9364 sfmmu_page_spl_held(struct page *pp) 9365 { 9366 return (sfmmu_mlspl_held(pp, SFMMU_SPL_MTX)); 9367 } 9368 9369 kmutex_t * 9370 sfmmu_mlist_enter(struct page *pp) 9371 { 9372 return (sfmmu_mlspl_enter(pp, SFMMU_ML_MTX)); 9373 } 9374 9375 void 9376 sfmmu_mlist_exit(kmutex_t *mml) 9377 { 9378 mutex_exit(mml); 9379 } 9380 9381 int 9382 sfmmu_mlist_held(struct page *pp) 9383 { 9384 9385 return (sfmmu_mlspl_held(pp, SFMMU_ML_MTX)); 9386 } 9387 9388 /* 9389 * Common code for sfmmu_mlist_enter() and sfmmu_page_enter(). For 9390 * sfmmu_mlist_enter() case mml_table lock array is used and for 9391 * sfmmu_page_enter() sfmmu_page_lock lock array is used. 9392 * 9393 * The lock is taken on a root page so that it protects an operation on all 9394 * constituent pages of a large page pp belongs to. 9395 * 9396 * The routine takes a lock from the appropriate array. The lock is determined 9397 * by hashing the root page. After taking the lock this routine checks if the 9398 * root page has the same size code that was used to determine the root (i.e 9399 * that root hasn't changed). If root page has the expected p_szc field we 9400 * have the right lock and it's returned to the caller. If root's p_szc 9401 * decreased we release the lock and retry from the beginning. This case can 9402 * happen due to hat_page_demote() decreasing p_szc between our load of p_szc 9403 * value and taking the lock. The number of retries due to p_szc decrease is 9404 * limited by the maximum p_szc value. If p_szc is 0 we return the lock 9405 * determined by hashing pp itself. 9406 * 9407 * If our caller doesn't hold a SE_SHARED or SE_EXCL lock on pp it's also 9408 * possible that p_szc can increase. To increase p_szc a thread has to lock 9409 * all constituent pages EXCL and do hat_pageunload() on all of them. All the 9410 * callers that don't hold a page locked recheck if hmeblk through which pp 9411 * was found still maps this pp. If it doesn't map it anymore returned lock 9412 * is immediately dropped. 
Therefore if sfmmu_mlspl_enter() hits the case of 9413 * p_szc increase after taking the lock it returns this lock without further 9414 * retries because in this case the caller doesn't care about which lock was 9415 * taken. The caller will drop it right away. 9416 * 9417 * After the routine returns it's guaranteed that hat_page_demote() can't 9418 * change p_szc field of any of constituent pages of a large page pp belongs 9419 * to as long as pp was either locked at least SHARED prior to this call or 9420 * the caller finds that hment that pointed to this pp still references this 9421 * pp (this also assumes that the caller holds hme hash bucket lock so that 9422 * the same pp can't be remapped into the same hmeblk after it was unmapped by 9423 * hat_pageunload()). 9424 */ 9425 static kmutex_t * 9426 sfmmu_mlspl_enter(struct page *pp, int type) 9427 { 9428 kmutex_t *mtx; 9429 uint_t prev_rszc = UINT_MAX; 9430 page_t *rootpp; 9431 uint_t szc; 9432 uint_t rszc; 9433 uint_t pszc = pp->p_szc; 9434 9435 ASSERT(pp != NULL); 9436 9437 again: 9438 if (pszc == 0) { 9439 mtx = SFMMU_MLSPL_MTX(type, pp); 9440 mutex_enter(mtx); 9441 return (mtx); 9442 } 9443 9444 /* The lock lives in the root page */ 9445 rootpp = PP_GROUPLEADER(pp, pszc); 9446 mtx = SFMMU_MLSPL_MTX(type, rootpp); 9447 mutex_enter(mtx); 9448 9449 /* 9450 * Return mml in the following 3 cases: 9451 * 9452 * 1) If pp itself is root since if its p_szc decreased before we took 9453 * the lock pp is still the root of smaller szc page. And if its p_szc 9454 * increased it doesn't matter what lock we return (see comment in 9455 * front of this routine). 9456 * 9457 * 2) If pp's not root but rootpp is the root of a rootpp->p_szc size 9458 * large page we have the right lock since any previous potential 9459 * hat_page_demote() is done demoting from greater than current root's 9460 * p_szc because hat_page_demote() changes root's p_szc last. No 9461 * further hat_page_demote() can start or be in progress since it 9462 * would need the same lock we currently hold. 9463 * 9464 * 3) If rootpp's p_szc increased since previous iteration it doesn't 9465 * matter what lock we return (see comment in front of this routine). 9466 */ 9467 if (pp == rootpp || (rszc = rootpp->p_szc) == pszc || 9468 rszc >= prev_rszc) { 9469 return (mtx); 9470 } 9471 9472 /* 9473 * hat_page_demote() could have decreased root's p_szc. 9474 * In this case pp's p_szc must also be smaller than pszc. 9475 * Retry. 9476 */ 9477 if (rszc < pszc) { 9478 szc = pp->p_szc; 9479 if (szc < pszc) { 9480 mutex_exit(mtx); 9481 pszc = szc; 9482 goto again; 9483 } 9484 /* 9485 * pp's p_szc increased after it was decreased. 9486 * page cannot be mapped. Return current lock. The caller 9487 * will drop it right away. 9488 */ 9489 return (mtx); 9490 } 9491 9492 /* 9493 * root's p_szc is greater than pp's p_szc. 9494 * hat_page_demote() is not done with all pages 9495 * yet. Wait for it to complete. 
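 *
 * The wait is implemented by the mutex_enter()/mutex_exit() pair
 * below with no work in between: hat_page_demote() runs while
 * holding the (larger) root page's mutex, so acquiring and then
 * immediately releasing that mutex can only complete once the
 * demotion has finished; we then retry from 'again' with fresh
 * p_szc values.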
9496 */ 9497 mutex_exit(mtx); 9498 rootpp = PP_GROUPLEADER(rootpp, rszc); 9499 mtx = SFMMU_MLSPL_MTX(type, rootpp); 9500 mutex_enter(mtx); 9501 mutex_exit(mtx); 9502 prev_rszc = rszc; 9503 goto again; 9504 } 9505 9506 static int 9507 sfmmu_mlspl_held(struct page *pp, int type) 9508 { 9509 kmutex_t *mtx; 9510 9511 ASSERT(pp != NULL); 9512 /* The lock lives in the root page */ 9513 pp = PP_PAGEROOT(pp); 9514 ASSERT(pp != NULL); 9515 9516 mtx = SFMMU_MLSPL_MTX(type, pp); 9517 return (MUTEX_HELD(mtx)); 9518 } 9519 9520 static uint_t 9521 sfmmu_get_free_hblk(struct hme_blk **hmeblkpp, uint_t critical) 9522 { 9523 struct hme_blk *hblkp; 9524 9525 if (freehblkp != NULL) { 9526 mutex_enter(&freehblkp_lock); 9527 if (freehblkp != NULL) { 9528 /* 9529 * If the current thread owns hblk_reserve, 9530 * let it succeed even if freehblkcnt is really low. 9531 */ 9532 if (freehblkcnt <= HBLK_RESERVE_MIN && !critical) { 9533 SFMMU_STAT(sf_get_free_throttle); 9534 mutex_exit(&freehblkp_lock); 9535 return (0); 9536 } 9537 freehblkcnt--; 9538 *hmeblkpp = freehblkp; 9539 hblkp = *hmeblkpp; 9540 freehblkp = hblkp->hblk_next; 9541 mutex_exit(&freehblkp_lock); 9542 hblkp->hblk_next = NULL; 9543 SFMMU_STAT(sf_get_free_success); 9544 return (1); 9545 } 9546 mutex_exit(&freehblkp_lock); 9547 } 9548 SFMMU_STAT(sf_get_free_fail); 9549 return (0); 9550 } 9551 9552 static uint_t 9553 sfmmu_put_free_hblk(struct hme_blk *hmeblkp, uint_t critical) 9554 { 9555 struct hme_blk *hblkp; 9556 9557 /* 9558 * If the current thread is mapping into kernel space, 9559 * let it succeed even if freehblkcnt is at the max, 9560 * so that it will avoid freeing the hblk to kmem. 9561 * This will prevent a stack overflow due to 9562 * possible recursion, since kmem_cache_free() 9563 * might require creation of a slab which 9564 * in turn needs an hmeblk to map that slab; 9565 * let's break this vicious chain at the first 9566 * opportunity. 9567 */ 9568 if (freehblkcnt < HBLK_RESERVE_CNT || critical) { 9569 mutex_enter(&freehblkp_lock); 9570 if (freehblkcnt < HBLK_RESERVE_CNT || critical) { 9571 SFMMU_STAT(sf_put_free_success); 9572 freehblkcnt++; 9573 hmeblkp->hblk_next = freehblkp; 9574 freehblkp = hmeblkp; 9575 mutex_exit(&freehblkp_lock); 9576 return (1); 9577 } 9578 mutex_exit(&freehblkp_lock); 9579 } 9580 9581 /* 9582 * Bring down freehblkcnt to HBLK_RESERVE_CNT. We are here 9583 * only if freehblkcnt is at least HBLK_RESERVE_CNT *and* 9584 * we are not in the process of mapping into kernel space.
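 *
 * (As elsewhere in this file, the loop below only peeks at
 * freehblkcnt without the lock and re-checks it under
 * freehblkp_lock before unlinking anything; the unlocked test is
 * just a hint, so a stale value costs at most an extra lock
 * acquisition. Note also that each excess hblk is handed to
 * kmem_cache_free() only after freehblkp_lock has been dropped.)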
9585 */ 9586 ASSERT(!critical); 9587 while (freehblkcnt > HBLK_RESERVE_CNT) { 9588 mutex_enter(&freehblkp_lock); 9589 if (freehblkcnt > HBLK_RESERVE_CNT) { 9590 freehblkcnt--; 9591 hblkp = freehblkp; 9592 freehblkp = hblkp->hblk_next; 9593 mutex_exit(&freehblkp_lock); 9594 ASSERT(get_hblk_cache(hblkp) == sfmmu8_cache); 9595 kmem_cache_free(sfmmu8_cache, hblkp); 9596 continue; 9597 } 9598 mutex_exit(&freehblkp_lock); 9599 } 9600 SFMMU_STAT(sf_put_free_fail); 9601 return (0); 9602 } 9603 9604 static void 9605 sfmmu_hblk_swap(struct hme_blk *new) 9606 { 9607 struct hme_blk *old, *hblkp, *prev; 9608 uint64_t hblkpa, prevpa, newpa; 9609 caddr_t base, vaddr, endaddr; 9610 struct hmehash_bucket *hmebp; 9611 struct sf_hment *osfhme, *nsfhme; 9612 page_t *pp; 9613 kmutex_t *pml; 9614 tte_t tte; 9615 9616 #ifdef DEBUG 9617 hmeblk_tag hblktag; 9618 struct hme_blk *found; 9619 #endif 9620 old = HBLK_RESERVE; 9621 9622 /* 9623 * save pa before bcopy clobbers it 9624 */ 9625 newpa = new->hblk_nextpa; 9626 9627 base = (caddr_t)get_hblk_base(old); 9628 endaddr = base + get_hblk_span(old); 9629 9630 /* 9631 * acquire hash bucket lock. 9632 */ 9633 hmebp = sfmmu_tteload_acquire_hashbucket(ksfmmup, base, TTE8K); 9634 9635 /* 9636 * copy contents from old to new 9637 */ 9638 bcopy((void *)old, (void *)new, HME8BLK_SZ); 9639 9640 /* 9641 * add new to hash chain 9642 */ 9643 sfmmu_hblk_hash_add(hmebp, new, newpa); 9644 9645 /* 9646 * search hash chain for hblk_reserve; this needs to be performed 9647 * after adding new, otherwise prevpa and prev won't correspond 9648 * to the hblk which is prior to old in hash chain when we call 9649 * sfmmu_hblk_hash_rm to remove old later. 9650 */ 9651 for (prevpa = 0, prev = NULL, 9652 hblkpa = hmebp->hmeh_nextpa, hblkp = hmebp->hmeblkp; 9653 hblkp != NULL && hblkp != old; 9654 prevpa = hblkpa, prev = hblkp, 9655 hblkpa = hblkp->hblk_nextpa, hblkp = hblkp->hblk_next); 9656 9657 if (hblkp != old) 9658 panic("sfmmu_hblk_swap: hblk_reserve not found"); 9659 9660 /* 9661 * p_mapping list is still pointing to hments in hblk_reserve; 9662 * fix up p_mapping list so that they point to hments in new. 9663 * 9664 * Since all these mappings are created by hblk_reserve_thread 9665 * on the way and it's using at least one of the buffers from each of 9666 * the newly minted slabs, there is no danger of any of these 9667 * mappings getting unloaded by another thread. 9668 * 9669 * tsbmiss could only modify ref/mod bits of hments in old/new. 9670 * Since all of these hments hold mappings established by segkmem 9671 * and mappings in segkmem are setup with HAT_NOSYNC, ref/mod bits 9672 * have no meaning for the mappings in hblk_reserve. hments in 9673 * old and new are identical except for ref/mod bits. 
9674 */ 9675 for (vaddr = base; vaddr < endaddr; vaddr += TTEBYTES(TTE8K)) { 9676 9677 HBLKTOHME(osfhme, old, vaddr); 9678 sfmmu_copytte(&osfhme->hme_tte, &tte); 9679 9680 if (TTE_IS_VALID(&tte)) { 9681 if ((pp = osfhme->hme_page) == NULL) 9682 panic("sfmmu_hblk_swap: page not mapped"); 9683 9684 pml = sfmmu_mlist_enter(pp); 9685 9686 if (pp != osfhme->hme_page) 9687 panic("sfmmu_hblk_swap: mapping changed"); 9688 9689 HBLKTOHME(nsfhme, new, vaddr); 9690 9691 HME_ADD(nsfhme, pp); 9692 HME_SUB(osfhme, pp); 9693 9694 sfmmu_mlist_exit(pml); 9695 } 9696 } 9697 9698 /* 9699 * remove old from hash chain 9700 */ 9701 sfmmu_hblk_hash_rm(hmebp, old, prevpa, prev); 9702 9703 #ifdef DEBUG 9704 9705 hblktag.htag_id = ksfmmup; 9706 hblktag.htag_bspage = HME_HASH_BSPAGE(base, HME_HASH_SHIFT(TTE8K)); 9707 hblktag.htag_rehash = HME_HASH_REHASH(TTE8K); 9708 HME_HASH_FAST_SEARCH(hmebp, hblktag, found); 9709 9710 if (found != new) 9711 panic("sfmmu_hblk_swap: new hblk not found"); 9712 #endif 9713 9714 SFMMU_HASH_UNLOCK(hmebp); 9715 9716 /* 9717 * Reset hblk_reserve 9718 */ 9719 bzero((void *)old, HME8BLK_SZ); 9720 old->hblk_nextpa = va_to_pa((caddr_t)old); 9721 } 9722 9723 /* 9724 * Grab the mlist mutex for both pages passed in. 9725 * 9726 * low and high will be returned as pointers to the mutexes for these pages. 9727 * low refers to the mutex residing in the lower bin of the mlist hash, while 9728 * high refers to the mutex residing in the higher bin of the mlist hash. This 9729 * is due to the locking order restrictions on the same thread grabbing 9730 * multiple mlist mutexes. The low lock must be acquired before the high lock. 9731 * 9732 * If both pages hash to the same mutex, only grab that single mutex, and 9733 * high will be returned as NULL 9734 * If the pages hash to different bins in the hash, grab the lower addressed 9735 * lock first and then the higher addressed lock in order to follow the locking 9736 * rules involved with the same thread grabbing multiple mlist mutexes. 9737 * low and high will both have non-NULL values. 9738 */ 9739 static void 9740 sfmmu_mlist_reloc_enter(struct page *targ, struct page *repl, 9741 kmutex_t **low, kmutex_t **high) 9742 { 9743 kmutex_t *mml_targ, *mml_repl; 9744 9745 /* 9746 * no need to do the dance around szc as in sfmmu_mlist_enter() 9747 * because this routine is only called by hat_page_relocate() and all 9748 * targ and repl pages are already locked EXCL so szc can't change. 
9749 */ 9750 9751 mml_targ = MLIST_HASH(PP_PAGEROOT(targ)); 9752 mml_repl = MLIST_HASH(PP_PAGEROOT(repl)); 9753 9754 if (mml_targ == mml_repl) { 9755 *low = mml_targ; 9756 *high = NULL; 9757 } else { 9758 if (mml_targ < mml_repl) { 9759 *low = mml_targ; 9760 *high = mml_repl; 9761 } else { 9762 *low = mml_repl; 9763 *high = mml_targ; 9764 } 9765 } 9766 9767 mutex_enter(*low); 9768 if (*high) 9769 mutex_enter(*high); 9770 } 9771 9772 static void 9773 sfmmu_mlist_reloc_exit(kmutex_t *low, kmutex_t *high) 9774 { 9775 if (high) 9776 mutex_exit(high); 9777 mutex_exit(low); 9778 } 9779 9780 static hatlock_t * 9781 sfmmu_hat_enter(sfmmu_t *sfmmup) 9782 { 9783 hatlock_t *hatlockp; 9784 9785 if (sfmmup != ksfmmup) { 9786 hatlockp = TSB_HASH(sfmmup); 9787 mutex_enter(HATLOCK_MUTEXP(hatlockp)); 9788 return (hatlockp); 9789 } 9790 return (NULL); 9791 } 9792 9793 static hatlock_t * 9794 sfmmu_hat_tryenter(sfmmu_t *sfmmup) 9795 { 9796 hatlock_t *hatlockp; 9797 9798 if (sfmmup != ksfmmup) { 9799 hatlockp = TSB_HASH(sfmmup); 9800 if (mutex_tryenter(HATLOCK_MUTEXP(hatlockp)) == 0) 9801 return (NULL); 9802 return (hatlockp); 9803 } 9804 return (NULL); 9805 } 9806 9807 static void 9808 sfmmu_hat_exit(hatlock_t *hatlockp) 9809 { 9810 if (hatlockp != NULL) 9811 mutex_exit(HATLOCK_MUTEXP(hatlockp)); 9812 } 9813 9814 static void 9815 sfmmu_hat_lock_all(void) 9816 { 9817 int i; 9818 for (i = 0; i < SFMMU_NUM_LOCK; i++) 9819 mutex_enter(HATLOCK_MUTEXP(&hat_lock[i])); 9820 } 9821 9822 static void 9823 sfmmu_hat_unlock_all(void) 9824 { 9825 int i; 9826 for (i = SFMMU_NUM_LOCK - 1; i >= 0; i--) 9827 mutex_exit(HATLOCK_MUTEXP(&hat_lock[i])); 9828 } 9829 9830 int 9831 sfmmu_hat_lock_held(sfmmu_t *sfmmup) 9832 { 9833 ASSERT(sfmmup != ksfmmup); 9834 return (MUTEX_HELD(HATLOCK_MUTEXP(TSB_HASH(sfmmup)))); 9835 } 9836 9837 /* 9838 * Locking primitives to provide consistency between ISM unmap 9839 * and other operations. Since ISM unmap can take a long time, we 9840 * use HAT_ISMBUSY flag (protected by the hatlock) to avoid creating 9841 * contention on the hatlock buckets while ISM segments are being 9842 * unmapped. The tradeoff is that the flags don't prevent priority 9843 * inversion from occurring, so we must request kernel priority in 9844 * case we have to sleep to keep from getting buried while holding 9845 * the HAT_ISMBUSY flag set, which in turn could block other kernel 9846 * threads from running (for example, in sfmmu_uvatopfn()). 9847 */ 9848 static void 9849 sfmmu_ismhat_enter(sfmmu_t *sfmmup, int hatlock_held) 9850 { 9851 hatlock_t *hatlockp; 9852 9853 THREAD_KPRI_REQUEST(); 9854 if (!hatlock_held) 9855 hatlockp = sfmmu_hat_enter(sfmmup); 9856 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) 9857 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp)); 9858 SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY); 9859 if (!hatlock_held) 9860 sfmmu_hat_exit(hatlockp); 9861 } 9862 9863 static void 9864 sfmmu_ismhat_exit(sfmmu_t *sfmmup, int hatlock_held) 9865 { 9866 hatlock_t *hatlockp; 9867 9868 if (!hatlock_held) 9869 hatlockp = sfmmu_hat_enter(sfmmup); 9870 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 9871 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY); 9872 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 9873 if (!hatlock_held) 9874 sfmmu_hat_exit(hatlockp); 9875 THREAD_KPRI_RELEASE(); 9876 } 9877 9878 /* 9879 * 9880 * Algorithm: 9881 * 9882 * (1) if segkmem is not ready, allocate hblk from an array of pre-alloc'ed 9883 * hblks. 
9884 * 9885 * (2) if we are allocating an hblk for mapping a slab in sfmmu_cache, 9886 * 9887 * (a) try to return an hblk from reserve pool of free hblks; 9888 * (b) if the reserve pool is empty, acquire hblk_reserve_lock 9889 * and return hblk_reserve. 9890 * 9891 * (3) call kmem_cache_alloc() to allocate hblk; 9892 * 9893 * (a) if hblk_reserve_lock is held by the current thread, 9894 * atomically replace hblk_reserve by the hblk that is 9895 * returned by kmem_cache_alloc; release hblk_reserve_lock 9896 * and call kmem_cache_alloc() again. 9897 * (b) if reserve pool is not full, add the hblk that is 9898 * returned by kmem_cache_alloc to reserve pool and 9899 * call kmem_cache_alloc again. 9900 * 9901 */ 9902 static struct hme_blk * 9903 sfmmu_hblk_alloc(sfmmu_t *sfmmup, caddr_t vaddr, 9904 struct hmehash_bucket *hmebp, uint_t size, hmeblk_tag hblktag, 9905 uint_t flags) 9906 { 9907 struct hme_blk *hmeblkp = NULL; 9908 struct hme_blk *newhblkp; 9909 struct hme_blk *shw_hblkp = NULL; 9910 struct kmem_cache *sfmmu_cache = NULL; 9911 uint64_t hblkpa; 9912 ulong_t index; 9913 uint_t owner; /* set to 1 if using hblk_reserve */ 9914 uint_t forcefree; 9915 int sleep; 9916 9917 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 9918 9919 /* 9920 * If segkmem is not created yet, allocate from static hmeblks 9921 * created at the end of startup_modules(). See the block comment 9922 * in startup_modules() describing how we estimate the number of 9923 * static hmeblks that will be needed during re-map. 9924 */ 9925 if (!hblk_alloc_dynamic) { 9926 9927 if (size == TTE8K) { 9928 index = nucleus_hblk8.index; 9929 if (index >= nucleus_hblk8.len) { 9930 /* 9931 * If we panic here, see startup_modules() to 9932 * make sure that we are calculating the 9933 * number of hblk8's that we need correctly. 9934 */ 9935 panic("no nucleus hblk8 to allocate"); 9936 } 9937 hmeblkp = 9938 (struct hme_blk *)&nucleus_hblk8.list[index]; 9939 nucleus_hblk8.index++; 9940 SFMMU_STAT(sf_hblk8_nalloc); 9941 } else { 9942 index = nucleus_hblk1.index; 9943 if (nucleus_hblk1.index >= nucleus_hblk1.len) { 9944 /* 9945 * If we panic here, see startup_modules() 9946 * and H8TOH1; most likely you need to 9947 * update the calculation of the number 9948 * of hblk1's the kernel needs to boot. 9949 */ 9950 panic("no nucleus hblk1 to allocate"); 9951 } 9952 hmeblkp = 9953 (struct hme_blk *)&nucleus_hblk1.list[index]; 9954 nucleus_hblk1.index++; 9955 SFMMU_STAT(sf_hblk1_nalloc); 9956 } 9957 9958 goto hblk_init; 9959 } 9960 9961 SFMMU_HASH_UNLOCK(hmebp); 9962 9963 if (sfmmup != KHATID) { 9964 if (mmu_page_sizes == max_mmu_page_sizes) { 9965 if (size < TTE256M) 9966 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr, 9967 size, flags); 9968 } else { 9969 if (size < TTE4M) 9970 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr, 9971 size, flags); 9972 } 9973 } 9974 9975 fill_hblk: 9976 owner = (hblk_reserve_thread == curthread) ? 1 : 0; 9977 9978 if (owner && size == TTE8K) { 9979 9980 /* 9981 * We are really in a tight spot. We already own 9982 * hblk_reserve and we need another hblk. In anticipation 9983 * of this kind of scenario, we specifically set aside 9984 * HBLK_RESERVE_MIN number of hblks to be used exclusively 9985 * by owner of hblk_reserve. 9986 */ 9987 SFMMU_STAT(sf_hblk_recurse_cnt); 9988 9989 if (!sfmmu_get_free_hblk(&hmeblkp, 1)) 9990 panic("sfmmu_hblk_alloc: reserve list is empty"); 9991 9992 goto hblk_verify; 9993 } 9994 9995 ASSERT(!owner); 9996 9997 if ((flags & HAT_NO_KALLOC) == 0) { 9998 9999 sfmmu_cache = ((size == TTE8K) ? 
sfmmu8_cache : sfmmu1_cache); 10000 sleep = ((sfmmup == KHATID) ? KM_NOSLEEP : KM_SLEEP); 10001 10002 if ((hmeblkp = kmem_cache_alloc(sfmmu_cache, sleep)) == NULL) { 10003 hmeblkp = sfmmu_hblk_steal(size); 10004 } else { 10005 /* 10006 * if we are the owner of hblk_reserve, 10007 * swap hblk_reserve with hmeblkp and 10008 * start a fresh life. Hope things go 10009 * better this time. 10010 */ 10011 if (hblk_reserve_thread == curthread) { 10012 ASSERT(sfmmu_cache == sfmmu8_cache); 10013 sfmmu_hblk_swap(hmeblkp); 10014 hblk_reserve_thread = NULL; 10015 mutex_exit(&hblk_reserve_lock); 10016 goto fill_hblk; 10017 } 10018 /* 10019 * let's donate this hblk to our reserve list if 10020 * we are not mapping kernel range 10021 */ 10022 if (size == TTE8K && sfmmup != KHATID) 10023 if (sfmmu_put_free_hblk(hmeblkp, 0)) 10024 goto fill_hblk; 10025 } 10026 } else { 10027 /* 10028 * We are here to map the slab in sfmmu8_cache; let's 10029 * check if we could tap our reserve list; if successful, 10030 * this will avoid the pain of going thru sfmmu_hblk_swap 10031 */ 10032 SFMMU_STAT(sf_hblk_slab_cnt); 10033 if (!sfmmu_get_free_hblk(&hmeblkp, 0)) { 10034 /* 10035 * let's start hblk_reserve dance 10036 */ 10037 SFMMU_STAT(sf_hblk_reserve_cnt); 10038 owner = 1; 10039 mutex_enter(&hblk_reserve_lock); 10040 hmeblkp = HBLK_RESERVE; 10041 hblk_reserve_thread = curthread; 10042 } 10043 } 10044 10045 hblk_verify: 10046 ASSERT(hmeblkp != NULL); 10047 set_hblk_sz(hmeblkp, size); 10048 ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp)); 10049 SFMMU_HASH_LOCK(hmebp); 10050 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp); 10051 if (newhblkp != NULL) { 10052 SFMMU_HASH_UNLOCK(hmebp); 10053 if (hmeblkp != HBLK_RESERVE) { 10054 /* 10055 * This is really tricky! 10056 * 10057 * vmem_alloc(vmem_seg_arena) 10058 * vmem_alloc(vmem_internal_arena) 10059 * segkmem_alloc(heap_arena) 10060 * vmem_alloc(heap_arena) 10061 * page_create() 10062 * hat_memload() 10063 * kmem_cache_free() 10064 * kmem_cache_alloc() 10065 * kmem_slab_create() 10066 * vmem_alloc(kmem_internal_arena) 10067 * segkmem_alloc(heap_arena) 10068 * vmem_alloc(heap_arena) 10069 * page_create() 10070 * hat_memload() 10071 * kmem_cache_free() 10072 * ... 10073 * 10074 * Thus, hat_memload() could call kmem_cache_free 10075 * for enough number of times that we could easily 10076 * hit the bottom of the stack or run out of reserve 10077 * list of vmem_seg structs. So, we must donate 10078 * this hblk to reserve list if it's allocated 10079 * from sfmmu8_cache *and* mapping kernel range. 10080 * We don't need to worry about freeing hmeblk1's 10081 * to kmem since they don't map any kmem slabs. 10082 * 10083 * Note: When segkmem supports largepages, we must 10084 * free hmeblk1's to reserve list as well. 10085 */ 10086 forcefree = (sfmmup == KHATID) ? 1 : 0; 10087 if (size == TTE8K && 10088 sfmmu_put_free_hblk(hmeblkp, forcefree)) { 10089 goto re_verify; 10090 } 10091 ASSERT(sfmmup != KHATID); 10092 kmem_cache_free(get_hblk_cache(hmeblkp), hmeblkp); 10093 } else { 10094 /* 10095 * Hey! we don't need hblk_reserve any more. 
10096 */ 10097 ASSERT(owner); 10098 hblk_reserve_thread = NULL; 10099 mutex_exit(&hblk_reserve_lock); 10100 owner = 0; 10101 } 10102 re_verify: 10103 /* 10104 * let's check if the goodies are still present 10105 */ 10106 SFMMU_HASH_LOCK(hmebp); 10107 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp); 10108 if (newhblkp != NULL) { 10109 /* 10110 * return newhblkp if it's not hblk_reserve; 10111 * if newhblkp is hblk_reserve, return it 10112 * _only if_ we are the owner of hblk_reserve. 10113 */ 10114 if (newhblkp != HBLK_RESERVE || owner) { 10115 return (newhblkp); 10116 } else { 10117 /* 10118 * we just hit hblk_reserve in the hash and 10119 * we are not the owner of that; 10120 * 10121 * block until hblk_reserve_thread completes 10122 * swapping hblk_reserve and try the dance 10123 * once again. 10124 */ 10125 SFMMU_HASH_UNLOCK(hmebp); 10126 mutex_enter(&hblk_reserve_lock); 10127 mutex_exit(&hblk_reserve_lock); 10128 SFMMU_STAT(sf_hblk_reserve_hit); 10129 goto fill_hblk; 10130 } 10131 } else { 10132 /* 10133 * it's no more! try the dance once again. 10134 */ 10135 SFMMU_HASH_UNLOCK(hmebp); 10136 goto fill_hblk; 10137 } 10138 } 10139 10140 hblk_init: 10141 set_hblk_sz(hmeblkp, size); 10142 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 10143 hmeblkp->hblk_next = (struct hme_blk *)NULL; 10144 hmeblkp->hblk_tag = hblktag; 10145 hmeblkp->hblk_shadow = shw_hblkp; 10146 hblkpa = hmeblkp->hblk_nextpa; 10147 hmeblkp->hblk_nextpa = 0; 10148 10149 ASSERT(get_hblk_ttesz(hmeblkp) == size); 10150 ASSERT(get_hblk_span(hmeblkp) == HMEBLK_SPAN(size)); 10151 ASSERT(hmeblkp->hblk_hmecnt == 0); 10152 ASSERT(hmeblkp->hblk_vcnt == 0); 10153 ASSERT(hmeblkp->hblk_lckcnt == 0); 10154 ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp)); 10155 sfmmu_hblk_hash_add(hmebp, hmeblkp, hblkpa); 10156 return (hmeblkp); 10157 } 10158 10159 /* 10160 * This function performs any cleanup required on the hme_blk 10161 * and returns it to the free list. 10162 */ 10163 /* ARGSUSED */ 10164 static void 10165 sfmmu_hblk_free(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp, 10166 uint64_t hblkpa, struct hme_blk **listp) 10167 { 10168 int shw_size, vshift; 10169 struct hme_blk *shw_hblkp; 10170 uint_t shw_mask, newshw_mask; 10171 uintptr_t vaddr; 10172 int size; 10173 uint_t critical; 10174 10175 ASSERT(hmeblkp); 10176 ASSERT(!hmeblkp->hblk_hmecnt); 10177 ASSERT(!hmeblkp->hblk_vcnt); 10178 ASSERT(!hmeblkp->hblk_lckcnt); 10179 ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp)); 10180 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve); 10181 10182 critical = (hblktosfmmu(hmeblkp) == KHATID) ? 
1 : 0; 10183 10184 size = get_hblk_ttesz(hmeblkp); 10185 shw_hblkp = hmeblkp->hblk_shadow; 10186 if (shw_hblkp) { 10187 ASSERT(hblktosfmmu(hmeblkp) != KHATID); 10188 if (mmu_page_sizes == max_mmu_page_sizes) { 10189 ASSERT(size < TTE256M); 10190 } else { 10191 ASSERT(size < TTE4M); 10192 } 10193 10194 shw_size = get_hblk_ttesz(shw_hblkp); 10195 vaddr = get_hblk_base(hmeblkp); 10196 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size); 10197 ASSERT(vshift < 8); 10198 /* 10199 * Atomically clear shadow mask bit 10200 */ 10201 do { 10202 shw_mask = shw_hblkp->hblk_shw_mask; 10203 ASSERT(shw_mask & (1 << vshift)); 10204 newshw_mask = shw_mask & ~(1 << vshift); 10205 newshw_mask = cas32(&shw_hblkp->hblk_shw_mask, 10206 shw_mask, newshw_mask); 10207 } while (newshw_mask != shw_mask); 10208 hmeblkp->hblk_shadow = NULL; 10209 } 10210 hmeblkp->hblk_next = NULL; 10211 hmeblkp->hblk_nextpa = hblkpa; 10212 hmeblkp->hblk_shw_bit = 0; 10213 10214 if (hmeblkp->hblk_nuc_bit == 0) { 10215 10216 if (size == TTE8K && sfmmu_put_free_hblk(hmeblkp, critical)) 10217 return; 10218 10219 hmeblkp->hblk_next = *listp; 10220 *listp = hmeblkp; 10221 } 10222 } 10223 10224 static void 10225 sfmmu_hblks_list_purge(struct hme_blk **listp) 10226 { 10227 struct hme_blk *hmeblkp; 10228 10229 while ((hmeblkp = *listp) != NULL) { 10230 *listp = hmeblkp->hblk_next; 10231 kmem_cache_free(get_hblk_cache(hmeblkp), hmeblkp); 10232 } 10233 } 10234 10235 #define BUCKETS_TO_SEARCH_BEFORE_UNLOAD 30 10236 10237 static uint_t sfmmu_hblk_steal_twice; 10238 static uint_t sfmmu_hblk_steal_count, sfmmu_hblk_steal_unload_count; 10239 10240 /* 10241 * Steal a hmeblk 10242 * Enough hmeblks were allocated at startup (nucleus hmeblks) and also 10243 * hmeblks were added dynamically. We should never ever not be able to 10244 * find one. Look for an unused/unlocked hmeblk in user hash table. 10245 */ 10246 static struct hme_blk * 10247 sfmmu_hblk_steal(int size) 10248 { 10249 static struct hmehash_bucket *uhmehash_steal_hand = NULL; 10250 struct hmehash_bucket *hmebp; 10251 struct hme_blk *hmeblkp = NULL, *pr_hblk; 10252 uint64_t hblkpa, prevpa; 10253 int i; 10254 10255 for (;;) { 10256 hmebp = (uhmehash_steal_hand == NULL) ? uhme_hash : 10257 uhmehash_steal_hand; 10258 ASSERT(hmebp >= uhme_hash && hmebp <= &uhme_hash[UHMEHASH_SZ]); 10259 10260 for (i = 0; hmeblkp == NULL && i <= UHMEHASH_SZ + 10261 BUCKETS_TO_SEARCH_BEFORE_UNLOAD; i++) { 10262 SFMMU_HASH_LOCK(hmebp); 10263 hmeblkp = hmebp->hmeblkp; 10264 hblkpa = hmebp->hmeh_nextpa; 10265 prevpa = 0; 10266 pr_hblk = NULL; 10267 while (hmeblkp) { 10268 /* 10269 * check if it is a hmeblk that is not locked 10270 * and not shared. skip shadow hmeblks with 10271 * shadow_mask set i.e valid count non zero. 10272 */ 10273 if ((get_hblk_ttesz(hmeblkp) == size) && 10274 (hmeblkp->hblk_shw_bit == 0 || 10275 hmeblkp->hblk_vcnt == 0) && 10276 (hmeblkp->hblk_lckcnt == 0)) { 10277 /* 10278 * there is a high probability that we 10279 * will find a free one. search some 10280 * buckets for a free hmeblk initially 10281 * before unloading a valid hmeblk. 
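 *
 * Roughly, the test below amounts to (a sketch, not the literal
 * code):
 *
 *	if (hblk_vcnt == 0 && hblk_hmecnt == 0)
 *		steal it now		(completely free)
 *	else if (i >= BUCKETS_TO_SEARCH_BEFORE_UNLOAD)
 *		unload this valid but unlocked, unshared hblk
 *		and steal it
 *	else
 *		keep scanning buckets for a free one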
10282 */ 10283 if ((hmeblkp->hblk_vcnt == 0 && 10284 hmeblkp->hblk_hmecnt == 0) || (i >= 10285 BUCKETS_TO_SEARCH_BEFORE_UNLOAD)) { 10286 if (sfmmu_steal_this_hblk(hmebp, 10287 hmeblkp, hblkpa, prevpa, 10288 pr_hblk)) { 10289 /* 10290 * Hblk is unloaded 10291 * successfully 10292 */ 10293 break; 10294 } 10295 } 10296 } 10297 pr_hblk = hmeblkp; 10298 prevpa = hblkpa; 10299 hblkpa = hmeblkp->hblk_nextpa; 10300 hmeblkp = hmeblkp->hblk_next; 10301 } 10302 10303 SFMMU_HASH_UNLOCK(hmebp); 10304 if (hmebp++ == &uhme_hash[UHMEHASH_SZ]) 10305 hmebp = uhme_hash; 10306 } 10307 uhmehash_steal_hand = hmebp; 10308 10309 if (hmeblkp != NULL) 10310 break; 10311 10312 /* 10313 * in the worst case, look for a free one in the kernel 10314 * hash table. 10315 */ 10316 for (i = 0, hmebp = khme_hash; i <= KHMEHASH_SZ; i++) { 10317 SFMMU_HASH_LOCK(hmebp); 10318 hmeblkp = hmebp->hmeblkp; 10319 hblkpa = hmebp->hmeh_nextpa; 10320 prevpa = 0; 10321 pr_hblk = NULL; 10322 while (hmeblkp) { 10323 /* 10324 * check if it is free hmeblk 10325 */ 10326 if ((get_hblk_ttesz(hmeblkp) == size) && 10327 (hmeblkp->hblk_lckcnt == 0) && 10328 (hmeblkp->hblk_vcnt == 0) && 10329 (hmeblkp->hblk_hmecnt == 0)) { 10330 if (sfmmu_steal_this_hblk(hmebp, 10331 hmeblkp, hblkpa, prevpa, pr_hblk)) { 10332 break; 10333 } else { 10334 /* 10335 * Cannot fail since we have 10336 * hash lock. 10337 */ 10338 panic("fail to steal?"); 10339 } 10340 } 10341 10342 pr_hblk = hmeblkp; 10343 prevpa = hblkpa; 10344 hblkpa = hmeblkp->hblk_nextpa; 10345 hmeblkp = hmeblkp->hblk_next; 10346 } 10347 10348 SFMMU_HASH_UNLOCK(hmebp); 10349 if (hmebp++ == &khme_hash[KHMEHASH_SZ]) 10350 hmebp = khme_hash; 10351 } 10352 10353 if (hmeblkp != NULL) 10354 break; 10355 sfmmu_hblk_steal_twice++; 10356 } 10357 return (hmeblkp); 10358 } 10359 10360 /* 10361 * This routine does real work to prepare a hblk to be "stolen" by 10362 * unloading the mappings, updating shadow counts .... 10363 * It returns 1 if the block is ready to be reused (stolen), or 0 10364 * means the block cannot be stolen yet- pageunload is still working 10365 * on this hblk. 10366 */ 10367 static int 10368 sfmmu_steal_this_hblk(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp, 10369 uint64_t hblkpa, uint64_t prevpa, struct hme_blk *pr_hblk) 10370 { 10371 int shw_size, vshift; 10372 struct hme_blk *shw_hblkp; 10373 uintptr_t vaddr; 10374 uint_t shw_mask, newshw_mask; 10375 10376 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 10377 10378 /* 10379 * check if the hmeblk is free, unload if necessary 10380 */ 10381 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 10382 sfmmu_t *sfmmup; 10383 demap_range_t dmr; 10384 10385 sfmmup = hblktosfmmu(hmeblkp); 10386 DEMAP_RANGE_INIT(sfmmup, &dmr); 10387 (void) sfmmu_hblk_unload(sfmmup, hmeblkp, 10388 (caddr_t)get_hblk_base(hmeblkp), 10389 get_hblk_endaddr(hmeblkp), &dmr, HAT_UNLOAD); 10390 DEMAP_RANGE_FLUSH(&dmr); 10391 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 10392 /* 10393 * Pageunload is working on the same hblk. 
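 * Returning 0 here simply tells the caller to move on; as the
 * loops in sfmmu_hblk_steal() above show, the user-hash scan just
 * advances to the next candidate, while the kernel-hash scan
 * panics because a free kernel hblk cannot be lost while the hash
 * lock is held.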
10394 */ 10395 return (0); 10396 } 10397 10398 sfmmu_hblk_steal_unload_count++; 10399 } 10400 10401 ASSERT(hmeblkp->hblk_lckcnt == 0); 10402 ASSERT(hmeblkp->hblk_vcnt == 0 && hmeblkp->hblk_hmecnt == 0); 10403 10404 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, pr_hblk); 10405 hmeblkp->hblk_nextpa = hblkpa; 10406 10407 shw_hblkp = hmeblkp->hblk_shadow; 10408 if (shw_hblkp) { 10409 shw_size = get_hblk_ttesz(shw_hblkp); 10410 vaddr = get_hblk_base(hmeblkp); 10411 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size); 10412 ASSERT(vshift < 8); 10413 /* 10414 * Atomically clear shadow mask bit 10415 */ 10416 do { 10417 shw_mask = shw_hblkp->hblk_shw_mask; 10418 ASSERT(shw_mask & (1 << vshift)); 10419 newshw_mask = shw_mask & ~(1 << vshift); 10420 newshw_mask = cas32(&shw_hblkp->hblk_shw_mask, 10421 shw_mask, newshw_mask); 10422 } while (newshw_mask != shw_mask); 10423 hmeblkp->hblk_shadow = NULL; 10424 } 10425 10426 /* 10427 * remove shadow bit if we are stealing an unused shadow hmeblk. 10428 * sfmmu_hblk_alloc needs it that way, will set shadow bit later if 10429 * we are indeed allocating a shadow hmeblk. 10430 */ 10431 hmeblkp->hblk_shw_bit = 0; 10432 10433 sfmmu_hblk_steal_count++; 10434 SFMMU_STAT(sf_steal_count); 10435 10436 return (1); 10437 } 10438 10439 struct hme_blk * 10440 sfmmu_hmetohblk(struct sf_hment *sfhme) 10441 { 10442 struct hme_blk *hmeblkp; 10443 struct sf_hment *sfhme0; 10444 struct hme_blk *hblk_dummy = 0; 10445 10446 /* 10447 * No dummy sf_hments, please. 10448 */ 10449 ASSERT(sfhme->hme_tte.ll != 0); 10450 10451 sfhme0 = sfhme - sfhme->hme_tte.tte_hmenum; 10452 hmeblkp = (struct hme_blk *)((uintptr_t)sfhme0 - 10453 (uintptr_t)&hblk_dummy->hblk_hme[0]); 10454 10455 return (hmeblkp); 10456 } 10457 10458 /* 10459 * On swapin, get appropriately sized TSB(s) and clear the HAT_SWAPPED flag. 10460 * If we can't get appropriately sized TSB(s), try for 8K TSB(s) using 10461 * KM_SLEEP allocation. 10462 * 10463 * Return 0 on success, -1 otherwise. 10464 */ 10465 static void 10466 sfmmu_tsb_swapin(sfmmu_t *sfmmup, hatlock_t *hatlockp) 10467 { 10468 struct tsb_info *tsbinfop, *next; 10469 tsb_replace_rc_t rc; 10470 boolean_t gotfirst = B_FALSE; 10471 10472 ASSERT(sfmmup != ksfmmup); 10473 ASSERT(sfmmu_hat_lock_held(sfmmup)); 10474 10475 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPIN)) { 10476 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp)); 10477 } 10478 10479 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 10480 SFMMU_FLAGS_SET(sfmmup, HAT_SWAPIN); 10481 } else { 10482 return; 10483 } 10484 10485 ASSERT(sfmmup->sfmmu_tsb != NULL); 10486 10487 /* 10488 * Loop over all tsbinfo's replacing them with ones that actually have 10489 * a TSB. If any of the replacements ever fail, bail out of the loop. 10490 */ 10491 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; tsbinfop = next) { 10492 ASSERT(tsbinfop->tsb_flags & TSB_SWAPPED); 10493 next = tsbinfop->tsb_next; 10494 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, tsbinfop->tsb_szc, 10495 hatlockp, TSB_SWAPIN); 10496 if (rc != TSB_SUCCESS) { 10497 break; 10498 } 10499 gotfirst = B_TRUE; 10500 } 10501 10502 switch (rc) { 10503 case TSB_SUCCESS: 10504 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN); 10505 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 10506 return; 10507 case TSB_ALLOCFAIL: 10508 break; 10509 default: 10510 panic("sfmmu_replace_tsb returned unrecognized failure code " 10511 "%d", rc); 10512 } 10513 10514 /* 10515 * In this case, we failed to get one of our TSBs. 
If we failed to 10516 * get the first TSB, get one of minimum size (8KB). Walk the list 10517 * and throw away the tsbinfos, starting where the allocation failed; 10518 * we can get by with just one TSB as long as we don't leave the 10519 * SWAPPED tsbinfo structures lying around. 10520 */ 10521 tsbinfop = sfmmup->sfmmu_tsb; 10522 next = tsbinfop->tsb_next; 10523 tsbinfop->tsb_next = NULL; 10524 10525 sfmmu_hat_exit(hatlockp); 10526 for (tsbinfop = next; tsbinfop != NULL; tsbinfop = next) { 10527 next = tsbinfop->tsb_next; 10528 sfmmu_tsbinfo_free(tsbinfop); 10529 } 10530 hatlockp = sfmmu_hat_enter(sfmmup); 10531 10532 /* 10533 * If we don't have any TSBs, get a single 8K TSB for 8K, 64K and 512K 10534 * pages. 10535 */ 10536 if (!gotfirst) { 10537 tsbinfop = sfmmup->sfmmu_tsb; 10538 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, TSB_MIN_SZCODE, 10539 hatlockp, TSB_SWAPIN | TSB_FORCEALLOC); 10540 ASSERT(rc == TSB_SUCCESS); 10541 } else { 10542 /* update machine specific tsbinfo */ 10543 sfmmu_setup_tsbinfo(sfmmup); 10544 } 10545 10546 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN); 10547 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 10548 } 10549 10550 /* 10551 * Handle exceptions for low level tsb_handler. 10552 * 10553 * There are many scenarios that could land us here: 10554 * 10555 * If the context is invalid we land here. The context can be invalid 10556 * for 3 reasons: 1) we couldn't allocate a new context and now need to 10557 * perform a wrap around operation in order to allocate a new context. 10558 * 2) Context was invalidated to change pagesize programming 3) ISMs or 10559 * TSBs configuration is changeing for this process and we are forced into 10560 * here to do a syncronization operation. If the context is valid we can 10561 * be here from window trap hanlder. In this case just call trap to handle 10562 * the fault. 10563 * 10564 * Note that the process will run in INVALID_CONTEXT before 10565 * faulting into here and subsequently loading the MMU registers 10566 * (including the TSB base register) associated with this process. 10567 * For this reason, the trap handlers must all test for 10568 * INVALID_CONTEXT before attempting to access any registers other 10569 * than the context registers. 10570 */ 10571 void 10572 sfmmu_tsbmiss_exception(struct regs *rp, uintptr_t tagaccess, uint_t traptype) 10573 { 10574 sfmmu_t *sfmmup; 10575 uint_t ctxnum; 10576 klwp_id_t lwp; 10577 char lwp_save_state; 10578 hatlock_t *hatlockp; 10579 struct tsb_info *tsbinfop; 10580 10581 SFMMU_STAT(sf_tsb_exceptions); 10582 SFMMU_MMU_STAT(mmu_tsb_exceptions); 10583 sfmmup = astosfmmu(curthread->t_procp->p_as); 10584 ctxnum = tagaccess & TAGACC_CTX_MASK; 10585 10586 ASSERT(sfmmup != ksfmmup && ctxnum != KCONTEXT); 10587 ASSERT(sfmmup->sfmmu_ismhat == 0); 10588 /* 10589 * First, make sure we come out of here with a valid ctx, 10590 * since if we don't get one we'll simply loop on the 10591 * faulting instruction. 10592 * 10593 * If the ISM mappings are changing, the TSB is being relocated, or 10594 * the process is swapped out we serialize behind the controlling 10595 * thread with the sfmmu_flags and sfmmu_tsb_cv condition variable. 10596 * Otherwise we synchronize with the context stealer or the thread 10597 * that required us to change out our MMU registers (such 10598 * as a thread changing out our TSB while we were running) by 10599 * locking the HAT and grabbing the rwlock on the context as a 10600 * reader temporarily. 
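 *
 * In other words, when we land here with INVALID_CONTEXT the code
 * below loops, under the hat lock, roughly like this (a sketch of
 * the control flow, not the literal code):
 *
 *	retry:
 *		if any tsbinfo has TSB_RELOC_FLAG set
 *			cv_wait on sfmmu_tsb_cv, goto retry
 *		if HAT_ISMBUSY is set
 *			cv_wait on sfmmu_tsb_cv, goto retry
 *		if HAT_SWAPPED is set
 *			sfmmu_tsb_swapin(), goto retry
 *		sfmmu_get_ctx()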
10601 */ 10602 ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED) || 10603 ctxnum == INVALID_CONTEXT); 10604 10605 if (ctxnum == INVALID_CONTEXT) { 10606 /* 10607 * Must set lwp state to LWP_SYS before 10608 * trying to acquire any adaptive lock 10609 */ 10610 lwp = ttolwp(curthread); 10611 ASSERT(lwp); 10612 lwp_save_state = lwp->lwp_state; 10613 lwp->lwp_state = LWP_SYS; 10614 10615 hatlockp = sfmmu_hat_enter(sfmmup); 10616 retry: 10617 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; 10618 tsbinfop = tsbinfop->tsb_next) { 10619 if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) { 10620 cv_wait(&sfmmup->sfmmu_tsb_cv, 10621 HATLOCK_MUTEXP(hatlockp)); 10622 goto retry; 10623 } 10624 } 10625 10626 /* 10627 * Wait for ISM maps to be updated. 10628 */ 10629 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) { 10630 cv_wait(&sfmmup->sfmmu_tsb_cv, 10631 HATLOCK_MUTEXP(hatlockp)); 10632 goto retry; 10633 } 10634 10635 /* 10636 * If we're swapping in, get TSB(s). Note that we must do 10637 * this before we get a ctx or load the MMU state. Once 10638 * we swap in we have to recheck to make sure the TSB(s) and 10639 * ISM mappings didn't change while we slept. 10640 */ 10641 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 10642 sfmmu_tsb_swapin(sfmmup, hatlockp); 10643 goto retry; 10644 } 10645 10646 sfmmu_get_ctx(sfmmup); 10647 10648 sfmmu_hat_exit(hatlockp); 10649 /* 10650 * Must restore lwp_state if not calling 10651 * trap() for further processing. Restore 10652 * it anyway. 10653 */ 10654 lwp->lwp_state = lwp_save_state; 10655 if (sfmmup->sfmmu_ttecnt[TTE8K] != 0 || 10656 sfmmup->sfmmu_ttecnt[TTE64K] != 0 || 10657 sfmmup->sfmmu_ttecnt[TTE512K] != 0 || 10658 sfmmup->sfmmu_ttecnt[TTE4M] != 0 || 10659 sfmmup->sfmmu_ttecnt[TTE32M] != 0 || 10660 sfmmup->sfmmu_ttecnt[TTE256M] != 0) { 10661 return; 10662 } 10663 if (traptype == T_DATA_PROT) { 10664 traptype = T_DATA_MMU_MISS; 10665 } 10666 } 10667 trap(rp, (caddr_t)tagaccess, traptype, 0); 10668 } 10669 10670 /* 10671 * sfmmu_vatopfn_suspended is called from GET_TTE when TL=0 and 10672 * TTE_SUSPENDED bit set in tte we block on aquiring a page lock 10673 * rather than spinning to avoid send mondo timeouts with 10674 * interrupts enabled. When the lock is acquired it is immediately 10675 * released and we return back to sfmmu_vatopfn just after 10676 * the GET_TTE call. 10677 */ 10678 void 10679 sfmmu_vatopfn_suspended(caddr_t vaddr, sfmmu_t *sfmmu, tte_t *ttep) 10680 { 10681 struct page **pp; 10682 10683 (void) as_pagelock(sfmmu->sfmmu_as, &pp, vaddr, TTE_CSZ(ttep), S_WRITE); 10684 as_pageunlock(sfmmu->sfmmu_as, pp, vaddr, TTE_CSZ(ttep), S_WRITE); 10685 } 10686 10687 /* 10688 * sfmmu_tsbmiss_suspended is called from GET_TTE when TL>0 and 10689 * TTE_SUSPENDED bit set in tte. We do this so that we can handle 10690 * cross traps which cannot be handled while spinning in the 10691 * trap handlers. Simply enter and exit the kpr_suspendlock spin 10692 * mutex, which is held by the holder of the suspend bit, and then 10693 * retry the trapped instruction after unwinding. 10694 */ 10695 /*ARGSUSED*/ 10696 void 10697 sfmmu_tsbmiss_suspended(struct regs *rp, uintptr_t tagacc, uint_t traptype) 10698 { 10699 ASSERT(curthread != kreloc_thread); 10700 mutex_enter(&kpr_suspendlock); 10701 mutex_exit(&kpr_suspendlock); 10702 } 10703 10704 /* 10705 * Special routine to flush out ism mappings- TSBs, TLBs and D-caches. 10706 * This routine may be called with all cpu's captured. Therefore, the 10707 * caller is responsible for holding all locks and disabling kernel 10708 * preemption. 
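 *
 * A rough sketch of the calling context this routine expects (the
 * exact capture/suspend mechanics belong to the caller, not here):
 *
 *	mutex_enter(&ism_mlist_lock);
 *	kpreempt_disable();	(and/or all cpus captured)
 *	sfmmu_ismtlbcache_demap(addr, ism_sfmmup, hmeblkp,
 *	    pfnum, cache_flush_flag);
 *	kpreempt_enable();
 *	mutex_exit(&ism_mlist_lock);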
10709 */ 10710 /* ARGSUSED */ 10711 static void 10712 sfmmu_ismtlbcache_demap(caddr_t addr, sfmmu_t *ism_sfmmup, 10713 struct hme_blk *hmeblkp, pfn_t pfnum, int cache_flush_flag) 10714 { 10715 cpuset_t cpuset; 10716 caddr_t va; 10717 ism_ment_t *ment; 10718 sfmmu_t *sfmmup; 10719 #ifdef VAC 10720 int vcolor; 10721 #endif 10722 int ttesz; 10723 10724 /* 10725 * Walk the ism_hat's mapping list and flush the page 10726 * from every hat sharing this ism_hat. This routine 10727 * may be called while all cpu's have been captured. 10728 * Therefore we can't attempt to grab any locks. For now 10729 * this means we will protect the ism mapping list under 10730 * a single lock which will be grabbed by the caller. 10731 * If hat_share/unshare scalibility becomes a performance 10732 * problem then we may need to re-think ism mapping list locking. 10733 */ 10734 ASSERT(ism_sfmmup->sfmmu_ismhat); 10735 ASSERT(MUTEX_HELD(&ism_mlist_lock)); 10736 addr = addr - ISMID_STARTADDR; 10737 for (ment = ism_sfmmup->sfmmu_iment; ment; ment = ment->iment_next) { 10738 10739 sfmmup = ment->iment_hat; 10740 10741 va = ment->iment_base_va; 10742 va = (caddr_t)((uintptr_t)va + (uintptr_t)addr); 10743 10744 /* 10745 * Flush TSB of ISM mappings. 10746 */ 10747 ttesz = get_hblk_ttesz(hmeblkp); 10748 if (ttesz == TTE8K || ttesz == TTE4M) { 10749 sfmmu_unload_tsb(sfmmup, va, ttesz); 10750 } else { 10751 caddr_t sva = va; 10752 caddr_t eva; 10753 ASSERT(addr == (caddr_t)get_hblk_base(hmeblkp)); 10754 eva = sva + get_hblk_span(hmeblkp); 10755 sfmmu_unload_tsb_range(sfmmup, sva, eva, ttesz); 10756 } 10757 10758 cpuset = sfmmup->sfmmu_cpusran; 10759 CPUSET_AND(cpuset, cpu_ready_set); 10760 CPUSET_DEL(cpuset, CPU->cpu_id); 10761 10762 SFMMU_XCALL_STATS(sfmmup); 10763 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)va, 10764 (uint64_t)sfmmup); 10765 10766 vtag_flushpage(va, (uint64_t)sfmmup); 10767 10768 #ifdef VAC 10769 /* 10770 * Flush D$ 10771 * When flushing D$ we must flush all 10772 * cpu's. See sfmmu_cache_flush(). 10773 */ 10774 if (cache_flush_flag == CACHE_FLUSH) { 10775 cpuset = cpu_ready_set; 10776 CPUSET_DEL(cpuset, CPU->cpu_id); 10777 10778 SFMMU_XCALL_STATS(sfmmup); 10779 vcolor = addr_to_vcolor(va); 10780 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor); 10781 vac_flushpage(pfnum, vcolor); 10782 } 10783 #endif /* VAC */ 10784 } 10785 } 10786 10787 /* 10788 * Demaps the TSB, CPU caches, and flushes all TLBs on all CPUs of 10789 * a particular virtual address and ctx. If noflush is set we do not 10790 * flush the TLB/TSB. This function may or may not be called with the 10791 * HAT lock held. 10792 */ 10793 static void 10794 sfmmu_tlbcache_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp, 10795 pfn_t pfnum, int tlb_noflush, int cpu_flag, int cache_flush_flag, 10796 int hat_lock_held) 10797 { 10798 #ifdef VAC 10799 int vcolor; 10800 #endif 10801 cpuset_t cpuset; 10802 hatlock_t *hatlockp; 10803 10804 #if defined(lint) && !defined(VAC) 10805 pfnum = pfnum; 10806 cpu_flag = cpu_flag; 10807 cache_flush_flag = cache_flush_flag; 10808 #endif 10809 /* 10810 * There is no longer a need to protect against ctx being 10811 * stolen here since we don't store the ctx in the TSB anymore. 10812 */ 10813 #ifdef VAC 10814 vcolor = addr_to_vcolor(addr); 10815 #endif 10816 10817 /* 10818 * We must hold the hat lock during the flush of TLB, 10819 * to avoid a race with sfmmu_invalidate_ctx(), where 10820 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT, 10821 * causing TLB demap routine to skip flush on that MMU. 
10822 * If the context on a MMU has already been set to 10823 * INVALID_CONTEXT, we just get an extra flush on 10824 * that MMU. 10825 */ 10826 if (!hat_lock_held && !tlb_noflush) 10827 hatlockp = sfmmu_hat_enter(sfmmup); 10828 10829 kpreempt_disable(); 10830 if (!tlb_noflush) { 10831 /* 10832 * Flush the TSB and TLB. 10833 */ 10834 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp); 10835 10836 cpuset = sfmmup->sfmmu_cpusran; 10837 CPUSET_AND(cpuset, cpu_ready_set); 10838 CPUSET_DEL(cpuset, CPU->cpu_id); 10839 10840 SFMMU_XCALL_STATS(sfmmup); 10841 10842 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, 10843 (uint64_t)sfmmup); 10844 10845 vtag_flushpage(addr, (uint64_t)sfmmup); 10846 } 10847 10848 if (!hat_lock_held && !tlb_noflush) 10849 sfmmu_hat_exit(hatlockp); 10850 10851 #ifdef VAC 10852 /* 10853 * Flush the D$ 10854 * 10855 * Even if the ctx is stolen, we need to flush the 10856 * cache. Our ctx stealer only flushes the TLBs. 10857 */ 10858 if (cache_flush_flag == CACHE_FLUSH) { 10859 if (cpu_flag & FLUSH_ALL_CPUS) { 10860 cpuset = cpu_ready_set; 10861 } else { 10862 cpuset = sfmmup->sfmmu_cpusran; 10863 CPUSET_AND(cpuset, cpu_ready_set); 10864 } 10865 CPUSET_DEL(cpuset, CPU->cpu_id); 10866 SFMMU_XCALL_STATS(sfmmup); 10867 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor); 10868 vac_flushpage(pfnum, vcolor); 10869 } 10870 #endif /* VAC */ 10871 kpreempt_enable(); 10872 } 10873 10874 /* 10875 * Demaps the TSB and flushes all TLBs on all cpus for a particular virtual 10876 * address and ctx. If noflush is set we do not currently do anything. 10877 * This function may or may not be called with the HAT lock held. 10878 */ 10879 static void 10880 sfmmu_tlb_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp, 10881 int tlb_noflush, int hat_lock_held) 10882 { 10883 cpuset_t cpuset; 10884 hatlock_t *hatlockp; 10885 10886 /* 10887 * If the process is exiting we have nothing to do. 10888 */ 10889 if (tlb_noflush) 10890 return; 10891 10892 /* 10893 * Flush TSB. 10894 */ 10895 if (!hat_lock_held) 10896 hatlockp = sfmmu_hat_enter(sfmmup); 10897 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp); 10898 10899 kpreempt_disable(); 10900 10901 cpuset = sfmmup->sfmmu_cpusran; 10902 CPUSET_AND(cpuset, cpu_ready_set); 10903 CPUSET_DEL(cpuset, CPU->cpu_id); 10904 10905 SFMMU_XCALL_STATS(sfmmup); 10906 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, (uint64_t)sfmmup); 10907 10908 vtag_flushpage(addr, (uint64_t)sfmmup); 10909 10910 if (!hat_lock_held) 10911 sfmmu_hat_exit(hatlockp); 10912 10913 kpreempt_enable(); 10914 10915 } 10916 10917 /* 10918 * Special case of sfmmu_tlb_demap for MMU_PAGESIZE hblks. Use the xcall 10919 * call handler that can flush a range of pages to save on xcalls. 10920 */ 10921 static int sfmmu_xcall_save; 10922 10923 static void 10924 sfmmu_tlb_range_demap(demap_range_t *dmrp) 10925 { 10926 sfmmu_t *sfmmup = dmrp->dmr_sfmmup; 10927 hatlock_t *hatlockp; 10928 cpuset_t cpuset; 10929 uint64_t sfmmu_pgcnt; 10930 pgcnt_t pgcnt = 0; 10931 int pgunload = 0; 10932 int dirtypg = 0; 10933 caddr_t addr = dmrp->dmr_addr; 10934 caddr_t eaddr; 10935 uint64_t bitvec = dmrp->dmr_bitvec; 10936 10937 ASSERT(bitvec & 1); 10938 10939 /* 10940 * Flush TSB and calculate number of pages to flush. 10941 */ 10942 while (bitvec != 0) { 10943 dirtypg = 0; 10944 /* 10945 * Find the first page to flush and then count how many 10946 * pages there are after it that also need to be flushed. 10947 * This way the number of TSB flushes is minimized. 
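 *
 * For example (numbers purely illustrative), with dmr_bitvec =
 * 0x67 (binary 1100111): the first pass flushes the run of three
 * dirty pages at bits 0-2 as one TSB range, the two clean pages at
 * bits 3-4 are skipped, and a second pass flushes the two dirty
 * pages at bits 5-6.  That yields pgunload = 5 dirty pages but
 * only two sfmmu_unload_tsb_range() calls, with pgcnt = 7 pages
 * walked.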
10948 */ 10949 while ((bitvec & 1) == 0) { 10950 pgcnt++; 10951 addr += MMU_PAGESIZE; 10952 bitvec >>= 1; 10953 } 10954 while (bitvec & 1) { 10955 dirtypg++; 10956 bitvec >>= 1; 10957 } 10958 eaddr = addr + ptob(dirtypg); 10959 hatlockp = sfmmu_hat_enter(sfmmup); 10960 sfmmu_unload_tsb_range(sfmmup, addr, eaddr, TTE8K); 10961 sfmmu_hat_exit(hatlockp); 10962 pgunload += dirtypg; 10963 addr = eaddr; 10964 pgcnt += dirtypg; 10965 } 10966 10967 ASSERT((pgcnt<<MMU_PAGESHIFT) <= dmrp->dmr_endaddr - dmrp->dmr_addr); 10968 if (sfmmup->sfmmu_free == 0) { 10969 addr = dmrp->dmr_addr; 10970 bitvec = dmrp->dmr_bitvec; 10971 10972 /* 10973 * make sure it has SFMMU_PGCNT_SHIFT bits only, 10974 * as it will be used to pack argument for xt_some 10975 */ 10976 ASSERT((pgcnt > 0) && 10977 (pgcnt <= (1 << SFMMU_PGCNT_SHIFT))); 10978 10979 /* 10980 * Encode pgcnt as (pgcnt -1 ), and pass (pgcnt - 1) in 10981 * the low 6 bits of sfmmup. This is doable since pgcnt 10982 * always >= 1. 10983 */ 10984 ASSERT(!((uint64_t)sfmmup & SFMMU_PGCNT_MASK)); 10985 sfmmu_pgcnt = (uint64_t)sfmmup | 10986 ((pgcnt - 1) & SFMMU_PGCNT_MASK); 10987 10988 /* 10989 * We must hold the hat lock during the flush of TLB, 10990 * to avoid a race with sfmmu_invalidate_ctx(), where 10991 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT, 10992 * causing TLB demap routine to skip flush on that MMU. 10993 * If the context on a MMU has already been set to 10994 * INVALID_CONTEXT, we just get an extra flush on 10995 * that MMU. 10996 */ 10997 hatlockp = sfmmu_hat_enter(sfmmup); 10998 kpreempt_disable(); 10999 11000 cpuset = sfmmup->sfmmu_cpusran; 11001 CPUSET_AND(cpuset, cpu_ready_set); 11002 CPUSET_DEL(cpuset, CPU->cpu_id); 11003 11004 SFMMU_XCALL_STATS(sfmmup); 11005 xt_some(cpuset, vtag_flush_pgcnt_tl1, (uint64_t)addr, 11006 sfmmu_pgcnt); 11007 11008 for (; bitvec != 0; bitvec >>= 1) { 11009 if (bitvec & 1) 11010 vtag_flushpage(addr, (uint64_t)sfmmup); 11011 addr += MMU_PAGESIZE; 11012 } 11013 kpreempt_enable(); 11014 sfmmu_hat_exit(hatlockp); 11015 11016 sfmmu_xcall_save += (pgunload-1); 11017 } 11018 dmrp->dmr_bitvec = 0; 11019 } 11020 11021 /* 11022 * In cases where we need to synchronize with TLB/TSB miss trap 11023 * handlers, _and_ need to flush the TLB, it's a lot easier to 11024 * throw away the context from the process than to do a 11025 * special song and dance to keep things consistent for the 11026 * handlers. 11027 * 11028 * Since the process suddenly ends up without a context and our caller 11029 * holds the hat lock, threads that fault after this function is called 11030 * will pile up on the lock. We can then do whatever we need to 11031 * atomically from the context of the caller. The first blocked thread 11032 * to resume executing will get the process a new context, and the 11033 * process will resume executing. 11034 * 11035 * One added advantage of this approach is that on MMUs that 11036 * support a "flush all" operation, we will delay the flush until 11037 * cnum wrap-around, and then flush the TLB one time. This 11038 * is rather rare, so it's a lot less expensive than making 8000 11039 * x-calls to flush the TLB 8000 times. 11040 * 11041 * A per-process (PP) lock is used to synchronize ctx allocations in 11042 * resume() and ctx invalidations here. 
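 *
 * The sequence below is, roughly (a sketch of the code that
 * follows, not a substitute for it):
 *
 *	disable preemption and interrupts
 *	lock_set(&sfmmup->sfmmu_ctx_lock)	(the PP lock)
 *	set sfmmu_ctxs[i].cnum = INVALID_CONTEXT for every
 *	    context domain
 *	membar; lock_clear(&sfmmup->sfmmu_ctx_lock)
 *	cross-call the other CPUs running this hat so they raise
 *	    a tsb exception and pick up the invalid context
 *	if this CPU was running the hat, invalidate the secondary
 *	    context register and clear the user tsbinfo as well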
11043 */ 11044 static void 11045 sfmmu_invalidate_ctx(sfmmu_t *sfmmup) 11046 { 11047 cpuset_t cpuset; 11048 int cnum, currcnum; 11049 mmu_ctx_t *mmu_ctxp; 11050 int i; 11051 uint_t pstate_save; 11052 11053 SFMMU_STAT(sf_ctx_inv); 11054 11055 ASSERT(sfmmu_hat_lock_held(sfmmup)); 11056 ASSERT(sfmmup != ksfmmup); 11057 11058 kpreempt_disable(); 11059 11060 mmu_ctxp = CPU_MMU_CTXP(CPU); 11061 ASSERT(mmu_ctxp); 11062 ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms); 11063 ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]); 11064 11065 currcnum = sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum; 11066 11067 pstate_save = sfmmu_disable_intrs(); 11068 11069 lock_set(&sfmmup->sfmmu_ctx_lock); /* acquire PP lock */ 11070 /* set HAT cnum invalid across all context domains. */ 11071 for (i = 0; i < max_mmu_ctxdoms; i++) { 11072 11073 cnum = sfmmup->sfmmu_ctxs[i].cnum; 11074 if (cnum == INVALID_CONTEXT) { 11075 continue; 11076 } 11077 11078 sfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT; 11079 } 11080 membar_enter(); /* make sure globally visible to all CPUs */ 11081 lock_clear(&sfmmup->sfmmu_ctx_lock); /* release PP lock */ 11082 11083 sfmmu_enable_intrs(pstate_save); 11084 11085 cpuset = sfmmup->sfmmu_cpusran; 11086 CPUSET_DEL(cpuset, CPU->cpu_id); 11087 CPUSET_AND(cpuset, cpu_ready_set); 11088 if (!CPUSET_ISNULL(cpuset)) { 11089 SFMMU_XCALL_STATS(sfmmup); 11090 xt_some(cpuset, sfmmu_raise_tsb_exception, 11091 (uint64_t)sfmmup, INVALID_CONTEXT); 11092 xt_sync(cpuset); 11093 SFMMU_STAT(sf_tsb_raise_exception); 11094 SFMMU_MMU_STAT(mmu_tsb_raise_exception); 11095 } 11096 11097 /* 11098 * If the hat to-be-invalidated is the same as the current 11099 * process on local CPU we need to invalidate 11100 * this CPU context as well. 11101 */ 11102 if ((sfmmu_getctx_sec() == currcnum) && 11103 (currcnum != INVALID_CONTEXT)) { 11104 sfmmu_setctx_sec(INVALID_CONTEXT); 11105 sfmmu_clear_utsbinfo(); 11106 } 11107 11108 kpreempt_enable(); 11109 11110 /* 11111 * we hold the hat lock, so nobody should allocate a context 11112 * for us yet 11113 */ 11114 ASSERT(sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum == INVALID_CONTEXT); 11115 } 11116 11117 #ifdef VAC 11118 /* 11119 * We need to flush the cache in all cpus. It is possible that 11120 * a process referenced a page as cacheable but has sinced exited 11121 * and cleared the mapping list. We still to flush it but have no 11122 * state so all cpus is the only alternative. 11123 */ 11124 void 11125 sfmmu_cache_flush(pfn_t pfnum, int vcolor) 11126 { 11127 cpuset_t cpuset; 11128 11129 kpreempt_disable(); 11130 cpuset = cpu_ready_set; 11131 CPUSET_DEL(cpuset, CPU->cpu_id); 11132 SFMMU_XCALL_STATS(NULL); /* account to any ctx */ 11133 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor); 11134 xt_sync(cpuset); 11135 vac_flushpage(pfnum, vcolor); 11136 kpreempt_enable(); 11137 } 11138 11139 void 11140 sfmmu_cache_flushcolor(int vcolor, pfn_t pfnum) 11141 { 11142 cpuset_t cpuset; 11143 11144 ASSERT(vcolor >= 0); 11145 11146 kpreempt_disable(); 11147 cpuset = cpu_ready_set; 11148 CPUSET_DEL(cpuset, CPU->cpu_id); 11149 SFMMU_XCALL_STATS(NULL); /* account to any ctx */ 11150 xt_some(cpuset, vac_flushcolor_tl1, vcolor, pfnum); 11151 xt_sync(cpuset); 11152 vac_flushcolor(vcolor, pfnum); 11153 kpreempt_enable(); 11154 } 11155 #endif /* VAC */ 11156 11157 /* 11158 * We need to prevent processes from accessing the TSB using a cached physical 11159 * address. 
It's alright if they try to access the TSB via virtual address 11160 * since they will just fault on that virtual address once the mapping has 11161 * been suspended. 11162 */ 11163 #pragma weak sendmondo_in_recover 11164 11165 /* ARGSUSED */ 11166 static int 11167 sfmmu_tsb_pre_relocator(caddr_t va, uint_t tsbsz, uint_t flags, void *tsbinfo) 11168 { 11169 hatlock_t *hatlockp; 11170 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo; 11171 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu; 11172 extern uint32_t sendmondo_in_recover; 11173 11174 if (flags != HAT_PRESUSPEND) 11175 return (0); 11176 11177 hatlockp = sfmmu_hat_enter(sfmmup); 11178 11179 tsbinfop->tsb_flags |= TSB_RELOC_FLAG; 11180 11181 /* 11182 * For Cheetah+ Erratum 25: 11183 * Wait for any active recovery to finish. We can't risk 11184 * relocating the TSB of the thread running mondo_recover_proc() 11185 * since, if we did that, we would deadlock. The scenario we are 11186 * trying to avoid is as follows: 11187 * 11188 * THIS CPU RECOVER CPU 11189 * -------- ----------- 11190 * Begins recovery, walking through TSB 11191 * hat_pagesuspend() TSB TTE 11192 * TLB miss on TSB TTE, spins at TL1 11193 * xt_sync() 11194 * send_mondo_timeout() 11195 * mondo_recover_proc() 11196 * ((deadlocked)) 11197 * 11198 * The second half of the workaround is that mondo_recover_proc() 11199 * checks to see if the tsb_info has the RELOC flag set, and if it 11200 * does, it skips over that TSB without ever touching tsbinfop->tsb_va 11201 * and hence avoiding the TLB miss that could result in a deadlock. 11202 */ 11203 if (&sendmondo_in_recover) { 11204 membar_enter(); /* make sure RELOC flag visible */ 11205 while (sendmondo_in_recover) { 11206 drv_usecwait(1); 11207 membar_consumer(); 11208 } 11209 } 11210 11211 sfmmu_invalidate_ctx(sfmmup); 11212 sfmmu_hat_exit(hatlockp); 11213 11214 return (0); 11215 } 11216 11217 /* ARGSUSED */ 11218 static int 11219 sfmmu_tsb_post_relocator(caddr_t va, uint_t tsbsz, uint_t flags, 11220 void *tsbinfo, pfn_t newpfn) 11221 { 11222 hatlock_t *hatlockp; 11223 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo; 11224 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu; 11225 11226 if (flags != HAT_POSTUNSUSPEND) 11227 return (0); 11228 11229 hatlockp = sfmmu_hat_enter(sfmmup); 11230 11231 SFMMU_STAT(sf_tsb_reloc); 11232 11233 /* 11234 * The process may have swapped out while we were relocating one 11235 * of its TSBs. If so, don't bother doing the setup since the 11236 * process can't be using the memory anymore. 11237 */ 11238 if ((tsbinfop->tsb_flags & TSB_SWAPPED) == 0) { 11239 ASSERT(va == tsbinfop->tsb_va); 11240 sfmmu_tsbinfo_setup_phys(tsbinfop, newpfn); 11241 sfmmu_setup_tsbinfo(sfmmup); 11242 11243 if (tsbinfop->tsb_flags & TSB_FLUSH_NEEDED) { 11244 sfmmu_inv_tsb(tsbinfop->tsb_va, 11245 TSB_BYTES(tsbinfop->tsb_szc)); 11246 tsbinfop->tsb_flags &= ~TSB_FLUSH_NEEDED; 11247 } 11248 } 11249 11250 membar_exit(); 11251 tsbinfop->tsb_flags &= ~TSB_RELOC_FLAG; 11252 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 11253 11254 sfmmu_hat_exit(hatlockp); 11255 11256 return (0); 11257 } 11258 11259 /* 11260 * Allocate and initialize a tsb_info structure. Note that we may or may not 11261 * allocate a TSB here, depending on the flags passed in. 
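 *
 * For instance (illustrative only), a caller that must not fail
 * could do something like:
 *
 *	if (sfmmu_tsbinfo_alloc(&tsbinfop, TSB_MIN_SZCODE,
 *	    TSB8K|TSB64K|TSB512K, TSB_FORCEALLOC, sfmmup) != 0)
 *		(cannot happen for a PAGESIZE TSB with
 *		 TSB_FORCEALLOC)
 *
 * whereas passing no flag that implies TSB_ALLOC just sets up a
 * TSB_SWAPPED placeholder tsbinfo with no TSB behind it.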
11262 */ 11263 static int 11264 sfmmu_tsbinfo_alloc(struct tsb_info **tsbinfopp, int tsb_szc, int tte_sz_mask, 11265 uint_t flags, sfmmu_t *sfmmup) 11266 { 11267 int err; 11268 11269 *tsbinfopp = (struct tsb_info *)kmem_cache_alloc( 11270 sfmmu_tsbinfo_cache, KM_SLEEP); 11271 11272 if ((err = sfmmu_init_tsbinfo(*tsbinfopp, tte_sz_mask, 11273 tsb_szc, flags, sfmmup)) != 0) { 11274 kmem_cache_free(sfmmu_tsbinfo_cache, *tsbinfopp); 11275 SFMMU_STAT(sf_tsb_allocfail); 11276 *tsbinfopp = NULL; 11277 return (err); 11278 } 11279 SFMMU_STAT(sf_tsb_alloc); 11280 11281 /* 11282 * Bump the TSB size counters for this TSB size. 11283 */ 11284 (*(((int *)&sfmmu_tsbsize_stat) + tsb_szc))++; 11285 return (0); 11286 } 11287 11288 static void 11289 sfmmu_tsb_free(struct tsb_info *tsbinfo) 11290 { 11291 caddr_t tsbva = tsbinfo->tsb_va; 11292 uint_t tsb_size = TSB_BYTES(tsbinfo->tsb_szc); 11293 struct kmem_cache *kmem_cachep = tsbinfo->tsb_cache; 11294 vmem_t *vmp = tsbinfo->tsb_vmp; 11295 11296 /* 11297 * If we allocated this TSB from relocatable kernel memory, then we 11298 * need to uninstall the callback handler. 11299 */ 11300 if (tsbinfo->tsb_cache != sfmmu_tsb8k_cache) { 11301 uintptr_t slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT; 11302 caddr_t slab_vaddr = (caddr_t)((uintptr_t)tsbva & slab_mask); 11303 page_t **ppl; 11304 int ret; 11305 11306 ret = as_pagelock(&kas, &ppl, slab_vaddr, PAGESIZE, S_WRITE); 11307 ASSERT(ret == 0); 11308 hat_delete_callback(tsbva, (uint_t)tsb_size, (void *)tsbinfo, 11309 0, NULL); 11310 as_pageunlock(&kas, ppl, slab_vaddr, PAGESIZE, S_WRITE); 11311 } 11312 11313 if (kmem_cachep != NULL) { 11314 kmem_cache_free(kmem_cachep, tsbva); 11315 } else { 11316 vmem_xfree(vmp, (void *)tsbva, tsb_size); 11317 } 11318 tsbinfo->tsb_va = (caddr_t)0xbad00bad; 11319 atomic_add_64(&tsb_alloc_bytes, -(int64_t)tsb_size); 11320 } 11321 11322 static void 11323 sfmmu_tsbinfo_free(struct tsb_info *tsbinfo) 11324 { 11325 if ((tsbinfo->tsb_flags & TSB_SWAPPED) == 0) { 11326 sfmmu_tsb_free(tsbinfo); 11327 } 11328 kmem_cache_free(sfmmu_tsbinfo_cache, tsbinfo); 11329 11330 } 11331 11332 /* 11333 * Setup all the references to physical memory for this tsbinfo. 11334 * The underlying page(s) must be locked. 11335 */ 11336 static void 11337 sfmmu_tsbinfo_setup_phys(struct tsb_info *tsbinfo, pfn_t pfn) 11338 { 11339 ASSERT(pfn != PFN_INVALID); 11340 ASSERT(pfn == va_to_pfn(tsbinfo->tsb_va)); 11341 11342 #ifndef sun4v 11343 if (tsbinfo->tsb_szc == 0) { 11344 sfmmu_memtte(&tsbinfo->tsb_tte, pfn, 11345 PROT_WRITE|PROT_READ, TTE8K); 11346 } else { 11347 /* 11348 * Round down PA and use a large mapping; the handlers will 11349 * compute the TSB pointer at the correct offset into the 11350 * big virtual page. NOTE: this assumes all TSBs larger 11351 * than 8K must come from physically contiguous slabs of 11352 * size tsb_slab_size. 
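 *
 * As a concrete example (slab size assumed to be 4M purely for
 * illustration): a 64K TSB sitting at offset 0x30000 into its 4M
 * slab gets a tte built from (pfn & ~tsb_slab_mask), i.e. the pfn
 * of the slab base, at size tsb_slab_ttesz; the tsb-miss handlers
 * then add the 0x30000 offset back when they compute the TSB
 * pointer inside that one big, locked mapping.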
11353 */ 11354 sfmmu_memtte(&tsbinfo->tsb_tte, pfn & ~tsb_slab_mask, 11355 PROT_WRITE|PROT_READ, tsb_slab_ttesz); 11356 } 11357 tsbinfo->tsb_pa = ptob(pfn); 11358 11359 TTE_SET_LOCKED(&tsbinfo->tsb_tte); /* lock the tte into dtlb */ 11360 TTE_SET_MOD(&tsbinfo->tsb_tte); /* enable writes */ 11361 11362 ASSERT(TTE_IS_PRIVILEGED(&tsbinfo->tsb_tte)); 11363 ASSERT(TTE_IS_LOCKED(&tsbinfo->tsb_tte)); 11364 #else /* sun4v */ 11365 tsbinfo->tsb_pa = ptob(pfn); 11366 #endif /* sun4v */ 11367 } 11368 11369 11370 /* 11371 * Returns zero on success, ENOMEM if over the high water mark, 11372 * or EAGAIN if the caller needs to retry with a smaller TSB 11373 * size (or specify TSB_FORCEALLOC if the allocation can't fail). 11374 * 11375 * This call cannot fail to allocate a TSB if TSB_FORCEALLOC 11376 * is specified and the TSB requested is PAGESIZE, though it 11377 * may sleep waiting for memory if sufficient memory is not 11378 * available. 11379 */ 11380 static int 11381 sfmmu_init_tsbinfo(struct tsb_info *tsbinfo, int tteszmask, 11382 int tsbcode, uint_t flags, sfmmu_t *sfmmup) 11383 { 11384 caddr_t vaddr = NULL; 11385 caddr_t slab_vaddr; 11386 uintptr_t slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT; 11387 int tsbbytes = TSB_BYTES(tsbcode); 11388 int lowmem = 0; 11389 struct kmem_cache *kmem_cachep = NULL; 11390 vmem_t *vmp = NULL; 11391 lgrp_id_t lgrpid = LGRP_NONE; 11392 pfn_t pfn; 11393 uint_t cbflags = HAC_SLEEP; 11394 page_t **pplist; 11395 int ret; 11396 11397 if (flags & (TSB_FORCEALLOC | TSB_SWAPIN | TSB_GROW | TSB_SHRINK)) 11398 flags |= TSB_ALLOC; 11399 11400 ASSERT((flags & TSB_FORCEALLOC) == 0 || tsbcode == TSB_MIN_SZCODE); 11401 11402 tsbinfo->tsb_sfmmu = sfmmup; 11403 11404 /* 11405 * If not allocating a TSB, set up the tsbinfo, set TSB_SWAPPED, and 11406 * return. 11407 */ 11408 if ((flags & TSB_ALLOC) == 0) { 11409 tsbinfo->tsb_szc = tsbcode; 11410 tsbinfo->tsb_ttesz_mask = tteszmask; 11411 tsbinfo->tsb_va = (caddr_t)0xbadbadbeef; 11412 tsbinfo->tsb_pa = -1; 11413 tsbinfo->tsb_tte.ll = 0; 11414 tsbinfo->tsb_next = NULL; 11415 tsbinfo->tsb_flags = TSB_SWAPPED; 11416 tsbinfo->tsb_cache = NULL; 11417 tsbinfo->tsb_vmp = NULL; 11418 return (0); 11419 } 11420 11421 #ifdef DEBUG 11422 /* 11423 * For debugging: 11424 * Randomly force allocation failures every tsb_alloc_mtbf 11425 * tries if TSB_FORCEALLOC is not specified. This will 11426 * return ENOMEM if tsb_alloc_mtbf is odd, or EAGAIN if 11427 * it is even, to allow testing of both failure paths... 11428 */ 11429 if (tsb_alloc_mtbf && ((flags & TSB_FORCEALLOC) == 0) && 11430 (tsb_alloc_count++ == tsb_alloc_mtbf)) { 11431 tsb_alloc_count = 0; 11432 tsb_alloc_fail_mtbf++; 11433 return ((tsb_alloc_mtbf & 1)? ENOMEM : EAGAIN); 11434 } 11435 #endif /* DEBUG */ 11436 11437 /* 11438 * Enforce high water mark if we are not doing a forced allocation 11439 * and are not shrinking a process' TSB. 11440 */ 11441 if ((flags & TSB_SHRINK) == 0 && 11442 (tsbbytes + tsb_alloc_bytes) > tsb_alloc_hiwater) { 11443 if ((flags & TSB_FORCEALLOC) == 0) 11444 return (ENOMEM); 11445 lowmem = 1; 11446 } 11447 11448 /* 11449 * Allocate from the correct location based upon the size of the TSB 11450 * compared to the base page size, and what memory conditions dictate. 11451 * Note we always do nonblocking allocations from the TSB arena since 11452 * we don't want memory fragmentation to cause processes to block 11453 * indefinitely waiting for memory; until the kernel algorithms that 11454 * coalesce large pages are improved this is our best option. 
11455 * 11456 * Algorithm: 11457 * If allocating a "large" TSB (>8K), allocate from the 11458 * appropriate kmem_tsb_default_arena vmem arena 11459 * else if low on memory or the TSB_FORCEALLOC flag is set or 11460 * tsb_forceheap is set 11461 * Allocate from kernel heap via sfmmu_tsb8k_cache with 11462 * KM_SLEEP (never fails) 11463 * else 11464 * Allocate from appropriate sfmmu_tsb_cache with 11465 * KM_NOSLEEP 11466 * endif 11467 */ 11468 if (tsb_lgrp_affinity) 11469 lgrpid = lgrp_home_id(curthread); 11470 if (lgrpid == LGRP_NONE) 11471 lgrpid = 0; /* use lgrp of boot CPU */ 11472 11473 if (tsbbytes > MMU_PAGESIZE) { 11474 vmp = kmem_tsb_default_arena[lgrpid]; 11475 vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes, 0, 0, 11476 NULL, NULL, VM_NOSLEEP); 11477 #ifdef DEBUG 11478 } else if (lowmem || (flags & TSB_FORCEALLOC) || tsb_forceheap) { 11479 #else /* !DEBUG */ 11480 } else if (lowmem || (flags & TSB_FORCEALLOC)) { 11481 #endif /* DEBUG */ 11482 kmem_cachep = sfmmu_tsb8k_cache; 11483 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_SLEEP); 11484 ASSERT(vaddr != NULL); 11485 } else { 11486 kmem_cachep = sfmmu_tsb_cache[lgrpid]; 11487 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_NOSLEEP); 11488 } 11489 11490 tsbinfo->tsb_cache = kmem_cachep; 11491 tsbinfo->tsb_vmp = vmp; 11492 11493 if (vaddr == NULL) { 11494 return (EAGAIN); 11495 } 11496 11497 atomic_add_64(&tsb_alloc_bytes, (int64_t)tsbbytes); 11498 kmem_cachep = tsbinfo->tsb_cache; 11499 11500 /* 11501 * If we are allocating from outside the cage, then we need to 11502 * register a relocation callback handler. Note that for now 11503 * since pseudo mappings always hang off of the slab's root page, 11504 * we need only lock the first 8K of the TSB slab. This is a bit 11505 * hacky but it is good for performance. 11506 */ 11507 if (kmem_cachep != sfmmu_tsb8k_cache) { 11508 slab_vaddr = (caddr_t)((uintptr_t)vaddr & slab_mask); 11509 ret = as_pagelock(&kas, &pplist, slab_vaddr, PAGESIZE, S_WRITE); 11510 ASSERT(ret == 0); 11511 ret = hat_add_callback(sfmmu_tsb_cb_id, vaddr, (uint_t)tsbbytes, 11512 cbflags, (void *)tsbinfo, &pfn, NULL); 11513 11514 /* 11515 * Need to free up resources if we could not successfully 11516 * add the callback function and return an error condition. 11517 */ 11518 if (ret != 0) { 11519 if (kmem_cachep) { 11520 kmem_cache_free(kmem_cachep, vaddr); 11521 } else { 11522 vmem_xfree(vmp, (void *)vaddr, tsbbytes); 11523 } 11524 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE, 11525 S_WRITE); 11526 return (EAGAIN); 11527 } 11528 } else { 11529 /* 11530 * Since allocation of 8K TSBs from heap is rare and occurs 11531 * during memory pressure we allocate them from permanent 11532 * memory rather than using callbacks to get the PFN. 
11533 */ 11534 pfn = hat_getpfnum(kas.a_hat, vaddr); 11535 } 11536 11537 tsbinfo->tsb_va = vaddr; 11538 tsbinfo->tsb_szc = tsbcode; 11539 tsbinfo->tsb_ttesz_mask = tteszmask; 11540 tsbinfo->tsb_next = NULL; 11541 tsbinfo->tsb_flags = 0; 11542 11543 sfmmu_tsbinfo_setup_phys(tsbinfo, pfn); 11544 11545 if (kmem_cachep != sfmmu_tsb8k_cache) { 11546 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE, S_WRITE); 11547 } 11548 11549 sfmmu_inv_tsb(vaddr, tsbbytes); 11550 return (0); 11551 } 11552 11553 /* 11554 * Initialize per cpu tsb and per cpu tsbmiss_area 11555 */ 11556 void 11557 sfmmu_init_tsbs(void) 11558 { 11559 int i; 11560 struct tsbmiss *tsbmissp; 11561 struct kpmtsbm *kpmtsbmp; 11562 #ifndef sun4v 11563 extern int dcache_line_mask; 11564 #endif /* sun4v */ 11565 extern uint_t vac_colors; 11566 11567 /* 11568 * Init. tsb miss area. 11569 */ 11570 tsbmissp = tsbmiss_area; 11571 11572 for (i = 0; i < NCPU; tsbmissp++, i++) { 11573 /* 11574 * initialize the tsbmiss area. 11575 * Do this for all possible CPUs as some may be added 11576 * while the system is running. There is no cost to this. 11577 */ 11578 tsbmissp->ksfmmup = ksfmmup; 11579 #ifndef sun4v 11580 tsbmissp->dcache_line_mask = (uint16_t)dcache_line_mask; 11581 #endif /* sun4v */ 11582 tsbmissp->khashstart = 11583 (struct hmehash_bucket *)va_to_pa((caddr_t)khme_hash); 11584 tsbmissp->uhashstart = 11585 (struct hmehash_bucket *)va_to_pa((caddr_t)uhme_hash); 11586 tsbmissp->khashsz = khmehash_num; 11587 tsbmissp->uhashsz = uhmehash_num; 11588 } 11589 11590 sfmmu_tsb_cb_id = hat_register_callback('T'<<16 | 'S' << 8 | 'B', 11591 sfmmu_tsb_pre_relocator, sfmmu_tsb_post_relocator, NULL, 0); 11592 11593 if (kpm_enable == 0) 11594 return; 11595 11596 /* -- Begin KPM specific init -- */ 11597 11598 if (kpm_smallpages) { 11599 /* 11600 * If we're using base pagesize pages for seg_kpm 11601 * mappings, we use the kernel TSB since we can't afford 11602 * to allocate a second huge TSB for these mappings. 11603 */ 11604 kpm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base; 11605 kpm_tsbsz = ktsb_szcode; 11606 kpmsm_tsbbase = kpm_tsbbase; 11607 kpmsm_tsbsz = kpm_tsbsz; 11608 } else { 11609 /* 11610 * In VAC conflict case, just put the entries in the 11611 * kernel 8K indexed TSB for now so we can find them. 11612 * This could really be changed in the future if we feel 11613 * the need... 11614 */ 11615 kpmsm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base; 11616 kpmsm_tsbsz = ktsb_szcode; 11617 kpm_tsbbase = ktsb_phys? ktsb4m_pbase : (uint64_t)ktsb4m_base; 11618 kpm_tsbsz = ktsb4m_szcode; 11619 } 11620 11621 kpmtsbmp = kpmtsbm_area; 11622 for (i = 0; i < NCPU; kpmtsbmp++, i++) { 11623 /* 11624 * Initialize the kpmtsbm area. 11625 * Do this for all possible CPUs as some may be added 11626 * while the system is running. There is no cost to this. 11627 */ 11628 kpmtsbmp->vbase = kpm_vbase; 11629 kpmtsbmp->vend = kpm_vbase + kpm_size * vac_colors; 11630 kpmtsbmp->sz_shift = kpm_size_shift; 11631 kpmtsbmp->kpmp_shift = kpmp_shift; 11632 kpmtsbmp->kpmp2pshft = (uchar_t)kpmp2pshft; 11633 if (kpm_smallpages == 0) { 11634 kpmtsbmp->kpmp_table_sz = kpmp_table_sz; 11635 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_table); 11636 } else { 11637 kpmtsbmp->kpmp_table_sz = kpmp_stable_sz; 11638 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_stable); 11639 } 11640 kpmtsbmp->msegphashpa = va_to_pa(memseg_phash); 11641 kpmtsbmp->flags = KPMTSBM_ENABLE_FLAG; 11642 #ifdef DEBUG 11643 kpmtsbmp->flags |= (kpm_tsbmtl) ? 
KPMTSBM_TLTSBM_FLAG : 0; 11644 #endif /* DEBUG */ 11645 if (ktsb_phys) 11646 kpmtsbmp->flags |= KPMTSBM_TSBPHYS_FLAG; 11647 } 11648 11649 /* -- End KPM specific init -- */ 11650 } 11651 11652 /* Avoid using sfmmu_tsbinfo_alloc() to avoid kmem_alloc - no real reason */ 11653 struct tsb_info ktsb_info[2]; 11654 11655 /* 11656 * Called from hat_kern_setup() to setup the tsb_info for ksfmmup. 11657 */ 11658 void 11659 sfmmu_init_ktsbinfo() 11660 { 11661 ASSERT(ksfmmup != NULL); 11662 ASSERT(ksfmmup->sfmmu_tsb == NULL); 11663 /* 11664 * Allocate tsbinfos for kernel and copy in data 11665 * to make debug easier and sun4v setup easier. 11666 */ 11667 ktsb_info[0].tsb_sfmmu = ksfmmup; 11668 ktsb_info[0].tsb_szc = ktsb_szcode; 11669 ktsb_info[0].tsb_ttesz_mask = TSB8K|TSB64K|TSB512K; 11670 ktsb_info[0].tsb_va = ktsb_base; 11671 ktsb_info[0].tsb_pa = ktsb_pbase; 11672 ktsb_info[0].tsb_flags = 0; 11673 ktsb_info[0].tsb_tte.ll = 0; 11674 ktsb_info[0].tsb_cache = NULL; 11675 11676 ktsb_info[1].tsb_sfmmu = ksfmmup; 11677 ktsb_info[1].tsb_szc = ktsb4m_szcode; 11678 ktsb_info[1].tsb_ttesz_mask = TSB4M; 11679 ktsb_info[1].tsb_va = ktsb4m_base; 11680 ktsb_info[1].tsb_pa = ktsb4m_pbase; 11681 ktsb_info[1].tsb_flags = 0; 11682 ktsb_info[1].tsb_tte.ll = 0; 11683 ktsb_info[1].tsb_cache = NULL; 11684 11685 /* Link them into ksfmmup. */ 11686 ktsb_info[0].tsb_next = &ktsb_info[1]; 11687 ktsb_info[1].tsb_next = NULL; 11688 ksfmmup->sfmmu_tsb = &ktsb_info[0]; 11689 11690 sfmmu_setup_tsbinfo(ksfmmup); 11691 } 11692 11693 /* 11694 * Cache the last value returned from va_to_pa(). If the VA specified 11695 * in the current call to cached_va_to_pa() maps to the same Page (as the 11696 * previous call to cached_va_to_pa()), then compute the PA using 11697 * cached info, else call va_to_pa(). 11698 * 11699 * Note: this function is neither MT-safe nor consistent in the presence 11700 * of multiple, interleaved threads. This function was created to enable 11701 * an optimization used during boot (at a point when there's only one thread 11702 * executing on the "boot CPU", and before startup_vm() has been called). 11703 */ 11704 static uint64_t 11705 cached_va_to_pa(void *vaddr) 11706 { 11707 static uint64_t prev_vaddr_base = 0; 11708 static uint64_t prev_pfn = 0; 11709 11710 if ((((uint64_t)vaddr) & MMU_PAGEMASK) == prev_vaddr_base) { 11711 return (prev_pfn | ((uint64_t)vaddr & MMU_PAGEOFFSET)); 11712 } else { 11713 uint64_t pa = va_to_pa(vaddr); 11714 11715 if (pa != ((uint64_t)-1)) { 11716 /* 11717 * Computed physical address is valid. Cache its 11718 * related info for the next cached_va_to_pa() call. 11719 */ 11720 prev_pfn = pa & MMU_PAGEMASK; 11721 prev_vaddr_base = ((uint64_t)vaddr) & MMU_PAGEMASK; 11722 } 11723 11724 return (pa); 11725 } 11726 } 11727 11728 /* 11729 * Carve up our nucleus hblk region. We may allocate more hblks than 11730 * asked due to rounding errors but we are guaranteed to have at least 11731 * enough space to allocate the requested number of hblk8's and hblk1's. 
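 *
 * The arithmetic below, with purely made-up numbers for
 * illustration: if size is 512K, nhblk1 is 10 and the rounded
 * hme1blk_sz is 160 bytes, then hblk8_bound = 512K - 10*160 -
 * hme8blk_sz, so the first loop carves hblk8's out of everything
 * except the last ~1600 bytes, and the second loop carves the
 * hblk1's out of whatever remains; rounding can only leave us
 * with more blocks than were asked for, never fewer.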
11732 */ 11733 void 11734 sfmmu_init_nucleus_hblks(caddr_t addr, size_t size, int nhblk8, int nhblk1) 11735 { 11736 struct hme_blk *hmeblkp; 11737 size_t hme8blk_sz, hme1blk_sz; 11738 size_t i; 11739 size_t hblk8_bound; 11740 ulong_t j = 0, k = 0; 11741 11742 ASSERT(addr != NULL && size != 0); 11743 11744 /* Need to use proper structure alignment */ 11745 hme8blk_sz = roundup(HME8BLK_SZ, sizeof (int64_t)); 11746 hme1blk_sz = roundup(HME1BLK_SZ, sizeof (int64_t)); 11747 11748 nucleus_hblk8.list = (void *)addr; 11749 nucleus_hblk8.index = 0; 11750 11751 /* 11752 * Use as much memory as possible for hblk8's since we 11753 * expect all bop_alloc'ed memory to be allocated in 8k chunks. 11754 * We need to hold back enough space for the hblk1's which 11755 * we'll allocate next. 11756 */ 11757 hblk8_bound = size - (nhblk1 * hme1blk_sz) - hme8blk_sz; 11758 for (i = 0; i <= hblk8_bound; i += hme8blk_sz, j++) { 11759 hmeblkp = (struct hme_blk *)addr; 11760 addr += hme8blk_sz; 11761 hmeblkp->hblk_nuc_bit = 1; 11762 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp); 11763 } 11764 nucleus_hblk8.len = j; 11765 ASSERT(j >= nhblk8); 11766 SFMMU_STAT_ADD(sf_hblk8_ncreate, j); 11767 11768 nucleus_hblk1.list = (void *)addr; 11769 nucleus_hblk1.index = 0; 11770 for (; i <= (size - hme1blk_sz); i += hme1blk_sz, k++) { 11771 hmeblkp = (struct hme_blk *)addr; 11772 addr += hme1blk_sz; 11773 hmeblkp->hblk_nuc_bit = 1; 11774 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp); 11775 } 11776 ASSERT(k >= nhblk1); 11777 nucleus_hblk1.len = k; 11778 SFMMU_STAT_ADD(sf_hblk1_ncreate, k); 11779 } 11780 11781 /* 11782 * This function is currently not supported on this platform. For what 11783 * it's supposed to do, see hat.c and hat_srmmu.c 11784 */ 11785 /* ARGSUSED */ 11786 faultcode_t 11787 hat_softlock(struct hat *hat, caddr_t addr, size_t *lenp, page_t **ppp, 11788 uint_t flags) 11789 { 11790 ASSERT(hat->sfmmu_xhat_provider == NULL); 11791 return (FC_NOSUPPORT); 11792 } 11793 11794 /* 11795 * Searchs the mapping list of the page for a mapping of the same size. If not 11796 * found the corresponding bit is cleared in the p_index field. When large 11797 * pages are more prevalent in the system, we can maintain the mapping list 11798 * in order and we don't have to traverse the list each time. Just check the 11799 * next and prev entries, and if both are of different size, we clear the bit. 11800 */ 11801 static void 11802 sfmmu_rm_large_mappings(page_t *pp, int ttesz) 11803 { 11804 struct sf_hment *sfhmep; 11805 struct hme_blk *hmeblkp; 11806 int index; 11807 pgcnt_t npgs; 11808 11809 ASSERT(ttesz > TTE8K); 11810 11811 ASSERT(sfmmu_mlist_held(pp)); 11812 11813 ASSERT(PP_ISMAPPED_LARGE(pp)); 11814 11815 /* 11816 * Traverse mapping list looking for another mapping of same size. 11817 * since we only want to clear index field if all mappings of 11818 * that size are gone. 11819 */ 11820 11821 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 11822 hmeblkp = sfmmu_hmetohblk(sfhmep); 11823 if (hmeblkp->hblk_xhat_bit) 11824 continue; 11825 if (hme_size(sfhmep) == ttesz) { 11826 /* 11827 * another mapping of the same size. don't clear index. 11828 */ 11829 return; 11830 } 11831 } 11832 11833 /* 11834 * Clear the p_index bit for large page. 
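 * PAGESZ_TO_INDEX() gives one bit per page size, and a large
 * mapping covers TTEPAGES(ttesz) constituent base pages; for a
 * 64K mapping, for example, that is eight 8K page_t's, each of
 * which must have the same bit cleared in its p_index, which is
 * what the loop below does.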
/*
 * Searches the mapping list of the page for a mapping of the same size. If
 * not found, the corresponding bit is cleared in the p_index field. When large
 * pages are more prevalent in the system, we can maintain the mapping list
 * in order and we don't have to traverse the list each time. Just check the
 * next and prev entries, and if both are of different size, we clear the bit.
 */
static void
sfmmu_rm_large_mappings(page_t *pp, int ttesz)
{
	struct sf_hment *sfhmep;
	struct hme_blk *hmeblkp;
	int	index;
	pgcnt_t	npgs;

	ASSERT(ttesz > TTE8K);

	ASSERT(sfmmu_mlist_held(pp));

	ASSERT(PP_ISMAPPED_LARGE(pp));

	/*
	 * Traverse the mapping list looking for another mapping of the same
	 * size, since we only want to clear the index field if all mappings
	 * of that size are gone.
	 */
	for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
		hmeblkp = sfmmu_hmetohblk(sfhmep);
		if (hmeblkp->hblk_xhat_bit)
			continue;
		if (hme_size(sfhmep) == ttesz) {
			/*
			 * another mapping of the same size. don't clear index.
			 */
			return;
		}
	}

	/*
	 * Clear the p_index bit for large page.
	 */
	index = PAGESZ_TO_INDEX(ttesz);
	npgs = TTEPAGES(ttesz);
	while (npgs-- > 0) {
		ASSERT(pp->p_index & index);
		pp->p_index &= ~index;
		pp = PP_PAGENEXT(pp);
	}
}

/*
 * return supported features
 */
/* ARGSUSED */
int
hat_supported(enum hat_features feature, void *arg)
{
	switch (feature) {
	case HAT_SHARED_PT:
	case HAT_DYNAMIC_ISM_UNMAP:
	case HAT_VMODSORT:
		return (1);
	default:
		return (0);
	}
}

void
hat_enter(struct hat *hat)
{
	hatlock_t *hatlockp;

	if (hat != ksfmmup) {
		hatlockp = TSB_HASH(hat);
		mutex_enter(HATLOCK_MUTEXP(hatlockp));
	}
}

void
hat_exit(struct hat *hat)
{
	hatlock_t *hatlockp;

	if (hat != ksfmmup) {
		hatlockp = TSB_HASH(hat);
		mutex_exit(HATLOCK_MUTEXP(hatlockp));
	}
}

/*ARGSUSED*/
void
hat_reserve(struct as *as, caddr_t addr, size_t len)
{
}

static void
hat_kstat_init(void)
{
	kstat_t *ksp;

	ksp = kstat_create("unix", 0, "sfmmu_global_stat", "hat",
	    KSTAT_TYPE_RAW, sizeof (struct sfmmu_global_stat),
	    KSTAT_FLAG_VIRTUAL);
	if (ksp) {
		ksp->ks_data = (void *) &sfmmu_global_stat;
		kstat_install(ksp);
	}
	ksp = kstat_create("unix", 0, "sfmmu_tsbsize_stat", "hat",
	    KSTAT_TYPE_RAW, sizeof (struct sfmmu_tsbsize_stat),
	    KSTAT_FLAG_VIRTUAL);
	if (ksp) {
		ksp->ks_data = (void *) &sfmmu_tsbsize_stat;
		kstat_install(ksp);
	}
	ksp = kstat_create("unix", 0, "sfmmu_percpu_stat", "hat",
	    KSTAT_TYPE_RAW, sizeof (struct sfmmu_percpu_stat) * NCPU,
	    KSTAT_FLAG_WRITABLE);
	if (ksp) {
		ksp->ks_update = sfmmu_kstat_percpu_update;
		kstat_install(ksp);
	}
}

/* ARGSUSED */
static int
sfmmu_kstat_percpu_update(kstat_t *ksp, int rw)
{
	struct sfmmu_percpu_stat *cpu_kstat = ksp->ks_data;
	struct tsbmiss *tsbm = tsbmiss_area;
	struct kpmtsbm *kpmtsbm = kpmtsbm_area;
	int i;

	ASSERT(cpu_kstat);
	if (rw == KSTAT_READ) {
		for (i = 0; i < NCPU; cpu_kstat++, tsbm++, kpmtsbm++, i++) {
			cpu_kstat->sf_itlb_misses = tsbm->itlb_misses;
			cpu_kstat->sf_dtlb_misses = tsbm->dtlb_misses;
			cpu_kstat->sf_utsb_misses = tsbm->utsb_misses -
			    tsbm->uprot_traps;
			cpu_kstat->sf_ktsb_misses = tsbm->ktsb_misses +
			    kpmtsbm->kpm_tsb_misses - tsbm->kprot_traps;

			if (tsbm->itlb_misses > 0 && tsbm->dtlb_misses > 0) {
				cpu_kstat->sf_tsb_hits =
				    (tsbm->itlb_misses + tsbm->dtlb_misses) -
				    (tsbm->utsb_misses + tsbm->ktsb_misses +
				    kpmtsbm->kpm_tsb_misses);
			} else {
				cpu_kstat->sf_tsb_hits = 0;
			}
			cpu_kstat->sf_umod_faults = tsbm->uprot_traps;
			cpu_kstat->sf_kmod_faults = tsbm->kprot_traps;
		}
	} else {
		/* KSTAT_WRITE is used to clear stats */
		for (i = 0; i < NCPU; tsbm++, kpmtsbm++, i++) {
			tsbm->itlb_misses = 0;
			tsbm->dtlb_misses = 0;
			tsbm->utsb_misses = 0;
			tsbm->ktsb_misses = 0;
			tsbm->uprot_traps = 0;
			tsbm->kprot_traps = 0;
			kpmtsbm->kpm_dtlb_misses = 0;
			kpmtsbm->kpm_tsb_misses = 0;
		}
	}
	return (0);
}
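
/*
 * Illustrative sketch: the KSTAT_READ path above derives a "TSB hits" figure
 * from raw miss counters - every TLB miss that did not also miss in a TSB
 * was, by definition, a TSB hit. The standalone program below repeats that
 * arithmetic on made-up counter values. Kept under #if 0 so it never builds
 * as part of this file.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

struct ex_miss_counters {
	uint64_t itlb_misses;
	uint64_t dtlb_misses;
	uint64_t utsb_misses;
	uint64_t ktsb_misses;
	uint64_t kpm_tsb_misses;
};

static uint64_t
ex_tsb_hits(const struct ex_miss_counters *c)
{
	/* mirror the guard above: report 0 until both TLB counters move */
	if (c->itlb_misses == 0 || c->dtlb_misses == 0)
		return (0);
	return ((c->itlb_misses + c->dtlb_misses) -
	    (c->utsb_misses + c->ktsb_misses + c->kpm_tsb_misses));
}

int
main(void)
{
	struct ex_miss_counters c = { 1000, 4000, 300, 200, 50 };

	/* 5000 TLB misses, 550 of them also missed in a TSB -> 4450 hits */
	printf("tsb hits: %llu\n", (unsigned long long)ex_tsb_hits(&c));
	return (0);
}
#endif
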
#ifdef DEBUG

tte_t  *gorig[NCPU], *gcur[NCPU], *gnew[NCPU];

/*
 * A tte checker.
 * *orig_old is the value we read before cas.
 * *cur is the value returned by cas.
 * *new is the desired value when we do the cas.
 *
 * *hmeblkp is currently unused.
 */

/* ARGSUSED */
void
chk_tte(tte_t *orig_old, tte_t *cur, tte_t *new, struct hme_blk *hmeblkp)
{
	pfn_t i, j, k;
	int cpuid = CPU->cpu_id;

	gorig[cpuid] = orig_old;
	gcur[cpuid] = cur;
	gnew[cpuid] = new;

#ifdef lint
	hmeblkp = hmeblkp;
#endif

	if (TTE_IS_VALID(orig_old)) {
		if (TTE_IS_VALID(cur)) {
			i = TTE_TO_TTEPFN(orig_old);
			j = TTE_TO_TTEPFN(cur);
			k = TTE_TO_TTEPFN(new);
			if (i != j) {
				/* remap error? */
				panic("chk_tte: bad pfn, 0x%lx, 0x%lx", i, j);
			}

			if (i != k) {
				/* remap error? */
				panic("chk_tte: bad pfn2, 0x%lx, 0x%lx", i, k);
			}
		} else {
			if (TTE_IS_VALID(new)) {
				panic("chk_tte: invalid cur? ");
			}

			i = TTE_TO_TTEPFN(orig_old);
			k = TTE_TO_TTEPFN(new);
			if (i != k) {
				panic("chk_tte: bad pfn3, 0x%lx, 0x%lx", i, k);
			}
		}
	} else {
		if (TTE_IS_VALID(cur)) {
			j = TTE_TO_TTEPFN(cur);
			if (TTE_IS_VALID(new)) {
				k = TTE_TO_TTEPFN(new);
				if (j != k) {
					panic("chk_tte: bad pfn4, 0x%lx, 0x%lx",
					    j, k);
				}
			} else {
				panic("chk_tte: why here?");
			}
		} else {
			if (!TTE_IS_VALID(new)) {
				panic("chk_tte: why here2 ?");
			}
		}
	}
}

#endif /* DEBUG */

extern void prefetch_tsbe_read(struct tsbe *);
extern void prefetch_tsbe_write(struct tsbe *);


/*
 * We want to prefetch 7 cache lines ahead for our read prefetch. This gives
 * us optimal performance on Cheetah+. You can only have 8 outstanding
 * prefetches at any one time, so we opted for 7 read prefetches and 1 write
 * prefetch to make the best use of the prefetch capability.
 */
#define	TSBE_PREFETCH_STRIDE (7)
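
/*
 * Illustrative sketch: with 16-byte TSB entries there are four entries per
 * 64-byte cache line, so the copy loop below issues one read prefetch every
 * fourth entry and stops once fewer than (TSBE_PREFETCH_STRIDE + 1) lines
 * remain, so it never prefetches past the end of the old TSB. The standalone
 * program below just counts how often that condition fires; the entry count
 * is made up. Kept under #if 0 so it never builds as part of this file.
 */
#if 0
#include <stdio.h>

#define	EX_STRIDE	7	/* cf. TSBE_PREFETCH_STRIDE */

int
main(void)
{
	int entries = 64;	/* hypothetical old-TSB entry count */
	int last_prefetch = entries - (4 * (EX_STRIDE + 1));
	int i, prefetches = 0;

	for (i = 0; i < entries; i++) {
		if (((i & (4 - 1)) == 0) && (i < last_prefetch)) {
			/* real code would call prefetch_tsbe_read() here */
			prefetches++;
		}
	}
	/* 64 entries, prefetch window ends at entry 32 -> 8 prefetches */
	printf("%d prefetches for %d entries\n", prefetches, entries);
	return (0);
}
#endif
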
void
sfmmu_copy_tsb(struct tsb_info *old_tsbinfo, struct tsb_info *new_tsbinfo)
{
	int old_bytes = TSB_BYTES(old_tsbinfo->tsb_szc);
	int new_bytes = TSB_BYTES(new_tsbinfo->tsb_szc);
	int old_entries = TSB_ENTRIES(old_tsbinfo->tsb_szc);
	int new_entries = TSB_ENTRIES(new_tsbinfo->tsb_szc);
	struct tsbe *old;
	struct tsbe *new;
	struct tsbe *new_base = (struct tsbe *)new_tsbinfo->tsb_va;
	uint64_t va;
	int new_offset;
	int i;
	int vpshift;
	int last_prefetch;

	if (old_bytes == new_bytes) {
		bcopy(old_tsbinfo->tsb_va, new_tsbinfo->tsb_va, new_bytes);
	} else {

		/*
		 * A TSBE is 16 bytes which means there are four TSBE's per
		 * P$ line (64 bytes), thus every 4 TSBE's we prefetch.
		 */
		old = (struct tsbe *)old_tsbinfo->tsb_va;
		last_prefetch = old_entries - (4*(TSBE_PREFETCH_STRIDE+1));
		for (i = 0; i < old_entries; i++, old++) {
			if (((i & (4-1)) == 0) && (i < last_prefetch))
				prefetch_tsbe_read(old);
			if (!old->tte_tag.tag_invalid) {
				/*
				 * We have a valid TTE to remap. Check the
				 * size. We won't remap 64K or 512K TTEs
				 * because they span more than one TSB entry
				 * and are indexed using an 8K virt. page.
				 * Ditto for 32M and 256M TTEs.
				 */
				if (TTE_CSZ(&old->tte_data) == TTE64K ||
				    TTE_CSZ(&old->tte_data) == TTE512K)
					continue;
				if (mmu_page_sizes == max_mmu_page_sizes) {
					if (TTE_CSZ(&old->tte_data) == TTE32M ||
					    TTE_CSZ(&old->tte_data) == TTE256M)
						continue;
				}

				/* clear the lower 22 bits of the va */
				va = *(uint64_t *)old << 22;
				/* turn va into a virtual pfn */
				va >>= 22 - TSB_START_SIZE;
				/*
				 * or in bits from the offset in the tsb
				 * to get the real virtual pfn. These
				 * correspond to bits [21:13] in the va
				 */
				vpshift =
				    TTE_BSZS_SHIFT(TTE_CSZ(&old->tte_data)) &
				    0x1ff;
				va |= (i << vpshift);
				va >>= vpshift;
				new_offset = va & (new_entries - 1);
				new = new_base + new_offset;
				prefetch_tsbe_write(new);
				*new = *old;
			}
		}
	}
}

/*
 * unused in sfmmu
 */
void
hat_dump(void)
{
}

/*
 * Called when a thread is exiting and we have switched to the kernel address
 * space. Perform the same VM initialization resume() uses when switching
 * processes.
 *
 * Note that sfmmu_load_mmustate() is currently a no-op for kernel threads, but
 * we call it anyway in case the semantics change in the future.
 */
/*ARGSUSED*/
void
hat_thread_exit(kthread_t *thd)
{
	uint64_t pgsz_cnum;
	uint_t pstate_save;

	ASSERT(thd->t_procp->p_as == &kas);

	pgsz_cnum = KCONTEXT;
#ifdef sun4u
	pgsz_cnum |= (ksfmmup->sfmmu_cext << CTXREG_EXT_SHIFT);
#endif
	/*
	 * Note that sfmmu_load_mmustate() is currently a no-op for
	 * kernel threads. We need to disable interrupts here,
	 * simply because otherwise sfmmu_load_mmustate() would panic
	 * if the caller does not disable interrupts.
	 */
	pstate_save = sfmmu_disable_intrs();
	sfmmu_setctx_sec(pgsz_cnum);
	sfmmu_load_mmustate(ksfmmup);
	sfmmu_enable_intrs(pstate_save);
}
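
/*
 * Illustrative sketch: hat_thread_exit() above builds the secondary-context
 * value by OR'ing the kernel context number with the page-size extension
 * (cext) bits shifted into place. The standalone program below composes a
 * value the same way; the field position and cext value used here are
 * hypothetical, not the real CTXREG layout. Kept under #if 0 so it never
 * builds as part of this file.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define	EX_KCONTEXT		0	/* kernel context number */
#define	EX_CTXREG_EXT_SHIFT	16	/* hypothetical ext-field position */

int
main(void)
{
	uint64_t cext = 0x5;	/* hypothetical page-size extension bits */
	uint64_t cnum = EX_KCONTEXT | (cext << EX_CTXREG_EXT_SHIFT);

	printf("secondary context image: 0x%llx\n", (unsigned long long)cnum);
	return (0);
}
#endif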