/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * VM - Hardware Address Translation management for Spitfire MMU.
 *
 * This file implements the machine specific hardware translation
 * needed by the VM system.  The machine independent interface is
 * described in <vm/hat.h> while the machine dependent interface
 * and data structures are described in <vm/hat_sfmmu.h>.
 *
 * The hat layer manages the address translation hardware as a cache
 * driven by calls from the higher levels in the VM system.
 */

#include <sys/types.h>
#include <sys/kstat.h>
#include <vm/hat.h>
#include <vm/hat_sfmmu.h>
#include <vm/page.h>
#include <sys/pte.h>
#include <sys/systm.h>
#include <sys/mman.h>
#include <sys/sysmacros.h>
#include <sys/machparam.h>
#include <sys/vtrace.h>
#include <sys/kmem.h>
#include <sys/mmu.h>
#include <sys/cmn_err.h>
#include <sys/cpu.h>
#include <sys/cpuvar.h>
#include <sys/debug.h>
#include <sys/lgrp.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/vmsystm.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_kp.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kpm.h>
#include <vm/rm.h>
#include <sys/t_lock.h>
#include <sys/obpdefs.h>
#include <sys/vm_machparam.h>
#include <sys/var.h>
#include <sys/trap.h>
#include <sys/machtrap.h>
#include <sys/scb.h>
#include <sys/bitmap.h>
#include <sys/machlock.h>
#include <sys/membar.h>
#include <sys/atomic.h>
#include <sys/cpu_module.h>
#include <sys/prom_debug.h>
#include <sys/ksynch.h>
#include <sys/mem_config.h>
#include <sys/mem_cage.h>
#include <sys/dtrace.h>
#include <vm/vm_dep.h>
#include <vm/xhat_sfmmu.h>
#include <sys/fpu/fpusystm.h>
#include <vm/mach_kpm.h>

#if defined(SF_ERRATA_57)
extern caddr_t errata57_limit;
#endif

#define	HME8BLK_SZ_RND		((roundup(HME8BLK_SZ, sizeof (int64_t))) /  \
				(sizeof (int64_t)))
#define	HBLK_RESERVE		((struct hme_blk *)hblk_reserve)

#define	HBLK_RESERVE_CNT	128
#define	HBLK_RESERVE_MIN	20

static struct hme_blk *freehblkp;
static kmutex_t freehblkp_lock;
static int freehblkcnt;

static int64_t hblk_reserve[HME8BLK_SZ_RND];
static kmutex_t hblk_reserve_lock;
static kthread_t *hblk_reserve_thread;

static nucleus_hblk8_info_t nucleus_hblk8;
static nucleus_hblk1_info_t nucleus_hblk1;

/*
 * SFMMU specific hat functions
 */
void	hat_pagecachectl(struct page *, int);

/* flags for hat_pagecachectl */
#define	HAT_CACHE	0x1
#define	HAT_UNCACHE	0x2
#define	HAT_TMPNC	0x4

/*
 * Flag to allow the creation of non-cacheable translations
 * to system memory.  It is off by default.  At the moment this
 * flag is used by the ecache error injector.  The error injector
 * will turn it on when creating such a translation then shut it
 * off when it's finished.
 */

int	sfmmu_allow_nc_trans = 0;

/*
 * Flag to disable large page support.
 *	value of 1 => disable all large pages.
 *	bits 1, 2, and 3 are to disable 64K, 512K and 4M pages respectively.
 *
 * For example, use the value 0x4 to disable 512K pages.
 *
 */
#define	LARGE_PAGES_OFF		0x1

/*
 * The disable_large_pages and disable_ism_large_pages variables control
 * hat_memload_array and the page sizes to be used by ISM and the kernel.
 *
 * The disable_auto_data_large_pages and disable_auto_text_large_pages variables
 * are only used to control which OOB pages to use at upper VM segment creation
 * time, and are set in hat_init_pagesizes and used in the map_pgsz* routines.
 * Their values may come from platform or CPU specific code to disable page
 * sizes that should not be used.
 *
 * WARNING: 512K pages are currently not supported for ISM/DISM.
 */
uint_t	disable_large_pages = 0;
uint_t	disable_ism_large_pages = (1 << TTE512K);
uint_t	disable_auto_data_large_pages = 0;
uint_t	disable_auto_text_large_pages = 0;

/*
 * Private sfmmu data structures for hat management
 */
static struct kmem_cache *sfmmuid_cache;
static struct kmem_cache *mmuctxdom_cache;

/*
 * Private sfmmu data structures for tsb management
 */
static struct kmem_cache *sfmmu_tsbinfo_cache;
static struct kmem_cache *sfmmu_tsb8k_cache;
static struct kmem_cache *sfmmu_tsb_cache[NLGRPS_MAX];
static vmem_t *kmem_tsb_arena;

/*
 * sfmmu static variables for hmeblk resource management.
 */
static vmem_t *hat_memload1_arena; /* HAT translation arena for sfmmu1_cache */
static struct kmem_cache *sfmmu8_cache;
static struct kmem_cache *sfmmu1_cache;
static struct kmem_cache *pa_hment_cache;

static kmutex_t	ism_mlist_lock;	/* mutex for ism mapping list */
/*
 * private data for ism
 */
static struct kmem_cache *ism_blk_cache;
static struct kmem_cache *ism_ment_cache;
#define	ISMID_STARTADDR	NULL

/*
 * Whether to delay TLB flushes and use Cheetah's flush-all support
 * when removing contexts from the dirty list.
 */
int delay_tlb_flush;
int disable_delay_tlb_flush;

/*
 * ``hat_lock'' is a hashed mutex lock for protecting sfmmu TSB lists,
 * HAT flags, synchronizing TLB/TSB coherency, and context management.
 * The lock is hashed on the sfmmup since the case where we need to lock
 * all processes is rare but does occur (e.g. we need to unload a shared
 * mapping from all processes using the mapping).  We have a lot of buckets,
 * and each slab of sfmmu_t's can use about a quarter of them, giving us
 * a fairly good distribution without wasting too much space and overhead
 * when we have to grab them all.
 */
#define	SFMMU_NUM_LOCK	128		/* must be power of two */
hatlock_t	hat_lock[SFMMU_NUM_LOCK];

/*
 * Hash algorithm optimized for a small number of slabs.
 *  7 is (highbit((sizeof sfmmu_t)) - 1)
 * This hash algorithm is based upon the knowledge that sfmmu_t's come from a
 * kmem_cache, and thus they will be sequential within that cache.  In
 * addition, each new slab will have a different "color" up to cache_maxcolor
 * which will skew the hashing for each successive slab which is allocated.
 * If the size of sfmmu_t changed to a larger size, this algorithm may need
 * to be revisited.
 */
#define	TSB_HASH_SHIFT_BITS (7)
#define	PTR_HASH(x) ((uintptr_t)x >> TSB_HASH_SHIFT_BITS)

#ifdef DEBUG
int tsb_hash_debug = 0;
#define	TSB_HASH(sfmmup)	\
	(tsb_hash_debug ? &hat_lock[0] : \
	&hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)])
#else	/* DEBUG */
#define	TSB_HASH(sfmmup)	&hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)]
#endif	/* DEBUG */
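
/*
 * Illustrative example (not from the original source; the addresses
 * here are hypothetical): with TSB_HASH_SHIFT_BITS == 7, two sfmmu_t's
 * allocated 0x100 bytes apart in the same slab, say at 0x30002000 and
 * 0x30002100, hash to buckets (0x30002000 >> 7) & 127 == 64 and
 * (0x30002100 >> 7) & 127 == 66, so neighboring hats land on different
 * lock buckets (and different cachelines).
 */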

/* sfmmu_replace_tsb() return codes. */
typedef enum tsb_replace_rc {
	TSB_SUCCESS,
	TSB_ALLOCFAIL,
	TSB_LOSTRACE,
	TSB_ALREADY_SWAPPED,
	TSB_CANTGROW
} tsb_replace_rc_t;

/*
 * Flags for TSB allocation routines.
 */
#define	TSB_ALLOC	0x01
#define	TSB_FORCEALLOC	0x02
#define	TSB_GROW	0x04
#define	TSB_SHRINK	0x08
#define	TSB_SWAPIN	0x10

/*
 * Support for HAT callbacks.
 */
#define	SFMMU_MAX_RELOC_CALLBACKS	10
int sfmmu_max_cb_id = SFMMU_MAX_RELOC_CALLBACKS;
static id_t sfmmu_cb_nextid = 0;
static id_t sfmmu_tsb_cb_id;
struct sfmmu_callback *sfmmu_cb_table;

/*
 * Kernel page relocation is enabled by default for non-caged
 * kernel pages.  This has little effect unless segkmem_reloc is
 * set, since by default kernel memory comes from inside the
 * kernel cage.
 */
int hat_kpr_enabled = 1;

kmutex_t	kpr_mutex;
kmutex_t	kpr_suspendlock;
kthread_t	*kreloc_thread;

/*
 * Enable VA->PA translation sanity checking on DEBUG kernels.
 * Disabled by default.  This is incompatible with some
 * drivers (error injector, RSM) so if it breaks you get
 * to keep both pieces.
 */
int hat_check_vtop = 0;

/*
 * Private sfmmu routines (prototypes)
 */
static struct hme_blk *sfmmu_shadow_hcreate(sfmmu_t *, caddr_t, int, uint_t);
static struct hme_blk *sfmmu_hblk_alloc(sfmmu_t *, caddr_t,
			struct hmehash_bucket *, uint_t, hmeblk_tag, uint_t);
static caddr_t	sfmmu_hblk_unload(struct hat *, struct hme_blk *, caddr_t,
			caddr_t, demap_range_t *, uint_t);
static caddr_t	sfmmu_hblk_sync(struct hat *, struct hme_blk *, caddr_t,
			caddr_t, int);
static void	sfmmu_hblk_free(struct hmehash_bucket *, struct hme_blk *,
			uint64_t, struct hme_blk **);
static void	sfmmu_hblks_list_purge(struct hme_blk **);
static uint_t	sfmmu_get_free_hblk(struct hme_blk **, uint_t);
static uint_t	sfmmu_put_free_hblk(struct hme_blk *, uint_t);
static struct hme_blk *sfmmu_hblk_steal(int);
static int	sfmmu_steal_this_hblk(struct hmehash_bucket *,
			struct hme_blk *, uint64_t, uint64_t,
			struct hme_blk *);
static caddr_t	sfmmu_hblk_unlock(struct hme_blk *, caddr_t, caddr_t);

static void	sfmmu_memload_batchsmall(struct hat *, caddr_t, page_t **,
			uint_t, uint_t, pgcnt_t);
void		sfmmu_tteload(struct hat *, tte_t *, caddr_t, page_t *,
			uint_t);
static int	sfmmu_tteload_array(sfmmu_t *, tte_t *, caddr_t, page_t **,
			uint_t);
static struct hmehash_bucket *sfmmu_tteload_acquire_hashbucket(sfmmu_t *,
			caddr_t, int);
static struct hme_blk *sfmmu_tteload_find_hmeblk(sfmmu_t *,
			struct hmehash_bucket *, caddr_t, uint_t, uint_t);
static int	sfmmu_tteload_addentry(sfmmu_t *, struct hme_blk *, tte_t *,
			caddr_t, page_t **, uint_t);
static void	sfmmu_tteload_release_hashbucket(struct hmehash_bucket *);

static int	sfmmu_pagearray_setup(caddr_t, page_t **, tte_t *, int);
pfn_t		sfmmu_uvatopfn(caddr_t, sfmmu_t *);
void		sfmmu_memtte(tte_t *, pfn_t, uint_t, int);
#ifdef VAC
static void	sfmmu_vac_conflict(struct hat *, caddr_t, page_t *);
static int	sfmmu_vacconflict_array(caddr_t, page_t *, int *);
int	tst_tnc(page_t *pp, pgcnt_t);
void	conv_tnc(page_t *pp, int);
#endif

static void	sfmmu_get_ctx(sfmmu_t *);
static void	sfmmu_free_sfmmu(sfmmu_t *);

static void	sfmmu_gettte(struct hat *, caddr_t, tte_t *);
static void	sfmmu_ttesync(struct hat *, caddr_t, tte_t *, page_t *);
static void	sfmmu_chgattr(struct hat *, caddr_t, size_t, uint_t, int);

cpuset_t	sfmmu_pageunload(page_t *, struct sf_hment *, int);
static void	hat_pagereload(struct page *, struct page *);
static cpuset_t	sfmmu_pagesync(page_t *, struct sf_hment *, uint_t);
#ifdef VAC
void	sfmmu_page_cache_array(page_t *, int, int, pgcnt_t);
static void	sfmmu_page_cache(page_t *, int, int, int);
#endif

static void	sfmmu_tlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *,
			pfn_t, int, int, int, int);
static void	sfmmu_ismtlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *,
			pfn_t, int);
static void	sfmmu_tlb_demap(caddr_t, sfmmu_t *, struct hme_blk *, int, int);
static void	sfmmu_tlb_range_demap(demap_range_t *);
static void	sfmmu_invalidate_ctx(sfmmu_t *);
static void	sfmmu_sync_mmustate(sfmmu_t *);

static void	sfmmu_tsbinfo_setup_phys(struct tsb_info *, pfn_t);
static int	sfmmu_tsbinfo_alloc(struct tsb_info **, int, int, uint_t,
			sfmmu_t *);
static void	sfmmu_tsb_free(struct tsb_info *);
static void	sfmmu_tsbinfo_free(struct tsb_info *);
static int	sfmmu_init_tsbinfo(struct tsb_info *, int, int, uint_t,
			sfmmu_t *);
static void	sfmmu_tsb_swapin(sfmmu_t *, hatlock_t *);
static int	sfmmu_select_tsb_szc(pgcnt_t);
static void	sfmmu_mod_tsb(sfmmu_t *, caddr_t, tte_t *, int);
#define	sfmmu_load_tsb(sfmmup, vaddr, tte, szc)	\
	sfmmu_mod_tsb(sfmmup, vaddr, tte, szc)
#define	sfmmu_unload_tsb(sfmmup, vaddr, szc)	\
	sfmmu_mod_tsb(sfmmup, vaddr, NULL, szc)
static void	sfmmu_copy_tsb(struct tsb_info *, struct tsb_info *);
static tsb_replace_rc_t sfmmu_replace_tsb(sfmmu_t *, struct tsb_info *, uint_t,
	hatlock_t *, uint_t);
static void	sfmmu_size_tsb(sfmmu_t *, int, uint64_t, uint64_t, int);

#ifdef VAC
void	sfmmu_cache_flush(pfn_t, int);
void	sfmmu_cache_flushcolor(int, pfn_t);
#endif
static caddr_t	sfmmu_hblk_chgattr(sfmmu_t *, struct hme_blk *, caddr_t,
			caddr_t, demap_range_t *, uint_t, int);

static uint64_t	sfmmu_vtop_attr(uint_t, int mode, tte_t *);
static uint_t	sfmmu_ptov_attr(tte_t *);
static caddr_t	sfmmu_hblk_chgprot(sfmmu_t *, struct hme_blk *, caddr_t,
			caddr_t, demap_range_t *, uint_t);
static uint_t	sfmmu_vtop_prot(uint_t, uint_t *);
static int	sfmmu_idcache_constructor(void *, void *, int);
static void	sfmmu_idcache_destructor(void *, void *);
static int	sfmmu_hblkcache_constructor(void *, void *, int);
static void	sfmmu_hblkcache_destructor(void *, void *);
static void	sfmmu_hblkcache_reclaim(void *);
static void	sfmmu_shadow_hcleanup(sfmmu_t *, struct hme_blk *,
			struct hmehash_bucket *);
static void	sfmmu_free_hblks(sfmmu_t *, caddr_t, caddr_t, int);
static void	sfmmu_rm_large_mappings(page_t *, int);

static void	hat_lock_init(void);
static void	hat_kstat_init(void);
static int	sfmmu_kstat_percpu_update(kstat_t *ksp, int rw);
static void	sfmmu_check_page_sizes(sfmmu_t *, int);
int	fnd_mapping_sz(page_t *);
static void	iment_add(struct ism_ment *, struct hat *);
static void	iment_sub(struct ism_ment *, struct hat *);
static pgcnt_t	ism_tsb_entries(sfmmu_t *, int szc);
extern void	sfmmu_setup_tsbinfo(sfmmu_t *);
#ifdef sun4v
extern void	sfmmu_invalidate_tsbinfo(sfmmu_t *);
#endif /* sun4v */
extern void	sfmmu_clear_utsbinfo(void);

static void	sfmmu_ctx_wrap_around(mmu_ctx_t *);

/* kpm globals */
#ifdef	DEBUG
/*
 * Enable trap level tsbmiss handling
 */
int	kpm_tsbmtl = 1;

/*
 * Flush the TLB on kpm mapout.  Note: Xcalls are used (again) for the
 * required TLB shootdowns in this case, so handle w/ care. Off by default.
 */
int	kpm_tlb_flush;
#endif	/* DEBUG */

static void	*sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *, size_t, int);

#ifdef DEBUG
static void	sfmmu_check_hblk_flist();
#endif

/*
 * Semi-private sfmmu data structures.  Some of them are initialized in
 * startup or in hat_init. Some of them are private but accessed by
 * assembly code or mach_sfmmu.c
 */
struct hmehash_bucket *uhme_hash;	/* user hmeblk hash table */
struct hmehash_bucket *khme_hash;	/* kernel hmeblk hash table */
uint64_t	uhme_hash_pa;		/* PA of uhme_hash */
uint64_t	khme_hash_pa;		/* PA of khme_hash */
int		uhmehash_num;		/* # of buckets in user hash table */
int		khmehash_num;		/* # of buckets in kernel hash table */

uint_t		max_mmu_ctxdoms = 0;	/* max context domains in the system */
mmu_ctx_t	**mmu_ctxs_tbl;		/* global array of context domains */
uint64_t	mmu_saved_gnum = 0;	/* to init incoming MMUs' gnums */

#define	DEFAULT_NUM_CTXS_PER_MMU 8192
static uint_t	nctxs = DEFAULT_NUM_CTXS_PER_MMU;

int		cache;			/* describes system cache */

caddr_t		ktsb_base;	/* kernel 8k-indexed tsb base address */
uint64_t	ktsb_pbase;	/* kernel 8k-indexed tsb phys address */
int		ktsb_szcode;	/* kernel 8k-indexed tsb size code */
int		ktsb_sz;	/* kernel 8k-indexed tsb size */

caddr_t		ktsb4m_base;	/* kernel 4m-indexed tsb base address */
uint64_t	ktsb4m_pbase;	/* kernel 4m-indexed tsb phys address */
int		ktsb4m_szcode;	/* kernel 4m-indexed tsb size code */
int		ktsb4m_sz;	/* kernel 4m-indexed tsb size */

uint64_t	kpm_tsbbase;	/* kernel seg_kpm 4M TSB base address */
int		kpm_tsbsz;	/* kernel seg_kpm 4M TSB size code */
uint64_t	kpmsm_tsbbase;	/* kernel seg_kpm 8K TSB base address */
int		kpmsm_tsbsz;	/* kernel seg_kpm 8K TSB size code */

#ifndef sun4v
int		utsb_dtlb_ttenum = -1;	/* index in TLB for utsb locked TTE */
int		utsb4m_dtlb_ttenum = -1; /* index in TLB for 4M TSB TTE */
int		dtlb_resv_ttenum;	/* index in TLB of first reserved TTE */
caddr_t		utsb_vabase;		/* reserved kernel virtual memory */
caddr_t		utsb4m_vabase;		/* for trap handler TSB accesses */
#endif /* sun4v */
uint64_t	tsb_alloc_bytes = 0;	/* bytes allocated to TSBs */
vmem_t		*kmem_tsb_default_arena[NLGRPS_MAX];	/* For dynamic TSBs */

/*
 * Size to use for TSB slabs.  Future platforms that support page sizes
 * larger than 4M may wish to change these values, and provide their own
 * assembly macros for building and decoding the TSB base register contents.
 * Note disable_large_pages will override the value set here.
 */
uint_t	tsb_slab_ttesz = TTE4M;
uint_t	tsb_slab_size;
uint_t	tsb_slab_shift;
uint_t	tsb_slab_mask;	/* PFN mask for TTE */

/* largest TSB size to grow to, will be smaller on smaller memory systems */
int	tsb_max_growsize = UTSB_MAX_SZCODE;

/*
 * Tunable parameters dealing with TSB policies.
 */

/*
 * This undocumented tunable forces all 8K TSBs to be allocated from
 * the kernel heap rather than from the kmem_tsb_default_arena arenas.
 */
#ifdef	DEBUG
int	tsb_forceheap = 0;
#endif	/* DEBUG */

/*
 * Decide whether to use per-lgroup arenas, or one global set of
 * TSB arenas.  The default is not to break up per-lgroup, since
 * most platforms don't recognize any tangible benefit from it.
 */
int	tsb_lgrp_affinity = 0;

/*
 * Used for growing the TSB based on the process RSS.
 * tsb_rss_factor is based on the smallest TSB, and is
 * shifted by the TSB size to determine if we need to grow.
 * The default will grow the TSB if the number of TTEs for
 * this page size exceeds 75% of the number of TSB entries,
 * which should _almost_ eliminate all conflict misses
 * (at the expense of using up lots and lots of memory).
 */
#define	TSB_RSS_FACTOR		(TSB_ENTRIES(TSB_MIN_SZCODE) * 0.75)
#define	SFMMU_RSS_TSBSIZE(tsbszc)	(tsb_rss_factor << tsbszc)
#define	SELECT_TSB_SIZECODE(pgcnt) ( \
	(enable_tsb_rss_sizing)? sfmmu_select_tsb_szc(pgcnt) : \
	default_tsb_size)
#define	TSB_OK_SHRINK()	\
	(tsb_alloc_bytes > tsb_alloc_hiwater || freemem < desfree)
#define	TSB_OK_GROW()	\
	(tsb_alloc_bytes < tsb_alloc_hiwater && freemem > desfree)

int	enable_tsb_rss_sizing = 1;
int	tsb_rss_factor = (int)TSB_RSS_FACTOR;
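
/*
 * Illustrative arithmetic (not from the original source; assumes the
 * minimum TSB holds 512 entries): TSB_RSS_FACTOR is then 512 * 0.75 ==
 * 384, so SFMMU_RSS_TSBSIZE(0) == 384, SFMMU_RSS_TSBSIZE(1) == 768,
 * and so on.  A process whose TTE count for a page size passes 384
 * while using the smallest TSB becomes a candidate to grow to the
 * next size code.
 */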

/* which TSB size code to use for new address spaces or if rss sizing off */
int default_tsb_size = TSB_8K_SZCODE;

static uint64_t tsb_alloc_hiwater; /* limit TSB reserved memory */
uint64_t tsb_alloc_hiwater_factor; /* tsb_alloc_hiwater = physmem / this */
#define	TSB_ALLOC_HIWATER_FACTOR_DEFAULT	32

#ifdef DEBUG
static int tsb_random_size = 0;	/* set to 1 to test random tsb sizes on alloc */
static int tsb_grow_stress = 0;	/* if set to 1, keep replacing TSB w/ random */
static int tsb_alloc_mtbf = 0;	/* fail allocation every n attempts */
static int tsb_alloc_fail_mtbf = 0;
static int tsb_alloc_count = 0;
#endif /* DEBUG */

/* if set to 1, will remap valid TTEs when growing TSB. */
int tsb_remap_ttes = 1;

/*
 * If we have more than this many mappings, allocate a second TSB.
 * This default is chosen because the I/D fully associative TLBs are
 * assumed to have at least 8 available entries. Platforms with a
 * larger fully-associative TLB could probably override the default.
 */
int tsb_sectsb_threshold = 8;

/*
 * kstat data
 */
struct sfmmu_global_stat sfmmu_global_stat;
struct sfmmu_tsbsize_stat sfmmu_tsbsize_stat;

/*
 * Global data
 */
sfmmu_t	*ksfmmup;		/* kernel's hat id */

#ifdef DEBUG
static void	chk_tte(tte_t *, tte_t *, tte_t *, struct hme_blk *);
#endif

/* sfmmu locking operations */
static kmutex_t *sfmmu_mlspl_enter(struct page *, int);
static int	sfmmu_mlspl_held(struct page *, int);

kmutex_t *sfmmu_page_enter(page_t *);
void	sfmmu_page_exit(kmutex_t *);
int	sfmmu_page_spl_held(struct page *);

/* sfmmu internal locking operations - accessed directly */
static void	sfmmu_mlist_reloc_enter(page_t *, page_t *,
				kmutex_t **, kmutex_t **);
static void	sfmmu_mlist_reloc_exit(kmutex_t *, kmutex_t *);
static hatlock_t *
		sfmmu_hat_enter(sfmmu_t *);
static hatlock_t *
		sfmmu_hat_tryenter(sfmmu_t *);
static void	sfmmu_hat_exit(hatlock_t *);
static void	sfmmu_hat_lock_all(void);
static void	sfmmu_hat_unlock_all(void);
static void	sfmmu_ismhat_enter(sfmmu_t *, int);
static void	sfmmu_ismhat_exit(sfmmu_t *, int);

/*
 * Array of mutexes protecting a page's mapping list and p_nrm field.
 *
 * The hash function looks complicated, but is made up so that:
 *
 * "pp" not shifted, so adjacent pp values will hash to different cache lines
 *  (8 byte alignment * 8 bytes/mutex == 64 byte coherency subblock)
 *
 * "pp" >> mml_shift, incorporates more source bits into the hash result
 *
 *  "& (mml_table_sz - 1), should be faster than using remainder "%"
 *
 * Hopefully, mml_table, mml_table_sz and mml_shift are all in the same
 * cacheline, since they get declared next to each other below.  We'll trust
 * ld not to do something random.
 */
#ifdef DEBUG
int mlist_hash_debug = 0;
#define	MLIST_HASH(pp)	(mlist_hash_debug ? &mml_table[0] : \
	&mml_table[((uintptr_t)(pp) + \
	((uintptr_t)(pp) >> mml_shift)) & (mml_table_sz - 1)])
#else	/* !DEBUG */
#define	MLIST_HASH(pp)	&mml_table[ \
	((uintptr_t)(pp) + ((uintptr_t)(pp) >> mml_shift)) & (mml_table_sz - 1)]
#endif	/* !DEBUG */

kmutex_t	*mml_table;
uint_t		mml_table_sz;	/* must be a power of 2 */
uint_t		mml_shift;	/* log2(mml_table_sz) + 3 for align */

kpm_hlk_t	*kpmp_table;
uint_t		kpmp_table_sz;	/* must be a power of 2 */
uchar_t		kpmp_shift;

kpm_shlk_t	*kpmp_stable;
uint_t		kpmp_stable_sz;	/* must be a power of 2 */

/*
 * SPL_HASH was improved to avoid false cache line sharing
 */
#define	SPL_TABLE_SIZE	128
#define	SPL_MASK	(SPL_TABLE_SIZE - 1)
#define	SPL_SHIFT	7		/* log2(SPL_TABLE_SIZE) */

#define	SPL_INDEX(pp) \
	((((uintptr_t)(pp) >> SPL_SHIFT) ^ \
	((uintptr_t)(pp) >> (SPL_SHIFT << 1))) & \
	(SPL_TABLE_SIZE - 1))

#define	SPL_HASH(pp)	\
	(&sfmmu_page_lock[SPL_INDEX(pp) & SPL_MASK].pad_mutex)

static	pad_mutex_t	sfmmu_page_lock[SPL_TABLE_SIZE];


/*
 * hat_unload_callback() will group together callbacks in order
 * to avoid xt_sync() calls.  This is the maximum size of the group.
 */
#define	MAX_CB_ADDR	32

tte_t	hw_tte;
static ulong_t sfmmu_dmr_maxbit = DMR_MAXBIT;

static char	*mmu_ctx_kstat_names[] = {
	"mmu_ctx_tsb_exceptions",
	"mmu_ctx_tsb_raise_exception",
	"mmu_ctx_wrap_around",
};

/*
 * Wrapper for vmem_xalloc since vmem_create only allows limited
 * parameters for vm_source_alloc functions.  This function allows us
 * to specify alignment consistent with the size of the object being
 * allocated.
 */
static void *
sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *vmp, size_t size, int vmflag)
{
	return (vmem_xalloc(vmp, size, size, 0, 0, NULL, NULL, vmflag));
}

/* Common code for setting tsb_alloc_hiwater. */
#define	SFMMU_SET_TSB_ALLOC_HIWATER(pages)	tsb_alloc_hiwater = \
		ptob(pages) / tsb_alloc_hiwater_factor

/*
 * Set tsb_max_growsize to allow at most all of physical memory to be mapped by
 * a single TSB.  physmem is the number of physical pages so we need physmem 8K
 * TTEs to represent all those physical pages.  We round this up by using
 * 1<<highbit().  To figure out which size code to use, remember that the size
 * code is just an amount to shift the smallest TSB size to get the size of
 * this TSB.  So we subtract that size, TSB_START_SIZE, from highbit() (or
 * highbit() - 1) to get the size code for the smallest TSB that can represent
 * all of physical memory, while erring on the side of too much.
 *
 * If the computed size code is less than the current tsb_max_growsize, we set
 * tsb_max_growsize to the computed size code.  In the case where the computed
 * size code is greater than tsb_max_growsize, we have these restrictions that
 * apply to increasing tsb_max_growsize:
 *	1) TSBs can't grow larger than the TSB slab size
 *	2) TSBs can't grow larger than UTSB_MAX_SZCODE.
 */
#define	SFMMU_SET_TSB_MAX_GROWSIZE(pages) {				\
	int	i, szc;							\
									\
	i = highbit(pages);						\
	if ((1 << (i - 1)) == (pages))					\
		i--;		/* 2^n case, round down */		\
	szc = i - TSB_START_SIZE;					\
	if (szc < tsb_max_growsize)					\
		tsb_max_growsize = szc;					\
	else if ((szc > tsb_max_growsize) &&				\
	    (szc <= tsb_slab_shift - (TSB_START_SIZE + TSB_ENTRY_SHIFT))) \
		tsb_max_growsize = MIN(szc, UTSB_MAX_SZCODE);		\
}
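
/*
 * Worked example (illustrative only; assumes TSB_START_SIZE == 9,
 * TSB_ENTRY_SHIFT == 4, and the default 4M slabs, i.e. tsb_slab_shift
 * == 22): for pages == 2^21 (16GB of 8K pages), highbit() returns 22
 * and the exact power of two rounds i down to 21, giving szc == 12
 * (a 32M TSB).  Since 12 exceeds 22 - (9 + 4) == 9, the slab-size
 * restriction applies and tsb_max_growsize stays capped at a 4M TSB
 * rather than growing to the 32M a 2^21-entry TSB would need.
 */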

/*
 * Given a pointer to an sfmmu and a TTE size code, return a pointer to the
 * tsb_info which handles that TTE size.
 */
#define	SFMMU_GET_TSBINFO(tsbinfop, sfmmup, tte_szc)	\
	(tsbinfop) = (sfmmup)->sfmmu_tsb;		\
	ASSERT(sfmmu_hat_lock_held(sfmmup));		\
	if ((tte_szc) >= TTE4M)				\
		(tsbinfop) = (tsbinfop)->tsb_next;

/*
 * Return the number of mappings present in the HAT
 * for a particular process and page size.
 */
#define	SFMMU_TTE_CNT(sfmmup, szc)				\
	(sfmmup)->sfmmu_iblk?					\
	    (sfmmup)->sfmmu_ismttecnt[(szc)] +			\
	    (sfmmup)->sfmmu_ttecnt[(szc)] :			\
	    (sfmmup)->sfmmu_ttecnt[(szc)];

/*
 * Macro to use to unload entries from the TSB.
 * It has knowledge of which page sizes get replicated in the TSB
 * and will call the appropriate unload routine for the appropriate size.
 */
#define	SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp)				\
{									\
	int ttesz = get_hblk_ttesz(hmeblkp);				\
	if (ttesz == TTE8K || ttesz == TTE4M) {				\
		sfmmu_unload_tsb(sfmmup, addr, ttesz);			\
	} else {							\
		caddr_t sva = (caddr_t)get_hblk_base(hmeblkp);		\
		caddr_t eva = sva + get_hblk_span(hmeblkp);		\
		ASSERT(addr >= sva && addr < eva);			\
		sfmmu_unload_tsb_range(sfmmup, sva, eva, ttesz);	\
	}								\
}


/* Update tsb_alloc_hiwater after memory is configured. */
/*ARGSUSED*/
static void
sfmmu_update_tsb_post_add(void *arg, pgcnt_t delta_pages)
{
	/* Assumes physmem has already been updated. */
	SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
	SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
}

/*
 * Update tsb_alloc_hiwater before memory is deleted.  We'll do nothing here
 * and update tsb_alloc_hiwater and tsb_max_growsize after the memory is
 * deleted.
 */
/*ARGSUSED*/
static int
sfmmu_update_tsb_pre_del(void *arg, pgcnt_t delta_pages)
{
	return (0);
}

/* Update tsb_alloc_hiwater after memory fails to be unconfigured. */
/*ARGSUSED*/
static void
sfmmu_update_tsb_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
{
	/*
	 * Whether the delete was cancelled or not, just go ahead and update
	 * tsb_alloc_hiwater and tsb_max_growsize.
	 */
	SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
	SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
}

static kphysm_setup_vector_t sfmmu_update_tsb_vec = {
	KPHYSM_SETUP_VECTOR_VERSION,	/* version */
	sfmmu_update_tsb_post_add,	/* post_add */
	sfmmu_update_tsb_pre_del,	/* pre_del */
	sfmmu_update_tsb_post_del	/* post_del */
};


/*
 * HME_BLK HASH PRIMITIVES
 */

/*
 * Enter a hme on the mapping list for page pp.
 * When large pages are more prevalent in the system we might want to
 * keep the mapping list in ascending order by the hment size. For now,
 * small pages are more frequent, so don't slow it down.
 */
#define	HME_ADD(hme, pp)					\
{								\
	ASSERT(sfmmu_mlist_held(pp));				\
								\
	hme->hme_prev = NULL;					\
	hme->hme_next = pp->p_mapping;				\
	hme->hme_page = pp;					\
	if (pp->p_mapping) {					\
		((struct sf_hment *)(pp->p_mapping))->hme_prev = hme;\
		ASSERT(pp->p_share > 0);			\
	} else	{						\
		/* EMPTY */					\
		ASSERT(pp->p_share == 0);			\
	}							\
	pp->p_mapping = hme;					\
	pp->p_share++;						\
}

/*
 * Remove a hme from the mapping list for page pp.
 * If we are unmapping a large translation, we need to make sure that the
 * change is reflected in the corresponding bit of the p_index field.
 */
#define	HME_SUB(hme, pp)					\
{								\
	ASSERT(sfmmu_mlist_held(pp));				\
	ASSERT(hme->hme_page == pp || IS_PAHME(hme));		\
								\
	if (pp->p_mapping == NULL) {				\
		panic("hme_remove - no mappings");		\
	}							\
								\
	membar_stst();	/* ensure previous stores finish */	\
								\
	ASSERT(pp->p_share > 0);				\
	pp->p_share--;						\
								\
	if (hme->hme_prev) {					\
		ASSERT(pp->p_mapping != hme);			\
		ASSERT(hme->hme_prev->hme_page == pp ||		\
			IS_PAHME(hme->hme_prev));		\
		hme->hme_prev->hme_next = hme->hme_next;	\
	} else {						\
		ASSERT(pp->p_mapping == hme);			\
		pp->p_mapping = hme->hme_next;			\
		ASSERT((pp->p_mapping == NULL) ?		\
			(pp->p_share == 0) : 1);		\
	}							\
								\
	if (hme->hme_next) {					\
		ASSERT(hme->hme_next->hme_page == pp ||		\
			IS_PAHME(hme->hme_next));		\
		hme->hme_next->hme_prev = hme->hme_prev;	\
	}							\
								\
	/* zero out the entry */				\
	hme->hme_next = NULL;					\
	hme->hme_prev = NULL;					\
	hme->hme_page = NULL;					\
								\
	if (hme_size(hme) > TTE8K) {				\
		/* remove mappings for remainder of large pg */	\
		sfmmu_rm_large_mappings(pp, hme_size(hme));	\
	}							\
}

/*
 * This function returns the hment given the hme_blk and a vaddr.
 * It assumes addr has already been checked to belong to hme_blk's
 * range.
 */
#define	HBLKTOHME(hment, hmeblkp, addr)					\
{									\
	int index;							\
	HBLKTOHME_IDX(hment, hmeblkp, addr, index)			\
}

/*
 * Version of HBLKTOHME that also returns the index in hmeblkp
 * of the hment.
 */
#define	HBLKTOHME_IDX(hment, hmeblkp, addr, idx)			\
{									\
	ASSERT(in_hblk_range((hmeblkp), (addr)));			\
									\
	if (get_hblk_ttesz(hmeblkp) == TTE8K) {				\
		idx = (((uintptr_t)(addr) >> MMU_PAGESHIFT) & (NHMENTS-1)); \
	} else								\
		idx = 0;						\
									\
	(hment) = &(hmeblkp)->hblk_hme[idx];				\
}

/*
 * Disable any page sizes not supported by the CPU
 */
void
hat_init_pagesizes()
{
	int		i;

	mmu_exported_page_sizes = 0;
	for (i = TTE8K; i < max_mmu_page_sizes; i++) {

		szc_2_userszc[i] = (uint_t)-1;
		userszc_2_szc[i] = (uint_t)-1;

		if ((mmu_exported_pagesize_mask & (1 << i)) == 0) {
			disable_large_pages |= (1 << i);
		} else {
			szc_2_userszc[i] = mmu_exported_page_sizes;
			userszc_2_szc[mmu_exported_page_sizes] = i;
			mmu_exported_page_sizes++;
		}
	}

	disable_ism_large_pages |= disable_large_pages;
	disable_auto_data_large_pages = disable_large_pages;
	disable_auto_text_large_pages = disable_large_pages;
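
	/*
	 * Illustrative note (not from the original source): these are
	 * bit vectors indexed by TTE size code, so a CPU that cannot
	 * use 512K pages leaves bit (1 << TTE512K) == 0x4 set here,
	 * and a platform could disable both 512K and 4M pages with
	 * the value 0xc.
	 */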

	/*
	 * Initialize mmu-specific large page sizes.
	 */
	if (&mmu_large_pages_disabled) {
		disable_large_pages |= mmu_large_pages_disabled(HAT_LOAD);
		disable_ism_large_pages |=
		    mmu_large_pages_disabled(HAT_LOAD_SHARE);
		disable_auto_data_large_pages |=
		    mmu_large_pages_disabled(HAT_AUTO_DATA);
		disable_auto_text_large_pages |=
		    mmu_large_pages_disabled(HAT_AUTO_TEXT);
	}
}

/*
 * Initialize the hardware address translation structures.
 */
void
hat_init(void)
{
	int		i;
	uint_t		sz;
	uint_t		maxtsb;
	size_t		size;

	hat_lock_init();
	hat_kstat_init();

	/*
	 * Hardware-only bits in a TTE
	 */
	MAKE_TTE_MASK(&hw_tte);

	hat_init_pagesizes();

	/* Initialize the hash locks */
	for (i = 0; i < khmehash_num; i++) {
		mutex_init(&khme_hash[i].hmehash_mutex, NULL,
		    MUTEX_DEFAULT, NULL);
	}
	for (i = 0; i < uhmehash_num; i++) {
		mutex_init(&uhme_hash[i].hmehash_mutex, NULL,
		    MUTEX_DEFAULT, NULL);
	}
	khmehash_num--;		/* make sure counter starts from 0 */
	uhmehash_num--;		/* make sure counter starts from 0 */

	/*
	 * Allocate context domain structures.
	 *
	 * A platform may choose to modify max_mmu_ctxdoms in
	 * set_platform_defaults(). If a platform does not define
	 * a set_platform_defaults() or does not choose to modify
	 * max_mmu_ctxdoms, it gets one MMU context domain for every CPU.
	 *
	 * For sun4v, there will be one global context domain, this is to
	 * avoid the ldom cpu substitution problem.
	 *
	 * For all platforms that have CPUs sharing MMUs, this
	 * value must be defined.
	 */
	if (max_mmu_ctxdoms == 0) {
#ifndef sun4v
		max_mmu_ctxdoms = max_ncpus;
#else /* sun4v */
		max_mmu_ctxdoms = 1;
#endif /* sun4v */
	}

	size = max_mmu_ctxdoms * sizeof (mmu_ctx_t *);
	mmu_ctxs_tbl = kmem_zalloc(size, KM_SLEEP);

	/* mmu_ctx_t is 64 bytes aligned */
	mmuctxdom_cache = kmem_cache_create("mmuctxdom_cache",
	    sizeof (mmu_ctx_t), 64, NULL, NULL, NULL, NULL, NULL, 0);
	/*
	 * MMU context domain initialization for the Boot CPU.
	 * This needs the context domains array allocated above.
	 */
	mutex_enter(&cpu_lock);
	sfmmu_cpu_init(CPU);
	mutex_exit(&cpu_lock);

	/*
	 * Initialize ism mapping list lock.
	 */

	mutex_init(&ism_mlist_lock, NULL, MUTEX_DEFAULT, NULL);

	/*
	 * Each sfmmu structure carries an array of MMU context info
	 * structures, one per context domain. The size of this array depends
	 * on the maximum number of context domains. So, the size of the
	 * sfmmu structure varies per platform.
	 *
	 * sfmmu is allocated from static arena, because trap
	 * handler at TL > 0 is not allowed to touch kernel relocatable
	 * memory. sfmmu's alignment is changed to 64 bytes from
	 * default 8 bytes, as the lower 6 bits will be used to pass
	 * pgcnt to vtag_flush_pgcnt_tl1.
	 */
	size = sizeof (sfmmu_t) + sizeof (sfmmu_ctx_t) * (max_mmu_ctxdoms - 1);

	sfmmuid_cache = kmem_cache_create("sfmmuid_cache", size,
	    64, sfmmu_idcache_constructor, sfmmu_idcache_destructor,
	    NULL, NULL, static_arena, 0);

	sfmmu_tsbinfo_cache = kmem_cache_create("sfmmu_tsbinfo_cache",
	    sizeof (struct tsb_info), 0, NULL, NULL, NULL, NULL, NULL, 0);

	/*
	 * Since we only use the tsb8k cache to "borrow" pages for TSBs
	 * from the heap when low on memory or when TSB_FORCEALLOC is
	 * specified, don't use magazines to cache them--we want to return
	 * them to the system as quickly as possible.
	 */
	sfmmu_tsb8k_cache = kmem_cache_create("sfmmu_tsb8k_cache",
	    MMU_PAGESIZE, MMU_PAGESIZE, NULL, NULL, NULL, NULL,
	    static_arena, KMC_NOMAGAZINE);

	/*
	 * Set tsb_alloc_hiwater to 1/tsb_alloc_hiwater_factor of physical
	 * memory, which corresponds to the old static reserve for TSBs.
	 * tsb_alloc_hiwater_factor defaults to 32.  This caps the amount of
	 * memory we'll allocate for TSB slabs; beyond this point TSB
	 * allocations will be taken from the kernel heap (via
	 * sfmmu_tsb8k_cache) and will be throttled as would any other kmem
	 * consumer.
	 */
	if (tsb_alloc_hiwater_factor == 0) {
		tsb_alloc_hiwater_factor = TSB_ALLOC_HIWATER_FACTOR_DEFAULT;
	}
	SFMMU_SET_TSB_ALLOC_HIWATER(physmem);

	/* Set tsb_max_growsize. */
	SFMMU_SET_TSB_MAX_GROWSIZE(physmem);

	/*
	 * On smaller memory systems, allocate TSB memory in smaller chunks
	 * than the default 4M slab size. We also honor disable_large_pages
	 * here.
	 *
	 * The trap handlers need to be patched with the final slab shift,
	 * since they need to be able to construct the TSB pointer at runtime.
	 */
	if (tsb_max_growsize <= TSB_512K_SZCODE)
		tsb_slab_ttesz = TTE512K;

	for (sz = tsb_slab_ttesz; sz > 0; sz--) {
		if (!(disable_large_pages & (1 << sz)))
			break;
	}

	tsb_slab_ttesz = sz;
	tsb_slab_shift = MMU_PAGESHIFT + (sz << 1) + sz;
	tsb_slab_size = 1 << tsb_slab_shift;
	tsb_slab_mask = (1 << (tsb_slab_shift - MMU_PAGESHIFT)) - 1;

	maxtsb = tsb_slab_shift - (TSB_START_SIZE + TSB_ENTRY_SHIFT);
	if (tsb_max_growsize > maxtsb)
		tsb_max_growsize = maxtsb;

	/*
	 * Set up memory callback to update tsb_alloc_hiwater and
	 * tsb_max_growsize.
	 */
	i = kphysm_setup_func_register(&sfmmu_update_tsb_vec, (void *) 0);
	ASSERT(i == 0);

	/*
	 * kmem_tsb_arena is the source from which large TSB slabs are
	 * drawn.  The quantum of this arena corresponds to the largest
	 * TSB size we can dynamically allocate for user processes.
	 * Currently it must also be a supported page size since we
	 * use exactly one translation entry to map each slab page.
	 *
	 * The per-lgroup kmem_tsb_default_arena arenas are the arenas from
	 * which most TSBs are allocated.  Since most TSB allocations are
	 * typically 8K we have a kmem cache we stack on top of each
	 * kmem_tsb_default_arena to speed up those allocations.
	 *
	 * Note the two-level scheme of arenas is required only
	 * because vmem_create doesn't allow us to specify alignment
	 * requirements.  If this ever changes the code could be
	 * simplified to use only one level of arenas.
	 */
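	/*
	 * Sketch of the resulting hierarchy (illustrative only, assuming
	 * the default 4M slabs and 8K pages):
	 *
	 *   heap_arena
	 *     -> kmem_tsb_arena		(4M quantum, 4M-aligned slabs)
	 *       -> kmem_tsb_default_arena	(8K quantum; per-lgroup when
	 *					 tsb_lgrp_affinity is set)
	 *         -> sfmmu_tsb_cache		(8K kmem cache for the common
	 *					 single-page TSB allocation)
	 */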
	kmem_tsb_arena = vmem_create("kmem_tsb", NULL, 0, tsb_slab_size,
	    sfmmu_vmem_xalloc_aligned_wrapper, vmem_xfree, heap_arena,
	    0, VM_SLEEP);

	if (tsb_lgrp_affinity) {
		char s[50];
		for (i = 0; i < NLGRPS_MAX; i++) {
			(void) sprintf(s, "kmem_tsb_lgrp%d", i);
			kmem_tsb_default_arena[i] =
			    vmem_create(s, NULL, 0, PAGESIZE,
			    sfmmu_tsb_segkmem_alloc, sfmmu_tsb_segkmem_free,
			    kmem_tsb_arena, 0, VM_SLEEP | VM_BESTFIT);
			(void) sprintf(s, "sfmmu_tsb_lgrp%d_cache", i);
			sfmmu_tsb_cache[i] = kmem_cache_create(s, PAGESIZE,
			    PAGESIZE, NULL, NULL, NULL, NULL,
			    kmem_tsb_default_arena[i], 0);
		}
	} else {
		kmem_tsb_default_arena[0] = vmem_create("kmem_tsb_default",
		    NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc,
		    sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0,
		    VM_SLEEP | VM_BESTFIT);

		sfmmu_tsb_cache[0] = kmem_cache_create("sfmmu_tsb_cache",
		    PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL,
		    kmem_tsb_default_arena[0], 0);
	}

	sfmmu8_cache = kmem_cache_create("sfmmu8_cache", HME8BLK_SZ,
	    HMEBLK_ALIGN, sfmmu_hblkcache_constructor,
	    sfmmu_hblkcache_destructor,
	    sfmmu_hblkcache_reclaim, (void *)HME8BLK_SZ,
	    hat_memload_arena, KMC_NOHASH);

	hat_memload1_arena = vmem_create("hat_memload1", NULL, 0, PAGESIZE,
	    segkmem_alloc_permanent, segkmem_free, heap_arena, 0, VM_SLEEP);

	sfmmu1_cache = kmem_cache_create("sfmmu1_cache", HME1BLK_SZ,
	    HMEBLK_ALIGN, sfmmu_hblkcache_constructor,
	    sfmmu_hblkcache_destructor,
	    NULL, (void *)HME1BLK_SZ,
	    hat_memload1_arena, KMC_NOHASH);

	pa_hment_cache = kmem_cache_create("pa_hment_cache", PAHME_SZ,
	    0, NULL, NULL, NULL, NULL, static_arena, KMC_NOHASH);

	ism_blk_cache = kmem_cache_create("ism_blk_cache",
	    sizeof (ism_blk_t), ecache_alignsize, NULL, NULL,
	    NULL, NULL, static_arena, KMC_NOHASH);

	ism_ment_cache = kmem_cache_create("ism_ment_cache",
	    sizeof (ism_ment_t), 0, NULL, NULL,
	    NULL, NULL, NULL, 0);

	/*
	 * We grab the first hat for the kernel,
	 */
	AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER);
	kas.a_hat = hat_alloc(&kas);
	AS_LOCK_EXIT(&kas, &kas.a_lock);

	/*
	 * Initialize hblk_reserve.
	 */
	((struct hme_blk *)hblk_reserve)->hblk_nextpa =
	    va_to_pa((caddr_t)hblk_reserve);

#ifndef UTSB_PHYS
	/*
	 * Reserve some kernel virtual address space for the locked TTEs
	 * that allow us to probe the TSB from TL>0.
	 */
	utsb_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size,
	    0, 0, NULL, NULL, VM_SLEEP);
	utsb4m_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size,
	    0, 0, NULL, NULL, VM_SLEEP);
#endif

#ifdef VAC
	/*
	 * The big page VAC handling code assumes VAC
	 * will not be bigger than the smallest big
	 * page- which is 64K.
	 */
	if (TTEPAGES(TTE64K) < CACHE_NUM_COLOR) {
		cmn_err(CE_PANIC, "VAC too big!");
	}
#endif

	(void) xhat_init();

	uhme_hash_pa = va_to_pa(uhme_hash);
	khme_hash_pa = va_to_pa(khme_hash);

	/*
	 * Initialize relocation locks. kpr_suspendlock is held
	 * at PIL_MAX to prevent interrupts from pinning the holder
	 * of a suspended TTE which may access it leading to a
	 * deadlock condition.
	 */
	mutex_init(&kpr_mutex, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&kpr_suspendlock, NULL, MUTEX_SPIN, (void *)PIL_MAX);

	/*
	 * Pre-allocate hrm_hashtab before enabling the collection of
	 * refmod statistics.  Allocating on the fly would mean us
	 * running the risk of suffering recursive mutex enters or
	 * deadlocks.
	 */
	hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *),
	    KM_SLEEP);
}

/*
 * Initialize locking for the hat layer, called early during boot.
 */
static void
hat_lock_init()
{
	int i;

	/*
	 * initialize the array of mutexes protecting a page's mapping
	 * list and p_nrm field.
	 */
	for (i = 0; i < mml_table_sz; i++)
		mutex_init(&mml_table[i], NULL, MUTEX_DEFAULT, NULL);

	if (kpm_enable) {
		for (i = 0; i < kpmp_table_sz; i++) {
			mutex_init(&kpmp_table[i].khl_mutex, NULL,
			    MUTEX_DEFAULT, NULL);
		}
	}

	/*
	 * Initialize array of mutex locks that protects sfmmu fields and
	 * TSB lists.
	 */
	for (i = 0; i < SFMMU_NUM_LOCK; i++)
		mutex_init(HATLOCK_MUTEXP(&hat_lock[i]), NULL, MUTEX_DEFAULT,
		    NULL);
}

extern caddr_t kmem64_base, kmem64_end;

#define	SFMMU_KERNEL_MAXVA \
	(kmem64_base ? (uintptr_t)kmem64_end : (SYSLIMIT))

/*
 * Allocate a hat structure.
 * Called when an address space first uses a hat.
 */
struct hat *
hat_alloc(struct as *as)
{
	sfmmu_t *sfmmup;
	int i;
	uint64_t cnum;
	extern uint_t get_color_start(struct as *);

	ASSERT(AS_WRITE_HELD(as, &as->a_lock));
	sfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP);
	sfmmup->sfmmu_as = as;
	sfmmup->sfmmu_flags = 0;
	LOCK_INIT_CLEAR(&sfmmup->sfmmu_ctx_lock);

	if (as == &kas) {
		ksfmmup = sfmmup;
		sfmmup->sfmmu_cext = 0;
		cnum = KCONTEXT;

		sfmmup->sfmmu_clrstart = 0;
		sfmmup->sfmmu_tsb = NULL;
		/*
		 * hat_kern_setup() will call sfmmu_init_ktsbinfo()
		 * to setup tsb_info for ksfmmup.
		 */
	} else {

		/*
		 * Just set to invalid ctx. When it faults, it will
		 * get a valid ctx. This would avoid the situation
		 * where we get a ctx, but it gets stolen and then
		 * we fault when we try to run and so have to get
		 * another ctx.
		 */
		sfmmup->sfmmu_cext = 0;
		cnum = INVALID_CONTEXT;

		/* initialize original physical page coloring bin */
		sfmmup->sfmmu_clrstart = get_color_start(as);
#ifdef DEBUG
		if (tsb_random_size) {
			uint32_t randval = (uint32_t)gettick() >> 4;
			int size = randval % (tsb_max_growsize + 1);

			/* choose a random tsb size for stress testing */
			(void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb, size,
			    TSB8K|TSB64K|TSB512K, 0, sfmmup);
		} else
#endif /* DEBUG */
			(void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb,
			    default_tsb_size,
			    TSB8K|TSB64K|TSB512K, 0, sfmmup);
		sfmmup->sfmmu_flags = HAT_SWAPPED;
		ASSERT(sfmmup->sfmmu_tsb != NULL);
	}

	ASSERT(max_mmu_ctxdoms > 0);
	for (i = 0; i < max_mmu_ctxdoms; i++) {
		sfmmup->sfmmu_ctxs[i].cnum = cnum;
		sfmmup->sfmmu_ctxs[i].gnum = 0;
	}

	sfmmu_setup_tsbinfo(sfmmup);
	for (i = 0; i < max_mmu_page_sizes; i++) {
		sfmmup->sfmmu_ttecnt[i] = 0;
		sfmmup->sfmmu_ismttecnt[i] = 0;
		sfmmup->sfmmu_pgsz[i] = TTE8K;
	}

	sfmmup->sfmmu_iblk = NULL;
	sfmmup->sfmmu_ismhat = 0;
	sfmmup->sfmmu_ismblkpa = (uint64_t)-1;
	if (sfmmup == ksfmmup) {
		CPUSET_ALL(sfmmup->sfmmu_cpusran);
	} else {
		CPUSET_ZERO(sfmmup->sfmmu_cpusran);
	}
	sfmmup->sfmmu_free = 0;
	sfmmup->sfmmu_rmstat = 0;
	sfmmup->sfmmu_clrbin = sfmmup->sfmmu_clrstart;
	sfmmup->sfmmu_xhat_provider = NULL;
	cv_init(&sfmmup->sfmmu_tsb_cv, NULL, CV_DEFAULT, NULL);
	return (sfmmup);
}

/*
 * Create per-MMU context domain kstats for a given MMU ctx.
 */
static void
sfmmu_mmu_kstat_create(mmu_ctx_t *mmu_ctxp)
{
	mmu_ctx_stat_t	stat;
	kstat_t		*mmu_kstat;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(mmu_ctxp->mmu_kstat == NULL);

	mmu_kstat = kstat_create("unix", mmu_ctxp->mmu_idx, "mmu_ctx",
	    "hat", KSTAT_TYPE_NAMED, MMU_CTX_NUM_STATS, KSTAT_FLAG_VIRTUAL);

	if (mmu_kstat == NULL) {
		cmn_err(CE_WARN, "kstat_create for MMU %d failed",
		    mmu_ctxp->mmu_idx);
	} else {
		mmu_kstat->ks_data = mmu_ctxp->mmu_kstat_data;
		for (stat = 0; stat < MMU_CTX_NUM_STATS; stat++)
			kstat_named_init(&mmu_ctxp->mmu_kstat_data[stat],
			    mmu_ctx_kstat_names[stat], KSTAT_DATA_INT64);
		mmu_ctxp->mmu_kstat = mmu_kstat;
		kstat_install(mmu_kstat);
	}
}

/*
 * plat_cpuid_to_mmu_ctx_info() is a platform interface that returns MMU
 * context domain information for a given CPU. If a platform does not
 * specify that interface, then the function below is used instead to return
 * default information. The defaults are as follows:
 *
 *	- For sun4u systems there's one MMU context domain per CPU.
 *	  This default is used by all sun4u systems except OPL. OPL systems
 *	  provide platform specific interface to map CPU ids to MMU ids
 *	  because on OPL more than 1 CPU shares a single MMU.
 *	  Note that on sun4v, there is one global context domain for
 *	  the entire system. This is to avoid running into a potential
 *	  problem with the ldom physical cpu substitution feature.
 *	- The number of MMU context IDs supported on any CPU in the
 *	  system is 8K.
 */
/*ARGSUSED*/
static void
sfmmu_cpuid_to_mmu_ctx_info(processorid_t cpuid, mmu_ctx_info_t *infop)
{
	infop->mmu_nctxs = nctxs;
#ifndef sun4v
	infop->mmu_idx = cpu[cpuid]->cpu_seqid;
#else /* sun4v */
	infop->mmu_idx = 0;
#endif /* sun4v */
}

/*
 * Called during CPU initialization to set the MMU context-related information
 * for a CPU.
 *
 * cpu_lock serializes accesses to mmu_ctxs and mmu_saved_gnum.
 */
void
sfmmu_cpu_init(cpu_t *cp)
{
	mmu_ctx_info_t	info;
	mmu_ctx_t	*mmu_ctxp;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (&plat_cpuid_to_mmu_ctx_info == NULL)
		sfmmu_cpuid_to_mmu_ctx_info(cp->cpu_id, &info);
	else
		plat_cpuid_to_mmu_ctx_info(cp->cpu_id, &info);

	ASSERT(info.mmu_idx < max_mmu_ctxdoms);

	if ((mmu_ctxp = mmu_ctxs_tbl[info.mmu_idx]) == NULL) {
		/* Each mmu_ctx is cacheline aligned. */
		mmu_ctxp = kmem_cache_alloc(mmuctxdom_cache, KM_SLEEP);
		bzero(mmu_ctxp, sizeof (mmu_ctx_t));

		mutex_init(&mmu_ctxp->mmu_lock, NULL, MUTEX_SPIN,
		    (void *)ipltospl(DISP_LEVEL));
		mmu_ctxp->mmu_idx = info.mmu_idx;
		mmu_ctxp->mmu_nctxs = info.mmu_nctxs;
		/*
		 * Globally for lifetime of a system,
		 * gnum must always increase.
		 * mmu_saved_gnum is protected by the cpu_lock.
		 */
		mmu_ctxp->mmu_gnum = mmu_saved_gnum + 1;
		mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS;

		sfmmu_mmu_kstat_create(mmu_ctxp);

		mmu_ctxs_tbl[info.mmu_idx] = mmu_ctxp;
	} else {
		ASSERT(mmu_ctxp->mmu_idx == info.mmu_idx);
	}

	/*
	 * The mmu_lock is acquired here to prevent races with
	 * the wrap-around code.
	 */
	mutex_enter(&mmu_ctxp->mmu_lock);


	mmu_ctxp->mmu_ncpus++;
	CPUSET_ADD(mmu_ctxp->mmu_cpuset, cp->cpu_id);
	CPU_MMU_IDX(cp) = info.mmu_idx;
	CPU_MMU_CTXP(cp) = mmu_ctxp;

	mutex_exit(&mmu_ctxp->mmu_lock);
}

/*
 * Called to perform MMU context-related cleanup for a CPU.
 */
void
sfmmu_cpu_cleanup(cpu_t *cp)
{
	mmu_ctx_t	*mmu_ctxp;

	ASSERT(MUTEX_HELD(&cpu_lock));

	mmu_ctxp = CPU_MMU_CTXP(cp);
	ASSERT(mmu_ctxp != NULL);

	/*
	 * The mmu_lock is acquired here to prevent races with
	 * the wrap-around code.
	 */
	mutex_enter(&mmu_ctxp->mmu_lock);

	CPU_MMU_CTXP(cp) = NULL;

	CPUSET_DEL(mmu_ctxp->mmu_cpuset, cp->cpu_id);
	if (--mmu_ctxp->mmu_ncpus == 0) {
		mmu_ctxs_tbl[mmu_ctxp->mmu_idx] = NULL;
		mutex_exit(&mmu_ctxp->mmu_lock);
		mutex_destroy(&mmu_ctxp->mmu_lock);

		if (mmu_ctxp->mmu_kstat)
			kstat_delete(mmu_ctxp->mmu_kstat);

		/* mmu_saved_gnum is protected by the cpu_lock. */
		if (mmu_saved_gnum < mmu_ctxp->mmu_gnum)
			mmu_saved_gnum = mmu_ctxp->mmu_gnum;

		kmem_cache_free(mmuctxdom_cache, mmu_ctxp);

		return;
	}

	mutex_exit(&mmu_ctxp->mmu_lock);
}

/*
 * hat_setup() makes an address space context the current active one.
 * In sfmmu this translates to setting the secondary context with the
 * corresponding context.
 */
void
hat_setup(struct hat *sfmmup, int allocflag)
{
	hatlock_t *hatlockp;

	/* Init needs some special treatment. */
	if (allocflag == HAT_INIT) {
		/*
		 * Make sure that we have
		 * 1. a TSB
		 * 2. a valid ctx that doesn't get stolen after this point.
		 */
		hatlockp = sfmmu_hat_enter(sfmmup);

		/*
		 * Swap in the TSB.  hat_init() allocates tsbinfos without
		 * TSBs, but we need one for init, since the kernel does some
		 * special things to set up its stack and needs the TSB to
		 * resolve page faults.
		 */
		sfmmu_tsb_swapin(sfmmup, hatlockp);

		sfmmu_get_ctx(sfmmup);

		sfmmu_hat_exit(hatlockp);
	} else {
		ASSERT(allocflag == HAT_ALLOC);

		hatlockp = sfmmu_hat_enter(sfmmup);
		kpreempt_disable();

		CPUSET_ADD(sfmmup->sfmmu_cpusran, CPU->cpu_id);

		/*
		 * sfmmu_setctx_sec takes <pgsz|cnum> as a parameter,
		 * pagesize bits don't matter in this case since we are passing
		 * INVALID_CONTEXT to it.
		 */
		sfmmu_setctx_sec(INVALID_CONTEXT);
		sfmmu_clear_utsbinfo();

		kpreempt_enable();
		sfmmu_hat_exit(hatlockp);
	}
}

/*
 * Free all the translation resources for the specified address space.
 * Called from as_free when an address space is being destroyed.
 */
void
hat_free_start(struct hat *sfmmup)
{
	ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
	ASSERT(sfmmup != ksfmmup);
	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);

	sfmmup->sfmmu_free = 1;
}

void
hat_free_end(struct hat *sfmmup)
{
	int i;

	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
	if (sfmmup->sfmmu_ismhat) {
		for (i = 0; i < mmu_page_sizes; i++) {
			sfmmup->sfmmu_ttecnt[i] = 0;
			sfmmup->sfmmu_ismttecnt[i] = 0;
		}
	} else {
		/* EMPTY */
		ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0);
		ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0);
		ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0);
		ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0);
		ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
		ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
	}

	if (sfmmup->sfmmu_rmstat) {
		hat_freestat(sfmmup->sfmmu_as, NULL);
	}

	while (sfmmup->sfmmu_tsb != NULL) {
		struct tsb_info *next = sfmmup->sfmmu_tsb->tsb_next;
		sfmmu_tsbinfo_free(sfmmup->sfmmu_tsb);
		sfmmup->sfmmu_tsb = next;
	}
	sfmmu_free_sfmmu(sfmmup);

	kmem_cache_free(sfmmuid_cache, sfmmup);
}

/*
 * Set up any translation structures, for the specified address space,
 * that are needed or preferred when the process is being swapped in.
 */
/* ARGSUSED */
void
hat_swapin(struct hat *hat)
{
	ASSERT(hat->sfmmu_xhat_provider == NULL);
}

/*
 * Free all of the translation resources, for the specified address space,
 * that can be freed while the process is swapped out. Called from as_swapout.
 * Also, free up the ctx that this process was using.
 */
void
hat_swapout(struct hat *sfmmup)
{
	struct hmehash_bucket *hmebp;
	struct hme_blk *hmeblkp;
	struct hme_blk *pr_hblk = NULL;
	struct hme_blk *nx_hblk;
	int i;
	uint64_t hblkpa, prevpa, nx_pa;
	struct hme_blk *list = NULL;
	hatlock_t *hatlockp;
	struct tsb_info *tsbinfop;
	struct free_tsb {
		struct free_tsb *next;
		struct tsb_info *tsbinfop;
	};			/* free list of TSBs */
	struct free_tsb *freelist, *last, *next;

	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
	SFMMU_STAT(sf_swapout);

	/*
	 * There is no way to go from an as to all its translations in sfmmu.
	 * Here is one of the times when we take the big hit and traverse
	 * the hash looking for hme_blks to free up.  Not only do we free up
	 * this as's hme_blks but all those that are free.  We are obviously
	 * swapping because we need memory so let's free up as much
	 * as we can.
	 *
	 * Note that we don't flush TLB/TSB here -- it's not necessary
	 * because:
	 *  1) we free the ctx we're using and throw away the TSB(s);
	 *  2) processes aren't runnable while being swapped out.
	 */
	ASSERT(sfmmup != KHATID);
	for (i = 0; i <= UHMEHASH_SZ; i++) {
		hmebp = &uhme_hash[i];
		SFMMU_HASH_LOCK(hmebp);
		hmeblkp = hmebp->hmeblkp;
		hblkpa = hmebp->hmeh_nextpa;
		prevpa = 0;
		pr_hblk = NULL;
		while (hmeblkp) {

			ASSERT(!hmeblkp->hblk_xhat_bit);

			if ((hmeblkp->hblk_tag.htag_id == sfmmup) &&
			    !hmeblkp->hblk_shw_bit && !hmeblkp->hblk_lckcnt) {
				(void) sfmmu_hblk_unload(sfmmup, hmeblkp,
				    (caddr_t)get_hblk_base(hmeblkp),
				    get_hblk_endaddr(hmeblkp),
				    NULL, HAT_UNLOAD);
			}
			nx_hblk = hmeblkp->hblk_next;
			nx_pa = hmeblkp->hblk_nextpa;
			if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
				ASSERT(!hmeblkp->hblk_lckcnt);
				sfmmu_hblk_hash_rm(hmebp, hmeblkp,
				    prevpa, pr_hblk);
				sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list);
			} else {
				pr_hblk = hmeblkp;
				prevpa = hblkpa;
			}
			hmeblkp = nx_hblk;
			hblkpa = nx_pa;
		}
		SFMMU_HASH_UNLOCK(hmebp);
	}

	sfmmu_hblks_list_purge(&list);

	/*
	 * Now free up the ctx so that others can reuse it.
	 */
	hatlockp = sfmmu_hat_enter(sfmmup);

	sfmmu_invalidate_ctx(sfmmup);

	/*
	 * Free TSBs, but not tsbinfos, and set SWAPPED flag.
	 * If TSBs were never swapped in, just return.
	 * This implies that we don't support partial swapping
	 * of TSBs -- either all are swapped out, or none are.
	 *
	 * We must hold the HAT lock here to prevent racing with another
	 * thread trying to unmap TTEs from the TSB or running the post-
	 * relocator after relocating the TSB's memory.  Unfortunately, we
	 * can't free memory while holding the HAT lock or we could
	 * deadlock, so we build a list of TSBs to be freed after marking
	 * the tsbinfos as swapped out and free them after dropping the
	 * lock.
	 */
	if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
		sfmmu_hat_exit(hatlockp);
		return;
	}

	SFMMU_FLAGS_SET(sfmmup, HAT_SWAPPED);
	last = freelist = NULL;
	for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
	    tsbinfop = tsbinfop->tsb_next) {
		ASSERT((tsbinfop->tsb_flags & TSB_SWAPPED) == 0);

		/*
		 * Cast the TSB into a struct free_tsb and put it on the free
		 * list.
		 */
		if (freelist == NULL) {
			last = freelist = (struct free_tsb *)tsbinfop->tsb_va;
		} else {
			last->next = (struct free_tsb *)tsbinfop->tsb_va;
			last = last->next;
		}
		last->next = NULL;
		last->tsbinfop = tsbinfop;
		tsbinfop->tsb_flags |= TSB_SWAPPED;
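		/*
		 * Illustrative note (not from the original source): this
		 * overlay is safe because even the smallest TSB occupies
		 * a full 8K page, far larger than the two-pointer struct
		 * free_tsb, and the TSB's contents are about to be
		 * discarded anyway.
		 */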
1741 */ 1742 tsbinfop->tsb_tte.ll = 0; 1743 } 1744 1745 #ifdef sun4v 1746 if (freelist) 1747 sfmmu_invalidate_tsbinfo(sfmmup); 1748 #endif /* sun4v */ 1749 1750 /* Now we can drop the lock and free the TSB memory. */ 1751 sfmmu_hat_exit(hatlockp); 1752 for (; freelist != NULL; freelist = next) { 1753 next = freelist->next; 1754 sfmmu_tsb_free(freelist->tsbinfop); 1755 } 1756 } 1757 1758 /* 1759 * Duplicate the translations of an as into another newas 1760 */ 1761 /* ARGSUSED */ 1762 int 1763 hat_dup(struct hat *hat, struct hat *newhat, caddr_t addr, size_t len, 1764 uint_t flag) 1765 { 1766 ASSERT(hat->sfmmu_xhat_provider == NULL); 1767 ASSERT((flag == 0) || (flag == HAT_DUP_ALL) || (flag == HAT_DUP_COW)); 1768 1769 if (flag == HAT_DUP_COW) { 1770 panic("hat_dup: HAT_DUP_COW not supported"); 1771 } 1772 return (0); 1773 } 1774 1775 /* 1776 * Set up addr to map to page pp with protection prot. 1777 * As an optimization we also load the TSB with the 1778 * corresponding tte but it is no big deal if the tte gets kicked out. 1779 */ 1780 void 1781 hat_memload(struct hat *hat, caddr_t addr, struct page *pp, 1782 uint_t attr, uint_t flags) 1783 { 1784 tte_t tte; 1785 1786 1787 ASSERT(hat != NULL); 1788 ASSERT(PAGE_LOCKED(pp)); 1789 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 1790 ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG)); 1791 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 1792 1793 if (PP_ISFREE(pp)) { 1794 panic("hat_memload: loading a mapping to free page %p", 1795 (void *)pp); 1796 } 1797 1798 if (hat->sfmmu_xhat_provider) { 1799 XHAT_MEMLOAD(hat, addr, pp, attr, flags); 1800 return; 1801 } 1802 1803 ASSERT((hat == ksfmmup) || 1804 AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock)); 1805 1806 if (flags & ~SFMMU_LOAD_ALLFLAG) 1807 cmn_err(CE_NOTE, "hat_memload: unsupported flags %d", 1808 flags & ~SFMMU_LOAD_ALLFLAG); 1809 1810 if (hat->sfmmu_rmstat) 1811 hat_resvstat(MMU_PAGESIZE, hat->sfmmu_as, addr); 1812 1813 #if defined(SF_ERRATA_57) 1814 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) && 1815 (addr < errata57_limit) && (attr & PROT_EXEC) && 1816 !(flags & HAT_LOAD_SHARE)) { 1817 cmn_err(CE_WARN, "hat_memload: illegal attempt to make user " 1818 " page executable"); 1819 attr &= ~PROT_EXEC; 1820 } 1821 #endif 1822 1823 sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K); 1824 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, flags); 1825 1826 /* 1827 * Check TSB and TLB page sizes. 1828 */ 1829 if ((flags & HAT_LOAD_SHARE) == 0) { 1830 sfmmu_check_page_sizes(hat, 1); 1831 } 1832 } 1833 1834 /* 1835 * hat_devload can be called to map real memory (e.g. 1836 * /dev/kmem) and even though hat_devload will determine pf is 1837 * for memory, it will be unable to get a shared lock on the 1838 * page (because someone else has it exclusively) and will 1839 * pass dp = NULL. If tteload doesn't get a non-NULL 1840 * page pointer it can't cache memory. 
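 *
 * A typical (purely illustrative) use by a driver mapping a device
 * page into kernel space; the variable names here are hypothetical:
 *
 *	hat_devload(kas.a_hat, kvaddr, MMU_PAGESIZE, pfn,
 *	    PROT_READ | PROT_WRITE | HAT_NOSYNC | HAT_STRICTORDER,
 *	    HAT_LOAD_LOCK);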
1841  */
1842 void
1843 hat_devload(struct hat *hat, caddr_t addr, size_t len, pfn_t pfn,
1844 	uint_t attr, int flags)
1845 {
1846 	tte_t tte;
1847 	struct page *pp = NULL;
1848 	int use_lgpg = 0;
1849 
1850 	ASSERT(hat != NULL);
1851 
1852 	if (hat->sfmmu_xhat_provider) {
1853 		XHAT_DEVLOAD(hat, addr, len, pfn, attr, flags);
1854 		return;
1855 	}
1856 
1857 	ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG));
1858 	ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
1859 	ASSERT((hat == ksfmmup) ||
1860 	    AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock));
1861 	if (len == 0)
1862 		panic("hat_devload: zero len");
1863 	if (flags & ~SFMMU_LOAD_ALLFLAG)
1864 		cmn_err(CE_NOTE, "hat_devload: unsupported flags %d",
1865 		    flags & ~SFMMU_LOAD_ALLFLAG);
1866 
1867 #if defined(SF_ERRATA_57)
1868 	if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
1869 	    (addr < errata57_limit) && (attr & PROT_EXEC) &&
1870 	    !(flags & HAT_LOAD_SHARE)) {
1871 		cmn_err(CE_WARN, "hat_devload: illegal attempt to make user "
1872 		    " page executable");
1873 		attr &= ~PROT_EXEC;
1874 	}
1875 #endif
1876 
1877 	/*
1878 	 * If it's a memory page find its pp
1879 	 */
1880 	if (!(flags & HAT_LOAD_NOCONSIST) && pf_is_memory(pfn)) {
1881 		pp = page_numtopp_nolock(pfn);
1882 		if (pp == NULL) {
1883 			flags |= HAT_LOAD_NOCONSIST;
1884 		} else {
1885 			if (PP_ISFREE(pp)) {
1886 				panic("hat_devload: loading "
1887 				    "a mapping to free page %p",
1888 				    (void *)pp);
1889 			}
1890 			if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) {
1891 				panic("hat_devload: loading a mapping "
1892 				    "to unlocked relocatable page %p",
1893 				    (void *)pp);
1894 			}
1895 			ASSERT(len == MMU_PAGESIZE);
1896 		}
1897 	}
1898 
1899 	if (hat->sfmmu_rmstat)
1900 		hat_resvstat(len, hat->sfmmu_as, addr);
1901 
1902 	if (flags & HAT_LOAD_NOCONSIST) {
1903 		attr |= SFMMU_UNCACHEVTTE;
1904 		use_lgpg = 1;
1905 	}
1906 	if (!pf_is_memory(pfn)) {
1907 		attr |= SFMMU_UNCACHEPTTE | HAT_NOSYNC;
1908 		use_lgpg = 1;
1909 		switch (attr & HAT_ORDER_MASK) {
1910 		case HAT_STRICTORDER:
1911 		case HAT_UNORDERED_OK:
1912 			/*
1913 			 * we set the side effect bit for all non
1914 			 * memory mappings unless merging is ok
1915 			 */
1916 			attr |= SFMMU_SIDEFFECT;
1917 			break;
1918 		case HAT_MERGING_OK:
1919 		case HAT_LOADCACHING_OK:
1920 		case HAT_STORECACHING_OK:
1921 			break;
1922 		default:
1923 			panic("hat_devload: bad attr");
1924 			break;
1925 		}
1926 	}
1927 	while (len) {
1928 		if (!use_lgpg) {
1929 			sfmmu_memtte(&tte, pfn, attr, TTE8K);
1930 			(void) sfmmu_tteload_array(hat, &tte, addr, &pp,
1931 			    flags);
1932 			len -= MMU_PAGESIZE;
1933 			addr += MMU_PAGESIZE;
1934 			pfn++;
1935 			continue;
1936 		}
1937 		/*
1938 		 * try to use large pages, check va/pa alignments
1939 		 * Note that 32M/256M page sizes are not (yet) supported.
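		 *
		 * Schematically, a candidate size sz (TTE4M, TTE512K or
		 * TTE64K) is used below only when all of the following
		 * hold:
		 *
		 *	len >= TTEBYTES(sz) &&
		 *	((uintptr_t)addr & (TTEBYTES(sz) - 1)) == 0 &&
		 *	(mmu_ptob(pfn) & (TTEBYTES(sz) - 1)) == 0 &&
		 *	!(disable_large_pages & (1 << sz))
		 *
		 * otherwise we fall through to the next smaller size.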
1940 		 */
1941 		if ((len >= MMU_PAGESIZE4M) &&
1942 		    !((uintptr_t)addr & MMU_PAGEOFFSET4M) &&
1943 		    !(disable_large_pages & (1 << TTE4M)) &&
1944 		    !(mmu_ptob(pfn) & MMU_PAGEOFFSET4M)) {
1945 			sfmmu_memtte(&tte, pfn, attr, TTE4M);
1946 			(void) sfmmu_tteload_array(hat, &tte, addr, &pp,
1947 			    flags);
1948 			len -= MMU_PAGESIZE4M;
1949 			addr += MMU_PAGESIZE4M;
1950 			pfn += MMU_PAGESIZE4M / MMU_PAGESIZE;
1951 		} else if ((len >= MMU_PAGESIZE512K) &&
1952 		    !((uintptr_t)addr & MMU_PAGEOFFSET512K) &&
1953 		    !(disable_large_pages & (1 << TTE512K)) &&
1954 		    !(mmu_ptob(pfn) & MMU_PAGEOFFSET512K)) {
1955 			sfmmu_memtte(&tte, pfn, attr, TTE512K);
1956 			(void) sfmmu_tteload_array(hat, &tte, addr, &pp,
1957 			    flags);
1958 			len -= MMU_PAGESIZE512K;
1959 			addr += MMU_PAGESIZE512K;
1960 			pfn += MMU_PAGESIZE512K / MMU_PAGESIZE;
1961 		} else if ((len >= MMU_PAGESIZE64K) &&
1962 		    !((uintptr_t)addr & MMU_PAGEOFFSET64K) &&
1963 		    !(disable_large_pages & (1 << TTE64K)) &&
1964 		    !(mmu_ptob(pfn) & MMU_PAGEOFFSET64K)) {
1965 			sfmmu_memtte(&tte, pfn, attr, TTE64K);
1966 			(void) sfmmu_tteload_array(hat, &tte, addr, &pp,
1967 			    flags);
1968 			len -= MMU_PAGESIZE64K;
1969 			addr += MMU_PAGESIZE64K;
1970 			pfn += MMU_PAGESIZE64K / MMU_PAGESIZE;
1971 		} else {
1972 			sfmmu_memtte(&tte, pfn, attr, TTE8K);
1973 			(void) sfmmu_tteload_array(hat, &tte, addr, &pp,
1974 			    flags);
1975 			len -= MMU_PAGESIZE;
1976 			addr += MMU_PAGESIZE;
1977 			pfn++;
1978 		}
1979 	}
1980 
1981 	/*
1982 	 * Check TSB and TLB page sizes.
1983 	 */
1984 	if ((flags & HAT_LOAD_SHARE) == 0) {
1985 		sfmmu_check_page_sizes(hat, 1);
1986 	}
1987 }
1988 
1989 /*
1990  * Map the largest extent possible out of the page array. The array may NOT
1991  * be in order.  The largest possible mapping a page can have
1992  * is specified in the p_szc field.  The p_szc field
1993  * cannot change as long as there are any mappings (large or small)
1994  * to any of the pages that make up the large page. (i.e., any
1995  * promotion/demotion of page size is not up to the hat but up to
1996  * the page free list manager). The array
1997  * should consist of properly aligned contiguous pages that are
1998  * part of a big page for a large mapping to be created.
1999  */
2000 void
2001 hat_memload_array(struct hat *hat, caddr_t addr, size_t len,
2002 	struct page **pps, uint_t attr, uint_t flags)
2003 {
2004 	int  ttesz;
2005 	size_t mapsz;
2006 	pgcnt_t	numpg, npgs;
2007 	tte_t tte;
2008 	page_t *pp;
2009 	uint_t large_pages_disable;
2010 
2011 	ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
2012 
2013 	if (hat->sfmmu_xhat_provider) {
2014 		XHAT_MEMLOAD_ARRAY(hat, addr, len, pps, attr, flags);
2015 		return;
2016 	}
2017 
2018 	if (hat->sfmmu_rmstat)
2019 		hat_resvstat(len, hat->sfmmu_as, addr);
2020 
2021 #if defined(SF_ERRATA_57)
2022 	if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
2023 	    (addr < errata57_limit) && (attr & PROT_EXEC) &&
2024 	    !(flags & HAT_LOAD_SHARE)) {
2025 		cmn_err(CE_WARN, "hat_memload_array: illegal attempt to make "
2026 		    "user page executable");
2027 		attr &= ~PROT_EXEC;
2028 	}
2029 #endif
2030 
2031 	/* Get number of pages */
2032 	npgs = len >> MMU_PAGESHIFT;
2033 
2034 	if (flags & HAT_LOAD_SHARE) {
2035 		large_pages_disable = disable_ism_large_pages;
2036 	} else {
2037 		large_pages_disable = disable_large_pages;
2038 	}
2039 
2040 	if (npgs < NHMENTS || large_pages_disable == LARGE_PAGES_OFF) {
2041 		sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs);
2042 		return;
2043 	}
2044 
2045 	while (npgs >= NHMENTS) {
2046 		pp = *pps;
2047 		for (ttesz = pp->p_szc; ttesz != TTE8K; ttesz--) {
2048 			/*
2049 			 * Check if this page size is disabled.
2050 			 */
2051 			if (large_pages_disable & (1 << ttesz))
2052 				continue;
2053 
2054 			numpg = TTEPAGES(ttesz);
2055 			mapsz = numpg << MMU_PAGESHIFT;
2056 			if ((npgs >= numpg) &&
2057 			    IS_P2ALIGNED(addr, mapsz) &&
2058 			    IS_P2ALIGNED(pp->p_pagenum, numpg)) {
2059 				/*
2060 				 * At this point we have enough pages and
2061 				 * we know the virtual address and the pfn
2062 				 * are properly aligned.  We still need
2063 				 * to check for physical contiguity but since
2064 				 * it is very likely that this is the case
2065 				 * we will assume they are so and undo
2066 				 * the request if necessary.  It would
2067 				 * be great if we could get a hint flag
2068 				 * like HAT_CONTIG which would tell us
2069 				 * the pages are contiguous for sure.
2070 				 */
2071 				sfmmu_memtte(&tte, (*pps)->p_pagenum,
2072 				    attr, ttesz);
2073 				if (!sfmmu_tteload_array(hat, &tte, addr,
2074 				    pps, flags)) {
2075 					break;
2076 				}
2077 			}
2078 		}
2079 		if (ttesz == TTE8K) {
2080 			/*
2081 			 * We were not able to map the array using a large
2082 			 * page; batch it one hmeblk (or fraction of one)
2083 			 * at a time.
2084 			 */
2085 			numpg = ((uintptr_t)addr >> MMU_PAGESHIFT)
2086 			    & (NHMENTS-1);
2087 			numpg = NHMENTS - numpg;
2088 			ASSERT(numpg <= npgs);
2089 			mapsz = numpg * MMU_PAGESIZE;
2090 			sfmmu_memload_batchsmall(hat, addr, pps, attr, flags,
2091 			    numpg);
2092 		}
2093 		addr += mapsz;
2094 		npgs -= numpg;
2095 		pps += numpg;
2096 	}
2097 
2098 	if (npgs) {
2099 		sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs);
2100 	}
2101 
2102 	/*
2103 	 * Check TSB and TLB page sizes.
2104 	 */
2105 	if ((flags & HAT_LOAD_SHARE) == 0) {
2106 		sfmmu_check_page_sizes(hat, 1);
2107 	}
2108 }
2109 
2110 /*
2111  * Function tries to batch 8K pages into the same hme blk.
2112  */
2113 static void
2114 sfmmu_memload_batchsmall(struct hat *hat, caddr_t vaddr, page_t **pps,
2115 		    uint_t attr, uint_t flags, pgcnt_t npgs)
2116 {
2117 	tte_t tte;
2118 	page_t *pp;
2119 	struct hmehash_bucket *hmebp;
2120 	struct hme_blk *hmeblkp;
2121 	int index;
2122 
2123 	while (npgs) {
2124 		/*
2125 		 * Acquire the hash bucket.
2126 		 */
2127 		hmebp = sfmmu_tteload_acquire_hashbucket(hat, vaddr, TTE8K);
2128 		ASSERT(hmebp);
2129 
2130 		/*
2131 		 * Find the hment block.
2132 		 */
2133 		hmeblkp = sfmmu_tteload_find_hmeblk(hat, hmebp, vaddr,
2134 		    TTE8K, flags);
2135 		ASSERT(hmeblkp);
2136 
2137 		do {
2138 			/*
2139 			 * Make the tte.
2140 			 */
2141 			pp = *pps;
2142 			sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K);
2143 
2144 			/*
2145 			 * Add the translation.
2146 			 */
2147 			(void) sfmmu_tteload_addentry(hat, hmeblkp, &tte,
2148 			    vaddr, pps, flags);
2149 
2150 			/*
2151 			 * Go to the next page.
2152 			 */
2153 			pps++;
2154 			npgs--;
2155 
2156 			/*
2157 			 * Go to the next address.
2158 			 */
2159 			vaddr += MMU_PAGESIZE;
2160 
2161 			/*
2162 			 * Don't cross over into a different hmeblk.
2163 			 */
2164 			index = (int)(((uintptr_t)vaddr >> MMU_PAGESHIFT) &
2165 			    (NHMENTS-1));
2166 
2167 		} while (index != 0 && npgs != 0);
2168 
2169 		/*
2170 		 * Release the hash bucket.
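		 *
		 * To illustrate the batching above (assuming NHMENTS == 8,
		 * i.e. one hmeblk covering 64K of 8K pages): a starting
		 * vaddr of 0x2a000 lands at hment index 5, so the pages at
		 * 0x2a000, 0x2c000 and 0x2e000 are loaded under a single
		 * hash lookup, the index then wraps to 0 at 0x30000, and
		 * the outer loop looks up the hmeblk for the next 64K span.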
2170 */ 2171 2172 sfmmu_tteload_release_hashbucket(hmebp); 2173 } 2174 } 2175 2176 /* 2177 * Construct a tte for a page: 2178 * 2179 * tte_valid = 1 2180 * tte_size2 = size & TTE_SZ2_BITS (Panther and Olympus-C only) 2181 * tte_size = size 2182 * tte_nfo = attr & HAT_NOFAULT 2183 * tte_ie = attr & HAT_STRUCTURE_LE 2184 * tte_hmenum = hmenum 2185 * tte_pahi = pp->p_pagenum >> TTE_PASHIFT; 2186 * tte_palo = pp->p_pagenum & TTE_PALOMASK; 2187 * tte_ref = 1 (optimization) 2188 * tte_wr_perm = attr & PROT_WRITE; 2189 * tte_no_sync = attr & HAT_NOSYNC 2190 * tte_lock = attr & SFMMU_LOCKTTE 2191 * tte_cp = !(attr & SFMMU_UNCACHEPTTE) 2192 * tte_cv = !(attr & SFMMU_UNCACHEVTTE) 2193 * tte_e = attr & SFMMU_SIDEFFECT 2194 * tte_priv = !(attr & PROT_USER) 2195 * tte_hwwr = if nosync is set and it is writable we set the mod bit (opt) 2196 * tte_glb = 0 2197 */ 2198 void 2199 sfmmu_memtte(tte_t *ttep, pfn_t pfn, uint_t attr, int tte_sz) 2200 { 2201 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 2202 2203 ttep->tte_inthi = MAKE_TTE_INTHI(pfn, attr, tte_sz, 0 /* hmenum */); 2204 ttep->tte_intlo = MAKE_TTE_INTLO(pfn, attr, tte_sz, 0 /* hmenum */); 2205 2206 if (TTE_IS_NOSYNC(ttep)) { 2207 TTE_SET_REF(ttep); 2208 if (TTE_IS_WRITABLE(ttep)) { 2209 TTE_SET_MOD(ttep); 2210 } 2211 } 2212 if (TTE_IS_NFO(ttep) && TTE_IS_EXECUTABLE(ttep)) { 2213 panic("sfmmu_memtte: can't set both NFO and EXEC bits"); 2214 } 2215 } 2216 2217 /* 2218 * This function will add a translation to the hme_blk and allocate the 2219 * hme_blk if one does not exist. 2220 * If a page structure is specified then it will add the 2221 * corresponding hment to the mapping list. 2222 * It will also update the hmenum field for the tte. 2223 */ 2224 void 2225 sfmmu_tteload(struct hat *sfmmup, tte_t *ttep, caddr_t vaddr, page_t *pp, 2226 uint_t flags) 2227 { 2228 (void) sfmmu_tteload_array(sfmmup, ttep, vaddr, &pp, flags); 2229 } 2230 2231 /* 2232 * Load (ttep != NULL) or unload (ttep == NULL) one entry in the TSB. 2233 * Assumes that a particular page size may only be resident in one TSB. 2234 */ 2235 static void 2236 sfmmu_mod_tsb(sfmmu_t *sfmmup, caddr_t vaddr, tte_t *ttep, int ttesz) 2237 { 2238 struct tsb_info *tsbinfop = NULL; 2239 uint64_t tag; 2240 struct tsbe *tsbe_addr; 2241 uint64_t tsb_base; 2242 uint_t tsb_size; 2243 int vpshift = MMU_PAGESHIFT; 2244 int phys = 0; 2245 2246 if (sfmmup == ksfmmup) { /* No support for 32/256M ksfmmu pages */ 2247 phys = ktsb_phys; 2248 if (ttesz >= TTE4M) { 2249 #ifndef sun4v 2250 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M)); 2251 #endif 2252 tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base; 2253 tsb_size = ktsb4m_szcode; 2254 } else { 2255 tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base; 2256 tsb_size = ktsb_szcode; 2257 } 2258 } else { 2259 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz); 2260 2261 /* 2262 * If there isn't a TSB for this page size, or the TSB is 2263 * swapped out, there is nothing to do. Note that the latter 2264 * case seems impossible but can occur if hat_pageunload() 2265 * is called on an ISM mapping while the process is swapped 2266 * out. 2267 */ 2268 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED)) 2269 return; 2270 2271 /* 2272 * If another thread is in the middle of relocating a TSB 2273 * we can't unload the entry so set a flag so that the 2274 * TSB will be flushed before it can be accessed by the 2275 * process. 
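	 *
	 * Schematically (the relocation path is presumed to honor the
	 * flag once it finishes):
	 *
	 *	this thread:	tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED;
	 *	relocator:	completes the copy, notices
	 *			TSB_FLUSH_NEEDED, flushes the TSB, then
	 *			clears TSB_RELOC_FLAG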
2276 */ 2277 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) { 2278 if (ttep == NULL) 2279 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED; 2280 return; 2281 } 2282 #if defined(UTSB_PHYS) 2283 phys = 1; 2284 tsb_base = (uint64_t)tsbinfop->tsb_pa; 2285 #else 2286 tsb_base = (uint64_t)tsbinfop->tsb_va; 2287 #endif 2288 tsb_size = tsbinfop->tsb_szc; 2289 } 2290 if (ttesz >= TTE4M) 2291 vpshift = MMU_PAGESHIFT4M; 2292 2293 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size); 2294 tag = sfmmu_make_tsbtag(vaddr); 2295 2296 if (ttep == NULL) { 2297 sfmmu_unload_tsbe(tsbe_addr, tag, phys); 2298 } else { 2299 if (ttesz >= TTE4M) { 2300 SFMMU_STAT(sf_tsb_load4m); 2301 } else { 2302 SFMMU_STAT(sf_tsb_load8k); 2303 } 2304 2305 sfmmu_load_tsbe(tsbe_addr, tag, ttep, phys); 2306 } 2307 } 2308 2309 /* 2310 * Unmap all entries from [start, end) matching the given page size. 2311 * 2312 * This function is used primarily to unmap replicated 64K or 512K entries 2313 * from the TSB that are inserted using the base page size TSB pointer, but 2314 * it may also be called to unmap a range of addresses from the TSB. 2315 */ 2316 void 2317 sfmmu_unload_tsb_range(sfmmu_t *sfmmup, caddr_t start, caddr_t end, int ttesz) 2318 { 2319 struct tsb_info *tsbinfop; 2320 uint64_t tag; 2321 struct tsbe *tsbe_addr; 2322 caddr_t vaddr; 2323 uint64_t tsb_base; 2324 int vpshift, vpgsz; 2325 uint_t tsb_size; 2326 int phys = 0; 2327 2328 /* 2329 * Assumptions: 2330 * If ttesz == 8K, 64K or 512K, we walk through the range 8K 2331 * at a time shooting down any valid entries we encounter. 2332 * 2333 * If ttesz >= 4M we walk the range 4M at a time shooting 2334 * down any valid mappings we find. 2335 */ 2336 if (sfmmup == ksfmmup) { 2337 phys = ktsb_phys; 2338 if (ttesz >= TTE4M) { 2339 #ifndef sun4v 2340 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M)); 2341 #endif 2342 tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base; 2343 tsb_size = ktsb4m_szcode; 2344 } else { 2345 tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base; 2346 tsb_size = ktsb_szcode; 2347 } 2348 } else { 2349 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz); 2350 2351 /* 2352 * If there isn't a TSB for this page size, or the TSB is 2353 * swapped out, there is nothing to do. Note that the latter 2354 * case seems impossible but can occur if hat_pageunload() 2355 * is called on an ISM mapping while the process is swapped 2356 * out. 2357 */ 2358 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED)) 2359 return; 2360 2361 /* 2362 * If another thread is in the middle of relocating a TSB 2363 * we can't unload the entry so set a flag so that the 2364 * TSB will be flushed before it can be accessed by the 2365 * process. 2366 */ 2367 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) { 2368 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED; 2369 return; 2370 } 2371 #if defined(UTSB_PHYS) 2372 phys = 1; 2373 tsb_base = (uint64_t)tsbinfop->tsb_pa; 2374 #else 2375 tsb_base = (uint64_t)tsbinfop->tsb_va; 2376 #endif 2377 tsb_size = tsbinfop->tsb_szc; 2378 } 2379 if (ttesz >= TTE4M) { 2380 vpshift = MMU_PAGESHIFT4M; 2381 vpgsz = MMU_PAGESIZE4M; 2382 } else { 2383 vpshift = MMU_PAGESHIFT; 2384 vpgsz = MMU_PAGESIZE; 2385 } 2386 2387 for (vaddr = start; vaddr < end; vaddr += vpgsz) { 2388 tag = sfmmu_make_tsbtag(vaddr); 2389 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size); 2390 sfmmu_unload_tsbe(tsbe_addr, tag, phys); 2391 } 2392 } 2393 2394 /* 2395 * Select the optimum TSB size given the number of mappings 2396 * that need to be cached. 
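 *
 * In effect: return the smallest size code szc, capped at
 * tsb_max_growsize, such that pgcnt <= SFMMU_RSS_TSBSIZE(szc).  For
 * example, if SFMMU_RSS_TSBSIZE() yielded 512, 1024, 2048, ... for
 * szc 0, 1, 2, ... (hypothetical values, and assuming the cap is not
 * hit), a pgcnt of 1500 would select szc 2.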
2397  */
2398 static int
2399 sfmmu_select_tsb_szc(pgcnt_t pgcnt)
2400 {
2401 	int szc = 0;
2402 
2403 #ifdef DEBUG
2404 	if (tsb_grow_stress) {
2405 		uint32_t randval = (uint32_t)gettick() >> 4;
2406 		return (randval % (tsb_max_growsize + 1));
2407 	}
2408 #endif	/* DEBUG */
2409 
2410 	while ((szc < tsb_max_growsize) && (pgcnt > SFMMU_RSS_TSBSIZE(szc)))
2411 		szc++;
2412 	return (szc);
2413 }
2414 
2415 /*
2416  * This function will add a translation to the hme_blk and allocate the
2417  * hme_blk if one does not exist.
2418  * If a page structure is specified then it will add the
2419  * corresponding hment to the mapping list.
2420  * It will also update the hmenum field for the tte.
2421  * Furthermore, it attempts to create a large page translation
2422  * for <addr,hat> at page array pps.  It assumes addr and the first
2423  * pp are correctly aligned.  It returns 0 if successful and 1 otherwise.
2424  */
2425 static int
2426 sfmmu_tteload_array(sfmmu_t *sfmmup, tte_t *ttep, caddr_t vaddr,
2427 	page_t **pps, uint_t flags)
2428 {
2429 	struct hmehash_bucket *hmebp;
2430 	struct hme_blk *hmeblkp;
2431 	int	ret;
2432 	uint_t	size;
2433 
2434 	/*
2435 	 * Get mapping size.
2436 	 */
2437 	size = TTE_CSZ(ttep);
2438 	ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size)));
2439 
2440 	/*
2441 	 * Acquire the hash bucket.
2442 	 */
2443 	hmebp = sfmmu_tteload_acquire_hashbucket(sfmmup, vaddr, size);
2444 	ASSERT(hmebp);
2445 
2446 	/*
2447 	 * Find the hment block.
2448 	 */
2449 	hmeblkp = sfmmu_tteload_find_hmeblk(sfmmup, hmebp, vaddr, size, flags);
2450 	ASSERT(hmeblkp);
2451 
2452 	/*
2453 	 * Add the translation.
2454 	 */
2455 	ret = sfmmu_tteload_addentry(sfmmup, hmeblkp, ttep, vaddr, pps, flags);
2456 
2457 	/*
2458 	 * Release the hash bucket.
2459 	 */
2460 	sfmmu_tteload_release_hashbucket(hmebp);
2461 
2462 	return (ret);
2463 }
2464 
2465 /*
2466  * Function locks and returns a pointer to the hash bucket for vaddr and size.
2467  */
2468 static struct hmehash_bucket *
2469 sfmmu_tteload_acquire_hashbucket(sfmmu_t *sfmmup, caddr_t vaddr, int size)
2470 {
2471 	struct hmehash_bucket *hmebp;
2472 	int hmeshift;
2473 
2474 	hmeshift = HME_HASH_SHIFT(size);
2475 
2476 	hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift);
2477 
2478 	SFMMU_HASH_LOCK(hmebp);
2479 
2480 	return (hmebp);
2481 }
2482 
2483 /*
2484  * Function returns a pointer to an hmeblk in the hash bucket, hmebp. If the
2485  * hmeblk doesn't exist for the [sfmmup, vaddr & size] signature, a hmeblk is
2486  * allocated.
2487  */
2488 static struct hme_blk *
2489 sfmmu_tteload_find_hmeblk(sfmmu_t *sfmmup, struct hmehash_bucket *hmebp,
2490 	caddr_t vaddr, uint_t size, uint_t flags)
2491 {
2492 	hmeblk_tag hblktag;
2493 	int hmeshift;
2494 	struct hme_blk *hmeblkp, *pr_hblk, *list = NULL;
2495 	uint64_t hblkpa, prevpa;
2496 	struct kmem_cache *sfmmu_cache;
2497 	uint_t forcefree;
2498 
2499 	hblktag.htag_id = sfmmup;
2500 	hmeshift = HME_HASH_SHIFT(size);
2501 	hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
2502 	hblktag.htag_rehash = HME_HASH_REHASH(size);
2503 
2504 ttearray_realloc:
2505 
2506 	HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, hblkpa,
2507 	    pr_hblk, prevpa, &list);
2508 
2509 	/*
2510 	 * We block until hblk_reserve_lock is released; it's held by
2511 	 * the thread, temporarily using hblk_reserve, until hblk_reserve is
2512 	 * replaced by a hblk from sfmmu8_cache.
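	 *
	 * The enter/exit pair below is the usual "mutex as barrier"
	 * idiom: we don't need to hold hblk_reserve_lock, only to block
	 * until its current owner has finished and dropped it, so we
	 * acquire it and immediately release it before retrying the
	 * hash search:
	 *
	 *	mutex_enter(&hblk_reserve_lock);
	 *	mutex_exit(&hblk_reserve_lock);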
2513 	 */
2514 	if (hmeblkp == (struct hme_blk *)hblk_reserve &&
2515 	    hblk_reserve_thread != curthread) {
2516 		SFMMU_HASH_UNLOCK(hmebp);
2517 		mutex_enter(&hblk_reserve_lock);
2518 		mutex_exit(&hblk_reserve_lock);
2519 		SFMMU_STAT(sf_hblk_reserve_hit);
2520 		SFMMU_HASH_LOCK(hmebp);
2521 		goto ttearray_realloc;
2522 	}
2523 
2524 	if (hmeblkp == NULL) {
2525 		hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size,
2526 		    hblktag, flags);
2527 	} else {
2528 		/*
2529 		 * It is possible for 8k and 64k hblks to collide since they
2530 		 * have the same rehash value. This is because we
2531 		 * lazily free hblks and 8K/64K blks could be lingering.
2532 		 * If we find a size mismatch we free the block and try again.
2533 		 */
2534 		if (get_hblk_ttesz(hmeblkp) != size) {
2535 			ASSERT(!hmeblkp->hblk_vcnt);
2536 			ASSERT(!hmeblkp->hblk_hmecnt);
2537 			sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, pr_hblk);
2538 			sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list);
2539 			goto ttearray_realloc;
2540 		}
2541 		if (hmeblkp->hblk_shw_bit) {
2542 			/*
2543 			 * if the hblk was previously used as a shadow hblk
2544 			 * then we will change it to a normal hblk
2545 			 */
2546 			if (hmeblkp->hblk_shw_mask) {
2547 				sfmmu_shadow_hcleanup(sfmmup, hmeblkp, hmebp);
2548 				ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
2549 				goto ttearray_realloc;
2550 			} else {
2551 				hmeblkp->hblk_shw_bit = 0;
2552 			}
2553 		}
2554 		SFMMU_STAT(sf_hblk_hit);
2555 	}
2556 
2557 	/*
2558 	 * hat_memload() should never call kmem_cache_free(); see block
2559 	 * comment showing the stacktrace in sfmmu_hblk_alloc();
2560 	 * enqueue each hblk in the list to reserve list if it's created
2561 	 * from sfmmu8_cache *and* sfmmup == KHATID.
2562 	 */
2563 	forcefree = (sfmmup == KHATID) ? 1 : 0;
2564 	while ((pr_hblk = list) != NULL) {
2565 		list = pr_hblk->hblk_next;
2566 		sfmmu_cache = get_hblk_cache(pr_hblk);
2567 		if ((sfmmu_cache == sfmmu8_cache) &&
2568 		    sfmmu_put_free_hblk(pr_hblk, forcefree))
2569 			continue;
2570 
2571 		ASSERT(sfmmup != KHATID);
2572 		kmem_cache_free(sfmmu_cache, pr_hblk);
2573 	}
2574 
2575 	ASSERT(get_hblk_ttesz(hmeblkp) == size);
2576 	ASSERT(!hmeblkp->hblk_shw_bit);
2577 
2578 	return (hmeblkp);
2579 }
2580 
2581 /*
2582  * Function adds a tte entry into the hmeblk. It returns 0 if successful and 1
2583  * otherwise.
2584  */
2585 static int
2586 sfmmu_tteload_addentry(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, tte_t *ttep,
2587 	caddr_t vaddr, page_t **pps, uint_t flags)
2588 {
2589 	page_t *pp = *pps;
2590 	int hmenum, size, remap;
2591 	tte_t tteold, flush_tte;
2592 #ifdef DEBUG
2593 	tte_t orig_old;
2594 #endif /* DEBUG */
2595 	struct sf_hment *sfhme;
2596 	kmutex_t *pml, *pmtx;
2597 	hatlock_t *hatlockp;
2598 
2599 	/*
2600 	 * remove this panic when we decide to let user virtual address
2601 	 * space be >= USERLIMIT.
2602 	 */
2603 	if (!TTE_IS_PRIVILEGED(ttep) && vaddr >= (caddr_t)USERLIMIT)
2604 		panic("user addr %p in kernel space", vaddr);
2605 #if defined(TTE_IS_GLOBAL)
2606 	if (TTE_IS_GLOBAL(ttep))
2607 		panic("sfmmu_tteload: creating global tte");
2608 #endif
2609 
2610 #ifdef DEBUG
2611 	if (pf_is_memory(sfmmu_ttetopfn(ttep, vaddr)) &&
2612 	    !TTE_IS_PCACHEABLE(ttep) && !sfmmu_allow_nc_trans)
2613 		panic("sfmmu_tteload: non cacheable memory tte");
2614 #endif /* DEBUG */
2615 
2616 	if ((flags & HAT_LOAD_SHARE) || !TTE_IS_REF(ttep) ||
2617 	    !TTE_IS_MOD(ttep)) {
2618 		/*
2619 		 * Don't load TSB for dummy as in ISM.  Also don't preload
2620 		 * the TSB if the TTE isn't writable since we're likely to
2621 		 * fault on it again -- preloading can be fairly expensive.
2622 */ 2623 flags |= SFMMU_NO_TSBLOAD; 2624 } 2625 2626 size = TTE_CSZ(ttep); 2627 switch (size) { 2628 case TTE8K: 2629 SFMMU_STAT(sf_tteload8k); 2630 break; 2631 case TTE64K: 2632 SFMMU_STAT(sf_tteload64k); 2633 break; 2634 case TTE512K: 2635 SFMMU_STAT(sf_tteload512k); 2636 break; 2637 case TTE4M: 2638 SFMMU_STAT(sf_tteload4m); 2639 break; 2640 case (TTE32M): 2641 SFMMU_STAT(sf_tteload32m); 2642 ASSERT(mmu_page_sizes == max_mmu_page_sizes); 2643 break; 2644 case (TTE256M): 2645 SFMMU_STAT(sf_tteload256m); 2646 ASSERT(mmu_page_sizes == max_mmu_page_sizes); 2647 break; 2648 } 2649 2650 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size))); 2651 2652 HBLKTOHME_IDX(sfhme, hmeblkp, vaddr, hmenum); 2653 2654 /* 2655 * Need to grab mlist lock here so that pageunload 2656 * will not change tte behind us. 2657 */ 2658 if (pp) { 2659 pml = sfmmu_mlist_enter(pp); 2660 } 2661 2662 sfmmu_copytte(&sfhme->hme_tte, &tteold); 2663 /* 2664 * Look for corresponding hment and if valid verify 2665 * pfns are equal. 2666 */ 2667 remap = TTE_IS_VALID(&tteold); 2668 if (remap) { 2669 pfn_t new_pfn, old_pfn; 2670 2671 old_pfn = TTE_TO_PFN(vaddr, &tteold); 2672 new_pfn = TTE_TO_PFN(vaddr, ttep); 2673 2674 if (flags & HAT_LOAD_REMAP) { 2675 /* make sure we are remapping same type of pages */ 2676 if (pf_is_memory(old_pfn) != pf_is_memory(new_pfn)) { 2677 panic("sfmmu_tteload - tte remap io<->memory"); 2678 } 2679 if (old_pfn != new_pfn && 2680 (pp != NULL || sfhme->hme_page != NULL)) { 2681 panic("sfmmu_tteload - tte remap pp != NULL"); 2682 } 2683 } else if (old_pfn != new_pfn) { 2684 panic("sfmmu_tteload - tte remap, hmeblkp 0x%p", 2685 (void *)hmeblkp); 2686 } 2687 ASSERT(TTE_CSZ(&tteold) == TTE_CSZ(ttep)); 2688 } 2689 2690 if (pp) { 2691 if (size == TTE8K) { 2692 #ifdef VAC 2693 /* 2694 * Handle VAC consistency 2695 */ 2696 if (!remap && (cache & CACHE_VAC) && !PP_ISNC(pp)) { 2697 sfmmu_vac_conflict(sfmmup, vaddr, pp); 2698 } 2699 #endif 2700 2701 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) { 2702 pmtx = sfmmu_page_enter(pp); 2703 PP_CLRRO(pp); 2704 sfmmu_page_exit(pmtx); 2705 } else if (!PP_ISMAPPED(pp) && 2706 (!TTE_IS_WRITABLE(ttep)) && !(PP_ISMOD(pp))) { 2707 pmtx = sfmmu_page_enter(pp); 2708 if (!(PP_ISMOD(pp))) { 2709 PP_SETRO(pp); 2710 } 2711 sfmmu_page_exit(pmtx); 2712 } 2713 2714 } else if (sfmmu_pagearray_setup(vaddr, pps, ttep, remap)) { 2715 /* 2716 * sfmmu_pagearray_setup failed so return 2717 */ 2718 sfmmu_mlist_exit(pml); 2719 return (1); 2720 } 2721 } 2722 2723 /* 2724 * Make sure hment is not on a mapping list. 2725 */ 2726 ASSERT(remap || (sfhme->hme_page == NULL)); 2727 2728 /* if it is not a remap then hme->next better be NULL */ 2729 ASSERT((!remap) ? sfhme->hme_next == NULL : 1); 2730 2731 if (flags & HAT_LOAD_LOCK) { 2732 if (((int)hmeblkp->hblk_lckcnt + 1) >= MAX_HBLK_LCKCNT) { 2733 panic("too high lckcnt-hmeblk %p", 2734 (void *)hmeblkp); 2735 } 2736 atomic_add_16(&hmeblkp->hblk_lckcnt, 1); 2737 2738 HBLK_STACK_TRACE(hmeblkp, HBLK_LOCK); 2739 } 2740 2741 #ifdef VAC 2742 if (pp && PP_ISNC(pp)) { 2743 /* 2744 * If the physical page is marked to be uncacheable, like 2745 * by a vac conflict, make sure the new mapping is also 2746 * uncacheable. 
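	 *
	 * To illustrate the conflict being avoided (hypothetically
	 * assuming an 8-color virtually indexed cache whose color is
	 * (va >> MMU_PAGESHIFT) & 7): mappings of the same pfn at
	 * va 0x12000 (color 1) and va 0x34000 (color 2) would index
	 * different cache lines, so a store through one mapping could be
	 * invisible through the other unless every mapping is made
	 * uncacheable.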
2747 */ 2748 TTE_CLR_VCACHEABLE(ttep); 2749 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR); 2750 } 2751 #endif 2752 ttep->tte_hmenum = hmenum; 2753 2754 #ifdef DEBUG 2755 orig_old = tteold; 2756 #endif /* DEBUG */ 2757 2758 while (sfmmu_modifytte_try(&tteold, ttep, &sfhme->hme_tte) < 0) { 2759 if ((sfmmup == KHATID) && 2760 (flags & (HAT_LOAD_LOCK | HAT_LOAD_REMAP))) { 2761 sfmmu_copytte(&sfhme->hme_tte, &tteold); 2762 } 2763 #ifdef DEBUG 2764 chk_tte(&orig_old, &tteold, ttep, hmeblkp); 2765 #endif /* DEBUG */ 2766 } 2767 2768 if (!TTE_IS_VALID(&tteold)) { 2769 2770 atomic_add_16(&hmeblkp->hblk_vcnt, 1); 2771 atomic_add_long(&sfmmup->sfmmu_ttecnt[size], 1); 2772 2773 /* 2774 * HAT_RELOAD_SHARE has been deprecated with lpg DISM. 2775 */ 2776 2777 if (size > TTE8K && (flags & HAT_LOAD_SHARE) == 0 && 2778 sfmmup != ksfmmup) { 2779 /* 2780 * If this is the first large mapping for the process 2781 * we must force any CPUs running this process to TL=0 2782 * where they will reload the HAT flags from the 2783 * tsbmiss area. This is necessary to make the large 2784 * mappings we are about to load visible to those CPUs; 2785 * otherwise they'll loop forever calling pagefault() 2786 * since we don't search large hash chains by default. 2787 */ 2788 hatlockp = sfmmu_hat_enter(sfmmup); 2789 if (size == TTE512K && 2790 !SFMMU_FLAGS_ISSET(sfmmup, HAT_512K_FLAG)) { 2791 SFMMU_FLAGS_SET(sfmmup, HAT_512K_FLAG); 2792 sfmmu_sync_mmustate(sfmmup); 2793 } else if (size == TTE4M && 2794 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4M_FLAG)) { 2795 SFMMU_FLAGS_SET(sfmmup, HAT_4M_FLAG); 2796 sfmmu_sync_mmustate(sfmmup); 2797 } else if (size == TTE64K && 2798 !SFMMU_FLAGS_ISSET(sfmmup, HAT_64K_FLAG)) { 2799 SFMMU_FLAGS_SET(sfmmup, HAT_64K_FLAG); 2800 /* no sync mmustate; 64K shares 8K hashes */ 2801 } else if (mmu_page_sizes == max_mmu_page_sizes) { 2802 if (size == TTE32M && 2803 !SFMMU_FLAGS_ISSET(sfmmup, HAT_32M_FLAG)) { 2804 SFMMU_FLAGS_SET(sfmmup, HAT_32M_FLAG); 2805 sfmmu_sync_mmustate(sfmmup); 2806 } else if (size == TTE256M && 2807 !SFMMU_FLAGS_ISSET(sfmmup, HAT_256M_FLAG)) { 2808 SFMMU_FLAGS_SET(sfmmup, HAT_256M_FLAG); 2809 sfmmu_sync_mmustate(sfmmup); 2810 } 2811 } 2812 if (size >= TTE4M && (flags & HAT_LOAD_TEXT) && 2813 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) { 2814 SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG); 2815 } 2816 sfmmu_hat_exit(hatlockp); 2817 } 2818 } 2819 ASSERT(TTE_IS_VALID(&sfhme->hme_tte)); 2820 2821 flush_tte.tte_intlo = (tteold.tte_intlo ^ ttep->tte_intlo) & 2822 hw_tte.tte_intlo; 2823 flush_tte.tte_inthi = (tteold.tte_inthi ^ ttep->tte_inthi) & 2824 hw_tte.tte_inthi; 2825 2826 if (remap && (flush_tte.tte_inthi || flush_tte.tte_intlo)) { 2827 /* 2828 * If remap and new tte differs from old tte we need 2829 * to sync the mod bit and flush TLB/TSB. We don't 2830 * need to sync ref bit because we currently always set 2831 * ref bit in tteload. 2832 */ 2833 ASSERT(TTE_IS_REF(ttep)); 2834 if (TTE_IS_MOD(&tteold)) { 2835 sfmmu_ttesync(sfmmup, vaddr, &tteold, pp); 2836 } 2837 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 0); 2838 xt_sync(sfmmup->sfmmu_cpusran); 2839 } 2840 2841 if ((flags & SFMMU_NO_TSBLOAD) == 0) { 2842 /* 2843 * We only preload 8K and 4M mappings into the TSB, since 2844 * 64K and 512K mappings are replicated and hence don't 2845 * have a single, unique TSB entry. Ditto for 32M/256M. 
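	 *
	 * E.g. a single 64K mapping at va 0x10000 would need eight
	 * replicated entries in the 8K-indexed TSB (for va 0x10000,
	 * 0x12000, ..., 0x1e000); that is why only 8K and 4M entries are
	 * preloaded here, and why sfmmu_unload_tsb_range() walks such
	 * ranges 8K at a time.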
2846 	 */
2847 	if (size == TTE8K || size == TTE4M) {
2848 		hatlockp = sfmmu_hat_enter(sfmmup);
2849 		sfmmu_load_tsb(sfmmup, vaddr, &sfhme->hme_tte, size);
2850 		sfmmu_hat_exit(hatlockp);
2851 	}
2852 }
2853 	if (pp) {
2854 		if (!remap) {
2855 			HME_ADD(sfhme, pp);
2856 			atomic_add_16(&hmeblkp->hblk_hmecnt, 1);
2857 			ASSERT(hmeblkp->hblk_hmecnt > 0);
2858 
2859 			/*
2860 			 * Cannot ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS)
2861 			 * see pageunload() for comment.
2862 			 */
2863 		}
2864 		sfmmu_mlist_exit(pml);
2865 	}
2866 
2867 	return (0);
2868 }
2869 /*
2870  * Function unlocks hash bucket.
2871  */
2872 static void
2873 sfmmu_tteload_release_hashbucket(struct hmehash_bucket *hmebp)
2874 {
2875 	ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
2876 	SFMMU_HASH_UNLOCK(hmebp);
2877 }
2878 
2879 /*
2880  * Function that checks and sets up the page array for a large
2881  * translation.  Will set p_vcolor, p_index, p_ro fields.
2882  * Assumes addr and pfnum of first page are properly aligned.
2883  * Will check for physical contiguity.  If the check fails it
2884  * returns nonzero.
2885  */
2886 static int
2887 sfmmu_pagearray_setup(caddr_t addr, page_t **pps, tte_t *ttep, int remap)
2888 {
2889 	int i, index, ttesz;
2890 	pfn_t pfnum;
2891 	pgcnt_t	npgs;
2892 	page_t *pp, *pp1;
2893 	kmutex_t *pmtx;
2894 #ifdef VAC
2895 	int osz;
2896 	int cflags = 0;
2897 	int vac_err = 0;
2898 #endif
2899 	int newidx = 0;
2900 
2901 	ttesz = TTE_CSZ(ttep);
2902 
2903 	ASSERT(ttesz > TTE8K);
2904 
2905 	npgs = TTEPAGES(ttesz);
2906 	index = PAGESZ_TO_INDEX(ttesz);
2907 
2908 	pfnum = (*pps)->p_pagenum;
2909 	ASSERT(IS_P2ALIGNED(pfnum, npgs));
2910 
2911 	/*
2912 	 * Save the first pp so we can do HAT_TMPNC at the end.
2913 	 */
2914 	pp1 = *pps;
2915 #ifdef VAC
2916 	osz = fnd_mapping_sz(pp1);
2917 #endif
2918 
2919 	for (i = 0; i < npgs; i++, pps++) {
2920 		pp = *pps;
2921 		ASSERT(PAGE_LOCKED(pp));
2922 		ASSERT(pp->p_szc >= ttesz);
2923 		ASSERT(pp->p_szc == pp1->p_szc);
2924 		ASSERT(sfmmu_mlist_held(pp));
2925 
2926 		/*
2927 		 * XXX is it possible to maintain P_RO on the root only?
2928 		 */
2929 		if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) {
2930 			pmtx = sfmmu_page_enter(pp);
2931 			PP_CLRRO(pp);
2932 			sfmmu_page_exit(pmtx);
2933 		} else if (!PP_ISMAPPED(pp) && !TTE_IS_WRITABLE(ttep) &&
2934 		    !PP_ISMOD(pp)) {
2935 			pmtx = sfmmu_page_enter(pp);
2936 			if (!(PP_ISMOD(pp))) {
2937 				PP_SETRO(pp);
2938 			}
2939 			sfmmu_page_exit(pmtx);
2940 		}
2941 
2942 		/*
2943 		 * If this is a remap we skip vac & contiguity checks.
2944 		 */
2945 		if (remap)
2946 			continue;
2947 
2948 		/*
2949 		 * set p_vcolor and detect any vac conflicts.
2950 		 */
2951 #ifdef VAC
2952 		if (vac_err == 0) {
2953 			vac_err = sfmmu_vacconflict_array(addr, pp, &cflags);
2954 
2955 		}
2956 #endif
2957 
2958 		/*
2959 		 * Save current index in case we need to undo it.
2960 		 * Note: "PAGESZ_TO_INDEX(sz)	(1 << (sz))"
2961 		 *	"SFMMU_INDEX_SHIFT	6"
2962 		 *	"SFMMU_INDEX_MASK	((1 << SFMMU_INDEX_SHIFT) - 1)"
2963 		 *	"PP_MAPINDEX(p_index)	(p_index & SFMMU_INDEX_MASK)"
2964 		 *
2965 		 * So:	index = PAGESZ_TO_INDEX(ttesz);
2966 		 *	if ttesz == 1 then index = 0x2
2967 		 *		    2 then index = 0x4
2968 		 *		    3 then index = 0x8
2969 		 *		    4 then index = 0x10
2970 		 *		    5 then index = 0x20
2971 		 * The code below checks if it's a new pagesize (i.e., newidx)
2972 		 * in case we need to take it back out of p_index,
2973 		 * and then or's the new index into the existing index.
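		 *
		 * E.g. a page already under 64K (ttesz 1) and 4M (ttesz 3)
		 * large mappings has PP_MAPINDEX(pp) == (0x2 | 0x8) == 0xa;
		 * loading a 512K mapping or's in 0x4, and since that bit is
		 * new, newidx is set so the undo path below knows to clear
		 * exactly that bit.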
2974 		 */
2975 		if ((PP_MAPINDEX(pp) & index) == 0)
2976 			newidx = 1;
2977 		pp->p_index = (PP_MAPINDEX(pp) | index);
2978 
2979 		/*
2980 		 * contiguity check
2981 		 */
2982 		if (pp->p_pagenum != pfnum) {
2983 			/*
2984 			 * If we fail the contiguity test then
2985 			 * the only thing we need to fix is the p_index field.
2986 			 * We might get a few extra flushes but since this
2987 			 * path is rare that is ok.  The p_ro field will
2988 			 * get automatically fixed on the next tteload to
2989 			 * the page.  NO TNC bit is set yet.
2990 			 */
2991 			while (i >= 0) {
2992 				pp = *pps;
2993 				if (newidx)
2994 					pp->p_index = (PP_MAPINDEX(pp) &
2995 					    ~index);
2996 				pps--;
2997 				i--;
2998 			}
2999 			return (1);
3000 		}
3001 		pfnum++;
3002 		addr += MMU_PAGESIZE;
3003 	}
3004 
3005 #ifdef VAC
3006 	if (vac_err) {
3007 		if (ttesz > osz) {
3008 			/*
3009 			 * There are some smaller mappings that cause vac
3010 			 * conflicts.  Convert all existing small mappings to
3011 			 * TNC.
3012 			 */
3013 			SFMMU_STAT_ADD(sf_uncache_conflict, npgs);
3014 			sfmmu_page_cache_array(pp1, HAT_TMPNC, CACHE_FLUSH,
3015 			    npgs);
3016 		} else {
3017 			/* EMPTY */
3018 			/*
3019 			 * If there exists a big page mapping,
3020 			 * that means the whole existing big page
3021 			 * has the TNC setting already.  No need to convert
3022 			 * to TNC again.
3023 			 */
3024 			ASSERT(PP_ISTNC(pp1));
3025 		}
3026 	}
3027 #endif	/* VAC */
3028 
3029 	return (0);
3030 }
3031 
3032 #ifdef VAC
3033 /*
3034  * Routine that checks VAC consistency for a large page.  It also
3035  * sets the virtual color for all pp's of this big mapping.
3036  */
3037 static int
3038 sfmmu_vacconflict_array(caddr_t addr, page_t *pp, int *cflags)
3039 {
3040 	int vcolor, ocolor;
3041 
3042 	ASSERT(sfmmu_mlist_held(pp));
3043 
3044 	if (PP_ISNC(pp)) {
3045 		return (HAT_TMPNC);
3046 	}
3047 
3048 	vcolor = addr_to_vcolor(addr);
3049 	if (PP_NEWPAGE(pp)) {
3050 		PP_SET_VCOLOR(pp, vcolor);
3051 		return (0);
3052 	}
3053 
3054 	ocolor = PP_GET_VCOLOR(pp);
3055 	if (ocolor == vcolor) {
3056 		return (0);
3057 	}
3058 
3059 	if (!PP_ISMAPPED(pp)) {
3060 		/*
3061 		 * Previous user of page had a different color
3062 		 * but since there are no current users
3063 		 * we just flush the cache and change the color.
3064 		 * As an optimization for large pages we flush the
3065 		 * entire cache of that color and set a flag.
3066 		 */
3067 		SFMMU_STAT(sf_pgcolor_conflict);
3068 		if (!CacheColor_IsFlushed(*cflags, ocolor)) {
3069 			CacheColor_SetFlushed(*cflags, ocolor);
3070 			sfmmu_cache_flushcolor(ocolor, pp->p_pagenum);
3071 		}
3072 		PP_SET_VCOLOR(pp, vcolor);
3073 		return (0);
3074 	}
3075 
3076 	/*
3077 	 * We got a real conflict with a current mapping.
3078 	 * Set flags to start uncaching all mappings
3079 	 * and return failure so we restart looping
3080 	 * through the pp array from the beginning.
3081 	 */
3082 	return (HAT_TMPNC);
3083 }
3084 #endif	/* VAC */
3085 
3086 /*
3087  * Creates a large page shadow hmeblk for a tte.
3088  * The purpose of this routine is to allow us to do quick unloads because
3089  * the vm layer can easily pass a very large but sparsely populated range.
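 *
 * E.g. a 4M shadow hblk summarizes eight 512K child regions; bit n of
 * hblk_shw_mask being set means child region n may contain mappings,
 * so a later unload of the sparse 4M range only has to visit the
 * marked children (vaddr_to_vshift() below computes n for a given
 * vaddr).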
3090  */
3091 static struct hme_blk *
3092 sfmmu_shadow_hcreate(sfmmu_t *sfmmup, caddr_t vaddr, int ttesz, uint_t flags)
3093 {
3094 	struct hmehash_bucket *hmebp;
3095 	hmeblk_tag hblktag;
3096 	int hmeshift, size, vshift;
3097 	uint_t shw_mask, newshw_mask;
3098 	struct hme_blk *hmeblkp;
3099 
3100 	ASSERT(sfmmup != KHATID);
3101 	if (mmu_page_sizes == max_mmu_page_sizes) {
3102 		ASSERT(ttesz < TTE256M);
3103 	} else {
3104 		ASSERT(ttesz < TTE4M);
3105 		ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
3106 		ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
3107 	}
3108 
3109 	if (ttesz == TTE8K) {
3110 		size = TTE512K;
3111 	} else {
3112 		size = ++ttesz;
3113 	}
3114 
3115 	hblktag.htag_id = sfmmup;
3116 	hmeshift = HME_HASH_SHIFT(size);
3117 	hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
3118 	hblktag.htag_rehash = HME_HASH_REHASH(size);
3119 	hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift);
3120 
3121 	SFMMU_HASH_LOCK(hmebp);
3122 
3123 	HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
3124 	ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve);
3125 	if (hmeblkp == NULL) {
3126 		hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size,
3127 		    hblktag, flags);
3128 	}
3129 	ASSERT(hmeblkp);
3130 	if (!hmeblkp->hblk_shw_mask) {
3131 		/*
3132 		 * if this is an unused hblk it was just allocated or could
3133 		 * potentially be a previous large page hblk so we need to
3134 		 * set the shadow bit.
3135 		 */
3136 		hmeblkp->hblk_shw_bit = 1;
3137 	}
3138 	ASSERT(hmeblkp->hblk_shw_bit == 1);
3139 	vshift = vaddr_to_vshift(hblktag, vaddr, size);
3140 	ASSERT(vshift < 8);
3141 	/*
3142 	 * Atomically set shw mask bit
3143 	 */
3144 	do {
3145 		shw_mask = hmeblkp->hblk_shw_mask;
3146 		newshw_mask = shw_mask | (1 << vshift);
3147 		newshw_mask = cas32(&hmeblkp->hblk_shw_mask, shw_mask,
3148 		    newshw_mask);
3149 	} while (newshw_mask != shw_mask);
3150 
3151 	SFMMU_HASH_UNLOCK(hmebp);
3152 
3153 	return (hmeblkp);
3154 }
3155 
3156 /*
3157  * This routine cleans up a previous shadow hmeblk and changes it to
3158  * a regular hblk.  This happens rarely but it is possible
3159  * when a process wants to use large pages and there are hblks still
3160  * lying around from the previous as that used these hmeblks.
3161  * The alternative was to clean up the shadow hblks at unload time
3162  * but since so few user processes actually use large pages, it is
3163  * better to be lazy and clean up at this time.
3164  */
3165 static void
3166 sfmmu_shadow_hcleanup(sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
3167 	struct hmehash_bucket *hmebp)
3168 {
3169 	caddr_t addr, endaddr;
3170 	int hashno, size;
3171 
3172 	ASSERT(hmeblkp->hblk_shw_bit);
3173 
3174 	ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
3175 
3176 	if (!hmeblkp->hblk_shw_mask) {
3177 		hmeblkp->hblk_shw_bit = 0;
3178 		return;
3179 	}
3180 	addr = (caddr_t)get_hblk_base(hmeblkp);
3181 	endaddr = get_hblk_endaddr(hmeblkp);
3182 	size = get_hblk_ttesz(hmeblkp);
3183 	hashno = size - 1;
3184 	ASSERT(hashno > 0);
3185 	SFMMU_HASH_UNLOCK(hmebp);
3186 
3187 	sfmmu_free_hblks(sfmmup, addr, endaddr, hashno);
3188 
3189 	SFMMU_HASH_LOCK(hmebp);
3190 }
3191 
3192 static void
3193 sfmmu_free_hblks(sfmmu_t *sfmmup, caddr_t addr, caddr_t endaddr,
3194 	int hashno)
3195 {
3196 	int hmeshift, shadow = 0;
3197 	hmeblk_tag hblktag;
3198 	struct hmehash_bucket *hmebp;
3199 	struct hme_blk *hmeblkp;
3200 	struct hme_blk *nx_hblk, *pr_hblk, *list = NULL;
3201 	uint64_t hblkpa, prevpa, nx_pa;
3202 
3203 	ASSERT(hashno > 0);
3204 	hblktag.htag_id = sfmmup;
3205 	hblktag.htag_rehash = hashno;
3206 
3207 	hmeshift = HME_HASH_SHIFT(hashno);
3208 
3209 	while (addr < endaddr) {
3210 		hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3211 		hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
3212 		SFMMU_HASH_LOCK(hmebp);
3213 		/* inline HME_HASH_SEARCH */
3214 		hmeblkp = hmebp->hmeblkp;
3215 		hblkpa = hmebp->hmeh_nextpa;
3216 		prevpa = 0;
3217 		pr_hblk = NULL;
3218 		while (hmeblkp) {
3219 			ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp));
3220 			if (HTAGS_EQ(hmeblkp->hblk_tag, hblktag)) {
3221 				/* found hme_blk */
3222 				if (hmeblkp->hblk_shw_bit) {
3223 					if (hmeblkp->hblk_shw_mask) {
3224 						shadow = 1;
3225 						sfmmu_shadow_hcleanup(sfmmup,
3226 						    hmeblkp, hmebp);
3227 						break;
3228 					} else {
3229 						hmeblkp->hblk_shw_bit = 0;
3230 					}
3231 				}
3232 
3233 				/*
3234 				 * Hblk_hmecnt and hblk_vcnt could be nonzero
3235 				 * since hblk_unload() does not guarantee that.
3236 				 *
3237 				 * XXX - this could cause tteload() to spin
3238 				 * where sfmmu_shadow_hcleanup() is called.
3239 				 */
3240 			}
3241 
3242 			nx_hblk = hmeblkp->hblk_next;
3243 			nx_pa = hmeblkp->hblk_nextpa;
3244 			if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
3245 				sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa,
3246 				    pr_hblk);
3247 				sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list);
3248 			} else {
3249 				pr_hblk = hmeblkp;
3250 				prevpa = hblkpa;
3251 			}
3252 			hmeblkp = nx_hblk;
3253 			hblkpa = nx_pa;
3254 		}
3255 
3256 		SFMMU_HASH_UNLOCK(hmebp);
3257 
3258 		if (shadow) {
3259 			/*
3260 			 * We found another shadow hblk and cleaned up its
3261 			 * children.  We need to go back and clean up
3262 			 * the original hblk, so we don't advance
3263 			 * addr.
3264 			 */
3265 			shadow = 0;
3266 		} else {
3267 			addr = (caddr_t)roundup((uintptr_t)addr + 1,
3268 			    (1 << hmeshift));
3269 		}
3270 	}
3271 	sfmmu_hblks_list_purge(&list);
3272 }
3273 
3274 /*
3275  * Release one hardware address translation lock on the given address range.
3276  */
3277 void
3278 hat_unlock(struct hat *sfmmup, caddr_t addr, size_t len)
3279 {
3280 	struct hmehash_bucket *hmebp;
3281 	hmeblk_tag hblktag;
3282 	int hmeshift, hashno = 1;
3283 	struct hme_blk *hmeblkp, *list = NULL;
3284 	caddr_t endaddr;
3285 
3286 	ASSERT(sfmmup != NULL);
3287 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
3288 
3289 	ASSERT((sfmmup == ksfmmup) ||
3290 	    AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
3291 	ASSERT((len & MMU_PAGEOFFSET) == 0);
3292 	endaddr = addr + len;
3293 	hblktag.htag_id = sfmmup;
3294 
3295 	/*
3296 	 * Spitfire supports 4 page sizes.
3297 	 * Most pages are expected to be of the smallest page size (8K) and
3298 	 * these will not need to be rehashed. 64K pages also don't need to be
3299 	 * rehashed because an hmeblk spans 64K of address space. 512K pages
3300 	 * might need 1 rehash and 4M pages might need 2 rehashes.
3301 	 */
3302 	while (addr < endaddr) {
3303 		hmeshift = HME_HASH_SHIFT(hashno);
3304 		hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3305 		hblktag.htag_rehash = hashno;
3306 		hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
3307 
3308 		SFMMU_HASH_LOCK(hmebp);
3309 
3310 		HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
3311 		if (hmeblkp != NULL) {
3312 			/*
3313 			 * If we encounter a shadow hmeblk then
3314 			 * we know there are no valid hmeblks mapping
3315 			 * this address at this size or larger.
3316 			 * Just increment address by the smallest
3317 			 * page size.
3318 			 */
3319 			if (hmeblkp->hblk_shw_bit) {
3320 				addr += MMU_PAGESIZE;
3321 			} else {
3322 				addr = sfmmu_hblk_unlock(hmeblkp, addr,
3323 				    endaddr);
3324 			}
3325 			SFMMU_HASH_UNLOCK(hmebp);
3326 			hashno = 1;
3327 			continue;
3328 		}
3329 		SFMMU_HASH_UNLOCK(hmebp);
3330 
3331 		if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
3332 			/*
3333 			 * We have traversed the whole list and rehashed
3334 			 * if necessary without finding the address to unlock
3335 			 * which should never happen.
3336 			 */
3337 			panic("sfmmu_unlock: addr not found. "
3338 			    "addr %p hat %p", (void *)addr, (void *)sfmmup);
3339 		} else {
3340 			hashno++;
3341 		}
3342 	}
3343 
3344 	sfmmu_hblks_list_purge(&list);
3345 }
3346 
3347 /*
3348  * Function to unlock a range of addresses in an hmeblk. It returns the
3349  * next address that needs to be unlocked.
3350  * Should be called with the hash lock held.
3351  */
3352 static caddr_t
3353 sfmmu_hblk_unlock(struct hme_blk *hmeblkp, caddr_t addr, caddr_t endaddr)
3354 {
3355 	struct sf_hment *sfhme;
3356 	tte_t tteold, ttemod;
3357 	int ttesz, ret;
3358 
3359 	ASSERT(in_hblk_range(hmeblkp, addr));
3360 	ASSERT(hmeblkp->hblk_shw_bit == 0);
3361 
3362 	endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
3363 	ttesz = get_hblk_ttesz(hmeblkp);
3364 
3365 	HBLKTOHME(sfhme, hmeblkp, addr);
3366 	while (addr < endaddr) {
3367 readtte:
3368 		sfmmu_copytte(&sfhme->hme_tte, &tteold);
3369 		if (TTE_IS_VALID(&tteold)) {
3370 
3371 			ttemod = tteold;
3372 
3373 			ret = sfmmu_modifytte_try(&tteold, &ttemod,
3374 			    &sfhme->hme_tte);
3375 
3376 			if (ret < 0)
3377 				goto readtte;
3378 
3379 			if (hmeblkp->hblk_lckcnt == 0)
3380 				panic("zero hblk lckcnt");
3381 
3382 			if (((uintptr_t)addr + TTEBYTES(ttesz)) >
3383 			    (uintptr_t)endaddr)
3384 				panic("can't unlock large tte");
3385 
3386 			ASSERT(hmeblkp->hblk_lckcnt > 0);
3387 			atomic_add_16(&hmeblkp->hblk_lckcnt, -1);
3388 			HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK);
3389 		} else {
3390 			panic("sfmmu_hblk_unlock: invalid tte");
3391 		}
3392 		addr += TTEBYTES(ttesz);
3393 		sfhme++;
3394 	}
3395 	return (addr);
3396 }
3397 
3398 /*
3399  * Physical Address Mapping Framework
3400  *
3401  * General rules:
3402  *
3403  * (1) Applies only to seg_kmem memory pages. To make things easier,
3404  *     seg_kpm addresses are also accepted by the routines, but nothing
3405  *     is done with them since by definition their PA mappings are static.
3406  * (2) hat_add_callback() may only be called while holding the page lock
3407  *     SE_SHARED or SE_EXCL of the underlying page (e.g., as_pagelock()),
3408  *     or passing HAC_PAGELOCK flag.
3409  * (3) prehandler() and posthandler() may not call hat_add_callback() or
3410  *     hat_delete_callback(), nor should they allocate memory.
Post quiesce 3411 * callbacks may not sleep or acquire adaptive mutex locks. 3412 * (4) Either prehandler() or posthandler() (but not both) may be specified 3413 * as being NULL. Specifying an errhandler() is optional. 3414 * 3415 * Details of using the framework: 3416 * 3417 * registering a callback (hat_register_callback()) 3418 * 3419 * Pass prehandler, posthandler, errhandler addresses 3420 * as described below. If capture_cpus argument is nonzero, 3421 * suspend callback to the prehandler will occur with CPUs 3422 * captured and executing xc_loop() and CPUs will remain 3423 * captured until after the posthandler suspend callback 3424 * occurs. 3425 * 3426 * adding a callback (hat_add_callback()) 3427 * 3428 * as_pagelock(); 3429 * hat_add_callback(); 3430 * save returned pfn in private data structures or program registers; 3431 * as_pageunlock(); 3432 * 3433 * prehandler() 3434 * 3435 * Stop all accesses by physical address to this memory page. 3436 * Called twice: the first, PRESUSPEND, is a context safe to acquire 3437 * adaptive locks. The second, SUSPEND, is called at high PIL with 3438 * CPUs captured so adaptive locks may NOT be acquired (and all spin 3439 * locks must be XCALL_PIL or higher locks). 3440 * 3441 * May return the following errors: 3442 * EIO: A fatal error has occurred. This will result in panic. 3443 * EAGAIN: The page cannot be suspended. This will fail the 3444 * relocation. 3445 * 0: Success. 3446 * 3447 * posthandler() 3448 * 3449 * Save new pfn in private data structures or program registers; 3450 * not allowed to fail (non-zero return values will result in panic). 3451 * 3452 * errhandler() 3453 * 3454 * called when an error occurs related to the callback. Currently 3455 * the only such error is HAT_CB_ERR_LEAKED which indicates that 3456 * a page is being freed, but there are still outstanding callback(s) 3457 * registered on the page. 3458 * 3459 * removing a callback (hat_delete_callback(); e.g., prior to freeing memory) 3460 * 3461 * stop using physical address 3462 * hat_delete_callback(); 3463 * 3464 */ 3465 3466 /* 3467 * Register a callback class. Each subsystem should do this once and 3468 * cache the id_t returned for use in setting up and tearing down callbacks. 3469 * 3470 * There is no facility for removing callback IDs once they are created; 3471 * the "key" should be unique for each module, so in case a module is unloaded 3472 * and subsequently re-loaded, we can recycle the module's previous entry. 3473 */ 3474 id_t 3475 hat_register_callback(int key, 3476 int (*prehandler)(caddr_t, uint_t, uint_t, void *), 3477 int (*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t), 3478 int (*errhandler)(caddr_t, uint_t, uint_t, void *), 3479 int capture_cpus) 3480 { 3481 id_t id; 3482 3483 /* 3484 * Search the table for a pre-existing callback associated with 3485 * the identifier "key". If one exists, we re-use that entry in 3486 * the table for this instance, otherwise we assign the next 3487 * available table slot. 
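 *
 * A client would typically do this once at load time and cache the
 * result (hypothetical client code, not part of this file):
 *
 *	static id_t mydrv_cbid;
 *	...
 *	mydrv_cbid = hat_register_callback(MYDRV_CB_KEY,
 *	    mydrv_prehandler, mydrv_posthandler, mydrv_errhandler, 1);
 *
 * so that if the module is unloaded and reloaded, the same
 * MYDRV_CB_KEY re-finds and recycles its previous slot.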
3488 */ 3489 for (id = 0; id < sfmmu_max_cb_id; id++) { 3490 if (sfmmu_cb_table[id].key == key) 3491 break; 3492 } 3493 3494 if (id == sfmmu_max_cb_id) { 3495 id = sfmmu_cb_nextid++; 3496 if (id >= sfmmu_max_cb_id) 3497 panic("hat_register_callback: out of callback IDs"); 3498 } 3499 3500 ASSERT(prehandler != NULL || posthandler != NULL); 3501 3502 sfmmu_cb_table[id].key = key; 3503 sfmmu_cb_table[id].prehandler = prehandler; 3504 sfmmu_cb_table[id].posthandler = posthandler; 3505 sfmmu_cb_table[id].errhandler = errhandler; 3506 sfmmu_cb_table[id].capture_cpus = capture_cpus; 3507 3508 return (id); 3509 } 3510 3511 #define HAC_COOKIE_NONE (void *)-1 3512 3513 /* 3514 * Add relocation callbacks to the specified addr/len which will be called 3515 * when relocating the associated page. See the description of pre and 3516 * posthandler above for more details. 3517 * 3518 * If HAC_PAGELOCK is included in flags, the underlying memory page is 3519 * locked internally so the caller must be able to deal with the callback 3520 * running even before this function has returned. If HAC_PAGELOCK is not 3521 * set, it is assumed that the underlying memory pages are locked. 3522 * 3523 * Since the caller must track the individual page boundaries anyway, 3524 * we only allow a callback to be added to a single page (large 3525 * or small). Thus [addr, addr + len) MUST be contained within a single 3526 * page. 3527 * 3528 * Registering multiple callbacks on the same [addr, addr+len) is supported, 3529 * _provided_that_ a unique parameter is specified for each callback. 3530 * If multiple callbacks are registered on the same range the callback will 3531 * be invoked with each unique parameter. Registering the same callback with 3532 * the same argument more than once will result in corrupted kernel state. 3533 * 3534 * Returns the pfn of the underlying kernel page in *rpfn 3535 * on success, or PFN_INVALID on failure. 3536 * 3537 * cookiep (if passed) provides storage space for an opaque cookie 3538 * to return later to hat_delete_callback(). This cookie makes the callback 3539 * deletion significantly quicker by avoiding a potentially lengthy hash 3540 * search. 3541 * 3542 * Returns values: 3543 * 0: success 3544 * ENOMEM: memory allocation failure (e.g. flags was passed as HAC_NOSLEEP) 3545 * EINVAL: callback ID is not valid 3546 * ENXIO: ["vaddr", "vaddr" + len) is not mapped in the kernel's address 3547 * space 3548 * ERANGE: ["vaddr", "vaddr" + len) crosses a page boundary 3549 */ 3550 int 3551 hat_add_callback(id_t callback_id, caddr_t vaddr, uint_t len, uint_t flags, 3552 void *pvt, pfn_t *rpfn, void **cookiep) 3553 { 3554 struct hmehash_bucket *hmebp; 3555 hmeblk_tag hblktag; 3556 struct hme_blk *hmeblkp; 3557 int hmeshift, hashno; 3558 caddr_t saddr, eaddr, baseaddr; 3559 struct pa_hment *pahmep; 3560 struct sf_hment *sfhmep, *osfhmep; 3561 kmutex_t *pml; 3562 tte_t tte; 3563 page_t *pp; 3564 vnode_t *vp; 3565 u_offset_t off; 3566 pfn_t pfn; 3567 int kmflags = (flags & HAC_SLEEP)? KM_SLEEP : KM_NOSLEEP; 3568 int locked = 0; 3569 3570 /* 3571 * For KPM mappings, just return the physical address since we 3572 * don't need to register any callbacks. 
3573 */ 3574 if (IS_KPM_ADDR(vaddr)) { 3575 uint64_t paddr; 3576 SFMMU_KPM_VTOP(vaddr, paddr); 3577 *rpfn = btop(paddr); 3578 if (cookiep != NULL) 3579 *cookiep = HAC_COOKIE_NONE; 3580 return (0); 3581 } 3582 3583 if (callback_id < (id_t)0 || callback_id >= sfmmu_cb_nextid) { 3584 *rpfn = PFN_INVALID; 3585 return (EINVAL); 3586 } 3587 3588 if ((pahmep = kmem_cache_alloc(pa_hment_cache, kmflags)) == NULL) { 3589 *rpfn = PFN_INVALID; 3590 return (ENOMEM); 3591 } 3592 3593 sfhmep = &pahmep->sfment; 3594 3595 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK); 3596 eaddr = saddr + len; 3597 3598 rehash: 3599 /* Find the mapping(s) for this page */ 3600 for (hashno = TTE64K, hmeblkp = NULL; 3601 hmeblkp == NULL && hashno <= mmu_hashcnt; 3602 hashno++) { 3603 hmeshift = HME_HASH_SHIFT(hashno); 3604 hblktag.htag_id = ksfmmup; 3605 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift); 3606 hblktag.htag_rehash = hashno; 3607 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift); 3608 3609 SFMMU_HASH_LOCK(hmebp); 3610 3611 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 3612 3613 if (hmeblkp == NULL) 3614 SFMMU_HASH_UNLOCK(hmebp); 3615 } 3616 3617 if (hmeblkp == NULL) { 3618 kmem_cache_free(pa_hment_cache, pahmep); 3619 *rpfn = PFN_INVALID; 3620 return (ENXIO); 3621 } 3622 3623 HBLKTOHME(osfhmep, hmeblkp, saddr); 3624 sfmmu_copytte(&osfhmep->hme_tte, &tte); 3625 3626 if (!TTE_IS_VALID(&tte)) { 3627 SFMMU_HASH_UNLOCK(hmebp); 3628 kmem_cache_free(pa_hment_cache, pahmep); 3629 *rpfn = PFN_INVALID; 3630 return (ENXIO); 3631 } 3632 3633 /* 3634 * Make sure the boundaries for the callback fall within this 3635 * single mapping. 3636 */ 3637 baseaddr = (caddr_t)get_hblk_base(hmeblkp); 3638 ASSERT(saddr >= baseaddr); 3639 if (eaddr > saddr + TTEBYTES(TTE_CSZ(&tte))) { 3640 SFMMU_HASH_UNLOCK(hmebp); 3641 kmem_cache_free(pa_hment_cache, pahmep); 3642 *rpfn = PFN_INVALID; 3643 return (ERANGE); 3644 } 3645 3646 pfn = sfmmu_ttetopfn(&tte, vaddr); 3647 3648 /* 3649 * The pfn may not have a page_t underneath in which case we 3650 * just return it. This can happen if we are doing I/O to a 3651 * static portion of the kernel's address space, for instance. 3652 */ 3653 pp = osfhmep->hme_page; 3654 if (pp == NULL) { 3655 SFMMU_HASH_UNLOCK(hmebp); 3656 kmem_cache_free(pa_hment_cache, pahmep); 3657 *rpfn = pfn; 3658 if (cookiep) 3659 *cookiep = HAC_COOKIE_NONE; 3660 return (0); 3661 } 3662 ASSERT(pp == PP_PAGEROOT(pp)); 3663 3664 vp = pp->p_vnode; 3665 off = pp->p_offset; 3666 3667 pml = sfmmu_mlist_enter(pp); 3668 3669 if (flags & HAC_PAGELOCK) { 3670 if (!page_trylock(pp, SE_SHARED)) { 3671 /* 3672 * Somebody is holding SE_EXCL lock. Might 3673 * even be hat_page_relocate(). Drop all 3674 * our locks, lookup the page in &kvp, and 3675 * retry. If it doesn't exist in &kvp, then 3676 * we must be dealing with a kernel mapped 3677 * page which doesn't actually belong to 3678 * segkmem so we punt. 3679 */ 3680 sfmmu_mlist_exit(pml); 3681 SFMMU_HASH_UNLOCK(hmebp); 3682 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED); 3683 if (pp == NULL) { 3684 kmem_cache_free(pa_hment_cache, pahmep); 3685 *rpfn = pfn; 3686 if (cookiep) 3687 *cookiep = HAC_COOKIE_NONE; 3688 return (0); 3689 } 3690 page_unlock(pp); 3691 goto rehash; 3692 } 3693 locked = 1; 3694 } 3695 3696 if (!PAGE_LOCKED(pp) && !panicstr) 3697 panic("hat_add_callback: page 0x%p not locked", pp); 3698 3699 if (osfhmep->hme_page != pp || pp->p_vnode != vp || 3700 pp->p_offset != off) { 3701 /* 3702 * The page moved before we got our hands on it. 
Drop 3703 * all the locks and try again. 3704 */ 3705 ASSERT((flags & HAC_PAGELOCK) != 0); 3706 sfmmu_mlist_exit(pml); 3707 SFMMU_HASH_UNLOCK(hmebp); 3708 page_unlock(pp); 3709 locked = 0; 3710 goto rehash; 3711 } 3712 3713 if (vp != &kvp) { 3714 /* 3715 * This is not a segkmem page but another page which 3716 * has been kernel mapped. It had better have at least 3717 * a share lock on it. Return the pfn. 3718 */ 3719 sfmmu_mlist_exit(pml); 3720 SFMMU_HASH_UNLOCK(hmebp); 3721 if (locked) 3722 page_unlock(pp); 3723 kmem_cache_free(pa_hment_cache, pahmep); 3724 ASSERT(PAGE_LOCKED(pp)); 3725 *rpfn = pfn; 3726 if (cookiep) 3727 *cookiep = HAC_COOKIE_NONE; 3728 return (0); 3729 } 3730 3731 /* 3732 * Setup this pa_hment and link its embedded dummy sf_hment into 3733 * the mapping list. 3734 */ 3735 pp->p_share++; 3736 pahmep->cb_id = callback_id; 3737 pahmep->addr = vaddr; 3738 pahmep->len = len; 3739 pahmep->refcnt = 1; 3740 pahmep->flags = 0; 3741 pahmep->pvt = pvt; 3742 3743 sfhmep->hme_tte.ll = 0; 3744 sfhmep->hme_data = pahmep; 3745 sfhmep->hme_prev = osfhmep; 3746 sfhmep->hme_next = osfhmep->hme_next; 3747 3748 if (osfhmep->hme_next) 3749 osfhmep->hme_next->hme_prev = sfhmep; 3750 3751 osfhmep->hme_next = sfhmep; 3752 3753 sfmmu_mlist_exit(pml); 3754 SFMMU_HASH_UNLOCK(hmebp); 3755 3756 if (locked) 3757 page_unlock(pp); 3758 3759 *rpfn = pfn; 3760 if (cookiep) 3761 *cookiep = (void *)pahmep; 3762 3763 return (0); 3764 } 3765 3766 /* 3767 * Remove the relocation callbacks from the specified addr/len. 3768 */ 3769 void 3770 hat_delete_callback(caddr_t vaddr, uint_t len, void *pvt, uint_t flags, 3771 void *cookie) 3772 { 3773 struct hmehash_bucket *hmebp; 3774 hmeblk_tag hblktag; 3775 struct hme_blk *hmeblkp; 3776 int hmeshift, hashno; 3777 caddr_t saddr; 3778 struct pa_hment *pahmep; 3779 struct sf_hment *sfhmep, *osfhmep; 3780 kmutex_t *pml; 3781 tte_t tte; 3782 page_t *pp; 3783 vnode_t *vp; 3784 u_offset_t off; 3785 int locked = 0; 3786 3787 /* 3788 * If the cookie is HAC_COOKIE_NONE then there is no pa_hment to 3789 * remove so just return. 3790 */ 3791 if (cookie == HAC_COOKIE_NONE || IS_KPM_ADDR(vaddr)) 3792 return; 3793 3794 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK); 3795 3796 rehash: 3797 /* Find the mapping(s) for this page */ 3798 for (hashno = TTE64K, hmeblkp = NULL; 3799 hmeblkp == NULL && hashno <= mmu_hashcnt; 3800 hashno++) { 3801 hmeshift = HME_HASH_SHIFT(hashno); 3802 hblktag.htag_id = ksfmmup; 3803 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift); 3804 hblktag.htag_rehash = hashno; 3805 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift); 3806 3807 SFMMU_HASH_LOCK(hmebp); 3808 3809 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 3810 3811 if (hmeblkp == NULL) 3812 SFMMU_HASH_UNLOCK(hmebp); 3813 } 3814 3815 if (hmeblkp == NULL) 3816 return; 3817 3818 HBLKTOHME(osfhmep, hmeblkp, saddr); 3819 3820 sfmmu_copytte(&osfhmep->hme_tte, &tte); 3821 if (!TTE_IS_VALID(&tte)) { 3822 SFMMU_HASH_UNLOCK(hmebp); 3823 return; 3824 } 3825 3826 pp = osfhmep->hme_page; 3827 if (pp == NULL) { 3828 SFMMU_HASH_UNLOCK(hmebp); 3829 ASSERT(cookie == NULL); 3830 return; 3831 } 3832 3833 vp = pp->p_vnode; 3834 off = pp->p_offset; 3835 3836 pml = sfmmu_mlist_enter(pp); 3837 3838 if (flags & HAC_PAGELOCK) { 3839 if (!page_trylock(pp, SE_SHARED)) { 3840 /* 3841 * Somebody is holding SE_EXCL lock. Might 3842 * even be hat_page_relocate(). Drop all 3843 * our locks, lookup the page in &kvp, and 3844 * retry. 
If it doesn't exist in &kvp, then 3845 * we must be dealing with a kernel mapped 3846 * page which doesn't actually belong to 3847 * segkmem so we punt. 3848 */ 3849 sfmmu_mlist_exit(pml); 3850 SFMMU_HASH_UNLOCK(hmebp); 3851 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED); 3852 if (pp == NULL) { 3853 ASSERT(cookie == NULL); 3854 return; 3855 } 3856 page_unlock(pp); 3857 goto rehash; 3858 } 3859 locked = 1; 3860 } 3861 3862 ASSERT(PAGE_LOCKED(pp)); 3863 3864 if (osfhmep->hme_page != pp || pp->p_vnode != vp || 3865 pp->p_offset != off) { 3866 /* 3867 * The page moved before we got our hands on it. Drop 3868 * all the locks and try again. 3869 */ 3870 ASSERT((flags & HAC_PAGELOCK) != 0); 3871 sfmmu_mlist_exit(pml); 3872 SFMMU_HASH_UNLOCK(hmebp); 3873 page_unlock(pp); 3874 locked = 0; 3875 goto rehash; 3876 } 3877 3878 if (vp != &kvp) { 3879 /* 3880 * This is not a segkmem page but another page which 3881 * has been kernel mapped. 3882 */ 3883 sfmmu_mlist_exit(pml); 3884 SFMMU_HASH_UNLOCK(hmebp); 3885 if (locked) 3886 page_unlock(pp); 3887 ASSERT(cookie == NULL); 3888 return; 3889 } 3890 3891 if (cookie != NULL) { 3892 pahmep = (struct pa_hment *)cookie; 3893 sfhmep = &pahmep->sfment; 3894 } else { 3895 for (sfhmep = pp->p_mapping; sfhmep != NULL; 3896 sfhmep = sfhmep->hme_next) { 3897 3898 /* 3899 * skip va<->pa mappings 3900 */ 3901 if (!IS_PAHME(sfhmep)) 3902 continue; 3903 3904 pahmep = sfhmep->hme_data; 3905 ASSERT(pahmep != NULL); 3906 3907 /* 3908 * if pa_hment matches, remove it 3909 */ 3910 if ((pahmep->pvt == pvt) && 3911 (pahmep->addr == vaddr) && 3912 (pahmep->len == len)) { 3913 break; 3914 } 3915 } 3916 } 3917 3918 if (sfhmep == NULL) { 3919 if (!panicstr) { 3920 panic("hat_delete_callback: pa_hment not found, pp %p", 3921 (void *)pp); 3922 } 3923 return; 3924 } 3925 3926 /* 3927 * Note: at this point a valid kernel mapping must still be 3928 * present on this page. 3929 */ 3930 pp->p_share--; 3931 if (pp->p_share <= 0) 3932 panic("hat_delete_callback: zero p_share"); 3933 3934 if (--pahmep->refcnt == 0) { 3935 if (pahmep->flags != 0) 3936 panic("hat_delete_callback: pa_hment is busy"); 3937 3938 /* 3939 * Remove sfhmep from the mapping list for the page. 3940 */ 3941 if (sfhmep->hme_prev) { 3942 sfhmep->hme_prev->hme_next = sfhmep->hme_next; 3943 } else { 3944 pp->p_mapping = sfhmep->hme_next; 3945 } 3946 3947 if (sfhmep->hme_next) 3948 sfhmep->hme_next->hme_prev = sfhmep->hme_prev; 3949 3950 sfmmu_mlist_exit(pml); 3951 SFMMU_HASH_UNLOCK(hmebp); 3952 3953 if (locked) 3954 page_unlock(pp); 3955 3956 kmem_cache_free(pa_hment_cache, pahmep); 3957 return; 3958 } 3959 3960 sfmmu_mlist_exit(pml); 3961 SFMMU_HASH_UNLOCK(hmebp); 3962 if (locked) 3963 page_unlock(pp); 3964 } 3965 3966 /* 3967 * hat_probe returns 1 if the translation for the address 'addr' is 3968 * loaded, zero otherwise. 3969 * 3970 * hat_probe should be used only for advisory purposes because it may 3971 * occasionally return the wrong value. The implementation must guarantee that 3972 * returning the wrong value is a very rare event. hat_probe is used 3973 * to implement optimizations in the segment drivers.
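 *
 * A hedged sketch of that use (the caller shown is hypothetical; the
 * real segment drivers live elsewhere): probe first, and only take
 * the slow path when no translation is loaded:
 *
 *	if (hat_probe(as->a_hat, addr) == 0)
 *		(void) hat_memload(as->a_hat, addr, pp, prot, HAT_LOAD);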
3974 * 3975 */ 3976 int 3977 hat_probe(struct hat *sfmmup, caddr_t addr) 3978 { 3979 pfn_t pfn; 3980 tte_t tte; 3981 3982 ASSERT(sfmmup != NULL); 3983 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 3984 3985 ASSERT((sfmmup == ksfmmup) || 3986 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 3987 3988 if (sfmmup == ksfmmup) { 3989 while ((pfn = sfmmu_vatopfn(addr, sfmmup, &tte)) 3990 == PFN_SUSPENDED) { 3991 sfmmu_vatopfn_suspended(addr, sfmmup, &tte); 3992 } 3993 } else { 3994 pfn = sfmmu_uvatopfn(addr, sfmmup); 3995 } 3996 3997 if (pfn != PFN_INVALID) 3998 return (1); 3999 else 4000 return (0); 4001 } 4002 4003 ssize_t 4004 hat_getpagesize(struct hat *sfmmup, caddr_t addr) 4005 { 4006 tte_t tte; 4007 4008 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 4009 4010 sfmmu_gettte(sfmmup, addr, &tte); 4011 if (TTE_IS_VALID(&tte)) { 4012 return (TTEBYTES(TTE_CSZ(&tte))); 4013 } 4014 return (-1); 4015 } 4016 4017 static void 4018 sfmmu_gettte(struct hat *sfmmup, caddr_t addr, tte_t *ttep) 4019 { 4020 struct hmehash_bucket *hmebp; 4021 hmeblk_tag hblktag; 4022 int hmeshift, hashno = 1; 4023 struct hme_blk *hmeblkp, *list = NULL; 4024 struct sf_hment *sfhmep; 4025 4026 /* support for ISM */ 4027 ism_map_t *ism_map; 4028 ism_blk_t *ism_blkp; 4029 int i; 4030 sfmmu_t *ism_hatid = NULL; 4031 sfmmu_t *locked_hatid = NULL; 4032 4033 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 4034 4035 ism_blkp = sfmmup->sfmmu_iblk; 4036 if (ism_blkp) { 4037 sfmmu_ismhat_enter(sfmmup, 0); 4038 locked_hatid = sfmmup; 4039 } 4040 while (ism_blkp && ism_hatid == NULL) { 4041 ism_map = ism_blkp->iblk_maps; 4042 for (i = 0; ism_map[i].imap_ismhat && i < ISM_MAP_SLOTS; i++) { 4043 if (addr >= ism_start(ism_map[i]) && 4044 addr < ism_end(ism_map[i])) { 4045 sfmmup = ism_hatid = ism_map[i].imap_ismhat; 4046 addr = (caddr_t)(addr - 4047 ism_start(ism_map[i])); 4048 break; 4049 } 4050 } 4051 ism_blkp = ism_blkp->iblk_next; 4052 } 4053 if (locked_hatid) { 4054 sfmmu_ismhat_exit(locked_hatid, 0); 4055 } 4056 4057 hblktag.htag_id = sfmmup; 4058 ttep->ll = 0; 4059 4060 do { 4061 hmeshift = HME_HASH_SHIFT(hashno); 4062 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 4063 hblktag.htag_rehash = hashno; 4064 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 4065 4066 SFMMU_HASH_LOCK(hmebp); 4067 4068 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 4069 if (hmeblkp != NULL) { 4070 HBLKTOHME(sfhmep, hmeblkp, addr); 4071 sfmmu_copytte(&sfhmep->hme_tte, ttep); 4072 SFMMU_HASH_UNLOCK(hmebp); 4073 break; 4074 } 4075 SFMMU_HASH_UNLOCK(hmebp); 4076 hashno++; 4077 } while (HME_REHASH(sfmmup) && (hashno <= mmu_hashcnt)); 4078 4079 sfmmu_hblks_list_purge(&list); 4080 } 4081 4082 uint_t 4083 hat_getattr(struct hat *sfmmup, caddr_t addr, uint_t *attr) 4084 { 4085 tte_t tte; 4086 4087 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 4088 4089 sfmmu_gettte(sfmmup, addr, &tte); 4090 if (TTE_IS_VALID(&tte)) { 4091 *attr = sfmmu_ptov_attr(&tte); 4092 return (0); 4093 } 4094 *attr = 0; 4095 return ((uint_t)0xffffffff); 4096 } 4097 4098 /* 4099 * Enables more attributes on specified address range (ie. logical OR) 4100 */ 4101 void 4102 hat_setattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr) 4103 { 4104 if (hat->sfmmu_xhat_provider) { 4105 XHAT_SETATTR(hat, addr, len, attr); 4106 return; 4107 } else { 4108 /* 4109 * This must be a CPU HAT. 
If the address space has 4110 * XHATs attached, change attributes for all of them, 4111 * just in case 4112 */ 4113 ASSERT(hat->sfmmu_as != NULL); 4114 if (hat->sfmmu_as->a_xhat != NULL) 4115 xhat_setattr_all(hat->sfmmu_as, addr, len, attr); 4116 } 4117 4118 sfmmu_chgattr(hat, addr, len, attr, SFMMU_SETATTR); 4119 } 4120 4121 /* 4122 * Assigns attributes to the specified address range. All the attributes 4123 * are specified. 4124 */ 4125 void 4126 hat_chgattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr) 4127 { 4128 if (hat->sfmmu_xhat_provider) { 4129 XHAT_CHGATTR(hat, addr, len, attr); 4130 return; 4131 } else { 4132 /* 4133 * This must be a CPU HAT. If the address space has 4134 * XHATs attached, change attributes for all of them, 4135 * just in case 4136 */ 4137 ASSERT(hat->sfmmu_as != NULL); 4138 if (hat->sfmmu_as->a_xhat != NULL) 4139 xhat_chgattr_all(hat->sfmmu_as, addr, len, attr); 4140 } 4141 4142 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CHGATTR); 4143 } 4144 4145 /* 4146 * Remove attributes on the specified address range (ie. logical NAND) 4147 */ 4148 void 4149 hat_clrattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr) 4150 { 4151 if (hat->sfmmu_xhat_provider) { 4152 XHAT_CLRATTR(hat, addr, len, attr); 4153 return; 4154 } else { 4155 /* 4156 * This must be a CPU HAT. If the address space has 4157 * XHATs attached, change attributes for all of them, 4158 * just in case 4159 */ 4160 ASSERT(hat->sfmmu_as != NULL); 4161 if (hat->sfmmu_as->a_xhat != NULL) 4162 xhat_clrattr_all(hat->sfmmu_as, addr, len, attr); 4163 } 4164 4165 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CLRATTR); 4166 } 4167 4168 /* 4169 * Change attributes on an address range to that specified by attr and mode. 4170 */ 4171 static void 4172 sfmmu_chgattr(struct hat *sfmmup, caddr_t addr, size_t len, uint_t attr, 4173 int mode) 4174 { 4175 struct hmehash_bucket *hmebp; 4176 hmeblk_tag hblktag; 4177 int hmeshift, hashno = 1; 4178 struct hme_blk *hmeblkp, *list = NULL; 4179 caddr_t endaddr; 4180 cpuset_t cpuset; 4181 demap_range_t dmr; 4182 4183 CPUSET_ZERO(cpuset); 4184 4185 ASSERT((sfmmup == ksfmmup) || 4186 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 4187 ASSERT((len & MMU_PAGEOFFSET) == 0); 4188 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0); 4189 4190 if ((attr & PROT_USER) && (mode != SFMMU_CLRATTR) && 4191 ((addr + len) > (caddr_t)USERLIMIT)) { 4192 panic("user addr %p in kernel space", 4193 (void *)addr); 4194 } 4195 4196 endaddr = addr + len; 4197 hblktag.htag_id = sfmmup; 4198 DEMAP_RANGE_INIT(sfmmup, &dmr); 4199 4200 while (addr < endaddr) { 4201 hmeshift = HME_HASH_SHIFT(hashno); 4202 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 4203 hblktag.htag_rehash = hashno; 4204 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 4205 4206 SFMMU_HASH_LOCK(hmebp); 4207 4208 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 4209 if (hmeblkp != NULL) { 4210 /* 4211 * We've encountered a shadow hmeblk so skip the range 4212 * of the next smaller mapping size.
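 *
 * (For example, a shadow hit at the 512K hash, hashno == 2, advances
 * addr to the next 64K boundary and restarts the probe at hashno 1;
 * the smaller hashes were already probed and missed for the current
 * addr, so only the region beyond that boundary needs another look.)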
4213 */ 4214 if (hmeblkp->hblk_shw_bit) { 4215 ASSERT(sfmmup != ksfmmup); 4216 ASSERT(hashno > 1); 4217 addr = (caddr_t)P2END((uintptr_t)addr, 4218 TTEBYTES(hashno - 1)); 4219 } else { 4220 addr = sfmmu_hblk_chgattr(sfmmup, 4221 hmeblkp, addr, endaddr, &dmr, attr, mode); 4222 } 4223 SFMMU_HASH_UNLOCK(hmebp); 4224 hashno = 1; 4225 continue; 4226 } 4227 SFMMU_HASH_UNLOCK(hmebp); 4228 4229 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 4230 /* 4231 * We have traversed the whole list and rehashed 4232 * if necessary without finding the address to chgattr. 4233 * This is ok, so we increment the address by the 4234 * smallest hmeblk range for kernel mappings or for 4235 * user mappings with no large pages, and the largest 4236 * hmeblk range, to account for shadow hmeblks, for 4237 * user mappings with large pages and continue. 4238 */ 4239 if (sfmmup == ksfmmup) 4240 addr = (caddr_t)P2END((uintptr_t)addr, 4241 TTEBYTES(1)); 4242 else 4243 addr = (caddr_t)P2END((uintptr_t)addr, 4244 TTEBYTES(hashno)); 4245 hashno = 1; 4246 } else { 4247 hashno++; 4248 } 4249 } 4250 4251 sfmmu_hblks_list_purge(&list); 4252 DEMAP_RANGE_FLUSH(&dmr); 4253 cpuset = sfmmup->sfmmu_cpusran; 4254 xt_sync(cpuset); 4255 } 4256 4257 /* 4258 * This function changes attributes on a range of addresses in an hmeblk. 4259 * It returns the next address that needs its attributes changed. 4260 * It should be called with the hash lock held. 4261 * XXX It should be possible to optimize chgattr by not flushing every time but 4262 * on the other hand: 4263 * 1. do one flush crosscall. 4264 * 2. only flush if we are increasing permissions (make sure this will work) 4265 */ 4266 static caddr_t 4267 sfmmu_hblk_chgattr(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 4268 caddr_t endaddr, demap_range_t *dmrp, uint_t attr, int mode) 4269 { 4270 tte_t tte, tteattr, tteflags, ttemod; 4271 struct sf_hment *sfhmep; 4272 int ttesz; 4273 struct page *pp = NULL; 4274 kmutex_t *pml, *pmtx; 4275 int ret; 4276 int use_demap_range; 4277 #if defined(SF_ERRATA_57) 4278 int check_exec; 4279 #endif 4280 4281 ASSERT(in_hblk_range(hmeblkp, addr)); 4282 ASSERT(hmeblkp->hblk_shw_bit == 0); 4283 4284 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 4285 ttesz = get_hblk_ttesz(hmeblkp); 4286 4287 /* 4288 * Flush the current demap region if addresses have been 4289 * skipped or the page size doesn't match. 4290 */ 4291 use_demap_range = (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp)); 4292 if (use_demap_range) { 4293 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 4294 } else { 4295 DEMAP_RANGE_FLUSH(dmrp); 4296 } 4297 4298 tteattr.ll = sfmmu_vtop_attr(attr, mode, &tteflags); 4299 #if defined(SF_ERRATA_57) 4300 check_exec = (sfmmup != ksfmmup) && 4301 AS_TYPE_64BIT(sfmmup->sfmmu_as) && 4302 TTE_IS_EXECUTABLE(&tteattr); 4303 #endif 4304 HBLKTOHME(sfhmep, hmeblkp, addr); 4305 while (addr < endaddr) { 4306 sfmmu_copytte(&sfhmep->hme_tte, &tte); 4307 if (TTE_IS_VALID(&tte)) { 4308 if ((tte.ll & tteflags.ll) == tteattr.ll) { 4309 /* 4310 * if the new attr is the same as the old, 4311 * continue 4312 */ 4313 goto next_addr; 4314 } 4315 if (!TTE_IS_WRITABLE(&tteattr)) { 4316 /* 4317 * make sure we clear the hw modify bit if we 4318 * are removing write permission 4319 */ 4320 tteflags.tte_intlo |= TTE_HWWR_INT; 4321 } 4322 4323 pml = NULL; 4324 pp = sfhmep->hme_page; 4325 if (pp) { 4326 pml = sfmmu_mlist_enter(pp); 4327 } 4328 4329 if (pp != sfhmep->hme_page) { 4330 /* 4331 * tte must have been unloaded.
4332 */ 4333 ASSERT(pml); 4334 sfmmu_mlist_exit(pml); 4335 continue; 4336 } 4337 4338 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 4339 4340 ttemod = tte; 4341 ttemod.ll = (ttemod.ll & ~tteflags.ll) | tteattr.ll; 4342 ASSERT(TTE_TO_TTEPFN(&ttemod) == TTE_TO_TTEPFN(&tte)); 4343 4344 #if defined(SF_ERRATA_57) 4345 if (check_exec && addr < errata57_limit) 4346 ttemod.tte_exec_perm = 0; 4347 #endif 4348 ret = sfmmu_modifytte_try(&tte, &ttemod, 4349 &sfhmep->hme_tte); 4350 4351 if (ret < 0) { 4352 /* tte changed underneath us */ 4353 if (pml) { 4354 sfmmu_mlist_exit(pml); 4355 } 4356 continue; 4357 } 4358 4359 if (tteflags.tte_intlo & TTE_HWWR_INT) { 4360 /* 4361 * need to sync if we are clearing modify bit. 4362 */ 4363 sfmmu_ttesync(sfmmup, addr, &tte, pp); 4364 } 4365 4366 if (pp && PP_ISRO(pp)) { 4367 if (tteattr.tte_intlo & TTE_WRPRM_INT) { 4368 pmtx = sfmmu_page_enter(pp); 4369 PP_CLRRO(pp); 4370 sfmmu_page_exit(pmtx); 4371 } 4372 } 4373 4374 if (ret > 0 && use_demap_range) { 4375 DEMAP_RANGE_MARKPG(dmrp, addr); 4376 } else if (ret > 0) { 4377 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 4378 } 4379 4380 if (pml) { 4381 sfmmu_mlist_exit(pml); 4382 } 4383 } 4384 next_addr: 4385 addr += TTEBYTES(ttesz); 4386 sfhmep++; 4387 DEMAP_RANGE_NEXTPG(dmrp); 4388 } 4389 return (addr); 4390 } 4391 4392 /* 4393 * This routine converts virtual attributes to physical ones. It will 4394 * update the tteflags field with the tte mask corresponding to the attributes 4395 * affected and it returns the new attributes. It will also clear the modify 4396 * bit if we are taking away write permission. This is necessary since the 4397 * modify bit is the hardware permission bit and we need to clear it in order 4398 * to detect write faults. 4399 */ 4400 static uint64_t 4401 sfmmu_vtop_attr(uint_t attr, int mode, tte_t *ttemaskp) 4402 { 4403 tte_t ttevalue; 4404 4405 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 4406 4407 switch (mode) { 4408 case SFMMU_CHGATTR: 4409 /* all attributes specified */ 4410 ttevalue.tte_inthi = MAKE_TTEATTR_INTHI(attr); 4411 ttevalue.tte_intlo = MAKE_TTEATTR_INTLO(attr); 4412 ttemaskp->tte_inthi = TTEINTHI_ATTR; 4413 ttemaskp->tte_intlo = TTEINTLO_ATTR; 4414 break; 4415 case SFMMU_SETATTR: 4416 ASSERT(!(attr & ~HAT_PROT_MASK)); 4417 ttemaskp->ll = 0; 4418 ttevalue.ll = 0; 4419 /* 4420 * a valid tte implies exec and read for sfmmu 4421 * so no need to do anything about them. 4422 * since privileged access implies user access, 4423 * PROT_USER doesn't make sense either.
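 *
 * (Worked example: SFMMU_SETATTR with PROT_WRITE yields a mask and a
 * value that both contain TTE_WRPRM_INT, so sfmmu_hblk_chgattr()
 * simply ORs the writable bit into the tte and leaves every other
 * attribute bit alone.)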
4424 */ 4425 if (attr & PROT_WRITE) { 4426 ttemaskp->tte_intlo |= TTE_WRPRM_INT; 4427 ttevalue.tte_intlo |= TTE_WRPRM_INT; 4428 } 4429 break; 4430 case SFMMU_CLRATTR: 4431 /* attributes will be nand with current ones */ 4432 if (attr & ~(PROT_WRITE | PROT_USER)) { 4433 panic("sfmmu: attr %x not supported", attr); 4434 } 4435 ttemaskp->ll = 0; 4436 ttevalue.ll = 0; 4437 if (attr & PROT_WRITE) { 4438 /* clear both writable and modify bit */ 4439 ttemaskp->tte_intlo |= TTE_WRPRM_INT | TTE_HWWR_INT; 4440 } 4441 if (attr & PROT_USER) { 4442 ttemaskp->tte_intlo |= TTE_PRIV_INT; 4443 ttevalue.tte_intlo |= TTE_PRIV_INT; 4444 } 4445 break; 4446 default: 4447 panic("sfmmu_vtop_attr: bad mode %x", mode); 4448 } 4449 ASSERT(TTE_TO_TTEPFN(&ttevalue) == 0); 4450 return (ttevalue.ll); 4451 } 4452 4453 static uint_t 4454 sfmmu_ptov_attr(tte_t *ttep) 4455 { 4456 uint_t attr; 4457 4458 ASSERT(TTE_IS_VALID(ttep)); 4459 4460 attr = PROT_READ; 4461 4462 if (TTE_IS_WRITABLE(ttep)) { 4463 attr |= PROT_WRITE; 4464 } 4465 if (TTE_IS_EXECUTABLE(ttep)) { 4466 attr |= PROT_EXEC; 4467 } 4468 if (!TTE_IS_PRIVILEGED(ttep)) { 4469 attr |= PROT_USER; 4470 } 4471 if (TTE_IS_NFO(ttep)) { 4472 attr |= HAT_NOFAULT; 4473 } 4474 if (TTE_IS_NOSYNC(ttep)) { 4475 attr |= HAT_NOSYNC; 4476 } 4477 if (TTE_IS_SIDEFFECT(ttep)) { 4478 attr |= SFMMU_SIDEFFECT; 4479 } 4480 if (!TTE_IS_VCACHEABLE(ttep)) { 4481 attr |= SFMMU_UNCACHEVTTE; 4482 } 4483 if (!TTE_IS_PCACHEABLE(ttep)) { 4484 attr |= SFMMU_UNCACHEPTTE; 4485 } 4486 return (attr); 4487 } 4488 4489 /* 4490 * hat_chgprot is a deprecated hat call. New segment drivers 4491 * should store all attributes and use hat_*attr calls. 4492 * 4493 * Change the protections in the virtual address range 4494 * given to the specified virtual protection. If vprot is ~PROT_WRITE, 4495 * then remove write permission, leaving the other 4496 * permissions unchanged. If vprot is ~PROT_USER, remove user permissions. 4497 * 4498 */ 4499 void 4500 hat_chgprot(struct hat *sfmmup, caddr_t addr, size_t len, uint_t vprot) 4501 { 4502 struct hmehash_bucket *hmebp; 4503 hmeblk_tag hblktag; 4504 int hmeshift, hashno = 1; 4505 struct hme_blk *hmeblkp, *list = NULL; 4506 caddr_t endaddr; 4507 cpuset_t cpuset; 4508 demap_range_t dmr; 4509 4510 ASSERT((len & MMU_PAGEOFFSET) == 0); 4511 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0); 4512 4513 if (sfmmup->sfmmu_xhat_provider) { 4514 XHAT_CHGPROT(sfmmup, addr, len, vprot); 4515 return; 4516 } else { 4517 /* 4518 * This must be a CPU HAT. If the address space has 4519 * XHATs attached, change attributes for all of them, 4520 * just in case 4521 */ 4522 ASSERT(sfmmup->sfmmu_as != NULL); 4523 if (sfmmup->sfmmu_as->a_xhat != NULL) 4524 xhat_chgprot_all(sfmmup->sfmmu_as, addr, len, vprot); 4525 } 4526 4527 CPUSET_ZERO(cpuset); 4528 4529 if ((vprot != (uint_t)~PROT_WRITE) && (vprot & PROT_USER) && 4530 ((addr + len) > (caddr_t)USERLIMIT)) { 4531 panic("user addr %p vprot %x in kernel space", 4532 (void *)addr, vprot); 4533 } 4534 endaddr = addr + len; 4535 hblktag.htag_id = sfmmup; 4536 DEMAP_RANGE_INIT(sfmmup, &dmr); 4537 4538 while (addr < endaddr) { 4539 hmeshift = HME_HASH_SHIFT(hashno); 4540 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 4541 hblktag.htag_rehash = hashno; 4542 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 4543 4544 SFMMU_HASH_LOCK(hmebp); 4545 4546 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 4547 if (hmeblkp != NULL) { 4548 /* 4549 * We've encountered a shadow hmeblk so skip the range 4550 * of the next smaller mapping size. 
4551 */ 4552 if (hmeblkp->hblk_shw_bit) { 4553 ASSERT(sfmmup != ksfmmup); 4554 ASSERT(hashno > 1); 4555 addr = (caddr_t)P2END((uintptr_t)addr, 4556 TTEBYTES(hashno - 1)); 4557 } else { 4558 addr = sfmmu_hblk_chgprot(sfmmup, hmeblkp, 4559 addr, endaddr, &dmr, vprot); 4560 } 4561 SFMMU_HASH_UNLOCK(hmebp); 4562 hashno = 1; 4563 continue; 4564 } 4565 SFMMU_HASH_UNLOCK(hmebp); 4566 4567 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 4568 /* 4569 * We have traversed the whole list and rehashed 4570 * if necessary without finding the address to chgprot. 4571 * This is ok so we increment the address by the 4572 * smallest hmeblk range for kernel mappings and the 4573 * largest hmeblk range, to account for shadow hmeblks, 4574 * for user mappings and continue. 4575 */ 4576 if (sfmmup == ksfmmup) 4577 addr = (caddr_t)P2END((uintptr_t)addr, 4578 TTEBYTES(1)); 4579 else 4580 addr = (caddr_t)P2END((uintptr_t)addr, 4581 TTEBYTES(hashno)); 4582 hashno = 1; 4583 } else { 4584 hashno++; 4585 } 4586 } 4587 4588 sfmmu_hblks_list_purge(&list); 4589 DEMAP_RANGE_FLUSH(&dmr); 4590 cpuset = sfmmup->sfmmu_cpusran; 4591 xt_sync(cpuset); 4592 } 4593 4594 /* 4595 * This function changes protections on a range of addresses in an hmeblk. 4596 * It returns the next address that needs its protections changed. 4597 * It should be called with the hash lock held. 4598 * XXX It should be possible to optimize chgprot by not flushing every time but 4599 * on the other hand: 4600 * 1. do one flush crosscall. 4601 * 2. only flush if we are increasing permissions (make sure this will work) 4602 */ 4603 static caddr_t 4604 sfmmu_hblk_chgprot(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 4605 caddr_t endaddr, demap_range_t *dmrp, uint_t vprot) 4606 { 4607 uint_t pprot; 4608 tte_t tte, ttemod; 4609 struct sf_hment *sfhmep; 4610 uint_t tteflags; 4611 int ttesz; 4612 struct page *pp = NULL; 4613 kmutex_t *pml, *pmtx; 4614 int ret; 4615 int use_demap_range; 4616 #if defined(SF_ERRATA_57) 4617 int check_exec; 4618 #endif 4619 4620 ASSERT(in_hblk_range(hmeblkp, addr)); 4621 ASSERT(hmeblkp->hblk_shw_bit == 0); 4622 4623 #ifdef DEBUG 4624 if (get_hblk_ttesz(hmeblkp) != TTE8K && 4625 (endaddr < get_hblk_endaddr(hmeblkp))) { 4626 panic("sfmmu_hblk_chgprot: partial chgprot of large page"); 4627 } 4628 #endif /* DEBUG */ 4629 4630 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 4631 ttesz = get_hblk_ttesz(hmeblkp); 4632 4633 pprot = sfmmu_vtop_prot(vprot, &tteflags); 4634 #if defined(SF_ERRATA_57) 4635 check_exec = (sfmmup != ksfmmup) && 4636 AS_TYPE_64BIT(sfmmup->sfmmu_as) && 4637 ((vprot & PROT_EXEC) == PROT_EXEC); 4638 #endif 4639 HBLKTOHME(sfhmep, hmeblkp, addr); 4640 4641 /* 4642 * Flush the current demap region if addresses have been 4643 * skipped or the page size doesn't match. 4644 */ 4645 use_demap_range = (TTEBYTES(ttesz) == MMU_PAGESIZE); 4646 if (use_demap_range) { 4647 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 4648 } else { 4649 DEMAP_RANGE_FLUSH(dmrp); 4650 } 4651 4652 while (addr < endaddr) { 4653 sfmmu_copytte(&sfhmep->hme_tte, &tte); 4654 if (TTE_IS_VALID(&tte)) { 4655 if (TTE_GET_LOFLAGS(&tte, tteflags) == pprot) { 4656 /* 4657 * if the new protection is the same as the old, 4658 * continue 4659 */ 4660 goto next_addr; 4661 } 4662 pml = NULL; 4663 pp = sfhmep->hme_page; 4664 if (pp) { 4665 pml = sfmmu_mlist_enter(pp); 4666 } 4667 if (pp != sfhmep->hme_page) { 4668 /* 4669 * tte must have been unloaded 4670 * underneath us.
Recheck 4671 */ 4672 ASSERT(pml); 4673 sfmmu_mlist_exit(pml); 4674 continue; 4675 } 4676 4677 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 4678 4679 ttemod = tte; 4680 TTE_SET_LOFLAGS(&ttemod, tteflags, pprot); 4681 #if defined(SF_ERRATA_57) 4682 if (check_exec && addr < errata57_limit) 4683 ttemod.tte_exec_perm = 0; 4684 #endif 4685 ret = sfmmu_modifytte_try(&tte, &ttemod, 4686 &sfhmep->hme_tte); 4687 4688 if (ret < 0) { 4689 /* tte changed underneath us */ 4690 if (pml) { 4691 sfmmu_mlist_exit(pml); 4692 } 4693 continue; 4694 } 4695 4696 if (tteflags & TTE_HWWR_INT) { 4697 /* 4698 * need to sync if we are clearing modify bit. 4699 */ 4700 sfmmu_ttesync(sfmmup, addr, &tte, pp); 4701 } 4702 4703 if (pp && PP_ISRO(pp)) { 4704 if (pprot & TTE_WRPRM_INT) { 4705 pmtx = sfmmu_page_enter(pp); 4706 PP_CLRRO(pp); 4707 sfmmu_page_exit(pmtx); 4708 } 4709 } 4710 4711 if (ret > 0 && use_demap_range) { 4712 DEMAP_RANGE_MARKPG(dmrp, addr); 4713 } else if (ret > 0) { 4714 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 4715 } 4716 4717 if (pml) { 4718 sfmmu_mlist_exit(pml); 4719 } 4720 } 4721 next_addr: 4722 addr += TTEBYTES(ttesz); 4723 sfhmep++; 4724 DEMAP_RANGE_NEXTPG(dmrp); 4725 } 4726 return (addr); 4727 } 4728 4729 /* 4730 * This routine is deprecated and should only be used by hat_chgprot. 4731 * The correct routine is sfmmu_vtop_attr. 4732 * This routine converts virtual page protections to physical ones. It will 4733 * update the tteflags field with the tte mask corresponding to the protections 4734 * affected and it returns the new protections. It will also clear the modify 4735 * bit if we are taking away write permission. This is necessary since the 4736 * modify bit is the hardware permission bit and we need to clear it in order 4737 * to detect write faults. 4738 * It accepts the following special protections: 4739 * ~PROT_WRITE = remove write permissions. 4740 * ~PROT_USER = remove user permissions. 
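 *
 * (Worked example: vprot == (PROT_USER | PROT_READ) returns 0 with
 * *tteflagsp set to TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT,
 * i.e. the privileged, writable and hw-modify bits are all cleared,
 * leaving a read-only user mapping.)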
4741 */ 4742 static uint_t 4743 sfmmu_vtop_prot(uint_t vprot, uint_t *tteflagsp) 4744 { 4745 if (vprot == (uint_t)~PROT_WRITE) { 4746 *tteflagsp = TTE_WRPRM_INT | TTE_HWWR_INT; 4747 return (0); /* will cause wrprm to be cleared */ 4748 } 4749 if (vprot == (uint_t)~PROT_USER) { 4750 *tteflagsp = TTE_PRIV_INT; 4751 return (0); /* will cause privprm to be cleared */ 4752 } 4753 if ((vprot == 0) || (vprot == PROT_USER) || 4754 ((vprot & PROT_ALL) != vprot)) { 4755 panic("sfmmu_vtop_prot -- bad prot %x", vprot); 4756 } 4757 4758 switch (vprot) { 4759 case (PROT_READ): 4760 case (PROT_EXEC): 4761 case (PROT_EXEC | PROT_READ): 4762 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT; 4763 return (TTE_PRIV_INT); /* set prv and clr wrt */ 4764 case (PROT_WRITE): 4765 case (PROT_WRITE | PROT_READ): 4766 case (PROT_EXEC | PROT_WRITE): 4767 case (PROT_EXEC | PROT_WRITE | PROT_READ): 4768 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT; 4769 return (TTE_PRIV_INT | TTE_WRPRM_INT); /* set prv and wrt */ 4770 case (PROT_USER | PROT_READ): 4771 case (PROT_USER | PROT_EXEC): 4772 case (PROT_USER | PROT_EXEC | PROT_READ): 4773 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT; 4774 return (0); /* clr prv and wrt */ 4775 case (PROT_USER | PROT_WRITE): 4776 case (PROT_USER | PROT_WRITE | PROT_READ): 4777 case (PROT_USER | PROT_EXEC | PROT_WRITE): 4778 case (PROT_USER | PROT_EXEC | PROT_WRITE | PROT_READ): 4779 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT; 4780 return (TTE_WRPRM_INT); /* clr prv and set wrt */ 4781 default: 4782 panic("sfmmu_vtop_prot -- bad prot %x", vprot); 4783 } 4784 return (0); 4785 } 4786 4787 /* 4788 * Alternate unload for very large virtual ranges. With a true 64 bit VA, 4789 * the normal algorithm would take too long for a very large VA range with 4790 * few real mappings. This routine just walks thru all HMEs in the global 4791 * hash table to find and remove mappings. 4792 */ 4793 static void 4794 hat_unload_large_virtual( 4795 struct hat *sfmmup, 4796 caddr_t startaddr, 4797 size_t len, 4798 uint_t flags, 4799 hat_callback_t *callback) 4800 { 4801 struct hmehash_bucket *hmebp; 4802 struct hme_blk *hmeblkp; 4803 struct hme_blk *pr_hblk = NULL; 4804 struct hme_blk *nx_hblk; 4805 struct hme_blk *list = NULL; 4806 int i; 4807 uint64_t hblkpa, prevpa, nx_pa; 4808 demap_range_t dmr, *dmrp; 4809 cpuset_t cpuset; 4810 caddr_t endaddr = startaddr + len; 4811 caddr_t sa; 4812 caddr_t ea; 4813 caddr_t cb_sa[MAX_CB_ADDR]; 4814 caddr_t cb_ea[MAX_CB_ADDR]; 4815 int addr_cnt = 0; 4816 int a = 0; 4817 4818 if (sfmmup->sfmmu_free) { 4819 dmrp = NULL; 4820 } else { 4821 dmrp = &dmr; 4822 DEMAP_RANGE_INIT(sfmmup, dmrp); 4823 } 4824 4825 /* 4826 * Loop through all the hash buckets of HME blocks looking for matches. 
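 *
 * (The walk below costs O(UHMEHASH_SZ) buckets no matter how large
 * len is, which is the win over the len >> TTE_PAGE_SHIFT(TTE4M)
 * hash probes the normal path would make; see the threshold test in
 * hat_unload_callback().)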
4827 */ 4828 for (i = 0; i <= UHMEHASH_SZ; i++) { 4829 hmebp = &uhme_hash[i]; 4830 SFMMU_HASH_LOCK(hmebp); 4831 hmeblkp = hmebp->hmeblkp; 4832 hblkpa = hmebp->hmeh_nextpa; 4833 prevpa = 0; 4834 pr_hblk = NULL; 4835 while (hmeblkp) { 4836 nx_hblk = hmeblkp->hblk_next; 4837 nx_pa = hmeblkp->hblk_nextpa; 4838 4839 /* 4840 * skip if not this context, if a shadow block or 4841 * if the mapping is not in the requested range 4842 */ 4843 if (hmeblkp->hblk_tag.htag_id != sfmmup || 4844 hmeblkp->hblk_shw_bit || 4845 (sa = (caddr_t)get_hblk_base(hmeblkp)) >= endaddr || 4846 (ea = get_hblk_endaddr(hmeblkp)) <= startaddr) { 4847 pr_hblk = hmeblkp; 4848 prevpa = hblkpa; 4849 goto next_block; 4850 } 4851 4852 /* 4853 * unload if there are any current valid mappings 4854 */ 4855 if (hmeblkp->hblk_vcnt != 0 || 4856 hmeblkp->hblk_hmecnt != 0) 4857 (void) sfmmu_hblk_unload(sfmmup, hmeblkp, 4858 sa, ea, dmrp, flags); 4859 4860 /* 4861 * on unmap we also release the HME block itself, once 4862 * all mappings are gone. 4863 */ 4864 if ((flags & HAT_UNLOAD_UNMAP) != 0 && 4865 !hmeblkp->hblk_vcnt && 4866 !hmeblkp->hblk_hmecnt) { 4867 ASSERT(!hmeblkp->hblk_lckcnt); 4868 sfmmu_hblk_hash_rm(hmebp, hmeblkp, 4869 prevpa, pr_hblk); 4870 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 4871 } else { 4872 pr_hblk = hmeblkp; 4873 prevpa = hblkpa; 4874 } 4875 4876 if (callback == NULL) 4877 goto next_block; 4878 4879 /* 4880 * HME blocks may span more than one page, but we may be 4881 * unmapping only one page, so check for a smaller range 4882 * for the callback 4883 */ 4884 if (sa < startaddr) 4885 sa = startaddr; 4886 if (--ea > endaddr) 4887 ea = endaddr - 1; 4888 4889 cb_sa[addr_cnt] = sa; 4890 cb_ea[addr_cnt] = ea; 4891 if (++addr_cnt == MAX_CB_ADDR) { 4892 if (dmrp != NULL) { 4893 DEMAP_RANGE_FLUSH(dmrp); 4894 cpuset = sfmmup->sfmmu_cpusran; 4895 xt_sync(cpuset); 4896 } 4897 4898 for (a = 0; a < MAX_CB_ADDR; ++a) { 4899 callback->hcb_start_addr = cb_sa[a]; 4900 callback->hcb_end_addr = cb_ea[a]; 4901 callback->hcb_function(callback); 4902 } 4903 addr_cnt = 0; 4904 } 4905 4906 next_block: 4907 hmeblkp = nx_hblk; 4908 hblkpa = nx_pa; 4909 } 4910 SFMMU_HASH_UNLOCK(hmebp); 4911 } 4912 4913 sfmmu_hblks_list_purge(&list); 4914 if (dmrp != NULL) { 4915 DEMAP_RANGE_FLUSH(dmrp); 4916 cpuset = sfmmup->sfmmu_cpusran; 4917 xt_sync(cpuset); 4918 } 4919 4920 for (a = 0; a < addr_cnt; ++a) { 4921 callback->hcb_start_addr = cb_sa[a]; 4922 callback->hcb_end_addr = cb_ea[a]; 4923 callback->hcb_function(callback); 4924 } 4925 4926 /* 4927 * Check TSB and TLB page sizes if the process isn't exiting. 4928 */ 4929 if (!sfmmup->sfmmu_free) 4930 sfmmu_check_page_sizes(sfmmup, 0); 4931 } 4932 4933 /* 4934 * Unload all the mappings in the range [addr..addr+len). addr and len must 4935 * be MMU_PAGESIZE aligned. 
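 *
 * A hedged sketch of supplying the optional callback (the notifier
 * shown is hypothetical; the HAT fills in hcb_start_addr and
 * hcb_end_addr before each invocation):
 *
 *	hat_callback_t cb;
 *
 *	cb.hcb_function = my_unload_notify;
 *	hat_unload_callback(as->a_hat, addr, len, HAT_UNLOAD_UNMAP, &cb);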
4936 */ 4937 4938 extern struct seg *segkmap; 4939 #define ISSEGKMAP(sfmmup, addr) (sfmmup == ksfmmup && \ 4940 segkmap->s_base <= (addr) && (addr) < (segkmap->s_base + segkmap->s_size)) 4941 4942 4943 void 4944 hat_unload_callback( 4945 struct hat *sfmmup, 4946 caddr_t addr, 4947 size_t len, 4948 uint_t flags, 4949 hat_callback_t *callback) 4950 { 4951 struct hmehash_bucket *hmebp; 4952 hmeblk_tag hblktag; 4953 int hmeshift, hashno, iskernel; 4954 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL; 4955 caddr_t endaddr; 4956 cpuset_t cpuset; 4957 uint64_t hblkpa, prevpa; 4958 int addr_count = 0; 4959 int a; 4960 caddr_t cb_start_addr[MAX_CB_ADDR]; 4961 caddr_t cb_end_addr[MAX_CB_ADDR]; 4962 int issegkmap = ISSEGKMAP(sfmmup, addr); 4963 demap_range_t dmr, *dmrp; 4964 4965 if (sfmmup->sfmmu_xhat_provider) { 4966 XHAT_UNLOAD_CALLBACK(sfmmup, addr, len, flags, callback); 4967 return; 4968 } else { 4969 /* 4970 * This must be a CPU HAT. If the address space has 4971 * XHATs attached, unload the mappings for all of them, 4972 * just in case 4973 */ 4974 ASSERT(sfmmup->sfmmu_as != NULL); 4975 if (sfmmup->sfmmu_as->a_xhat != NULL) 4976 xhat_unload_callback_all(sfmmup->sfmmu_as, addr, 4977 len, flags, callback); 4978 } 4979 4980 ASSERT((sfmmup == ksfmmup) || (flags & HAT_UNLOAD_OTHER) || \ 4981 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 4982 4983 ASSERT(sfmmup != NULL); 4984 ASSERT((len & MMU_PAGEOFFSET) == 0); 4985 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 4986 4987 /* 4988 * Probing through a large VA range (say 63 bits) will be slow, even 4989 * at 4 Meg steps between the probes. So, when the virtual address range 4990 * is very large, search the HME entries for what to unload. 4991 * 4992 * len >> TTE_PAGE_SHIFT(TTE4M) is the # of 4Meg probes we'd need 4993 * 4994 * UHMEHASH_SZ is the number of hash buckets to examine 4995 * 4996 */ 4997 if (sfmmup != KHATID && (len >> TTE_PAGE_SHIFT(TTE4M)) > UHMEHASH_SZ) { 4998 hat_unload_large_virtual(sfmmup, addr, len, flags, callback); 4999 return; 5000 } 5001 5002 CPUSET_ZERO(cpuset); 5003 5004 /* 5005 * If the process is exiting, we can save a lot of fuss since 5006 * we'll flush the TLB when we free the ctx anyway. 5007 */ 5008 if (sfmmup->sfmmu_free) 5009 dmrp = NULL; 5010 else 5011 dmrp = &dmr; 5012 5013 DEMAP_RANGE_INIT(sfmmup, dmrp); 5014 endaddr = addr + len; 5015 hblktag.htag_id = sfmmup; 5016 5017 /* 5018 * It is likely for the VM to call unload over a wide range of 5019 * addresses that are actually very sparsely populated by 5020 * translations. In order to speed this up the sfmmu hat supports 5021 * the concept of shadow hmeblks. Dummy large page hmeblks that 5022 * correspond to actual small translations are allocated at tteload 5023 * time and are referred to as shadow hmeblks. Now, during unload 5024 * time, we first check if we have a shadow hmeblk for that 5025 * translation. The absence of one means the corresponding address 5026 * range is empty and can be skipped. 5027 * 5028 * The kernel is an exception to the above statement and that is why 5029 * we don't use shadow hmeblks and hash starting from the smallest 5030 * page size.
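 *
 * (Concretely, in the code below: for KHATID the search starts at the
 * 64K hash and works upward, while user hats start at the largest
 * supported size, TTE256M or TTE4M, and let shadow hits walk the size
 * back down.)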
5031 */ 5032 if (sfmmup == KHATID) { 5033 iskernel = 1; 5034 hashno = TTE64K; 5035 } else { 5036 iskernel = 0; 5037 if (mmu_page_sizes == max_mmu_page_sizes) { 5038 hashno = TTE256M; 5039 } else { 5040 hashno = TTE4M; 5041 } 5042 } 5043 while (addr < endaddr) { 5044 hmeshift = HME_HASH_SHIFT(hashno); 5045 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 5046 hblktag.htag_rehash = hashno; 5047 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 5048 5049 SFMMU_HASH_LOCK(hmebp); 5050 5051 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, hblkpa, pr_hblk, 5052 prevpa, &list); 5053 if (hmeblkp == NULL) { 5054 /* 5055 * didn't find an hmeblk, so skip the appropriate 5056 * address range. 5057 */ 5058 SFMMU_HASH_UNLOCK(hmebp); 5059 if (iskernel) { 5060 if (hashno < mmu_hashcnt) { 5061 hashno++; 5062 continue; 5063 } else { 5064 hashno = TTE64K; 5065 addr = (caddr_t)roundup((uintptr_t)addr 5066 + 1, MMU_PAGESIZE64K); 5067 continue; 5068 } 5069 } 5070 addr = (caddr_t)roundup((uintptr_t)addr + 1, 5071 (1 << hmeshift)); 5072 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 5073 ASSERT(hashno == TTE64K); 5074 continue; 5075 } 5076 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 5077 hashno = TTE512K; 5078 continue; 5079 } 5080 if (mmu_page_sizes == max_mmu_page_sizes) { 5081 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 5082 hashno = TTE4M; 5083 continue; 5084 } 5085 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 5086 hashno = TTE32M; 5087 continue; 5088 } 5089 hashno = TTE256M; 5090 continue; 5091 } else { 5092 hashno = TTE4M; 5093 continue; 5094 } 5095 } 5096 ASSERT(hmeblkp); 5097 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 5098 /* 5099 * If the valid count is zero we can skip the range 5100 * mapped by this hmeblk. 5101 * We free hblks in the case of HAT_UNMAP. HAT_UNMAP 5102 * is used by segment drivers as a hint 5103 * that the mapping resource won't be used any longer. 5104 * The best example of this is during exit(). 5105 */ 5106 addr = (caddr_t)roundup((uintptr_t)addr + 1, 5107 get_hblk_span(hmeblkp)); 5108 if ((flags & HAT_UNLOAD_UNMAP) || 5109 (iskernel && !issegkmap)) { 5110 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, 5111 pr_hblk); 5112 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 5113 } 5114 SFMMU_HASH_UNLOCK(hmebp); 5115 5116 if (iskernel) { 5117 hashno = TTE64K; 5118 continue; 5119 } 5120 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 5121 ASSERT(hashno == TTE64K); 5122 continue; 5123 } 5124 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 5125 hashno = TTE512K; 5126 continue; 5127 } 5128 if (mmu_page_sizes == max_mmu_page_sizes) { 5129 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 5130 hashno = TTE4M; 5131 continue; 5132 } 5133 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 5134 hashno = TTE32M; 5135 continue; 5136 } 5137 hashno = TTE256M; 5138 continue; 5139 } else { 5140 hashno = TTE4M; 5141 continue; 5142 } 5143 } 5144 if (hmeblkp->hblk_shw_bit) { 5145 /* 5146 * If we encounter a shadow hmeblk we know there are 5147 * smaller sized hmeblks mapping the same address space. 5148 * Decrement the hash size and rehash. 5149 */ 5150 ASSERT(sfmmup != KHATID); 5151 hashno--; 5152 SFMMU_HASH_UNLOCK(hmebp); 5153 continue; 5154 } 5155 5156 /* 5157 * track callback address ranges.
5158 * only start a new range when it's not contiguous 5159 */ 5160 if (callback != NULL) { 5161 if (addr_count > 0 && 5162 addr == cb_end_addr[addr_count - 1]) 5163 --addr_count; 5164 else 5165 cb_start_addr[addr_count] = addr; 5166 } 5167 5168 addr = sfmmu_hblk_unload(sfmmup, hmeblkp, addr, endaddr, 5169 dmrp, flags); 5170 5171 if (callback != NULL) 5172 cb_end_addr[addr_count++] = addr; 5173 5174 if (((flags & HAT_UNLOAD_UNMAP) || (iskernel && !issegkmap)) && 5175 !hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 5176 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, 5177 pr_hblk); 5178 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 5179 } 5180 SFMMU_HASH_UNLOCK(hmebp); 5181 5182 /* 5183 * Notify our caller as to exactly which pages 5184 * have been unloaded. We do these in clumps, 5185 * to minimize the number of xt_sync()s that need to occur. 5186 */ 5187 if (callback != NULL && addr_count == MAX_CB_ADDR) { 5188 DEMAP_RANGE_FLUSH(dmrp); 5189 if (dmrp != NULL) { 5190 cpuset = sfmmup->sfmmu_cpusran; 5191 xt_sync(cpuset); 5192 } 5193 5194 for (a = 0; a < MAX_CB_ADDR; ++a) { 5195 callback->hcb_start_addr = cb_start_addr[a]; 5196 callback->hcb_end_addr = cb_end_addr[a]; 5197 callback->hcb_function(callback); 5198 } 5199 addr_count = 0; 5200 } 5201 if (iskernel) { 5202 hashno = TTE64K; 5203 continue; 5204 } 5205 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 5206 ASSERT(hashno == TTE64K); 5207 continue; 5208 } 5209 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 5210 hashno = TTE512K; 5211 continue; 5212 } 5213 if (mmu_page_sizes == max_mmu_page_sizes) { 5214 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 5215 hashno = TTE4M; 5216 continue; 5217 } 5218 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 5219 hashno = TTE32M; 5220 continue; 5221 } 5222 hashno = TTE256M; 5223 } else { 5224 hashno = TTE4M; 5225 } 5226 } 5227 5228 sfmmu_hblks_list_purge(&list); 5229 DEMAP_RANGE_FLUSH(dmrp); 5230 if (dmrp != NULL) { 5231 cpuset = sfmmup->sfmmu_cpusran; 5232 xt_sync(cpuset); 5233 } 5234 if (callback && addr_count != 0) { 5235 for (a = 0; a < addr_count; ++a) { 5236 callback->hcb_start_addr = cb_start_addr[a]; 5237 callback->hcb_end_addr = cb_end_addr[a]; 5238 callback->hcb_function(callback); 5239 } 5240 } 5241 5242 /* 5243 * Check TSB and TLB page sizes if the process isn't exiting. 5244 */ 5245 if (!sfmmup->sfmmu_free) 5246 sfmmu_check_page_sizes(sfmmup, 0); 5247 } 5248 5249 /* 5250 * Unload all the mappings in the range [addr..addr+len). addr and len must 5251 * be MMU_PAGESIZE aligned. 5252 */ 5253 void 5254 hat_unload(struct hat *sfmmup, caddr_t addr, size_t len, uint_t flags) 5255 { 5256 if (sfmmup->sfmmu_xhat_provider) { 5257 XHAT_UNLOAD(sfmmup, addr, len, flags); 5258 return; 5259 } 5260 hat_unload_callback(sfmmup, addr, len, flags, NULL); 5261 } 5262 5263 5264 /* 5265 * Find the largest mapping size for this page. 5266 */ 5267 int 5268 fnd_mapping_sz(page_t *pp) 5269 { 5270 int sz; 5271 int p_index; 5272 5273 p_index = PP_MAPINDEX(pp); 5274 5275 sz = 0; 5276 p_index >>= 1; /* don't care about 8K bit */ 5277 for (; p_index; p_index >>= 1) { 5278 sz++; 5279 } 5280 5281 return (sz); 5282 } 5283 5284 /* 5285 * This function unloads a range of addresses for an hmeblk. 5286 * It returns the next address to be unloaded. 5287 * It should be called with the hash lock held. 
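 *
 * (TLB/cache demap work is either batched into dmrp or issued per
 * page inside this function; the final DEMAP_RANGE_FLUSH() and
 * xt_sync() remain the caller's responsibility.)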
5288 */ 5289 static caddr_t 5290 sfmmu_hblk_unload(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 5291 caddr_t endaddr, demap_range_t *dmrp, uint_t flags) 5292 { 5293 tte_t tte, ttemod; 5294 struct sf_hment *sfhmep; 5295 int ttesz; 5296 long ttecnt; 5297 page_t *pp; 5298 kmutex_t *pml; 5299 int ret; 5300 int use_demap_range; 5301 5302 ASSERT(in_hblk_range(hmeblkp, addr)); 5303 ASSERT(!hmeblkp->hblk_shw_bit); 5304 #ifdef DEBUG 5305 if (get_hblk_ttesz(hmeblkp) != TTE8K && 5306 (endaddr < get_hblk_endaddr(hmeblkp))) { 5307 panic("sfmmu_hblk_unload: partial unload of large page"); 5308 } 5309 #endif /* DEBUG */ 5310 5311 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 5312 ttesz = get_hblk_ttesz(hmeblkp); 5313 5314 use_demap_range = (do_virtual_coloring && 5315 ((dmrp == NULL) || TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp))); 5316 if (use_demap_range) { 5317 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 5318 } else { 5319 DEMAP_RANGE_FLUSH(dmrp); 5320 } 5321 ttecnt = 0; 5322 HBLKTOHME(sfhmep, hmeblkp, addr); 5323 5324 while (addr < endaddr) { 5325 pml = NULL; 5326 again: 5327 sfmmu_copytte(&sfhmep->hme_tte, &tte); 5328 if (TTE_IS_VALID(&tte)) { 5329 pp = sfhmep->hme_page; 5330 if (pp && pml == NULL) { 5331 pml = sfmmu_mlist_enter(pp); 5332 } 5333 5334 /* 5335 * Verify if hme still points to 'pp' now that 5336 * we have p_mapping lock. 5337 */ 5338 if (sfhmep->hme_page != pp) { 5339 if (pp != NULL && sfhmep->hme_page != NULL) { 5340 if (pml) { 5341 sfmmu_mlist_exit(pml); 5342 } 5343 /* Re-start this iteration. */ 5344 continue; 5345 } 5346 ASSERT((pp != NULL) && 5347 (sfhmep->hme_page == NULL)); 5348 goto tte_unloaded; 5349 } 5350 5351 /* 5352 * This point on we have both HASH and p_mapping 5353 * lock. 5354 */ 5355 ASSERT(pp == sfhmep->hme_page); 5356 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 5357 5358 /* 5359 * We need to loop on modify tte because it is 5360 * possible for pagesync to come along and 5361 * change the software bits beneath us. 5362 * 5363 * Page_unload can also invalidate the tte after 5364 * we read tte outside of p_mapping lock. 5365 */ 5366 ttemod = tte; 5367 5368 TTE_SET_INVALID(&ttemod); 5369 ret = sfmmu_modifytte_try(&tte, &ttemod, 5370 &sfhmep->hme_tte); 5371 5372 if (ret <= 0) { 5373 if (TTE_IS_VALID(&tte)) { 5374 goto again; 5375 } else { 5376 /* 5377 * We read in a valid pte, but it 5378 * is unloaded by page_unload. 5379 * hme_page has become NULL and 5380 * we hold no p_mapping lock. 5381 */ 5382 ASSERT(pp == NULL && pml == NULL); 5383 goto tte_unloaded; 5384 } 5385 } 5386 5387 if (!(flags & HAT_UNLOAD_NOSYNC)) { 5388 sfmmu_ttesync(sfmmup, addr, &tte, pp); 5389 } 5390 5391 /* 5392 * Ok- we invalidated the tte. Do the rest of the job. 5393 */ 5394 ttecnt++; 5395 5396 if (flags & HAT_UNLOAD_UNLOCK) { 5397 ASSERT(hmeblkp->hblk_lckcnt > 0); 5398 atomic_add_16(&hmeblkp->hblk_lckcnt, -1); 5399 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK); 5400 } 5401 5402 /* 5403 * Normally we would need to flush the page 5404 * from the virtual cache at this point in 5405 * order to prevent a potential cache alias 5406 * inconsistency. 5407 * The particular scenario we need to worry 5408 * about is: 5409 * Given: va1 and va2 are two virtual address 5410 * that alias and map the same physical 5411 * address. 5412 * 1. mapping exists from va1 to pa and data 5413 * has been read into the cache. 5414 * 2. unload va1. 5415 * 3. load va2 and modify data using va2. 5416 * 4 unload va2. 5417 * 5. load va1 and reference data. 
Unless we 5418 * flush the data cache when we unload we will 5419 * get stale data. 5420 * Fortunately, page coloring eliminates the 5421 * above scenario by remembering the color a 5422 * physical page was last or is currently 5423 * mapped to. Now, we delay the flush until 5424 * the loading of translations. Only when the 5425 * new translation is of a different color 5426 * are we forced to flush. 5427 */ 5428 if (use_demap_range) { 5429 /* 5430 * Mark this page as needing a demap. 5431 */ 5432 DEMAP_RANGE_MARKPG(dmrp, addr); 5433 } else { 5434 if (do_virtual_coloring) { 5435 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 5436 sfmmup->sfmmu_free, 0); 5437 } else { 5438 pfn_t pfnum; 5439 5440 pfnum = TTE_TO_PFN(addr, &tte); 5441 sfmmu_tlbcache_demap(addr, sfmmup, 5442 hmeblkp, pfnum, sfmmup->sfmmu_free, 5443 FLUSH_NECESSARY_CPUS, 5444 CACHE_FLUSH, 0); 5445 } 5446 } 5447 5448 if (pp) { 5449 /* 5450 * Remove the hment from the mapping list 5451 */ 5452 ASSERT(hmeblkp->hblk_hmecnt > 0); 5453 5454 /* 5455 * Again, we cannot 5456 * ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS); 5457 */ 5458 HME_SUB(sfhmep, pp); 5459 membar_stst(); 5460 atomic_add_16(&hmeblkp->hblk_hmecnt, -1); 5461 } 5462 5463 ASSERT(hmeblkp->hblk_vcnt > 0); 5464 atomic_add_16(&hmeblkp->hblk_vcnt, -1); 5465 5466 ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt || 5467 !hmeblkp->hblk_lckcnt); 5468 5469 #ifdef VAC 5470 if (pp && (pp->p_nrm & (P_KPMC | P_KPMS | P_TNC))) { 5471 if (PP_ISTNC(pp)) { 5472 /* 5473 * If page was temporary 5474 * uncached, try to recache 5475 * it. Note that HME_SUB() was 5476 * called above so p_index and 5477 * mlist had been updated. 5478 */ 5479 conv_tnc(pp, ttesz); 5480 } else if (pp->p_mapping == NULL) { 5481 ASSERT(kpm_enable); 5482 /* 5483 * Page is marked to be in VAC conflict 5484 * to an existing kpm mapping and/or is 5485 * kpm mapped using only the regular 5486 * pagesize. 5487 */ 5488 sfmmu_kpm_hme_unload(pp); 5489 } 5490 } 5491 #endif /* VAC */ 5492 } else if ((pp = sfhmep->hme_page) != NULL) { 5493 /* 5494 * TTE is invalid but the hme 5495 * still exists. let pageunload 5496 * complete its job. 5497 */ 5498 ASSERT(pml == NULL); 5499 pml = sfmmu_mlist_enter(pp); 5500 if (sfhmep->hme_page != NULL) { 5501 sfmmu_mlist_exit(pml); 5502 pml = NULL; 5503 goto again; 5504 } 5505 ASSERT(sfhmep->hme_page == NULL); 5506 } else if (hmeblkp->hblk_hmecnt != 0) { 5507 /* 5508 * pageunload may have not finished decrementing 5509 * hblk_vcnt and hblk_hmecnt. Find page_t if any and 5510 * wait for pageunload to finish. Rely on pageunload 5511 * to decrement hblk_hmecnt after hblk_vcnt. 5512 */ 5513 pfn_t pfn = TTE_TO_TTEPFN(&tte); 5514 ASSERT(pml == NULL); 5515 if (pf_is_memory(pfn)) { 5516 pp = page_numtopp_nolock(pfn); 5517 if (pp != NULL) { 5518 pml = sfmmu_mlist_enter(pp); 5519 sfmmu_mlist_exit(pml); 5520 pml = NULL; 5521 } 5522 } 5523 } 5524 5525 tte_unloaded: 5526 /* 5527 * At this point, the tte we are looking at 5528 * should be unloaded, and hme has been unlinked 5529 * from page too. This is important because in 5530 * pageunload, it does ttesync() then HME_SUB. 5531 * We need to make sure HME_SUB has been completed 5532 * so we know ttesync() has been completed. Otherwise, 5533 * at exit time, after return from hat layer, VM will 5534 * release as structure which hat_setstat() (called 5535 * by ttesync()) needs. 
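 *
 * (The "as structure" above is the process's struct as; hat_setstat()
 * records the rm statistics into it, which is why it must not be
 * freed while a ttesync() can still be in flight.)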
5536 */ 5537 #ifdef DEBUG 5538 { 5539 tte_t dtte; 5540 5541 ASSERT(sfhmep->hme_page == NULL); 5542 5543 sfmmu_copytte(&sfhmep->hme_tte, &dtte); 5544 ASSERT(!TTE_IS_VALID(&dtte)); 5545 } 5546 #endif 5547 5548 if (pml) { 5549 sfmmu_mlist_exit(pml); 5550 } 5551 5552 addr += TTEBYTES(ttesz); 5553 sfhmep++; 5554 DEMAP_RANGE_NEXTPG(dmrp); 5555 } 5556 if (ttecnt > 0) 5557 atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -ttecnt); 5558 return (addr); 5559 } 5560 5561 /* 5562 * Synchronize all the mappings in the range [addr..addr+len). 5563 * Can be called with clearflag having two states: 5564 * HAT_SYNC_DONTZERO means just return the rm stats 5565 * HAT_SYNC_ZERORM means zero rm bits in the tte and return the stats 5566 */ 5567 void 5568 hat_sync(struct hat *sfmmup, caddr_t addr, size_t len, uint_t clearflag) 5569 { 5570 struct hmehash_bucket *hmebp; 5571 hmeblk_tag hblktag; 5572 int hmeshift, hashno = 1; 5573 struct hme_blk *hmeblkp, *list = NULL; 5574 caddr_t endaddr; 5575 cpuset_t cpuset; 5576 5577 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 5578 ASSERT((sfmmup == ksfmmup) || 5579 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 5580 ASSERT((len & MMU_PAGEOFFSET) == 0); 5581 ASSERT((clearflag == HAT_SYNC_DONTZERO) || 5582 (clearflag == HAT_SYNC_ZERORM)); 5583 5584 CPUSET_ZERO(cpuset); 5585 5586 endaddr = addr + len; 5587 hblktag.htag_id = sfmmup; 5588 /* 5589 * Spitfire supports 4 page sizes. 5590 * Most pages are expected to be of the smallest page 5591 * size (8K) and these will not need to be rehashed. 64K 5592 * pages also don't need to be rehashed because an hmeblk 5593 * spans 64K of address space. 512K pages might need 1 rehash 5594 * and 4M pages 2 rehashes. 5595 */ 5596 while (addr < endaddr) { 5597 hmeshift = HME_HASH_SHIFT(hashno); 5598 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 5599 hblktag.htag_rehash = hashno; 5600 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 5601 5602 SFMMU_HASH_LOCK(hmebp); 5603 5604 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 5605 if (hmeblkp != NULL) { 5606 /* 5607 * We've encountered a shadow hmeblk so skip the range 5608 * of the next smaller mapping size. 5609 */ 5610 if (hmeblkp->hblk_shw_bit) { 5611 ASSERT(sfmmup != ksfmmup); 5612 ASSERT(hashno > 1); 5613 addr = (caddr_t)P2END((uintptr_t)addr, 5614 TTEBYTES(hashno - 1)); 5615 } else { 5616 addr = sfmmu_hblk_sync(sfmmup, hmeblkp, 5617 addr, endaddr, clearflag); 5618 } 5619 SFMMU_HASH_UNLOCK(hmebp); 5620 hashno = 1; 5621 continue; 5622 } 5623 SFMMU_HASH_UNLOCK(hmebp); 5624 5625 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 5626 /* 5627 * We have traversed the whole list and rehashed 5628 * if necessary without finding the address to sync. 5629 * This is ok so we increment the address by the 5630 * smallest hmeblk range for kernel mappings and the 5631 * largest hmeblk range, to account for shadow hmeblks, 5632 * for user mappings and continue.
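 *
 * (TTEBYTES(1) below is 64K, the span of the smallest, 8K-tte,
 * hmeblk; a kernel miss therefore advances one such span, while a
 * user miss advances by TTEBYTES(hashno), the full span just probed.)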
5633 */ 5634 if (sfmmup == ksfmmup) 5635 addr = (caddr_t)P2END((uintptr_t)addr, 5636 TTEBYTES(1)); 5637 else 5638 addr = (caddr_t)P2END((uintptr_t)addr, 5639 TTEBYTES(hashno)); 5640 hashno = 1; 5641 } else { 5642 hashno++; 5643 } 5644 } 5645 sfmmu_hblks_list_purge(&list); 5646 cpuset = sfmmup->sfmmu_cpusran; 5647 xt_sync(cpuset); 5648 } 5649 5650 static caddr_t 5651 sfmmu_hblk_sync(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 5652 caddr_t endaddr, int clearflag) 5653 { 5654 tte_t tte, ttemod; 5655 struct sf_hment *sfhmep; 5656 int ttesz; 5657 struct page *pp; 5658 kmutex_t *pml; 5659 int ret; 5660 5661 ASSERT(hmeblkp->hblk_shw_bit == 0); 5662 5663 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 5664 5665 ttesz = get_hblk_ttesz(hmeblkp); 5666 HBLKTOHME(sfhmep, hmeblkp, addr); 5667 5668 while (addr < endaddr) { 5669 sfmmu_copytte(&sfhmep->hme_tte, &tte); 5670 if (TTE_IS_VALID(&tte)) { 5671 pml = NULL; 5672 pp = sfhmep->hme_page; 5673 if (pp) { 5674 pml = sfmmu_mlist_enter(pp); 5675 } 5676 if (pp != sfhmep->hme_page) { 5677 /* 5678 * tte must have been unloaded 5679 * underneath us. Recheck. 5680 */ 5681 ASSERT(pml); 5682 sfmmu_mlist_exit(pml); 5683 continue; 5684 } 5685 5686 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 5687 5688 if (clearflag == HAT_SYNC_ZERORM) { 5689 ttemod = tte; 5690 TTE_CLR_RM(&ttemod); 5691 ret = sfmmu_modifytte_try(&tte, &ttemod, 5692 &sfhmep->hme_tte); 5693 if (ret < 0) { 5694 if (pml) { 5695 sfmmu_mlist_exit(pml); 5696 } 5697 continue; 5698 } 5699 5700 if (ret > 0) { 5701 sfmmu_tlb_demap(addr, sfmmup, 5702 hmeblkp, 0, 0); 5703 } 5704 } 5705 sfmmu_ttesync(sfmmup, addr, &tte, pp); 5706 if (pml) { 5707 sfmmu_mlist_exit(pml); 5708 } 5709 } 5710 addr += TTEBYTES(ttesz); 5711 sfhmep++; 5712 } 5713 return (addr); 5714 } 5715 5716 /* 5717 * This function will sync a tte to the page struct and it will 5718 * update the hat stats. Currently it allows us to pass a NULL pp 5719 * and we will simply update the stats. We may want to change this 5720 * so we only keep stats for pages backed by pp's. 5721 */ 5722 static void 5723 sfmmu_ttesync(struct hat *sfmmup, caddr_t addr, tte_t *ttep, page_t *pp) 5724 { 5725 uint_t rm = 0; 5726 int sz; 5727 pgcnt_t npgs; 5728 5729 ASSERT(TTE_IS_VALID(ttep)); 5730 5731 if (TTE_IS_NOSYNC(ttep)) { 5732 return; 5733 } 5734 5735 if (TTE_IS_REF(ttep)) { 5736 rm = P_REF; 5737 } 5738 if (TTE_IS_MOD(ttep)) { 5739 rm |= P_MOD; 5740 } 5741 5742 if (rm == 0) { 5743 return; 5744 } 5745 5746 sz = TTE_CSZ(ttep); 5747 if (sfmmup->sfmmu_rmstat) { 5748 int i; 5749 caddr_t vaddr = addr; 5750 5751 for (i = 0; i < TTEPAGES(sz); i++, vaddr += MMU_PAGESIZE) { 5752 hat_setstat(sfmmup->sfmmu_as, vaddr, MMU_PAGESIZE, rm); 5753 } 5754 5755 } 5756 5757 /* 5758 * XXX I want to use cas to update nrm bits but they 5759 * currently belong in common/vm and not in hat where 5760 * they should be. 5761 * The nrm bits are protected by the same mutex as 5762 * the one that protects the page's mapping list. 5763 */ 5764 if (!pp) 5765 return; 5766 ASSERT(sfmmu_mlist_held(pp)); 5767 /* 5768 * If the tte is for a large page, we need to sync all the 5769 * pages covered by the tte. 5770 */ 5771 if (sz != TTE8K) { 5772 ASSERT(pp->p_szc != 0); 5773 pp = PP_GROUPLEADER(pp, sz); 5774 ASSERT(sfmmu_mlist_held(pp)); 5775 } 5776 5777 /* Get number of pages from tte size.
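 * (For example, a 4M tte covers TTEPAGES(TTE4M) == 512 8K constituent
 * pages, and the loop below applies the same ref/mod update to each.)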
*/ 5778 npgs = TTEPAGES(sz); 5779 5780 do { 5781 ASSERT(pp); 5782 ASSERT(sfmmu_mlist_held(pp)); 5783 if (((rm & P_REF) != 0 && !PP_ISREF(pp)) || 5784 ((rm & P_MOD) != 0 && !PP_ISMOD(pp))) 5785 hat_page_setattr(pp, rm); 5786 5787 /* 5788 * Are we done? If not, we must have a large mapping. 5789 * For large mappings we need to sync the rest of the pages 5790 * covered by this tte; goto the next page. 5791 */ 5792 } while (--npgs > 0 && (pp = PP_PAGENEXT(pp))); 5793 } 5794 5795 /* 5796 * Execute pre-callback handler of each pa_hment linked to pp 5797 * 5798 * Inputs: 5799 * flag: either HAT_PRESUSPEND or HAT_SUSPEND. 5800 * capture_cpus: pointer to return value (below) 5801 * 5802 * Returns: 5803 * Propagates the subsystem callback return values back to the caller; 5804 * returns 0 on success. If capture_cpus is non-NULL, the value returned 5805 * is zero if all of the pa_hments are of a type that do not require 5806 * capturing CPUs prior to suspending the mapping, else it is 1. 5807 */ 5808 static int 5809 hat_pageprocess_precallbacks(struct page *pp, uint_t flag, int *capture_cpus) 5810 { 5811 struct sf_hment *sfhmep; 5812 struct pa_hment *pahmep; 5813 int (*f)(caddr_t, uint_t, uint_t, void *); 5814 int ret; 5815 id_t id; 5816 int locked = 0; 5817 kmutex_t *pml; 5818 5819 ASSERT(PAGE_EXCL(pp)); 5820 if (!sfmmu_mlist_held(pp)) { 5821 pml = sfmmu_mlist_enter(pp); 5822 locked = 1; 5823 } 5824 5825 if (capture_cpus) 5826 *capture_cpus = 0; 5827 5828 top: 5829 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 5830 /* 5831 * skip sf_hments corresponding to VA<->PA mappings; 5832 * for pa_hment's, hme_tte.ll is zero 5833 */ 5834 if (!IS_PAHME(sfhmep)) 5835 continue; 5836 5837 pahmep = sfhmep->hme_data; 5838 ASSERT(pahmep != NULL); 5839 5840 /* 5841 * skip if pre-handler has been called earlier in this loop 5842 */ 5843 if (pahmep->flags & flag) 5844 continue; 5845 5846 id = pahmep->cb_id; 5847 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid); 5848 if (capture_cpus && sfmmu_cb_table[id].capture_cpus != 0) 5849 *capture_cpus = 1; 5850 if ((f = sfmmu_cb_table[id].prehandler) == NULL) { 5851 pahmep->flags |= flag; 5852 continue; 5853 } 5854 5855 /* 5856 * Drop the mapping list lock to avoid locking order issues. 5857 */ 5858 if (locked) 5859 sfmmu_mlist_exit(pml); 5860 5861 ret = f(pahmep->addr, pahmep->len, flag, pahmep->pvt); 5862 if (ret != 0) 5863 return (ret); /* caller must do the cleanup */ 5864 5865 if (locked) { 5866 pml = sfmmu_mlist_enter(pp); 5867 pahmep->flags |= flag; 5868 goto top; 5869 } 5870 5871 pahmep->flags |= flag; 5872 } 5873 5874 if (locked) 5875 sfmmu_mlist_exit(pml); 5876 5877 return (0); 5878 } 5879 5880 /* 5881 * Execute post-callback handler of each pa_hment linked to pp 5882 * 5883 * Same overall assumptions and restrictions apply as for 5884 * hat_pageprocess_precallbacks(). 
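 *
 * (Pairing note: the pre pass sets `flag' in pahmep->flags for each
 * pa_hment whose pre-handler ran; this pass clears that flag and
 * hands the handler the constituent pfn computed from the page's
 * current p_pagenum.)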
5885 */ 5886 static void 5887 hat_pageprocess_postcallbacks(struct page *pp, uint_t flag) 5888 { 5889 pfn_t pgpfn = pp->p_pagenum; 5890 pfn_t pgmask = btop(page_get_pagesize(pp->p_szc)) - 1; 5891 pfn_t newpfn; 5892 struct sf_hment *sfhmep; 5893 struct pa_hment *pahmep; 5894 int (*f)(caddr_t, uint_t, uint_t, void *, pfn_t); 5895 id_t id; 5896 int locked = 0; 5897 kmutex_t *pml; 5898 5899 ASSERT(PAGE_EXCL(pp)); 5900 if (!sfmmu_mlist_held(pp)) { 5901 pml = sfmmu_mlist_enter(pp); 5902 locked = 1; 5903 } 5904 5905 top: 5906 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 5907 /* 5908 * skip sf_hments corresponding to VA<->PA mappings; 5909 * for pa_hment's, hme_tte.ll is zero 5910 */ 5911 if (!IS_PAHME(sfhmep)) 5912 continue; 5913 5914 pahmep = sfhmep->hme_data; 5915 ASSERT(pahmep != NULL); 5916 5917 if ((pahmep->flags & flag) == 0) 5918 continue; 5919 5920 pahmep->flags &= ~flag; 5921 5922 id = pahmep->cb_id; 5923 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid); 5924 if ((f = sfmmu_cb_table[id].posthandler) == NULL) 5925 continue; 5926 5927 /* 5928 * Convert the base page PFN into the constituent PFN 5929 * which is needed by the callback handler. 5930 */ 5931 newpfn = pgpfn | (btop((uintptr_t)pahmep->addr) & pgmask); 5932 5933 /* 5934 * Drop the mapping list lock to avoid locking order issues. 5935 */ 5936 if (locked) 5937 sfmmu_mlist_exit(pml); 5938 5939 if (f(pahmep->addr, pahmep->len, flag, pahmep->pvt, newpfn) 5940 != 0) 5941 panic("sfmmu: posthandler failed"); 5942 5943 if (locked) { 5944 pml = sfmmu_mlist_enter(pp); 5945 goto top; 5946 } 5947 } 5948 5949 if (locked) 5950 sfmmu_mlist_exit(pml); 5951 } 5952 5953 /* 5954 * Suspend locked kernel mapping 5955 */ 5956 void 5957 hat_pagesuspend(struct page *pp) 5958 { 5959 struct sf_hment *sfhmep; 5960 sfmmu_t *sfmmup; 5961 tte_t tte, ttemod; 5962 struct hme_blk *hmeblkp; 5963 caddr_t addr; 5964 int index, cons; 5965 cpuset_t cpuset; 5966 5967 ASSERT(PAGE_EXCL(pp)); 5968 ASSERT(sfmmu_mlist_held(pp)); 5969 5970 mutex_enter(&kpr_suspendlock); 5971 5972 /* 5973 * Call into dtrace to tell it we're about to suspend a 5974 * kernel mapping. This prevents us from running into issues 5975 * with probe context trying to touch a suspended page 5976 * in the relocation codepath itself. 5977 */ 5978 if (dtrace_kreloc_init) 5979 (*dtrace_kreloc_init)(); 5980 5981 index = PP_MAPINDEX(pp); 5982 cons = TTE8K; 5983 5984 retry: 5985 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 5986 5987 if (IS_PAHME(sfhmep)) 5988 continue; 5989 5990 if (get_hblk_ttesz(sfmmu_hmetohblk(sfhmep)) != cons) 5991 continue; 5992 5993 /* 5994 * Loop until we successfully set the suspend bit in 5995 * the TTE. 5996 */ 5997 again: 5998 sfmmu_copytte(&sfhmep->hme_tte, &tte); 5999 ASSERT(TTE_IS_VALID(&tte)); 6000 6001 ttemod = tte; 6002 TTE_SET_SUSPEND(&ttemod); 6003 if (sfmmu_modifytte_try(&tte, &ttemod, 6004 &sfhmep->hme_tte) < 0) 6005 goto again; 6006 6007 /* 6008 * Invalidate TSB entry 6009 */ 6010 hmeblkp = sfmmu_hmetohblk(sfhmep); 6011 6012 sfmmup = hblktosfmmu(hmeblkp); 6013 ASSERT(sfmmup == ksfmmup); 6014 6015 addr = tte_to_vaddr(hmeblkp, tte); 6016 6017 /* 6018 * No need to make sure that the TSB for this sfmmu is 6019 * not being relocated since it is ksfmmup and thus it 6020 * will never be relocated. 
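The unload order below also matters: the TSB entry is invalidated before the TLB demap, so no CPU can refill its TLB from a stale TSB entry after the flush; the remote CPUs are cross-trapped first, then the local CPU is flushed.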
6021 */ 6022 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp); 6023 6024 /* 6025 * Update xcall stats 6026 */ 6027 cpuset = cpu_ready_set; 6028 CPUSET_DEL(cpuset, CPU->cpu_id); 6029 6030 /* LINTED: constant in conditional context */ 6031 SFMMU_XCALL_STATS(ksfmmup); 6032 6033 /* 6034 * Flush TLB entry on remote CPU's 6035 */ 6036 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, 6037 (uint64_t)ksfmmup); 6038 xt_sync(cpuset); 6039 6040 /* 6041 * Flush TLB entry on local CPU 6042 */ 6043 vtag_flushpage(addr, (uint64_t)ksfmmup); 6044 } 6045 6046 while (index != 0) { 6047 index = index >> 1; 6048 if (index != 0) 6049 cons++; 6050 if (index & 0x1) { 6051 pp = PP_GROUPLEADER(pp, cons); 6052 goto retry; 6053 } 6054 } 6055 } 6056 6057 #ifdef DEBUG 6058 6059 #define N_PRLE 1024 6060 struct prle { 6061 page_t *targ; 6062 page_t *repl; 6063 int status; 6064 int pausecpus; 6065 hrtime_t whence; 6066 }; 6067 6068 static struct prle page_relocate_log[N_PRLE]; 6069 static int prl_entry; 6070 static kmutex_t prl_mutex; 6071 6072 #define PAGE_RELOCATE_LOG(t, r, s, p) \ 6073 mutex_enter(&prl_mutex); \ 6074 page_relocate_log[prl_entry].targ = *(t); \ 6075 page_relocate_log[prl_entry].repl = *(r); \ 6076 page_relocate_log[prl_entry].status = (s); \ 6077 page_relocate_log[prl_entry].pausecpus = (p); \ 6078 page_relocate_log[prl_entry].whence = gethrtime(); \ 6079 prl_entry = (prl_entry == (N_PRLE - 1))? 0 : prl_entry + 1; \ 6080 mutex_exit(&prl_mutex); 6081 6082 #else /* !DEBUG */ 6083 #define PAGE_RELOCATE_LOG(t, r, s, p) 6084 #endif 6085 6086 /* 6087 * Core Kernel Page Relocation Algorithm 6088 * 6089 * Input: 6090 * 6091 * target : constituent pages are SE_EXCL locked. 6092 * replacement: constituent pages are SE_EXCL locked. 6093 * 6094 * Output: 6095 * 6096 * nrelocp: number of pages relocated 6097 */ 6098 int 6099 hat_page_relocate(page_t **target, page_t **replacement, spgcnt_t *nrelocp) 6100 { 6101 page_t *targ, *repl; 6102 page_t *tpp, *rpp; 6103 kmutex_t *low, *high; 6104 spgcnt_t npages, i; 6105 page_t *pl = NULL; 6106 int old_pil; 6107 cpuset_t cpuset; 6108 int cap_cpus; 6109 int ret; 6110 6111 if (hat_kpr_enabled == 0 || !kcage_on || PP_ISNORELOC(*target)) { 6112 PAGE_RELOCATE_LOG(target, replacement, EAGAIN, -1); 6113 return (EAGAIN); 6114 } 6115 6116 mutex_enter(&kpr_mutex); 6117 kreloc_thread = curthread; 6118 6119 targ = *target; 6120 repl = *replacement; 6121 ASSERT(repl != NULL); 6122 ASSERT(targ->p_szc == repl->p_szc); 6123 6124 npages = page_get_pagecnt(targ->p_szc); 6125 6126 /* 6127 * unload VA<->PA mappings that are not locked 6128 */ 6129 tpp = targ; 6130 for (i = 0; i < npages; i++) { 6131 (void) hat_pageunload(tpp, SFMMU_KERNEL_RELOC); 6132 tpp++; 6133 } 6134 6135 /* 6136 * Do "presuspend" callbacks, in a context from which we can still 6137 * block as needed. Note that we don't hold the mapping list lock 6138 * of "targ" at this point due to potential locking order issues; 6139 * we assume that between the hat_pageunload() above and holding 6140 * the SE_EXCL lock that the mapping list *cannot* change at this 6141 * point. 6142 */ 6143 ret = hat_pageprocess_precallbacks(targ, HAT_PRESUSPEND, &cap_cpus); 6144 if (ret != 0) { 6145 /* 6146 * EIO translates to fatal error, for all others cleanup 6147 * and return EAGAIN. 
6148 */ 6149 ASSERT(ret != EIO); 6150 hat_pageprocess_postcallbacks(targ, HAT_POSTUNSUSPEND); 6151 PAGE_RELOCATE_LOG(target, replacement, ret, -1); 6152 kreloc_thread = NULL; 6153 mutex_exit(&kpr_mutex); 6154 return (EAGAIN); 6155 } 6156 6157 /* 6158 * acquire p_mapping list lock for both the target and replacement 6159 * root pages. 6160 * 6161 * low and high refer to the need to grab the mlist locks in a 6162 * specific order in order to prevent race conditions. Thus the 6163 * lower lock must be grabbed before the higher lock. 6164 * 6165 * This will block hat_unload's accessing p_mapping list. Since 6166 * we have SE_EXCL lock, hat_memload and hat_pageunload will be 6167 * blocked. Thus, no one else will be accessing the p_mapping list 6168 * while we suspend and reload the locked mapping below. 6169 */ 6170 tpp = targ; 6171 rpp = repl; 6172 sfmmu_mlist_reloc_enter(tpp, rpp, &low, &high); 6173 6174 kpreempt_disable(); 6175 6176 #ifdef VAC 6177 /* 6178 * If the replacement page is of a different virtual color 6179 * than the page it is replacing, we need to handle the VAC 6180 * consistency for it just as we would if we were setting up 6181 * a new mapping to a page. 6182 */ 6183 if ((tpp->p_szc == 0) && (PP_GET_VCOLOR(rpp) != NO_VCOLOR)) { 6184 if (tpp->p_vcolor != rpp->p_vcolor) { 6185 sfmmu_cache_flushcolor(PP_GET_VCOLOR(rpp), 6186 rpp->p_pagenum); 6187 } 6188 } 6189 #endif 6190 6191 /* 6192 * We raise our PIL to 13 so that we don't get captured by 6193 * another CPU or pinned by an interrupt thread. We can't go to 6194 * PIL 14 since the nexus driver(s) may need to interrupt at 6195 * that level in the case of IOMMU pseudo mappings. 6196 */ 6197 cpuset = cpu_ready_set; 6198 CPUSET_DEL(cpuset, CPU->cpu_id); 6199 if (!cap_cpus || CPUSET_ISNULL(cpuset)) { 6200 old_pil = splr(XCALL_PIL); 6201 } else { 6202 old_pil = -1; 6203 xc_attention(cpuset); 6204 } 6205 ASSERT(getpil() == XCALL_PIL); 6206 6207 /* 6208 * Now do suspend callbacks. In the case of an IOMMU mapping 6209 * this will suspend all DMA activity to the page while it is 6210 * being relocated. Since we are well above LOCK_LEVEL and CPUs 6211 * may be captured at this point we should have acquired any needed 6212 * locks in the presuspend callback. 6213 */ 6214 ret = hat_pageprocess_precallbacks(targ, HAT_SUSPEND, NULL); 6215 if (ret != 0) { 6216 repl = targ; 6217 goto suspend_fail; 6218 } 6219 6220 /* 6221 * Raise the PIL yet again, this time to block all high-level 6222 * interrupts on this CPU. This is necessary to prevent an 6223 * interrupt routine from pinning the thread which holds the 6224 * mapping suspended and then touching the suspended page. 6225 * 6226 * Once the page is suspended we also need to be careful to 6227 * avoid calling any functions which touch any seg_kmem memory 6228 * since that memory may be backed by the very page we are 6229 * relocating in here! 6230 */ 6231 hat_pagesuspend(targ); 6232 6233 /* 6234 * Now that we are confident everybody has stopped using this page, 6235 * copy the page contents. Note we use a physical copy to prevent 6236 * locking issues and to avoid fpRAS because we can't handle it in 6237 * this context. 6238 */ 6239 for (i = 0; i < npages; i++, tpp++, rpp++) { 6240 /* 6241 * Copy the contents of the page. 6242 */ 6243 ppcopy_kernel(tpp, rpp); 6244 } 6245 6246 tpp = targ; 6247 rpp = repl; 6248 for (i = 0; i < npages; i++, tpp++, rpp++) { 6249 /* 6250 * Copy attributes. VAC consistency was handled above, 6251 * if required. 
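The hardware ref/mod summary (p_nrm) and the large-page mapping index (p_index) are handed over to the replacement page and cleared on the target; on VAC machines the virtual color travels with the data as well.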
6252 */ 6253 rpp->p_nrm = tpp->p_nrm; 6254 tpp->p_nrm = 0; 6255 rpp->p_index = tpp->p_index; 6256 tpp->p_index = 0; 6257 #ifdef VAC 6258 rpp->p_vcolor = tpp->p_vcolor; 6259 #endif 6260 } 6261 6262 /* 6263 * First, unsuspend the page, if we set the suspend bit, and transfer 6264 * the mapping list from the target page to the replacement page. 6265 * Next process postcallbacks; since pa_hment's are linked only to the 6266 * p_mapping list of root page, we don't iterate over the constituent 6267 * pages. 6268 */ 6269 hat_pagereload(targ, repl); 6270 6271 suspend_fail: 6272 hat_pageprocess_postcallbacks(repl, HAT_UNSUSPEND); 6273 6274 /* 6275 * Now lower our PIL and release any captured CPUs since we 6276 * are out of the "danger zone". After this it will again be 6277 * safe to acquire adaptive mutex locks, or to drop them... 6278 */ 6279 if (old_pil != -1) { 6280 splx(old_pil); 6281 } else { 6282 xc_dismissed(cpuset); 6283 } 6284 6285 kpreempt_enable(); 6286 6287 sfmmu_mlist_reloc_exit(low, high); 6288 6289 /* 6290 * Postsuspend callbacks should drop any locks held across 6291 * the suspend callbacks. As before, we don't hold the mapping 6292 * list lock at this point.. our assumption is that the mapping 6293 * list still can't change due to our holding SE_EXCL lock and 6294 * there being no unlocked mappings left. Hence the restriction 6295 * on calling context to hat_delete_callback() 6296 */ 6297 hat_pageprocess_postcallbacks(repl, HAT_POSTUNSUSPEND); 6298 if (ret != 0) { 6299 /* 6300 * The second presuspend call failed: we got here through 6301 * the suspend_fail label above. 6302 */ 6303 ASSERT(ret != EIO); 6304 PAGE_RELOCATE_LOG(target, replacement, ret, cap_cpus); 6305 kreloc_thread = NULL; 6306 mutex_exit(&kpr_mutex); 6307 return (EAGAIN); 6308 } 6309 6310 /* 6311 * Now that we're out of the performance critical section we can 6312 * take care of updating the hash table, since we still 6313 * hold all the pages locked SE_EXCL at this point we 6314 * needn't worry about things changing out from under us. 6315 */ 6316 tpp = targ; 6317 rpp = repl; 6318 for (i = 0; i < npages; i++, tpp++, rpp++) { 6319 6320 /* 6321 * replace targ with replacement in page_hash table 6322 */ 6323 targ = tpp; 6324 page_relocate_hash(rpp, targ); 6325 6326 /* 6327 * concatenate target; caller of platform_page_relocate() 6328 * expects target to be concatenated after returning. 6329 */ 6330 ASSERT(targ->p_next == targ); 6331 ASSERT(targ->p_prev == targ); 6332 page_list_concat(&pl, &targ); 6333 } 6334 6335 ASSERT(*target == pl); 6336 *nrelocp = npages; 6337 PAGE_RELOCATE_LOG(target, replacement, 0, cap_cpus); 6338 kreloc_thread = NULL; 6339 mutex_exit(&kpr_mutex); 6340 return (0); 6341 } 6342 6343 /* 6344 * Called when stray pa_hments are found attached to a page which is 6345 * being freed. Notify the subsystem which attached the pa_hment of 6346 * the error if it registered a suitable handler, else panic. 6347 */ 6348 static void 6349 sfmmu_pahment_leaked(struct pa_hment *pahmep) 6350 { 6351 id_t cb_id = pahmep->cb_id; 6352 6353 ASSERT(cb_id >= (id_t)0 && cb_id < sfmmu_cb_nextid); 6354 if (sfmmu_cb_table[cb_id].errhandler != NULL) { 6355 if (sfmmu_cb_table[cb_id].errhandler(pahmep->addr, pahmep->len, 6356 HAT_CB_ERR_LEAKED, pahmep->pvt) == 0) 6357 return; /* non-fatal */ 6358 } 6359 panic("pa_hment leaked: 0x%p", pahmep); 6360 } 6361 6362 /* 6363 * Remove all mappings to page 'pp'. 
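Note that when forceflag is SFMMU_KERNEL_RELOC, locked kernel mappings are deliberately left in place here; hat_page_relocate() suspends those rather than unloading them.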
6364 */ 6365 int 6366 hat_pageunload(struct page *pp, uint_t forceflag) 6367 { 6368 struct page *origpp = pp; 6369 struct sf_hment *sfhme, *tmphme; 6370 struct hme_blk *hmeblkp; 6371 kmutex_t *pml; 6372 #ifdef VAC 6373 kmutex_t *pmtx; 6374 #endif 6375 cpuset_t cpuset, tset; 6376 int index, cons; 6377 int xhme_blks; 6378 int pa_hments; 6379 6380 ASSERT(PAGE_EXCL(pp)); 6381 6382 retry_xhat: 6383 tmphme = NULL; 6384 xhme_blks = 0; 6385 pa_hments = 0; 6386 CPUSET_ZERO(cpuset); 6387 6388 pml = sfmmu_mlist_enter(pp); 6389 6390 #ifdef VAC 6391 if (pp->p_kpmref) 6392 sfmmu_kpm_pageunload(pp); 6393 ASSERT(!PP_ISMAPPED_KPM(pp)); 6394 #endif 6395 6396 index = PP_MAPINDEX(pp); 6397 cons = TTE8K; 6398 retry: 6399 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 6400 tmphme = sfhme->hme_next; 6401 6402 if (IS_PAHME(sfhme)) { 6403 ASSERT(sfhme->hme_data != NULL); 6404 pa_hments++; 6405 continue; 6406 } 6407 6408 hmeblkp = sfmmu_hmetohblk(sfhme); 6409 if (hmeblkp->hblk_xhat_bit) { 6410 struct xhat_hme_blk *xblk = 6411 (struct xhat_hme_blk *)hmeblkp; 6412 6413 (void) XHAT_PAGEUNLOAD(xblk->xhat_hme_blk_hat, 6414 pp, forceflag, XBLK2PROVBLK(xblk)); 6415 6416 xhme_blks = 1; 6417 continue; 6418 } 6419 6420 /* 6421 * If there are kernel mappings don't unload them, they will 6422 * be suspended. 6423 */ 6424 if (forceflag == SFMMU_KERNEL_RELOC && hmeblkp->hblk_lckcnt && 6425 hmeblkp->hblk_tag.htag_id == ksfmmup) 6426 continue; 6427 6428 tset = sfmmu_pageunload(pp, sfhme, cons); 6429 CPUSET_OR(cpuset, tset); 6430 } 6431 6432 while (index != 0) { 6433 index = index >> 1; 6434 if (index != 0) 6435 cons++; 6436 if (index & 0x1) { 6437 /* Go to leading page */ 6438 pp = PP_GROUPLEADER(pp, cons); 6439 ASSERT(sfmmu_mlist_held(pp)); 6440 goto retry; 6441 } 6442 } 6443 6444 /* 6445 * cpuset may be empty if the page was only mapped by segkpm, 6446 * in which case we won't actually cross-trap. 6447 */ 6448 xt_sync(cpuset); 6449 6450 /* 6451 * The page should have no mappings at this point, unless 6452 * we were called from hat_page_relocate() in which case we 6453 * leave the locked mappings which will be suspended later. 6454 */ 6455 ASSERT(!PP_ISMAPPED(origpp) || xhme_blks || pa_hments || 6456 (forceflag == SFMMU_KERNEL_RELOC)); 6457 6458 #ifdef VAC 6459 if (PP_ISTNC(pp)) { 6460 if (cons == TTE8K) { 6461 pmtx = sfmmu_page_enter(pp); 6462 PP_CLRTNC(pp); 6463 sfmmu_page_exit(pmtx); 6464 } else { 6465 conv_tnc(pp, cons); 6466 } 6467 } 6468 #endif /* VAC */ 6469 6470 if (pa_hments && forceflag != SFMMU_KERNEL_RELOC) { 6471 /* 6472 * Unlink any pa_hments and free them, calling back 6473 * the responsible subsystem to notify it of the error. 6474 * This can occur in situations such as drivers leaking 6475 * DMA handles: naughty, but common enough that we'd like 6476 * to keep the system running rather than bringing it 6477 * down with an obscure error like "pa_hment leaked" 6478 * which doesn't aid the user in debugging their driver. 6479 */ 6480 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 6481 tmphme = sfhme->hme_next; 6482 if (IS_PAHME(sfhme)) { 6483 struct pa_hment *pahmep = sfhme->hme_data; 6484 sfmmu_pahment_leaked(pahmep); 6485 HME_SUB(sfhme, pp); 6486 kmem_cache_free(pa_hment_cache, pahmep); 6487 } 6488 } 6489 6490 ASSERT(!PP_ISMAPPED(origpp) || xhme_blks); 6491 } 6492 6493 sfmmu_mlist_exit(pml); 6494 6495 /* 6496 * XHAT may not have finished unloading pages 6497 * because some other thread was waiting for 6498 * mlist lock and XHAT_PAGEUNLOAD let it do 6499 * the job. 
6500 */ 6501 if (xhme_blks) { 6502 pp = origpp; 6503 goto retry_xhat; 6504 } 6505 6506 return (0); 6507 } 6508 6509 cpuset_t 6510 sfmmu_pageunload(page_t *pp, struct sf_hment *sfhme, int cons) 6511 { 6512 struct hme_blk *hmeblkp; 6513 sfmmu_t *sfmmup; 6514 tte_t tte, ttemod; 6515 #ifdef DEBUG 6516 tte_t orig_old; 6517 #endif /* DEBUG */ 6518 caddr_t addr; 6519 int ttesz; 6520 int ret; 6521 cpuset_t cpuset; 6522 6523 ASSERT(pp != NULL); 6524 ASSERT(sfmmu_mlist_held(pp)); 6525 ASSERT(pp->p_vnode != &kvp); 6526 6527 CPUSET_ZERO(cpuset); 6528 6529 hmeblkp = sfmmu_hmetohblk(sfhme); 6530 6531 readtte: 6532 sfmmu_copytte(&sfhme->hme_tte, &tte); 6533 if (TTE_IS_VALID(&tte)) { 6534 sfmmup = hblktosfmmu(hmeblkp); 6535 ttesz = get_hblk_ttesz(hmeblkp); 6536 /* 6537 * Only unload mappings of 'cons' size. 6538 */ 6539 if (ttesz != cons) 6540 return (cpuset); 6541 6542 /* 6543 * Note that we have p_mapping lock, but no hash lock here. 6544 * hblk_unload() has to have both hash lock AND p_mapping 6545 * lock before it tries to modify tte. So, the tte could 6546 * not become invalid in the sfmmu_modifytte_try() below. 6547 */ 6548 ttemod = tte; 6549 #ifdef DEBUG 6550 orig_old = tte; 6551 #endif /* DEBUG */ 6552 6553 TTE_SET_INVALID(&ttemod); 6554 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 6555 if (ret < 0) { 6556 #ifdef DEBUG 6557 /* only R/M bits can change. */ 6558 chk_tte(&orig_old, &tte, &ttemod, hmeblkp); 6559 #endif /* DEBUG */ 6560 goto readtte; 6561 } 6562 6563 if (ret == 0) { 6564 panic("pageunload: cas failed?"); 6565 } 6566 6567 addr = tte_to_vaddr(hmeblkp, tte); 6568 6569 sfmmu_ttesync(sfmmup, addr, &tte, pp); 6570 6571 atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -1); 6572 6573 /* 6574 * We need to flush the page from the virtual cache 6575 * in order to prevent a virtual cache alias 6576 * inconsistency. The particular scenario we need 6577 * to worry about is: 6578 * Given: va1 and va2 are two virtual addresses that 6579 * alias and will map the same physical address. 6580 * 1. mapping exists from va1 to pa and data has 6581 * been read into the cache. 6582 * 2. unload va1. 6583 * 3. load va2 and modify data using va2. 6584 * 4. unload va2. 6585 * 5. load va1 and reference data. Unless we flush 6586 * the data cache when we unload we will get 6587 * stale data. 6588 * This scenario is taken care of by using virtual 6589 * page coloring. 6590 */ 6591 if (sfmmup->sfmmu_ismhat) { 6592 /* 6593 * Flush TSBs, TLBs and caches 6594 * of every process 6595 * sharing this ism segment. 6596 */ 6597 sfmmu_hat_lock_all(); 6598 mutex_enter(&ism_mlist_lock); 6599 kpreempt_disable(); 6600 if (do_virtual_coloring) 6601 sfmmu_ismtlbcache_demap(addr, sfmmup, hmeblkp, 6602 pp->p_pagenum, CACHE_NO_FLUSH); 6603 else 6604 sfmmu_ismtlbcache_demap(addr, sfmmup, hmeblkp, 6605 pp->p_pagenum, CACHE_FLUSH); 6606 kpreempt_enable(); 6607 mutex_exit(&ism_mlist_lock); 6608 sfmmu_hat_unlock_all(); 6609 cpuset = cpu_ready_set; 6610 } else if (do_virtual_coloring) { 6611 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 6612 cpuset = sfmmup->sfmmu_cpusran; 6613 } else { 6614 sfmmu_tlbcache_demap(addr, sfmmup, hmeblkp, 6615 pp->p_pagenum, 0, FLUSH_NECESSARY_CPUS, 6616 CACHE_FLUSH, 0); 6617 cpuset = sfmmup->sfmmu_cpusran; 6618 } 6619 6620 /* 6621 * Hme_sub has to run after ttesync() and a_rss update. 6622 * See hblk_unload().
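The membar_stst() after HME_SUB() makes the list removal globally visible before hblk_vcnt and hblk_hmecnt are decremented below; once those counts drop, the hmeblk becomes eligible to be stolen, so the decrements must be our very last touch of the block.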
6623 */ 6624 HME_SUB(sfhme, pp); 6625 membar_stst(); 6626 6627 /* 6628 * We cannot make ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS) 6629 * since pteload may have done a HME_ADD() right after 6630 * we did the HME_SUB() above. Hmecnt is now maintained 6631 * by cas only; no lock guarantees its value. The only 6632 * guarantee we have is that the hmecnt can never be less 6633 * than its true value, so the hblk will not be taken away. 6634 * It's also important that we decrement the hmecnt only after 6635 * we are done with hmeblkp so that this hmeblk won't be 6636 * stolen. 6637 */ 6638 ASSERT(hmeblkp->hblk_hmecnt > 0); 6639 ASSERT(hmeblkp->hblk_vcnt > 0); 6640 atomic_add_16(&hmeblkp->hblk_vcnt, -1); 6641 atomic_add_16(&hmeblkp->hblk_hmecnt, -1); 6642 /* 6643 * This is bug 4063182. 6644 * XXX: fixme 6645 * ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt || 6646 * !hmeblkp->hblk_lckcnt); 6647 */ 6648 } else { 6649 panic("invalid tte? pp %p &tte %p", 6650 (void *)pp, (void *)&tte); 6651 } 6652 6653 return (cpuset); 6654 } 6655 6656 /* 6657 * While relocating a kernel page, this function will move the mappings 6658 * from tpp to dpp and update any data associated with these mappings. 6659 * It also unsuspends the suspended kernel mapping. 6660 */ 6661 static void 6662 hat_pagereload(struct page *tpp, struct page *dpp) 6663 { 6664 struct sf_hment *sfhme; 6665 tte_t tte, ttemod; 6666 int index, cons; 6667 6668 ASSERT(getpil() == PIL_MAX); 6669 ASSERT(sfmmu_mlist_held(tpp)); 6670 ASSERT(sfmmu_mlist_held(dpp)); 6671 6672 index = PP_MAPINDEX(tpp); 6673 cons = TTE8K; 6674 6675 /* Update real mappings to the page */ 6676 retry: 6677 for (sfhme = tpp->p_mapping; sfhme != NULL; sfhme = sfhme->hme_next) { 6678 if (IS_PAHME(sfhme)) 6679 continue; 6680 sfmmu_copytte(&sfhme->hme_tte, &tte); 6681 ttemod = tte; 6682 6683 /* 6684 * replace old pfn with new pfn in TTE 6685 */ 6686 PFN_TO_TTE(ttemod, dpp->p_pagenum); 6687 6688 /* 6689 * clear suspend bit 6690 */ 6691 ASSERT(TTE_IS_SUSPEND(&ttemod)); 6692 TTE_CLR_SUSPEND(&ttemod); 6693 6694 if (sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte) < 0) 6695 panic("hat_pagereload(): sfmmu_modifytte_try() failed"); 6696 6697 /* 6698 * set hme_page point to new page 6699 */ 6700 sfhme->hme_page = dpp; 6701 } 6702 6703 /* 6704 * move p_mapping list from old page to new page 6705 */ 6706 dpp->p_mapping = tpp->p_mapping; 6707 tpp->p_mapping = NULL; 6708 dpp->p_share = tpp->p_share; 6709 tpp->p_share = 0; 6710 6711 while (index != 0) { 6712 index = index >> 1; 6713 if (index != 0) 6714 cons++; 6715 if (index & 0x1) { 6716 tpp = PP_GROUPLEADER(tpp, cons); 6717 dpp = PP_GROUPLEADER(dpp, cons); 6718 goto retry; 6719 } 6720 } 6721 6722 if (dtrace_kreloc_fini) 6723 (*dtrace_kreloc_fini)(); 6724 mutex_exit(&kpr_suspendlock); 6725 } 6726 6727 uint_t 6728 hat_pagesync(struct page *pp, uint_t clearflag) 6729 { 6730 struct sf_hment *sfhme, *tmphme = NULL; 6731 struct hme_blk *hmeblkp; 6732 kmutex_t *pml; 6733 cpuset_t cpuset, tset; 6734 int index, cons; 6735 extern ulong_t po_share; 6736 page_t *save_pp = pp; 6737 6738 CPUSET_ZERO(cpuset); 6739 6740 if (PP_ISRO(pp) && (clearflag & HAT_SYNC_STOPON_MOD)) { 6741 return (PP_GENERIC_ATTR(pp)); 6742 } 6743 6744 if ((clearflag == (HAT_SYNC_STOPON_REF | HAT_SYNC_DONTZERO)) && 6745 PP_ISREF(pp)) { 6746 return (PP_GENERIC_ATTR(pp)); 6747 } 6748 6749 if ((clearflag == (HAT_SYNC_STOPON_MOD | HAT_SYNC_DONTZERO)) && 6750 PP_ISMOD(pp)) { 6751 return (PP_GENERIC_ATTR(pp)); 6752 } 6753 6754 if ((clearflag & HAT_SYNC_STOPON_SHARED) != 0 && 6755 (pp->p_share
> po_share) && 6756 !(clearflag & HAT_SYNC_ZERORM)) { 6757 if (PP_ISRO(pp)) 6758 hat_page_setattr(pp, P_REF); 6759 return (PP_GENERIC_ATTR(pp)); 6760 } 6761 6762 clearflag &= ~HAT_SYNC_STOPON_SHARED; 6763 pml = sfmmu_mlist_enter(pp); 6764 index = PP_MAPINDEX(pp); 6765 cons = TTE8K; 6766 retry: 6767 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 6768 /* 6769 * We need to save the next hment on the list since 6770 * it is possible for pagesync to remove an invalid hment 6771 * from the list. 6772 */ 6773 tmphme = sfhme->hme_next; 6774 /* 6775 * If we are looking for large mappings and this hme doesn't 6776 * reach the range we are seeking, just ignore it. 6777 */ 6778 hmeblkp = sfmmu_hmetohblk(sfhme); 6779 if (hmeblkp->hblk_xhat_bit) 6780 continue; 6781 6782 if (hme_size(sfhme) < cons) 6783 continue; 6784 tset = sfmmu_pagesync(pp, sfhme, 6785 clearflag & ~HAT_SYNC_STOPON_RM); 6786 CPUSET_OR(cpuset, tset); 6787 /* 6788 * If clearflag is HAT_SYNC_DONTZERO, break out as soon 6789 * as the "ref" or "mod" is set. 6790 */ 6791 if ((clearflag & ~HAT_SYNC_STOPON_RM) == HAT_SYNC_DONTZERO && 6792 (((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp)) || 6793 ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)))) { 6794 index = 0; 6795 break; 6796 } 6797 } 6798 6799 while (index) { 6800 index = index >> 1; 6801 cons++; 6802 if (index & 0x1) { 6803 /* Go to leading page */ 6804 pp = PP_GROUPLEADER(pp, cons); 6805 goto retry; 6806 } 6807 } 6808 6809 xt_sync(cpuset); 6810 sfmmu_mlist_exit(pml); 6811 return (PP_GENERIC_ATTR(save_pp)); 6812 } 6813 6814 /* 6815 * Sync one mapping's hardware dependent attributes back to the page struct 6816 */ 6817 static cpuset_t 6818 sfmmu_pagesync(struct page *pp, struct sf_hment *sfhme, 6819 uint_t clearflag) 6820 { 6821 caddr_t addr; 6822 tte_t tte, ttemod; 6823 struct hme_blk *hmeblkp; 6824 int ret; 6825 sfmmu_t *sfmmup; 6826 cpuset_t cpuset; 6827 6828 ASSERT(pp != NULL); 6829 ASSERT(sfmmu_mlist_held(pp)); 6830 ASSERT((clearflag == HAT_SYNC_DONTZERO) || 6831 (clearflag == HAT_SYNC_ZERORM)); 6832 6833 SFMMU_STAT(sf_pagesync); 6834 6835 CPUSET_ZERO(cpuset); 6836 6837 sfmmu_pagesync_retry: 6838 6839 sfmmu_copytte(&sfhme->hme_tte, &tte); 6840 if (TTE_IS_VALID(&tte)) { 6841 hmeblkp = sfmmu_hmetohblk(sfhme); 6842 sfmmup = hblktosfmmu(hmeblkp); 6843 addr = tte_to_vaddr(hmeblkp, tte); 6844 if (clearflag == HAT_SYNC_ZERORM) { 6845 ttemod = tte; 6846 TTE_CLR_RM(&ttemod); 6847 ret = sfmmu_modifytte_try(&tte, &ttemod, 6848 &sfhme->hme_tte); 6849 if (ret < 0) { 6850 /* 6851 * cas failed and the new value is not what 6852 * we want. 6853 */ 6854 goto sfmmu_pagesync_retry; 6855 } 6856 6857 if (ret > 0) { 6858 /* we win the cas */ 6859 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 6860 cpuset = sfmmup->sfmmu_cpusran; 6861 } 6862 } 6863 6864 sfmmu_ttesync(sfmmup, addr, &tte, pp); 6865 } 6866 return (cpuset); 6867 } 6868 6869 /* 6870 * Remove write permission from a mapping to a page, so that 6871 * we can detect the next modification of it. This requires modifying 6872 * the TTE then invalidating (demap) any TLB entry using that TTE. 6873 * This code is similar to sfmmu_pagesync.
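Note that both the write-enable and mod bits are cleared together, so the very next store through any remaining mapping of the page takes a fault and re-dirties the page through the normal path.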
6874 */ 6875 static cpuset_t 6876 sfmmu_pageclrwrt(struct page *pp, struct sf_hment *sfhme) 6877 { 6878 caddr_t addr; 6879 tte_t tte; 6880 tte_t ttemod; 6881 struct hme_blk *hmeblkp; 6882 int ret; 6883 sfmmu_t *sfmmup; 6884 cpuset_t cpuset; 6885 6886 ASSERT(pp != NULL); 6887 ASSERT(sfmmu_mlist_held(pp)); 6888 6889 CPUSET_ZERO(cpuset); 6890 SFMMU_STAT(sf_clrwrt); 6891 6892 retry: 6893 6894 sfmmu_copytte(&sfhme->hme_tte, &tte); 6895 if (TTE_IS_VALID(&tte) && TTE_IS_WRITABLE(&tte)) { 6896 hmeblkp = sfmmu_hmetohblk(sfhme); 6897 6898 /* 6899 * xhat mappings should never be to a VMODSORT page. 6900 */ 6901 ASSERT(hmeblkp->hblk_xhat_bit == 0); 6902 6903 sfmmup = hblktosfmmu(hmeblkp); 6904 addr = tte_to_vaddr(hmeblkp, tte); 6905 6906 ttemod = tte; 6907 TTE_CLR_WRT(&ttemod); 6908 TTE_CLR_MOD(&ttemod); 6909 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 6910 6911 /* 6912 * if cas failed and the new value is not what 6913 * we want, retry 6914 */ 6915 if (ret < 0) 6916 goto retry; 6917 6918 /* we win the cas */ 6919 if (ret > 0) { 6920 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 6921 cpuset = sfmmup->sfmmu_cpusran; 6922 } 6923 } 6924 6925 return (cpuset); 6926 } 6927 6928 /* 6929 * Walk all mappings of a page, removing write permission and clearing the 6930 * ref/mod bits. This code is similar to hat_pagesync(). 6931 */ 6932 static void 6933 hat_page_clrwrt(page_t *pp) 6934 { 6935 struct sf_hment *sfhme; 6936 struct sf_hment *tmphme = NULL; 6937 kmutex_t *pml; 6938 cpuset_t cpuset; 6939 cpuset_t tset; 6940 int index; 6941 int cons; 6942 6943 CPUSET_ZERO(cpuset); 6944 6945 pml = sfmmu_mlist_enter(pp); 6946 index = PP_MAPINDEX(pp); 6947 cons = TTE8K; 6948 retry: 6949 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 6950 tmphme = sfhme->hme_next; 6951 6952 /* 6953 * If we are looking for large mappings and this hme doesn't 6954 * reach the range we are seeking, just ignore it. 6955 */ 6956 6957 if (hme_size(sfhme) < cons) 6958 continue; 6959 6960 tset = sfmmu_pageclrwrt(pp, sfhme); 6961 CPUSET_OR(cpuset, tset); 6962 } 6963 6964 while (index) { 6965 index = index >> 1; 6966 cons++; 6967 if (index & 0x1) { 6968 /* Go to leading page */ 6969 pp = PP_GROUPLEADER(pp, cons); 6970 goto retry; 6971 } 6972 } 6973 6974 xt_sync(cpuset); 6975 sfmmu_mlist_exit(pml); 6976 } 6977 6978 /* 6979 * Set the given REF/MOD/RO bits for the given page. 6980 * For a vnode with a sorted v_pages list, we need to change 6981 * the attributes and the v_pages list together under page_vnode_mutex. 6982 */ 6983 void 6984 hat_page_setattr(page_t *pp, uint_t flag) 6985 { 6986 vnode_t *vp = pp->p_vnode; 6987 page_t **listp; 6988 kmutex_t *pmtx; 6989 kmutex_t *vphm = NULL; 6990 6991 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 6992 6993 /* 6994 * nothing to do if attribute already set 6995 */ 6996 if ((pp->p_nrm & flag) == flag) 6997 return; 6998 6999 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) { 7000 vphm = page_vnode_mutex(vp); 7001 mutex_enter(vphm); 7002 } 7003 7004 pmtx = sfmmu_page_enter(pp); 7005 pp->p_nrm |= flag; 7006 sfmmu_page_exit(pmtx); 7007 7008 if (vphm != NULL) { 7009 /* 7010 * Some File Systems examine v_pages for NULL w/o 7011 * grabbing the vphm mutex. Must not let it become NULL when 7012 * pp is the only page on the list.
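The reshuffle below moves the newly-dirtied page to the tail of v_pages; for VMODSORT vnodes the list is kept ordered with clean pages toward the front and dirty pages toward the back, which is the ordering hat_page_clrattr() below relies on when IO completes.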
7013 */ 7014 if (pp->p_vpnext != pp) { 7015 page_vpsub(&vp->v_pages, pp); 7016 if (vp->v_pages != NULL) 7017 listp = &vp->v_pages->p_vpprev->p_vpnext; 7018 else 7019 listp = &vp->v_pages; 7020 page_vpadd(listp, pp); 7021 } 7022 mutex_exit(vphm); 7023 } 7024 } 7025 7026 void 7027 hat_page_clrattr(page_t *pp, uint_t flag) 7028 { 7029 vnode_t *vp = pp->p_vnode; 7030 kmutex_t *pmtx; 7031 7032 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 7033 7034 pmtx = sfmmu_page_enter(pp); 7035 7036 /* 7037 * Caller is expected to hold page's io lock for VMODSORT to work 7038 * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod 7039 * bit is cleared. 7040 * We don't have assert to avoid tripping some existing third party 7041 * code. The dirty page is moved back to top of the v_page list 7042 * after IO is done in pvn_write_done(). 7043 */ 7044 pp->p_nrm &= ~flag; 7045 sfmmu_page_exit(pmtx); 7046 7047 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) { 7048 7049 /* 7050 * VMODSORT works by removing write permissions and getting 7051 * a fault when a page is made dirty. At this point 7052 * we need to remove write permission from all mappings 7053 * to this page. 7054 */ 7055 hat_page_clrwrt(pp); 7056 } 7057 } 7058 7059 uint_t 7060 hat_page_getattr(page_t *pp, uint_t flag) 7061 { 7062 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 7063 return ((uint_t)(pp->p_nrm & flag)); 7064 } 7065 7066 /* 7067 * DEBUG kernels: verify that a kernel va<->pa translation 7068 * is safe by checking the underlying page_t is in a page 7069 * relocation-safe state. 7070 */ 7071 #ifdef DEBUG 7072 void 7073 sfmmu_check_kpfn(pfn_t pfn) 7074 { 7075 page_t *pp; 7076 int index, cons; 7077 7078 if (hat_check_vtop == 0) 7079 return; 7080 7081 if (hat_kpr_enabled == 0 || kvseg.s_base == NULL || panicstr) 7082 return; 7083 7084 pp = page_numtopp_nolock(pfn); 7085 if (!pp) 7086 return; 7087 7088 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp)) 7089 return; 7090 7091 /* 7092 * Handed a large kernel page, we dig up the root page since we 7093 * know the root page might have the lock also. 7094 */ 7095 if (pp->p_szc != 0) { 7096 index = PP_MAPINDEX(pp); 7097 cons = TTE8K; 7098 again: 7099 while (index != 0) { 7100 index >>= 1; 7101 if (index != 0) 7102 cons++; 7103 if (index & 0x1) { 7104 pp = PP_GROUPLEADER(pp, cons); 7105 goto again; 7106 } 7107 } 7108 } 7109 7110 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp)) 7111 return; 7112 7113 /* 7114 * Pages need to be locked or allocated "permanent" (either from 7115 * static_arena arena or explicitly setting PG_NORELOC when calling 7116 * page_create_va()) for VA->PA translations to be valid. 7117 */ 7118 if (!PP_ISNORELOC(pp)) 7119 panic("Illegal VA->PA translation, pp 0x%p not permanent", pp); 7120 else 7121 panic("Illegal VA->PA translation, pp 0x%p not locked", pp); 7122 } 7123 #endif /* DEBUG */ 7124 7125 /* 7126 * Returns a page frame number for a given virtual address. 7127 * Returns PFN_INVALID to indicate an invalid mapping 7128 */ 7129 pfn_t 7130 hat_getpfnum(struct hat *hat, caddr_t addr) 7131 { 7132 pfn_t pfn; 7133 tte_t tte; 7134 7135 /* 7136 * We would like to 7137 * ASSERT(AS_LOCK_HELD(as, &as->a_lock)); 7138 * but we can't because the iommu driver will call this 7139 * routine at interrupt time and it can't grab the as lock 7140 * or it will deadlock: A thread could have the as lock 7141 * and be waiting for io. The io can't complete 7142 * because the interrupt thread is blocked trying to grab 7143 * the as lock. 
7144 */ 7145 7146 ASSERT(hat->sfmmu_xhat_provider == NULL); 7147 7148 if (hat == ksfmmup) { 7149 if (segkpm && IS_KPM_ADDR(addr)) 7150 return (sfmmu_kpm_vatopfn(addr)); 7151 while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte)) 7152 == PFN_SUSPENDED) { 7153 sfmmu_vatopfn_suspended(addr, ksfmmup, &tte); 7154 } 7155 sfmmu_check_kpfn(pfn); 7156 return (pfn); 7157 } else { 7158 return (sfmmu_uvatopfn(addr, hat)); 7159 } 7160 } 7161 7162 /* 7163 * hat_getkpfnum() is an obsolete DDI routine, and its use is discouraged. 7164 * Use hat_getpfnum(kas.a_hat, ...) instead. 7165 * 7166 * We'd like to return PFN_INVALID if the mappings have underlying page_t's 7167 * but can't right now due to the fact that some software has grown to use 7168 * this interface incorrectly. So for now when the interface is misused, 7169 * return a warning to the user that in the future it won't work in the 7170 * way they're abusing it, and carry on (after disabling page relocation). 7171 */ 7172 pfn_t 7173 hat_getkpfnum(caddr_t addr) 7174 { 7175 pfn_t pfn; 7176 tte_t tte; 7177 int badcaller = 0; 7178 extern int segkmem_reloc; 7179 7180 if (segkpm && IS_KPM_ADDR(addr)) { 7181 badcaller = 1; 7182 pfn = sfmmu_kpm_vatopfn(addr); 7183 } else { 7184 while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte)) 7185 == PFN_SUSPENDED) { 7186 sfmmu_vatopfn_suspended(addr, ksfmmup, &tte); 7187 } 7188 badcaller = pf_is_memory(pfn); 7189 } 7190 7191 if (badcaller) { 7192 /* 7193 * We can't return PFN_INVALID or the caller may panic 7194 * or corrupt the system. The only alternative is to 7195 * disable page relocation at this point for all kernel 7196 * memory. This will impact any callers of page_relocate() 7197 * such as FMA or DR. 7198 * 7199 * RFE: Add junk here to spit out an ereport so the sysadmin 7200 * can be advised that he should upgrade his device driver 7201 * so that this doesn't happen. 7202 */ 7203 hat_getkpfnum_badcall(caller()); 7204 if (hat_kpr_enabled && segkmem_reloc) { 7205 hat_kpr_enabled = 0; 7206 segkmem_reloc = 0; 7207 cmn_err(CE_WARN, "Kernel Page Relocation is DISABLED"); 7208 } 7209 } 7210 return (pfn); 7211 } 7212 7213 pfn_t 7214 sfmmu_uvatopfn(caddr_t vaddr, struct hat *sfmmup) 7215 { 7216 struct hmehash_bucket *hmebp; 7217 hmeblk_tag hblktag; 7218 int hmeshift, hashno = 1; 7219 struct hme_blk *hmeblkp = NULL; 7220 7221 struct sf_hment *sfhmep; 7222 tte_t tte; 7223 pfn_t pfn; 7224 7225 /* support for ISM */ 7226 ism_map_t *ism_map; 7227 ism_blk_t *ism_blkp; 7228 int i; 7229 sfmmu_t *ism_hatid = NULL; 7230 sfmmu_t *locked_hatid = NULL; 7231 7232 7233 ASSERT(sfmmup != ksfmmup); 7234 SFMMU_STAT(sf_user_vtop); 7235 /* 7236 * Set ism_hatid if vaddr falls in a ISM segment. 
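When it does, the lookup below is redirected: sfmmup is switched to the ISM hat itself and vaddr is rebased to its offset within the segment, since that is where the shared mappings live in the ISM hat (e.g. a fault at ism_start + 0x42000 is looked up in the ISM hat at vaddr 0x42000; the offset here is purely illustrative).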
7237 */ 7238 ism_blkp = sfmmup->sfmmu_iblk; 7239 if (ism_blkp) { 7240 sfmmu_ismhat_enter(sfmmup, 0); 7241 locked_hatid = sfmmup; 7242 } 7243 while (ism_blkp && ism_hatid == NULL) { 7244 ism_map = ism_blkp->iblk_maps; 7245 for (i = 0; ism_map[i].imap_ismhat && i < ISM_MAP_SLOTS; i++) { 7246 if (vaddr >= ism_start(ism_map[i]) && 7247 vaddr < ism_end(ism_map[i])) { 7248 sfmmup = ism_hatid = ism_map[i].imap_ismhat; 7249 vaddr = (caddr_t)(vaddr - 7250 ism_start(ism_map[i])); 7251 break; 7252 } 7253 } 7254 ism_blkp = ism_blkp->iblk_next; 7255 } 7256 if (locked_hatid) { 7257 sfmmu_ismhat_exit(locked_hatid, 0); 7258 } 7259 7260 hblktag.htag_id = sfmmup; 7261 do { 7262 hmeshift = HME_HASH_SHIFT(hashno); 7263 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift); 7264 hblktag.htag_rehash = hashno; 7265 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift); 7266 7267 SFMMU_HASH_LOCK(hmebp); 7268 7269 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 7270 if (hmeblkp != NULL) { 7271 HBLKTOHME(sfhmep, hmeblkp, vaddr); 7272 sfmmu_copytte(&sfhmep->hme_tte, &tte); 7273 if (TTE_IS_VALID(&tte)) { 7274 pfn = TTE_TO_PFN(vaddr, &tte); 7275 } else { 7276 pfn = PFN_INVALID; 7277 } 7278 SFMMU_HASH_UNLOCK(hmebp); 7279 return (pfn); 7280 } 7281 SFMMU_HASH_UNLOCK(hmebp); 7282 hashno++; 7283 } while (HME_REHASH(sfmmup) && (hashno <= mmu_hashcnt)); 7284 return (PFN_INVALID); 7285 } 7286 7287 7288 /* 7289 * For compatibility with AT&T and later optimizations 7290 */ 7291 /* ARGSUSED */ 7292 void 7293 hat_map(struct hat *hat, caddr_t addr, size_t len, uint_t flags) 7294 { 7295 ASSERT(hat != NULL); 7296 ASSERT(hat->sfmmu_xhat_provider == NULL); 7297 } 7298 7299 /* 7300 * Return the number of mappings to a particular page. 7301 * This number is an approximation of the number of 7302 * people sharing the page. 7303 */ 7304 ulong_t 7305 hat_page_getshare(page_t *pp) 7306 { 7307 page_t *spp = pp; /* start page */ 7308 kmutex_t *pml; 7309 ulong_t cnt; 7310 int index, sz = TTE64K; 7311 7312 /* 7313 * We need to grab the mlist lock to make sure any outstanding 7314 * load/unloads complete. Otherwise we could return zero 7315 * even though the unload(s) haven't finished yet. 7316 */ 7317 pml = sfmmu_mlist_enter(spp); 7318 cnt = spp->p_share; 7319 7320 #ifdef VAC 7321 if (kpm_enable) 7322 cnt += spp->p_kpmref; 7323 #endif 7324 7325 /* 7326 * If we have any large mappings, we count the number of 7327 * mappings that this large page is part of. 7328 */ 7329 index = PP_MAPINDEX(spp); 7330 index >>= 1; 7331 while (index) { 7332 pp = PP_GROUPLEADER(spp, sz); 7333 if ((index & 0x1) && pp != spp) { 7334 cnt += pp->p_share; 7335 spp = pp; 7336 } 7337 index >>= 1; 7338 sz++; 7339 } 7340 sfmmu_mlist_exit(pml); 7341 return (cnt); 7342 } 7343 7344 /* 7345 * Unload all large mappings to the pp and reset the p_szc field of every 7346 * constituent page according to the remaining mappings. 7347 * 7348 * pp must be locked SE_EXCL. Even though no other constituent pages are 7349 * locked it's legal to unload the large mappings to the pp because all 7350 * constituent pages of large locked mappings have to be locked SE_SHARED. 7351 * This means if we have SE_EXCL lock on one of the constituent pages none 7352 * of the large mappings to pp are locked. 7353 * 7354 * Decrease p_szc field starting from the last constituent page and ending 7355 * with the root page. This method is used because other threads rely on the 7356 * root's p_szc to find the lock to synchronize on.
After a root page_t's p_szc 7357 * is demoted then other threads will succeed in sfmmu_mlspl_enter(). This 7358 * ensures that p_szc changes of the constituent pages appears atomic for all 7359 * threads that use sfmmu_mlspl_enter() to examine p_szc field. 7360 * 7361 * This mechanism is only used for file system pages where it's not always 7362 * possible to get SE_EXCL locks on all constituent pages to demote the size 7363 * code (as is done for anonymous or kernel large pages). 7364 * 7365 * See more comments in front of sfmmu_mlspl_enter(). 7366 */ 7367 void 7368 hat_page_demote(page_t *pp) 7369 { 7370 int index; 7371 int sz; 7372 cpuset_t cpuset; 7373 int sync = 0; 7374 page_t *rootpp; 7375 struct sf_hment *sfhme; 7376 struct sf_hment *tmphme = NULL; 7377 struct hme_blk *hmeblkp; 7378 uint_t pszc; 7379 page_t *lastpp; 7380 cpuset_t tset; 7381 pgcnt_t npgs; 7382 kmutex_t *pml; 7383 kmutex_t *pmtx = NULL; 7384 7385 ASSERT(PAGE_EXCL(pp)); 7386 ASSERT(!PP_ISFREE(pp)); 7387 ASSERT(page_szc_lock_assert(pp)); 7388 pml = sfmmu_mlist_enter(pp); 7389 7390 pszc = pp->p_szc; 7391 if (pszc == 0) { 7392 goto out; 7393 } 7394 7395 index = PP_MAPINDEX(pp) >> 1; 7396 7397 if (index) { 7398 CPUSET_ZERO(cpuset); 7399 sz = TTE64K; 7400 sync = 1; 7401 } 7402 7403 while (index) { 7404 if (!(index & 0x1)) { 7405 index >>= 1; 7406 sz++; 7407 continue; 7408 } 7409 ASSERT(sz <= pszc); 7410 rootpp = PP_GROUPLEADER(pp, sz); 7411 for (sfhme = rootpp->p_mapping; sfhme; sfhme = tmphme) { 7412 tmphme = sfhme->hme_next; 7413 hmeblkp = sfmmu_hmetohblk(sfhme); 7414 if (hme_size(sfhme) != sz) { 7415 continue; 7416 } 7417 if (hmeblkp->hblk_xhat_bit) { 7418 cmn_err(CE_PANIC, 7419 "hat_page_demote: xhat hmeblk"); 7420 } 7421 tset = sfmmu_pageunload(rootpp, sfhme, sz); 7422 CPUSET_OR(cpuset, tset); 7423 } 7424 if (index >>= 1) { 7425 sz++; 7426 } 7427 } 7428 7429 ASSERT(!PP_ISMAPPED_LARGE(pp)); 7430 7431 if (sync) { 7432 xt_sync(cpuset); 7433 #ifdef VAC 7434 if (PP_ISTNC(pp)) { 7435 conv_tnc(rootpp, sz); 7436 } 7437 #endif /* VAC */ 7438 } 7439 7440 pmtx = sfmmu_page_enter(pp); 7441 7442 ASSERT(pp->p_szc == pszc); 7443 rootpp = PP_PAGEROOT(pp); 7444 ASSERT(rootpp->p_szc == pszc); 7445 lastpp = PP_PAGENEXT_N(rootpp, TTEPAGES(pszc) - 1); 7446 7447 while (lastpp != rootpp) { 7448 sz = PP_MAPINDEX(lastpp) ? fnd_mapping_sz(lastpp) : 0; 7449 ASSERT(sz < pszc); 7450 npgs = (sz == 0) ? 1 : TTEPAGES(sz); 7451 ASSERT(P2PHASE(lastpp->p_pagenum, npgs) == npgs - 1); 7452 while (--npgs > 0) { 7453 lastpp->p_szc = (uchar_t)sz; 7454 lastpp = PP_PAGEPREV(lastpp); 7455 } 7456 if (sz) { 7457 /* 7458 * make sure before current root's pszc 7459 * is updated all updates to constituent pages pszc 7460 * fields are globally visible. 7461 */ 7462 membar_producer(); 7463 } 7464 lastpp->p_szc = sz; 7465 ASSERT(IS_P2ALIGNED(lastpp->p_pagenum, TTEPAGES(sz))); 7466 if (lastpp != rootpp) { 7467 lastpp = PP_PAGEPREV(lastpp); 7468 } 7469 } 7470 if (sz == 0) { 7471 /* the loop above doesn't cover this case */ 7472 rootpp->p_szc = 0; 7473 } 7474 out: 7475 ASSERT(pp->p_szc == 0); 7476 if (pmtx != NULL) { 7477 sfmmu_page_exit(pmtx); 7478 } 7479 sfmmu_mlist_exit(pml); 7480 } 7481 7482 /* 7483 * Refresh the HAT ismttecnt[] element for size szc. 7484 * Caller must have set ISM busy flag to prevent mapping 7485 * lists from changing while we're traversing them. 
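The refreshed total is also cached in sfmmu_ismttecnt[szc] on the way out, so consumers such as hat_get_mapped_size() can read the per-size ISM counts without walking the ISM blocks themselves.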
7486 */ 7487 pgcnt_t 7488 ism_tsb_entries(sfmmu_t *sfmmup, int szc) 7489 { 7490 ism_blk_t *ism_blkp = sfmmup->sfmmu_iblk; 7491 ism_map_t *ism_map; 7492 pgcnt_t npgs = 0; 7493 int j; 7494 7495 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 7496 for (; ism_blkp != NULL; ism_blkp = ism_blkp->iblk_next) { 7497 ism_map = ism_blkp->iblk_maps; 7498 for (j = 0; ism_map[j].imap_ismhat && j < ISM_MAP_SLOTS; j++) 7499 npgs += ism_map[j].imap_ismhat->sfmmu_ttecnt[szc]; 7500 } 7501 sfmmup->sfmmu_ismttecnt[szc] = npgs; 7502 return (npgs); 7503 } 7504 7505 /* 7506 * Yield the memory claim requirement for an address space. 7507 * 7508 * This is currently implemented as the number of bytes that have active 7509 * hardware translations that have page structures. Therefore, it can 7510 * underestimate the traditional resident set size, eg, if the 7511 * physical page is present and the hardware translation is missing; 7512 * and it can overestimate the rss, eg, if there are active 7513 * translations to a frame buffer with page structs. 7514 * Also, it does not take sharing into account. 7515 * 7516 * Note that we don't acquire locks here since this function is most often 7517 * called from the clock thread. 7518 */ 7519 size_t 7520 hat_get_mapped_size(struct hat *hat) 7521 { 7522 size_t assize = 0; 7523 int i; 7524 7525 if (hat == NULL) 7526 return (0); 7527 7528 ASSERT(hat->sfmmu_xhat_provider == NULL); 7529 7530 for (i = 0; i < mmu_page_sizes; i++) 7531 assize += (pgcnt_t)hat->sfmmu_ttecnt[i] * TTEBYTES(i); 7532 7533 if (hat->sfmmu_iblk == NULL) 7534 return (assize); 7535 7536 for (i = 0; i < mmu_page_sizes; i++) 7537 assize += (pgcnt_t)hat->sfmmu_ismttecnt[i] * TTEBYTES(i); 7538 7539 return (assize); 7540 } 7541 7542 int 7543 hat_stats_enable(struct hat *hat) 7544 { 7545 hatlock_t *hatlockp; 7546 7547 ASSERT(hat->sfmmu_xhat_provider == NULL); 7548 7549 hatlockp = sfmmu_hat_enter(hat); 7550 hat->sfmmu_rmstat++; 7551 sfmmu_hat_exit(hatlockp); 7552 return (1); 7553 } 7554 7555 void 7556 hat_stats_disable(struct hat *hat) 7557 { 7558 hatlock_t *hatlockp; 7559 7560 ASSERT(hat->sfmmu_xhat_provider == NULL); 7561 7562 hatlockp = sfmmu_hat_enter(hat); 7563 hat->sfmmu_rmstat--; 7564 sfmmu_hat_exit(hatlockp); 7565 } 7566 7567 /* 7568 * Routines for entering or removing ourselves from the 7569 * ism_hat's mapping list. 7570 */ 7571 static void 7572 iment_add(struct ism_ment *iment, struct hat *ism_hat) 7573 { 7574 ASSERT(MUTEX_HELD(&ism_mlist_lock)); 7575 7576 iment->iment_prev = NULL; 7577 iment->iment_next = ism_hat->sfmmu_iment; 7578 if (ism_hat->sfmmu_iment) { 7579 ism_hat->sfmmu_iment->iment_prev = iment; 7580 } 7581 ism_hat->sfmmu_iment = iment; 7582 } 7583 7584 static void 7585 iment_sub(struct ism_ment *iment, struct hat *ism_hat) 7586 { 7587 ASSERT(MUTEX_HELD(&ism_mlist_lock)); 7588 7589 if (ism_hat->sfmmu_iment == NULL) { 7590 panic("ism map entry remove - no entries"); 7591 } 7592 7593 if (iment->iment_prev) { 7594 ASSERT(ism_hat->sfmmu_iment != iment); 7595 iment->iment_prev->iment_next = iment->iment_next; 7596 } else { 7597 ASSERT(ism_hat->sfmmu_iment == iment); 7598 ism_hat->sfmmu_iment = iment->iment_next; 7599 } 7600 7601 if (iment->iment_next) { 7602 iment->iment_next->iment_prev = iment->iment_prev; 7603 } 7604 7605 /* 7606 * zero out the entry 7607 */ 7608 iment->iment_next = NULL; 7609 iment->iment_prev = NULL; 7610 iment->iment_hat = NULL; 7611 } 7612 7613 /* 7614 * Hat_share()/unshare() return an (non-zero) error 7615 * when saddr and daddr are not properly aligned. 
7616 * 7617 * The top level mapping element determines the alignment 7618 * requirement for saddr and daddr, depending on different 7619 * architectures. 7620 * 7621 * When hat_share()/unshare() are not supported, 7622 * HATOP_SHARE()/UNSHARE() return 0 7623 */ 7624 int 7625 hat_share(struct hat *sfmmup, caddr_t addr, 7626 struct hat *ism_hatid, caddr_t sptaddr, size_t len, uint_t ismszc) 7627 { 7628 ism_blk_t *ism_blkp; 7629 ism_blk_t *new_iblk; 7630 ism_map_t *ism_map; 7631 ism_ment_t *ism_ment; 7632 int i, added; 7633 hatlock_t *hatlockp; 7634 int reload_mmu = 0; 7635 uint_t ismshift = page_get_shift(ismszc); 7636 size_t ismpgsz = page_get_pagesize(ismszc); 7637 uint_t ismmask = (uint_t)ismpgsz - 1; 7638 size_t sh_size = ISM_SHIFT(ismshift, len); 7639 ushort_t ismhatflag; 7640 7641 #ifdef DEBUG 7642 caddr_t eaddr = addr + len; 7643 #endif /* DEBUG */ 7644 7645 ASSERT(ism_hatid != NULL && sfmmup != NULL); 7646 ASSERT(sptaddr == ISMID_STARTADDR); 7647 /* 7648 * Check the alignment. 7649 */ 7650 if (!ISM_ALIGNED(ismshift, addr) || !ISM_ALIGNED(ismshift, sptaddr)) 7651 return (EINVAL); 7652 7653 /* 7654 * Check size alignment. 7655 */ 7656 if (!ISM_ALIGNED(ismshift, len)) 7657 return (EINVAL); 7658 7659 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 7660 7661 /* 7662 * Allocate ism_ment for the ism_hat's mapping list, and an 7663 * ism map blk in case we need one. We must do our 7664 * allocations before acquiring locks to prevent a deadlock 7665 * in the kmem allocator on the mapping list lock. 7666 */ 7667 new_iblk = kmem_cache_alloc(ism_blk_cache, KM_SLEEP); 7668 ism_ment = kmem_cache_alloc(ism_ment_cache, KM_SLEEP); 7669 7670 /* 7671 * Serialize ISM mappings with the ISM busy flag, and also the 7672 * trap handlers. 7673 */ 7674 sfmmu_ismhat_enter(sfmmup, 0); 7675 7676 /* 7677 * Allocate an ism map blk if necessary. 7678 */ 7679 if (sfmmup->sfmmu_iblk == NULL) { 7680 sfmmup->sfmmu_iblk = new_iblk; 7681 bzero(new_iblk, sizeof (*new_iblk)); 7682 new_iblk->iblk_nextpa = (uint64_t)-1; 7683 membar_stst(); /* make sure next ptr visible to all CPUs */ 7684 sfmmup->sfmmu_ismblkpa = va_to_pa((caddr_t)new_iblk); 7685 reload_mmu = 1; 7686 new_iblk = NULL; 7687 } 7688 7689 #ifdef DEBUG 7690 /* 7691 * Make sure mapping does not already exist. 7692 */ 7693 ism_blkp = sfmmup->sfmmu_iblk; 7694 while (ism_blkp) { 7695 ism_map = ism_blkp->iblk_maps; 7696 for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) { 7697 if ((addr >= ism_start(ism_map[i]) && 7698 addr < ism_end(ism_map[i])) || 7699 eaddr > ism_start(ism_map[i]) && 7700 eaddr <= ism_end(ism_map[i])) { 7701 panic("sfmmu_share: Already mapped!"); 7702 } 7703 } 7704 ism_blkp = ism_blkp->iblk_next; 7705 } 7706 #endif /* DEBUG */ 7707 7708 ASSERT(ismszc >= TTE4M); 7709 if (ismszc == TTE4M) { 7710 ismhatflag = HAT_4M_FLAG; 7711 } else if (ismszc == TTE32M) { 7712 ismhatflag = HAT_32M_FLAG; 7713 } else if (ismszc == TTE256M) { 7714 ismhatflag = HAT_256M_FLAG; 7715 } 7716 /* 7717 * Add mapping to first available mapping slot. 7718 */ 7719 ism_blkp = sfmmup->sfmmu_iblk; 7720 added = 0; 7721 while (!added) { 7722 ism_map = ism_blkp->iblk_maps; 7723 for (i = 0; i < ISM_MAP_SLOTS; i++) { 7724 if (ism_map[i].imap_ismhat == NULL) { 7725 7726 ism_map[i].imap_ismhat = ism_hatid; 7727 ism_map[i].imap_vb_shift = (ushort_t)ismshift; 7728 ism_map[i].imap_hatflags = ismhatflag; 7729 ism_map[i].imap_sz_mask = ismmask; 7730 /* 7731 * imap_seg is checked in ISM_CHECK to see if 7732 * non-NULL, then other info assumed valid. 
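The membar_stst() just below is what backs that guarantee: every other field of the map entry is made globally visible before imap_seg is set, so a trap handler that observes a non-NULL imap_seg is assured of reading a fully initialized entry.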
7733 */ 7734 membar_stst(); 7735 ism_map[i].imap_seg = (uintptr_t)addr | sh_size; 7736 ism_map[i].imap_ment = ism_ment; 7737 7738 /* 7739 * Now add ourselves to the ism_hat's 7740 * mapping list. 7741 */ 7742 ism_ment->iment_hat = sfmmup; 7743 ism_ment->iment_base_va = addr; 7744 ism_hatid->sfmmu_ismhat = 1; 7745 ism_hatid->sfmmu_flags = 0; 7746 mutex_enter(&ism_mlist_lock); 7747 iment_add(ism_ment, ism_hatid); 7748 mutex_exit(&ism_mlist_lock); 7749 added = 1; 7750 break; 7751 } 7752 } 7753 if (!added && ism_blkp->iblk_next == NULL) { 7754 ism_blkp->iblk_next = new_iblk; 7755 new_iblk = NULL; 7756 bzero(ism_blkp->iblk_next, 7757 sizeof (*ism_blkp->iblk_next)); 7758 ism_blkp->iblk_next->iblk_nextpa = (uint64_t)-1; 7759 membar_stst(); 7760 ism_blkp->iblk_nextpa = 7761 va_to_pa((caddr_t)ism_blkp->iblk_next); 7762 } 7763 ism_blkp = ism_blkp->iblk_next; 7764 } 7765 7766 /* 7767 * Update our counters for this sfmmup's ism mappings. 7768 */ 7769 for (i = 0; i <= ismszc; i++) { 7770 if (!(disable_ism_large_pages & (1 << i))) 7771 (void) ism_tsb_entries(sfmmup, i); 7772 } 7773 7774 hatlockp = sfmmu_hat_enter(sfmmup); 7775 7776 /* 7777 * For ISM and DISM we do not support 512K pages, so we only 7778 * search the 4M and 8K/64K hashes for 4-pagesize CPUs, and search 7779 * the 256M or 32M, and 4M and 8K/64K hashes for 6-pagesize CPUs. 7780 */ 7781 ASSERT((disable_ism_large_pages & (1 << TTE512K)) != 0); 7782 7783 if (ismszc > TTE4M && !SFMMU_FLAGS_ISSET(sfmmup, HAT_4M_FLAG)) 7784 SFMMU_FLAGS_SET(sfmmup, HAT_4M_FLAG); 7785 7786 if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_64K_FLAG)) 7787 SFMMU_FLAGS_SET(sfmmup, HAT_64K_FLAG); 7788 7789 /* 7790 * If we updated the ismblkpa for this HAT or we need 7791 * to start searching the 256M or 32M or 4M hash, we must 7792 * make sure all CPUs running this process reload their 7793 * tsbmiss area. Otherwise they will fail to load the mappings 7794 * in the tsbmiss handler and will loop calling pagefault(). 7795 */ 7796 switch (ismszc) { 7797 case TTE256M: 7798 if (reload_mmu || !SFMMU_FLAGS_ISSET(sfmmup, HAT_256M_FLAG)) { 7799 SFMMU_FLAGS_SET(sfmmup, HAT_256M_FLAG); 7800 sfmmu_sync_mmustate(sfmmup); 7801 } 7802 break; 7803 case TTE32M: 7804 if (reload_mmu || !SFMMU_FLAGS_ISSET(sfmmup, HAT_32M_FLAG)) { 7805 SFMMU_FLAGS_SET(sfmmup, HAT_32M_FLAG); 7806 sfmmu_sync_mmustate(sfmmup); 7807 } 7808 break; 7809 case TTE4M: 7810 if (reload_mmu || !SFMMU_FLAGS_ISSET(sfmmup, HAT_4M_FLAG)) { 7811 SFMMU_FLAGS_SET(sfmmup, HAT_4M_FLAG); 7812 sfmmu_sync_mmustate(sfmmup); 7813 } 7814 break; 7815 default: 7816 break; 7817 } 7818 7819 /* 7820 * Now we can drop the locks. 7821 */ 7822 sfmmu_ismhat_exit(sfmmup, 1); 7823 sfmmu_hat_exit(hatlockp); 7824 7825 /* 7826 * Free up ismblk if we didn't use it. 7827 */ 7828 if (new_iblk != NULL) 7829 kmem_cache_free(ism_blk_cache, new_iblk); 7830 7831 /* 7832 * Check TSB and TLB page sizes. 7833 */ 7834 sfmmu_check_page_sizes(sfmmup, 1); 7835 7836 return (0); 7837 } 7838 7839 /* 7840 * hat_unshare removes exactly one ism_map from 7841 * this process's as. It expects multiple calls 7842 * to hat_unshare for multiple shm segments.
7843 */ 7844 void 7845 hat_unshare(struct hat *sfmmup, caddr_t addr, size_t len, uint_t ismszc) 7846 { 7847 ism_map_t *ism_map; 7848 ism_ment_t *free_ment = NULL; 7849 ism_blk_t *ism_blkp; 7850 struct hat *ism_hatid; 7851 int found, i; 7852 hatlock_t *hatlockp; 7853 struct tsb_info *tsbinfo; 7854 uint_t ismshift = page_get_shift(ismszc); 7855 size_t sh_size = ISM_SHIFT(ismshift, len); 7856 7857 ASSERT(ISM_ALIGNED(ismshift, addr)); 7858 ASSERT(ISM_ALIGNED(ismshift, len)); 7859 ASSERT(sfmmup != NULL); 7860 ASSERT(sfmmup != ksfmmup); 7861 7862 if (sfmmup->sfmmu_xhat_provider) { 7863 XHAT_UNSHARE(sfmmup, addr, len); 7864 return; 7865 } else { 7866 /* 7867 * This must be a CPU HAT. If the address space has 7868 * XHATs attached, inform all XHATs that the ISM segment 7869 * is going away. 7870 */ 7871 ASSERT(sfmmup->sfmmu_as != NULL); 7872 if (sfmmup->sfmmu_as->a_xhat != NULL) 7873 xhat_unshare_all(sfmmup->sfmmu_as, addr, len); 7874 } 7875 7876 /* 7877 * Make sure that during the entire time ISM mappings are removed, 7878 * the trap handlers serialize behind us, and that no one else 7879 * can be mucking with ISM mappings. This also lets us get away 7880 * with not doing expensive cross calls to flush the TLB -- we 7881 * just discard the context, flush the entire TSB, and call it 7882 * a day. 7883 */ 7884 sfmmu_ismhat_enter(sfmmup, 0); 7885 7886 /* 7887 * Remove the mapping. 7888 * 7889 * We can't have any holes in the ism map. 7890 * The tsb miss code stops searching the ism map 7891 * at the first empty slot, so if removing an entry 7892 * leaves a hole we must move everyone past it up by one. 7893 * 7894 * Also empty ism map blks are not freed until the 7895 * process exits. This is to prevent an MT race condition 7896 * between sfmmu_unshare() and sfmmu_tsbmiss_exception(). 7897 */ 7898 found = 0; 7899 ism_blkp = sfmmup->sfmmu_iblk; 7900 while (!found && ism_blkp) { 7901 ism_map = ism_blkp->iblk_maps; 7902 for (i = 0; i < ISM_MAP_SLOTS; i++) { 7903 if (addr == ism_start(ism_map[i]) && 7904 sh_size == (size_t)(ism_size(ism_map[i]))) { 7905 found = 1; 7906 break; 7907 } 7908 } 7909 if (!found) 7910 ism_blkp = ism_blkp->iblk_next; 7911 } 7912 7913 if (found) { 7914 ism_hatid = ism_map[i].imap_ismhat; 7915 ASSERT(ism_hatid != NULL); 7916 ASSERT(ism_hatid->sfmmu_ismhat == 1); 7917 7918 /* 7919 * First remove ourselves from the ism mapping list. 7920 */ 7921 mutex_enter(&ism_mlist_lock); 7922 iment_sub(ism_map[i].imap_ment, ism_hatid); 7923 mutex_exit(&ism_mlist_lock); 7924 free_ment = ism_map[i].imap_ment; 7925 7926 /* 7927 * Now guarantee that any other CPU 7928 * that tries to process an ISM miss 7929 * will go to tl=0. 7930 */ 7931 hatlockp = sfmmu_hat_enter(sfmmup); 7932 7933 sfmmu_invalidate_ctx(sfmmup); 7934 7935 sfmmu_hat_exit(hatlockp); 7936 7937 /* 7938 * We delete the ism map by copying 7939 * the next map over the current one. 7940 * We will take the next one in the maps 7941 * array or from the next ism_blk.
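For example, deleting the map in slot 1 of a block holding [A, B, C, empty] shifts it to [A, C, empty, empty]; if a next ism_blk exists, its first map is copied into this block's last slot and the shifting continues in that block (the slot letters here are illustrative).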
7942 */ 7943 while (ism_blkp) { 7944 ism_map = ism_blkp->iblk_maps; 7945 while (i < (ISM_MAP_SLOTS - 1)) { 7946 ism_map[i] = ism_map[i + 1]; 7947 i++; 7948 } 7949 /* i == (ISM_MAP_SLOTS - 1) */ 7950 ism_blkp = ism_blkp->iblk_next; 7951 if (ism_blkp) { 7952 ism_map[i] = ism_blkp->iblk_maps[0]; 7953 i = 0; 7954 } else { 7955 ism_map[i].imap_seg = 0; 7956 ism_map[i].imap_vb_shift = 0; 7957 ism_map[i].imap_hatflags = 0; 7958 ism_map[i].imap_sz_mask = 0; 7959 ism_map[i].imap_ismhat = NULL; 7960 ism_map[i].imap_ment = NULL; 7961 } 7962 } 7963 7964 /* 7965 * Now flush entire TSB for the process, since 7966 * demapping page by page can be too expensive. 7967 * We don't have to flush the TLB here anymore 7968 * since we switch to a new TLB ctx instead. 7969 * Also, there is no need to flush if the process 7970 * is exiting since the TSB will be freed later. 7971 */ 7972 if (!sfmmup->sfmmu_free) { 7973 hatlockp = sfmmu_hat_enter(sfmmup); 7974 for (tsbinfo = sfmmup->sfmmu_tsb; tsbinfo != NULL; 7975 tsbinfo = tsbinfo->tsb_next) { 7976 if (tsbinfo->tsb_flags & TSB_SWAPPED) 7977 continue; 7978 sfmmu_inv_tsb(tsbinfo->tsb_va, 7979 TSB_BYTES(tsbinfo->tsb_szc)); 7980 } 7981 sfmmu_hat_exit(hatlockp); 7982 } 7983 } 7984 7985 /* 7986 * Update our counters for this sfmmup's ism mappings. 7987 */ 7988 for (i = 0; i <= ismszc; i++) { 7989 if (!(disable_ism_large_pages & (1 << i))) 7990 (void) ism_tsb_entries(sfmmup, i); 7991 } 7992 7993 sfmmu_ismhat_exit(sfmmup, 0); 7994 7995 /* 7996 * We must do our freeing here after dropping locks 7997 * to prevent a deadlock in the kmem allocator on the 7998 * mapping list lock. 7999 */ 8000 if (free_ment != NULL) 8001 kmem_cache_free(ism_ment_cache, free_ment); 8002 8003 /* 8004 * Check TSB and TLB page sizes if the process isn't exiting. 8005 */ 8006 if (!sfmmup->sfmmu_free) 8007 sfmmu_check_page_sizes(sfmmup, 0); 8008 } 8009 8010 /* ARGSUSED */ 8011 static int 8012 sfmmu_idcache_constructor(void *buf, void *cdrarg, int kmflags) 8013 { 8014 /* void *buf is sfmmu_t pointer */ 8015 return (0); 8016 } 8017 8018 /* ARGSUSED */ 8019 static void 8020 sfmmu_idcache_destructor(void *buf, void *cdrarg) 8021 { 8022 /* void *buf is sfmmu_t pointer */ 8023 } 8024 8025 /* 8026 * setup kmem hmeblks by bzeroing all members and initializing the nextpa 8027 * field to be the pa of this hmeblk 8028 */ 8029 /* ARGSUSED */ 8030 static int 8031 sfmmu_hblkcache_constructor(void *buf, void *cdrarg, int kmflags) 8032 { 8033 struct hme_blk *hmeblkp; 8034 8035 bzero(buf, (size_t)cdrarg); 8036 hmeblkp = (struct hme_blk *)buf; 8037 hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp); 8038 8039 #ifdef HBLK_TRACE 8040 mutex_init(&hmeblkp->hblk_audit_lock, NULL, MUTEX_DEFAULT, NULL); 8041 #endif /* HBLK_TRACE */ 8042 8043 return (0); 8044 } 8045 8046 /* ARGSUSED */ 8047 static void 8048 sfmmu_hblkcache_destructor(void *buf, void *cdrarg) 8049 { 8050 8051 #ifdef HBLK_TRACE 8052 8053 struct hme_blk *hmeblkp; 8054 8055 hmeblkp = (struct hme_blk *)buf; 8056 mutex_destroy(&hmeblkp->hblk_audit_lock); 8057 8058 #endif /* HBLK_TRACE */ 8059 } 8060 8061 #define SFMMU_CACHE_RECLAIM_SCAN_RATIO 8 8062 static int sfmmu_cache_reclaim_scan_ratio = SFMMU_CACHE_RECLAIM_SCAN_RATIO; 8063 /* 8064 * The kmem allocator will callback into our reclaim routine when the system 8065 * is running low in memory. We traverse the hash and free up all unused but 8066 * still cached hme_blks. We also traverse the free list and free them up 8067 * as well. 
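Each callback only scans 1/sfmmu_cache_reclaim_scan_ratio (by default one eighth) of each hash table; the static reclaim "hands" remember where the previous scan stopped, so repeated callbacks sweep the entire hash incrementally while keeping any single invocation cheap.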
8068 */ 8069 /*ARGSUSED*/ 8070 static void 8071 sfmmu_hblkcache_reclaim(void *cdrarg) 8072 { 8073 int i; 8074 uint64_t hblkpa, prevpa, nx_pa; 8075 struct hmehash_bucket *hmebp; 8076 struct hme_blk *hmeblkp, *nx_hblk, *pr_hblk = NULL; 8077 static struct hmehash_bucket *uhmehash_reclaim_hand; 8078 static struct hmehash_bucket *khmehash_reclaim_hand; 8079 struct hme_blk *list = NULL; 8080 8081 hmebp = uhmehash_reclaim_hand; 8082 if (hmebp == NULL || hmebp > &uhme_hash[UHMEHASH_SZ]) 8083 uhmehash_reclaim_hand = hmebp = uhme_hash; 8084 uhmehash_reclaim_hand += UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; 8085 8086 for (i = UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) { 8087 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) { 8088 hmeblkp = hmebp->hmeblkp; 8089 hblkpa = hmebp->hmeh_nextpa; 8090 prevpa = 0; 8091 pr_hblk = NULL; 8092 while (hmeblkp) { 8093 nx_hblk = hmeblkp->hblk_next; 8094 nx_pa = hmeblkp->hblk_nextpa; 8095 if (!hmeblkp->hblk_vcnt && 8096 !hmeblkp->hblk_hmecnt) { 8097 sfmmu_hblk_hash_rm(hmebp, hmeblkp, 8098 prevpa, pr_hblk); 8099 sfmmu_hblk_free(hmebp, hmeblkp, 8100 hblkpa, &list); 8101 } else { 8102 pr_hblk = hmeblkp; 8103 prevpa = hblkpa; 8104 } 8105 hmeblkp = nx_hblk; 8106 hblkpa = nx_pa; 8107 } 8108 SFMMU_HASH_UNLOCK(hmebp); 8109 } 8110 if (hmebp++ == &uhme_hash[UHMEHASH_SZ]) 8111 hmebp = uhme_hash; 8112 } 8113 8114 hmebp = khmehash_reclaim_hand; 8115 if (hmebp == NULL || hmebp > &khme_hash[KHMEHASH_SZ]) 8116 khmehash_reclaim_hand = hmebp = khme_hash; 8117 khmehash_reclaim_hand += KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; 8118 8119 for (i = KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) { 8120 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) { 8121 hmeblkp = hmebp->hmeblkp; 8122 hblkpa = hmebp->hmeh_nextpa; 8123 prevpa = 0; 8124 pr_hblk = NULL; 8125 while (hmeblkp) { 8126 nx_hblk = hmeblkp->hblk_next; 8127 nx_pa = hmeblkp->hblk_nextpa; 8128 if (!hmeblkp->hblk_vcnt && 8129 !hmeblkp->hblk_hmecnt) { 8130 sfmmu_hblk_hash_rm(hmebp, hmeblkp, 8131 prevpa, pr_hblk); 8132 sfmmu_hblk_free(hmebp, hmeblkp, 8133 hblkpa, &list); 8134 } else { 8135 pr_hblk = hmeblkp; 8136 prevpa = hblkpa; 8137 } 8138 hmeblkp = nx_hblk; 8139 hblkpa = nx_pa; 8140 } 8141 SFMMU_HASH_UNLOCK(hmebp); 8142 } 8143 if (hmebp++ == &khme_hash[KHMEHASH_SZ]) 8144 hmebp = khme_hash; 8145 } 8146 sfmmu_hblks_list_purge(&list); 8147 } 8148 8149 /* 8150 * sfmmu_get_ppvcolor should become a vm_machdep or hatop interface. 8151 * The same goes for sfmmu_get_addrvcolor(). 8152 * 8153 * This function will return the virtual color for the specified page. The 8154 * virtual color corresponds to this page's current mapping or its last mapping. 8155 * It is used by memory allocators to choose addresses with the correct 8156 * alignment so vac consistency is automatically maintained. If the page 8157 * has no color it returns -1. 8158 */ 8159 /*ARGSUSED*/ 8160 int 8161 sfmmu_get_ppvcolor(struct page *pp) 8162 { 8163 #ifdef VAC 8164 int color; 8165 8166 if (!(cache & CACHE_VAC) || PP_NEWPAGE(pp)) { 8167 return (-1); 8168 } 8169 color = PP_GET_VCOLOR(pp); 8170 ASSERT(color < mmu_btop(shm_alignment)); 8171 return (color); 8172 #else 8173 return (-1); 8174 #endif /* VAC */ 8175 } 8176 8177 /* 8178 * This function will return the desired alignment for vac consistency 8179 * (vac color) given a virtual address. If no vac is present it returns -1.
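 *
 * Conceptually (a hedged sketch only; not necessarily the exact
 * addr_to_vcolor() implementation), the color is the page-sized
 * slice of the address within the VAC span:
 *
 *	color = (vaddr & (shm_alignment - 1)) >> MMU_PAGESHIFT;
 *
 * so two virtual mappings of a page can safely share the cache
 * only when their colors match.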
8180 */ 8181 /*ARGSUSED*/ 8182 int 8183 sfmmu_get_addrvcolor(caddr_t vaddr) 8184 { 8185 #ifdef VAC 8186 if (cache & CACHE_VAC) { 8187 return (addr_to_vcolor(vaddr)); 8188 } else { 8189 return (-1); 8190 } 8191 #else 8192 return (-1); 8193 #endif /* VAC */ 8194 } 8195 8196 #ifdef VAC 8197 /* 8198 * Check for conflicts. 8199 * A conflict exists if the new and existing mappings do not match in 8200 * their shm_alignment fields. If conflicts exist, the existing mappings 8201 * are flushed unless one of them is locked. If one of them is locked, then 8202 * the mappings are flushed and converted to non-cacheable mappings. 8203 */ 8204 static void 8205 sfmmu_vac_conflict(struct hat *hat, caddr_t addr, page_t *pp) 8206 { 8207 struct hat *tmphat; 8208 struct sf_hment *sfhmep, *tmphme = NULL; 8209 struct hme_blk *hmeblkp; 8210 int vcolor; 8211 tte_t tte; 8212 8213 ASSERT(sfmmu_mlist_held(pp)); 8214 ASSERT(!PP_ISNC(pp)); /* page better be cacheable */ 8215 8216 vcolor = addr_to_vcolor(addr); 8217 if (PP_NEWPAGE(pp)) { 8218 PP_SET_VCOLOR(pp, vcolor); 8219 return; 8220 } 8221 8222 if (PP_GET_VCOLOR(pp) == vcolor) { 8223 return; 8224 } 8225 8226 if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) { 8227 /* 8228 * Previous user of page had a different color 8229 * but since there are no current users 8230 * we just flush the cache and change the color. 8231 */ 8232 SFMMU_STAT(sf_pgcolor_conflict); 8233 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp)); 8234 PP_SET_VCOLOR(pp, vcolor); 8235 return; 8236 } 8237 8238 /* 8239 * If we get here we have a vac conflict with a current 8240 * mapping. VAC conflict policy is as follows. 8241 * - The default is to unload the other mappings unless: 8242 * - If we have a large mapping we uncache the page. 8243 * We need to uncache the rest of the large page too. 8244 * - If any of the mappings are locked we uncache the page. 8245 * - If the requested mapping is inconsistent 8246 * with another mapping and that mapping 8247 * is in the same address space we have to 8248 * make it non-cached. The default thing 8249 * to do is unload the inconsistent mapping 8250 * but if they are in the same address space 8251 * we run the risk of unmapping the pc or the 8252 * stack which we will use as we return to the user, 8253 * in which case we can then fault on the thing 8254 * we just unloaded and get into an infinite loop. 8255 */ 8256 if (PP_ISMAPPED_LARGE(pp)) { 8257 int sz; 8258 8259 /* 8260 * Existing mapping is for big pages. We don't unload 8261 * existing big mappings to satisfy new mappings. 8262 * Always convert all mappings to TNC. 8263 */ 8264 sz = fnd_mapping_sz(pp); 8265 pp = PP_GROUPLEADER(pp, sz); 8266 SFMMU_STAT_ADD(sf_uncache_conflict, TTEPAGES(sz)); 8267 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 8268 TTEPAGES(sz)); 8269 8270 return; 8271 } 8272 8273 /* 8274 * Check if any mapping is in the same address space or is 8275 * locked, since in either case we need to uncache.
8276 */ 8277 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) { 8278 tmphme = sfhmep->hme_next; 8279 hmeblkp = sfmmu_hmetohblk(sfhmep); 8280 if (hmeblkp->hblk_xhat_bit) 8281 continue; 8282 tmphat = hblktosfmmu(hmeblkp); 8283 sfmmu_copytte(&sfhmep->hme_tte, &tte); 8284 ASSERT(TTE_IS_VALID(&tte)); 8285 if ((tmphat == hat) || hmeblkp->hblk_lckcnt) { 8286 /* 8287 * We have an uncache conflict 8288 */ 8289 SFMMU_STAT(sf_uncache_conflict); 8290 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 1); 8291 return; 8292 } 8293 } 8294 8295 /* 8296 * We have an unload conflict. 8297 * We have already checked for LARGE mappings, therefore 8298 * the remaining mapping(s) must be TTE8K. 8299 */ 8300 SFMMU_STAT(sf_unload_conflict); 8301 8302 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) { 8303 tmphme = sfhmep->hme_next; 8304 hmeblkp = sfmmu_hmetohblk(sfhmep); 8305 if (hmeblkp->hblk_xhat_bit) 8306 continue; 8307 (void) sfmmu_pageunload(pp, sfhmep, TTE8K); 8308 } 8309 8310 if (PP_ISMAPPED_KPM(pp)) 8311 sfmmu_kpm_vac_unload(pp, addr); 8312 8313 /* 8314 * Unloads only do TLB flushes so we need to flush the 8315 * cache here. 8316 */ 8317 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp)); 8318 PP_SET_VCOLOR(pp, vcolor); 8319 } 8320 8321 /* 8322 * Whenever a mapping is unloaded and the page is in TNC state, 8323 * we see if the page can be made cacheable again. 'pp' is 8324 * the page that we just unloaded a mapping from, the size 8325 * of the mapping that was unloaded is 'ottesz'. 8326 * Remark: 8327 * The recache policy for mpss pages can leave a performance problem 8328 * under the following circumstances: 8329 * . A large page in uncached mode has just been unmapped. 8330 * . All constituent pages are TNC due to a conflicting small mapping. 8331 * . There are many other, non-conflicting, small mappings around for 8332 * a lot of the constituent pages. 8333 * . We're called w/ the "old" groupleader page and the old ottesz, 8334 * but this is irrelevant, since we're no longer "PP_ISMAPPED_LARGE", so 8335 * we end up w/ TTE8K or npages == 1. 8336 * . We call tst_tnc w/ the old groupleader only, and if there is no 8337 * conflict, we re-cache only this page. 8338 * . All other small mappings are not checked and will be left in TNC mode. 8339 * The problem is not very serious because: 8340 * . mpss is actually only defined for heap and stack, so the probability 8341 * is not very high that a large page mapping exists in parallel to a small 8342 * one (this is possible, but seems to be bad programming style in the 8343 * appl). 8344 * . The problem gets a little bit more serious when those TNC pages 8345 * have to be mapped into kernel space, e.g. for networking. 8346 * . When VAC alias conflicts occur in applications, this is regarded 8347 * as an application bug. So if kstat's show them, the appl should 8348 * be changed anyway. 8349 */ 8350 void 8351 conv_tnc(page_t *pp, int ottesz) 8352 { 8353 int cursz, dosz; 8354 pgcnt_t curnpgs, dopgs; 8355 pgcnt_t pg64k; 8356 page_t *pp2; 8357 8358 /* 8359 * Determine how big a range we check for TNC and find 8360 * the leader page. cursz is the size of the biggest 8361 * mapping that still exists on 'pp'.
8362 */ 8363 if (PP_ISMAPPED_LARGE(pp)) { 8364 cursz = fnd_mapping_sz(pp); 8365 } else { 8366 cursz = TTE8K; 8367 } 8368 8369 if (ottesz >= cursz) { 8370 dosz = ottesz; 8371 pp2 = pp; 8372 } else { 8373 dosz = cursz; 8374 pp2 = PP_GROUPLEADER(pp, dosz); 8375 } 8376 8377 pg64k = TTEPAGES(TTE64K); 8378 dopgs = TTEPAGES(dosz); 8379 8380 ASSERT(dopgs == 1 || ((dopgs & (pg64k - 1)) == 0)); 8381 8382 while (dopgs != 0) { 8383 curnpgs = TTEPAGES(cursz); 8384 if (tst_tnc(pp2, curnpgs)) { 8385 SFMMU_STAT_ADD(sf_recache, curnpgs); 8386 sfmmu_page_cache_array(pp2, HAT_CACHE, CACHE_NO_FLUSH, 8387 curnpgs); 8388 } 8389 8390 ASSERT(dopgs >= curnpgs); 8391 dopgs -= curnpgs; 8392 8393 if (dopgs == 0) { 8394 break; 8395 } 8396 8397 pp2 = PP_PAGENEXT_N(pp2, curnpgs); 8398 if (((dopgs & (pg64k - 1)) == 0) && PP_ISMAPPED_LARGE(pp2)) { 8399 cursz = fnd_mapping_sz(pp2); 8400 } else { 8401 cursz = TTE8K; 8402 } 8403 } 8404 } 8405 8406 /* 8407 * Returns 1 if page(s) can be converted from TNC to cacheable setting, 8408 * returns 0 otherwise. Note that oaddr argument is valid for only 8409 * 8k pages. 8410 */ 8411 int 8412 tst_tnc(page_t *pp, pgcnt_t npages) 8413 { 8414 struct sf_hment *sfhme; 8415 struct hme_blk *hmeblkp; 8416 tte_t tte; 8417 caddr_t vaddr; 8418 int clr_valid = 0; 8419 int color, color1, bcolor; 8420 int i, ncolors; 8421 8422 ASSERT(pp != NULL); 8423 ASSERT(!(cache & CACHE_WRITEBACK)); 8424 8425 if (npages > 1) { 8426 ncolors = CACHE_NUM_COLOR; 8427 } 8428 8429 for (i = 0; i < npages; i++) { 8430 ASSERT(sfmmu_mlist_held(pp)); 8431 ASSERT(PP_ISTNC(pp)); 8432 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR); 8433 8434 if (PP_ISPNC(pp)) { 8435 return (0); 8436 } 8437 8438 clr_valid = 0; 8439 if (PP_ISMAPPED_KPM(pp)) { 8440 caddr_t kpmvaddr; 8441 8442 ASSERT(kpm_enable); 8443 kpmvaddr = hat_kpm_page2va(pp, 1); 8444 ASSERT(!(npages > 1 && IS_KPM_ALIAS_RANGE(kpmvaddr))); 8445 color1 = addr_to_vcolor(kpmvaddr); 8446 clr_valid = 1; 8447 } 8448 8449 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) { 8450 hmeblkp = sfmmu_hmetohblk(sfhme); 8451 if (hmeblkp->hblk_xhat_bit) 8452 continue; 8453 8454 sfmmu_copytte(&sfhme->hme_tte, &tte); 8455 ASSERT(TTE_IS_VALID(&tte)); 8456 8457 vaddr = tte_to_vaddr(hmeblkp, tte); 8458 color = addr_to_vcolor(vaddr); 8459 8460 if (npages > 1) { 8461 /* 8462 * If there is a big mapping, make sure 8463 * 8K mapping is consistent with the big 8464 * mapping. 
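 *
 * Worked example (assuming a 2-color VAC): within a 64K group the
 * constituent 8K pages must sit at colors 0,1,0,1,... in order, so
 * page i is required to map at color (i % ncolors); any 8K mapping
 * found at a different color makes the group unsafe to re-cache
 * and we return 0.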
8465 */ 8466 bcolor = i % ncolors; 8467 if (color != bcolor) { 8468 return (0); 8469 } 8470 } 8471 if (!clr_valid) { 8472 clr_valid = 1; 8473 color1 = color; 8474 } 8475 8476 if (color1 != color) { 8477 return (0); 8478 } 8479 } 8480 8481 pp = PP_PAGENEXT(pp); 8482 } 8483 8484 return (1); 8485 } 8486 8487 void 8488 sfmmu_page_cache_array(page_t *pp, int flags, int cache_flush_flag, 8489 pgcnt_t npages) 8490 { 8491 kmutex_t *pmtx; 8492 int i, ncolors, bcolor; 8493 kpm_hlk_t *kpmp; 8494 cpuset_t cpuset; 8495 8496 ASSERT(pp != NULL); 8497 ASSERT(!(cache & CACHE_WRITEBACK)); 8498 8499 kpmp = sfmmu_kpm_kpmp_enter(pp, npages); 8500 pmtx = sfmmu_page_enter(pp); 8501 8502 /* 8503 * Fast path caching single unmapped page 8504 */ 8505 if (npages == 1 && !PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp) && 8506 flags == HAT_CACHE) { 8507 PP_CLRTNC(pp); 8508 PP_CLRPNC(pp); 8509 sfmmu_page_exit(pmtx); 8510 sfmmu_kpm_kpmp_exit(kpmp); 8511 return; 8512 } 8513 8514 /* 8515 * We need to capture all cpus in order to change cacheability 8516 * because we can't allow one cpu to access the same physical 8517 * page using a cacheable and a non-cacheable mapping at the same 8518 * time. Since we may end up walking the ism mapping list 8519 * we have to grab its lock now, since we can't after all the 8520 * cpus have been captured. 8521 */ 8522 sfmmu_hat_lock_all(); 8523 mutex_enter(&ism_mlist_lock); 8524 kpreempt_disable(); 8525 cpuset = cpu_ready_set; 8526 xc_attention(cpuset); 8527 8528 if (npages > 1) { 8529 /* 8530 * Make sure all colors are flushed since 8531 * sfmmu_page_cache() only flushes one color; 8532 * it does not know about big pages. 8533 */ 8534 ncolors = CACHE_NUM_COLOR; 8535 if (flags & HAT_TMPNC) { 8536 for (i = 0; i < ncolors; i++) { 8537 sfmmu_cache_flushcolor(i, pp->p_pagenum); 8538 } 8539 cache_flush_flag = CACHE_NO_FLUSH; 8540 } 8541 } 8542 8543 for (i = 0; i < npages; i++) { 8544 8545 ASSERT(sfmmu_mlist_held(pp)); 8546 8547 if (!(flags == HAT_TMPNC && PP_ISTNC(pp))) { 8548 8549 if (npages > 1) { 8550 bcolor = i % ncolors; 8551 } else { 8552 bcolor = NO_VCOLOR; 8553 } 8554 8555 sfmmu_page_cache(pp, flags, cache_flush_flag, 8556 bcolor); 8557 } 8558 8559 pp = PP_PAGENEXT(pp); 8560 } 8561 8562 xt_sync(cpuset); 8563 xc_dismissed(cpuset); 8564 mutex_exit(&ism_mlist_lock); 8565 sfmmu_hat_unlock_all(); 8566 sfmmu_page_exit(pmtx); 8567 sfmmu_kpm_kpmp_exit(kpmp); 8568 kpreempt_enable(); 8569 } 8570 8571 /* 8572 * This function changes the virtual cacheability of all mappings to a 8573 * particular page. When changing from uncache to cacheable the mappings will 8574 * only be changed if all of them have the same virtual color. 8575 * We need to flush the cache on all cpus. It is possible that 8576 * a process referenced a page as cacheable but has since exited 8577 * and cleared the mapping list. We still need to flush it, but we have no 8578 * state, so flushing on all cpus is the only alternative.
8579 */ 8580 static void 8581 sfmmu_page_cache(page_t *pp, int flags, int cache_flush_flag, int bcolor) 8582 { 8583 struct sf_hment *sfhme; 8584 struct hme_blk *hmeblkp; 8585 sfmmu_t *sfmmup; 8586 tte_t tte, ttemod; 8587 caddr_t vaddr; 8588 int ret, color; 8589 pfn_t pfn; 8590 8591 color = bcolor; 8592 pfn = pp->p_pagenum; 8593 8594 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) { 8595 8596 hmeblkp = sfmmu_hmetohblk(sfhme); 8597 8598 if (hmeblkp->hblk_xhat_bit) 8599 continue; 8600 8601 sfmmu_copytte(&sfhme->hme_tte, &tte); 8602 ASSERT(TTE_IS_VALID(&tte)); 8603 vaddr = tte_to_vaddr(hmeblkp, tte); 8604 color = addr_to_vcolor(vaddr); 8605 8606 #ifdef DEBUG 8607 if ((flags & HAT_CACHE) && bcolor != NO_VCOLOR) { 8608 ASSERT(color == bcolor); 8609 } 8610 #endif 8611 8612 ASSERT(flags != HAT_TMPNC || color == PP_GET_VCOLOR(pp)); 8613 8614 ttemod = tte; 8615 if (flags & (HAT_UNCACHE | HAT_TMPNC)) { 8616 TTE_CLR_VCACHEABLE(&ttemod); 8617 } else { /* flags & HAT_CACHE */ 8618 TTE_SET_VCACHEABLE(&ttemod); 8619 } 8620 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 8621 if (ret < 0) { 8622 /* 8623 * Since all cpus are captured modifytte should not 8624 * fail. 8625 */ 8626 panic("sfmmu_page_cache: write to tte failed"); 8627 } 8628 8629 sfmmup = hblktosfmmu(hmeblkp); 8630 if (cache_flush_flag == CACHE_FLUSH) { 8631 /* 8632 * Flush TSBs, TLBs and caches 8633 */ 8634 if (sfmmup->sfmmu_ismhat) { 8635 if (flags & HAT_CACHE) { 8636 SFMMU_STAT(sf_ism_recache); 8637 } else { 8638 SFMMU_STAT(sf_ism_uncache); 8639 } 8640 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp, 8641 pfn, CACHE_FLUSH); 8642 } else { 8643 sfmmu_tlbcache_demap(vaddr, sfmmup, hmeblkp, 8644 pfn, 0, FLUSH_ALL_CPUS, CACHE_FLUSH, 1); 8645 } 8646 8647 /* 8648 * all cache entries belonging to this pfn are 8649 * now flushed. 8650 */ 8651 cache_flush_flag = CACHE_NO_FLUSH; 8652 } else { 8653 8654 /* 8655 * Flush only TSBs and TLBs. 8656 */ 8657 if (sfmmup->sfmmu_ismhat) { 8658 if (flags & HAT_CACHE) { 8659 SFMMU_STAT(sf_ism_recache); 8660 } else { 8661 SFMMU_STAT(sf_ism_uncache); 8662 } 8663 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp, 8664 pfn, CACHE_NO_FLUSH); 8665 } else { 8666 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 1); 8667 } 8668 } 8669 } 8670 8671 if (PP_ISMAPPED_KPM(pp)) 8672 sfmmu_kpm_page_cache(pp, flags, cache_flush_flag); 8673 8674 switch (flags) { 8675 8676 default: 8677 panic("sfmmu_pagecache: unknown flags"); 8678 break; 8679 8680 case HAT_CACHE: 8681 PP_CLRTNC(pp); 8682 PP_CLRPNC(pp); 8683 PP_SET_VCOLOR(pp, color); 8684 break; 8685 8686 case HAT_TMPNC: 8687 PP_SETTNC(pp); 8688 PP_SET_VCOLOR(pp, NO_VCOLOR); 8689 break; 8690 8691 case HAT_UNCACHE: 8692 PP_SETPNC(pp); 8693 PP_CLRTNC(pp); 8694 PP_SET_VCOLOR(pp, NO_VCOLOR); 8695 break; 8696 } 8697 } 8698 #endif /* VAC */ 8699 8700 8701 /* 8702 * Wrapper routine used to return a context. 8703 * 8704 * It's the responsibility of the caller to guarantee that the 8705 * process serializes on calls here by taking the HAT lock for 8706 * the hat. 8707 * 8708 */ 8709 static void 8710 sfmmu_get_ctx(sfmmu_t *sfmmup) 8711 { 8712 mmu_ctx_t *mmu_ctxp; 8713 uint_t pstate_save; 8714 8715 ASSERT(sfmmu_hat_lock_held(sfmmup)); 8716 ASSERT(sfmmup != ksfmmup); 8717 8718 kpreempt_disable(); 8719 8720 mmu_ctxp = CPU_MMU_CTXP(CPU); 8721 ASSERT(mmu_ctxp); 8722 ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms); 8723 ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]); 8724 8725 /* 8726 * Do a wrap-around if cnum reaches the max # cnum supported by a MMU. 
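 *
 * To illustrate with a hypothetical mmu_nctxs of 8192: once cnum
 * 8191 has been handed out, the wrap-around bumps the MMU's
 * generation number (gnum), invalidates every context in the
 * domain, and restarts cnum at NUM_LOCKED_CTXS (skipping the
 * reserved cnums 0 and 1), so a live context is really identified
 * by the (gnum, cnum) pair rather than by cnum alone.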
8727 */ 8728 if (mmu_ctxp->mmu_cnum == mmu_ctxp->mmu_nctxs) 8729 sfmmu_ctx_wrap_around(mmu_ctxp); 8730 8731 /* 8732 * Let the MMU set up the page sizes to use for 8733 * this context in the TLB. Don't program 2nd dtlb for ism hat. 8734 */ 8735 if ((&mmu_set_ctx_page_sizes) && (sfmmup->sfmmu_ismhat == 0)) { 8736 mmu_set_ctx_page_sizes(sfmmup); 8737 } 8738 8739 /* 8740 * sfmmu_alloc_ctx and sfmmu_load_mmustate will be performed with 8741 * interrupts disabled to prevent a race condition with wrap-around 8742 * ctx invalidation. In sun4v, ctx invalidation also involves 8743 * an HV call to set the number of TSBs to 0. If interrupts are not 8744 * disabled until after sfmmu_load_mmustate is complete, TSBs may 8745 * become assigned to INVALID_CONTEXT. This is not allowed. 8746 */ 8747 pstate_save = sfmmu_disable_intrs(); 8748 8749 sfmmu_alloc_ctx(sfmmup, 1, CPU); 8750 sfmmu_load_mmustate(sfmmup); 8751 8752 sfmmu_enable_intrs(pstate_save); 8753 8754 kpreempt_enable(); 8755 } 8756 8757 /* 8758 * When all cnums are used up in an MMU, cnum will wrap around to the 8759 * next generation and start from 2. 8760 */ 8761 static void 8762 sfmmu_ctx_wrap_around(mmu_ctx_t *mmu_ctxp) 8763 { 8764 8765 /* caller must have disabled preemption */ 8766 ASSERT(curthread->t_preempt >= 1); 8767 ASSERT(mmu_ctxp != NULL); 8768 8769 /* acquire Per-MMU (PM) spin lock */ 8770 mutex_enter(&mmu_ctxp->mmu_lock); 8771 8772 /* re-check to see if wrap-around is needed */ 8773 if (mmu_ctxp->mmu_cnum < mmu_ctxp->mmu_nctxs) 8774 goto done; 8775 8776 SFMMU_MMU_STAT(mmu_wrap_around); 8777 8778 /* update gnum */ 8779 ASSERT(mmu_ctxp->mmu_gnum != 0); 8780 mmu_ctxp->mmu_gnum++; 8781 if (mmu_ctxp->mmu_gnum == 0 || 8782 mmu_ctxp->mmu_gnum > MAX_SFMMU_GNUM_VAL) { 8783 cmn_err(CE_PANIC, "mmu_gnum of mmu_ctx 0x%p is out of bounds.", 8784 (void *)mmu_ctxp); 8785 } 8786 8787 if (mmu_ctxp->mmu_ncpus > 1) { 8788 cpuset_t cpuset; 8789 8790 membar_enter(); /* make sure updated gnum visible */ 8791 8792 SFMMU_XCALL_STATS(NULL); 8793 8794 /* xcall to others on the same MMU to invalidate ctx */ 8795 cpuset = mmu_ctxp->mmu_cpuset; 8796 ASSERT(CPU_IN_SET(cpuset, CPU->cpu_id)); 8797 CPUSET_DEL(cpuset, CPU->cpu_id); 8798 CPUSET_AND(cpuset, cpu_ready_set); 8799 8800 /* 8801 * Pass in INVALID_CONTEXT as the first parameter to 8802 * sfmmu_raise_tsb_exception, which invalidates the context 8803 * of any process running on the CPUs in the MMU. 8804 */ 8805 xt_some(cpuset, sfmmu_raise_tsb_exception, 8806 INVALID_CONTEXT, INVALID_CONTEXT); 8807 xt_sync(cpuset); 8808 8809 SFMMU_MMU_STAT(mmu_tsb_raise_exception); 8810 } 8811 8812 if (sfmmu_getctx_sec() != INVALID_CONTEXT) { 8813 sfmmu_setctx_sec(INVALID_CONTEXT); 8814 sfmmu_clear_utsbinfo(); 8815 } 8816 8817 /* 8818 * No xcall is needed here. For sun4u systems all CPUs in context 8819 * domain share a single physical MMU therefore it's enough to flush 8820 * TLB on local CPU. On sun4v systems we use 1 global context 8821 * domain and flush all remote TLBs in sfmmu_raise_tsb_exception 8822 * handler. Note that vtag_flushall_uctxs() is called 8823 * for Ultra II machines, where the equivalent flushall functionality 8824 * is implemented in SW, and only user ctx TLB entries are flushed.
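 *
 * The "&vtag_flushall_uctxs != NULL" test below is the usual check
 * for an optional, platform-supplied routine (presumably resolved
 * at link time, e.g. a weak symbol): its address is NULL on
 * platforms that do not provide it, in which case the generic
 * vtag_flushall() is used instead.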
8825 */ 8826 if (&vtag_flushall_uctxs != NULL) { 8827 vtag_flushall_uctxs(); 8828 } else { 8829 vtag_flushall(); 8830 } 8831 8832 /* reset mmu cnum, skips cnum 0 and 1 */ 8833 mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS; 8834 8835 done: 8836 mutex_exit(&mmu_ctxp->mmu_lock); 8837 } 8838 8839 8840 /* 8841 * For a multi-threaded process, set the process context to INVALID_CONTEXT 8842 * so that it faults and reloads the MMU state from TL=0. For a single-threaded 8843 * process, we can just load the MMU state directly without having to 8844 * set context invalid. Caller must hold the hat lock since we don't 8845 * acquire it here. 8846 */ 8847 static void 8848 sfmmu_sync_mmustate(sfmmu_t *sfmmup) 8849 { 8850 uint_t cnum; 8851 uint_t pstate_save; 8852 8853 ASSERT(sfmmup != ksfmmup); 8854 ASSERT(sfmmu_hat_lock_held(sfmmup)); 8855 8856 kpreempt_disable(); 8857 8858 /* 8859 * We check whether the passed-in sfmmup is the same as the 8860 * currently running proc. This is to make sure the current proc 8861 * stays single-threaded if it already is. 8862 */ 8863 if ((sfmmup == curthread->t_procp->p_as->a_hat) && 8864 (curthread->t_procp->p_lwpcnt == 1)) { 8865 /* single-thread */ 8866 cnum = sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum; 8867 if (cnum != INVALID_CONTEXT) { 8868 uint_t curcnum; 8869 /* 8870 * Disable interrupts to prevent a race condition 8871 * with sfmmu_ctx_wrap_around ctx invalidation. 8872 * In sun4v, ctx invalidation involves setting 8873 * TSB to NULL, hence, interrupts should be disabled 8874 * until after sfmmu_load_mmustate is completed. 8875 */ 8876 pstate_save = sfmmu_disable_intrs(); 8877 curcnum = sfmmu_getctx_sec(); 8878 if (curcnum == cnum) 8879 sfmmu_load_mmustate(sfmmup); 8880 sfmmu_enable_intrs(pstate_save); 8881 ASSERT(curcnum == cnum || curcnum == INVALID_CONTEXT); 8882 } 8883 } else { 8884 /* 8885 * multi-thread 8886 * or when sfmmup is not the same as the curproc. 8887 */ 8888 sfmmu_invalidate_ctx(sfmmup); 8889 } 8890 8891 kpreempt_enable(); 8892 } 8893 8894 8895 /* 8896 * Replace the specified TSB with a new TSB. This function gets called when 8897 * we grow, shrink, or swap in a TSB. When swapping in a TSB (TSB_SWAPIN), the 8898 * TSB_FORCEALLOC flag may be used to force allocation of a minimum-sized TSB 8899 * (8K). 8900 * 8901 * Caller must hold the HAT lock, but should assume any tsb_info 8902 * pointers it has are no longer valid after calling this function. 8903 * 8904 * Return values: 8905 * TSB_ALLOCFAIL Failed to allocate a TSB, due to memory constraints 8906 * TSB_LOSTRACE HAT is busy, i.e. another thread is already doing 8907 * something to this tsbinfo/TSB 8908 * TSB_SUCCESS Operation succeeded 8909 */ 8910 static tsb_replace_rc_t 8911 sfmmu_replace_tsb(sfmmu_t *sfmmup, struct tsb_info *old_tsbinfo, uint_t szc, 8912 hatlock_t *hatlockp, uint_t flags) 8913 { 8914 struct tsb_info *new_tsbinfo = NULL; 8915 struct tsb_info *curtsb, *prevtsb; 8916 uint_t tte_sz_mask; 8917 int i; 8918 8919 ASSERT(sfmmup != ksfmmup); 8920 ASSERT(sfmmup->sfmmu_ismhat == 0); 8921 ASSERT(sfmmu_hat_lock_held(sfmmup)); 8922 ASSERT(szc <= tsb_max_growsize); 8923 8924 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_BUSY)) 8925 return (TSB_LOSTRACE); 8926 8927 /* 8928 * Find the tsb_info ahead of this one in the list, and 8929 * also make sure that the tsb_info passed in really 8930 * exists!
8931 */ 8932 for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb; 8933 curtsb != old_tsbinfo && curtsb != NULL; 8934 prevtsb = curtsb, curtsb = curtsb->tsb_next); 8935 ASSERT(curtsb != NULL); 8936 8937 if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 8938 /* 8939 * The process is swapped out, so just set the new size 8940 * code. When it swaps back in, we'll allocate a new one 8941 * of the new chosen size. 8942 */ 8943 curtsb->tsb_szc = szc; 8944 return (TSB_SUCCESS); 8945 } 8946 SFMMU_FLAGS_SET(sfmmup, HAT_BUSY); 8947 8948 tte_sz_mask = old_tsbinfo->tsb_ttesz_mask; 8949 8950 /* 8951 * All initialization is done inside of sfmmu_tsbinfo_alloc(). 8952 * If we fail to allocate a TSB, exit. 8953 */ 8954 sfmmu_hat_exit(hatlockp); 8955 if (sfmmu_tsbinfo_alloc(&new_tsbinfo, szc, tte_sz_mask, 8956 flags, sfmmup)) { 8957 (void) sfmmu_hat_enter(sfmmup); 8958 if (!(flags & TSB_SWAPIN)) 8959 SFMMU_STAT(sf_tsb_resize_failures); 8960 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 8961 return (TSB_ALLOCFAIL); 8962 } 8963 (void) sfmmu_hat_enter(sfmmup); 8964 8965 /* 8966 * Re-check to make sure somebody else didn't muck with us while we 8967 * didn't hold the HAT lock. If the process swapped out, fine, just 8968 * exit; this can happen if we try to shrink the TSB from the context 8969 * of another process (such as on an ISM unmap), though it is rare. 8970 */ 8971 if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 8972 SFMMU_STAT(sf_tsb_resize_failures); 8973 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 8974 sfmmu_hat_exit(hatlockp); 8975 sfmmu_tsbinfo_free(new_tsbinfo); 8976 (void) sfmmu_hat_enter(sfmmup); 8977 return (TSB_LOSTRACE); 8978 } 8979 8980 #ifdef DEBUG 8981 /* Reverify that the tsb_info still exists; for debugging only */ 8982 for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb; 8983 curtsb != old_tsbinfo && curtsb != NULL; 8984 prevtsb = curtsb, curtsb = curtsb->tsb_next); 8985 ASSERT(curtsb != NULL); 8986 #endif /* DEBUG */ 8987 8988 /* 8989 * Quiesce any CPUs running this process on their next TLB miss 8990 * so they atomically see the new tsb_info. We temporarily set the 8991 * context to invalid context so new threads that come on processor 8992 * after we do the xcall to cpusran will also serialize behind the 8993 * HAT lock on TLB miss and will see the new TSB. Since this short 8994 * race with a new thread coming on processor is relatively rare, 8995 * this synchronization mechanism should be cheaper than always 8996 * pausing all CPUs for the duration of the setup, which is what 8997 * the old implementation did. This is particularly true if we are 8998 * copying a huge chunk of memory around during that window. 8999 * 9000 * The memory barriers are to make sure things stay consistent 9001 * with resume() since it does not hold the HAT lock while 9002 * walking the list of tsb_info structures. 9003 */ 9004 if ((flags & TSB_SWAPIN) != TSB_SWAPIN) { 9005 /* The TSB is either growing or shrinking. */ 9006 sfmmu_invalidate_ctx(sfmmup); 9007 } else { 9008 /* 9009 * It is illegal to swap in TSBs from a process other 9010 * than a process being swapped in. This in turn 9011 * implies we do not have a valid MMU context here 9012 * since a process needs one to resolve translation 9013 * misses.
9014 */ 9015 ASSERT(curthread->t_procp->p_as->a_hat == sfmmup); 9016 } 9017 9018 #ifdef DEBUG 9019 ASSERT(max_mmu_ctxdoms > 0); 9020 9021 /* 9022 * Process should have INVALID_CONTEXT on all MMUs 9023 */ 9024 for (i = 0; i < max_mmu_ctxdoms; i++) { 9025 9026 ASSERT(sfmmup->sfmmu_ctxs[i].cnum == INVALID_CONTEXT); 9027 } 9028 #endif 9029 9030 new_tsbinfo->tsb_next = old_tsbinfo->tsb_next; 9031 membar_stst(); /* strict ordering required */ 9032 if (prevtsb) 9033 prevtsb->tsb_next = new_tsbinfo; 9034 else 9035 sfmmup->sfmmu_tsb = new_tsbinfo; 9036 membar_enter(); /* make sure new TSB globally visible */ 9037 sfmmu_setup_tsbinfo(sfmmup); 9038 9039 /* 9040 * We need to migrate TSB entries from the old TSB to the new TSB 9041 * if tsb_remap_ttes is set and the TSB is growing. 9042 */ 9043 if (tsb_remap_ttes && ((flags & TSB_GROW) == TSB_GROW)) 9044 sfmmu_copy_tsb(old_tsbinfo, new_tsbinfo); 9045 9046 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 9047 9048 /* 9049 * Drop the HAT lock to free our old tsb_info. 9050 */ 9051 sfmmu_hat_exit(hatlockp); 9052 9053 if ((flags & TSB_GROW) == TSB_GROW) { 9054 SFMMU_STAT(sf_tsb_grow); 9055 } else if ((flags & TSB_SHRINK) == TSB_SHRINK) { 9056 SFMMU_STAT(sf_tsb_shrink); 9057 } 9058 9059 sfmmu_tsbinfo_free(old_tsbinfo); 9060 9061 (void) sfmmu_hat_enter(sfmmup); 9062 return (TSB_SUCCESS); 9063 } 9064 9065 /* 9066 * This function will re-program hat pgsz array, and invalidate the 9067 * process' context, forcing the process to switch to another 9068 * context on the next TLB miss, and therefore start using the 9069 * TLB that is reprogrammed for the new page sizes. 9070 */ 9071 void 9072 sfmmu_reprog_pgsz_arr(sfmmu_t *sfmmup, uint8_t *tmp_pgsz) 9073 { 9074 int i; 9075 hatlock_t *hatlockp = NULL; 9076 9077 hatlockp = sfmmu_hat_enter(sfmmup); 9078 /* USIII+-IV+ optimization, requires hat lock */ 9079 if (tmp_pgsz) { 9080 for (i = 0; i < mmu_page_sizes; i++) 9081 sfmmup->sfmmu_pgsz[i] = tmp_pgsz[i]; 9082 } 9083 SFMMU_STAT(sf_tlb_reprog_pgsz); 9084 9085 sfmmu_invalidate_ctx(sfmmup); 9086 9087 sfmmu_hat_exit(hatlockp); 9088 } 9089 9090 /* 9091 * This function assumes that there are either four or six supported page 9092 * sizes and at most two programmable TLBs, so we need to decide which 9093 * page sizes are most important and then tell the MMU layer so it 9094 * can adjust the TLB page sizes accordingly (if supported). 9095 * 9096 * If these assumptions change, this function will need to be 9097 * updated to support whatever the new limits are. 9098 * 9099 * The growing flag is nonzero if we are growing the address space, 9100 * and zero if it is shrinking. This allows us to decide whether 9101 * to grow or shrink our TSB, depending upon available memory 9102 * conditions. 9103 */ 9104 static void 9105 sfmmu_check_page_sizes(sfmmu_t *sfmmup, int growing) 9106 { 9107 uint64_t ttecnt[MMU_PAGE_SIZES]; 9108 uint64_t tte8k_cnt, tte4m_cnt; 9109 uint8_t i; 9110 int sectsb_thresh; 9111 9112 /* 9113 * Kernel threads, processes with small address spaces not using 9114 * large pages, and dummy ISM HATs need not apply. 9115 */ 9116 if (sfmmup == ksfmmup || sfmmup->sfmmu_ismhat != NULL) 9117 return; 9118 9119 if ((sfmmup->sfmmu_flags & HAT_LGPG_FLAGS) == 0 && 9120 sfmmup->sfmmu_ttecnt[TTE8K] <= tsb_rss_factor) 9121 return; 9122 9123 for (i = 0; i < mmu_page_sizes; i++) { 9124 ttecnt[i] = SFMMU_TTE_CNT(sfmmup, i); 9125 } 9126 9127 /* Check pagesizes in use, and possibly reprogram DTLB. 
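 *
 * The counts computed below normalize everything into 8K and 4M
 * "equivalents". For example (hypothetical counts): 10 64K ttes
 * contribute 10 << (MMU_PAGESHIFT64K - MMU_PAGESHIFT), i.e. 80,
 * to tte8k_cnt, and 3 32M ttes contribute
 * 3 << (MMU_PAGESHIFT32M - MMU_PAGESHIFT4M), i.e. 24, to tte4m_cnt,
 * so TSB sizing tracks the span of the address space rather than
 * the raw tte count.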
*/ 9128 if (&mmu_check_page_sizes) 9129 mmu_check_page_sizes(sfmmup, ttecnt); 9130 9131 /* 9132 * Calculate the number of 8k ttes to represent the span of these 9133 * pages. 9134 */ 9135 tte8k_cnt = ttecnt[TTE8K] + 9136 (ttecnt[TTE64K] << (MMU_PAGESHIFT64K - MMU_PAGESHIFT)) + 9137 (ttecnt[TTE512K] << (MMU_PAGESHIFT512K - MMU_PAGESHIFT)); 9138 if (mmu_page_sizes == max_mmu_page_sizes) { 9139 tte4m_cnt = ttecnt[TTE4M] + 9140 (ttecnt[TTE32M] << (MMU_PAGESHIFT32M - MMU_PAGESHIFT4M)) + 9141 (ttecnt[TTE256M] << (MMU_PAGESHIFT256M - MMU_PAGESHIFT4M)); 9142 } else { 9143 tte4m_cnt = ttecnt[TTE4M]; 9144 } 9145 9146 /* 9147 * Inflate TSB sizes by a factor of 2 if this process 9148 * uses 4M text pages to minimize extra conflict misses 9149 * in the first TSB since without counting text pages 9150 * 8K TSB may become too small. 9151 * 9152 * Also double the size of the second TSB to minimize 9153 * extra conflict misses due to competition between 4M text pages 9154 * and data pages. 9155 * 9156 * We need to adjust the second TSB allocation threshold by the 9157 * inflation factor, since there is no point in creating a second 9158 * TSB when we know all the mappings can fit in the I/D TLBs. 9159 */ 9160 sectsb_thresh = tsb_sectsb_threshold; 9161 if (sfmmup->sfmmu_flags & HAT_4MTEXT_FLAG) { 9162 tte8k_cnt <<= 1; 9163 tte4m_cnt <<= 1; 9164 sectsb_thresh <<= 1; 9165 } 9166 9167 /* 9168 * Check to see if our TSB is the right size; we may need to 9169 * grow or shrink it. If the process is small, our work is 9170 * finished at this point. 9171 */ 9172 if (tte8k_cnt <= tsb_rss_factor && tte4m_cnt <= sectsb_thresh) { 9173 return; 9174 } 9175 sfmmu_size_tsb(sfmmup, growing, tte8k_cnt, tte4m_cnt, sectsb_thresh); 9176 } 9177 9178 static void 9179 sfmmu_size_tsb(sfmmu_t *sfmmup, int growing, uint64_t tte8k_cnt, 9180 uint64_t tte4m_cnt, int sectsb_thresh) 9181 { 9182 int tsb_bits; 9183 uint_t tsb_szc; 9184 struct tsb_info *tsbinfop; 9185 hatlock_t *hatlockp = NULL; 9186 9187 hatlockp = sfmmu_hat_enter(sfmmup); 9188 ASSERT(hatlockp != NULL); 9189 tsbinfop = sfmmup->sfmmu_tsb; 9190 ASSERT(tsbinfop != NULL); 9191 9192 /* 9193 * If we're growing, select the size based on RSS. If we're 9194 * shrinking, leave some room so we don't have to turn around and 9195 * grow again immediately. 9196 */ 9197 if (growing) 9198 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt); 9199 else 9200 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt << 1); 9201 9202 if (!growing && (tsb_szc < tsbinfop->tsb_szc) && 9203 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) { 9204 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc, 9205 hatlockp, TSB_SHRINK); 9206 } else if (growing && tsb_szc > tsbinfop->tsb_szc && TSB_OK_GROW()) { 9207 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc, 9208 hatlockp, TSB_GROW); 9209 } 9210 tsbinfop = sfmmup->sfmmu_tsb; 9211 9212 /* 9213 * With the TLB and first TSB out of the way, we need to see if 9214 * we need a second TSB for 4M pages. If we managed to reprogram 9215 * the TLB page sizes above, the process will start using this new 9216 * TSB right away; otherwise, it will start using it on the next 9217 * context switch. Either way, it's no big deal so there's no 9218 * synchronization with the trap handlers here unless we grow the 9219 * TSB (in which case it's required to prevent using the old one 9220 * after it's freed). Note: second tsb is required for 32M/256M 9221 * page sizes. 9222 */ 9223 if (tte4m_cnt > sectsb_thresh) { 9224 /* 9225 * If we're growing, select the size based on RSS. 
If we're 9226 * shrinking, leave some room so we don't have to turn 9227 * around and grow again immediately. 9228 */ 9229 if (growing) 9230 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt); 9231 else 9232 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt << 1); 9233 if (tsbinfop->tsb_next == NULL) { 9234 struct tsb_info *newtsb; 9235 int allocflags = SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)? 9236 0 : TSB_ALLOC; 9237 9238 sfmmu_hat_exit(hatlockp); 9239 9240 /* 9241 * Try to allocate a TSB for 4[32|256]M pages. If we 9242 * can't get the size we want, retry w/a minimum sized 9243 * TSB. If that still didn't work, give up; we can 9244 * still run without one. 9245 */ 9246 tsb_bits = (mmu_page_sizes == max_mmu_page_sizes)? 9247 TSB4M|TSB32M|TSB256M:TSB4M; 9248 if ((sfmmu_tsbinfo_alloc(&newtsb, tsb_szc, tsb_bits, 9249 allocflags, sfmmup) != 0) && 9250 (sfmmu_tsbinfo_alloc(&newtsb, TSB_MIN_SZCODE, 9251 tsb_bits, allocflags, sfmmup) != 0)) { 9252 return; 9253 } 9254 9255 hatlockp = sfmmu_hat_enter(sfmmup); 9256 9257 if (sfmmup->sfmmu_tsb->tsb_next == NULL) { 9258 sfmmup->sfmmu_tsb->tsb_next = newtsb; 9259 SFMMU_STAT(sf_tsb_sectsb_create); 9260 sfmmu_setup_tsbinfo(sfmmup); 9261 sfmmu_hat_exit(hatlockp); 9262 return; 9263 } else { 9264 /* 9265 * It's annoying, but possible for us 9266 * to get here.. we dropped the HAT lock 9267 * because of locking order in the kmem 9268 * allocator, and while we were off getting 9269 * our memory, some other thread decided to 9270 * do us a favor and won the race to get a 9271 * second TSB for this process. Sigh. 9272 */ 9273 sfmmu_hat_exit(hatlockp); 9274 sfmmu_tsbinfo_free(newtsb); 9275 return; 9276 } 9277 } 9278 9279 /* 9280 * We have a second TSB, see if it's big enough. 9281 */ 9282 tsbinfop = tsbinfop->tsb_next; 9283 9284 /* 9285 * Check to see if our second TSB is the right size; 9286 * we may need to grow or shrink it. 9287 * To prevent thrashing (e.g. growing the TSB on a 9288 * subsequent map operation), only try to shrink if 9289 * the TSB reach exceeds twice the virtual address 9290 * space size. 9291 */ 9292 if (!growing && (tsb_szc < tsbinfop->tsb_szc) && 9293 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) { 9294 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, 9295 tsb_szc, hatlockp, TSB_SHRINK); 9296 } else if (growing && tsb_szc > tsbinfop->tsb_szc && 9297 TSB_OK_GROW()) { 9298 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, 9299 tsb_szc, hatlockp, TSB_GROW); 9300 } 9301 } 9302 9303 sfmmu_hat_exit(hatlockp); 9304 } 9305 9306 /* 9307 * Free up a sfmmu 9308 * Since the sfmmu is currently embedded in the hat struct we simply zero 9309 * out our fields and free up the ism map blk list if any. 
9310 */ 9311 static void 9312 sfmmu_free_sfmmu(sfmmu_t *sfmmup) 9313 { 9314 ism_blk_t *blkp, *nx_blkp; 9315 #ifdef DEBUG 9316 ism_map_t *map; 9317 int i; 9318 #endif 9319 9320 ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0); 9321 ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0); 9322 ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0); 9323 ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0); 9324 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0); 9325 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0); 9326 9327 sfmmup->sfmmu_free = 0; 9328 sfmmup->sfmmu_ismhat = 0; 9329 9330 blkp = sfmmup->sfmmu_iblk; 9331 sfmmup->sfmmu_iblk = NULL; 9332 9333 while (blkp) { 9334 #ifdef DEBUG 9335 map = blkp->iblk_maps; 9336 for (i = 0; i < ISM_MAP_SLOTS; i++) { 9337 ASSERT(map[i].imap_seg == 0); 9338 ASSERT(map[i].imap_ismhat == NULL); 9339 ASSERT(map[i].imap_ment == NULL); 9340 } 9341 #endif 9342 nx_blkp = blkp->iblk_next; 9343 blkp->iblk_next = NULL; 9344 blkp->iblk_nextpa = (uint64_t)-1; 9345 kmem_cache_free(ism_blk_cache, blkp); 9346 blkp = nx_blkp; 9347 } 9348 } 9349 9350 /* 9351 * Locking primitives accessed by HATLOCK macros 9352 */ 9353 9354 #define SFMMU_SPL_MTX (0x0) 9355 #define SFMMU_ML_MTX (0x1) 9356 9357 #define SFMMU_MLSPL_MTX(type, pg) (((type) == SFMMU_SPL_MTX) ? \ 9358 SPL_HASH(pg) : MLIST_HASH(pg)) 9359 9360 kmutex_t * 9361 sfmmu_page_enter(struct page *pp) 9362 { 9363 return (sfmmu_mlspl_enter(pp, SFMMU_SPL_MTX)); 9364 } 9365 9366 void 9367 sfmmu_page_exit(kmutex_t *spl) 9368 { 9369 mutex_exit(spl); 9370 } 9371 9372 int 9373 sfmmu_page_spl_held(struct page *pp) 9374 { 9375 return (sfmmu_mlspl_held(pp, SFMMU_SPL_MTX)); 9376 } 9377 9378 kmutex_t * 9379 sfmmu_mlist_enter(struct page *pp) 9380 { 9381 return (sfmmu_mlspl_enter(pp, SFMMU_ML_MTX)); 9382 } 9383 9384 void 9385 sfmmu_mlist_exit(kmutex_t *mml) 9386 { 9387 mutex_exit(mml); 9388 } 9389 9390 int 9391 sfmmu_mlist_held(struct page *pp) 9392 { 9393 9394 return (sfmmu_mlspl_held(pp, SFMMU_ML_MTX)); 9395 } 9396 9397 /* 9398 * Common code for sfmmu_mlist_enter() and sfmmu_page_enter(). For the 9399 * sfmmu_mlist_enter() case the mml_table lock array is used, and for the 9400 * sfmmu_page_enter() case the sfmmu_page_lock lock array is used. 9401 * 9402 * The lock is taken on a root page so that it protects an operation on all 9403 * constituent pages of a large page pp belongs to. 9404 * 9405 * The routine takes a lock from the appropriate array. The lock is determined 9406 * by hashing the root page. After taking the lock this routine checks if the 9407 * root page has the same size code that was used to determine the root (i.e. 9408 * that the root hasn't changed). If the root page has the expected p_szc field 9409 * we have the right lock and it's returned to the caller. If root's p_szc 9410 * decreased we release the lock and retry from the beginning. This case can 9411 * happen due to hat_page_demote() decreasing p_szc between our load of p_szc 9412 * value and taking the lock. The number of retries due to p_szc decrease is 9413 * limited by the maximum p_szc value. If p_szc is 0 we return the lock 9414 * determined by hashing pp itself. 9415 * 9416 * If our caller doesn't hold a SE_SHARED or SE_EXCL lock on pp it's also 9417 * possible that p_szc can increase. To increase p_szc a thread has to lock 9418 * all constituent pages EXCL and do hat_pageunload() on all of them. All the 9419 * callers that don't hold a page locked recheck if the hmeblk through which pp 9420 * was found still maps this pp. If it doesn't map it anymore, the returned lock 9421 * is immediately dropped.
Therefore if sfmmu_mlspl_enter() hits the case of 9422 * p_szc increase after taking the lock it returns this lock without further 9423 * retries because in this case the caller doesn't care about which lock was 9424 * taken. The caller will drop it right away. 9425 * 9426 * After the routine returns it's guaranteed that hat_page_demote() can't 9427 * change p_szc field of any of constituent pages of a large page pp belongs 9428 * to as long as pp was either locked at least SHARED prior to this call or 9429 * the caller finds that hment that pointed to this pp still references this 9430 * pp (this also assumes that the caller holds hme hash bucket lock so that 9431 * the same pp can't be remapped into the same hmeblk after it was unmapped by 9432 * hat_pageunload()). 9433 */ 9434 static kmutex_t * 9435 sfmmu_mlspl_enter(struct page *pp, int type) 9436 { 9437 kmutex_t *mtx; 9438 uint_t prev_rszc = UINT_MAX; 9439 page_t *rootpp; 9440 uint_t szc; 9441 uint_t rszc; 9442 uint_t pszc = pp->p_szc; 9443 9444 ASSERT(pp != NULL); 9445 9446 again: 9447 if (pszc == 0) { 9448 mtx = SFMMU_MLSPL_MTX(type, pp); 9449 mutex_enter(mtx); 9450 return (mtx); 9451 } 9452 9453 /* The lock lives in the root page */ 9454 rootpp = PP_GROUPLEADER(pp, pszc); 9455 mtx = SFMMU_MLSPL_MTX(type, rootpp); 9456 mutex_enter(mtx); 9457 9458 /* 9459 * Return mml in the following 3 cases: 9460 * 9461 * 1) If pp itself is root since if its p_szc decreased before we took 9462 * the lock pp is still the root of smaller szc page. And if its p_szc 9463 * increased it doesn't matter what lock we return (see comment in 9464 * front of this routine). 9465 * 9466 * 2) If pp's not root but rootpp is the root of a rootpp->p_szc size 9467 * large page we have the right lock since any previous potential 9468 * hat_page_demote() is done demoting from greater than current root's 9469 * p_szc because hat_page_demote() changes root's p_szc last. No 9470 * further hat_page_demote() can start or be in progress since it 9471 * would need the same lock we currently hold. 9472 * 9473 * 3) If rootpp's p_szc increased since previous iteration it doesn't 9474 * matter what lock we return (see comment in front of this routine). 9475 */ 9476 if (pp == rootpp || (rszc = rootpp->p_szc) == pszc || 9477 rszc >= prev_rszc) { 9478 return (mtx); 9479 } 9480 9481 /* 9482 * hat_page_demote() could have decreased root's p_szc. 9483 * In this case pp's p_szc must also be smaller than pszc. 9484 * Retry. 9485 */ 9486 if (rszc < pszc) { 9487 szc = pp->p_szc; 9488 if (szc < pszc) { 9489 mutex_exit(mtx); 9490 pszc = szc; 9491 goto again; 9492 } 9493 /* 9494 * pp's p_szc increased after it was decreased. 9495 * page cannot be mapped. Return current lock. The caller 9496 * will drop it right away. 9497 */ 9498 return (mtx); 9499 } 9500 9501 /* 9502 * root's p_szc is greater than pp's p_szc. 9503 * hat_page_demote() is not done with all pages 9504 * yet. Wait for it to complete. 
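 *
 * The enter/exit pair on the larger root's mutex below acts purely
 * as a wait: hat_page_demote() holds that mutex while it works, so
 * once we have acquired and immediately dropped it the demotion has
 * finished, and we retry the whole lookup against the settled
 * p_szc values.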
9505 */ 9506 mutex_exit(mtx); 9507 rootpp = PP_GROUPLEADER(rootpp, rszc); 9508 mtx = SFMMU_MLSPL_MTX(type, rootpp); 9509 mutex_enter(mtx); 9510 mutex_exit(mtx); 9511 prev_rszc = rszc; 9512 goto again; 9513 } 9514 9515 static int 9516 sfmmu_mlspl_held(struct page *pp, int type) 9517 { 9518 kmutex_t *mtx; 9519 9520 ASSERT(pp != NULL); 9521 /* The lock lives in the root page */ 9522 pp = PP_PAGEROOT(pp); 9523 ASSERT(pp != NULL); 9524 9525 mtx = SFMMU_MLSPL_MTX(type, pp); 9526 return (MUTEX_HELD(mtx)); 9527 } 9528 9529 static uint_t 9530 sfmmu_get_free_hblk(struct hme_blk **hmeblkpp, uint_t critical) 9531 { 9532 struct hme_blk *hblkp; 9533 9534 if (freehblkp != NULL) { 9535 mutex_enter(&freehblkp_lock); 9536 if (freehblkp != NULL) { 9537 /* 9538 * If the current thread owns hblk_reserve, 9539 * let it succeed even if freehblkcnt is really low. 9540 */ 9541 if (freehblkcnt <= HBLK_RESERVE_MIN && !critical) { 9542 SFMMU_STAT(sf_get_free_throttle); 9543 mutex_exit(&freehblkp_lock); 9544 return (0); 9545 } 9546 freehblkcnt--; 9547 *hmeblkpp = freehblkp; 9548 hblkp = *hmeblkpp; 9549 freehblkp = hblkp->hblk_next; 9550 mutex_exit(&freehblkp_lock); 9551 hblkp->hblk_next = NULL; 9552 SFMMU_STAT(sf_get_free_success); 9553 return (1); 9554 } 9555 mutex_exit(&freehblkp_lock); 9556 } 9557 SFMMU_STAT(sf_get_free_fail); 9558 return (0); 9559 } 9560 9561 static uint_t 9562 sfmmu_put_free_hblk(struct hme_blk *hmeblkp, uint_t critical) 9563 { 9564 struct hme_blk *hblkp; 9565 9566 /* 9567 * If the current thread is mapping into kernel space, 9568 * let it succeed even if freehblkcnt is at the max 9569 * so that it will avoid freeing it to kmem. 9570 * This will prevent stack overflow due to 9571 * possible recursion since kmem_cache_free() 9572 * might require creation of a slab which 9573 * in turn needs an hmeblk to map that slab; 9574 * let's break this vicious chain at the first 9575 * opportunity. 9576 */ 9577 if (freehblkcnt < HBLK_RESERVE_CNT || critical) { 9578 mutex_enter(&freehblkp_lock); 9579 if (freehblkcnt < HBLK_RESERVE_CNT || critical) { 9580 SFMMU_STAT(sf_put_free_success); 9581 freehblkcnt++; 9582 hmeblkp->hblk_next = freehblkp; 9583 freehblkp = hmeblkp; 9584 mutex_exit(&freehblkp_lock); 9585 return (1); 9586 } 9587 mutex_exit(&freehblkp_lock); 9588 } 9589 9590 /* 9591 * Bring down freehblkcnt to HBLK_RESERVE_CNT. We are here 9592 * only if freehblkcnt is at least HBLK_RESERVE_CNT *and* 9593 * we are not in the process of mapping into kernel space.
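 *
 * Note the check-lock-recheck idiom used throughout this free list
 * code: freehblkcnt (or freehblkp) is tested without the lock as a
 * cheap hint, then retested under freehblkp_lock before any list
 * manipulation, so the unlocked read can never corrupt the list;
 * at worst it costs one extra lock round trip.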
9594 */ 9595 ASSERT(!critical); 9596 while (freehblkcnt > HBLK_RESERVE_CNT) { 9597 mutex_enter(&freehblkp_lock); 9598 if (freehblkcnt > HBLK_RESERVE_CNT) { 9599 freehblkcnt--; 9600 hblkp = freehblkp; 9601 freehblkp = hblkp->hblk_next; 9602 mutex_exit(&freehblkp_lock); 9603 ASSERT(get_hblk_cache(hblkp) == sfmmu8_cache); 9604 kmem_cache_free(sfmmu8_cache, hblkp); 9605 continue; 9606 } 9607 mutex_exit(&freehblkp_lock); 9608 } 9609 SFMMU_STAT(sf_put_free_fail); 9610 return (0); 9611 } 9612 9613 static void 9614 sfmmu_hblk_swap(struct hme_blk *new) 9615 { 9616 struct hme_blk *old, *hblkp, *prev; 9617 uint64_t hblkpa, prevpa, newpa; 9618 caddr_t base, vaddr, endaddr; 9619 struct hmehash_bucket *hmebp; 9620 struct sf_hment *osfhme, *nsfhme; 9621 page_t *pp; 9622 kmutex_t *pml; 9623 tte_t tte; 9624 9625 #ifdef DEBUG 9626 hmeblk_tag hblktag; 9627 struct hme_blk *found; 9628 #endif 9629 old = HBLK_RESERVE; 9630 9631 /* 9632 * save pa before bcopy clobbers it 9633 */ 9634 newpa = new->hblk_nextpa; 9635 9636 base = (caddr_t)get_hblk_base(old); 9637 endaddr = base + get_hblk_span(old); 9638 9639 /* 9640 * acquire hash bucket lock. 9641 */ 9642 hmebp = sfmmu_tteload_acquire_hashbucket(ksfmmup, base, TTE8K); 9643 9644 /* 9645 * copy contents from old to new 9646 */ 9647 bcopy((void *)old, (void *)new, HME8BLK_SZ); 9648 9649 /* 9650 * add new to hash chain 9651 */ 9652 sfmmu_hblk_hash_add(hmebp, new, newpa); 9653 9654 /* 9655 * search hash chain for hblk_reserve; this needs to be performed 9656 * after adding new, otherwise prevpa and prev won't correspond 9657 * to the hblk which is prior to old in hash chain when we call 9658 * sfmmu_hblk_hash_rm to remove old later. 9659 */ 9660 for (prevpa = 0, prev = NULL, 9661 hblkpa = hmebp->hmeh_nextpa, hblkp = hmebp->hmeblkp; 9662 hblkp != NULL && hblkp != old; 9663 prevpa = hblkpa, prev = hblkp, 9664 hblkpa = hblkp->hblk_nextpa, hblkp = hblkp->hblk_next); 9665 9666 if (hblkp != old) 9667 panic("sfmmu_hblk_swap: hblk_reserve not found"); 9668 9669 /* 9670 * p_mapping list is still pointing to hments in hblk_reserve; 9671 * fix up p_mapping list so that they point to hments in new. 9672 * 9673 * Since all these mappings are created by hblk_reserve_thread 9674 * on the way and it's using at least one of the buffers from each of 9675 * the newly minted slabs, there is no danger of any of these 9676 * mappings getting unloaded by another thread. 9677 * 9678 * tsbmiss could only modify ref/mod bits of hments in old/new. 9679 * Since all of these hments hold mappings established by segkmem 9680 * and mappings in segkmem are setup with HAT_NOSYNC, ref/mod bits 9681 * have no meaning for the mappings in hblk_reserve. hments in 9682 * old and new are identical except for ref/mod bits. 
9683 */ 9684 for (vaddr = base; vaddr < endaddr; vaddr += TTEBYTES(TTE8K)) { 9685 9686 HBLKTOHME(osfhme, old, vaddr); 9687 sfmmu_copytte(&osfhme->hme_tte, &tte); 9688 9689 if (TTE_IS_VALID(&tte)) { 9690 if ((pp = osfhme->hme_page) == NULL) 9691 panic("sfmmu_hblk_swap: page not mapped"); 9692 9693 pml = sfmmu_mlist_enter(pp); 9694 9695 if (pp != osfhme->hme_page) 9696 panic("sfmmu_hblk_swap: mapping changed"); 9697 9698 HBLKTOHME(nsfhme, new, vaddr); 9699 9700 HME_ADD(nsfhme, pp); 9701 HME_SUB(osfhme, pp); 9702 9703 sfmmu_mlist_exit(pml); 9704 } 9705 } 9706 9707 /* 9708 * remove old from hash chain 9709 */ 9710 sfmmu_hblk_hash_rm(hmebp, old, prevpa, prev); 9711 9712 #ifdef DEBUG 9713 9714 hblktag.htag_id = ksfmmup; 9715 hblktag.htag_bspage = HME_HASH_BSPAGE(base, HME_HASH_SHIFT(TTE8K)); 9716 hblktag.htag_rehash = HME_HASH_REHASH(TTE8K); 9717 HME_HASH_FAST_SEARCH(hmebp, hblktag, found); 9718 9719 if (found != new) 9720 panic("sfmmu_hblk_swap: new hblk not found"); 9721 #endif 9722 9723 SFMMU_HASH_UNLOCK(hmebp); 9724 9725 /* 9726 * Reset hblk_reserve 9727 */ 9728 bzero((void *)old, HME8BLK_SZ); 9729 old->hblk_nextpa = va_to_pa((caddr_t)old); 9730 } 9731 9732 /* 9733 * Grab the mlist mutex for both pages passed in. 9734 * 9735 * low and high will be returned as pointers to the mutexes for these pages. 9736 * low refers to the mutex residing in the lower bin of the mlist hash, while 9737 * high refers to the mutex residing in the higher bin of the mlist hash. This 9738 * is due to the locking order restrictions on the same thread grabbing 9739 * multiple mlist mutexes. The low lock must be acquired before the high lock. 9740 * 9741 * If both pages hash to the same mutex, only grab that single mutex, and 9742 * high will be returned as NULL 9743 * If the pages hash to different bins in the hash, grab the lower addressed 9744 * lock first and then the higher addressed lock in order to follow the locking 9745 * rules involved with the same thread grabbing multiple mlist mutexes. 9746 * low and high will both have non-NULL values. 9747 */ 9748 static void 9749 sfmmu_mlist_reloc_enter(struct page *targ, struct page *repl, 9750 kmutex_t **low, kmutex_t **high) 9751 { 9752 kmutex_t *mml_targ, *mml_repl; 9753 9754 /* 9755 * no need to do the dance around szc as in sfmmu_mlist_enter() 9756 * because this routine is only called by hat_page_relocate() and all 9757 * targ and repl pages are already locked EXCL so szc can't change. 
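 *
 * Ordering the two acquisitions by mutex address (low before high)
 * gives every thread the same global acquisition order for mlist
 * mutexes, which is what rules out an ABBA deadlock when two
 * relocations involving the same pair of hash bins run concurrently.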
9758 */ 9759 9760 mml_targ = MLIST_HASH(PP_PAGEROOT(targ)); 9761 mml_repl = MLIST_HASH(PP_PAGEROOT(repl)); 9762 9763 if (mml_targ == mml_repl) { 9764 *low = mml_targ; 9765 *high = NULL; 9766 } else { 9767 if (mml_targ < mml_repl) { 9768 *low = mml_targ; 9769 *high = mml_repl; 9770 } else { 9771 *low = mml_repl; 9772 *high = mml_targ; 9773 } 9774 } 9775 9776 mutex_enter(*low); 9777 if (*high) 9778 mutex_enter(*high); 9779 } 9780 9781 static void 9782 sfmmu_mlist_reloc_exit(kmutex_t *low, kmutex_t *high) 9783 { 9784 if (high) 9785 mutex_exit(high); 9786 mutex_exit(low); 9787 } 9788 9789 static hatlock_t * 9790 sfmmu_hat_enter(sfmmu_t *sfmmup) 9791 { 9792 hatlock_t *hatlockp; 9793 9794 if (sfmmup != ksfmmup) { 9795 hatlockp = TSB_HASH(sfmmup); 9796 mutex_enter(HATLOCK_MUTEXP(hatlockp)); 9797 return (hatlockp); 9798 } 9799 return (NULL); 9800 } 9801 9802 static hatlock_t * 9803 sfmmu_hat_tryenter(sfmmu_t *sfmmup) 9804 { 9805 hatlock_t *hatlockp; 9806 9807 if (sfmmup != ksfmmup) { 9808 hatlockp = TSB_HASH(sfmmup); 9809 if (mutex_tryenter(HATLOCK_MUTEXP(hatlockp)) == 0) 9810 return (NULL); 9811 return (hatlockp); 9812 } 9813 return (NULL); 9814 } 9815 9816 static void 9817 sfmmu_hat_exit(hatlock_t *hatlockp) 9818 { 9819 if (hatlockp != NULL) 9820 mutex_exit(HATLOCK_MUTEXP(hatlockp)); 9821 } 9822 9823 static void 9824 sfmmu_hat_lock_all(void) 9825 { 9826 int i; 9827 for (i = 0; i < SFMMU_NUM_LOCK; i++) 9828 mutex_enter(HATLOCK_MUTEXP(&hat_lock[i])); 9829 } 9830 9831 static void 9832 sfmmu_hat_unlock_all(void) 9833 { 9834 int i; 9835 for (i = SFMMU_NUM_LOCK - 1; i >= 0; i--) 9836 mutex_exit(HATLOCK_MUTEXP(&hat_lock[i])); 9837 } 9838 9839 int 9840 sfmmu_hat_lock_held(sfmmu_t *sfmmup) 9841 { 9842 ASSERT(sfmmup != ksfmmup); 9843 return (MUTEX_HELD(HATLOCK_MUTEXP(TSB_HASH(sfmmup)))); 9844 } 9845 9846 /* 9847 * Locking primitives to provide consistency between ISM unmap 9848 * and other operations. Since ISM unmap can take a long time, we 9849 * use HAT_ISMBUSY flag (protected by the hatlock) to avoid creating 9850 * contention on the hatlock buckets while ISM segments are being 9851 * unmapped. The tradeoff is that the flags don't prevent priority 9852 * inversion from occurring, so we must request kernel priority in 9853 * case we have to sleep to keep from getting buried while holding 9854 * the HAT_ISMBUSY flag set, which in turn could block other kernel 9855 * threads from running (for example, in sfmmu_uvatopfn()). 9856 */ 9857 static void 9858 sfmmu_ismhat_enter(sfmmu_t *sfmmup, int hatlock_held) 9859 { 9860 hatlock_t *hatlockp; 9861 9862 THREAD_KPRI_REQUEST(); 9863 if (!hatlock_held) 9864 hatlockp = sfmmu_hat_enter(sfmmup); 9865 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) 9866 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp)); 9867 SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY); 9868 if (!hatlock_held) 9869 sfmmu_hat_exit(hatlockp); 9870 } 9871 9872 static void 9873 sfmmu_ismhat_exit(sfmmu_t *sfmmup, int hatlock_held) 9874 { 9875 hatlock_t *hatlockp; 9876 9877 if (!hatlock_held) 9878 hatlockp = sfmmu_hat_enter(sfmmup); 9879 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 9880 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY); 9881 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 9882 if (!hatlock_held) 9883 sfmmu_hat_exit(hatlockp); 9884 THREAD_KPRI_RELEASE(); 9885 } 9886 9887 /* 9888 * 9889 * Algorithm: 9890 * 9891 * (1) if segkmem is not ready, allocate hblk from an array of pre-alloc'ed 9892 * hblks. 
9893 * 9894 * (2) if we are allocating an hblk for mapping a slab in sfmmu_cache, 9895 * 9896 * (a) try to return an hblk from reserve pool of free hblks; 9897 * (b) if the reserve pool is empty, acquire hblk_reserve_lock 9898 * and return hblk_reserve. 9899 * 9900 * (3) call kmem_cache_alloc() to allocate hblk; 9901 * 9902 * (a) if hblk_reserve_lock is held by the current thread, 9903 * atomically replace hblk_reserve by the hblk that is 9904 * returned by kmem_cache_alloc; release hblk_reserve_lock 9905 * and call kmem_cache_alloc() again. 9906 * (b) if reserve pool is not full, add the hblk that is 9907 * returned by kmem_cache_alloc to reserve pool and 9908 * call kmem_cache_alloc again. 9909 * 9910 */ 9911 static struct hme_blk * 9912 sfmmu_hblk_alloc(sfmmu_t *sfmmup, caddr_t vaddr, 9913 struct hmehash_bucket *hmebp, uint_t size, hmeblk_tag hblktag, 9914 uint_t flags) 9915 { 9916 struct hme_blk *hmeblkp = NULL; 9917 struct hme_blk *newhblkp; 9918 struct hme_blk *shw_hblkp = NULL; 9919 struct kmem_cache *sfmmu_cache = NULL; 9920 uint64_t hblkpa; 9921 ulong_t index; 9922 uint_t owner; /* set to 1 if using hblk_reserve */ 9923 uint_t forcefree; 9924 int sleep; 9925 9926 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 9927 9928 /* 9929 * If segkmem is not created yet, allocate from static hmeblks 9930 * created at the end of startup_modules(). See the block comment 9931 * in startup_modules() describing how we estimate the number of 9932 * static hmeblks that will be needed during re-map. 9933 */ 9934 if (!hblk_alloc_dynamic) { 9935 9936 if (size == TTE8K) { 9937 index = nucleus_hblk8.index; 9938 if (index >= nucleus_hblk8.len) { 9939 /* 9940 * If we panic here, see startup_modules() to 9941 * make sure that we are calculating the 9942 * number of hblk8's that we need correctly. 9943 */ 9944 panic("no nucleus hblk8 to allocate"); 9945 } 9946 hmeblkp = 9947 (struct hme_blk *)&nucleus_hblk8.list[index]; 9948 nucleus_hblk8.index++; 9949 SFMMU_STAT(sf_hblk8_nalloc); 9950 } else { 9951 index = nucleus_hblk1.index; 9952 if (nucleus_hblk1.index >= nucleus_hblk1.len) { 9953 /* 9954 * If we panic here, see startup_modules() 9955 * and H8TOH1; most likely you need to 9956 * update the calculation of the number 9957 * of hblk1's the kernel needs to boot. 9958 */ 9959 panic("no nucleus hblk1 to allocate"); 9960 } 9961 hmeblkp = 9962 (struct hme_blk *)&nucleus_hblk1.list[index]; 9963 nucleus_hblk1.index++; 9964 SFMMU_STAT(sf_hblk1_nalloc); 9965 } 9966 9967 goto hblk_init; 9968 } 9969 9970 SFMMU_HASH_UNLOCK(hmebp); 9971 9972 if (sfmmup != KHATID) { 9973 if (mmu_page_sizes == max_mmu_page_sizes) { 9974 if (size < TTE256M) 9975 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr, 9976 size, flags); 9977 } else { 9978 if (size < TTE4M) 9979 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr, 9980 size, flags); 9981 } 9982 } 9983 9984 fill_hblk: 9985 owner = (hblk_reserve_thread == curthread) ? 1 : 0; 9986 9987 if (owner && size == TTE8K) { 9988 9989 /* 9990 * We are really in a tight spot. We already own 9991 * hblk_reserve and we need another hblk. In anticipation 9992 * of this kind of scenario, we specifically set aside 9993 * HBLK_RESERVE_MIN number of hblks to be used exclusively 9994 * by owner of hblk_reserve. 9995 */ 9996 SFMMU_STAT(sf_hblk_recurse_cnt); 9997 9998 if (!sfmmu_get_free_hblk(&hmeblkp, 1)) 9999 panic("sfmmu_hblk_alloc: reserve list is empty"); 10000 10001 goto hblk_verify; 10002 } 10003 10004 ASSERT(!owner); 10005 10006 if ((flags & HAT_NO_KALLOC) == 0) { 10007 10008 sfmmu_cache = ((size == TTE8K) ? 
sfmmu8_cache : sfmmu1_cache); 10009 sleep = ((sfmmup == KHATID) ? KM_NOSLEEP : KM_SLEEP); 10010 10011 if ((hmeblkp = kmem_cache_alloc(sfmmu_cache, sleep)) == NULL) { 10012 hmeblkp = sfmmu_hblk_steal(size); 10013 } else { 10014 /* 10015 * if we are the owner of hblk_reserve, 10016 * swap hblk_reserve with hmeblkp and 10017 * start a fresh life. Hope things go 10018 * better this time. 10019 */ 10020 if (hblk_reserve_thread == curthread) { 10021 ASSERT(sfmmu_cache == sfmmu8_cache); 10022 sfmmu_hblk_swap(hmeblkp); 10023 hblk_reserve_thread = NULL; 10024 mutex_exit(&hblk_reserve_lock); 10025 goto fill_hblk; 10026 } 10027 /* 10028 * let's donate this hblk to our reserve list if 10029 * we are not mapping kernel range 10030 */ 10031 if (size == TTE8K && sfmmup != KHATID) 10032 if (sfmmu_put_free_hblk(hmeblkp, 0)) 10033 goto fill_hblk; 10034 } 10035 } else { 10036 /* 10037 * We are here to map the slab in sfmmu8_cache; let's 10038 * check if we could tap our reserve list; if successful, 10039 * this will avoid the pain of going thru sfmmu_hblk_swap 10040 */ 10041 SFMMU_STAT(sf_hblk_slab_cnt); 10042 if (!sfmmu_get_free_hblk(&hmeblkp, 0)) { 10043 /* 10044 * let's start hblk_reserve dance 10045 */ 10046 SFMMU_STAT(sf_hblk_reserve_cnt); 10047 owner = 1; 10048 mutex_enter(&hblk_reserve_lock); 10049 hmeblkp = HBLK_RESERVE; 10050 hblk_reserve_thread = curthread; 10051 } 10052 } 10053 10054 hblk_verify: 10055 ASSERT(hmeblkp != NULL); 10056 set_hblk_sz(hmeblkp, size); 10057 ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp)); 10058 SFMMU_HASH_LOCK(hmebp); 10059 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp); 10060 if (newhblkp != NULL) { 10061 SFMMU_HASH_UNLOCK(hmebp); 10062 if (hmeblkp != HBLK_RESERVE) { 10063 /* 10064 * This is really tricky! 10065 * 10066 * vmem_alloc(vmem_seg_arena) 10067 * vmem_alloc(vmem_internal_arena) 10068 * segkmem_alloc(heap_arena) 10069 * vmem_alloc(heap_arena) 10070 * page_create() 10071 * hat_memload() 10072 * kmem_cache_free() 10073 * kmem_cache_alloc() 10074 * kmem_slab_create() 10075 * vmem_alloc(kmem_internal_arena) 10076 * segkmem_alloc(heap_arena) 10077 * vmem_alloc(heap_arena) 10078 * page_create() 10079 * hat_memload() 10080 * kmem_cache_free() 10081 * ... 10082 * 10083 * Thus, hat_memload() could call kmem_cache_free 10084 * for enough number of times that we could easily 10085 * hit the bottom of the stack or run out of reserve 10086 * list of vmem_seg structs. So, we must donate 10087 * this hblk to reserve list if it's allocated 10088 * from sfmmu8_cache *and* mapping kernel range. 10089 * We don't need to worry about freeing hmeblk1's 10090 * to kmem since they don't map any kmem slabs. 10091 * 10092 * Note: When segkmem supports largepages, we must 10093 * free hmeblk1's to reserve list as well. 10094 */ 10095 forcefree = (sfmmup == KHATID) ? 1 : 0; 10096 if (size == TTE8K && 10097 sfmmu_put_free_hblk(hmeblkp, forcefree)) { 10098 goto re_verify; 10099 } 10100 ASSERT(sfmmup != KHATID); 10101 kmem_cache_free(get_hblk_cache(hmeblkp), hmeblkp); 10102 } else { 10103 /* 10104 * Hey! we don't need hblk_reserve any more. 
10105 */ 10106 ASSERT(owner); 10107 hblk_reserve_thread = NULL; 10108 mutex_exit(&hblk_reserve_lock); 10109 owner = 0; 10110 } 10111 re_verify: 10112 /* 10113 * let's check if the goodies are still present 10114 */ 10115 SFMMU_HASH_LOCK(hmebp); 10116 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp); 10117 if (newhblkp != NULL) { 10118 /* 10119 * return newhblkp if it's not hblk_reserve; 10120 * if newhblkp is hblk_reserve, return it 10121 * _only if_ we are the owner of hblk_reserve. 10122 */ 10123 if (newhblkp != HBLK_RESERVE || owner) { 10124 return (newhblkp); 10125 } else { 10126 /* 10127 * we just hit hblk_reserve in the hash and 10128 * we are not the owner of that; 10129 * 10130 * block until hblk_reserve_thread completes 10131 * swapping hblk_reserve and try the dance 10132 * once again. 10133 */ 10134 SFMMU_HASH_UNLOCK(hmebp); 10135 mutex_enter(&hblk_reserve_lock); 10136 mutex_exit(&hblk_reserve_lock); 10137 SFMMU_STAT(sf_hblk_reserve_hit); 10138 goto fill_hblk; 10139 } 10140 } else { 10141 /* 10142 * it's no more! try the dance once again. 10143 */ 10144 SFMMU_HASH_UNLOCK(hmebp); 10145 goto fill_hblk; 10146 } 10147 } 10148 10149 hblk_init: 10150 set_hblk_sz(hmeblkp, size); 10151 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 10152 hmeblkp->hblk_next = (struct hme_blk *)NULL; 10153 hmeblkp->hblk_tag = hblktag; 10154 hmeblkp->hblk_shadow = shw_hblkp; 10155 hblkpa = hmeblkp->hblk_nextpa; 10156 hmeblkp->hblk_nextpa = 0; 10157 10158 ASSERT(get_hblk_ttesz(hmeblkp) == size); 10159 ASSERT(get_hblk_span(hmeblkp) == HMEBLK_SPAN(size)); 10160 ASSERT(hmeblkp->hblk_hmecnt == 0); 10161 ASSERT(hmeblkp->hblk_vcnt == 0); 10162 ASSERT(hmeblkp->hblk_lckcnt == 0); 10163 ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp)); 10164 sfmmu_hblk_hash_add(hmebp, hmeblkp, hblkpa); 10165 return (hmeblkp); 10166 } 10167 10168 /* 10169 * This function performs any cleanup required on the hme_blk 10170 * and returns it to the free list. 10171 */ 10172 /* ARGSUSED */ 10173 static void 10174 sfmmu_hblk_free(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp, 10175 uint64_t hblkpa, struct hme_blk **listp) 10176 { 10177 int shw_size, vshift; 10178 struct hme_blk *shw_hblkp; 10179 uint_t shw_mask, newshw_mask; 10180 uintptr_t vaddr; 10181 int size; 10182 uint_t critical; 10183 10184 ASSERT(hmeblkp); 10185 ASSERT(!hmeblkp->hblk_hmecnt); 10186 ASSERT(!hmeblkp->hblk_vcnt); 10187 ASSERT(!hmeblkp->hblk_lckcnt); 10188 ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp)); 10189 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve); 10190 10191 critical = (hblktosfmmu(hmeblkp) == KHATID) ? 
1 : 0; 10192 10193 size = get_hblk_ttesz(hmeblkp); 10194 shw_hblkp = hmeblkp->hblk_shadow; 10195 if (shw_hblkp) { 10196 ASSERT(hblktosfmmu(hmeblkp) != KHATID); 10197 if (mmu_page_sizes == max_mmu_page_sizes) { 10198 ASSERT(size < TTE256M); 10199 } else { 10200 ASSERT(size < TTE4M); 10201 } 10202 10203 shw_size = get_hblk_ttesz(shw_hblkp); 10204 vaddr = get_hblk_base(hmeblkp); 10205 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size); 10206 ASSERT(vshift < 8); 10207 /* 10208 * Atomically clear shadow mask bit 10209 */ 10210 do { 10211 shw_mask = shw_hblkp->hblk_shw_mask; 10212 ASSERT(shw_mask & (1 << vshift)); 10213 newshw_mask = shw_mask & ~(1 << vshift); 10214 newshw_mask = cas32(&shw_hblkp->hblk_shw_mask, 10215 shw_mask, newshw_mask); 10216 } while (newshw_mask != shw_mask); 10217 hmeblkp->hblk_shadow = NULL; 10218 } 10219 hmeblkp->hblk_next = NULL; 10220 hmeblkp->hblk_nextpa = hblkpa; 10221 hmeblkp->hblk_shw_bit = 0; 10222 10223 if (hmeblkp->hblk_nuc_bit == 0) { 10224 10225 if (size == TTE8K && sfmmu_put_free_hblk(hmeblkp, critical)) 10226 return; 10227 10228 hmeblkp->hblk_next = *listp; 10229 *listp = hmeblkp; 10230 } 10231 } 10232 10233 static void 10234 sfmmu_hblks_list_purge(struct hme_blk **listp) 10235 { 10236 struct hme_blk *hmeblkp; 10237 10238 while ((hmeblkp = *listp) != NULL) { 10239 *listp = hmeblkp->hblk_next; 10240 kmem_cache_free(get_hblk_cache(hmeblkp), hmeblkp); 10241 } 10242 } 10243 10244 #define BUCKETS_TO_SEARCH_BEFORE_UNLOAD 30 10245 10246 static uint_t sfmmu_hblk_steal_twice; 10247 static uint_t sfmmu_hblk_steal_count, sfmmu_hblk_steal_unload_count; 10248 10249 /* 10250 * Steal a hmeblk 10251 * Enough hmeblks were allocated at startup (nucleus hmeblks) and also 10252 * hmeblks were added dynamically. We should always be able to 10253 * find one. Look for an unused/unlocked hmeblk in the user hash table. 10254 */ 10255 static struct hme_blk * 10256 sfmmu_hblk_steal(int size) 10257 { 10258 static struct hmehash_bucket *uhmehash_steal_hand = NULL; 10259 struct hmehash_bucket *hmebp; 10260 struct hme_blk *hmeblkp = NULL, *pr_hblk; 10261 uint64_t hblkpa, prevpa; 10262 int i; 10263 10264 for (;;) { 10265 hmebp = (uhmehash_steal_hand == NULL) ? uhme_hash : 10266 uhmehash_steal_hand; 10267 ASSERT(hmebp >= uhme_hash && hmebp <= &uhme_hash[UHMEHASH_SZ]); 10268 10269 for (i = 0; hmeblkp == NULL && i <= UHMEHASH_SZ + 10270 BUCKETS_TO_SEARCH_BEFORE_UNLOAD; i++) { 10271 SFMMU_HASH_LOCK(hmebp); 10272 hmeblkp = hmebp->hmeblkp; 10273 hblkpa = hmebp->hmeh_nextpa; 10274 prevpa = 0; 10275 pr_hblk = NULL; 10276 while (hmeblkp) { 10277 /* 10278 * check if it is a hmeblk that is not locked 10279 * and not shared. skip shadow hmeblks with 10280 * shadow_mask set, i.e. valid count non-zero. 10281 */ 10282 if ((get_hblk_ttesz(hmeblkp) == size) && 10283 (hmeblkp->hblk_shw_bit == 0 || 10284 hmeblkp->hblk_vcnt == 0) && 10285 (hmeblkp->hblk_lckcnt == 0)) { 10286 /* 10287 * there is a high probability that we 10288 * will find a free one. search some 10289 * buckets for a free hmeblk initially 10290 * before unloading a valid hmeblk.
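 *
 * A condensed sketch of the acceptance policy used below (the size
 * and shadow-bit checks above still apply; this is illustrative,
 * not live code):
 *
 *	locked (hblk_lckcnt != 0)			never steal
 *	free (hblk_vcnt == 0 && hblk_hmecnt == 0)	steal immediately
 *	valid, i >= BUCKETS_TO_SEARCH_BEFORE_UNLOAD	unload, then steal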
10291 */ 10292 if ((hmeblkp->hblk_vcnt == 0 && 10293 hmeblkp->hblk_hmecnt == 0) || (i >= 10294 BUCKETS_TO_SEARCH_BEFORE_UNLOAD)) { 10295 if (sfmmu_steal_this_hblk(hmebp, 10296 hmeblkp, hblkpa, prevpa, 10297 pr_hblk)) { 10298 /* 10299 * Hblk is unloaded 10300 * successfully 10301 */ 10302 break; 10303 } 10304 } 10305 } 10306 pr_hblk = hmeblkp; 10307 prevpa = hblkpa; 10308 hblkpa = hmeblkp->hblk_nextpa; 10309 hmeblkp = hmeblkp->hblk_next; 10310 } 10311 10312 SFMMU_HASH_UNLOCK(hmebp); 10313 if (hmebp++ == &uhme_hash[UHMEHASH_SZ]) 10314 hmebp = uhme_hash; 10315 } 10316 uhmehash_steal_hand = hmebp; 10317 10318 if (hmeblkp != NULL) 10319 break; 10320 10321 /* 10322 * in the worst case, look for a free one in the kernel 10323 * hash table. 10324 */ 10325 for (i = 0, hmebp = khme_hash; i <= KHMEHASH_SZ; i++) { 10326 SFMMU_HASH_LOCK(hmebp); 10327 hmeblkp = hmebp->hmeblkp; 10328 hblkpa = hmebp->hmeh_nextpa; 10329 prevpa = 0; 10330 pr_hblk = NULL; 10331 while (hmeblkp) { 10332 /* 10333 * check if it is a free hmeblk 10334 */ 10335 if ((get_hblk_ttesz(hmeblkp) == size) && 10336 (hmeblkp->hblk_lckcnt == 0) && 10337 (hmeblkp->hblk_vcnt == 0) && 10338 (hmeblkp->hblk_hmecnt == 0)) { 10339 if (sfmmu_steal_this_hblk(hmebp, 10340 hmeblkp, hblkpa, prevpa, pr_hblk)) { 10341 break; 10342 } else { 10343 /* 10344 * Cannot fail since we have 10345 * hash lock. 10346 */ 10347 panic("fail to steal?"); 10348 } 10349 } 10350 10351 pr_hblk = hmeblkp; 10352 prevpa = hblkpa; 10353 hblkpa = hmeblkp->hblk_nextpa; 10354 hmeblkp = hmeblkp->hblk_next; 10355 } 10356 10357 SFMMU_HASH_UNLOCK(hmebp); 10358 if (hmebp++ == &khme_hash[KHMEHASH_SZ]) 10359 hmebp = khme_hash; 10360 } 10361 10362 if (hmeblkp != NULL) 10363 break; 10364 sfmmu_hblk_steal_twice++; 10365 } 10366 return (hmeblkp); 10367 } 10368 10369 /* 10370 * This routine does the real work to prepare a hblk to be "stolen" by 10371 * unloading the mappings, updating shadow counts .... 10372 * It returns 1 if the block is ready to be reused (stolen), or 0 10373 * if the block cannot be stolen yet - pageunload is still working 10374 * on this hblk. 10375 */ 10376 static int 10377 sfmmu_steal_this_hblk(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp, 10378 uint64_t hblkpa, uint64_t prevpa, struct hme_blk *pr_hblk) 10379 { 10380 int shw_size, vshift; 10381 struct hme_blk *shw_hblkp; 10382 uintptr_t vaddr; 10383 uint_t shw_mask, newshw_mask; 10384 10385 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 10386 10387 /* 10388 * check if the hmeblk is free, unload if necessary 10389 */ 10390 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 10391 sfmmu_t *sfmmup; 10392 demap_range_t dmr; 10393 10394 sfmmup = hblktosfmmu(hmeblkp); 10395 DEMAP_RANGE_INIT(sfmmup, &dmr); 10396 (void) sfmmu_hblk_unload(sfmmup, hmeblkp, 10397 (caddr_t)get_hblk_base(hmeblkp), 10398 get_hblk_endaddr(hmeblkp), &dmr, HAT_UNLOAD); 10399 DEMAP_RANGE_FLUSH(&dmr); 10400 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 10401 /* 10402 * Pageunload is working on the same hblk.
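 * Returning 0 here simply sends the caller on to the next
 * candidate; sfmmu_hblk_steal() keeps walking the hash chains
 * until some hblk can be reclaimed.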
10403 */ 10404 return (0); 10405 } 10406 10407 sfmmu_hblk_steal_unload_count++; 10408 } 10409 10410 ASSERT(hmeblkp->hblk_lckcnt == 0); 10411 ASSERT(hmeblkp->hblk_vcnt == 0 && hmeblkp->hblk_hmecnt == 0); 10412 10413 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, pr_hblk); 10414 hmeblkp->hblk_nextpa = hblkpa; 10415 10416 shw_hblkp = hmeblkp->hblk_shadow; 10417 if (shw_hblkp) { 10418 shw_size = get_hblk_ttesz(shw_hblkp); 10419 vaddr = get_hblk_base(hmeblkp); 10420 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size); 10421 ASSERT(vshift < 8); 10422 /* 10423 * Atomically clear shadow mask bit 10424 */ 10425 do { 10426 shw_mask = shw_hblkp->hblk_shw_mask; 10427 ASSERT(shw_mask & (1 << vshift)); 10428 newshw_mask = shw_mask & ~(1 << vshift); 10429 newshw_mask = cas32(&shw_hblkp->hblk_shw_mask, 10430 shw_mask, newshw_mask); 10431 } while (newshw_mask != shw_mask); 10432 hmeblkp->hblk_shadow = NULL; 10433 } 10434 10435 /* 10436 * remove shadow bit if we are stealing an unused shadow hmeblk. 10437 * sfmmu_hblk_alloc needs it that way, will set shadow bit later if 10438 * we are indeed allocating a shadow hmeblk. 10439 */ 10440 hmeblkp->hblk_shw_bit = 0; 10441 10442 sfmmu_hblk_steal_count++; 10443 SFMMU_STAT(sf_steal_count); 10444 10445 return (1); 10446 } 10447 10448 struct hme_blk * 10449 sfmmu_hmetohblk(struct sf_hment *sfhme) 10450 { 10451 struct hme_blk *hmeblkp; 10452 struct sf_hment *sfhme0; 10453 struct hme_blk *hblk_dummy = 0; 10454 10455 /* 10456 * No dummy sf_hments, please. 10457 */ 10458 ASSERT(sfhme->hme_tte.ll != 0); 10459 10460 sfhme0 = sfhme - sfhme->hme_tte.tte_hmenum; 10461 hmeblkp = (struct hme_blk *)((uintptr_t)sfhme0 - 10462 (uintptr_t)&hblk_dummy->hblk_hme[0]); 10463 10464 return (hmeblkp); 10465 } 10466 10467 /* 10468 * On swapin, get appropriately sized TSB(s) and clear the HAT_SWAPPED flag. 10469 * If we can't get appropriately sized TSB(s), try for 8K TSB(s) using 10470 * KM_SLEEP allocation. 10471 * 10472 * This routine does not return a status; the forced 8K fallback cannot fail. 10473 */ 10474 static void 10475 sfmmu_tsb_swapin(sfmmu_t *sfmmup, hatlock_t *hatlockp) 10476 { 10477 struct tsb_info *tsbinfop, *next; 10478 tsb_replace_rc_t rc; 10479 boolean_t gotfirst = B_FALSE; 10480 10481 ASSERT(sfmmup != ksfmmup); 10482 ASSERT(sfmmu_hat_lock_held(sfmmup)); 10483 10484 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPIN)) { 10485 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp)); 10486 } 10487 10488 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 10489 SFMMU_FLAGS_SET(sfmmup, HAT_SWAPIN); 10490 } else { 10491 return; 10492 } 10493 10494 ASSERT(sfmmup->sfmmu_tsb != NULL); 10495 10496 /* 10497 * Loop over all tsbinfo's replacing them with ones that actually have 10498 * a TSB. If any of the replacements ever fail, bail out of the loop. 10499 */ 10500 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; tsbinfop = next) { 10501 ASSERT(tsbinfop->tsb_flags & TSB_SWAPPED); 10502 next = tsbinfop->tsb_next; 10503 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, tsbinfop->tsb_szc, 10504 hatlockp, TSB_SWAPIN); 10505 if (rc != TSB_SUCCESS) { 10506 break; 10507 } 10508 gotfirst = B_TRUE; 10509 } 10510 10511 switch (rc) { 10512 case TSB_SUCCESS: 10513 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN); 10514 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 10515 return; 10516 case TSB_ALLOCFAIL: 10517 break; 10518 default: 10519 panic("sfmmu_replace_tsb returned unrecognized failure code " 10520 "%d", rc); 10521 } 10522 10523 /* 10524 * In this case, we failed to get one of our TSBs.
If we failed to 10525 * get the first TSB, get one of minimum size (8KB). Walk the list 10526 * and throw away the tsbinfos, starting where the allocation failed; 10527 * we can get by with just one TSB as long as we don't leave the 10528 * SWAPPED tsbinfo structures lying around. 10529 */ 10530 tsbinfop = sfmmup->sfmmu_tsb; 10531 next = tsbinfop->tsb_next; 10532 tsbinfop->tsb_next = NULL; 10533 10534 sfmmu_hat_exit(hatlockp); 10535 for (tsbinfop = next; tsbinfop != NULL; tsbinfop = next) { 10536 next = tsbinfop->tsb_next; 10537 sfmmu_tsbinfo_free(tsbinfop); 10538 } 10539 hatlockp = sfmmu_hat_enter(sfmmup); 10540 10541 /* 10542 * If we don't have any TSBs, get a single 8K TSB for 8K, 64K and 512K 10543 * pages. 10544 */ 10545 if (!gotfirst) { 10546 tsbinfop = sfmmup->sfmmu_tsb; 10547 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, TSB_MIN_SZCODE, 10548 hatlockp, TSB_SWAPIN | TSB_FORCEALLOC); 10549 ASSERT(rc == TSB_SUCCESS); 10550 } else { 10551 /* update machine specific tsbinfo */ 10552 sfmmu_setup_tsbinfo(sfmmup); 10553 } 10554 10555 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN); 10556 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 10557 } 10558 10559 /* 10560 * Handle exceptions for the low level tsb handler. 10561 * 10562 * There are many scenarios that could land us here: 10563 * 10564 * If the context is invalid we land here. The context can be invalid 10565 * for 3 reasons: 1) we couldn't allocate a new context and now need to 10566 * perform a wrap-around operation in order to allocate a new context; 10567 * 2) the context was invalidated to change pagesize programming; 3) the ISM or 10568 * TSB configuration is changing for this process and we are forced in 10569 * here to do a synchronization operation. If the context is valid we can 10570 * be here from the window trap handler. In that case just call trap to handle 10571 * the fault. 10572 * 10573 * Note that the process will run in INVALID_CONTEXT before 10574 * faulting into here and subsequently loading the MMU registers 10575 * (including the TSB base register) associated with this process. 10576 * For this reason, the trap handlers must all test for 10577 * INVALID_CONTEXT before attempting to access any registers other 10578 * than the context registers. 10579 */ 10580 void 10581 sfmmu_tsbmiss_exception(struct regs *rp, uintptr_t tagaccess, uint_t traptype) 10582 { 10583 sfmmu_t *sfmmup; 10584 uint_t ctxnum; 10585 klwp_id_t lwp; 10586 char lwp_save_state; 10587 hatlock_t *hatlockp; 10588 struct tsb_info *tsbinfop; 10589 10590 SFMMU_STAT(sf_tsb_exceptions); 10591 SFMMU_MMU_STAT(mmu_tsb_exceptions); 10592 sfmmup = astosfmmu(curthread->t_procp->p_as); 10593 ctxnum = tagaccess & TAGACC_CTX_MASK; 10594 10595 ASSERT(sfmmup != ksfmmup && ctxnum != KCONTEXT); 10596 ASSERT(sfmmup->sfmmu_ismhat == 0); 10597 /* 10598 * First, make sure we come out of here with a valid ctx, 10599 * since if we don't get one we'll simply loop on the 10600 * faulting instruction. 10601 * 10602 * If the ISM mappings are changing, the TSB is being relocated, or 10603 * the process is swapped out, we serialize behind the controlling 10604 * thread with the sfmmu_flags and sfmmu_tsb_cv condition variable. 10605 * Otherwise we synchronize with the context stealer or the thread 10606 * that required us to change out our MMU registers (such 10607 * as a thread changing out our TSB while we were running) by 10608 * locking the HAT and grabbing the rwlock on the context as a 10609 * reader temporarily.
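 *
 * A condensed sketch of the serialization loop that follows (not
 * the code itself; every sleep restarts from the top because any
 * of these conditions may be re-established while we sleep on
 * sfmmu_tsb_cv):
 *
 *	retry:
 *		for each tsbinfo: if TSB_RELOC_FLAG, cv_wait, goto retry
 *		if HAT_ISMBUSY, cv_wait, goto retry
 *		if HAT_SWAPPED, sfmmu_tsb_swapin(), goto retry
 *		sfmmu_get_ctx(sfmmup)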
10610 */ 10611 ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED) || 10612 ctxnum == INVALID_CONTEXT); 10613 10614 if (ctxnum == INVALID_CONTEXT) { 10615 /* 10616 * Must set lwp state to LWP_SYS before 10617 * trying to acquire any adaptive lock 10618 */ 10619 lwp = ttolwp(curthread); 10620 ASSERT(lwp); 10621 lwp_save_state = lwp->lwp_state; 10622 lwp->lwp_state = LWP_SYS; 10623 10624 hatlockp = sfmmu_hat_enter(sfmmup); 10625 retry: 10626 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; 10627 tsbinfop = tsbinfop->tsb_next) { 10628 if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) { 10629 cv_wait(&sfmmup->sfmmu_tsb_cv, 10630 HATLOCK_MUTEXP(hatlockp)); 10631 goto retry; 10632 } 10633 } 10634 10635 /* 10636 * Wait for ISM maps to be updated. 10637 */ 10638 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) { 10639 cv_wait(&sfmmup->sfmmu_tsb_cv, 10640 HATLOCK_MUTEXP(hatlockp)); 10641 goto retry; 10642 } 10643 10644 /* 10645 * If we're swapping in, get TSB(s). Note that we must do 10646 * this before we get a ctx or load the MMU state. Once 10647 * we swap in we have to recheck to make sure the TSB(s) and 10648 * ISM mappings didn't change while we slept. 10649 */ 10650 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 10651 sfmmu_tsb_swapin(sfmmup, hatlockp); 10652 goto retry; 10653 } 10654 10655 sfmmu_get_ctx(sfmmup); 10656 10657 sfmmu_hat_exit(hatlockp); 10658 /* 10659 * Must restore lwp_state if not calling 10660 * trap() for further processing. Restore 10661 * it anyway. 10662 */ 10663 lwp->lwp_state = lwp_save_state; 10664 if (sfmmup->sfmmu_ttecnt[TTE8K] != 0 || 10665 sfmmup->sfmmu_ttecnt[TTE64K] != 0 || 10666 sfmmup->sfmmu_ttecnt[TTE512K] != 0 || 10667 sfmmup->sfmmu_ttecnt[TTE4M] != 0 || 10668 sfmmup->sfmmu_ttecnt[TTE32M] != 0 || 10669 sfmmup->sfmmu_ttecnt[TTE256M] != 0) { 10670 return; 10671 } 10672 if (traptype == T_DATA_PROT) { 10673 traptype = T_DATA_MMU_MISS; 10674 } 10675 } 10676 trap(rp, (caddr_t)tagaccess, traptype, 0); 10677 } 10678 10679 /* 10680 * sfmmu_vatopfn_suspended is called from GET_TTE when TL=0 and the 10681 * TTE_SUSPENDED bit is set in the tte. We block on acquiring a page lock 10682 * rather than spinning, to avoid send mondo timeouts with 10683 * interrupts enabled. When the lock is acquired it is immediately 10684 * released and we return to sfmmu_vatopfn just after 10685 * the GET_TTE call. 10686 */ 10687 void 10688 sfmmu_vatopfn_suspended(caddr_t vaddr, sfmmu_t *sfmmu, tte_t *ttep) 10689 { 10690 struct page **pp; 10691 10692 (void) as_pagelock(sfmmu->sfmmu_as, &pp, vaddr, TTE_CSZ(ttep), S_WRITE); 10693 as_pageunlock(sfmmu->sfmmu_as, pp, vaddr, TTE_CSZ(ttep), S_WRITE); 10694 } 10695 10696 /* 10697 * sfmmu_tsbmiss_suspended is called from GET_TTE when TL>0 and the 10698 * TTE_SUSPENDED bit is set in the tte. We do this so that we can handle 10699 * cross traps which cannot be handled while spinning in the 10700 * trap handlers. Simply enter and exit the kpr_suspendlock spin 10701 * mutex, which is held by the holder of the suspend bit, and then 10702 * retry the trapped instruction after unwinding. 10703 */ 10704 /*ARGSUSED*/ 10705 void 10706 sfmmu_tsbmiss_suspended(struct regs *rp, uintptr_t tagacc, uint_t traptype) 10707 { 10708 ASSERT(curthread != kreloc_thread); 10709 mutex_enter(&kpr_suspendlock); 10710 mutex_exit(&kpr_suspendlock); 10711 } 10712 10713 /* 10714 * Special routine to flush out ism mappings - TSBs, TLBs and D-caches. 10715 * This routine may be called with all cpu's captured. Therefore, the 10716 * caller is responsible for holding all locks and disabling kernel 10717 * preemption.
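 *
 * The per-hat flush address is computed with the offset arithmetic
 * at the top of the loop below: the offset of the page within the
 * ISM segment (addr - ISMID_STARTADDR) is added to the base VA at
 * which each sharing hat attached the segment (iment_base_va).
 * For example, taking ISMID_STARTADDR as 0, a page at ISM offset
 * 0x6000 in a segment attached at 0x10000000 is flushed at VA
 * 0x10006000 in that hat.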
10718 */ 10719 /* ARGSUSED */ 10720 static void 10721 sfmmu_ismtlbcache_demap(caddr_t addr, sfmmu_t *ism_sfmmup, 10722 struct hme_blk *hmeblkp, pfn_t pfnum, int cache_flush_flag) 10723 { 10724 cpuset_t cpuset; 10725 caddr_t va; 10726 ism_ment_t *ment; 10727 sfmmu_t *sfmmup; 10728 #ifdef VAC 10729 int vcolor; 10730 #endif 10731 int ttesz; 10732 10733 /* 10734 * Walk the ism_hat's mapping list and flush the page 10735 * from every hat sharing this ism_hat. This routine 10736 * may be called while all cpu's have been captured. 10737 * Therefore we can't attempt to grab any locks. For now 10738 * this means we will protect the ism mapping list under 10739 * a single lock which will be grabbed by the caller. 10740 * If hat_share/unshare scalability becomes a performance 10741 * problem then we may need to re-think ism mapping list locking. 10742 */ 10743 ASSERT(ism_sfmmup->sfmmu_ismhat); 10744 ASSERT(MUTEX_HELD(&ism_mlist_lock)); 10745 addr = addr - ISMID_STARTADDR; 10746 for (ment = ism_sfmmup->sfmmu_iment; ment; ment = ment->iment_next) { 10747 10748 sfmmup = ment->iment_hat; 10749 10750 va = ment->iment_base_va; 10751 va = (caddr_t)((uintptr_t)va + (uintptr_t)addr); 10752 10753 /* 10754 * Flush TSB of ISM mappings. 10755 */ 10756 ttesz = get_hblk_ttesz(hmeblkp); 10757 if (ttesz == TTE8K || ttesz == TTE4M) { 10758 sfmmu_unload_tsb(sfmmup, va, ttesz); 10759 } else { 10760 caddr_t sva = va; 10761 caddr_t eva; 10762 ASSERT(addr == (caddr_t)get_hblk_base(hmeblkp)); 10763 eva = sva + get_hblk_span(hmeblkp); 10764 sfmmu_unload_tsb_range(sfmmup, sva, eva, ttesz); 10765 } 10766 10767 cpuset = sfmmup->sfmmu_cpusran; 10768 CPUSET_AND(cpuset, cpu_ready_set); 10769 CPUSET_DEL(cpuset, CPU->cpu_id); 10770 10771 SFMMU_XCALL_STATS(sfmmup); 10772 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)va, 10773 (uint64_t)sfmmup); 10774 10775 vtag_flushpage(va, (uint64_t)sfmmup); 10776 10777 #ifdef VAC 10778 /* 10779 * Flush D$ 10780 * When flushing D$ we must flush all 10781 * cpu's. See sfmmu_cache_flush(). 10782 */ 10783 if (cache_flush_flag == CACHE_FLUSH) { 10784 cpuset = cpu_ready_set; 10785 CPUSET_DEL(cpuset, CPU->cpu_id); 10786 10787 SFMMU_XCALL_STATS(sfmmup); 10788 vcolor = addr_to_vcolor(va); 10789 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor); 10790 vac_flushpage(pfnum, vcolor); 10791 } 10792 #endif /* VAC */ 10793 } 10794 } 10795 10796 /* 10797 * Demaps the TSB and CPU caches, and flushes all TLBs on all CPUs, for 10798 * a particular virtual address and ctx. If noflush is set we do not 10799 * flush the TLB/TSB. This function may or may not be called with the 10800 * HAT lock held. 10801 */ 10802 static void 10803 sfmmu_tlbcache_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp, 10804 pfn_t pfnum, int tlb_noflush, int cpu_flag, int cache_flush_flag, 10805 int hat_lock_held) 10806 { 10807 #ifdef VAC 10808 int vcolor; 10809 #endif 10810 cpuset_t cpuset; 10811 hatlock_t *hatlockp; 10812 10813 #if defined(lint) && !defined(VAC) 10814 pfnum = pfnum; 10815 cpu_flag = cpu_flag; 10816 cache_flush_flag = cache_flush_flag; 10817 #endif 10818 /* 10819 * There is no longer a need to protect against ctx being 10820 * stolen here since we don't store the ctx in the TSB anymore. 10821 */ 10822 #ifdef VAC 10823 vcolor = addr_to_vcolor(addr); 10824 #endif 10825 10826 /* 10827 * We must hold the hat lock during the flush of TLB, 10828 * to avoid a race with sfmmu_invalidate_ctx(), where 10829 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT, 10830 * causing TLB demap routine to skip flush on that MMU.
10831 * If the context on a MMU has already been set to 10832 * INVALID_CONTEXT, we just get an extra flush on 10833 * that MMU. 10834 */ 10835 if (!hat_lock_held && !tlb_noflush) 10836 hatlockp = sfmmu_hat_enter(sfmmup); 10837 10838 kpreempt_disable(); 10839 if (!tlb_noflush) { 10840 /* 10841 * Flush the TSB and TLB. 10842 */ 10843 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp); 10844 10845 cpuset = sfmmup->sfmmu_cpusran; 10846 CPUSET_AND(cpuset, cpu_ready_set); 10847 CPUSET_DEL(cpuset, CPU->cpu_id); 10848 10849 SFMMU_XCALL_STATS(sfmmup); 10850 10851 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, 10852 (uint64_t)sfmmup); 10853 10854 vtag_flushpage(addr, (uint64_t)sfmmup); 10855 } 10856 10857 if (!hat_lock_held && !tlb_noflush) 10858 sfmmu_hat_exit(hatlockp); 10859 10860 #ifdef VAC 10861 /* 10862 * Flush the D$ 10863 * 10864 * Even if the ctx is stolen, we need to flush the 10865 * cache. Our ctx stealer only flushes the TLBs. 10866 */ 10867 if (cache_flush_flag == CACHE_FLUSH) { 10868 if (cpu_flag & FLUSH_ALL_CPUS) { 10869 cpuset = cpu_ready_set; 10870 } else { 10871 cpuset = sfmmup->sfmmu_cpusran; 10872 CPUSET_AND(cpuset, cpu_ready_set); 10873 } 10874 CPUSET_DEL(cpuset, CPU->cpu_id); 10875 SFMMU_XCALL_STATS(sfmmup); 10876 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor); 10877 vac_flushpage(pfnum, vcolor); 10878 } 10879 #endif /* VAC */ 10880 kpreempt_enable(); 10881 } 10882 10883 /* 10884 * Demaps the TSB and flushes all TLBs on all cpus for a particular virtual 10885 * address and ctx. If noflush is set we do not currently do anything. 10886 * This function may or may not be called with the HAT lock held. 10887 */ 10888 static void 10889 sfmmu_tlb_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp, 10890 int tlb_noflush, int hat_lock_held) 10891 { 10892 cpuset_t cpuset; 10893 hatlock_t *hatlockp; 10894 10895 /* 10896 * If the process is exiting we have nothing to do. 10897 */ 10898 if (tlb_noflush) 10899 return; 10900 10901 /* 10902 * Flush TSB. 10903 */ 10904 if (!hat_lock_held) 10905 hatlockp = sfmmu_hat_enter(sfmmup); 10906 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp); 10907 10908 kpreempt_disable(); 10909 10910 cpuset = sfmmup->sfmmu_cpusran; 10911 CPUSET_AND(cpuset, cpu_ready_set); 10912 CPUSET_DEL(cpuset, CPU->cpu_id); 10913 10914 SFMMU_XCALL_STATS(sfmmup); 10915 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, (uint64_t)sfmmup); 10916 10917 vtag_flushpage(addr, (uint64_t)sfmmup); 10918 10919 if (!hat_lock_held) 10920 sfmmu_hat_exit(hatlockp); 10921 10922 kpreempt_enable(); 10923 10924 } 10925 10926 /* 10927 * Special case of sfmmu_tlb_demap for MMU_PAGESIZE hblks. Use the xcall 10928 * call handler that can flush a range of pages to save on xcalls. 10929 */ 10930 static int sfmmu_xcall_save; 10931 10932 static void 10933 sfmmu_tlb_range_demap(demap_range_t *dmrp) 10934 { 10935 sfmmu_t *sfmmup = dmrp->dmr_sfmmup; 10936 hatlock_t *hatlockp; 10937 cpuset_t cpuset; 10938 uint64_t sfmmu_pgcnt; 10939 pgcnt_t pgcnt = 0; 10940 int pgunload = 0; 10941 int dirtypg = 0; 10942 caddr_t addr = dmrp->dmr_addr; 10943 caddr_t eaddr; 10944 uint64_t bitvec = dmrp->dmr_bitvec; 10945 10946 ASSERT(bitvec & 1); 10947 10948 /* 10949 * Flush TSB and calculate number of pages to flush. 10950 */ 10951 while (bitvec != 0) { 10952 dirtypg = 0; 10953 /* 10954 * Find the first page to flush and then count how many 10955 * pages there are after it that also need to be flushed. 10956 * This way the number of TSB flushes is minimized. 
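 *
 * Worked example, assuming 8K pages: for dmr_bitvec 0x67 (binary
 * 1100111) at base address A, the scan below flushes the TSB for
 * [A, A + 3*8K) to cover the low run of set bits, skips the two
 * clear bits, then flushes [A + 5*8K, A + 7*8K) for the high run,
 * rather than issuing five single-page flushes.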
10957 */ 10958 while ((bitvec & 1) == 0) { 10959 pgcnt++; 10960 addr += MMU_PAGESIZE; 10961 bitvec >>= 1; 10962 } 10963 while (bitvec & 1) { 10964 dirtypg++; 10965 bitvec >>= 1; 10966 } 10967 eaddr = addr + ptob(dirtypg); 10968 hatlockp = sfmmu_hat_enter(sfmmup); 10969 sfmmu_unload_tsb_range(sfmmup, addr, eaddr, TTE8K); 10970 sfmmu_hat_exit(hatlockp); 10971 pgunload += dirtypg; 10972 addr = eaddr; 10973 pgcnt += dirtypg; 10974 } 10975 10976 ASSERT((pgcnt<<MMU_PAGESHIFT) <= dmrp->dmr_endaddr - dmrp->dmr_addr); 10977 if (sfmmup->sfmmu_free == 0) { 10978 addr = dmrp->dmr_addr; 10979 bitvec = dmrp->dmr_bitvec; 10980 10981 /* 10982 * make sure it has SFMMU_PGCNT_SHIFT bits only, 10983 * as it will be used to pack argument for xt_some 10984 */ 10985 ASSERT((pgcnt > 0) && 10986 (pgcnt <= (1 << SFMMU_PGCNT_SHIFT))); 10987 10988 /* 10989 * Encode pgcnt as (pgcnt -1 ), and pass (pgcnt - 1) in 10990 * the low 6 bits of sfmmup. This is doable since pgcnt 10991 * always >= 1. 10992 */ 10993 ASSERT(!((uint64_t)sfmmup & SFMMU_PGCNT_MASK)); 10994 sfmmu_pgcnt = (uint64_t)sfmmup | 10995 ((pgcnt - 1) & SFMMU_PGCNT_MASK); 10996 10997 /* 10998 * We must hold the hat lock during the flush of TLB, 10999 * to avoid a race with sfmmu_invalidate_ctx(), where 11000 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT, 11001 * causing TLB demap routine to skip flush on that MMU. 11002 * If the context on a MMU has already been set to 11003 * INVALID_CONTEXT, we just get an extra flush on 11004 * that MMU. 11005 */ 11006 hatlockp = sfmmu_hat_enter(sfmmup); 11007 kpreempt_disable(); 11008 11009 cpuset = sfmmup->sfmmu_cpusran; 11010 CPUSET_AND(cpuset, cpu_ready_set); 11011 CPUSET_DEL(cpuset, CPU->cpu_id); 11012 11013 SFMMU_XCALL_STATS(sfmmup); 11014 xt_some(cpuset, vtag_flush_pgcnt_tl1, (uint64_t)addr, 11015 sfmmu_pgcnt); 11016 11017 for (; bitvec != 0; bitvec >>= 1) { 11018 if (bitvec & 1) 11019 vtag_flushpage(addr, (uint64_t)sfmmup); 11020 addr += MMU_PAGESIZE; 11021 } 11022 kpreempt_enable(); 11023 sfmmu_hat_exit(hatlockp); 11024 11025 sfmmu_xcall_save += (pgunload-1); 11026 } 11027 dmrp->dmr_bitvec = 0; 11028 } 11029 11030 /* 11031 * In cases where we need to synchronize with TLB/TSB miss trap 11032 * handlers, _and_ need to flush the TLB, it's a lot easier to 11033 * throw away the context from the process than to do a 11034 * special song and dance to keep things consistent for the 11035 * handlers. 11036 * 11037 * Since the process suddenly ends up without a context and our caller 11038 * holds the hat lock, threads that fault after this function is called 11039 * will pile up on the lock. We can then do whatever we need to 11040 * atomically from the context of the caller. The first blocked thread 11041 * to resume executing will get the process a new context, and the 11042 * process will resume executing. 11043 * 11044 * One added advantage of this approach is that on MMUs that 11045 * support a "flush all" operation, we will delay the flush until 11046 * cnum wrap-around, and then flush the TLB one time. This 11047 * is rather rare, so it's a lot less expensive than making 8000 11048 * x-calls to flush the TLB 8000 times. 11049 * 11050 * A per-process (PP) lock is used to synchronize ctx allocations in 11051 * resume() and ctx invalidations here. 
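 *
 * In outline, the sequence implemented below is:
 *
 *	1. disable preemption and interrupts, take the PP lock;
 *	2. set sfmmu_ctxs[i].cnum = INVALID_CONTEXT for every
 *	   context domain, then membar so the stores are globally
 *	   visible;
 *	3. drop the PP lock and re-enable interrupts;
 *	4. cross-call the CPUs in sfmmu_cpusran so any CPU running
 *	   this process raises a tsb exception and re-faults;
 *	5. if the local CPU is running this process, invalidate its
 *	   secondary context register and utsbinfo directly.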
11052 */ 11053 static void 11054 sfmmu_invalidate_ctx(sfmmu_t *sfmmup) 11055 { 11056 cpuset_t cpuset; 11057 int cnum, currcnum; 11058 mmu_ctx_t *mmu_ctxp; 11059 int i; 11060 uint_t pstate_save; 11061 11062 SFMMU_STAT(sf_ctx_inv); 11063 11064 ASSERT(sfmmu_hat_lock_held(sfmmup)); 11065 ASSERT(sfmmup != ksfmmup); 11066 11067 kpreempt_disable(); 11068 11069 mmu_ctxp = CPU_MMU_CTXP(CPU); 11070 ASSERT(mmu_ctxp); 11071 ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms); 11072 ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]); 11073 11074 currcnum = sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum; 11075 11076 pstate_save = sfmmu_disable_intrs(); 11077 11078 lock_set(&sfmmup->sfmmu_ctx_lock); /* acquire PP lock */ 11079 /* set HAT cnum invalid across all context domains. */ 11080 for (i = 0; i < max_mmu_ctxdoms; i++) { 11081 11082 cnum = sfmmup->sfmmu_ctxs[i].cnum; 11083 if (cnum == INVALID_CONTEXT) { 11084 continue; 11085 } 11086 11087 sfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT; 11088 } 11089 membar_enter(); /* make sure globally visible to all CPUs */ 11090 lock_clear(&sfmmup->sfmmu_ctx_lock); /* release PP lock */ 11091 11092 sfmmu_enable_intrs(pstate_save); 11093 11094 cpuset = sfmmup->sfmmu_cpusran; 11095 CPUSET_DEL(cpuset, CPU->cpu_id); 11096 CPUSET_AND(cpuset, cpu_ready_set); 11097 if (!CPUSET_ISNULL(cpuset)) { 11098 SFMMU_XCALL_STATS(sfmmup); 11099 xt_some(cpuset, sfmmu_raise_tsb_exception, 11100 (uint64_t)sfmmup, INVALID_CONTEXT); 11101 xt_sync(cpuset); 11102 SFMMU_STAT(sf_tsb_raise_exception); 11103 SFMMU_MMU_STAT(mmu_tsb_raise_exception); 11104 } 11105 11106 /* 11107 * If the hat to-be-invalidated is the same as the current 11108 * process on the local CPU, we need to invalidate 11109 * this CPU's context as well. 11110 */ 11111 if ((sfmmu_getctx_sec() == currcnum) && 11112 (currcnum != INVALID_CONTEXT)) { 11113 sfmmu_setctx_sec(INVALID_CONTEXT); 11114 sfmmu_clear_utsbinfo(); 11115 } 11116 11117 kpreempt_enable(); 11118 11119 /* 11120 * we hold the hat lock, so nobody should allocate a context 11121 * for us yet 11122 */ 11123 ASSERT(sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum == INVALID_CONTEXT); 11124 } 11125 11126 #ifdef VAC 11127 /* 11128 * We need to flush the cache on all cpus. It is possible that 11129 * a process referenced a page as cacheable but has since exited 11130 * and cleared the mapping list. We still need to flush it but have no 11131 * state, so flushing on all cpus is the only alternative. 11132 */ 11133 void 11134 sfmmu_cache_flush(pfn_t pfnum, int vcolor) 11135 { 11136 cpuset_t cpuset; 11137 11138 kpreempt_disable(); 11139 cpuset = cpu_ready_set; 11140 CPUSET_DEL(cpuset, CPU->cpu_id); 11141 SFMMU_XCALL_STATS(NULL); /* account to any ctx */ 11142 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor); 11143 xt_sync(cpuset); 11144 vac_flushpage(pfnum, vcolor); 11145 kpreempt_enable(); 11146 } 11147 11148 void 11149 sfmmu_cache_flushcolor(int vcolor, pfn_t pfnum) 11150 { 11151 cpuset_t cpuset; 11152 11153 ASSERT(vcolor >= 0); 11154 11155 kpreempt_disable(); 11156 cpuset = cpu_ready_set; 11157 CPUSET_DEL(cpuset, CPU->cpu_id); 11158 SFMMU_XCALL_STATS(NULL); /* account to any ctx */ 11159 xt_some(cpuset, vac_flushcolor_tl1, vcolor, pfnum); 11160 xt_sync(cpuset); 11161 vac_flushcolor(vcolor, pfnum); 11162 kpreempt_enable(); 11163 } 11164 #endif /* VAC */ 11165 11166 /* 11167 * We need to prevent processes from accessing the TSB using a cached physical 11168 * address.
It's alright if they try to access the TSB via virtual address 11169 * since they will just fault on that virtual address once the mapping has 11170 * been suspended. 11171 */ 11172 #pragma weak sendmondo_in_recover 11173 11174 /* ARGSUSED */ 11175 static int 11176 sfmmu_tsb_pre_relocator(caddr_t va, uint_t tsbsz, uint_t flags, void *tsbinfo) 11177 { 11178 hatlock_t *hatlockp; 11179 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo; 11180 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu; 11181 extern uint32_t sendmondo_in_recover; 11182 11183 if (flags != HAT_PRESUSPEND) 11184 return (0); 11185 11186 hatlockp = sfmmu_hat_enter(sfmmup); 11187 11188 tsbinfop->tsb_flags |= TSB_RELOC_FLAG; 11189 11190 /* 11191 * For Cheetah+ Erratum 25: 11192 * Wait for any active recovery to finish. We can't risk 11193 * relocating the TSB of the thread running mondo_recover_proc() 11194 * since, if we did that, we would deadlock. The scenario we are 11195 * trying to avoid is as follows: 11196 * 11197 * THIS CPU RECOVER CPU 11198 * -------- ----------- 11199 * Begins recovery, walking through TSB 11200 * hat_pagesuspend() TSB TTE 11201 * TLB miss on TSB TTE, spins at TL1 11202 * xt_sync() 11203 * send_mondo_timeout() 11204 * mondo_recover_proc() 11205 * ((deadlocked)) 11206 * 11207 * The second half of the workaround is that mondo_recover_proc() 11208 * checks to see if the tsb_info has the RELOC flag set, and if it 11209 * does, it skips over that TSB without ever touching tsbinfop->tsb_va 11210 * and hence avoiding the TLB miss that could result in a deadlock. 11211 */ 11212 if (&sendmondo_in_recover) { 11213 membar_enter(); /* make sure RELOC flag visible */ 11214 while (sendmondo_in_recover) { 11215 drv_usecwait(1); 11216 membar_consumer(); 11217 } 11218 } 11219 11220 sfmmu_invalidate_ctx(sfmmup); 11221 sfmmu_hat_exit(hatlockp); 11222 11223 return (0); 11224 } 11225 11226 /* ARGSUSED */ 11227 static int 11228 sfmmu_tsb_post_relocator(caddr_t va, uint_t tsbsz, uint_t flags, 11229 void *tsbinfo, pfn_t newpfn) 11230 { 11231 hatlock_t *hatlockp; 11232 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo; 11233 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu; 11234 11235 if (flags != HAT_POSTUNSUSPEND) 11236 return (0); 11237 11238 hatlockp = sfmmu_hat_enter(sfmmup); 11239 11240 SFMMU_STAT(sf_tsb_reloc); 11241 11242 /* 11243 * The process may have swapped out while we were relocating one 11244 * of its TSBs. If so, don't bother doing the setup since the 11245 * process can't be using the memory anymore. 11246 */ 11247 if ((tsbinfop->tsb_flags & TSB_SWAPPED) == 0) { 11248 ASSERT(va == tsbinfop->tsb_va); 11249 sfmmu_tsbinfo_setup_phys(tsbinfop, newpfn); 11250 sfmmu_setup_tsbinfo(sfmmup); 11251 11252 if (tsbinfop->tsb_flags & TSB_FLUSH_NEEDED) { 11253 sfmmu_inv_tsb(tsbinfop->tsb_va, 11254 TSB_BYTES(tsbinfop->tsb_szc)); 11255 tsbinfop->tsb_flags &= ~TSB_FLUSH_NEEDED; 11256 } 11257 } 11258 11259 membar_exit(); 11260 tsbinfop->tsb_flags &= ~TSB_RELOC_FLAG; 11261 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 11262 11263 sfmmu_hat_exit(hatlockp); 11264 11265 return (0); 11266 } 11267 11268 /* 11269 * Allocate and initialize a tsb_info structure. Note that we may or may not 11270 * allocate a TSB here, depending on the flags passed in. 
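 *
 * A hedged caller-side sketch (flag and mask names as used
 * elsewhere in this file): a forced minimum-size allocation cannot
 * fail, so the error branch is only reachable for ordinary
 * requests, which see ENOMEM over the high water mark or EAGAIN
 * when retrying with a smaller size code may help:
 *
 *	struct tsb_info *tsbinfop;
 *	int err;
 *
 *	err = sfmmu_tsbinfo_alloc(&tsbinfop, TSB_MIN_SZCODE,
 *	    TSB8K|TSB64K|TSB512K, TSB_FORCEALLOC, sfmmup);
 *	ASSERT(err == 0);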
11271 */ 11272 static int 11273 sfmmu_tsbinfo_alloc(struct tsb_info **tsbinfopp, int tsb_szc, int tte_sz_mask, 11274 uint_t flags, sfmmu_t *sfmmup) 11275 { 11276 int err; 11277 11278 *tsbinfopp = (struct tsb_info *)kmem_cache_alloc( 11279 sfmmu_tsbinfo_cache, KM_SLEEP); 11280 11281 if ((err = sfmmu_init_tsbinfo(*tsbinfopp, tte_sz_mask, 11282 tsb_szc, flags, sfmmup)) != 0) { 11283 kmem_cache_free(sfmmu_tsbinfo_cache, *tsbinfopp); 11284 SFMMU_STAT(sf_tsb_allocfail); 11285 *tsbinfopp = NULL; 11286 return (err); 11287 } 11288 SFMMU_STAT(sf_tsb_alloc); 11289 11290 /* 11291 * Bump the TSB size counters for this TSB size. 11292 */ 11293 (*(((int *)&sfmmu_tsbsize_stat) + tsb_szc))++; 11294 return (0); 11295 } 11296 11297 static void 11298 sfmmu_tsb_free(struct tsb_info *tsbinfo) 11299 { 11300 caddr_t tsbva = tsbinfo->tsb_va; 11301 uint_t tsb_size = TSB_BYTES(tsbinfo->tsb_szc); 11302 struct kmem_cache *kmem_cachep = tsbinfo->tsb_cache; 11303 vmem_t *vmp = tsbinfo->tsb_vmp; 11304 11305 /* 11306 * If we allocated this TSB from relocatable kernel memory, then we 11307 * need to uninstall the callback handler. 11308 */ 11309 if (tsbinfo->tsb_cache != sfmmu_tsb8k_cache) { 11310 uintptr_t slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT; 11311 caddr_t slab_vaddr = (caddr_t)((uintptr_t)tsbva & slab_mask); 11312 page_t **ppl; 11313 int ret; 11314 11315 ret = as_pagelock(&kas, &ppl, slab_vaddr, PAGESIZE, S_WRITE); 11316 ASSERT(ret == 0); 11317 hat_delete_callback(tsbva, (uint_t)tsb_size, (void *)tsbinfo, 11318 0, NULL); 11319 as_pageunlock(&kas, ppl, slab_vaddr, PAGESIZE, S_WRITE); 11320 } 11321 11322 if (kmem_cachep != NULL) { 11323 kmem_cache_free(kmem_cachep, tsbva); 11324 } else { 11325 vmem_xfree(vmp, (void *)tsbva, tsb_size); 11326 } 11327 tsbinfo->tsb_va = (caddr_t)0xbad00bad; 11328 atomic_add_64(&tsb_alloc_bytes, -(int64_t)tsb_size); 11329 } 11330 11331 static void 11332 sfmmu_tsbinfo_free(struct tsb_info *tsbinfo) 11333 { 11334 if ((tsbinfo->tsb_flags & TSB_SWAPPED) == 0) { 11335 sfmmu_tsb_free(tsbinfo); 11336 } 11337 kmem_cache_free(sfmmu_tsbinfo_cache, tsbinfo); 11338 11339 } 11340 11341 /* 11342 * Setup all the references to physical memory for this tsbinfo. 11343 * The underlying page(s) must be locked. 11344 */ 11345 static void 11346 sfmmu_tsbinfo_setup_phys(struct tsb_info *tsbinfo, pfn_t pfn) 11347 { 11348 ASSERT(pfn != PFN_INVALID); 11349 ASSERT(pfn == va_to_pfn(tsbinfo->tsb_va)); 11350 11351 #ifndef sun4v 11352 if (tsbinfo->tsb_szc == 0) { 11353 sfmmu_memtte(&tsbinfo->tsb_tte, pfn, 11354 PROT_WRITE|PROT_READ, TTE8K); 11355 } else { 11356 /* 11357 * Round down PA and use a large mapping; the handlers will 11358 * compute the TSB pointer at the correct offset into the 11359 * big virtual page. NOTE: this assumes all TSBs larger 11360 * than 8K must come from physically contiguous slabs of 11361 * size tsb_slab_size. 
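 *
 * Worked example (assuming 8K base pages and 4M slabs, so
 * tsb_slab_mask is 0x1ff): a TSB page at pfn 0x12345 is mapped at
 * pfn 0x12345 & ~0x1ff == 0x12200, the base of its slab; the miss
 * handlers then index 0x145 pages into the large virtual page to
 * reach the TSB itself.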
11362 */ 11363 sfmmu_memtte(&tsbinfo->tsb_tte, pfn & ~tsb_slab_mask, 11364 PROT_WRITE|PROT_READ, tsb_slab_ttesz); 11365 } 11366 tsbinfo->tsb_pa = ptob(pfn); 11367 11368 TTE_SET_LOCKED(&tsbinfo->tsb_tte); /* lock the tte into dtlb */ 11369 TTE_SET_MOD(&tsbinfo->tsb_tte); /* enable writes */ 11370 11371 ASSERT(TTE_IS_PRIVILEGED(&tsbinfo->tsb_tte)); 11372 ASSERT(TTE_IS_LOCKED(&tsbinfo->tsb_tte)); 11373 #else /* sun4v */ 11374 tsbinfo->tsb_pa = ptob(pfn); 11375 #endif /* sun4v */ 11376 } 11377 11378 11379 /* 11380 * Returns zero on success, ENOMEM if over the high water mark, 11381 * or EAGAIN if the caller needs to retry with a smaller TSB 11382 * size (or specify TSB_FORCEALLOC if the allocation can't fail). 11383 * 11384 * This call cannot fail to allocate a TSB if TSB_FORCEALLOC 11385 * is specified and the TSB requested is PAGESIZE, though it 11386 * may sleep waiting for memory if sufficient memory is not 11387 * available. 11388 */ 11389 static int 11390 sfmmu_init_tsbinfo(struct tsb_info *tsbinfo, int tteszmask, 11391 int tsbcode, uint_t flags, sfmmu_t *sfmmup) 11392 { 11393 caddr_t vaddr = NULL; 11394 caddr_t slab_vaddr; 11395 uintptr_t slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT; 11396 int tsbbytes = TSB_BYTES(tsbcode); 11397 int lowmem = 0; 11398 struct kmem_cache *kmem_cachep = NULL; 11399 vmem_t *vmp = NULL; 11400 lgrp_id_t lgrpid = LGRP_NONE; 11401 pfn_t pfn; 11402 uint_t cbflags = HAC_SLEEP; 11403 page_t **pplist; 11404 int ret; 11405 11406 if (flags & (TSB_FORCEALLOC | TSB_SWAPIN | TSB_GROW | TSB_SHRINK)) 11407 flags |= TSB_ALLOC; 11408 11409 ASSERT((flags & TSB_FORCEALLOC) == 0 || tsbcode == TSB_MIN_SZCODE); 11410 11411 tsbinfo->tsb_sfmmu = sfmmup; 11412 11413 /* 11414 * If not allocating a TSB, set up the tsbinfo, set TSB_SWAPPED, and 11415 * return. 11416 */ 11417 if ((flags & TSB_ALLOC) == 0) { 11418 tsbinfo->tsb_szc = tsbcode; 11419 tsbinfo->tsb_ttesz_mask = tteszmask; 11420 tsbinfo->tsb_va = (caddr_t)0xbadbadbeef; 11421 tsbinfo->tsb_pa = -1; 11422 tsbinfo->tsb_tte.ll = 0; 11423 tsbinfo->tsb_next = NULL; 11424 tsbinfo->tsb_flags = TSB_SWAPPED; 11425 tsbinfo->tsb_cache = NULL; 11426 tsbinfo->tsb_vmp = NULL; 11427 return (0); 11428 } 11429 11430 #ifdef DEBUG 11431 /* 11432 * For debugging: 11433 * Randomly force allocation failures every tsb_alloc_mtbf 11434 * tries if TSB_FORCEALLOC is not specified. This will 11435 * return ENOMEM if tsb_alloc_mtbf is odd, or EAGAIN if 11436 * it is even, to allow testing of both failure paths... 11437 */ 11438 if (tsb_alloc_mtbf && ((flags & TSB_FORCEALLOC) == 0) && 11439 (tsb_alloc_count++ == tsb_alloc_mtbf)) { 11440 tsb_alloc_count = 0; 11441 tsb_alloc_fail_mtbf++; 11442 return ((tsb_alloc_mtbf & 1)? ENOMEM : EAGAIN); 11443 } 11444 #endif /* DEBUG */ 11445 11446 /* 11447 * Enforce high water mark if we are not doing a forced allocation 11448 * and are not shrinking a process' TSB. 11449 */ 11450 if ((flags & TSB_SHRINK) == 0 && 11451 (tsbbytes + tsb_alloc_bytes) > tsb_alloc_hiwater) { 11452 if ((flags & TSB_FORCEALLOC) == 0) 11453 return (ENOMEM); 11454 lowmem = 1; 11455 } 11456 11457 /* 11458 * Allocate from the correct location based upon the size of the TSB 11459 * compared to the base page size, and what memory conditions dictate. 11460 * Note we always do nonblocking allocations from the TSB arena since 11461 * we don't want memory fragmentation to cause processes to block 11462 * indefinitely waiting for memory; until the kernel algorithms that 11463 * coalesce large pages are improved this is our best option. 
11464 * 11465 * Algorithm: 11466 * If allocating a "large" TSB (>8K), allocate from the 11467 * appropriate kmem_tsb_default_arena vmem arena 11468 * else if low on memory or the TSB_FORCEALLOC flag is set or 11469 * tsb_forceheap is set 11470 * Allocate from kernel heap via sfmmu_tsb8k_cache with 11471 * KM_SLEEP (never fails) 11472 * else 11473 * Allocate from appropriate sfmmu_tsb_cache with 11474 * KM_NOSLEEP 11475 * endif 11476 */ 11477 if (tsb_lgrp_affinity) 11478 lgrpid = lgrp_home_id(curthread); 11479 if (lgrpid == LGRP_NONE) 11480 lgrpid = 0; /* use lgrp of boot CPU */ 11481 11482 if (tsbbytes > MMU_PAGESIZE) { 11483 vmp = kmem_tsb_default_arena[lgrpid]; 11484 vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes, 0, 0, 11485 NULL, NULL, VM_NOSLEEP); 11486 #ifdef DEBUG 11487 } else if (lowmem || (flags & TSB_FORCEALLOC) || tsb_forceheap) { 11488 #else /* !DEBUG */ 11489 } else if (lowmem || (flags & TSB_FORCEALLOC)) { 11490 #endif /* DEBUG */ 11491 kmem_cachep = sfmmu_tsb8k_cache; 11492 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_SLEEP); 11493 ASSERT(vaddr != NULL); 11494 } else { 11495 kmem_cachep = sfmmu_tsb_cache[lgrpid]; 11496 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_NOSLEEP); 11497 } 11498 11499 tsbinfo->tsb_cache = kmem_cachep; 11500 tsbinfo->tsb_vmp = vmp; 11501 11502 if (vaddr == NULL) { 11503 return (EAGAIN); 11504 } 11505 11506 atomic_add_64(&tsb_alloc_bytes, (int64_t)tsbbytes); 11507 kmem_cachep = tsbinfo->tsb_cache; 11508 11509 /* 11510 * If we are allocating from outside the cage, then we need to 11511 * register a relocation callback handler. Note that for now 11512 * since pseudo mappings always hang off of the slab's root page, 11513 * we need only lock the first 8K of the TSB slab. This is a bit 11514 * hacky but it is good for performance. 11515 */ 11516 if (kmem_cachep != sfmmu_tsb8k_cache) { 11517 slab_vaddr = (caddr_t)((uintptr_t)vaddr & slab_mask); 11518 ret = as_pagelock(&kas, &pplist, slab_vaddr, PAGESIZE, S_WRITE); 11519 ASSERT(ret == 0); 11520 ret = hat_add_callback(sfmmu_tsb_cb_id, vaddr, (uint_t)tsbbytes, 11521 cbflags, (void *)tsbinfo, &pfn, NULL); 11522 11523 /* 11524 * Need to free up resources if we could not successfully 11525 * add the callback function and return an error condition. 11526 */ 11527 if (ret != 0) { 11528 if (kmem_cachep) { 11529 kmem_cache_free(kmem_cachep, vaddr); 11530 } else { 11531 vmem_xfree(vmp, (void *)vaddr, tsbbytes); 11532 } 11533 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE, 11534 S_WRITE); 11535 return (EAGAIN); 11536 } 11537 } else { 11538 /* 11539 * Since allocation of 8K TSBs from heap is rare and occurs 11540 * during memory pressure we allocate them from permanent 11541 * memory rather than using callbacks to get the PFN. 
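 * The failure path above mirrors this split: if hat_add_callback()
 * fails, the TSB memory is returned to whichever allocator
 * produced it (kmem_cache_free() for cache allocations,
 * vmem_xfree() for arena allocations) before the slab page is
 * unlocked and EAGAIN is returned.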
11542 */ 11543 pfn = hat_getpfnum(kas.a_hat, vaddr); 11544 } 11545 11546 tsbinfo->tsb_va = vaddr; 11547 tsbinfo->tsb_szc = tsbcode; 11548 tsbinfo->tsb_ttesz_mask = tteszmask; 11549 tsbinfo->tsb_next = NULL; 11550 tsbinfo->tsb_flags = 0; 11551 11552 sfmmu_tsbinfo_setup_phys(tsbinfo, pfn); 11553 11554 if (kmem_cachep != sfmmu_tsb8k_cache) { 11555 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE, S_WRITE); 11556 } 11557 11558 sfmmu_inv_tsb(vaddr, tsbbytes); 11559 return (0); 11560 } 11561 11562 /* 11563 * Initialize per cpu tsb and per cpu tsbmiss_area 11564 */ 11565 void 11566 sfmmu_init_tsbs(void) 11567 { 11568 int i; 11569 struct tsbmiss *tsbmissp; 11570 struct kpmtsbm *kpmtsbmp; 11571 #ifndef sun4v 11572 extern int dcache_line_mask; 11573 #endif /* sun4v */ 11574 extern uint_t vac_colors; 11575 11576 /* 11577 * Init. tsb miss area. 11578 */ 11579 tsbmissp = tsbmiss_area; 11580 11581 for (i = 0; i < NCPU; tsbmissp++, i++) { 11582 /* 11583 * initialize the tsbmiss area. 11584 * Do this for all possible CPUs as some may be added 11585 * while the system is running. There is no cost to this. 11586 */ 11587 tsbmissp->ksfmmup = ksfmmup; 11588 #ifndef sun4v 11589 tsbmissp->dcache_line_mask = (uint16_t)dcache_line_mask; 11590 #endif /* sun4v */ 11591 tsbmissp->khashstart = 11592 (struct hmehash_bucket *)va_to_pa((caddr_t)khme_hash); 11593 tsbmissp->uhashstart = 11594 (struct hmehash_bucket *)va_to_pa((caddr_t)uhme_hash); 11595 tsbmissp->khashsz = khmehash_num; 11596 tsbmissp->uhashsz = uhmehash_num; 11597 } 11598 11599 sfmmu_tsb_cb_id = hat_register_callback('T'<<16 | 'S' << 8 | 'B', 11600 sfmmu_tsb_pre_relocator, sfmmu_tsb_post_relocator, NULL, 0); 11601 11602 if (kpm_enable == 0) 11603 return; 11604 11605 /* -- Begin KPM specific init -- */ 11606 11607 if (kpm_smallpages) { 11608 /* 11609 * If we're using base pagesize pages for seg_kpm 11610 * mappings, we use the kernel TSB since we can't afford 11611 * to allocate a second huge TSB for these mappings. 11612 */ 11613 kpm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base; 11614 kpm_tsbsz = ktsb_szcode; 11615 kpmsm_tsbbase = kpm_tsbbase; 11616 kpmsm_tsbsz = kpm_tsbsz; 11617 } else { 11618 /* 11619 * In VAC conflict case, just put the entries in the 11620 * kernel 8K indexed TSB for now so we can find them. 11621 * This could really be changed in the future if we feel 11622 * the need... 11623 */ 11624 kpmsm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base; 11625 kpmsm_tsbsz = ktsb_szcode; 11626 kpm_tsbbase = ktsb_phys? ktsb4m_pbase : (uint64_t)ktsb4m_base; 11627 kpm_tsbsz = ktsb4m_szcode; 11628 } 11629 11630 kpmtsbmp = kpmtsbm_area; 11631 for (i = 0; i < NCPU; kpmtsbmp++, i++) { 11632 /* 11633 * Initialize the kpmtsbm area. 11634 * Do this for all possible CPUs as some may be added 11635 * while the system is running. There is no cost to this. 11636 */ 11637 kpmtsbmp->vbase = kpm_vbase; 11638 kpmtsbmp->vend = kpm_vbase + kpm_size * vac_colors; 11639 kpmtsbmp->sz_shift = kpm_size_shift; 11640 kpmtsbmp->kpmp_shift = kpmp_shift; 11641 kpmtsbmp->kpmp2pshft = (uchar_t)kpmp2pshft; 11642 if (kpm_smallpages == 0) { 11643 kpmtsbmp->kpmp_table_sz = kpmp_table_sz; 11644 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_table); 11645 } else { 11646 kpmtsbmp->kpmp_table_sz = kpmp_stable_sz; 11647 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_stable); 11648 } 11649 kpmtsbmp->msegphashpa = va_to_pa(memseg_phash); 11650 kpmtsbmp->flags = KPMTSBM_ENABLE_FLAG; 11651 #ifdef DEBUG 11652 kpmtsbmp->flags |= (kpm_tsbmtl) ? 
KPMTSBM_TLTSBM_FLAG : 0; 11653 #endif /* DEBUG */ 11654 if (ktsb_phys) 11655 kpmtsbmp->flags |= KPMTSBM_TSBPHYS_FLAG; 11656 } 11657 11658 /* -- End KPM specific init -- */ 11659 } 11660 11661 /* Avoid using sfmmu_tsbinfo_alloc() to avoid kmem_alloc - no real reason */ 11662 struct tsb_info ktsb_info[2]; 11663 11664 /* 11665 * Called from hat_kern_setup() to set up the tsb_info for ksfmmup. 11666 */ 11667 void 11668 sfmmu_init_ktsbinfo() 11669 { 11670 ASSERT(ksfmmup != NULL); 11671 ASSERT(ksfmmup->sfmmu_tsb == NULL); 11672 /* 11673 * Allocate tsbinfos for the kernel and copy in data 11674 * to make debugging easier and sun4v setup easier. 11675 */ 11676 ktsb_info[0].tsb_sfmmu = ksfmmup; 11677 ktsb_info[0].tsb_szc = ktsb_szcode; 11678 ktsb_info[0].tsb_ttesz_mask = TSB8K|TSB64K|TSB512K; 11679 ktsb_info[0].tsb_va = ktsb_base; 11680 ktsb_info[0].tsb_pa = ktsb_pbase; 11681 ktsb_info[0].tsb_flags = 0; 11682 ktsb_info[0].tsb_tte.ll = 0; 11683 ktsb_info[0].tsb_cache = NULL; 11684 11685 ktsb_info[1].tsb_sfmmu = ksfmmup; 11686 ktsb_info[1].tsb_szc = ktsb4m_szcode; 11687 ktsb_info[1].tsb_ttesz_mask = TSB4M; 11688 ktsb_info[1].tsb_va = ktsb4m_base; 11689 ktsb_info[1].tsb_pa = ktsb4m_pbase; 11690 ktsb_info[1].tsb_flags = 0; 11691 ktsb_info[1].tsb_tte.ll = 0; 11692 ktsb_info[1].tsb_cache = NULL; 11693 11694 /* Link them into ksfmmup. */ 11695 ktsb_info[0].tsb_next = &ktsb_info[1]; 11696 ktsb_info[1].tsb_next = NULL; 11697 ksfmmup->sfmmu_tsb = &ktsb_info[0]; 11698 11699 sfmmu_setup_tsbinfo(ksfmmup); 11700 } 11701 11702 /* 11703 * Cache the last value returned from va_to_pa(). If the VA specified 11704 * in the current call to cached_va_to_pa() maps to the same page (as the 11705 * previous call to cached_va_to_pa()), then compute the PA using 11706 * cached info, else call va_to_pa(). 11707 * 11708 * Note: this function is neither MT-safe nor consistent in the presence 11709 * of multiple, interleaved threads. This function was created to enable 11710 * an optimization used during boot (at a point when there's only one thread 11711 * executing on the "boot CPU", and before startup_vm() has been called). 11712 */ 11713 static uint64_t 11714 cached_va_to_pa(void *vaddr) 11715 { 11716 static uint64_t prev_vaddr_base = 0; 11717 static uint64_t prev_pfn = 0; 11718 11719 if ((((uint64_t)vaddr) & MMU_PAGEMASK) == prev_vaddr_base) { 11720 return (prev_pfn | ((uint64_t)vaddr & MMU_PAGEOFFSET)); 11721 } else { 11722 uint64_t pa = va_to_pa(vaddr); 11723 11724 if (pa != ((uint64_t)-1)) { 11725 /* 11726 * Computed physical address is valid. Cache its 11727 * related info for the next cached_va_to_pa() call. 11728 */ 11729 prev_pfn = pa & MMU_PAGEMASK; 11730 prev_vaddr_base = ((uint64_t)vaddr) & MMU_PAGEMASK; 11731 } 11732 11733 return (pa); 11734 } 11735 } 11736 11737 /* 11738 * Carve up our nucleus hblk region. We may allocate more hblks than 11739 * asked for due to rounding errors but we are guaranteed to have at least 11740 * enough space to allocate the requested number of hblk8's and hblk1's.
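 *
 * Worked example of the bound computed below (hypothetical sizes;
 * say hme8blk_sz is 0x78 and hme1blk_sz is 0x58): with size 0x10000
 * and nhblk1 of 4, hblk8_bound = 0x10000 - 4*0x58 - 0x78 = 0xfe28,
 * so the hblk8 loop stops while at least nhblk1 hme1blk's worth of
 * space still remains for the second loop.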
11741 */ 11742 void 11743 sfmmu_init_nucleus_hblks(caddr_t addr, size_t size, int nhblk8, int nhblk1) 11744 { 11745 struct hme_blk *hmeblkp; 11746 size_t hme8blk_sz, hme1blk_sz; 11747 size_t i; 11748 size_t hblk8_bound; 11749 ulong_t j = 0, k = 0; 11750 11751 ASSERT(addr != NULL && size != 0); 11752 11753 /* Need to use proper structure alignment */ 11754 hme8blk_sz = roundup(HME8BLK_SZ, sizeof (int64_t)); 11755 hme1blk_sz = roundup(HME1BLK_SZ, sizeof (int64_t)); 11756 11757 nucleus_hblk8.list = (void *)addr; 11758 nucleus_hblk8.index = 0; 11759 11760 /* 11761 * Use as much memory as possible for hblk8's since we 11762 * expect all bop_alloc'ed memory to be allocated in 8k chunks. 11763 * We need to hold back enough space for the hblk1's which 11764 * we'll allocate next. 11765 */ 11766 hblk8_bound = size - (nhblk1 * hme1blk_sz) - hme8blk_sz; 11767 for (i = 0; i <= hblk8_bound; i += hme8blk_sz, j++) { 11768 hmeblkp = (struct hme_blk *)addr; 11769 addr += hme8blk_sz; 11770 hmeblkp->hblk_nuc_bit = 1; 11771 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp); 11772 } 11773 nucleus_hblk8.len = j; 11774 ASSERT(j >= nhblk8); 11775 SFMMU_STAT_ADD(sf_hblk8_ncreate, j); 11776 11777 nucleus_hblk1.list = (void *)addr; 11778 nucleus_hblk1.index = 0; 11779 for (; i <= (size - hme1blk_sz); i += hme1blk_sz, k++) { 11780 hmeblkp = (struct hme_blk *)addr; 11781 addr += hme1blk_sz; 11782 hmeblkp->hblk_nuc_bit = 1; 11783 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp); 11784 } 11785 ASSERT(k >= nhblk1); 11786 nucleus_hblk1.len = k; 11787 SFMMU_STAT_ADD(sf_hblk1_ncreate, k); 11788 } 11789 11790 /* 11791 * This function is currently not supported on this platform. For what 11792 * it's supposed to do, see hat.c and hat_srmmu.c 11793 */ 11794 /* ARGSUSED */ 11795 faultcode_t 11796 hat_softlock(struct hat *hat, caddr_t addr, size_t *lenp, page_t **ppp, 11797 uint_t flags) 11798 { 11799 ASSERT(hat->sfmmu_xhat_provider == NULL); 11800 return (FC_NOSUPPORT); 11801 } 11802 11803 /* 11804 * Searches the mapping list of the page for a mapping of the same size. If none 11805 * is found, the corresponding bit is cleared in the p_index field. When large 11806 * pages are more prevalent in the system, we can maintain the mapping list 11807 * in order and we don't have to traverse the list each time. Just check the 11808 * next and prev entries, and if both are of different size, we clear the bit. 11809 */ 11810 static void 11811 sfmmu_rm_large_mappings(page_t *pp, int ttesz) 11812 { 11813 struct sf_hment *sfhmep; 11814 struct hme_blk *hmeblkp; 11815 int index; 11816 pgcnt_t npgs; 11817 11818 ASSERT(ttesz > TTE8K); 11819 11820 ASSERT(sfmmu_mlist_held(pp)); 11821 11822 ASSERT(PP_ISMAPPED_LARGE(pp)); 11823 11824 /* 11825 * Traverse the mapping list looking for another mapping of the same size, 11826 * since we only want to clear the index field if all mappings of 11827 * that size are gone. 11828 */ 11829 11830 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 11831 hmeblkp = sfmmu_hmetohblk(sfhmep); 11832 if (hmeblkp->hblk_xhat_bit) 11833 continue; 11834 if (hme_size(sfhmep) == ttesz) { 11835 /* 11836 * another mapping of the same size. don't clear index. 11837 */ 11838 return; 11839 } 11840 } 11841 11842 /* 11843 * Clear the p_index bit for large page.
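 * For example, for a 4M mapping on 8K base pages, the loop below
 * clears the TTE4M index bit in each of the TTEPAGES(TTE4M) == 512
 * constituent page_t's, so the p_index test no longer reports a
 * 4M mapping on any of them once the last such mapping is gone.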
/*
 * Return supported features.
 */
/* ARGSUSED */
int
hat_supported(enum hat_features feature, void *arg)
{
	switch (feature) {
	case HAT_SHARED_PT:
	case HAT_DYNAMIC_ISM_UNMAP:
	case HAT_VMODSORT:
		return (1);
	default:
		return (0);
	}
}

void
hat_enter(struct hat *hat)
{
	hatlock_t *hatlockp;

	if (hat != ksfmmup) {
		hatlockp = TSB_HASH(hat);
		mutex_enter(HATLOCK_MUTEXP(hatlockp));
	}
}

void
hat_exit(struct hat *hat)
{
	hatlock_t *hatlockp;

	if (hat != ksfmmup) {
		hatlockp = TSB_HASH(hat);
		mutex_exit(HATLOCK_MUTEXP(hatlockp));
	}
}

/*ARGSUSED*/
void
hat_reserve(struct as *as, caddr_t addr, size_t len)
{
}

static void
hat_kstat_init(void)
{
	kstat_t *ksp;

	ksp = kstat_create("unix", 0, "sfmmu_global_stat", "hat",
	    KSTAT_TYPE_RAW, sizeof (struct sfmmu_global_stat),
	    KSTAT_FLAG_VIRTUAL);
	if (ksp) {
		ksp->ks_data = (void *) &sfmmu_global_stat;
		kstat_install(ksp);
	}
	ksp = kstat_create("unix", 0, "sfmmu_tsbsize_stat", "hat",
	    KSTAT_TYPE_RAW, sizeof (struct sfmmu_tsbsize_stat),
	    KSTAT_FLAG_VIRTUAL);
	if (ksp) {
		ksp->ks_data = (void *) &sfmmu_tsbsize_stat;
		kstat_install(ksp);
	}
	ksp = kstat_create("unix", 0, "sfmmu_percpu_stat", "hat",
	    KSTAT_TYPE_RAW, sizeof (struct sfmmu_percpu_stat) * NCPU,
	    KSTAT_FLAG_WRITABLE);
	if (ksp) {
		ksp->ks_update = sfmmu_kstat_percpu_update;
		kstat_install(ksp);
	}
}

/* ARGSUSED */
static int
sfmmu_kstat_percpu_update(kstat_t *ksp, int rw)
{
	struct sfmmu_percpu_stat *cpu_kstat = ksp->ks_data;
	struct tsbmiss *tsbm = tsbmiss_area;
	struct kpmtsbm *kpmtsbm = kpmtsbm_area;
	int i;

	ASSERT(cpu_kstat);
	if (rw == KSTAT_READ) {
		for (i = 0; i < NCPU; cpu_kstat++, tsbm++, kpmtsbm++, i++) {
			cpu_kstat->sf_itlb_misses = tsbm->itlb_misses;
			cpu_kstat->sf_dtlb_misses = tsbm->dtlb_misses;
			cpu_kstat->sf_utsb_misses = tsbm->utsb_misses -
			    tsbm->uprot_traps;
			cpu_kstat->sf_ktsb_misses = tsbm->ktsb_misses +
			    kpmtsbm->kpm_tsb_misses - tsbm->kprot_traps;

			if (tsbm->itlb_misses > 0 && tsbm->dtlb_misses > 0) {
				cpu_kstat->sf_tsb_hits =
				    (tsbm->itlb_misses + tsbm->dtlb_misses) -
				    (tsbm->utsb_misses + tsbm->ktsb_misses +
				    kpmtsbm->kpm_tsb_misses);
			} else {
				cpu_kstat->sf_tsb_hits = 0;
			}
			cpu_kstat->sf_umod_faults = tsbm->uprot_traps;
			cpu_kstat->sf_kmod_faults = tsbm->kprot_traps;
		}
	} else {
		/* KSTAT_WRITE is used to clear stats */
		for (i = 0; i < NCPU; tsbm++, kpmtsbm++, i++) {
			tsbm->itlb_misses = 0;
			tsbm->dtlb_misses = 0;
			tsbm->utsb_misses = 0;
			tsbm->ktsb_misses = 0;
			tsbm->uprot_traps = 0;
			tsbm->kprot_traps = 0;
			kpmtsbm->kpm_dtlb_misses = 0;
			kpmtsbm->kpm_tsb_misses = 0;
		}
	}
	return (0);
}
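
/*
 * A sketch of how these kstats might be examined from userland with
 * kstat(1M), assuming the tool can decode the raw structures (module
 * and names as created in hat_kstat_init() above):
 *
 *	$ kstat -m unix -n sfmmu_global_stat
 *	$ kstat -m unix -n sfmmu_percpu_stat
 *
 * Since sfmmu_percpu_stat is created with KSTAT_FLAG_WRITABLE, a write
 * to it reaches sfmmu_kstat_percpu_update() with KSTAT_WRITE and clears
 * the per-CPU tsbmiss/kpmtsbm counters.
 */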
#ifdef DEBUG

tte_t	*gorig[NCPU], *gcur[NCPU], *gnew[NCPU];

/*
 * A tte checker.
 * *orig_old is the value we read before the cas.
 * *cur is the value returned by the cas.
 * *new is the desired value when we do the cas.
 *
 * *hmeblkp is currently unused.
 */
/* ARGSUSED */
void
chk_tte(tte_t *orig_old, tte_t *cur, tte_t *new, struct hme_blk *hmeblkp)
{
	pfn_t i, j, k;
	int cpuid = CPU->cpu_id;

	gorig[cpuid] = orig_old;
	gcur[cpuid] = cur;
	gnew[cpuid] = new;

#ifdef lint
	hmeblkp = hmeblkp;
#endif

	if (TTE_IS_VALID(orig_old)) {
		if (TTE_IS_VALID(cur)) {
			i = TTE_TO_TTEPFN(orig_old);
			j = TTE_TO_TTEPFN(cur);
			k = TTE_TO_TTEPFN(new);
			if (i != j) {
				/* remap error? */
				panic("chk_tte: bad pfn, 0x%lx, 0x%lx", i, j);
			}

			if (i != k) {
				/* remap error? */
				panic("chk_tte: bad pfn2, 0x%lx, 0x%lx", i, k);
			}
		} else {
			if (TTE_IS_VALID(new)) {
				panic("chk_tte: invalid cur?");
			}

			i = TTE_TO_TTEPFN(orig_old);
			k = TTE_TO_TTEPFN(new);
			if (i != k) {
				panic("chk_tte: bad pfn3, 0x%lx, 0x%lx", i, k);
			}
		}
	} else {
		if (TTE_IS_VALID(cur)) {
			j = TTE_TO_TTEPFN(cur);
			if (TTE_IS_VALID(new)) {
				k = TTE_TO_TTEPFN(new);
				if (j != k) {
					panic("chk_tte: bad pfn4, 0x%lx, 0x%lx",
					    j, k);
				}
			} else {
				panic("chk_tte: why here?");
			}
		} else {
			if (!TTE_IS_VALID(new)) {
				panic("chk_tte: why here2?");
			}
		}
	}
}

#endif /* DEBUG */

extern void prefetch_tsbe_read(struct tsbe *);
extern void prefetch_tsbe_write(struct tsbe *);

/*
 * We want to prefetch 7 cache lines ahead for our read prefetch.  This gives
 * us optimal performance on Cheetah+.  You can only have 8 outstanding
 * prefetches at any one time, so we opted for 7 read prefetches and 1 write
 * prefetch to make the most of the prefetch capability.
 */
#define	TSBE_PREFETCH_STRIDE (7)
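
/*
 * Worked form of the stride arithmetic used in sfmmu_copy_tsb() below:
 * a TSBE is 16 bytes, so one 64-byte P$ line holds 4 TSBEs and a read
 * prefetch is issued every 4th entry.  A read prefetch reaches
 * TSBE_PREFETCH_STRIDE = 7 lines ahead; counting the current line, that
 * is 8 lines, so issuing stops once the loop index is within
 * 4 * (TSBE_PREFETCH_STRIDE + 1) = 32 entries of the end of the old
 * TSB, which is the last_prefetch cutoff computed in the copy loop.
 */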
void
sfmmu_copy_tsb(struct tsb_info *old_tsbinfo, struct tsb_info *new_tsbinfo)
{
	int old_bytes = TSB_BYTES(old_tsbinfo->tsb_szc);
	int new_bytes = TSB_BYTES(new_tsbinfo->tsb_szc);
	int old_entries = TSB_ENTRIES(old_tsbinfo->tsb_szc);
	int new_entries = TSB_ENTRIES(new_tsbinfo->tsb_szc);
	struct tsbe *old;
	struct tsbe *new;
	struct tsbe *new_base = (struct tsbe *)new_tsbinfo->tsb_va;
	uint64_t va;
	int new_offset;
	int i;
	int vpshift;
	int last_prefetch;

	if (old_bytes == new_bytes) {
		bcopy(old_tsbinfo->tsb_va, new_tsbinfo->tsb_va, new_bytes);
	} else {

		/*
		 * A TSBE is 16 bytes which means there are four TSBE's per
		 * P$ line (64 bytes), thus every 4 TSBE's we prefetch.
		 */
		old = (struct tsbe *)old_tsbinfo->tsb_va;
		last_prefetch = old_entries - (4*(TSBE_PREFETCH_STRIDE+1));
		for (i = 0; i < old_entries; i++, old++) {
			if (((i & (4-1)) == 0) && (i < last_prefetch))
				prefetch_tsbe_read(old);
			if (!old->tte_tag.tag_invalid) {
				/*
				 * We have a valid TTE to remap.  Check the
				 * size.  We won't remap 64K or 512K TTEs
				 * because they span more than one TSB entry
				 * and are indexed using an 8K virt. page.
				 * Ditto for 32M and 256M TTEs.
				 */
				if (TTE_CSZ(&old->tte_data) == TTE64K ||
				    TTE_CSZ(&old->tte_data) == TTE512K)
					continue;
				if (mmu_page_sizes == max_mmu_page_sizes) {
					if (TTE_CSZ(&old->tte_data) == TTE32M ||
					    TTE_CSZ(&old->tte_data) == TTE256M)
						continue;
				}

				/* clear the lower 22 bits of the va */
				va = *(uint64_t *)old << 22;
				/* turn va into a virtual pfn */
				va >>= 22 - TSB_START_SIZE;
				/*
				 * or in bits from the offset in the tsb
				 * to get the real virtual pfn.  These
				 * correspond to bits [21:13] in the va.
				 */
				vpshift =
				    TTE_BSZS_SHIFT(TTE_CSZ(&old->tte_data)) &
				    0x1ff;
				va |= (i << vpshift);
				va >>= vpshift;
				new_offset = va & (new_entries - 1);
				new = new_base + new_offset;
				prefetch_tsbe_write(new);
				*new = *old;
			}
		}
	}
}

/*
 * unused in sfmmu
 */
void
hat_dump(void)
{
}

/*
 * Called when a thread is exiting and we have switched to the kernel address
 * space.  Perform the same VM initialization resume() uses when switching
 * processes.
 *
 * Note that sfmmu_load_mmustate() is currently a no-op for kernel threads, but
 * we call it anyway in case the semantics change in the future.
 */
/*ARGSUSED*/
void
hat_thread_exit(kthread_t *thd)
{
	uint64_t pgsz_cnum;
	uint_t pstate_save;

	ASSERT(thd->t_procp->p_as == &kas);

	pgsz_cnum = KCONTEXT;
#ifdef sun4u
	pgsz_cnum |= (ksfmmup->sfmmu_cext << CTXREG_EXT_SHIFT);
#endif

	/*
	 * Note that sfmmu_load_mmustate() is currently a no-op for
	 * kernel threads.  We need to disable interrupts here,
	 * simply because otherwise sfmmu_load_mmustate() would panic
	 * if the caller does not disable interrupts.
	 */
	pstate_save = sfmmu_disable_intrs();
	sfmmu_setctx_sec(pgsz_cnum);
	sfmmu_load_mmustate(ksfmmup);
	sfmmu_enable_intrs(pstate_save);
}
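
/*
 * Note on the value built in hat_thread_exit() above (a reading of the
 * code, not new behavior): on sun4u the secondary context register
 * carries more than the bare context number, so pgsz_cnum combines
 * KCONTEXT in the low bits with the kernel's extended page-size field:
 *
 *	pgsz_cnum = KCONTEXT | (ksfmmup->sfmmu_cext << CTXREG_EXT_SHIFT);
 *
 * On other platforms only the kernel context number is written.
 */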