/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * VM - Hardware Address Translation management for Spitfire MMU.
 *
 * This file implements the machine specific hardware translation
 * needed by the VM system.  The machine independent interface is
 * described in <vm/hat.h> while the machine dependent interface
 * and data structures are described in <vm/hat_sfmmu.h>.
 *
 * The hat layer manages the address translation hardware as a cache
 * driven by calls from the higher levels in the VM system.
 */

#include <sys/types.h>
#include <vm/hat.h>
#include <vm/hat_sfmmu.h>
#include <vm/page.h>
#include <sys/pte.h>
#include <sys/systm.h>
#include <sys/mman.h>
#include <sys/sysmacros.h>
#include <sys/machparam.h>
#include <sys/vtrace.h>
#include <sys/kmem.h>
#include <sys/mmu.h>
#include <sys/cmn_err.h>
#include <sys/cpu.h>
#include <sys/cpuvar.h>
#include <sys/debug.h>
#include <sys/lgrp.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/vmsystm.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_kp.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kpm.h>
#include <vm/rm.h>
#include <sys/t_lock.h>
#include <sys/obpdefs.h>
#include <sys/vm_machparam.h>
#include <sys/var.h>
#include <sys/trap.h>
#include <sys/machtrap.h>
#include <sys/scb.h>
#include <sys/bitmap.h>
#include <sys/machlock.h>
#include <sys/membar.h>
#include <sys/atomic.h>
#include <sys/cpu_module.h>
#include <sys/prom_debug.h>
#include <sys/ksynch.h>
#include <sys/mem_config.h>
#include <sys/mem_cage.h>
#include <sys/dtrace.h>
#include <vm/vm_dep.h>
#include <vm/xhat_sfmmu.h>
#include <sys/fpu/fpusystm.h>

#if defined(SF_ERRATA_57)
extern caddr_t errata57_limit;
#endif

#define	HME8BLK_SZ_RND		((roundup(HME8BLK_SZ, sizeof (int64_t))) /  \
				(sizeof (int64_t)))
#define	HBLK_RESERVE		((struct hme_blk *)hblk_reserve)

#define	HBLK_RESERVE_CNT	128
#define	HBLK_RESERVE_MIN	20

static struct hme_blk *freehblkp;
static kmutex_t freehblkp_lock;
static int freehblkcnt;

static int64_t hblk_reserve[HME8BLK_SZ_RND];
static kmutex_t hblk_reserve_lock;
static kthread_t *hblk_reserve_thread;

static nucleus_hblk8_info_t nucleus_hblk8;
static nucleus_hblk1_info_t nucleus_hblk1;

/*
 * SFMMU specific hat functions
 */
void	hat_pagecachectl(struct page *, int);

/* flags for hat_pagecachectl */
#define	HAT_CACHE	0x1
#define	HAT_UNCACHE	0x2
#define	HAT_TMPNC	0x4
/*
 * Flag to allow the creation of non-cacheable translations
 * to system memory.  It is off by default.  At the moment this
 * flag is used by the ecache error injector.  The error injector
 * will turn it on when creating such a translation then shut it
 * off when it's finished.
 */

int	sfmmu_allow_nc_trans = 0;

/*
 * Flag to disable large page support.
 *	value of 1 => disable all large pages.
 *	bits 1, 2, and 3 are to disable 64K, 512K and 4M pages respectively.
 *
 * For example, use the value 0x4 to disable 512K pages.
 */
#define	LARGE_PAGES_OFF		0x1

/*
 * WARNING: 512K pages MUST be disabled for ISM/DISM.  If not,
 * a process would page fault indefinitely when it tried to
 * access a 512K page.
 */
int	disable_ism_large_pages = (1 << TTE512K);
int	disable_large_pages = 0;
int	disable_auto_large_pages = 0;

/*
 * Private sfmmu data structures for hat management
 */
static struct kmem_cache *sfmmuid_cache;

/*
 * Private sfmmu data structures for ctx management
 */
static struct ctx	*ctxhand;	/* hand used while stealing ctxs */
static struct ctx	*ctxfree;	/* head of free ctx list */
static struct ctx	*ctxdirty;	/* head of dirty ctx list */

/*
 * Private sfmmu data structures for tsb management
 */
static struct kmem_cache *sfmmu_tsbinfo_cache;
static struct kmem_cache *sfmmu_tsb8k_cache;
static struct kmem_cache *sfmmu_tsb_cache[NLGRPS_MAX];
static vmem_t *kmem_tsb_arena;

/*
 * sfmmu static variables for hmeblk resource management.
 */
static vmem_t *hat_memload1_arena; /* HAT translation arena for sfmmu1_cache */
static struct kmem_cache *sfmmu8_cache;
static struct kmem_cache *sfmmu1_cache;
static struct kmem_cache *pa_hment_cache;

static kmutex_t		ctx_list_lock;	/* mutex for ctx free/dirty lists */
static kmutex_t		ism_mlist_lock;	/* mutex for ism mapping list */
/*
 * private data for ism
 */
static struct kmem_cache *ism_blk_cache;
static struct kmem_cache *ism_ment_cache;
#define	ISMID_STARTADDR	NULL

/*
 * Whether to delay TLB flushes and use Cheetah's flush-all support
 * when removing contexts from the dirty list.
 */
int delay_tlb_flush;
int disable_delay_tlb_flush;

/*
 * ``hat_lock'' is a hashed mutex lock for protecting sfmmu TSB lists,
 * HAT flags, synchronizing TLB/TSB coherency, and context management.
 * The lock is hashed on the sfmmup since the case where we need to lock
 * all processes is rare but does occur (e.g. we need to unload a shared
 * mapping from all processes using the mapping).  We have a lot of buckets,
 * and each slab of sfmmu_t's can use about a quarter of them, giving us
 * a fairly good distribution without wasting too much space and overhead
 * when we have to grab them all.
 */
#define	SFMMU_NUM_LOCK	128		/* must be power of two */
hatlock_t	hat_lock[SFMMU_NUM_LOCK];

/*
 * Hash algorithm optimized for a small number of slabs.
 *  7 is (highbit((sizeof sfmmu_t)) - 1)
 * This hash algorithm is based upon the knowledge that sfmmu_t's come from a
 * kmem_cache, and thus they will be sequential within that cache.  In
 * addition, each new slab will have a different "color" up to cache_maxcolor
 * which will skew the hashing for each successive slab which is allocated.
 * If the size of sfmmu_t grows larger, this algorithm may need
 * to be revisited.
 */
#define	TSB_HASH_SHIFT_BITS (7)
#define	PTR_HASH(x) ((uintptr_t)x >> TSB_HASH_SHIFT_BITS)

#ifdef DEBUG
int tsb_hash_debug = 0;
#define	TSB_HASH(sfmmup)	(tsb_hash_debug ? &hat_lock[0] : \
				&hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)])
#else	/* DEBUG */
#define	TSB_HASH(sfmmup)	&hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)]
#endif	/* DEBUG */
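/*
 * For illustration (addresses assumed, purely as an example): PTR_HASH()
 * discards the low 7 bits of the sfmmu_t pointer, so consecutive sfmmu_t's
 * from the same kmem_cache slab tend to land in different hat_lock buckets.
 * Assuming two allocations spaced 128 bytes apart at 0x30001000 and
 * 0x30001080, PTR_HASH() yields 0x600020 and 0x600021, which select
 * adjacent, distinct buckets after masking with SFMMU_NUM_LOCK - 1.
 */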
/* sfmmu_replace_tsb() return codes. */
typedef enum tsb_replace_rc {
	TSB_SUCCESS,
	TSB_ALLOCFAIL,
	TSB_LOSTRACE,
	TSB_ALREADY_SWAPPED,
	TSB_CANTGROW
} tsb_replace_rc_t;

/*
 * Flags for TSB allocation routines.
 */
#define	TSB_ALLOC	0x01
#define	TSB_FORCEALLOC	0x02
#define	TSB_GROW	0x04
#define	TSB_SHRINK	0x08
#define	TSB_SWAPIN	0x10

/*
 * Support for HAT callbacks.
 */
#define	SFMMU_MAX_RELOC_CALLBACKS	10
int sfmmu_max_cb_id = SFMMU_MAX_RELOC_CALLBACKS;
static id_t sfmmu_cb_nextid = 0;
static id_t sfmmu_tsb_cb_id;
struct sfmmu_callback *sfmmu_cb_table;

/*
 * Kernel page relocation is enabled by default for non-caged
 * kernel pages.  This has little effect unless segkmem_reloc is
 * set, since by default kernel memory comes from inside the
 * kernel cage.
 */
int hat_kpr_enabled = 1;

kmutex_t	kpr_mutex;
kmutex_t	kpr_suspendlock;
kthread_t	*kreloc_thread;

/*
 * Enable VA->PA translation sanity checking on DEBUG kernels.
 * Disabled by default.  This is incompatible with some
 * drivers (error injector, RSM) so if it breaks you get
 * to keep both pieces.
 */
int hat_check_vtop = 0;
/*
 * Private sfmmu routines (prototypes)
 */
static struct hme_blk *sfmmu_shadow_hcreate(sfmmu_t *, caddr_t, int, uint_t);
static struct hme_blk *sfmmu_hblk_alloc(sfmmu_t *, caddr_t,
			struct hmehash_bucket *, uint_t, hmeblk_tag, uint_t);
static caddr_t	sfmmu_hblk_unload(struct hat *, struct hme_blk *, caddr_t,
			caddr_t, demap_range_t *, uint_t);
static caddr_t	sfmmu_hblk_sync(struct hat *, struct hme_blk *, caddr_t,
			caddr_t, int);
static void	sfmmu_hblk_free(struct hmehash_bucket *, struct hme_blk *,
			uint64_t, struct hme_blk **);
static void	sfmmu_hblks_list_purge(struct hme_blk **);
static uint_t	sfmmu_get_free_hblk(struct hme_blk **, uint_t);
static uint_t	sfmmu_put_free_hblk(struct hme_blk *, uint_t);
static struct hme_blk *sfmmu_hblk_steal(int);
static int	sfmmu_steal_this_hblk(struct hmehash_bucket *,
			struct hme_blk *, uint64_t, uint64_t,
			struct hme_blk *);
static caddr_t	sfmmu_hblk_unlock(struct hme_blk *, caddr_t, caddr_t);

static void	sfmmu_memload_batchsmall(struct hat *, caddr_t, page_t **,
			uint_t, uint_t, pgcnt_t);
void		sfmmu_tteload(struct hat *, tte_t *, caddr_t, page_t *,
			uint_t);
static int	sfmmu_tteload_array(sfmmu_t *, tte_t *, caddr_t, page_t **,
			uint_t);
static struct hmehash_bucket *sfmmu_tteload_acquire_hashbucket(sfmmu_t *,
			caddr_t, int);
static struct hme_blk *sfmmu_tteload_find_hmeblk(sfmmu_t *,
			struct hmehash_bucket *, caddr_t, uint_t, uint_t);
static int	sfmmu_tteload_addentry(sfmmu_t *, struct hme_blk *, tte_t *,
			caddr_t, page_t **, uint_t);
static void	sfmmu_tteload_release_hashbucket(struct hmehash_bucket *);

static int	sfmmu_pagearray_setup(caddr_t, page_t **, tte_t *, int);
pfn_t		sfmmu_uvatopfn(caddr_t, sfmmu_t *);
void		sfmmu_memtte(tte_t *, pfn_t, uint_t, int);
static void	sfmmu_vac_conflict(struct hat *, caddr_t, page_t *);
static int	sfmmu_vacconflict_array(caddr_t, page_t *, int *);
static int	tst_tnc(page_t *pp, pgcnt_t);
static void	conv_tnc(page_t *pp, int);

static struct ctx *sfmmu_get_ctx(sfmmu_t *);
static void	sfmmu_free_ctx(sfmmu_t *, struct ctx *);
static void	sfmmu_free_sfmmu(sfmmu_t *);

static void	sfmmu_gettte(struct hat *, caddr_t, tte_t *);
static void	sfmmu_ttesync(struct hat *, caddr_t, tte_t *, page_t *);
static void	sfmmu_chgattr(struct hat *, caddr_t, size_t, uint_t, int);

static cpuset_t	sfmmu_pageunload(page_t *, struct sf_hment *, int);
static void	hat_pagereload(struct page *, struct page *);
static cpuset_t	sfmmu_pagesync(page_t *, struct sf_hment *, uint_t);
static void	sfmmu_page_cache_array(page_t *, int, int, pgcnt_t);
static void	sfmmu_page_cache(page_t *, int, int, int);

static void	sfmmu_tlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *,
			pfn_t, int, int, int, int);
static void	sfmmu_ismtlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *,
			pfn_t, int);
static void	sfmmu_tlb_demap(caddr_t, sfmmu_t *, struct hme_blk *, int, int);
static void	sfmmu_tlb_range_demap(demap_range_t *);
static void	sfmmu_tlb_ctx_demap(sfmmu_t *);
static void	sfmmu_tlb_all_demap(void);
static void	sfmmu_tlb_swap_ctx(sfmmu_t *, struct ctx *);
static void	sfmmu_sync_mmustate(sfmmu_t *);

static void	sfmmu_tsbinfo_setup_phys(struct tsb_info *, pfn_t);
static int	sfmmu_tsbinfo_alloc(struct tsb_info **, int, int, uint_t,
			sfmmu_t *);
static void	sfmmu_tsb_free(struct tsb_info *);
static void	sfmmu_tsbinfo_free(struct tsb_info *);
static int	sfmmu_init_tsbinfo(struct tsb_info *, int, int, uint_t,
			sfmmu_t *);

static void	sfmmu_tsb_swapin(sfmmu_t *, hatlock_t *);
static int	sfmmu_select_tsb_szc(pgcnt_t);
static void	sfmmu_mod_tsb(sfmmu_t *, caddr_t, tte_t *, int);
#define	sfmmu_load_tsb(sfmmup, vaddr, tte, szc) \
	sfmmu_mod_tsb(sfmmup, vaddr, tte, szc)
#define	sfmmu_unload_tsb(sfmmup, vaddr, szc)    \
	sfmmu_mod_tsb(sfmmup, vaddr, NULL, szc)
static void	sfmmu_copy_tsb(struct tsb_info *, struct tsb_info *);
static tsb_replace_rc_t sfmmu_replace_tsb(sfmmu_t *, struct tsb_info *, uint_t,
    hatlock_t *, uint_t);
static void	sfmmu_size_tsb(sfmmu_t *, int, uint64_t, uint64_t, int);
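/*
 * Note on the two wrappers above: loading and unloading a TSB entry share
 * one underlying routine; as the macro expansions show, sfmmu_mod_tsb()
 * treats a NULL tte pointer as a request to invalidate the entry rather
 * than write a new one.
 */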
static void	sfmmu_cache_flush(pfn_t, int);
void		sfmmu_cache_flushcolor(int, pfn_t);
static caddr_t	sfmmu_hblk_chgattr(sfmmu_t *, struct hme_blk *, caddr_t,
			caddr_t, demap_range_t *, uint_t, int);

static uint64_t	sfmmu_vtop_attr(uint_t, int mode, tte_t *);
static uint_t	sfmmu_ptov_attr(tte_t *);
static caddr_t	sfmmu_hblk_chgprot(sfmmu_t *, struct hme_blk *, caddr_t,
			caddr_t, demap_range_t *, uint_t);
static uint_t	sfmmu_vtop_prot(uint_t, uint_t *);
static int	sfmmu_idcache_constructor(void *, void *, int);
static void	sfmmu_idcache_destructor(void *, void *);
static int	sfmmu_hblkcache_constructor(void *, void *, int);
static void	sfmmu_hblkcache_destructor(void *, void *);
static void	sfmmu_hblkcache_reclaim(void *);
static void	sfmmu_shadow_hcleanup(sfmmu_t *, struct hme_blk *,
			struct hmehash_bucket *);
static void	sfmmu_free_hblks(sfmmu_t *, caddr_t, caddr_t, int);

static void	sfmmu_reuse_ctx(struct ctx *, sfmmu_t *);
static void	sfmmu_disallow_ctx_steal(sfmmu_t *);
static void	sfmmu_allow_ctx_steal(sfmmu_t *);

static void	sfmmu_rm_large_mappings(page_t *, int);

static void	hat_lock_init(void);
static void	hat_kstat_init(void);
static int	sfmmu_kstat_percpu_update(kstat_t *ksp, int rw);
static void	sfmmu_check_page_sizes(sfmmu_t *, int);
static int	fnd_mapping_sz(page_t *);
static void	iment_add(struct ism_ment *, struct hat *);
static void	iment_sub(struct ism_ment *, struct hat *);
static pgcnt_t	ism_tsb_entries(sfmmu_t *, int szc);
extern void	sfmmu_setup_tsbinfo(sfmmu_t *);
extern void	sfmmu_clear_utsbinfo(void);

/* kpm prototypes */
static caddr_t	sfmmu_kpm_mapin(page_t *);
static void	sfmmu_kpm_mapout(page_t *, caddr_t);
static int	sfmmu_kpme_lookup(struct kpme *, page_t *);
static void	sfmmu_kpme_add(struct kpme *, page_t *);
static void	sfmmu_kpme_sub(struct kpme *, page_t *);
static caddr_t	sfmmu_kpm_getvaddr(page_t *, int *);
static int	sfmmu_kpm_fault(caddr_t, struct memseg *, page_t *);
static int	sfmmu_kpm_fault_small(caddr_t, struct memseg *, page_t *);
static void	sfmmu_kpm_vac_conflict(page_t *, caddr_t);
static void	sfmmu_kpm_pageunload(page_t *);
static void	sfmmu_kpm_vac_unload(page_t *, caddr_t);
static void	sfmmu_kpm_demap_large(caddr_t);
static void	sfmmu_kpm_demap_small(caddr_t);
static void	sfmmu_kpm_demap_tlbs(caddr_t, int);
static void	sfmmu_kpm_hme_unload(page_t *);
static kpm_hlk_t *sfmmu_kpm_kpmp_enter(page_t *, pgcnt_t);
static void	sfmmu_kpm_kpmp_exit(kpm_hlk_t *kpmp);
static void	sfmmu_kpm_page_cache(page_t *, int, int);

/* kpm globals */
#ifdef DEBUG
/*
 * Enable trap level tsbmiss handling
 */
int	kpm_tsbmtl = 1;

/*
 * Flush the TLB on kpm mapout.  Note: Xcalls are used (again) for the
 * required TLB shootdowns in this case, so handle w/ care.  Off by default.
 */
int	kpm_tlb_flush;
#endif	/* DEBUG */

static void	*sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *, size_t, int);

#ifdef DEBUG
static void	sfmmu_check_hblk_flist();
#endif

/*
 * Semi-private sfmmu data structures.  Some of them are initialized in
 * startup or in hat_init.  Some of them are private but accessed by
 * assembly code or mach_sfmmu.c
 */
struct hmehash_bucket *uhme_hash;	/* user hmeblk hash table */
struct hmehash_bucket *khme_hash;	/* kernel hmeblk hash table */
uint64_t	uhme_hash_pa;		/* PA of uhme_hash */
uint64_t	khme_hash_pa;		/* PA of khme_hash */
int		uhmehash_num;		/* # of buckets in user hash table */
int		khmehash_num;		/* # of buckets in kernel hash table */
struct ctx	*ctxs;			/* used by <machine/mmu.c> */
uint_t		nctxs;			/* total number of contexts */

int		cache;			/* describes system cache */

caddr_t		ktsb_base;	/* kernel 8k-indexed tsb base address */
uint64_t	ktsb_pbase;	/* kernel 8k-indexed tsb phys address */
int		ktsb_szcode;	/* kernel 8k-indexed tsb size code */
int		ktsb_sz;	/* kernel 8k-indexed tsb size */

caddr_t		ktsb4m_base;	/* kernel 4m-indexed tsb base address */
uint64_t	ktsb4m_pbase;	/* kernel 4m-indexed tsb phys address */
int		ktsb4m_szcode;	/* kernel 4m-indexed tsb size code */
int		ktsb4m_sz;	/* kernel 4m-indexed tsb size */

uint64_t	kpm_tsbbase;	/* kernel seg_kpm 4M TSB base address */
int		kpm_tsbsz;	/* kernel seg_kpm 4M TSB size code */
uint64_t	kpmsm_tsbbase;	/* kernel seg_kpm 8K TSB base address */
int		kpmsm_tsbsz;	/* kernel seg_kpm 8K TSB size code */

#ifndef sun4v
int		utsb_dtlb_ttenum = -1;	/* index in TLB for utsb locked TTE */
int		utsb4m_dtlb_ttenum = -1; /* index in TLB for 4M TSB TTE */
int		dtlb_resv_ttenum;	/* index in TLB of first reserved TTE */
caddr_t		utsb_vabase;		/* reserved kernel virtual memory */
caddr_t		utsb4m_vabase;		/* for trap handler TSB accesses */
#endif	/* sun4v */
uint64_t	tsb_alloc_bytes = 0;	/* bytes allocated to TSBs */
vmem_t		*kmem_tsb_default_arena[NLGRPS_MAX];	/* For dynamic TSBs */

/*
 * Size to use for TSB slabs.  Future platforms that support page sizes
 * larger than 4M may wish to change these values, and provide their own
 * assembly macros for building and decoding the TSB base register contents.
 */
uint_t	tsb_slab_size = MMU_PAGESIZE4M;
uint_t	tsb_slab_shift = MMU_PAGESHIFT4M;
uint_t	tsb_slab_ttesz = TTE4M;
uint_t	tsb_slab_mask = 0x1ff;	/* 4M page alignment for 8K pfn */
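/*
 * For illustration: tsb_slab_mask follows from the slab and base page
 * sizes.  A 4M slab covers 4M / 8K = 512 base pages, so the low 9 bits
 * of an 8K pfn (0x1ff) must be clear for the pfn to be slab-aligned.
 * The 512K fallback configured in hat_init() below uses 0x3f for the
 * same reason (512K / 8K = 64 pages).
 */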
/* largest TSB size to grow to, will be smaller on smaller memory systems */
int	tsb_max_growsize = UTSB_MAX_SZCODE;

/*
 * Tunable parameters dealing with TSB policies.
 */

/*
 * This undocumented tunable forces all 8K TSBs to be allocated from
 * the kernel heap rather than from the kmem_tsb_default_arena arenas.
 */
#ifdef	DEBUG
int	tsb_forceheap = 0;
#endif	/* DEBUG */

/*
 * Decide whether to use per-lgroup arenas, or one global set of
 * TSB arenas.  The default is not to break up per-lgroup, since
 * most platforms don't recognize any tangible benefit from it.
 */
int	tsb_lgrp_affinity = 0;

/*
 * Used for growing the TSB based on the process RSS.
 * tsb_rss_factor is based on the smallest TSB, and is
 * shifted by the TSB size to determine if we need to grow.
 * The default will grow the TSB if the number of TTEs for
 * this page size exceeds 75% of the number of TSB entries,
 * which should _almost_ eliminate all conflict misses
 * (at the expense of using up lots and lots of memory).
 */
#define	TSB_RSS_FACTOR		(TSB_ENTRIES(TSB_MIN_SZCODE) * 0.75)
#define	SFMMU_RSS_TSBSIZE(tsbszc)	(tsb_rss_factor << tsbszc)
#define	SELECT_TSB_SIZECODE(pgcnt) ( \
	(enable_tsb_rss_sizing)? sfmmu_select_tsb_szc(pgcnt) : \
	default_tsb_size)
#define	TSB_OK_SHRINK()	\
	(tsb_alloc_bytes > tsb_alloc_hiwater || freemem < desfree)
#define	TSB_OK_GROW()	\
	(tsb_alloc_bytes < tsb_alloc_hiwater && freemem > desfree)

int	enable_tsb_rss_sizing = 1;
int	tsb_rss_factor	= (int)TSB_RSS_FACTOR;
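/*
 * Worked example for the macros above (assuming the smallest TSB holds
 * 512 entries; that value is an assumption for illustration, not taken
 * from this file): tsb_rss_factor computes to 512 * 0.75 = 384.  A TSB
 * of size code szc holds 512 << szc entries, so a process whose TTE
 * count for a page size reaches SFMMU_RSS_TSBSIZE(szc) = 384 << szc has
 * filled 75% of that TSB, and sfmmu_select_tsb_szc() would choose the
 * next larger size code (subject to TSB_OK_GROW() and tsb_max_growsize).
 */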
/* which TSB size code to use for new address spaces or if rss sizing off */
int default_tsb_size = TSB_8K_SZCODE;

static uint64_t tsb_alloc_hiwater; /* limit TSB reserved memory */
uint64_t tsb_alloc_hiwater_factor; /* tsb_alloc_hiwater = physmem / this */
#define	TSB_ALLOC_HIWATER_FACTOR_DEFAULT	32

#ifdef DEBUG
static int tsb_random_size = 0;	/* set to 1 to test random tsb sizes on alloc */
static int tsb_grow_stress = 0;	/* if set to 1, keep replacing TSB w/ random */
static int tsb_alloc_mtbf = 0;	/* fail allocation every n attempts */
static int tsb_alloc_fail_mtbf = 0;
static int tsb_alloc_count = 0;
#endif /* DEBUG */

/* if set to 1, will remap valid TTEs when growing TSB. */
int tsb_remap_ttes = 1;

/*
 * If we have more than this many mappings, allocate a second TSB.
 * This default is chosen because the I/D fully associative TLBs are
 * assumed to have at least 8 available entries.  Platforms with a
 * larger fully-associative TLB could probably override the default.
 */
int tsb_sectsb_threshold = 8;

/*
 * kstat data
 */
struct sfmmu_global_stat sfmmu_global_stat;
struct sfmmu_tsbsize_stat sfmmu_tsbsize_stat;

/*
 * Global data
 */
sfmmu_t		*ksfmmup;		/* kernel's hat id */
struct ctx	*kctx;			/* kernel's context */

#ifdef DEBUG
static void	chk_tte(tte_t *, tte_t *, tte_t *, struct hme_blk *);
#endif

/* sfmmu locking operations */
static kmutex_t *sfmmu_mlspl_enter(struct page *, int);
static int	sfmmu_mlspl_held(struct page *, int);

static kmutex_t *sfmmu_page_enter(page_t *);
static void	sfmmu_page_exit(kmutex_t *);
static int	sfmmu_page_spl_held(struct page *);

/* sfmmu internal locking operations - accessed directly */
static void	sfmmu_mlist_reloc_enter(page_t *, page_t *,
				kmutex_t **, kmutex_t **);
static void	sfmmu_mlist_reloc_exit(kmutex_t *, kmutex_t *);
static hatlock_t *
		sfmmu_hat_enter(sfmmu_t *);
static hatlock_t *
		sfmmu_hat_tryenter(sfmmu_t *);
static void	sfmmu_hat_exit(hatlock_t *);
static void	sfmmu_hat_lock_all(void);
static void	sfmmu_hat_unlock_all(void);
static void	sfmmu_ismhat_enter(sfmmu_t *, int);
static void	sfmmu_ismhat_exit(sfmmu_t *, int);

/*
 * Array of mutexes protecting a page's mapping list and p_nrm field.
 *
 * The hash function looks complicated, but is made up so that:
 *
 * "pp" not shifted, so adjacent pp values will hash to different cache lines
 * (8 byte alignment * 8 bytes/mutex == 64 byte coherency subblock)
 *
 * "pp" >> mml_shift, incorporates more source bits into the hash result
 *
 * "& (mml_table_sz - 1)", which should be faster than using remainder "%"
 *
 * Hopefully, mml_table, mml_table_sz and mml_shift are all in the same
 * cacheline, since they get declared next to each other below.  We'll trust
 * ld not to do something random.
 */
#ifdef DEBUG
int mlist_hash_debug = 0;
#define	MLIST_HASH(pp)	(mlist_hash_debug ? &mml_table[0] : \
	&mml_table[((uintptr_t)(pp) + \
	((uintptr_t)(pp) >> mml_shift)) & (mml_table_sz - 1)])
#else	/* !DEBUG */
#define	MLIST_HASH(pp)   &mml_table[ \
	((uintptr_t)(pp) + ((uintptr_t)(pp) >> mml_shift)) & (mml_table_sz - 1)]
#endif	/* !DEBUG */

kmutex_t		*mml_table;
uint_t			mml_table_sz;	/* must be a power of 2 */
uint_t			mml_shift;	/* log2(mml_table_sz) + 3 for align */
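/*
 * For illustration (table size and addresses assumed): with
 * mml_table_sz = 256, mml_shift is 8 + 3 = 11.  Two page_t's 64 bytes
 * apart, say at 0x70000a00 and 0x70000a40, hash to
 * (0x70000a00 + (0x70000a00 >> 11)) & 0xff = 0x01 and
 * (0x70000a40 + (0x70000a40 >> 11)) & 0xff = 0x41 -- buckets 64 entries
 * (512 bytes) apart, so their mutexes never share a coherency subblock.
 */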
/*
 * kpm_page lock hash.
 * All slots should be used equally and 2 adjacent kpm_page_t's
 * shouldn't have their mutexes in the same cache line.
 */
#ifdef	DEBUG
int kpmp_hash_debug = 0;
#define	KPMP_HASH(kpp)	(kpmp_hash_debug ? &kpmp_table[0] : &kpmp_table[ \
	((uintptr_t)(kpp) + ((uintptr_t)(kpp) >> kpmp_shift)) \
	& (kpmp_table_sz - 1)])
#else	/* !DEBUG */
#define	KPMP_HASH(kpp)	&kpmp_table[ \
	((uintptr_t)(kpp) + ((uintptr_t)(kpp) >> kpmp_shift)) \
	& (kpmp_table_sz - 1)]
#endif	/* DEBUG */

kpm_hlk_t	*kpmp_table;
uint_t		kpmp_table_sz;	/* must be a power of 2 */
uchar_t		kpmp_shift;

#ifdef	DEBUG
#define	KPMP_SHASH(kpp)	(kpmp_hash_debug ? &kpmp_stable[0] : &kpmp_stable[ \
	(((uintptr_t)(kpp) << kpmp_shift) + (uintptr_t)(kpp)) \
	& (kpmp_stable_sz - 1)])
#else	/* !DEBUG */
#define	KPMP_SHASH(kpp)	&kpmp_stable[ \
	(((uintptr_t)(kpp) << kpmp_shift) + (uintptr_t)(kpp)) \
	& (kpmp_stable_sz - 1)]
#endif	/* DEBUG */

kpm_shlk_t	*kpmp_stable;
uint_t		kpmp_stable_sz;	/* must be a power of 2 */

/*
 * SPL_HASH was improved to avoid false cache line sharing
 */
#define	SPL_TABLE_SIZE	128
#define	SPL_MASK	(SPL_TABLE_SIZE - 1)
#define	SPL_SHIFT	7		/* log2(SPL_TABLE_SIZE) */

#define	SPL_INDEX(pp) \
	((((uintptr_t)(pp) >> SPL_SHIFT) ^ \
	((uintptr_t)(pp) >> (SPL_SHIFT << 1))) & \
	(SPL_TABLE_SIZE - 1))

#define	SPL_HASH(pp)    \
	(&sfmmu_page_lock[SPL_INDEX(pp) & SPL_MASK].pad_mutex)

static	pad_mutex_t	sfmmu_page_lock[SPL_TABLE_SIZE];


/*
 * hat_unload_callback() will group together callbacks in order
 * to avoid xt_sync() calls.  This is the maximum size of the group.
 */
#define	MAX_CB_ADDR	32

#ifdef DEBUG

/*
 * Debugging trace ring buffer for stolen and freed ctxs.  The
 * stolen_ctxs[] array is protected by the ctx_trace_mutex.
 */
struct ctx_trace stolen_ctxs[TRSIZE];
struct ctx_trace *ctx_trace_first = &stolen_ctxs[0];
struct ctx_trace *ctx_trace_last = &stolen_ctxs[TRSIZE-1];
struct ctx_trace *ctx_trace_ptr = &stolen_ctxs[0];
kmutex_t ctx_trace_mutex;
uint_t	num_ctx_stolen = 0;

int	ism_debug = 0;

#endif /* DEBUG */

tte_t	hw_tte;
static ulong_t sfmmu_dmr_maxbit = DMR_MAXBIT;

/*
 * kpm virtual address to physical address
 */
#define	SFMMU_KPM_VTOP(vaddr, paddr) {					\
	uintptr_t r, v;							\
									\
	r = ((vaddr) - kpm_vbase) >> (uintptr_t)kpm_size_shift;		\
	(paddr) = (vaddr) - kpm_vbase;					\
	if (r != 0) {							\
		v = ((uintptr_t)(vaddr) >> MMU_PAGESHIFT) &		\
		    vac_colors_mask;					\
		(paddr) -= r << kpm_size_shift;				\
		if (r > v)						\
			(paddr) += (r - v) << MMU_PAGESHIFT;		\
		else							\
			(paddr) -= r << MMU_PAGESHIFT;			\
	}								\
}
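/*
 * Worked trace of the macro above (all parameter values assumed, purely
 * for illustration): with kpm_vbase = 0, kpm_size_shift = 22 (4M
 * regions), MMU_PAGESHIFT = 13 (8K pages) and vac_colors_mask = 3,
 * vaddr = 0x800000 gives r = 2 and color v = (0x800000 >> 13) & 3 = 0.
 * paddr starts at 0x800000, drops by r << 22 back to 0, then (since
 * r > v) gains (2 - 0) << 13 = 0x4000.  Regions beyond the first are
 * thus collapsed back onto physical memory after removing the
 * per-color offset built into the kpm virtual layout.
 */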
/*
 * Wrapper for vmem_xalloc since vmem_create only allows limited
 * parameters for vm_source_alloc functions.  This function allows us
 * to specify alignment consistent with the size of the object being
 * allocated.
 */
static void *
sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *vmp, size_t size, int vmflag)
{
	return (vmem_xalloc(vmp, size, size, 0, 0, NULL, NULL, vmflag));
}

/* Common code for setting tsb_alloc_hiwater. */
#define	SFMMU_SET_TSB_ALLOC_HIWATER(pages)	tsb_alloc_hiwater = \
	ptob(pages) / tsb_alloc_hiwater_factor

/*
 * Set tsb_max_growsize to allow at most all of physical memory to be mapped by
 * a single TSB.  physmem is the number of physical pages so we need physmem 8K
 * TTEs to represent all those physical pages.  We round this up by using
 * 1<<highbit().  To figure out which size code to use, remember that the size
 * code is just an amount to shift the smallest TSB size to get the size of
 * this TSB.  So we subtract that size, TSB_START_SIZE, from highbit() (or
 * highbit() - 1) to get the size code for the smallest TSB that can represent
 * all of physical memory, while erring on the side of too much.
 *
 * If the computed size code is less than the current tsb_max_growsize, we set
 * tsb_max_growsize to the computed size code.  In the case where the computed
 * size code is greater than tsb_max_growsize, we have these restrictions that
 * apply to increasing tsb_max_growsize:
 *	1) TSBs can't grow larger than the TSB slab size
 *	2) TSBs can't grow larger than UTSB_MAX_SZCODE.
 */
#define	SFMMU_SET_TSB_MAX_GROWSIZE(pages) {				\
	int	i, szc;							\
									\
	i = highbit(pages);						\
	if ((1 << (i - 1)) == (pages))					\
		i--;		/* 2^n case, round down */		\
	szc = i - TSB_START_SIZE;					\
	if (szc < tsb_max_growsize)					\
		tsb_max_growsize = szc;					\
	else if ((szc > tsb_max_growsize) &&				\
	    (szc <= tsb_slab_shift - (TSB_START_SIZE + TSB_ENTRY_SHIFT))) \
		tsb_max_growsize = MIN(szc, UTSB_MAX_SZCODE);		\
}
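/*
 * Worked example for the macro above (assuming TSB_START_SIZE is 9,
 * i.e. the smallest TSB holds 2^9 entries, and TSB_ENTRY_SHIFT is 4
 * for 16-byte entries; both values are assumptions for illustration):
 * with pages = 2^21, highbit() returns 22 and the exact-power-of-two
 * test drops i to 21, so szc = 21 - 9 = 12.  A 4M slab (tsb_slab_shift
 * of 22) holds 2^(22-4) entries, i.e. size code 22 - (9 + 4) = 9, so a
 * request to raise tsb_max_growsize to 12 fails the slab-size test and
 * the limit is left unchanged.
 */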
/*
 * Given a pointer to an sfmmu and a TTE size code, return a pointer to the
 * tsb_info which handles that TTE size.
 */
#define	SFMMU_GET_TSBINFO(tsbinfop, sfmmup, tte_szc)	\
	(tsbinfop) = (sfmmup)->sfmmu_tsb;		\
	ASSERT(sfmmu_hat_lock_held(sfmmup));		\
	if ((tte_szc) >= TTE4M)				\
		(tsbinfop) = (tsbinfop)->tsb_next;

/*
 * Return the number of mappings present in the HAT
 * for a particular process and page size.
 */
#define	SFMMU_TTE_CNT(sfmmup, szc)				\
	(sfmmup)->sfmmu_iblk?					\
	    (sfmmup)->sfmmu_ismttecnt[(szc)] +			\
	    (sfmmup)->sfmmu_ttecnt[(szc)] :			\
	    (sfmmup)->sfmmu_ttecnt[(szc)];

/*
 * Macro to use to unload entries from the TSB.
 * It has knowledge of which page sizes get replicated in the TSB
 * and will call the appropriate unload routine for the appropriate size.
 */
#define	SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp)				\
{									\
	int ttesz = get_hblk_ttesz(hmeblkp);				\
	if (ttesz == TTE8K || ttesz == TTE4M) {				\
		sfmmu_unload_tsb(sfmmup, addr, ttesz);			\
	} else {							\
		caddr_t sva = (caddr_t)get_hblk_base(hmeblkp);		\
		caddr_t eva = sva + get_hblk_span(hmeblkp);		\
		ASSERT(addr >= sva && addr < eva);			\
		sfmmu_unload_tsb_range(sfmmup, sva, eva, ttesz);	\
	}								\
}


/* Update tsb_alloc_hiwater after memory is configured. */
/*ARGSUSED*/
static void
sfmmu_update_tsb_post_add(void *arg, pgcnt_t delta_pages)
{
	/* Assumes physmem has already been updated. */
	SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
	SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
}

/*
 * Update tsb_alloc_hiwater before memory is deleted.  We'll do nothing here
 * and update tsb_alloc_hiwater and tsb_max_growsize after the memory is
 * deleted.
 */
/*ARGSUSED*/
static int
sfmmu_update_tsb_pre_del(void *arg, pgcnt_t delta_pages)
{
	return (0);
}

/* Update tsb_alloc_hiwater after memory fails to be unconfigured. */
/*ARGSUSED*/
static void
sfmmu_update_tsb_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
{
	/*
	 * Whether the delete was cancelled or not, just go ahead and update
	 * tsb_alloc_hiwater and tsb_max_growsize.
	 */
	SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
	SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
}

static kphysm_setup_vector_t sfmmu_update_tsb_vec = {
	KPHYSM_SETUP_VECTOR_VERSION,	/* version */
	sfmmu_update_tsb_post_add,	/* post_add */
	sfmmu_update_tsb_pre_del,	/* pre_del */
	sfmmu_update_tsb_post_del	/* post_del */
};


/*
 * HME_BLK HASH PRIMITIVES
 */

/*
 * Enter a hme on the mapping list for page pp.
 * When large pages are more prevalent in the system we might want to
 * keep the mapping list in ascending order by the hment size.  For now,
 * small pages are more frequent, so don't slow it down.
 */
#define	HME_ADD(hme, pp)					\
{								\
	ASSERT(sfmmu_mlist_held(pp));				\
								\
	hme->hme_prev = NULL;					\
	hme->hme_next = pp->p_mapping;				\
	hme->hme_page = pp;					\
	if (pp->p_mapping) {					\
		((struct sf_hment *)(pp->p_mapping))->hme_prev = hme;\
		ASSERT(pp->p_share > 0);			\
	} else	{						\
		/* EMPTY */					\
		ASSERT(pp->p_share == 0);			\
	}							\
	pp->p_mapping = hme;					\
	pp->p_share++;						\
}

/*
 * Remove a hme from the mapping list for page pp.
 * If we are unmapping a large translation, we need to make sure that the
 * change is reflected in the corresponding bit of the p_index field.
 */
#define	HME_SUB(hme, pp)					\
{								\
	ASSERT(sfmmu_mlist_held(pp));				\
	ASSERT(hme->hme_page == pp || IS_PAHME(hme));		\
								\
	if (pp->p_mapping == NULL) {				\
		panic("hme_remove - no mappings");		\
	}							\
								\
	membar_stst();	/* ensure previous stores finish */	\
								\
	ASSERT(pp->p_share > 0);				\
	pp->p_share--;						\
								\
	if (hme->hme_prev) {					\
		ASSERT(pp->p_mapping != hme);			\
		ASSERT(hme->hme_prev->hme_page == pp ||		\
			IS_PAHME(hme->hme_prev));		\
		hme->hme_prev->hme_next = hme->hme_next;	\
	} else {						\
		ASSERT(pp->p_mapping == hme);			\
		pp->p_mapping = hme->hme_next;			\
		ASSERT((pp->p_mapping == NULL) ?		\
			(pp->p_share == 0) : 1);		\
	}							\
								\
	if (hme->hme_next) {					\
		ASSERT(hme->hme_next->hme_page == pp ||		\
			IS_PAHME(hme->hme_next));		\
		hme->hme_next->hme_prev = hme->hme_prev;	\
	}							\
								\
	/* zero out the entry */				\
	hme->hme_next = NULL;					\
	hme->hme_prev = NULL;					\
	hme->hme_page = NULL;					\
								\
	if (hme_size(hme) > TTE8K) {				\
		/* remove mappings for remainder of large pg */	\
		sfmmu_rm_large_mappings(pp, hme_size(hme));	\
	}							\
}

/*
 * This function returns the hment given the hme_blk and a vaddr.
 * It assumes addr has already been checked to belong to hme_blk's
 * range.
 */
#define	HBLKTOHME(hment, hmeblkp, addr)					\
{									\
	int index;							\
	HBLKTOHME_IDX(hment, hmeblkp, addr, index)			\
}

/*
 * Version of HBLKTOHME that also returns the index in hmeblkp
 * of the hment.
 */
#define	HBLKTOHME_IDX(hment, hmeblkp, addr, idx)			\
{									\
	ASSERT(in_hblk_range((hmeblkp), (addr)));			\
									\
	if (get_hblk_ttesz(hmeblkp) == TTE8K) {				\
		idx = (((uintptr_t)(addr) >> MMU_PAGESHIFT) & (NHMENTS-1)); \
	} else								\
		idx = 0;						\
									\
	(hment) = &(hmeblkp)->hblk_hme[idx];				\
}

/*
 * Disable any page sizes not supported by the CPU
 */
void
hat_init_pagesizes()
{
	int i;

	mmu_exported_page_sizes = 0;
	for (i = TTE8K; i < max_mmu_page_sizes; i++) {
		extern int	disable_text_largepages;
		extern int	disable_initdata_largepages;

		szc_2_userszc[i] = (uint_t)-1;
		userszc_2_szc[i] = (uint_t)-1;

		if ((mmu_exported_pagesize_mask & (1 << i)) == 0) {
			disable_large_pages |= (1 << i);
			disable_ism_large_pages |= (1 << i);
			disable_text_largepages |= (1 << i);
			disable_initdata_largepages |= (1 << i);
		} else {
			szc_2_userszc[i] = mmu_exported_page_sizes;
			userszc_2_szc[mmu_exported_page_sizes] = i;
			mmu_exported_page_sizes++;
		}
	}

	disable_auto_large_pages = disable_large_pages;

	/*
	 * Initialize mmu-specific large page sizes.
	 */
	if ((mmu_page_sizes == max_mmu_page_sizes) &&
	    (&mmu_large_pages_disabled)) {
		disable_large_pages |= mmu_large_pages_disabled(HAT_LOAD);
		disable_ism_large_pages |=
		    mmu_large_pages_disabled(HAT_LOAD_SHARE);
		disable_auto_large_pages |=
		    mmu_large_pages_disabled(HAT_LOAD_AUTOLPG);
	}
}
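/*
 * For illustration (the mask value is assumed): if
 * mmu_exported_pagesize_mask were 0x1b (8K, 64K, 4M and 32M exported;
 * 512K and 256M missing), the loop above would produce
 * szc_2_userszc = { 0, 1, -1, 2, 3, -1 } and
 * userszc_2_szc = { TTE8K, TTE64K, TTE4M, TTE32M }, and would set the
 * 512K and 256M bits in each of the disable_*_largepages variables.
 */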
/*
 * Initialize the hardware address translation structures.
 */
void
hat_init(void)
{
	struct ctx	*ctx;
	struct ctx	*cur_ctx = NULL;
	int		i;

	hat_lock_init();
	hat_kstat_init();

	/*
	 * Hardware-only bits in a TTE
	 */
	MAKE_TTE_MASK(&hw_tte);

	hat_init_pagesizes();

	/* Initialize the hash locks */
	for (i = 0; i < khmehash_num; i++) {
		mutex_init(&khme_hash[i].hmehash_mutex, NULL,
		    MUTEX_DEFAULT, NULL);
	}
	for (i = 0; i < uhmehash_num; i++) {
		mutex_init(&uhme_hash[i].hmehash_mutex, NULL,
		    MUTEX_DEFAULT, NULL);
	}
	khmehash_num--;		/* make sure counter starts from 0 */
	uhmehash_num--;		/* make sure counter starts from 0 */

	/*
	 * Initialize ctx structures and list lock.
	 * We keep two lists of ctxs.  The "free" list contains contexts
	 * ready to use.  The "dirty" list contains contexts that are OK
	 * to use after flushing the TLBs of any stale mappings.
	 */
	mutex_init(&ctx_list_lock, NULL, MUTEX_DEFAULT, NULL);
	kctx = &ctxs[KCONTEXT];
	ctx = &ctxs[NUM_LOCKED_CTXS];
	ctxhand = ctxfree = ctx;		/* head of free list */
	ctxdirty = NULL;
	for (i = NUM_LOCKED_CTXS; i < nctxs; i++) {
		cur_ctx = &ctxs[i];
		cur_ctx->ctx_flags = CTX_FREE_FLAG;
		cur_ctx->ctx_free = &ctxs[i + 1];
	}
	cur_ctx->ctx_free = NULL;		/* tail of free list */

	/*
	 * Initialize ism mapping list lock.
	 */
	mutex_init(&ism_mlist_lock, NULL, MUTEX_DEFAULT, NULL);

	sfmmuid_cache = kmem_cache_create("sfmmuid_cache", sizeof (sfmmu_t),
	    0, sfmmu_idcache_constructor, sfmmu_idcache_destructor,
	    NULL, NULL, NULL, 0);

	sfmmu_tsbinfo_cache = kmem_cache_create("sfmmu_tsbinfo_cache",
	    sizeof (struct tsb_info), 0, NULL, NULL, NULL, NULL, NULL, 0);

	/*
	 * Since we only use the tsb8k cache to "borrow" pages for TSBs
	 * from the heap when low on memory or when TSB_FORCEALLOC is
	 * specified, don't use magazines to cache them--we want to return
	 * them to the system as quickly as possible.
	 */
	sfmmu_tsb8k_cache = kmem_cache_create("sfmmu_tsb8k_cache",
	    MMU_PAGESIZE, MMU_PAGESIZE, NULL, NULL, NULL, NULL,
	    static_arena, KMC_NOMAGAZINE);

	/*
	 * Set tsb_alloc_hiwater to 1/tsb_alloc_hiwater_factor of physical
	 * memory, which corresponds to the old static reserve for TSBs.
	 * tsb_alloc_hiwater_factor defaults to 32.  This caps the amount of
	 * memory we'll allocate for TSB slabs; beyond this point TSB
	 * allocations will be taken from the kernel heap (via
	 * sfmmu_tsb8k_cache) and will be throttled as would any other kmem
	 * consumer.
	 */
	if (tsb_alloc_hiwater_factor == 0) {
		tsb_alloc_hiwater_factor = TSB_ALLOC_HIWATER_FACTOR_DEFAULT;
	}
	SFMMU_SET_TSB_ALLOC_HIWATER(physmem);

	/* Set tsb_max_growsize. */
	SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
	/*
	 * On smaller memory systems, allocate TSB memory in 512K chunks
	 * instead of the default 4M slab size.  The trap handlers need to
	 * be patched with the final slab shift since they need to be able
	 * to construct the TSB pointer at runtime.
	 */
	if ((tsb_max_growsize <= TSB_512K_SZCODE) &&
	    !(disable_large_pages & (1 << TTE512K))) {
		tsb_slab_size = MMU_PAGESIZE512K;
		tsb_slab_shift = MMU_PAGESHIFT512K;
		tsb_slab_ttesz = TTE512K;
		tsb_slab_mask = 0x3f;	/* 512K page alignment for 8K pfn */
	}

	/*
	 * Set up memory callback to update tsb_alloc_hiwater and
	 * tsb_max_growsize.
	 */
	i = kphysm_setup_func_register(&sfmmu_update_tsb_vec, (void *) 0);
	ASSERT(i == 0);

	/*
	 * kmem_tsb_arena is the source from which large TSB slabs are
	 * drawn.  The quantum of this arena corresponds to the largest
	 * TSB size we can dynamically allocate for user processes.
	 * Currently it must also be a supported page size since we
	 * use exactly one translation entry to map each slab page.
	 *
	 * The per-lgroup kmem_tsb_default_arena arenas are the arenas from
	 * which most TSBs are allocated.  Since most TSB allocations are
	 * typically 8K we have a kmem cache we stack on top of each
	 * kmem_tsb_default_arena to speed up those allocations.
	 *
	 * Note the two-level scheme of arenas is required only
	 * because vmem_create doesn't allow us to specify alignment
	 * requirements.  If this ever changes the code could be
	 * simplified to use only one level of arenas.
	 */
	kmem_tsb_arena = vmem_create("kmem_tsb", NULL, 0, tsb_slab_size,
	    sfmmu_vmem_xalloc_aligned_wrapper, vmem_xfree, heap_arena,
	    0, VM_SLEEP);

	if (tsb_lgrp_affinity) {
		char s[50];
		for (i = 0; i < NLGRPS_MAX; i++) {
			(void) sprintf(s, "kmem_tsb_lgrp%d", i);
			kmem_tsb_default_arena[i] =
			    vmem_create(s, NULL, 0, PAGESIZE,
			    sfmmu_tsb_segkmem_alloc, sfmmu_tsb_segkmem_free,
			    kmem_tsb_arena, 0, VM_SLEEP | VM_BESTFIT);
			(void) sprintf(s, "sfmmu_tsb_lgrp%d_cache", i);
			sfmmu_tsb_cache[i] = kmem_cache_create(s, PAGESIZE,
			    PAGESIZE, NULL, NULL, NULL, NULL,
			    kmem_tsb_default_arena[i], 0);
		}
	} else {
		kmem_tsb_default_arena[0] = vmem_create("kmem_tsb_default",
		    NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc,
		    sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0,
		    VM_SLEEP | VM_BESTFIT);

		sfmmu_tsb_cache[0] = kmem_cache_create("sfmmu_tsb_cache",
		    PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL,
		    kmem_tsb_default_arena[0], 0);
	}

	sfmmu8_cache = kmem_cache_create("sfmmu8_cache", HME8BLK_SZ,
	    HMEBLK_ALIGN, sfmmu_hblkcache_constructor,
	    sfmmu_hblkcache_destructor,
	    sfmmu_hblkcache_reclaim, (void *)HME8BLK_SZ,
	    hat_memload_arena, KMC_NOHASH);

	hat_memload1_arena = vmem_create("hat_memload1", NULL, 0, PAGESIZE,
	    segkmem_alloc_permanent, segkmem_free, heap_arena, 0, VM_SLEEP);

	sfmmu1_cache = kmem_cache_create("sfmmu1_cache", HME1BLK_SZ,
	    HMEBLK_ALIGN, sfmmu_hblkcache_constructor,
	    sfmmu_hblkcache_destructor,
	    NULL, (void *)HME1BLK_SZ,
	    hat_memload1_arena, KMC_NOHASH);

	pa_hment_cache = kmem_cache_create("pa_hment_cache", PAHME_SZ,
	    0, NULL, NULL, NULL, NULL, static_arena, KMC_NOHASH);

	ism_blk_cache = kmem_cache_create("ism_blk_cache",
	    sizeof (ism_blk_t), ecache_alignsize, NULL, NULL,
	    NULL, NULL, static_arena, KMC_NOHASH);

	ism_ment_cache = kmem_cache_create("ism_ment_cache",
	    sizeof (ism_ment_t), 0, NULL, NULL,
	    NULL, NULL, NULL, 0);

	/*
	 * We grab the first hat for the kernel.
	 */
	AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER);
	kas.a_hat = hat_alloc(&kas);
	AS_LOCK_EXIT(&kas, &kas.a_lock);
	/*
	 * Initialize hblk_reserve.
	 */
	((struct hme_blk *)hblk_reserve)->hblk_nextpa =
	    va_to_pa((caddr_t)hblk_reserve);

#ifndef sun4v
	/*
	 * Reserve some kernel virtual address space for the locked TTEs
	 * that allow us to probe the TSB from TL>0.
	 */
	utsb_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size,
	    0, 0, NULL, NULL, VM_SLEEP);
	utsb4m_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size,
	    0, 0, NULL, NULL, VM_SLEEP);
#endif

	/*
	 * The big page VAC handling code assumes VAC
	 * will not be bigger than the smallest big
	 * page, which is 64K.
	 */
	if (TTEPAGES(TTE64K) < CACHE_NUM_COLOR) {
		cmn_err(CE_PANIC, "VAC too big!");
	}

	(void) xhat_init();

	uhme_hash_pa = va_to_pa(uhme_hash);
	khme_hash_pa = va_to_pa(khme_hash);

	/*
	 * Initialize relocation locks.  kpr_suspendlock is held
	 * at PIL_MAX to prevent interrupts from pinning the holder
	 * of a suspended TTE which may access it leading to a
	 * deadlock condition.
	 */
	mutex_init(&kpr_mutex, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&kpr_suspendlock, NULL, MUTEX_SPIN, (void *)PIL_MAX);
}

/*
 * Initialize locking for the hat layer, called early during boot.
 */
static void
hat_lock_init()
{
	int i;
	struct ctx *ctx;

	/*
	 * initialize the array of mutexes protecting a page's mapping
	 * list and p_nrm field.
	 */
	for (i = 0; i < mml_table_sz; i++)
		mutex_init(&mml_table[i], NULL, MUTEX_DEFAULT, NULL);

	if (kpm_enable) {
		for (i = 0; i < kpmp_table_sz; i++) {
			mutex_init(&kpmp_table[i].khl_mutex, NULL,
			    MUTEX_DEFAULT, NULL);
		}
	}

	/*
	 * Initialize array of mutex locks that protects sfmmu fields and
	 * TSB lists.
	 */
	for (i = 0; i < SFMMU_NUM_LOCK; i++)
		mutex_init(HATLOCK_MUTEXP(&hat_lock[i]), NULL, MUTEX_DEFAULT,
		    NULL);

#ifdef	DEBUG
	mutex_init(&ctx_trace_mutex, NULL, MUTEX_DEFAULT, NULL);
#endif	/* DEBUG */

	for (ctx = ctxs, i = 0; i < nctxs; i++, ctx++) {
		rw_init(&ctx->ctx_rwlock, NULL, RW_DEFAULT, NULL);
	}
}

extern caddr_t kmem64_base, kmem64_end;

#define	SFMMU_KERNEL_MAXVA \
	(kmem64_base ? (uintptr_t)kmem64_end : (SYSLIMIT))
/*
 * Allocate a hat structure.
 * Called when an address space first uses a hat.
 */
struct hat *
hat_alloc(struct as *as)
{
	sfmmu_t *sfmmup;
	struct ctx *ctx;
	int i;
	extern uint_t get_color_start(struct as *);

	ASSERT(AS_WRITE_HELD(as, &as->a_lock));
	sfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP);
	sfmmup->sfmmu_as = as;
	sfmmup->sfmmu_flags = 0;

	if (as == &kas) {
		ctx = kctx;
		ksfmmup = sfmmup;
		sfmmup->sfmmu_cnum = ctxtoctxnum(ctx);
		ASSERT(sfmmup->sfmmu_cnum == KCONTEXT);
		sfmmup->sfmmu_cext = 0;
		ctx->ctx_sfmmu = sfmmup;
		ctx->ctx_flags = 0;
		sfmmup->sfmmu_clrstart = 0;
		sfmmup->sfmmu_tsb = NULL;
		/*
		 * hat_kern_setup() will call sfmmu_init_ktsbinfo()
		 * to setup tsb_info for ksfmmup.
		 */
	} else {

		/*
		 * Just set to invalid ctx.  When it faults, it will
		 * get a valid ctx.  This avoids the situation where
		 * we get a ctx, but it gets stolen, and then we fault
		 * when we try to run and so have to get another ctx.
		 */
		sfmmup->sfmmu_cnum = INVALID_CONTEXT;
		sfmmup->sfmmu_cext = 0;
		/* initialize original physical page coloring bin */
		sfmmup->sfmmu_clrstart = get_color_start(as);
#ifdef DEBUG
		if (tsb_random_size) {
			uint32_t randval = (uint32_t)gettick() >> 4;
			int size = randval % (tsb_max_growsize + 1);

			/* choose a random tsb size for stress testing */
			(void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb, size,
			    TSB8K|TSB64K|TSB512K, 0, sfmmup);
		} else
#endif /* DEBUG */
			(void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb,
			    default_tsb_size,
			    TSB8K|TSB64K|TSB512K, 0, sfmmup);
		sfmmup->sfmmu_flags = HAT_SWAPPED;
		ASSERT(sfmmup->sfmmu_tsb != NULL);
	}
	sfmmu_setup_tsbinfo(sfmmup);
	for (i = 0; i < max_mmu_page_sizes; i++) {
		sfmmup->sfmmu_ttecnt[i] = 0;
		sfmmup->sfmmu_ismttecnt[i] = 0;
		sfmmup->sfmmu_pgsz[i] = TTE8K;
	}

	sfmmup->sfmmu_iblk = NULL;
	sfmmup->sfmmu_ismhat = 0;
	sfmmup->sfmmu_ismblkpa = (uint64_t)-1;
	if (sfmmup == ksfmmup) {
		CPUSET_ALL(sfmmup->sfmmu_cpusran);
	} else {
		CPUSET_ZERO(sfmmup->sfmmu_cpusran);
	}
	sfmmup->sfmmu_free = 0;
	sfmmup->sfmmu_rmstat = 0;
	sfmmup->sfmmu_clrbin = sfmmup->sfmmu_clrstart;
	sfmmup->sfmmu_xhat_provider = NULL;
	cv_init(&sfmmup->sfmmu_tsb_cv, NULL, CV_DEFAULT, NULL);
	return (sfmmup);
}

/*
 * hat_setup() makes an address space context the current active one.
 * In sfmmu this translates to setting the secondary context with the
 * corresponding context.
 */
void
hat_setup(struct hat *sfmmup, int allocflag)
{
	struct ctx *ctx;
	uint_t ctx_num;
	hatlock_t *hatlockp;

	/* Init needs some special treatment. */
	if (allocflag == HAT_INIT) {
		/*
		 * Make sure that we have
		 * 1. a TSB
		 * 2. a valid ctx that doesn't get stolen after this point.
		 */
		hatlockp = sfmmu_hat_enter(sfmmup);

		/*
		 * Swap in the TSB.  hat_init() allocates tsbinfos without
		 * TSBs, but we need one for init, since the kernel does some
		 * special things to set up its stack and needs the TSB to
		 * resolve page faults.
		 */
		sfmmu_tsb_swapin(sfmmup, hatlockp);

		sfmmu_disallow_ctx_steal(sfmmup);

		kpreempt_disable();

		ctx = sfmmutoctx(sfmmup);
		CPUSET_ADD(sfmmup->sfmmu_cpusran, CPU->cpu_id);
		ctx_num = ctxtoctxnum(ctx);
		ASSERT(sfmmup == ctx->ctx_sfmmu);
		ASSERT(ctx_num >= NUM_LOCKED_CTXS);
		sfmmu_setctx_sec(ctx_num);
		sfmmu_load_mmustate(sfmmup);

		kpreempt_enable();

		/*
		 * Allow ctx to be stolen.
		 */
		sfmmu_allow_ctx_steal(sfmmup);
		sfmmu_hat_exit(hatlockp);
	} else {
		ASSERT(allocflag == HAT_ALLOC);

		hatlockp = sfmmu_hat_enter(sfmmup);
		kpreempt_disable();

		CPUSET_ADD(sfmmup->sfmmu_cpusran, CPU->cpu_id);
		sfmmu_setctx_sec(INVALID_CONTEXT);
		sfmmu_clear_utsbinfo();

		kpreempt_enable();
		sfmmu_hat_exit(hatlockp);
	}
}
/*
 * Free all the translation resources for the specified address space.
 * Called from as_free when an address space is being destroyed.
 */
void
hat_free_start(struct hat *sfmmup)
{
	ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
	ASSERT(sfmmup != ksfmmup);
	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);

	sfmmup->sfmmu_free = 1;
}

void
hat_free_end(struct hat *sfmmup)
{
	int i;

	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
	if (sfmmup->sfmmu_ismhat) {
		for (i = 0; i < mmu_page_sizes; i++) {
			sfmmup->sfmmu_ttecnt[i] = 0;
			sfmmup->sfmmu_ismttecnt[i] = 0;
		}
	} else {
		/* EMPTY */
		ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0);
		ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0);
		ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0);
		ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0);
		ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
		ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
	}

	if (sfmmup->sfmmu_rmstat) {
		hat_freestat(sfmmup->sfmmu_as, NULL);
	}
	if (!delay_tlb_flush) {
		sfmmu_tlb_ctx_demap(sfmmup);
		xt_sync(sfmmup->sfmmu_cpusran);
	} else {
		SFMMU_STAT(sf_tlbflush_deferred);
	}
	sfmmu_free_ctx(sfmmup, sfmmutoctx(sfmmup));
	while (sfmmup->sfmmu_tsb != NULL) {
		struct tsb_info *next = sfmmup->sfmmu_tsb->tsb_next;
		sfmmu_tsbinfo_free(sfmmup->sfmmu_tsb);
		sfmmup->sfmmu_tsb = next;
	}
	sfmmu_free_sfmmu(sfmmup);

	kmem_cache_free(sfmmuid_cache, sfmmup);
}

/*
 * Set up any translation structures, for the specified address space,
 * that are needed or preferred when the process is being swapped in.
 */
/* ARGSUSED */
void
hat_swapin(struct hat *hat)
{
	ASSERT(hat->sfmmu_xhat_provider == NULL);
}

/*
 * Free all of the translation resources, for the specified address space,
 * that can be freed while the process is swapped out.  Called from as_swapout.
 * Also, free up the ctx that this process was using.
 */
void
hat_swapout(struct hat *sfmmup)
{
	struct hmehash_bucket *hmebp;
	struct hme_blk *hmeblkp;
	struct hme_blk *pr_hblk = NULL;
	struct hme_blk *nx_hblk;
	struct ctx *ctx;
	int cnum;
	int i;
	uint64_t hblkpa, prevpa, nx_pa;
	struct hme_blk *list = NULL;
	hatlock_t *hatlockp;
	struct tsb_info *tsbinfop;
	struct free_tsb {
		struct free_tsb *next;
		struct tsb_info *tsbinfop;
	};			/* free list of TSBs */
	struct free_tsb *freelist, *last, *next;

	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
	SFMMU_STAT(sf_swapout);
	/*
	 * There is no way to go from an as to all its translations in sfmmu.
	 * Here is one of the times when we take the big hit and traverse
	 * the hash looking for hme_blks to free up.  Not only do we free up
	 * this as's hme_blks, but all those that are free.  We are obviously
	 * swapping because we need memory so let's free up as much
	 * as we can.
	 *
	 * Note that we don't flush TLB/TSB here -- it's not necessary
	 * because:
	 *  1) we free the ctx we're using and throw away the TSB(s);
	 *  2) processes aren't runnable while being swapped out.
	 */
	ASSERT(sfmmup != KHATID);
	for (i = 0; i <= UHMEHASH_SZ; i++) {
		hmebp = &uhme_hash[i];
		SFMMU_HASH_LOCK(hmebp);
		hmeblkp = hmebp->hmeblkp;
		hblkpa = hmebp->hmeh_nextpa;
		prevpa = 0;
		pr_hblk = NULL;
		while (hmeblkp) {

			ASSERT(!hmeblkp->hblk_xhat_bit);

			if ((hmeblkp->hblk_tag.htag_id == sfmmup) &&
			    !hmeblkp->hblk_shw_bit && !hmeblkp->hblk_lckcnt) {
				(void) sfmmu_hblk_unload(sfmmup, hmeblkp,
				    (caddr_t)get_hblk_base(hmeblkp),
				    get_hblk_endaddr(hmeblkp),
				    NULL, HAT_UNLOAD);
			}
			nx_hblk = hmeblkp->hblk_next;
			nx_pa = hmeblkp->hblk_nextpa;
			if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
				ASSERT(!hmeblkp->hblk_lckcnt);
				sfmmu_hblk_hash_rm(hmebp, hmeblkp,
				    prevpa, pr_hblk);
				sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list);
			} else {
				pr_hblk = hmeblkp;
				prevpa = hblkpa;
			}
			hmeblkp = nx_hblk;
			hblkpa = nx_pa;
		}
		SFMMU_HASH_UNLOCK(hmebp);
	}

	sfmmu_hblks_list_purge(&list);

	/*
	 * Now free up the ctx so that others can reuse it.
	 */
	hatlockp = sfmmu_hat_enter(sfmmup);
	ctx = sfmmutoctx(sfmmup);
	cnum = ctxtoctxnum(ctx);

	if (cnum != INVALID_CONTEXT) {
		rw_enter(&ctx->ctx_rwlock, RW_WRITER);
		if (sfmmup->sfmmu_cnum == cnum) {
			sfmmu_reuse_ctx(ctx, sfmmup);
			/*
			 * Put ctx back to the free list.
			 */
			mutex_enter(&ctx_list_lock);
			CTX_SET_FLAGS(ctx, CTX_FREE_FLAG);
			ctx->ctx_free = ctxfree;
			ctxfree = ctx;
			mutex_exit(&ctx_list_lock);
		}
		rw_exit(&ctx->ctx_rwlock);
	}

	/*
	 * Free TSBs, but not tsbinfos, and set SWAPPED flag.
	 * If TSBs were never swapped in, just return.
	 * This implies that we don't support partial swapping
	 * of TSBs -- either all are swapped out, or none are.
	 *
	 * We must hold the HAT lock here to prevent racing with another
	 * thread trying to unmap TTEs from the TSB or running the post-
	 * relocator after relocating the TSB's memory.  Unfortunately, we
	 * can't free memory while holding the HAT lock or we could
	 * deadlock, so we build a list of TSBs to be freed after marking
	 * the tsbinfos as swapped out and free them after dropping the
	 * lock.
	 */
	if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
		sfmmu_hat_exit(hatlockp);
		return;
	}

	SFMMU_FLAGS_SET(sfmmup, HAT_SWAPPED);
	last = freelist = NULL;
	for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
	    tsbinfop = tsbinfop->tsb_next) {
		ASSERT((tsbinfop->tsb_flags & TSB_SWAPPED) == 0);

		/*
		 * Cast the TSB into a struct free_tsb and put it on the free
		 * list.
		 */
		if (freelist == NULL) {
			last = freelist = (struct free_tsb *)tsbinfop->tsb_va;
		} else {
			last->next = (struct free_tsb *)tsbinfop->tsb_va;
			last = last->next;
		}
		last->next = NULL;
		last->tsbinfop = tsbinfop;
		tsbinfop->tsb_flags |= TSB_SWAPPED;
		/*
		 * Zero out the TTE to clear the valid bit.
		 * Note we can't use a value like 0xbad because we want to
		 * ensure diagnostic bits are NEVER set on TTEs that might
		 * be loaded.  The intent is to catch any invalid access
		 * to the swapped TSB, such as a thread running with a valid
		 * context without first calling sfmmu_tsb_swapin() to
		 * allocate TSB memory.
		 */
		tsbinfop->tsb_tte.ll = 0;
	}

	/* Now we can drop the lock and free the TSB memory. */
	sfmmu_hat_exit(hatlockp);
	for (; freelist != NULL; freelist = next) {
		next = freelist->next;
		sfmmu_tsb_free(freelist->tsbinfop);
	}
}
/*
 * Duplicate the translations of an as into another newas
 */
/* ARGSUSED */
int
hat_dup(struct hat *hat, struct hat *newhat, caddr_t addr, size_t len,
	uint_t flag)
{
	ASSERT(hat->sfmmu_xhat_provider == NULL);
	ASSERT((flag == 0) || (flag == HAT_DUP_ALL) || (flag == HAT_DUP_COW));

	if (flag == HAT_DUP_COW) {
		panic("hat_dup: HAT_DUP_COW not supported");
	}
	return (0);
}

/*
 * Set up addr to map to page pp with protection prot.
 * As an optimization we also load the TSB with the
 * corresponding tte but it is no big deal if the tte gets kicked out.
 */
void
hat_memload(struct hat *hat, caddr_t addr, struct page *pp,
	uint_t attr, uint_t flags)
{
	tte_t tte;


	ASSERT(hat != NULL);
	ASSERT(PAGE_LOCKED(pp));
	ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
	ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG));
	ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));

	if (PP_ISFREE(pp)) {
		panic("hat_memload: loading a mapping to free page %p",
		    (void *)pp);
	}

	if (hat->sfmmu_xhat_provider) {
		XHAT_MEMLOAD(hat, addr, pp, attr, flags);
		return;
	}

	ASSERT((hat == ksfmmup) ||
	    AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock));

	if (flags & ~SFMMU_LOAD_ALLFLAG)
		cmn_err(CE_NOTE, "hat_memload: unsupported flags %d",
		    flags & ~SFMMU_LOAD_ALLFLAG);

	if (hat->sfmmu_rmstat)
		hat_resvstat(MMU_PAGESIZE, hat->sfmmu_as, addr);

#if defined(SF_ERRATA_57)
	if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
	    (addr < errata57_limit) && (attr & PROT_EXEC) &&
	    !(flags & HAT_LOAD_SHARE)) {
		cmn_err(CE_WARN, "hat_memload: illegal attempt to make user "
		    " page executable");
		attr &= ~PROT_EXEC;
	}
#endif

	sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K);
	(void) sfmmu_tteload_array(hat, &tte, addr, &pp, flags);

	/*
	 * Check TSB and TLB page sizes.
	 */
	if ((flags & HAT_LOAD_SHARE) == 0) {
		sfmmu_check_page_sizes(hat, 1);
	}
}
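/*
 * Illustrative usage (the calling context is assumed, not taken from
 * this file): a segment driver that has resolved a fault on a locked
 * page would typically establish the translation with something like
 *
 *	hat_memload(as->a_hat, addr, pp, PROT_READ | PROT_WRITE,
 *	    HAT_LOAD);
 *
 * where addr is MMU_PAGESIZE-aligned and pp is PAGE_LOCKED, matching
 * the ASSERTs at the top of hat_memload().
 */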
1730 */ 1731 void 1732 hat_devload(struct hat *hat, caddr_t addr, size_t len, pfn_t pfn, 1733 uint_t attr, int flags) 1734 { 1735 tte_t tte; 1736 struct page *pp = NULL; 1737 int use_lgpg = 0; 1738 1739 ASSERT(hat != NULL); 1740 1741 if (hat->sfmmu_xhat_provider) { 1742 XHAT_DEVLOAD(hat, addr, len, pfn, attr, flags); 1743 return; 1744 } 1745 1746 ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG)); 1747 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 1748 ASSERT((hat == ksfmmup) || 1749 AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock)); 1750 if (len == 0) 1751 panic("hat_devload: zero len"); 1752 if (flags & ~SFMMU_LOAD_ALLFLAG) 1753 cmn_err(CE_NOTE, "hat_devload: unsupported flags %d", 1754 flags & ~SFMMU_LOAD_ALLFLAG); 1755 1756 #if defined(SF_ERRATA_57) 1757 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) && 1758 (addr < errata57_limit) && (attr & PROT_EXEC) && 1759 !(flags & HAT_LOAD_SHARE)) { 1760 cmn_err(CE_WARN, "hat_devload: illegal attempt to make user " 1761 " page executable"); 1762 attr &= ~PROT_EXEC; 1763 } 1764 #endif 1765 1766 /* 1767 * If it's a memory page find its pp 1768 */ 1769 if (!(flags & HAT_LOAD_NOCONSIST) && pf_is_memory(pfn)) { 1770 pp = page_numtopp_nolock(pfn); 1771 if (pp == NULL) { 1772 flags |= HAT_LOAD_NOCONSIST; 1773 } else { 1774 if (PP_ISFREE(pp)) { 1775 panic("hat_devload: loading " 1776 "a mapping to free page %p", 1777 (void *)pp); 1778 } 1779 if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) { 1780 panic("hat_devload: loading a mapping " 1781 "to unlocked relocatable page %p", 1782 (void *)pp); 1783 } 1784 ASSERT(len == MMU_PAGESIZE); 1785 } 1786 } 1787 1788 if (hat->sfmmu_rmstat) 1789 hat_resvstat(len, hat->sfmmu_as, addr); 1790 1791 if (flags & HAT_LOAD_NOCONSIST) { 1792 attr |= SFMMU_UNCACHEVTTE; 1793 use_lgpg = 1; 1794 } 1795 if (!pf_is_memory(pfn)) { 1796 attr |= SFMMU_UNCACHEPTTE | HAT_NOSYNC; 1797 use_lgpg = 1; 1798 switch (attr & HAT_ORDER_MASK) { 1799 case HAT_STRICTORDER: 1800 case HAT_UNORDERED_OK: 1801 /* 1802 * we set the side effect bit for all non 1803 * memory mappings unless merging is ok 1804 */ 1805 attr |= SFMMU_SIDEFFECT; 1806 break; 1807 case HAT_MERGING_OK: 1808 case HAT_LOADCACHING_OK: 1809 case HAT_STORECACHING_OK: 1810 break; 1811 default: 1812 panic("hat_devload: bad attr"); 1813 break; 1814 } 1815 } 1816 while (len) { 1817 if (!use_lgpg) { 1818 sfmmu_memtte(&tte, pfn, attr, TTE8K); 1819 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 1820 flags); 1821 len -= MMU_PAGESIZE; 1822 addr += MMU_PAGESIZE; 1823 pfn++; 1824 continue; 1825 } 1826 /* 1827 * Try to use large pages; check va/pa alignments. 1828 * Note that 32M/256M page sizes are not (yet) supported.
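 *
 * Each candidate size below follows the same test pattern, shown
 * here for a generic size SZ:
 *
 *	len >= MMU_PAGESIZE_SZ				-- enough bytes left
 *	((uintptr_t)addr & MMU_PAGEOFFSET_SZ) == 0	-- va aligned
 *	(mmu_ptob(pfn) & MMU_PAGEOFFSET_SZ) == 0	-- pa aligned
 *	!(disable_large_pages & (1 << TTE_SZ))		-- size not disabled
 *
 * falling through to the next smaller size when any test fails.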
1829 */ 1830 if ((len >= MMU_PAGESIZE4M) && 1831 !((uintptr_t)addr & MMU_PAGEOFFSET4M) && 1832 !(disable_large_pages & (1 << TTE4M)) && 1833 !(mmu_ptob(pfn) & MMU_PAGEOFFSET4M)) { 1834 sfmmu_memtte(&tte, pfn, attr, TTE4M); 1835 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 1836 flags); 1837 len -= MMU_PAGESIZE4M; 1838 addr += MMU_PAGESIZE4M; 1839 pfn += MMU_PAGESIZE4M / MMU_PAGESIZE; 1840 } else if ((len >= MMU_PAGESIZE512K) && 1841 !((uintptr_t)addr & MMU_PAGEOFFSET512K) && 1842 !(disable_large_pages & (1 << TTE512K)) && 1843 !(mmu_ptob(pfn) & MMU_PAGEOFFSET512K)) { 1844 sfmmu_memtte(&tte, pfn, attr, TTE512K); 1845 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 1846 flags); 1847 len -= MMU_PAGESIZE512K; 1848 addr += MMU_PAGESIZE512K; 1849 pfn += MMU_PAGESIZE512K / MMU_PAGESIZE; 1850 } else if ((len >= MMU_PAGESIZE64K) && 1851 !((uintptr_t)addr & MMU_PAGEOFFSET64K) && 1852 !(disable_large_pages & (1 << TTE64K)) && 1853 !(mmu_ptob(pfn) & MMU_PAGEOFFSET64K)) { 1854 sfmmu_memtte(&tte, pfn, attr, TTE64K); 1855 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 1856 flags); 1857 len -= MMU_PAGESIZE64K; 1858 addr += MMU_PAGESIZE64K; 1859 pfn += MMU_PAGESIZE64K / MMU_PAGESIZE; 1860 } else { 1861 sfmmu_memtte(&tte, pfn, attr, TTE8K); 1862 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 1863 flags); 1864 len -= MMU_PAGESIZE; 1865 addr += MMU_PAGESIZE; 1866 pfn++; 1867 } 1868 } 1869 1870 /* 1871 * Check TSB and TLB page sizes. 1872 */ 1873 if ((flags & HAT_LOAD_SHARE) == 0) { 1874 sfmmu_check_page_sizes(hat, 1); 1875 } 1876 } 1877 1878 /* 1879 * Map the largest extent possible out of the page array. The array may NOT 1880 * be in order. The largest possible mapping a page can have 1881 * is specified in the p_szc field. The p_szc field 1882 * cannot change as long as there are any mappings (large or small) 1883 * to any of the pages that make up the large page. (i.e., any 1884 * promotion/demotion of page size is not up to the hat but up to 1885 * the page free list manager). The array 1886 * should consist of properly aligned contiguous pages that are 1887 * part of a big page for a large mapping to be created. 1888 */ 1889 void 1890 hat_memload_array(struct hat *hat, caddr_t addr, size_t len, 1891 struct page **pps, uint_t attr, uint_t flags) 1892 { 1893 int ttesz; 1894 size_t mapsz; 1895 pgcnt_t numpg, npgs; 1896 tte_t tte; 1897 page_t *pp; 1898 int large_pages_disable; 1899 1900 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 1901 1902 if (hat->sfmmu_xhat_provider) { 1903 XHAT_MEMLOAD_ARRAY(hat, addr, len, pps, attr, flags); 1904 return; 1905 } 1906 1907 if (hat->sfmmu_rmstat) 1908 hat_resvstat(len, hat->sfmmu_as, addr); 1909 1910 #if defined(SF_ERRATA_57) 1911 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) && 1912 (addr < errata57_limit) && (attr & PROT_EXEC) && 1913 !(flags & HAT_LOAD_SHARE)) { 1914 cmn_err(CE_WARN, "hat_memload_array: illegal attempt to make " 1915 "user page executable"); 1916 attr &= ~PROT_EXEC; 1917 } 1918 #endif 1919 1920 /* Get number of pages */ 1921 npgs = len >> MMU_PAGESHIFT; 1922 1923 if (flags & HAT_LOAD_SHARE) { 1924 large_pages_disable = disable_ism_large_pages; 1925 } else { 1926 large_pages_disable = disable_large_pages; 1927 } 1928 1929 if (npgs < NHMENTS || large_pages_disable == LARGE_PAGES_OFF) { 1930 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs); 1931 return; 1932 } 1933 1934 while (npgs >= NHMENTS) { 1935 pp = *pps; 1936 for (ttesz = pp->p_szc; ttesz != TTE8K; ttesz--) { 1937 /* 1938 * Check if this page size is disabled.
1939 */ 1940 if (large_pages_disable & (1 << ttesz)) 1941 continue; 1942 1943 numpg = TTEPAGES(ttesz); 1944 mapsz = numpg << MMU_PAGESHIFT; 1945 if ((npgs >= numpg) && 1946 IS_P2ALIGNED(addr, mapsz) && 1947 IS_P2ALIGNED(pp->p_pagenum, numpg)) { 1948 /* 1949 * At this point we have enough pages and 1950 * we know the virtual address and the pfn 1951 * are properly aligned. We still need 1952 * to check for physical contiguity, but since 1953 * it is very likely that this is the case 1954 * we will assume the pages are contiguous and undo 1955 * the request if necessary. It would 1956 * be great if we could get a hint flag 1957 * like HAT_CONTIG which would tell us 1958 * the pages are contiguous for sure. 1959 */ 1960 sfmmu_memtte(&tte, (*pps)->p_pagenum, 1961 attr, ttesz); 1962 if (!sfmmu_tteload_array(hat, &tte, addr, 1963 pps, flags)) { 1964 break; 1965 } 1966 } 1967 } 1968 if (ttesz == TTE8K) { 1969 /* 1970 * We were not able to map the array using a large page; 1971 * batch it an hmeblk, or a fraction of one, at a time. 1972 */ 1973 numpg = ((uintptr_t)addr >> MMU_PAGESHIFT) 1974 & (NHMENTS-1); 1975 numpg = NHMENTS - numpg; 1976 ASSERT(numpg <= npgs); 1977 mapsz = numpg * MMU_PAGESIZE; 1978 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, 1979 numpg); 1980 } 1981 addr += mapsz; 1982 npgs -= numpg; 1983 pps += numpg; 1984 } 1985 1986 if (npgs) { 1987 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs); 1988 } 1989 1990 /* 1991 * Check TSB and TLB page sizes. 1992 */ 1993 if ((flags & HAT_LOAD_SHARE) == 0) { 1994 sfmmu_check_page_sizes(hat, 1); 1995 } 1996 } 1997 1998 /* 1999 * Function tries to batch 8K pages into the same hme blk. 2000 */ 2001 static void 2002 sfmmu_memload_batchsmall(struct hat *hat, caddr_t vaddr, page_t **pps, 2003 uint_t attr, uint_t flags, pgcnt_t npgs) 2004 { 2005 tte_t tte; 2006 page_t *pp; 2007 struct hmehash_bucket *hmebp; 2008 struct hme_blk *hmeblkp; 2009 int index; 2010 2011 while (npgs) { 2012 /* 2013 * Acquire the hash bucket. 2014 */ 2015 hmebp = sfmmu_tteload_acquire_hashbucket(hat, vaddr, TTE8K); 2016 ASSERT(hmebp); 2017 2018 /* 2019 * Find the hment block. 2020 */ 2021 hmeblkp = sfmmu_tteload_find_hmeblk(hat, hmebp, vaddr, 2022 TTE8K, flags); 2023 ASSERT(hmeblkp); 2024 2025 do { 2026 /* 2027 * Make the tte. 2028 */ 2029 pp = *pps; 2030 sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K); 2031 2032 /* 2033 * Add the translation. 2034 */ 2035 (void) sfmmu_tteload_addentry(hat, hmeblkp, &tte, 2036 vaddr, pps, flags); 2037 2038 /* 2039 * Go to the next page. 2040 */ 2041 pps++; 2042 npgs--; 2043 2044 /* 2045 * Go to the next address. 2046 */ 2047 vaddr += MMU_PAGESIZE; 2048 2049 /* 2050 * Don't cross over into a different hmeblk. 2051 */ 2052 index = (int)(((uintptr_t)vaddr >> MMU_PAGESHIFT) & 2053 (NHMENTS-1)); 2054 2055 } while (index != 0 && npgs != 0); 2056 2057 /* 2058 * Release the hash bucket.
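 *
 * We drop the bucket each time the inner loop stops: either all npgs
 * pages have been loaded, or vaddr crossed into the next hmeblk (an
 * 8K hmeblk covers NHMENTS consecutive pages, so the hment index
 * wraps to 0 at the boundary) and a new bucket and hmeblk must be
 * looked up.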
2059 */ 2060 2061 sfmmu_tteload_release_hashbucket(hmebp); 2062 } 2063 } 2064 2065 /* 2066 * Construct a tte for a page: 2067 * 2068 * tte_valid = 1 2069 * tte_size2 = size & TTE_SZ2_BITS (Panther-only) 2070 * tte_size = size 2071 * tte_nfo = attr & HAT_NOFAULT 2072 * tte_ie = attr & HAT_STRUCTURE_LE 2073 * tte_hmenum = hmenum 2074 * tte_pahi = pp->p_pagenum >> TTE_PASHIFT; 2075 * tte_palo = pp->p_pagenum & TTE_PALOMASK; 2076 * tte_ref = 1 (optimization) 2077 * tte_wr_perm = attr & PROT_WRITE; 2078 * tte_no_sync = attr & HAT_NOSYNC 2079 * tte_lock = attr & SFMMU_LOCKTTE 2080 * tte_cp = !(attr & SFMMU_UNCACHEPTTE) 2081 * tte_cv = !(attr & SFMMU_UNCACHEVTTE) 2082 * tte_e = attr & SFMMU_SIDEFFECT 2083 * tte_priv = !(attr & PROT_USER) 2084 * tte_hwwr = if nosync is set and it is writable we set the mod bit (opt) 2085 * tte_glb = 0 2086 */ 2087 void 2088 sfmmu_memtte(tte_t *ttep, pfn_t pfn, uint_t attr, int tte_sz) 2089 { 2090 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 2091 2092 ttep->tte_inthi = MAKE_TTE_INTHI(pfn, attr, tte_sz, 0 /* hmenum */); 2093 ttep->tte_intlo = MAKE_TTE_INTLO(pfn, attr, tte_sz, 0 /* hmenum */); 2094 2095 if (TTE_IS_NOSYNC(ttep)) { 2096 TTE_SET_REF(ttep); 2097 if (TTE_IS_WRITABLE(ttep)) { 2098 TTE_SET_MOD(ttep); 2099 } 2100 } 2101 if (TTE_IS_NFO(ttep) && TTE_IS_EXECUTABLE(ttep)) { 2102 panic("sfmmu_memtte: can't set both NFO and EXEC bits"); 2103 } 2104 } 2105 2106 /* 2107 * This function will add a translation to the hme_blk and allocate the 2108 * hme_blk if one does not exist. 2109 * If a page structure is specified then it will add the 2110 * corresponding hment to the mapping list. 2111 * It will also update the hmenum field for the tte. 2112 */ 2113 void 2114 sfmmu_tteload(struct hat *sfmmup, tte_t *ttep, caddr_t vaddr, page_t *pp, 2115 uint_t flags) 2116 { 2117 (void) sfmmu_tteload_array(sfmmup, ttep, vaddr, &pp, flags); 2118 } 2119 2120 /* 2121 * Load (ttep != NULL) or unload (ttep == NULL) one entry in the TSB. 2122 * Assumes that a particular page size may only be resident in one TSB. 2123 */ 2124 static void 2125 sfmmu_mod_tsb(sfmmu_t *sfmmup, caddr_t vaddr, tte_t *ttep, int ttesz) 2126 { 2127 struct tsb_info *tsbinfop = NULL; 2128 uint64_t tag; 2129 struct tsbe *tsbe_addr; 2130 uint64_t tsb_base; 2131 uint_t tsb_size; 2132 int vpshift = MMU_PAGESHIFT; 2133 int phys = 0; 2134 2135 if (sfmmup == ksfmmup) { /* No support for 32/256M ksfmmu pages */ 2136 phys = ktsb_phys; 2137 if (ttesz >= TTE4M) { 2138 #ifndef sun4v 2139 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M)); 2140 #endif 2141 tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base; 2142 tsb_size = ktsb4m_szcode; 2143 } else { 2144 tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base; 2145 tsb_size = ktsb_szcode; 2146 } 2147 } else { 2148 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz); 2149 2150 /* 2151 * If there isn't a TSB for this page size, or the TSB is 2152 * swapped out, there is nothing to do. Note that the latter 2153 * case seems impossible but can occur if hat_pageunload() 2154 * is called on an ISM mapping while the process is swapped 2155 * out. 2156 */ 2157 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED)) 2158 return; 2159 2160 /* 2161 * If another thread is in the middle of relocating a TSB 2162 * we can't unload the entry so set a flag so that the 2163 * TSB will be flushed before it can be accessed by the 2164 * process. 
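 * (Only unloads need the flag: the TSB is just a cache of TTEs, so
 * silently dropping a load here merely costs one extra TSB miss
 * later, which is why only the ttep == NULL case is flagged below.)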
2165 */ 2166 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) { 2167 if (ttep == NULL) 2168 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED; 2169 return; 2170 } 2171 #if defined(UTSB_PHYS) 2172 phys = 1; 2173 tsb_base = (uint64_t)tsbinfop->tsb_pa; 2174 #else 2175 tsb_base = (uint64_t)tsbinfop->tsb_va; 2176 #endif 2177 tsb_size = tsbinfop->tsb_szc; 2178 } 2179 if (ttesz >= TTE4M) 2180 vpshift = MMU_PAGESHIFT4M; 2181 2182 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size); 2183 tag = sfmmu_make_tsbtag(vaddr); 2184 2185 if (ttep == NULL) { 2186 sfmmu_unload_tsbe(tsbe_addr, tag, phys); 2187 } else { 2188 if (ttesz >= TTE4M) { 2189 SFMMU_STAT(sf_tsb_load4m); 2190 } else { 2191 SFMMU_STAT(sf_tsb_load8k); 2192 } 2193 2194 sfmmu_load_tsbe(tsbe_addr, tag, ttep, phys); 2195 } 2196 } 2197 2198 /* 2199 * Unmap all entries from [start, end) matching the given page size. 2200 * 2201 * This function is used primarily to unmap replicated 64K or 512K entries 2202 * from the TSB that are inserted using the base page size TSB pointer, but 2203 * it may also be called to unmap a range of addresses from the TSB. 2204 */ 2205 void 2206 sfmmu_unload_tsb_range(sfmmu_t *sfmmup, caddr_t start, caddr_t end, int ttesz) 2207 { 2208 struct tsb_info *tsbinfop; 2209 uint64_t tag; 2210 struct tsbe *tsbe_addr; 2211 caddr_t vaddr; 2212 uint64_t tsb_base; 2213 int vpshift, vpgsz; 2214 uint_t tsb_size; 2215 int phys = 0; 2216 2217 /* 2218 * Assumptions: 2219 * If ttesz == 8K, 64K or 512K, we walk through the range 8K 2220 * at a time shooting down any valid entries we encounter. 2221 * 2222 * If ttesz >= 4M we walk the range 4M at a time shooting 2223 * down any valid mappings we find. 2224 */ 2225 if (sfmmup == ksfmmup) { 2226 phys = ktsb_phys; 2227 if (ttesz >= TTE4M) { 2228 #ifndef sun4v 2229 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M)); 2230 #endif 2231 tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base; 2232 tsb_size = ktsb4m_szcode; 2233 } else { 2234 tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base; 2235 tsb_size = ktsb_szcode; 2236 } 2237 } else { 2238 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz); 2239 2240 /* 2241 * If there isn't a TSB for this page size, or the TSB is 2242 * swapped out, there is nothing to do. Note that the latter 2243 * case seems impossible but can occur if hat_pageunload() 2244 * is called on an ISM mapping while the process is swapped 2245 * out. 2246 */ 2247 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED)) 2248 return; 2249 2250 /* 2251 * If another thread is in the middle of relocating a TSB 2252 * we can't unload the entry so set a flag so that the 2253 * TSB will be flushed before it can be accessed by the 2254 * process. 2255 */ 2256 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) { 2257 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED; 2258 return; 2259 } 2260 #if defined(UTSB_PHYS) 2261 phys = 1; 2262 tsb_base = (uint64_t)tsbinfop->tsb_pa; 2263 #else 2264 tsb_base = (uint64_t)tsbinfop->tsb_va; 2265 #endif 2266 tsb_size = tsbinfop->tsb_szc; 2267 } 2268 if (ttesz >= TTE4M) { 2269 vpshift = MMU_PAGESHIFT4M; 2270 vpgsz = MMU_PAGESIZE4M; 2271 } else { 2272 vpshift = MMU_PAGESHIFT; 2273 vpgsz = MMU_PAGESIZE; 2274 } 2275 2276 for (vaddr = start; vaddr < end; vaddr += vpgsz) { 2277 tag = sfmmu_make_tsbtag(vaddr); 2278 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size); 2279 sfmmu_unload_tsbe(tsbe_addr, tag, phys); 2280 } 2281 } 2282 2283 /* 2284 * Select the optimum TSB size given the number of mappings 2285 * that need to be cached. 
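 * In other words, return the smallest size code szc for which
 * SFMMU_RSS_TSBSIZE(szc) >= pgcnt, capped at tsb_max_growsize; e.g.
 * a pgcnt just above SFMMU_RSS_TSBSIZE(0) selects szc 1.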
2286 */ 2287 static int 2288 sfmmu_select_tsb_szc(pgcnt_t pgcnt) 2289 { 2290 int szc = 0; 2291 2292 #ifdef DEBUG 2293 if (tsb_grow_stress) { 2294 uint32_t randval = (uint32_t)gettick() >> 4; 2295 return (randval % (tsb_max_growsize + 1)); 2296 } 2297 #endif /* DEBUG */ 2298 2299 while ((szc < tsb_max_growsize) && (pgcnt > SFMMU_RSS_TSBSIZE(szc))) 2300 szc++; 2301 return (szc); 2302 } 2303 2304 /* 2305 * This function will add a translation to the hme_blk and allocate the 2306 * hme_blk if one does not exist. 2307 * If a page structure is specified then it will add the 2308 * corresponding hment to the mapping list. 2309 * It will also update the hmenum field for the tte. 2310 * Furthermore, it attempts to create a large page translation 2311 * for <addr,hat> at page array pps. It assumes addr and the first 2312 * pp are correctly aligned. It returns 0 if successful and 1 otherwise. 2313 */ 2314 static int 2315 sfmmu_tteload_array(sfmmu_t *sfmmup, tte_t *ttep, caddr_t vaddr, 2316 page_t **pps, uint_t flags) 2317 { 2318 struct hmehash_bucket *hmebp; 2319 struct hme_blk *hmeblkp; 2320 int ret; 2321 uint_t size; 2322 2323 /* 2324 * Get mapping size. 2325 */ 2326 size = TTE_CSZ(ttep); 2327 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size))); 2328 2329 /* 2330 * Acquire the hash bucket. 2331 */ 2332 hmebp = sfmmu_tteload_acquire_hashbucket(sfmmup, vaddr, size); 2333 ASSERT(hmebp); 2334 2335 /* 2336 * Find the hment block. 2337 */ 2338 hmeblkp = sfmmu_tteload_find_hmeblk(sfmmup, hmebp, vaddr, size, flags); 2339 ASSERT(hmeblkp); 2340 2341 /* 2342 * Add the translation. 2343 */ 2344 ret = sfmmu_tteload_addentry(sfmmup, hmeblkp, ttep, vaddr, pps, flags); 2345 2346 /* 2347 * Release the hash bucket. 2348 */ 2349 sfmmu_tteload_release_hashbucket(hmebp); 2350 2351 return (ret); 2352 } 2353 2354 /* 2355 * Function locks and returns a pointer to the hash bucket for vaddr and size. 2356 */ 2357 static struct hmehash_bucket * 2358 sfmmu_tteload_acquire_hashbucket(sfmmu_t *sfmmup, caddr_t vaddr, int size) 2359 { 2360 struct hmehash_bucket *hmebp; 2361 int hmeshift; 2362 2363 hmeshift = HME_HASH_SHIFT(size); 2364 2365 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift); 2366 2367 SFMMU_HASH_LOCK(hmebp); 2368 2369 return (hmebp); 2370 } 2371 2372 /* 2373 * Function returns a pointer to an hmeblk in the hash bucket, hmebp. If the 2374 * hmeblk doesn't exist for the [sfmmup, vaddr & size] signature, an hmeblk is 2375 * allocated. 2376 */ 2377 static struct hme_blk * 2378 sfmmu_tteload_find_hmeblk(sfmmu_t *sfmmup, struct hmehash_bucket *hmebp, 2379 caddr_t vaddr, uint_t size, uint_t flags) 2380 { 2381 hmeblk_tag hblktag; 2382 int hmeshift; 2383 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL; 2384 uint64_t hblkpa, prevpa; 2385 struct kmem_cache *sfmmu_cache; 2386 uint_t forcefree; 2387 2388 hblktag.htag_id = sfmmup; 2389 hmeshift = HME_HASH_SHIFT(size); 2390 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift); 2391 hblktag.htag_rehash = HME_HASH_REHASH(size); 2392 2393 ttearray_realloc: 2394 2395 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, hblkpa, 2396 pr_hblk, prevpa, &list); 2397 2398 /* 2399 * We block until hblk_reserve_lock is released; it's held by 2400 * the thread temporarily using hblk_reserve until hblk_reserve is 2401 * replaced by an hblk from sfmmu8_cache.
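 *
 * The mutex_enter/mutex_exit pair below is used purely as a barrier:
 * we never need to hold hblk_reserve_lock ourselves, we only need to
 * wait until its owner has installed the replacement hblk, and then
 * we retry the hash search from scratch.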
2402 */ 2403 if (hmeblkp == (struct hme_blk *)hblk_reserve && 2404 hblk_reserve_thread != curthread) { 2405 SFMMU_HASH_UNLOCK(hmebp); 2406 mutex_enter(&hblk_reserve_lock); 2407 mutex_exit(&hblk_reserve_lock); 2408 SFMMU_STAT(sf_hblk_reserve_hit); 2409 SFMMU_HASH_LOCK(hmebp); 2410 goto ttearray_realloc; 2411 } 2412 2413 if (hmeblkp == NULL) { 2414 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size, 2415 hblktag, flags); 2416 } else { 2417 /* 2418 * It is possible for 8k and 64k hblks to collide since they 2419 * have the same rehash value. This is because we 2420 * lazily free hblks and 8K/64K blks could be lingering. 2421 * If we find a size mismatch we free the block and try again. 2422 */ 2423 if (get_hblk_ttesz(hmeblkp) != size) { 2424 ASSERT(!hmeblkp->hblk_vcnt); 2425 ASSERT(!hmeblkp->hblk_hmecnt); 2426 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, pr_hblk); 2427 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 2428 goto ttearray_realloc; 2429 } 2430 if (hmeblkp->hblk_shw_bit) { 2431 /* 2432 * If the hblk was previously used as a shadow hblk then 2433 * we will change it to a normal hblk. 2434 */ 2435 if (hmeblkp->hblk_shw_mask) { 2436 sfmmu_shadow_hcleanup(sfmmup, hmeblkp, hmebp); 2437 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 2438 goto ttearray_realloc; 2439 } else { 2440 hmeblkp->hblk_shw_bit = 0; 2441 } 2442 } 2443 SFMMU_STAT(sf_hblk_hit); 2444 } 2445 2446 /* 2447 * hat_memload() should never call kmem_cache_free(); see block 2448 * comment showing the stacktrace in sfmmu_hblk_alloc(); 2449 * enqueue each hblk in the list to the reserve list if it was created 2450 * from sfmmu8_cache *and* sfmmup == KHATID. 2451 */ 2452 forcefree = (sfmmup == KHATID) ? 1 : 0; 2453 while ((pr_hblk = list) != NULL) { 2454 list = pr_hblk->hblk_next; 2455 sfmmu_cache = get_hblk_cache(pr_hblk); 2456 if ((sfmmu_cache == sfmmu8_cache) && 2457 sfmmu_put_free_hblk(pr_hblk, forcefree)) 2458 continue; 2459 2460 ASSERT(sfmmup != KHATID); 2461 kmem_cache_free(sfmmu_cache, pr_hblk); 2462 } 2463 2464 ASSERT(get_hblk_ttesz(hmeblkp) == size); 2465 ASSERT(!hmeblkp->hblk_shw_bit); 2466 2467 return (hmeblkp); 2468 } 2469 2470 /* 2471 * Function adds a tte entry into the hmeblk. It returns 0 if successful and 1 2472 * otherwise. 2473 */ 2474 static int 2475 sfmmu_tteload_addentry(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, tte_t *ttep, 2476 caddr_t vaddr, page_t **pps, uint_t flags) 2477 { 2478 page_t *pp = *pps; 2479 int hmenum, size, remap; 2480 tte_t tteold, flush_tte; 2481 #ifdef DEBUG 2482 tte_t orig_old; 2483 #endif /* DEBUG */ 2484 struct sf_hment *sfhme; 2485 kmutex_t *pml, *pmtx; 2486 hatlock_t *hatlockp; 2487 2488 /* 2489 * Remove this panic when we decide to let user virtual address 2490 * space be >= USERLIMIT. 2491 */ 2492 if (!TTE_IS_PRIVILEGED(ttep) && vaddr >= (caddr_t)USERLIMIT) 2493 panic("user addr %p in kernel space", vaddr); 2494 #if defined(TTE_IS_GLOBAL) 2495 if (TTE_IS_GLOBAL(ttep)) 2496 panic("sfmmu_tteload: creating global tte"); 2497 #endif 2498 2499 #ifdef DEBUG 2500 if (pf_is_memory(sfmmu_ttetopfn(ttep, vaddr)) && 2501 !TTE_IS_PCACHEABLE(ttep) && !sfmmu_allow_nc_trans) 2502 panic("sfmmu_tteload: non cacheable memory tte"); 2503 #endif /* DEBUG */ 2504 2505 if ((flags & HAT_LOAD_SHARE) || !TTE_IS_REF(ttep) || 2506 !TTE_IS_MOD(ttep)) { 2507 /* 2508 * Don't load TSB for dummy as in ISM. Also don't preload 2509 * the TSB if the TTE isn't writable since we're likely to 2510 * fault on it again -- preloading can be fairly expensive.
2511 */ 2512 flags |= SFMMU_NO_TSBLOAD; 2513 } 2514 2515 size = TTE_CSZ(ttep); 2516 switch (size) { 2517 case TTE8K: 2518 SFMMU_STAT(sf_tteload8k); 2519 break; 2520 case TTE64K: 2521 SFMMU_STAT(sf_tteload64k); 2522 break; 2523 case TTE512K: 2524 SFMMU_STAT(sf_tteload512k); 2525 break; 2526 case TTE4M: 2527 SFMMU_STAT(sf_tteload4m); 2528 break; 2529 case (TTE32M): 2530 SFMMU_STAT(sf_tteload32m); 2531 ASSERT(mmu_page_sizes == max_mmu_page_sizes); 2532 break; 2533 case (TTE256M): 2534 SFMMU_STAT(sf_tteload256m); 2535 ASSERT(mmu_page_sizes == max_mmu_page_sizes); 2536 break; 2537 } 2538 2539 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size))); 2540 2541 HBLKTOHME_IDX(sfhme, hmeblkp, vaddr, hmenum); 2542 2543 /* 2544 * Need to grab mlist lock here so that pageunload 2545 * will not change tte behind us. 2546 */ 2547 if (pp) { 2548 pml = sfmmu_mlist_enter(pp); 2549 } 2550 2551 sfmmu_copytte(&sfhme->hme_tte, &tteold); 2552 /* 2553 * Look for corresponding hment and if valid verify 2554 * pfns are equal. 2555 */ 2556 remap = TTE_IS_VALID(&tteold); 2557 if (remap) { 2558 pfn_t new_pfn, old_pfn; 2559 2560 old_pfn = TTE_TO_PFN(vaddr, &tteold); 2561 new_pfn = TTE_TO_PFN(vaddr, ttep); 2562 2563 if (flags & HAT_LOAD_REMAP) { 2564 /* make sure we are remapping same type of pages */ 2565 if (pf_is_memory(old_pfn) != pf_is_memory(new_pfn)) { 2566 panic("sfmmu_tteload - tte remap io<->memory"); 2567 } 2568 if (old_pfn != new_pfn && 2569 (pp != NULL || sfhme->hme_page != NULL)) { 2570 panic("sfmmu_tteload - tte remap pp != NULL"); 2571 } 2572 } else if (old_pfn != new_pfn) { 2573 panic("sfmmu_tteload - tte remap, hmeblkp 0x%p", 2574 (void *)hmeblkp); 2575 } 2576 ASSERT(TTE_CSZ(&tteold) == TTE_CSZ(ttep)); 2577 } 2578 2579 if (pp) { 2580 if (size == TTE8K) { 2581 /* 2582 * Handle VAC consistency 2583 */ 2584 if (!remap && (cache & CACHE_VAC) && !PP_ISNC(pp)) { 2585 sfmmu_vac_conflict(sfmmup, vaddr, pp); 2586 } 2587 2588 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) { 2589 pmtx = sfmmu_page_enter(pp); 2590 PP_CLRRO(pp); 2591 sfmmu_page_exit(pmtx); 2592 } else if (!PP_ISMAPPED(pp) && 2593 (!TTE_IS_WRITABLE(ttep)) && !(PP_ISMOD(pp))) { 2594 pmtx = sfmmu_page_enter(pp); 2595 if (!(PP_ISMOD(pp))) { 2596 PP_SETRO(pp); 2597 } 2598 sfmmu_page_exit(pmtx); 2599 } 2600 2601 } else if (sfmmu_pagearray_setup(vaddr, pps, ttep, remap)) { 2602 /* 2603 * sfmmu_pagearray_setup failed so return 2604 */ 2605 sfmmu_mlist_exit(pml); 2606 return (1); 2607 } 2608 } 2609 2610 /* 2611 * Make sure hment is not on a mapping list. 2612 */ 2613 ASSERT(remap || (sfhme->hme_page == NULL)); 2614 2615 /* if it is not a remap then hme->next better be NULL */ 2616 ASSERT((!remap) ? sfhme->hme_next == NULL : 1); 2617 2618 if (flags & HAT_LOAD_LOCK) { 2619 if (((int)hmeblkp->hblk_lckcnt + 1) >= MAX_HBLK_LCKCNT) { 2620 panic("too high lckcnt-hmeblk %p", 2621 (void *)hmeblkp); 2622 } 2623 atomic_add_16(&hmeblkp->hblk_lckcnt, 1); 2624 2625 HBLK_STACK_TRACE(hmeblkp, HBLK_LOCK); 2626 } 2627 2628 if (pp && PP_ISNC(pp)) { 2629 /* 2630 * If the physical page is marked to be uncacheable, like 2631 * by a vac conflict, make sure the new mapping is also 2632 * uncacheable. 
2633 */ 2634 TTE_CLR_VCACHEABLE(ttep); 2635 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR); 2636 } 2637 ttep->tte_hmenum = hmenum; 2638 2639 #ifdef DEBUG 2640 orig_old = tteold; 2641 #endif /* DEBUG */ 2642 2643 while (sfmmu_modifytte_try(&tteold, ttep, &sfhme->hme_tte) < 0) { 2644 if ((sfmmup == KHATID) && 2645 (flags & (HAT_LOAD_LOCK | HAT_LOAD_REMAP))) { 2646 sfmmu_copytte(&sfhme->hme_tte, &tteold); 2647 } 2648 #ifdef DEBUG 2649 chk_tte(&orig_old, &tteold, ttep, hmeblkp); 2650 #endif /* DEBUG */ 2651 } 2652 2653 if (!TTE_IS_VALID(&tteold)) { 2654 2655 atomic_add_16(&hmeblkp->hblk_vcnt, 1); 2656 atomic_add_long(&sfmmup->sfmmu_ttecnt[size], 1); 2657 2658 /* 2659 * HAT_RELOAD_SHARE has been deprecated with lpg DISM. 2660 */ 2661 2662 if (size > TTE8K && (flags & HAT_LOAD_SHARE) == 0 && 2663 sfmmup != ksfmmup) { 2664 /* 2665 * If this is the first large mapping for the process 2666 * we must force any CPUs running this process to TL=0 2667 * where they will reload the HAT flags from the 2668 * tsbmiss area. This is necessary to make the large 2669 * mappings we are about to load visible to those CPUs; 2670 * otherwise they'll loop forever calling pagefault() 2671 * since we don't search large hash chains by default. 2672 */ 2673 hatlockp = sfmmu_hat_enter(sfmmup); 2674 if (size == TTE512K && 2675 !SFMMU_FLAGS_ISSET(sfmmup, HAT_512K_FLAG)) { 2676 SFMMU_FLAGS_SET(sfmmup, HAT_512K_FLAG); 2677 sfmmu_sync_mmustate(sfmmup); 2678 } else if (size == TTE4M && 2679 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4M_FLAG)) { 2680 SFMMU_FLAGS_SET(sfmmup, HAT_4M_FLAG); 2681 sfmmu_sync_mmustate(sfmmup); 2682 } else if (size == TTE64K && 2683 !SFMMU_FLAGS_ISSET(sfmmup, HAT_64K_FLAG)) { 2684 SFMMU_FLAGS_SET(sfmmup, HAT_64K_FLAG); 2685 /* no sync mmustate; 64K shares 8K hashes */ 2686 } else if (mmu_page_sizes == max_mmu_page_sizes) { 2687 if (size == TTE32M && 2688 !SFMMU_FLAGS_ISSET(sfmmup, HAT_32M_FLAG)) { 2689 SFMMU_FLAGS_SET(sfmmup, HAT_32M_FLAG); 2690 sfmmu_sync_mmustate(sfmmup); 2691 } else if (size == TTE256M && 2692 !SFMMU_FLAGS_ISSET(sfmmup, HAT_256M_FLAG)) { 2693 SFMMU_FLAGS_SET(sfmmup, HAT_256M_FLAG); 2694 sfmmu_sync_mmustate(sfmmup); 2695 } 2696 } 2697 if (size >= TTE4M && (flags & HAT_LOAD_TEXT) && 2698 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) { 2699 SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG); 2700 } 2701 sfmmu_hat_exit(hatlockp); 2702 } 2703 } 2704 ASSERT(TTE_IS_VALID(&sfhme->hme_tte)); 2705 2706 flush_tte.tte_intlo = (tteold.tte_intlo ^ ttep->tte_intlo) & 2707 hw_tte.tte_intlo; 2708 flush_tte.tte_inthi = (tteold.tte_inthi ^ ttep->tte_inthi) & 2709 hw_tte.tte_inthi; 2710 2711 if (remap && (flush_tte.tte_inthi || flush_tte.tte_intlo)) { 2712 /* 2713 * If remap and new tte differs from old tte we need 2714 * to sync the mod bit and flush TLB/TSB. We don't 2715 * need to sync ref bit because we currently always set 2716 * ref bit in tteload. 2717 */ 2718 ASSERT(TTE_IS_REF(ttep)); 2719 if (TTE_IS_MOD(&tteold)) { 2720 sfmmu_ttesync(sfmmup, vaddr, &tteold, pp); 2721 } 2722 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 0); 2723 xt_sync(sfmmup->sfmmu_cpusran); 2724 } 2725 2726 if ((flags & SFMMU_NO_TSBLOAD) == 0) { 2727 /* 2728 * We only preload 8K and 4M mappings into the TSB, since 2729 * 64K and 512K mappings are replicated and hence don't 2730 * have a single, unique TSB entry. Ditto for 32M/256M. 
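 * (A 64K or 512K TTE is instead entered using the base page size TSB
 * pointer, so one such mapping may end up replicated under several
 * TSB tags; sfmmu_unload_tsb_range() above exists to shoot those
 * replicas down.)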
2731 */ 2732 if (size == TTE8K || size == TTE4M) { 2733 hatlockp = sfmmu_hat_enter(sfmmup); 2734 sfmmu_load_tsb(sfmmup, vaddr, &sfhme->hme_tte, size); 2735 sfmmu_hat_exit(hatlockp); 2736 } 2737 } 2738 if (pp) { 2739 if (!remap) { 2740 HME_ADD(sfhme, pp); 2741 atomic_add_16(&hmeblkp->hblk_hmecnt, 1); 2742 ASSERT(hmeblkp->hblk_hmecnt > 0); 2743 2744 /* 2745 * Cannot ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS); 2746 * see pageunload() for comment. 2747 */ 2748 } 2749 sfmmu_mlist_exit(pml); 2750 } 2751 2752 return (0); 2753 } 2754 /* 2755 * Function unlocks the hash bucket. 2756 */ 2757 static void 2758 sfmmu_tteload_release_hashbucket(struct hmehash_bucket *hmebp) 2759 { 2760 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 2761 SFMMU_HASH_UNLOCK(hmebp); 2762 } 2763 2764 /* 2765 * Function that checks and sets up the page array for a large 2766 * translation. It sets the p_vcolor, p_index, and p_ro fields, and 2767 * assumes addr and the pfnum of the first page are properly aligned. 2768 * It checks for physical contiguity; if that check fails it returns 2769 * nonzero. 2770 */ 2771 static int 2772 sfmmu_pagearray_setup(caddr_t addr, page_t **pps, tte_t *ttep, int remap) 2773 { 2774 int i, index, ttesz, osz; 2775 pfn_t pfnum; 2776 pgcnt_t npgs; 2777 int cflags = 0; 2778 page_t *pp, *pp1; 2779 kmutex_t *pmtx; 2780 int vac_err = 0; 2781 int newidx = 0; 2782 2783 ttesz = TTE_CSZ(ttep); 2784 2785 ASSERT(ttesz > TTE8K); 2786 2787 npgs = TTEPAGES(ttesz); 2788 index = PAGESZ_TO_INDEX(ttesz); 2789 2790 pfnum = (*pps)->p_pagenum; 2791 ASSERT(IS_P2ALIGNED(pfnum, npgs)); 2792 2793 /* 2794 * Save the first pp so we can do HAT_TMPNC at the end. 2795 */ 2796 pp1 = *pps; 2797 osz = fnd_mapping_sz(pp1); 2798 2799 for (i = 0; i < npgs; i++, pps++) { 2800 pp = *pps; 2801 ASSERT(PAGE_LOCKED(pp)); 2802 ASSERT(pp->p_szc >= ttesz); 2803 ASSERT(pp->p_szc == pp1->p_szc); 2804 ASSERT(sfmmu_mlist_held(pp)); 2805 2806 /* 2807 * XXX is it possible to maintain P_RO on the root only? 2808 */ 2809 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) { 2810 pmtx = sfmmu_page_enter(pp); 2811 PP_CLRRO(pp); 2812 sfmmu_page_exit(pmtx); 2813 } else if (!PP_ISMAPPED(pp) && !TTE_IS_WRITABLE(ttep) && 2814 !PP_ISMOD(pp)) { 2815 pmtx = sfmmu_page_enter(pp); 2816 if (!(PP_ISMOD(pp))) { 2817 PP_SETRO(pp); 2818 } 2819 sfmmu_page_exit(pmtx); 2820 } 2821 2822 /* 2823 * If this is a remap we skip vac & contiguity checks. 2824 */ 2825 if (remap) 2826 continue; 2827 2828 /* 2829 * set p_vcolor and detect any vac conflicts. 2830 */ 2831 if (vac_err == 0) { 2832 vac_err = sfmmu_vacconflict_array(addr, pp, &cflags); 2833 2834 } 2835 2836 /* 2837 * Save current index in case we need to undo it. 2838 * Note: "PAGESZ_TO_INDEX(sz) (1 << (sz))" 2839 * "SFMMU_INDEX_SHIFT 6" 2840 * "SFMMU_INDEX_MASK ((1 << SFMMU_INDEX_SHIFT) - 1)" 2841 * "PP_MAPINDEX(p_index) (p_index & SFMMU_INDEX_MASK)" 2842 * 2843 * So: index = PAGESZ_TO_INDEX(ttesz); 2844 * if ttesz == 1 then index = 0x2 2845 * 2 then index = 0x4 2846 * 3 then index = 0x8 2847 * 4 then index = 0x10 2848 * 5 then index = 0x20 2849 * The code below checks if it's a new pagesize (i.e., newidx) 2850 * in case we need to take it back out of p_index, 2851 * and then or's the new index into the existing index. 2852 */ 2853 if ((PP_MAPINDEX(pp) & index) == 0) 2854 newidx = 1; 2855 pp->p_index = (PP_MAPINDEX(pp) | index); 2856 2857 /* 2858 * contiguity check 2859 */ 2860 if (pp->p_pagenum != pfnum) { 2861 /* 2862 * If we fail the contiguity test then 2863 * the only thing we need to fix is the p_index field.
2864 * We might get a few extra flushes but since this 2865 * path is rare that is ok. The p_ro field will 2866 * get automatically fixed on the next tteload to 2867 * the page. NO TNC bit is set yet. 2868 */ 2869 while (i >= 0) { 2870 pp = *pps; 2871 if (newidx) 2872 pp->p_index = (PP_MAPINDEX(pp) & 2873 ~index); 2874 pps--; 2875 i--; 2876 } 2877 return (1); 2878 } 2879 pfnum++; 2880 addr += MMU_PAGESIZE; 2881 } 2882 2883 if (vac_err) { 2884 if (ttesz > osz) { 2885 /* 2886 * There are some smaller mappings that cause vac 2887 * conflicts. Convert all existing small mappings to 2888 * TNC. 2889 */ 2890 SFMMU_STAT_ADD(sf_uncache_conflict, npgs); 2891 sfmmu_page_cache_array(pp1, HAT_TMPNC, CACHE_FLUSH, 2892 npgs); 2893 } else { 2894 /* EMPTY */ 2895 /* 2896 * If a big page mapping already exists, the 2897 * whole existing big page 2898 * already has the TNC setting. No need to convert to 2899 * TNC again. 2900 */ 2901 ASSERT(PP_ISTNC(pp1)); 2902 } 2903 } 2904 2905 return (0); 2906 } 2907 2908 /* 2909 * Routine that detects vac conflicts for a large page. It also 2910 * sets the virtual color for all pp's of this big mapping. 2911 */ 2912 static int 2913 sfmmu_vacconflict_array(caddr_t addr, page_t *pp, int *cflags) 2914 { 2915 int vcolor, ocolor; 2916 2917 ASSERT(sfmmu_mlist_held(pp)); 2918 2919 if (PP_ISNC(pp)) { 2920 return (HAT_TMPNC); 2921 } 2922 2923 vcolor = addr_to_vcolor(addr); 2924 if (PP_NEWPAGE(pp)) { 2925 PP_SET_VCOLOR(pp, vcolor); 2926 return (0); 2927 } 2928 2929 ocolor = PP_GET_VCOLOR(pp); 2930 if (ocolor == vcolor) { 2931 return (0); 2932 } 2933 2934 if (!PP_ISMAPPED(pp)) { 2935 /* 2936 * The previous user of the page had a different color, 2937 * but since there are no current users 2938 * we just flush the cache and change the color. 2939 * As an optimization for large pages we flush the 2940 * entire cache of that color and set a flag. 2941 */ 2942 SFMMU_STAT(sf_pgcolor_conflict); 2943 if (!CacheColor_IsFlushed(*cflags, ocolor)) { 2944 CacheColor_SetFlushed(*cflags, ocolor); 2945 sfmmu_cache_flushcolor(ocolor, pp->p_pagenum); 2946 } 2947 PP_SET_VCOLOR(pp, vcolor); 2948 return (0); 2949 } 2950 2951 /* 2952 * We got a real conflict with a current mapping. 2953 * Set flags to start uncaching all mappings 2954 * and return failure so we restart looping over 2955 * the pp array from the beginning. 2956 */ 2957 return (HAT_TMPNC); 2958 } 2959 2960 /* 2961 * Creates a large page shadow hmeblk for a tte. 2962 * The purpose of this routine is to allow us to do quick unloads because 2963 * the vm layer can easily pass a very large but sparsely populated range.
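 *
 * A shadow hmeblk is created at the next larger mapping size, and its
 * hblk_shw_mask records, one bit per aligned sub-range (vshift 0..7
 * below), which sub-ranges may still contain smaller hmeblks. An
 * unload can then skip any sub-range whose bit is clear instead of
 * probing every hash chain beneath it.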
2964 */ 2965 static struct hme_blk * 2966 sfmmu_shadow_hcreate(sfmmu_t *sfmmup, caddr_t vaddr, int ttesz, uint_t flags) 2967 { 2968 struct hmehash_bucket *hmebp; 2969 hmeblk_tag hblktag; 2970 int hmeshift, size, vshift; 2971 uint_t shw_mask, newshw_mask; 2972 struct hme_blk *hmeblkp; 2973 2974 ASSERT(sfmmup != KHATID); 2975 if (mmu_page_sizes == max_mmu_page_sizes) { 2976 ASSERT(ttesz < TTE256M); 2977 } else { 2978 ASSERT(ttesz < TTE4M); 2979 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0); 2980 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0); 2981 } 2982 2983 if (ttesz == TTE8K) { 2984 size = TTE512K; 2985 } else { 2986 size = ++ttesz; 2987 } 2988 2989 hblktag.htag_id = sfmmup; 2990 hmeshift = HME_HASH_SHIFT(size); 2991 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift); 2992 hblktag.htag_rehash = HME_HASH_REHASH(size); 2993 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift); 2994 2995 SFMMU_HASH_LOCK(hmebp); 2996 2997 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 2998 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve); 2999 if (hmeblkp == NULL) { 3000 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size, 3001 hblktag, flags); 3002 } 3003 ASSERT(hmeblkp); 3004 if (!hmeblkp->hblk_shw_mask) { 3005 /* 3006 * If this is an unused hblk it was just allocated, or it 3007 * could potentially be a previous large page hblk, so we 3008 * need to set the shadow bit. 3009 */ 3010 hmeblkp->hblk_shw_bit = 1; 3011 } 3012 ASSERT(hmeblkp->hblk_shw_bit == 1); 3013 vshift = vaddr_to_vshift(hblktag, vaddr, size); 3014 ASSERT(vshift < 8); 3015 /* 3016 * Atomically set the shw mask bit. 3017 */ 3018 do { 3019 shw_mask = hmeblkp->hblk_shw_mask; 3020 newshw_mask = shw_mask | (1 << vshift); 3021 newshw_mask = cas32(&hmeblkp->hblk_shw_mask, shw_mask, 3022 newshw_mask); 3023 } while (newshw_mask != shw_mask); 3024 3025 SFMMU_HASH_UNLOCK(hmebp); 3026 3027 return (hmeblkp); 3028 } 3029 3030 /* 3031 * This routine cleans up a previous shadow hmeblk and changes it to 3032 * a regular hblk. This happens rarely, but it is possible 3033 * when a process wants to use large pages and there are hblks still 3034 * lying around from the previous address space (as) that used these hmeblks. 3035 * The alternative was to clean up the shadow hblks at unload time, 3036 * but since so few user processes actually use large pages, it is 3037 * better to be lazy and clean up at this time.
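 *
 * The cleanup below walks the shadow block's address range one hash
 * level down (hashno = size - 1), recursing through any nested shadow
 * blocks via sfmmu_free_hblks(), and frees every empty child it finds
 * along the way.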
3038 */ 3039 static void 3040 sfmmu_shadow_hcleanup(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, 3041 struct hmehash_bucket *hmebp) 3042 { 3043 caddr_t addr, endaddr; 3044 int hashno, size; 3045 3046 ASSERT(hmeblkp->hblk_shw_bit); 3047 3048 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 3049 3050 if (!hmeblkp->hblk_shw_mask) { 3051 hmeblkp->hblk_shw_bit = 0; 3052 return; 3053 } 3054 addr = (caddr_t)get_hblk_base(hmeblkp); 3055 endaddr = get_hblk_endaddr(hmeblkp); 3056 size = get_hblk_ttesz(hmeblkp); 3057 hashno = size - 1; 3058 ASSERT(hashno > 0); 3059 SFMMU_HASH_UNLOCK(hmebp); 3060 3061 sfmmu_free_hblks(sfmmup, addr, endaddr, hashno); 3062 3063 SFMMU_HASH_LOCK(hmebp); 3064 } 3065 3066 static void 3067 sfmmu_free_hblks(sfmmu_t *sfmmup, caddr_t addr, caddr_t endaddr, 3068 int hashno) 3069 { 3070 int hmeshift, shadow = 0; 3071 hmeblk_tag hblktag; 3072 struct hmehash_bucket *hmebp; 3073 struct hme_blk *hmeblkp; 3074 struct hme_blk *nx_hblk, *pr_hblk, *list = NULL; 3075 uint64_t hblkpa, prevpa, nx_pa; 3076 3077 ASSERT(hashno > 0); 3078 hblktag.htag_id = sfmmup; 3079 hblktag.htag_rehash = hashno; 3080 3081 hmeshift = HME_HASH_SHIFT(hashno); 3082 3083 while (addr < endaddr) { 3084 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3085 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 3086 SFMMU_HASH_LOCK(hmebp); 3087 /* inline HME_HASH_SEARCH */ 3088 hmeblkp = hmebp->hmeblkp; 3089 hblkpa = hmebp->hmeh_nextpa; 3090 prevpa = 0; 3091 pr_hblk = NULL; 3092 while (hmeblkp) { 3093 ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp)); 3094 if (HTAGS_EQ(hmeblkp->hblk_tag, hblktag)) { 3095 /* found hme_blk */ 3096 if (hmeblkp->hblk_shw_bit) { 3097 if (hmeblkp->hblk_shw_mask) { 3098 shadow = 1; 3099 sfmmu_shadow_hcleanup(sfmmup, 3100 hmeblkp, hmebp); 3101 break; 3102 } else { 3103 hmeblkp->hblk_shw_bit = 0; 3104 } 3105 } 3106 3107 /* 3108 * hblk_hmecnt and hblk_vcnt could be nonzero 3109 * since hblk_unload() does not guarantee that. 3110 * 3111 * XXX - this could cause tteload() to spin 3112 * where sfmmu_shadow_hcleanup() is called. 3113 */ 3114 } 3115 3116 nx_hblk = hmeblkp->hblk_next; 3117 nx_pa = hmeblkp->hblk_nextpa; 3118 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 3119 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, 3120 pr_hblk); 3121 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 3122 } else { 3123 pr_hblk = hmeblkp; 3124 prevpa = hblkpa; 3125 } 3126 hmeblkp = nx_hblk; 3127 hblkpa = nx_pa; 3128 } 3129 3130 SFMMU_HASH_UNLOCK(hmebp); 3131 3132 if (shadow) { 3133 /* 3134 * We found another shadow hblk, so we cleaned up 3135 * its children. We need to go back and clean up 3136 * the original hblk, so we don't advance the 3137 * addr. 3138 */ 3139 shadow = 0; 3140 } else { 3141 addr = (caddr_t)roundup((uintptr_t)addr + 1, 3142 (1 << hmeshift)); 3143 } 3144 } 3145 sfmmu_hblks_list_purge(&list); 3146 } 3147 3148 /* 3149 * Release one hardware address translation lock on the given address range. 3150 */ 3151 void 3152 hat_unlock(struct hat *sfmmup, caddr_t addr, size_t len) 3153 { 3154 struct hmehash_bucket *hmebp; 3155 hmeblk_tag hblktag; 3156 int hmeshift, hashno = 1; 3157 struct hme_blk *hmeblkp, *list = NULL; 3158 caddr_t endaddr; 3159 3160 ASSERT(sfmmup != NULL); 3161 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 3162 3163 ASSERT((sfmmup == ksfmmup) || 3164 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 3165 ASSERT((len & MMU_PAGEOFFSET) == 0); 3166 endaddr = addr + len; 3167 hblktag.htag_id = sfmmup; 3168 3169 /* 3170 * Spitfire supports 4 page sizes.
3171 * Most pages are expected to be of the smallest page size (8K) and 3172 * these will not need to be rehashed. 64K pages also don't need to be 3173 * rehashed because an hmeblk spans 64K of address space. 512K pages 3174 * might need 1 rehash and 4M pages might need 2 rehashes. 3175 */ 3176 while (addr < endaddr) { 3177 hmeshift = HME_HASH_SHIFT(hashno); 3178 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3179 hblktag.htag_rehash = hashno; 3180 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 3181 3182 SFMMU_HASH_LOCK(hmebp); 3183 3184 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 3185 if (hmeblkp != NULL) { 3186 /* 3187 * If we encounter a shadow hmeblk then 3188 * we know there are no valid hmeblks mapping 3189 * this address at this size or larger. 3190 * Just increment address by the smallest 3191 * page size. 3192 */ 3193 if (hmeblkp->hblk_shw_bit) { 3194 addr += MMU_PAGESIZE; 3195 } else { 3196 addr = sfmmu_hblk_unlock(hmeblkp, addr, 3197 endaddr); 3198 } 3199 SFMMU_HASH_UNLOCK(hmebp); 3200 hashno = 1; 3201 continue; 3202 } 3203 SFMMU_HASH_UNLOCK(hmebp); 3204 3205 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 3206 /* 3207 * We have traversed the whole list and rehashed 3208 * if necessary without finding the address to unlock, 3209 * which should never happen. 3210 */ 3211 panic("sfmmu_unlock: addr not found. " 3212 "addr %p hat %p", (void *)addr, (void *)sfmmup); 3213 } else { 3214 hashno++; 3215 } 3216 } 3217 3218 sfmmu_hblks_list_purge(&list); 3219 } 3220 3221 /* 3222 * Function to unlock a range of addresses in an hmeblk. It returns the 3223 * next address that needs to be unlocked. 3224 * Should be called with the hash lock held. 3225 */ 3226 static caddr_t 3227 sfmmu_hblk_unlock(struct hme_blk *hmeblkp, caddr_t addr, caddr_t endaddr) 3228 { 3229 struct sf_hment *sfhme; 3230 tte_t tteold, ttemod; 3231 int ttesz, ret; 3232 3233 ASSERT(in_hblk_range(hmeblkp, addr)); 3234 ASSERT(hmeblkp->hblk_shw_bit == 0); 3235 3236 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 3237 ttesz = get_hblk_ttesz(hmeblkp); 3238 3239 HBLKTOHME(sfhme, hmeblkp, addr); 3240 while (addr < endaddr) { 3241 readtte: 3242 sfmmu_copytte(&sfhme->hme_tte, &tteold); 3243 if (TTE_IS_VALID(&tteold)) { 3244 3245 ttemod = tteold; 3246 3247 ret = sfmmu_modifytte_try(&tteold, &ttemod, 3248 &sfhme->hme_tte); 3249 3250 if (ret < 0) 3251 goto readtte; 3252 3253 if (hmeblkp->hblk_lckcnt == 0) 3254 panic("zero hblk lckcnt"); 3255 3256 if (((uintptr_t)addr + TTEBYTES(ttesz)) > 3257 (uintptr_t)endaddr) 3258 panic("can't unlock large tte"); 3259 3260 ASSERT(hmeblkp->hblk_lckcnt > 0); 3261 atomic_add_16(&hmeblkp->hblk_lckcnt, -1); 3262 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK); 3263 } else { 3264 panic("sfmmu_hblk_unlock: invalid tte"); 3265 } 3266 addr += TTEBYTES(ttesz); 3267 sfhme++; 3268 } 3269 return (addr); 3270 } 3271 3272 /* 3273 * Physical Address Mapping Framework 3274 * 3275 * General rules: 3276 * 3277 * (1) Applies only to seg_kmem memory pages. To make things easier, 3278 * seg_kpm addresses are also accepted by the routines, but nothing 3279 * is done with them since by definition their PA mappings are static. 3280 * (2) hat_add_callback() may only be called while holding the page lock 3281 * SE_SHARED or SE_EXCL of the underlying page (e.g., as_pagelock()). 3282 * (3) prehandler() and posthandler() may not call hat_add_callback() or 3283 * hat_delete_callback(), nor should they allocate memory. Post quiesce 3284 * callbacks may not sleep or acquire adaptive mutex locks.
3285 * (4) Either prehandler() or posthandler() (but not both) may be specified 3286 * as being NULL. Specifying an errhandler() is optional. 3287 * 3288 * Details of using the framework: 3289 * 3290 * registering a callback (hat_register_callback()) 3291 * 3292 * Pass prehandler, posthandler, errhandler addresses 3293 * as described below. If capture_cpus argument is nonzero, 3294 * suspend callback to the prehandler will occur with CPUs 3295 * captured and executing xc_loop() and CPUs will remain 3296 * captured until after the posthandler suspend callback 3297 * occurs. 3298 * 3299 * adding a callback (hat_add_callback()) 3300 * 3301 * as_pagelock(); 3302 * hat_add_callback(); 3303 * save returned pfn in private data structures or program registers; 3304 * as_pageunlock(); 3305 * 3306 * prehandler() 3307 * 3308 * Stop all accesses by physical address to this memory page. 3309 * Called twice: the first, PRESUSPEND, is a context safe to acquire 3310 * adaptive locks. The second, SUSPEND, is called at high PIL with 3311 * CPUs captured so adaptive locks may NOT be acquired (and all spin 3312 * locks must be XCALL_PIL or higher locks). 3313 * 3314 * May return the following errors: 3315 * EIO: A fatal error has occurred. This will result in panic. 3316 * EAGAIN: The page cannot be suspended. This will fail the 3317 * relocation. 3318 * 0: Success. 3319 * 3320 * posthandler() 3321 * 3322 * Save new pfn in private data structures or program registers; 3323 * not allowed to fail (non-zero return values will result in panic). 3324 * 3325 * errhandler() 3326 * 3327 * called when an error occurs related to the callback. Currently 3328 * the only such error is HAT_CB_ERR_LEAKED which indicates that 3329 * a page is being freed, but there are still outstanding callback(s) 3330 * registered on the page. 3331 * 3332 * removing a callback (hat_delete_callback(); e.g., prior to freeing memory) 3333 * 3334 * stop using physical address 3335 * hat_delete_callback(); 3336 * 3337 */ 3338 3339 /* 3340 * Register a callback class. Each subsystem should do this once and 3341 * cache the id_t returned for use in setting up and tearing down callbacks. 3342 * 3343 * There is no facility for removing callback IDs once they are created; 3344 * the "key" should be unique for each module, so in case a module is unloaded 3345 * and subsequently re-loaded, we can recycle the module's previous entry. 3346 */ 3347 id_t 3348 hat_register_callback(int key, 3349 int (*prehandler)(caddr_t, uint_t, uint_t, void *), 3350 int (*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t), 3351 int (*errhandler)(caddr_t, uint_t, uint_t, void *), 3352 int capture_cpus) 3353 { 3354 id_t id; 3355 3356 /* 3357 * Search the table for a pre-existing callback associated with 3358 * the identifier "key". If one exists, we re-use that entry in 3359 * the table for this instance, otherwise we assign the next 3360 * available table slot. 
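 *
 * An illustrative registration, done once at module init (the my_*
 * names and MY_KEY are hypothetical):
 *
 *	static id_t my_cb_id;
 *	...
 *	my_cb_id = hat_register_callback(MY_KEY,
 *	    my_prehandler, my_posthandler, my_errhandler, 0);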
3361 */ 3362 for (id = 0; id < sfmmu_max_cb_id; id++) { 3363 if (sfmmu_cb_table[id].key == key) 3364 break; 3365 } 3366 3367 if (id == sfmmu_max_cb_id) { 3368 id = sfmmu_cb_nextid++; 3369 if (id >= sfmmu_max_cb_id) 3370 panic("hat_register_callback: out of callback IDs"); 3371 } 3372 3373 ASSERT(prehandler != NULL || posthandler != NULL); 3374 3375 sfmmu_cb_table[id].key = key; 3376 sfmmu_cb_table[id].prehandler = prehandler; 3377 sfmmu_cb_table[id].posthandler = posthandler; 3378 sfmmu_cb_table[id].errhandler = errhandler; 3379 sfmmu_cb_table[id].capture_cpus = capture_cpus; 3380 3381 return (id); 3382 } 3383 3384 /* 3385 * Add relocation callbacks to the specified addr/len which will be called 3386 * when relocating the associated page. See the description of pre and 3387 * posthandler above for more details. IMPT: this operation is only valid 3388 * on seg_kmem pages!! 3389 * 3390 * If HAC_PAGELOCK is included in flags, the underlying memory page is 3391 * locked internally so the caller must be able to deal with the callback 3392 * running even before this function has returned. If HAC_PAGELOCK is not 3393 * set, it is assumed that the underlying memory pages are locked. 3394 * 3395 * Since the caller must track the individual page boundaries anyway, 3396 * we only allow a callback to be added to a single page (large 3397 * or small). Thus [addr, addr + len) MUST be contained within a single 3398 * page. 3399 * 3400 * Registering multiple callbacks on the same [addr, addr+len) is supported, 3401 * in which case the corresponding callback will be called once with each 3402 * unique parameter specified. The number of subsequent deletes must match 3403 * since reference counts are held. If a callback is desired for each 3404 * virtual object with the same parameter specified for multiple callbacks, 3405 * a different virtual address should be specified at the time of 3406 * callback registration. 3407 * 3408 * Returns the pfn of the underlying kernel page in *rpfn 3409 * on success, or PFN_INVALID on failure. 3410 * 3411 * Returns values: 3412 * 0: success 3413 * ENOMEM: memory allocation failure (e.g. flags was passed as HAC_NOSLEEP) 3414 * EINVAL: callback ID is not valid 3415 * ENXIO: ["vaddr", "vaddr" + len) is not mapped in the kernel's address 3416 * space, or crosses a page boundary 3417 */ 3418 int 3419 hat_add_callback(id_t callback_id, caddr_t vaddr, uint_t len, uint_t flags, 3420 void *pvt, pfn_t *rpfn) 3421 { 3422 struct hmehash_bucket *hmebp; 3423 hmeblk_tag hblktag; 3424 struct hme_blk *hmeblkp; 3425 int hmeshift, hashno; 3426 caddr_t saddr, eaddr, baseaddr; 3427 struct pa_hment *pahmep, *tpahmep; 3428 struct sf_hment *sfhmep, *osfhmep, *tsfhmep; 3429 kmutex_t *pml; 3430 tte_t tte; 3431 page_t *pp, *rpp; 3432 pfn_t pfn; 3433 int kmflags = (flags & HAC_SLEEP)? KM_SLEEP : KM_NOSLEEP; 3434 int locked = 0; 3435 3436 /* 3437 * For KPM mappings, just return the physical address since we 3438 * don't need to register any callbacks. 
3439 */ 3440 if (IS_KPM_ADDR(vaddr)) { 3441 uint64_t paddr; 3442 SFMMU_KPM_VTOP(vaddr, paddr); 3443 *rpfn = btop(paddr); 3444 return (0); 3445 } 3446 3447 if (callback_id < (id_t)0 || callback_id >= sfmmu_cb_nextid) { 3448 *rpfn = PFN_INVALID; 3449 return (EINVAL); 3450 } 3451 3452 if ((pahmep = kmem_cache_alloc(pa_hment_cache, kmflags)) == NULL) { 3453 *rpfn = PFN_INVALID; 3454 return (ENOMEM); 3455 } 3456 3457 sfhmep = &pahmep->sfment; 3458 3459 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK); 3460 eaddr = saddr + len; 3461 3462 rehash: 3463 /* Find the mapping(s) for this page */ 3464 for (hashno = TTE64K, hmeblkp = NULL; 3465 hmeblkp == NULL && hashno <= mmu_hashcnt; 3466 hashno++) { 3467 hmeshift = HME_HASH_SHIFT(hashno); 3468 hblktag.htag_id = ksfmmup; 3469 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift); 3470 hblktag.htag_rehash = hashno; 3471 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift); 3472 3473 SFMMU_HASH_LOCK(hmebp); 3474 3475 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 3476 3477 if (hmeblkp == NULL) 3478 SFMMU_HASH_UNLOCK(hmebp); 3479 } 3480 3481 if (hmeblkp == NULL) { 3482 *rpfn = PFN_INVALID; 3483 return (ENXIO); 3484 } 3485 3486 /* 3487 * Make sure the boundaries for the callback fall within this 3488 * single mapping. 3489 */ 3490 baseaddr = (caddr_t)get_hblk_base(hmeblkp); 3491 ASSERT(saddr >= baseaddr); 3492 if (eaddr > (caddr_t)get_hblk_endaddr(hmeblkp)) { 3493 SFMMU_HASH_UNLOCK(hmebp); 3494 *rpfn = PFN_INVALID; 3495 return (ENXIO); 3496 } 3497 3498 HBLKTOHME(osfhmep, hmeblkp, saddr); 3499 sfmmu_copytte(&osfhmep->hme_tte, &tte); 3500 3501 ASSERT(TTE_IS_VALID(&tte)); 3502 pfn = sfmmu_ttetopfn(&tte, vaddr); 3503 3504 pp = osfhmep->hme_page; 3505 pml = sfmmu_mlist_enter(pp); 3506 3507 if ((flags & HAC_PAGELOCK) && !locked) { 3508 if (!page_trylock(pp, SE_SHARED)) { 3509 /* 3510 * Somebody is holding the SE_EXCL lock. Drop all 3511 * our locks, look the page up in &kvp, and 3512 * retry. 3513 */ 3514 sfmmu_mlist_exit(pml); 3515 SFMMU_HASH_UNLOCK(hmebp); 3516 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED); 3517 ASSERT(pp != NULL); 3518 rpp = PP_PAGEROOT(pp); 3519 if (rpp != pp) { 3520 page_unlock(pp); 3521 (void) page_lock(rpp, SE_SHARED, NULL, 3522 P_NO_RECLAIM); 3523 } 3524 locked = 1; 3525 goto rehash; 3526 } 3527 locked = 1; 3528 } 3529 3530 if (!PAGE_LOCKED(pp) && !panicstr) 3531 panic("hat_add_callback: page 0x%p not locked", pp); 3532 3533 if (osfhmep->hme_page != pp || pp->p_vnode != &kvp || 3534 pp->p_offset < (u_offset_t)baseaddr || 3535 pp->p_offset > (u_offset_t)eaddr) { 3536 /* 3537 * The page moved before we got our hands on it. Drop 3538 * all the locks and try again. 3539 */ 3540 ASSERT((flags & HAC_PAGELOCK) != 0); 3541 sfmmu_mlist_exit(pml); 3542 SFMMU_HASH_UNLOCK(hmebp); 3543 page_unlock(pp); 3544 locked = 0; 3545 goto rehash; 3546 } 3547 3548 ASSERT(osfhmep->hme_page == pp); 3549 3550 for (tsfhmep = pp->p_mapping; tsfhmep != NULL; 3551 tsfhmep = tsfhmep->hme_next) { 3552 3553 /* 3554 * skip va to pa mappings 3555 */ 3556 if (!IS_PAHME(tsfhmep)) 3557 continue; 3558 3559 tpahmep = tsfhmep->hme_data; 3560 ASSERT(tpahmep != NULL); 3561 3562 /* 3563 * See if the pa_hment already exists.
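 * An existing pa_hment with the same (pvt, vaddr, len) is reference
 * counted rather than duplicated; this is why the interface requires
 * a matching number of hat_delete_callback() calls before the
 * callback is really removed.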
3564 */ 3565 if ((tpahmep->pvt == pvt) && 3566 (tpahmep->addr == vaddr) && 3567 (tpahmep->len == len)) { 3568 ASSERT(tpahmep->cb_id == callback_id); 3569 tpahmep->refcnt++; 3570 pp->p_share++; 3571 3572 sfmmu_mlist_exit(pml); 3573 SFMMU_HASH_UNLOCK(hmebp); 3574 3575 if (locked) 3576 page_unlock(pp); 3577 3578 kmem_cache_free(pa_hment_cache, pahmep); 3579 3580 *rpfn = pfn; 3581 return (0); 3582 } 3583 } 3584 3585 /* 3586 * setup this shiny new pa_hment .. 3587 */ 3588 pp->p_share++; 3589 pahmep->cb_id = callback_id; 3590 pahmep->addr = vaddr; 3591 pahmep->len = len; 3592 pahmep->refcnt = 1; 3593 pahmep->flags = 0; 3594 pahmep->pvt = pvt; 3595 3596 /* 3597 * .. and also set up the sf_hment and link to p_mapping list. 3598 */ 3599 sfhmep->hme_tte.ll = 0; 3600 sfhmep->hme_data = pahmep; 3601 sfhmep->hme_prev = osfhmep; 3602 sfhmep->hme_next = osfhmep->hme_next; 3603 3604 if (osfhmep->hme_next) 3605 osfhmep->hme_next->hme_prev = sfhmep; 3606 3607 osfhmep->hme_next = sfhmep; 3608 3609 sfmmu_mlist_exit(pml); 3610 SFMMU_HASH_UNLOCK(hmebp); 3611 3612 *rpfn = pfn; 3613 if (locked) 3614 page_unlock(pp); 3615 3616 return (0); 3617 } 3618 3619 /* 3620 * Remove the relocation callbacks from the specified addr/len. 3621 */ 3622 void 3623 hat_delete_callback(caddr_t vaddr, uint_t len, void *pvt, uint_t flags) 3624 { 3625 struct hmehash_bucket *hmebp; 3626 hmeblk_tag hblktag; 3627 struct hme_blk *hmeblkp; 3628 int hmeshift, hashno; 3629 caddr_t saddr, eaddr, baseaddr; 3630 struct pa_hment *pahmep; 3631 struct sf_hment *sfhmep, *osfhmep; 3632 kmutex_t *pml; 3633 tte_t tte; 3634 page_t *pp, *rpp; 3635 int locked = 0; 3636 3637 if (IS_KPM_ADDR(vaddr)) 3638 return; 3639 3640 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK); 3641 eaddr = saddr + len; 3642 3643 rehash: 3644 /* Find the mapping(s) for this page */ 3645 for (hashno = TTE64K, hmeblkp = NULL; 3646 hmeblkp == NULL && hashno <= mmu_hashcnt; 3647 hashno++) { 3648 hmeshift = HME_HASH_SHIFT(hashno); 3649 hblktag.htag_id = ksfmmup; 3650 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift); 3651 hblktag.htag_rehash = hashno; 3652 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift); 3653 3654 SFMMU_HASH_LOCK(hmebp); 3655 3656 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 3657 3658 if (hmeblkp == NULL) 3659 SFMMU_HASH_UNLOCK(hmebp); 3660 } 3661 3662 if (hmeblkp == NULL) { 3663 if (!panicstr) { 3664 panic("hat_delete_callback: addr 0x%p not found", 3665 saddr); 3666 } 3667 return; 3668 } 3669 3670 baseaddr = (caddr_t)get_hblk_base(hmeblkp); 3671 HBLKTOHME(osfhmep, hmeblkp, saddr); 3672 3673 sfmmu_copytte(&osfhmep->hme_tte, &tte); 3674 ASSERT(TTE_IS_VALID(&tte)); 3675 3676 pp = osfhmep->hme_page; 3677 pml = sfmmu_mlist_enter(pp); 3678 3679 if ((flags & HAC_PAGELOCK) && !locked) { 3680 if (!page_trylock(pp, SE_SHARED)) { 3681 /* 3682 * Somebody is holding SE_EXCL lock. Drop all 3683 * our locks, lookup the page in &kvp, and 3684 * retry. 3685 */ 3686 sfmmu_mlist_exit(pml); 3687 SFMMU_HASH_UNLOCK(hmebp); 3688 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED); 3689 ASSERT(pp != NULL); 3690 rpp = PP_PAGEROOT(pp); 3691 if (rpp != pp) { 3692 page_unlock(pp); 3693 (void) page_lock(rpp, SE_SHARED, NULL, 3694 P_NO_RECLAIM); 3695 } 3696 locked = 1; 3697 goto rehash; 3698 } 3699 locked = 1; 3700 } 3701 3702 ASSERT(PAGE_LOCKED(pp)); 3703 3704 if (osfhmep->hme_page != pp || pp->p_vnode != &kvp || 3705 pp->p_offset < (u_offset_t)baseaddr || 3706 pp->p_offset > (u_offset_t)eaddr) { 3707 /* 3708 * The page moved before we got our hands on it. 
Drop 3709 * all the locks and try again. 3710 */ 3711 ASSERT((flags & HAC_PAGELOCK) != 0); 3712 sfmmu_mlist_exit(pml); 3713 SFMMU_HASH_UNLOCK(hmebp); 3714 page_unlock(pp); 3715 locked = 0; 3716 goto rehash; 3717 } 3718 3719 ASSERT(osfhmep->hme_page == pp); 3720 3721 for (sfhmep = pp->p_mapping; sfhmep != NULL; 3722 sfhmep = sfhmep->hme_next) { 3723 3724 /* 3725 * skip va<->pa mappings 3726 */ 3727 if (!IS_PAHME(sfhmep)) 3728 continue; 3729 3730 pahmep = sfhmep->hme_data; 3731 ASSERT(pahmep != NULL); 3732 3733 /* 3734 * if pa_hment matches, remove it 3735 */ 3736 if ((pahmep->pvt == pvt) && 3737 (pahmep->addr == vaddr) && 3738 (pahmep->len == len)) { 3739 break; 3740 } 3741 } 3742 3743 if (sfhmep == NULL) { 3744 if (!panicstr) { 3745 panic("hat_delete_callback: pa_hment not found, pp %p", 3746 (void *)pp); 3747 } 3748 return; 3749 } 3750 3751 /* 3752 * Note: at this point a valid kernel mapping must still be 3753 * present on this page. 3754 */ 3755 pp->p_share--; 3756 if (pp->p_share <= 0) 3757 panic("hat_delete_callback: zero p_share"); 3758 3759 if (--pahmep->refcnt == 0) { 3760 if (pahmep->flags != 0) 3761 panic("hat_delete_callback: pa_hment is busy"); 3762 3763 /* 3764 * Remove sfhmep from the mapping list for the page. 3765 */ 3766 if (sfhmep->hme_prev) { 3767 sfhmep->hme_prev->hme_next = sfhmep->hme_next; 3768 } else { 3769 pp->p_mapping = sfhmep->hme_next; 3770 } 3771 3772 if (sfhmep->hme_next) 3773 sfhmep->hme_next->hme_prev = sfhmep->hme_prev; 3774 3775 sfmmu_mlist_exit(pml); 3776 SFMMU_HASH_UNLOCK(hmebp); 3777 3778 if (locked) 3779 page_unlock(pp); 3780 3781 kmem_cache_free(pa_hment_cache, pahmep); 3782 return; 3783 } 3784 3785 sfmmu_mlist_exit(pml); 3786 SFMMU_HASH_UNLOCK(hmebp); 3787 if (locked) 3788 page_unlock(pp); 3789 } 3790 3791 /* 3792 * hat_probe returns 1 if the translation for the address 'addr' is 3793 * loaded, zero otherwise. 3794 * 3795 * hat_probe should be used only for advisory purposes because it may 3796 * occasionally return the wrong value. The implementation must guarantee that 3797 * returning the wrong value is a very rare event. hat_probe is used 3798 * to implement optimizations in the segment drivers.
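 *
 * A minimal usage sketch (hypothetical caller, not taken from any
 * segment driver in this tree):
 *
 *    if (hat_probe(seg->s_as->a_hat, addr) == 0) {
 *        (void) hat_memload(seg->s_as->a_hat, addr, pp,
 *            prot, HAT_LOAD);
 *    }
 *
 * Since the result is advisory, a stale answer may cost the caller
 * an extra fault or load, but never correctness.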
3799 * 3800 */ 3801 int 3802 hat_probe(struct hat *sfmmup, caddr_t addr) 3803 { 3804 pfn_t pfn; 3805 tte_t tte; 3806 3807 ASSERT(sfmmup != NULL); 3808 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 3809 3810 ASSERT((sfmmup == ksfmmup) || 3811 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 3812 3813 if (sfmmup == ksfmmup) { 3814 while ((pfn = sfmmu_vatopfn(addr, sfmmup, &tte)) 3815 == PFN_SUSPENDED) { 3816 sfmmu_vatopfn_suspended(addr, sfmmup, &tte); 3817 } 3818 } else { 3819 pfn = sfmmu_uvatopfn(addr, sfmmup); 3820 } 3821 3822 if (pfn != PFN_INVALID) 3823 return (1); 3824 else 3825 return (0); 3826 } 3827 3828 ssize_t 3829 hat_getpagesize(struct hat *sfmmup, caddr_t addr) 3830 { 3831 tte_t tte; 3832 3833 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 3834 3835 sfmmu_gettte(sfmmup, addr, &tte); 3836 if (TTE_IS_VALID(&tte)) { 3837 return (TTEBYTES(TTE_CSZ(&tte))); 3838 } 3839 return (-1); 3840 } 3841 3842 static void 3843 sfmmu_gettte(struct hat *sfmmup, caddr_t addr, tte_t *ttep) 3844 { 3845 struct hmehash_bucket *hmebp; 3846 hmeblk_tag hblktag; 3847 int hmeshift, hashno = 1; 3848 struct hme_blk *hmeblkp, *list = NULL; 3849 struct sf_hment *sfhmep; 3850 3851 /* support for ISM */ 3852 ism_map_t *ism_map; 3853 ism_blk_t *ism_blkp; 3854 int i; 3855 sfmmu_t *ism_hatid = NULL; 3856 sfmmu_t *locked_hatid = NULL; 3857 3858 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 3859 3860 ism_blkp = sfmmup->sfmmu_iblk; 3861 if (ism_blkp) { 3862 sfmmu_ismhat_enter(sfmmup, 0); 3863 locked_hatid = sfmmup; 3864 } 3865 while (ism_blkp && ism_hatid == NULL) { 3866 ism_map = ism_blkp->iblk_maps; 3867 for (i = 0; ism_map[i].imap_ismhat && i < ISM_MAP_SLOTS; i++) { 3868 if (addr >= ism_start(ism_map[i]) && 3869 addr < ism_end(ism_map[i])) { 3870 sfmmup = ism_hatid = ism_map[i].imap_ismhat; 3871 addr = (caddr_t)(addr - 3872 ism_start(ism_map[i])); 3873 break; 3874 } 3875 } 3876 ism_blkp = ism_blkp->iblk_next; 3877 } 3878 if (locked_hatid) { 3879 sfmmu_ismhat_exit(locked_hatid, 0); 3880 } 3881 3882 hblktag.htag_id = sfmmup; 3883 ttep->ll = 0; 3884 3885 do { 3886 hmeshift = HME_HASH_SHIFT(hashno); 3887 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3888 hblktag.htag_rehash = hashno; 3889 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 3890 3891 SFMMU_HASH_LOCK(hmebp); 3892 3893 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 3894 if (hmeblkp != NULL) { 3895 HBLKTOHME(sfhmep, hmeblkp, addr); 3896 sfmmu_copytte(&sfhmep->hme_tte, ttep); 3897 SFMMU_HASH_UNLOCK(hmebp); 3898 break; 3899 } 3900 SFMMU_HASH_UNLOCK(hmebp); 3901 hashno++; 3902 } while (HME_REHASH(sfmmup) && (hashno <= mmu_hashcnt)); 3903 3904 sfmmu_hblks_list_purge(&list); 3905 } 3906 3907 uint_t 3908 hat_getattr(struct hat *sfmmup, caddr_t addr, uint_t *attr) 3909 { 3910 tte_t tte; 3911 3912 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 3913 3914 sfmmu_gettte(sfmmup, addr, &tte); 3915 if (TTE_IS_VALID(&tte)) { 3916 *attr = sfmmu_ptov_attr(&tte); 3917 return (0); 3918 } 3919 *attr = 0; 3920 return ((uint_t)0xffffffff); 3921 } 3922 3923 /* 3924 * Enables more attributes on specified address range (ie. logical OR) 3925 */ 3926 void 3927 hat_setattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr) 3928 { 3929 if (hat->sfmmu_xhat_provider) { 3930 XHAT_SETATTR(hat, addr, len, attr); 3931 return; 3932 } else { 3933 /* 3934 * This must be a CPU HAT. 
If the address space has 3935 * XHATs attached, change attributes for all of them, 3936 * just in case 3937 */ 3938 ASSERT(hat->sfmmu_as != NULL); 3939 if (hat->sfmmu_as->a_xhat != NULL) 3940 xhat_setattr_all(hat->sfmmu_as, addr, len, attr); 3941 } 3942 3943 sfmmu_chgattr(hat, addr, len, attr, SFMMU_SETATTR); 3944 } 3945 3946 /* 3947 * Assigns attributes to the specified address range. All the attributes 3948 * are specified. 3949 */ 3950 void 3951 hat_chgattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr) 3952 { 3953 if (hat->sfmmu_xhat_provider) { 3954 XHAT_CHGATTR(hat, addr, len, attr); 3955 return; 3956 } else { 3957 /* 3958 * This must be a CPU HAT. If the address space has 3959 * XHATs attached, change attributes for all of them, 3960 * just in case 3961 */ 3962 ASSERT(hat->sfmmu_as != NULL); 3963 if (hat->sfmmu_as->a_xhat != NULL) 3964 xhat_chgattr_all(hat->sfmmu_as, addr, len, attr); 3965 } 3966 3967 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CHGATTR); 3968 } 3969 3970 /* 3971 * Remove attributes on the specified address range (ie. logical NAND) 3972 */ 3973 void 3974 hat_clrattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr) 3975 { 3976 if (hat->sfmmu_xhat_provider) { 3977 XHAT_CLRATTR(hat, addr, len, attr); 3978 return; 3979 } else { 3980 /* 3981 * This must be a CPU HAT. If the address space has 3982 * XHATs attached, change attributes for all of them, 3983 * just in case 3984 */ 3985 ASSERT(hat->sfmmu_as != NULL); 3986 if (hat->sfmmu_as->a_xhat != NULL) 3987 xhat_clrattr_all(hat->sfmmu_as, addr, len, attr); 3988 } 3989 3990 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CLRATTR); 3991 } 3992 3993 /* 3994 * Change attributes on an address range to those specified by attr and mode. 3995 */ 3996 static void 3997 sfmmu_chgattr(struct hat *sfmmup, caddr_t addr, size_t len, uint_t attr, 3998 int mode) 3999 { 4000 struct hmehash_bucket *hmebp; 4001 hmeblk_tag hblktag; 4002 int hmeshift, hashno = 1; 4003 struct hme_blk *hmeblkp, *list = NULL; 4004 caddr_t endaddr; 4005 cpuset_t cpuset; 4006 demap_range_t dmr; 4007 4008 CPUSET_ZERO(cpuset); 4009 4010 ASSERT((sfmmup == ksfmmup) || 4011 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 4012 ASSERT((len & MMU_PAGEOFFSET) == 0); 4013 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0); 4014 4015 if ((attr & PROT_USER) && (mode != SFMMU_CLRATTR) && 4016 ((addr + len) > (caddr_t)USERLIMIT)) { 4017 panic("user addr %p in kernel space", 4018 (void *)addr); 4019 } 4020 4021 endaddr = addr + len; 4022 hblktag.htag_id = sfmmup; 4023 DEMAP_RANGE_INIT(sfmmup, &dmr); 4024 4025 while (addr < endaddr) { 4026 hmeshift = HME_HASH_SHIFT(hashno); 4027 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 4028 hblktag.htag_rehash = hashno; 4029 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 4030 4031 SFMMU_HASH_LOCK(hmebp); 4032 4033 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 4034 if (hmeblkp != NULL) { 4035 /* 4036 * We've encountered a shadow hmeblk so skip the range 4037 * of the next smaller mapping size.
4038 */ 4039 if (hmeblkp->hblk_shw_bit) { 4040 ASSERT(sfmmup != ksfmmup); 4041 ASSERT(hashno > 1); 4042 addr = (caddr_t)P2END((uintptr_t)addr, 4043 TTEBYTES(hashno - 1)); 4044 } else { 4045 addr = sfmmu_hblk_chgattr(sfmmup, 4046 hmeblkp, addr, endaddr, &dmr, attr, mode); 4047 } 4048 SFMMU_HASH_UNLOCK(hmebp); 4049 hashno = 1; 4050 continue; 4051 } 4052 SFMMU_HASH_UNLOCK(hmebp); 4053 4054 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 4055 /* 4056 * We have traversed the whole list and rehashed 4057 * if necessary without finding the address to chgattr. 4058 * This is ok, so we increment the address by the 4059 * smallest hmeblk range for kernel mappings or for 4060 * user mappings with no large pages, and the largest 4061 * hmeblk range, to account for shadow hmeblks, for 4062 * user mappings with large pages and continue. 4063 */ 4064 if (sfmmup == ksfmmup) 4065 addr = (caddr_t)P2END((uintptr_t)addr, 4066 TTEBYTES(1)); 4067 else 4068 addr = (caddr_t)P2END((uintptr_t)addr, 4069 TTEBYTES(hashno)); 4070 hashno = 1; 4071 } else { 4072 hashno++; 4073 } 4074 } 4075 4076 sfmmu_hblks_list_purge(&list); 4077 DEMAP_RANGE_FLUSH(&dmr); 4078 cpuset = sfmmup->sfmmu_cpusran; 4079 xt_sync(cpuset); 4080 } 4081 4082 /* 4083 * This function chgattrs a range of addresses in an hmeblk. It returns the 4084 * next address that needs to be processed. 4085 * It should be called with the hash lock held. 4086 * XXX It should be possible to optimize chgattr by not flushing every time but 4087 * on the other hand: 4088 * 1. do one flush crosscall. 4089 * 2. only flush if we are increasing permissions (make sure this will work) 4090 */ 4091 static caddr_t 4092 sfmmu_hblk_chgattr(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 4093 caddr_t endaddr, demap_range_t *dmrp, uint_t attr, int mode) 4094 { 4095 tte_t tte, tteattr, tteflags, ttemod; 4096 struct sf_hment *sfhmep; 4097 int ttesz; 4098 struct page *pp = NULL; 4099 kmutex_t *pml, *pmtx; 4100 int ret; 4101 int use_demap_range; 4102 #if defined(SF_ERRATA_57) 4103 int check_exec; 4104 #endif 4105 4106 ASSERT(in_hblk_range(hmeblkp, addr)); 4107 ASSERT(hmeblkp->hblk_shw_bit == 0); 4108 4109 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 4110 ttesz = get_hblk_ttesz(hmeblkp); 4111 4112 /* 4113 * Flush the current demap region if addresses have been 4114 * skipped or the page size doesn't match. 4115 */ 4116 use_demap_range = (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp)); 4117 if (use_demap_range) { 4118 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 4119 } else { 4120 DEMAP_RANGE_FLUSH(dmrp); 4121 } 4122 4123 tteattr.ll = sfmmu_vtop_attr(attr, mode, &tteflags); 4124 #if defined(SF_ERRATA_57) 4125 check_exec = (sfmmup != ksfmmup) && 4126 AS_TYPE_64BIT(sfmmup->sfmmu_as) && 4127 TTE_IS_EXECUTABLE(&tteattr); 4128 #endif 4129 HBLKTOHME(sfhmep, hmeblkp, addr); 4130 while (addr < endaddr) { 4131 sfmmu_copytte(&sfhmep->hme_tte, &tte); 4132 if (TTE_IS_VALID(&tte)) { 4133 if ((tte.ll & tteflags.ll) == tteattr.ll) { 4134 /* 4135 * if the new attr is the same as the old, 4136 * continue 4137 */ 4138 goto next_addr; 4139 } 4140 if (!TTE_IS_WRITABLE(&tteattr)) { 4141 /* 4142 * make sure we clear the hw modify bit if we 4143 * are removing write protections 4144 */ 4145 tteflags.tte_intlo |= TTE_HWWR_INT; 4146 } 4147 4148 pml = NULL; 4149 pp = sfhmep->hme_page; 4150 if (pp) { 4151 pml = sfmmu_mlist_enter(pp); 4152 } 4153 4154 if (pp != sfhmep->hme_page) { 4155 /* 4156 * tte must have been unloaded.
4157 */ 4158 ASSERT(pml); 4159 sfmmu_mlist_exit(pml); 4160 continue; 4161 } 4162 4163 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 4164 4165 ttemod = tte; 4166 ttemod.ll = (ttemod.ll & ~tteflags.ll) | tteattr.ll; 4167 ASSERT(TTE_TO_TTEPFN(&ttemod) == TTE_TO_TTEPFN(&tte)); 4168 4169 #if defined(SF_ERRATA_57) 4170 if (check_exec && addr < errata57_limit) 4171 ttemod.tte_exec_perm = 0; 4172 #endif 4173 ret = sfmmu_modifytte_try(&tte, &ttemod, 4174 &sfhmep->hme_tte); 4175 4176 if (ret < 0) { 4177 /* tte changed underneath us */ 4178 if (pml) { 4179 sfmmu_mlist_exit(pml); 4180 } 4181 continue; 4182 } 4183 4184 if (tteflags.tte_intlo & TTE_HWWR_INT) { 4185 /* 4186 * need to sync if we are clearing the modify bit. 4187 */ 4188 sfmmu_ttesync(sfmmup, addr, &tte, pp); 4189 } 4190 4191 if (pp && PP_ISRO(pp)) { 4192 if (tteattr.tte_intlo & TTE_WRPRM_INT) { 4193 pmtx = sfmmu_page_enter(pp); 4194 PP_CLRRO(pp); 4195 sfmmu_page_exit(pmtx); 4196 } 4197 } 4198 4199 if (ret > 0 && use_demap_range) { 4200 DEMAP_RANGE_MARKPG(dmrp, addr); 4201 } else if (ret > 0) { 4202 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 4203 } 4204 4205 if (pml) { 4206 sfmmu_mlist_exit(pml); 4207 } 4208 } 4209 next_addr: 4210 addr += TTEBYTES(ttesz); 4211 sfhmep++; 4212 DEMAP_RANGE_NEXTPG(dmrp); 4213 } 4214 return (addr); 4215 } 4216 4217 /* 4218 * This routine converts virtual attributes to physical ones. It will 4219 * update the tteflags field with the tte mask corresponding to the attributes 4220 * affected and it returns the new attributes. It will also clear the modify 4221 * bit if we are taking away write permission. This is necessary since the 4222 * modify bit is the hardware permission bit and we need to clear it in order 4223 * to detect write faults. 4224 */ 4225 static uint64_t 4226 sfmmu_vtop_attr(uint_t attr, int mode, tte_t *ttemaskp) 4227 { 4228 tte_t ttevalue; 4229 4230 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 4231 4232 switch (mode) { 4233 case SFMMU_CHGATTR: 4234 /* all attributes specified */ 4235 ttevalue.tte_inthi = MAKE_TTEATTR_INTHI(attr); 4236 ttevalue.tte_intlo = MAKE_TTEATTR_INTLO(attr); 4237 ttemaskp->tte_inthi = TTEINTHI_ATTR; 4238 ttemaskp->tte_intlo = TTEINTLO_ATTR; 4239 break; 4240 case SFMMU_SETATTR: 4241 ASSERT(!(attr & ~HAT_PROT_MASK)); 4242 ttemaskp->ll = 0; 4243 ttevalue.ll = 0; 4244 /* 4245 * a valid tte implies exec and read for sfmmu, 4246 * so no need to do anything about them. 4247 * Since privileged access implies user access, 4248 * PROT_USER doesn't make sense either.
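 * For example, SFMMU_SETATTR with PROT_WRITE sets TTE_WRPRM_INT,
 * and nothing else, in both the mask and the returned value, so
 * the caller ORs in only the write-permission bit.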
4249 */ 4250 if (attr & PROT_WRITE) { 4251 ttemaskp->tte_intlo |= TTE_WRPRM_INT; 4252 ttevalue.tte_intlo |= TTE_WRPRM_INT; 4253 } 4254 break; 4255 case SFMMU_CLRATTR: 4256 /* attributes will be nand with current ones */ 4257 if (attr & ~(PROT_WRITE | PROT_USER)) { 4258 panic("sfmmu: attr %x not supported", attr); 4259 } 4260 ttemaskp->ll = 0; 4261 ttevalue.ll = 0; 4262 if (attr & PROT_WRITE) { 4263 /* clear both writable and modify bit */ 4264 ttemaskp->tte_intlo |= TTE_WRPRM_INT | TTE_HWWR_INT; 4265 } 4266 if (attr & PROT_USER) { 4267 ttemaskp->tte_intlo |= TTE_PRIV_INT; 4268 ttevalue.tte_intlo |= TTE_PRIV_INT; 4269 } 4270 break; 4271 default: 4272 panic("sfmmu_vtop_attr: bad mode %x", mode); 4273 } 4274 ASSERT(TTE_TO_TTEPFN(&ttevalue) == 0); 4275 return (ttevalue.ll); 4276 } 4277 4278 static uint_t 4279 sfmmu_ptov_attr(tte_t *ttep) 4280 { 4281 uint_t attr; 4282 4283 ASSERT(TTE_IS_VALID(ttep)); 4284 4285 attr = PROT_READ; 4286 4287 if (TTE_IS_WRITABLE(ttep)) { 4288 attr |= PROT_WRITE; 4289 } 4290 if (TTE_IS_EXECUTABLE(ttep)) { 4291 attr |= PROT_EXEC; 4292 } 4293 if (!TTE_IS_PRIVILEGED(ttep)) { 4294 attr |= PROT_USER; 4295 } 4296 if (TTE_IS_NFO(ttep)) { 4297 attr |= HAT_NOFAULT; 4298 } 4299 if (TTE_IS_NOSYNC(ttep)) { 4300 attr |= HAT_NOSYNC; 4301 } 4302 if (TTE_IS_SIDEFFECT(ttep)) { 4303 attr |= SFMMU_SIDEFFECT; 4304 } 4305 if (!TTE_IS_VCACHEABLE(ttep)) { 4306 attr |= SFMMU_UNCACHEVTTE; 4307 } 4308 if (!TTE_IS_PCACHEABLE(ttep)) { 4309 attr |= SFMMU_UNCACHEPTTE; 4310 } 4311 return (attr); 4312 } 4313 4314 /* 4315 * hat_chgprot is a deprecated hat call. New segment drivers 4316 * should store all attributes and use hat_*attr calls. 4317 * 4318 * Change the protections in the virtual address range 4319 * given to the specified virtual protection. If vprot is ~PROT_WRITE, 4320 * then remove write permission, leaving the other 4321 * permissions unchanged. If vprot is ~PROT_USER, remove user permissions. 4322 * 4323 */ 4324 void 4325 hat_chgprot(struct hat *sfmmup, caddr_t addr, size_t len, uint_t vprot) 4326 { 4327 struct hmehash_bucket *hmebp; 4328 hmeblk_tag hblktag; 4329 int hmeshift, hashno = 1; 4330 struct hme_blk *hmeblkp, *list = NULL; 4331 caddr_t endaddr; 4332 cpuset_t cpuset; 4333 demap_range_t dmr; 4334 4335 ASSERT((len & MMU_PAGEOFFSET) == 0); 4336 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0); 4337 4338 if (sfmmup->sfmmu_xhat_provider) { 4339 XHAT_CHGPROT(sfmmup, addr, len, vprot); 4340 return; 4341 } else { 4342 /* 4343 * This must be a CPU HAT. If the address space has 4344 * XHATs attached, change attributes for all of them, 4345 * just in case 4346 */ 4347 ASSERT(sfmmup->sfmmu_as != NULL); 4348 if (sfmmup->sfmmu_as->a_xhat != NULL) 4349 xhat_chgprot_all(sfmmup->sfmmu_as, addr, len, vprot); 4350 } 4351 4352 CPUSET_ZERO(cpuset); 4353 4354 if ((vprot != (uint_t)~PROT_WRITE) && (vprot & PROT_USER) && 4355 ((addr + len) > (caddr_t)USERLIMIT)) { 4356 panic("user addr %p vprot %x in kernel space", 4357 (void *)addr, vprot); 4358 } 4359 endaddr = addr + len; 4360 hblktag.htag_id = sfmmup; 4361 DEMAP_RANGE_INIT(sfmmup, &dmr); 4362 4363 while (addr < endaddr) { 4364 hmeshift = HME_HASH_SHIFT(hashno); 4365 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 4366 hblktag.htag_rehash = hashno; 4367 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 4368 4369 SFMMU_HASH_LOCK(hmebp); 4370 4371 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 4372 if (hmeblkp != NULL) { 4373 /* 4374 * We've encountered a shadow hmeblk so skip the range 4375 * of the next smaller mapping size. 
4376 */ 4377 if (hmeblkp->hblk_shw_bit) { 4378 ASSERT(sfmmup != ksfmmup); 4379 ASSERT(hashno > 1); 4380 addr = (caddr_t)P2END((uintptr_t)addr, 4381 TTEBYTES(hashno - 1)); 4382 } else { 4383 addr = sfmmu_hblk_chgprot(sfmmup, hmeblkp, 4384 addr, endaddr, &dmr, vprot); 4385 } 4386 SFMMU_HASH_UNLOCK(hmebp); 4387 hashno = 1; 4388 continue; 4389 } 4390 SFMMU_HASH_UNLOCK(hmebp); 4391 4392 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 4393 /* 4394 * We have traversed the whole list and rehashed 4395 * if necessary without finding the address to chgprot. 4396 * This is ok so we increment the address by the 4397 * smallest hmeblk range for kernel mappings and the 4398 * largest hmeblk range, to account for shadow hmeblks, 4399 * for user mappings and continue. 4400 */ 4401 if (sfmmup == ksfmmup) 4402 addr = (caddr_t)P2END((uintptr_t)addr, 4403 TTEBYTES(1)); 4404 else 4405 addr = (caddr_t)P2END((uintptr_t)addr, 4406 TTEBYTES(hashno)); 4407 hashno = 1; 4408 } else { 4409 hashno++; 4410 } 4411 } 4412 4413 sfmmu_hblks_list_purge(&list); 4414 DEMAP_RANGE_FLUSH(&dmr); 4415 cpuset = sfmmup->sfmmu_cpusran; 4416 xt_sync(cpuset); 4417 } 4418 4419 /* 4420 * This function chgprots a range of addresses in an hmeblk. It returns the 4421 * next address that needs to be processed. 4422 * It should be called with the hash lock held. 4423 * XXX It should be possible to optimize chgprot by not flushing every time but 4424 * on the other hand: 4425 * 1. do one flush crosscall. 4426 * 2. only flush if we are increasing permissions (make sure this will work) 4427 */ 4428 static caddr_t 4429 sfmmu_hblk_chgprot(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 4430 caddr_t endaddr, demap_range_t *dmrp, uint_t vprot) 4431 { 4432 uint_t pprot; 4433 tte_t tte, ttemod; 4434 struct sf_hment *sfhmep; 4435 uint_t tteflags; 4436 int ttesz; 4437 struct page *pp = NULL; 4438 kmutex_t *pml, *pmtx; 4439 int ret; 4440 int use_demap_range; 4441 #if defined(SF_ERRATA_57) 4442 int check_exec; 4443 #endif 4444 4445 ASSERT(in_hblk_range(hmeblkp, addr)); 4446 ASSERT(hmeblkp->hblk_shw_bit == 0); 4447 4448 #ifdef DEBUG 4449 if (get_hblk_ttesz(hmeblkp) != TTE8K && 4450 (endaddr < get_hblk_endaddr(hmeblkp))) { 4451 panic("sfmmu_hblk_chgprot: partial chgprot of large page"); 4452 } 4453 #endif /* DEBUG */ 4454 4455 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 4456 ttesz = get_hblk_ttesz(hmeblkp); 4457 4458 pprot = sfmmu_vtop_prot(vprot, &tteflags); 4459 #if defined(SF_ERRATA_57) 4460 check_exec = (sfmmup != ksfmmup) && 4461 AS_TYPE_64BIT(sfmmup->sfmmu_as) && 4462 ((vprot & PROT_EXEC) == PROT_EXEC); 4463 #endif 4464 HBLKTOHME(sfhmep, hmeblkp, addr); 4465 4466 /* 4467 * Flush the current demap region if addresses have been 4468 * skipped or the page size doesn't match. 4469 */ 4470 use_demap_range = (TTEBYTES(ttesz) == MMU_PAGESIZE); 4471 if (use_demap_range) { 4472 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 4473 } else { 4474 DEMAP_RANGE_FLUSH(dmrp); 4475 } 4476 4477 while (addr < endaddr) { 4478 sfmmu_copytte(&sfhmep->hme_tte, &tte); 4479 if (TTE_IS_VALID(&tte)) { 4480 if (TTE_GET_LOFLAGS(&tte, tteflags) == pprot) { 4481 /* 4482 * if the new protection is the same as the old, 4483 * continue 4484 */ 4485 goto next_addr; 4486 } 4487 pml = NULL; 4488 pp = sfhmep->hme_page; 4489 if (pp) { 4490 pml = sfmmu_mlist_enter(pp); 4491 } 4492 if (pp != sfhmep->hme_page) { 4493 /* 4494 * tte must have been unloaded 4495 * underneath us.
Recheck 4496 */ 4497 ASSERT(pml); 4498 sfmmu_mlist_exit(pml); 4499 continue; 4500 } 4501 4502 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 4503 4504 ttemod = tte; 4505 TTE_SET_LOFLAGS(&ttemod, tteflags, pprot); 4506 #if defined(SF_ERRATA_57) 4507 if (check_exec && addr < errata57_limit) 4508 ttemod.tte_exec_perm = 0; 4509 #endif 4510 ret = sfmmu_modifytte_try(&tte, &ttemod, 4511 &sfhmep->hme_tte); 4512 4513 if (ret < 0) { 4514 /* tte changed underneath us */ 4515 if (pml) { 4516 sfmmu_mlist_exit(pml); 4517 } 4518 continue; 4519 } 4520 4521 if (tteflags & TTE_HWWR_INT) { 4522 /* 4523 * need to sync if we are clearing modify bit. 4524 */ 4525 sfmmu_ttesync(sfmmup, addr, &tte, pp); 4526 } 4527 4528 if (pp && PP_ISRO(pp)) { 4529 if (pprot & TTE_WRPRM_INT) { 4530 pmtx = sfmmu_page_enter(pp); 4531 PP_CLRRO(pp); 4532 sfmmu_page_exit(pmtx); 4533 } 4534 } 4535 4536 if (ret > 0 && use_demap_range) { 4537 DEMAP_RANGE_MARKPG(dmrp, addr); 4538 } else if (ret > 0) { 4539 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 4540 } 4541 4542 if (pml) { 4543 sfmmu_mlist_exit(pml); 4544 } 4545 } 4546 next_addr: 4547 addr += TTEBYTES(ttesz); 4548 sfhmep++; 4549 DEMAP_RANGE_NEXTPG(dmrp); 4550 } 4551 return (addr); 4552 } 4553 4554 /* 4555 * This routine is deprecated and should only be used by hat_chgprot. 4556 * The correct routine is sfmmu_vtop_attr. 4557 * This routine converts virtual page protections to physical ones. It will 4558 * update the tteflags field with the tte mask corresponding to the protections 4559 * affected and it returns the new protections. It will also clear the modify 4560 * bit if we are taking away write permission. This is necessary since the 4561 * modify bit is the hardware permission bit and we need to clear it in order 4562 * to detect write faults. 4563 * It accepts the following special protections: 4564 * ~PROT_WRITE = remove write permissions. 4565 * ~PROT_USER = remove user permissions. 
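 *
 * Two illustrative results, read off the switch below:
 *
 *    sfmmu_vtop_prot((uint_t)~PROT_WRITE, &f) returns 0 with
 *    f = TTE_WRPRM_INT | TTE_HWWR_INT, i.e. clear the write
 *    permission and hw modify bits and touch nothing else.
 *
 *    sfmmu_vtop_prot(PROT_READ | PROT_WRITE, &f) returns
 *    TTE_PRIV_INT | TTE_WRPRM_INT with the same bits in f,
 *    i.e. a privileged, writable mapping.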
4566 */ 4567 static uint_t 4568 sfmmu_vtop_prot(uint_t vprot, uint_t *tteflagsp) 4569 { 4570 if (vprot == (uint_t)~PROT_WRITE) { 4571 *tteflagsp = TTE_WRPRM_INT | TTE_HWWR_INT; 4572 return (0); /* will cause wrprm to be cleared */ 4573 } 4574 if (vprot == (uint_t)~PROT_USER) { 4575 *tteflagsp = TTE_PRIV_INT; 4576 return (0); /* will cause privprm to be cleared */ 4577 } 4578 if ((vprot == 0) || (vprot == PROT_USER) || 4579 ((vprot & PROT_ALL) != vprot)) { 4580 panic("sfmmu_vtop_prot -- bad prot %x", vprot); 4581 } 4582 4583 switch (vprot) { 4584 case (PROT_READ): 4585 case (PROT_EXEC): 4586 case (PROT_EXEC | PROT_READ): 4587 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT; 4588 return (TTE_PRIV_INT); /* set prv and clr wrt */ 4589 case (PROT_WRITE): 4590 case (PROT_WRITE | PROT_READ): 4591 case (PROT_EXEC | PROT_WRITE): 4592 case (PROT_EXEC | PROT_WRITE | PROT_READ): 4593 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT; 4594 return (TTE_PRIV_INT | TTE_WRPRM_INT); /* set prv and wrt */ 4595 case (PROT_USER | PROT_READ): 4596 case (PROT_USER | PROT_EXEC): 4597 case (PROT_USER | PROT_EXEC | PROT_READ): 4598 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT; 4599 return (0); /* clr prv and wrt */ 4600 case (PROT_USER | PROT_WRITE): 4601 case (PROT_USER | PROT_WRITE | PROT_READ): 4602 case (PROT_USER | PROT_EXEC | PROT_WRITE): 4603 case (PROT_USER | PROT_EXEC | PROT_WRITE | PROT_READ): 4604 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT; 4605 return (TTE_WRPRM_INT); /* clr prv and set wrt */ 4606 default: 4607 panic("sfmmu_vtop_prot -- bad prot %x", vprot); 4608 } 4609 return (0); 4610 } 4611 4612 /* 4613 * Alternate unload for very large virtual ranges. With a true 64 bit VA, 4614 * the normal algorithm would take too long for a very large VA range with 4615 * few real mappings. This routine just walks thru all HMEs in the global 4616 * hash table to find and remove mappings. 4617 */ 4618 static void 4619 hat_unload_large_virtual( 4620 struct hat *sfmmup, 4621 caddr_t startaddr, 4622 size_t len, 4623 uint_t flags, 4624 hat_callback_t *callback) 4625 { 4626 struct hmehash_bucket *hmebp; 4627 struct hme_blk *hmeblkp; 4628 struct hme_blk *pr_hblk = NULL; 4629 struct hme_blk *nx_hblk; 4630 struct hme_blk *list = NULL; 4631 int i; 4632 uint64_t hblkpa, prevpa, nx_pa; 4633 hatlock_t *hatlockp; 4634 struct tsb_info *tsbinfop; 4635 struct ctx *ctx; 4636 caddr_t endaddr = startaddr + len; 4637 caddr_t sa; 4638 caddr_t ea; 4639 caddr_t cb_sa[MAX_CB_ADDR]; 4640 caddr_t cb_ea[MAX_CB_ADDR]; 4641 int addr_cnt = 0; 4642 int a = 0; 4643 int cnum; 4644 4645 hatlockp = sfmmu_hat_enter(sfmmup); 4646 4647 /* 4648 * Since we know we're unmapping a huge range of addresses, 4649 * just throw away the context and switch to another. It's 4650 * cheaper than trying to unmap all of the TTEs we may find 4651 * from the TLB individually, which is too expensive in terms 4652 * of xcalls. Better yet, if we're exiting, no need to flush 4653 * anything at all! 
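 *
 * The caller (hat_unload_callback() below) only takes this path
 * when (len >> TTE_PAGE_SHIFT(TTE4M)) exceeds UHMEHASH_SZ, i.e.
 * when probing the range in 4M steps would cost more than
 * visiting every hash bucket once.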
4654 */ 4655 if (!sfmmup->sfmmu_free) { 4656 ctx = sfmmutoctx(sfmmup); 4657 rw_enter(&ctx->ctx_rwlock, RW_WRITER); 4658 cnum = sfmmutoctxnum(sfmmup); 4659 if (cnum != INVALID_CONTEXT) { 4660 sfmmu_tlb_swap_ctx(sfmmup, ctx); 4661 } 4662 rw_exit(&ctx->ctx_rwlock); 4663 4664 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; 4665 tsbinfop = tsbinfop->tsb_next) { 4666 if (tsbinfop->tsb_flags & TSB_SWAPPED) 4667 continue; 4668 sfmmu_inv_tsb(tsbinfop->tsb_va, 4669 TSB_BYTES(tsbinfop->tsb_szc)); 4670 } 4671 } 4672 4673 /* 4674 * Loop through all the hash buckets of HME blocks looking for matches. 4675 */ 4676 for (i = 0; i <= UHMEHASH_SZ; i++) { 4677 hmebp = &uhme_hash[i]; 4678 SFMMU_HASH_LOCK(hmebp); 4679 hmeblkp = hmebp->hmeblkp; 4680 hblkpa = hmebp->hmeh_nextpa; 4681 prevpa = 0; 4682 pr_hblk = NULL; 4683 while (hmeblkp) { 4684 nx_hblk = hmeblkp->hblk_next; 4685 nx_pa = hmeblkp->hblk_nextpa; 4686 4687 /* 4688 * skip if not this context, if a shadow block or 4689 * if the mapping is not in the requested range 4690 */ 4691 if (hmeblkp->hblk_tag.htag_id != sfmmup || 4692 hmeblkp->hblk_shw_bit || 4693 (sa = (caddr_t)get_hblk_base(hmeblkp)) >= endaddr || 4694 (ea = get_hblk_endaddr(hmeblkp)) <= startaddr) { 4695 pr_hblk = hmeblkp; 4696 prevpa = hblkpa; 4697 goto next_block; 4698 } 4699 4700 /* 4701 * unload if there are any current valid mappings 4702 */ 4703 if (hmeblkp->hblk_vcnt != 0 || 4704 hmeblkp->hblk_hmecnt != 0) 4705 (void) sfmmu_hblk_unload(sfmmup, hmeblkp, 4706 sa, ea, NULL, flags); 4707 4708 /* 4709 * on unmap we also release the HME block itself, once 4710 * all mappings are gone. 4711 */ 4712 if ((flags & HAT_UNLOAD_UNMAP) != 0 && 4713 !hmeblkp->hblk_vcnt && 4714 !hmeblkp->hblk_hmecnt) { 4715 ASSERT(!hmeblkp->hblk_lckcnt); 4716 sfmmu_hblk_hash_rm(hmebp, hmeblkp, 4717 prevpa, pr_hblk); 4718 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 4719 } else { 4720 pr_hblk = hmeblkp; 4721 prevpa = hblkpa; 4722 } 4723 4724 if (callback == NULL) 4725 goto next_block; 4726 4727 /* 4728 * HME blocks may span more than one page, but we may be 4729 * unmapping only one page, so check for a smaller range 4730 * for the callback 4731 */ 4732 if (sa < startaddr) 4733 sa = startaddr; 4734 if (--ea > endaddr) 4735 ea = endaddr - 1; 4736 4737 cb_sa[addr_cnt] = sa; 4738 cb_ea[addr_cnt] = ea; 4739 if (++addr_cnt == MAX_CB_ADDR) { 4740 for (a = 0; a < MAX_CB_ADDR; ++a) { 4741 callback->hcb_start_addr = cb_sa[a]; 4742 callback->hcb_end_addr = cb_ea[a]; 4743 callback->hcb_function(callback); 4744 } 4745 addr_cnt = 0; 4746 } 4747 4748 next_block: 4749 hmeblkp = nx_hblk; 4750 hblkpa = nx_pa; 4751 } 4752 SFMMU_HASH_UNLOCK(hmebp); 4753 } 4754 4755 sfmmu_hblks_list_purge(&list); 4756 4757 for (a = 0; a < addr_cnt; ++a) { 4758 callback->hcb_start_addr = cb_sa[a]; 4759 callback->hcb_end_addr = cb_ea[a]; 4760 callback->hcb_function(callback); 4761 } 4762 4763 sfmmu_hat_exit(hatlockp); 4764 4765 /* 4766 * Check TSB and TLB page sizes if the process isn't exiting. 4767 */ 4768 if (!sfmmup->sfmmu_free) 4769 sfmmu_check_page_sizes(sfmmup, 0); 4770 } 4771 4772 4773 /* 4774 * Unload all the mappings in the range [addr..addr+len). addr and len must 4775 * be MMU_PAGESIZE aligned. 
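 *
 * A minimal sketch of a caller that wants per-range notification
 * (hypothetical callback; real callers live in the segment
 * drivers):
 *
 *    hat_callback_t cb;
 *
 *    cb.hcb_function = my_unload_notify;
 *    hat_unload_callback(as->a_hat, addr, len, HAT_UNLOAD, &cb);
 *
 * hcb_start_addr and hcb_end_addr are filled in before each
 * invocation of hcb_function; passing callback == NULL degenerates
 * to plain hat_unload().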
4776 */ 4777 4778 extern struct seg *segkmap; 4779 #define ISSEGKMAP(sfmmup, addr) (sfmmup == ksfmmup && \ 4780 segkmap->s_base <= (addr) && (addr) < (segkmap->s_base + segkmap->s_size)) 4781 4782 4783 void 4784 hat_unload_callback( 4785 struct hat *sfmmup, 4786 caddr_t addr, 4787 size_t len, 4788 uint_t flags, 4789 hat_callback_t *callback) 4790 { 4791 struct hmehash_bucket *hmebp; 4792 hmeblk_tag hblktag; 4793 int hmeshift, hashno, iskernel; 4794 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL; 4795 caddr_t endaddr; 4796 cpuset_t cpuset; 4797 uint64_t hblkpa, prevpa; 4798 int addr_count = 0; 4799 int a; 4800 caddr_t cb_start_addr[MAX_CB_ADDR]; 4801 caddr_t cb_end_addr[MAX_CB_ADDR]; 4802 int issegkmap = ISSEGKMAP(sfmmup, addr); 4803 demap_range_t dmr, *dmrp; 4804 4805 if (sfmmup->sfmmu_xhat_provider) { 4806 XHAT_UNLOAD_CALLBACK(sfmmup, addr, len, flags, callback); 4807 return; 4808 } else { 4809 /* 4810 * This must be a CPU HAT. If the address space has 4811 * XHATs attached, unload the mappings for all of them, 4812 * just in case 4813 */ 4814 ASSERT(sfmmup->sfmmu_as != NULL); 4815 if (sfmmup->sfmmu_as->a_xhat != NULL) 4816 xhat_unload_callback_all(sfmmup->sfmmu_as, addr, 4817 len, flags, callback); 4818 } 4819 4820 ASSERT((sfmmup == ksfmmup) || (flags & HAT_UNLOAD_OTHER) || \ 4821 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 4822 4823 ASSERT(sfmmup != NULL); 4824 ASSERT((len & MMU_PAGEOFFSET) == 0); 4825 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 4826 4827 /* 4828 * Probing through a large VA range (say 63 bits) will be slow, even 4829 * at 4 Meg steps between the probes. So, when the virtual address range 4830 * is very large, search the HME entries for what to unload. 4831 * 4832 * len >> TTE_PAGE_SHIFT(TTE4M) is the # of 4Meg probes we'd need 4833 * 4834 * UHMEHASH_SZ is the number of hash buckets to examine 4835 * 4836 */ 4837 if (sfmmup != KHATID && (len >> TTE_PAGE_SHIFT(TTE4M)) > UHMEHASH_SZ) { 4838 hat_unload_large_virtual(sfmmup, addr, len, flags, callback); 4839 return; 4840 } 4841 4842 CPUSET_ZERO(cpuset); 4843 4844 /* 4845 * If the process is exiting, we can save a lot of fuss since 4846 * we'll flush the TLB when we free the ctx anyway. 4847 */ 4848 if (sfmmup->sfmmu_free) 4849 dmrp = NULL; 4850 else 4851 dmrp = &dmr; 4852 4853 DEMAP_RANGE_INIT(sfmmup, dmrp); 4854 endaddr = addr + len; 4855 hblktag.htag_id = sfmmup; 4856 4857 /* 4858 * It is likely for the VM to call unload over a wide range of 4859 * addresses that are actually very sparsely populated by 4860 * translations. In order to speed this up the sfmmu hat supports 4861 * the concept of shadow hmeblks. Dummy large page hmeblks that 4862 * correspond to actual small translations are allocated at tteload 4863 * time and are referred to as shadow hmeblks. Now, during unload 4864 * time, we first check if we have a shadow hmeblk for that 4865 * translation. The absence of one means the corresponding address 4866 * range is empty and can be skipped. 4867 * 4868 * The kernel is an exception to the above statement, which is why 4869 * we don't use shadow hmeblks for it and instead hash starting from 4870 * the smallest page size.
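 *
 * In outline, for a user hat with all page sizes enabled, each
 * address is probed at 256M, then 32M, 4M, 512K and 64K: a miss
 * at a given size lets us skip to the next boundary of that size,
 * while a shadow hmeblk hit drops us down to the next smaller
 * size for the same address.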
4871 */ 4872 if (sfmmup == KHATID) { 4873 iskernel = 1; 4874 hashno = TTE64K; 4875 } else { 4876 iskernel = 0; 4877 if (mmu_page_sizes == max_mmu_page_sizes) { 4878 hashno = TTE256M; 4879 } else { 4880 hashno = TTE4M; 4881 } 4882 } 4883 while (addr < endaddr) { 4884 hmeshift = HME_HASH_SHIFT(hashno); 4885 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 4886 hblktag.htag_rehash = hashno; 4887 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 4888 4889 SFMMU_HASH_LOCK(hmebp); 4890 4891 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, hblkpa, pr_hblk, 4892 prevpa, &list); 4893 if (hmeblkp == NULL) { 4894 /* 4895 * didn't find an hmeblk. skip the appropriate 4896 * address range. 4897 */ 4898 SFMMU_HASH_UNLOCK(hmebp); 4899 if (iskernel) { 4900 if (hashno < mmu_hashcnt) { 4901 hashno++; 4902 continue; 4903 } else { 4904 hashno = TTE64K; 4905 addr = (caddr_t)roundup((uintptr_t)addr 4906 + 1, MMU_PAGESIZE64K); 4907 continue; 4908 } 4909 } 4910 addr = (caddr_t)roundup((uintptr_t)addr + 1, 4911 (1 << hmeshift)); 4912 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 4913 ASSERT(hashno == TTE64K); 4914 continue; 4915 } 4916 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 4917 hashno = TTE512K; 4918 continue; 4919 } 4920 if (mmu_page_sizes == max_mmu_page_sizes) { 4921 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 4922 hashno = TTE4M; 4923 continue; 4924 } 4925 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 4926 hashno = TTE32M; 4927 continue; 4928 } 4929 hashno = TTE256M; 4930 continue; 4931 } else { 4932 hashno = TTE4M; 4933 continue; 4934 } 4935 } 4936 ASSERT(hmeblkp); 4937 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 4938 /* 4939 * If the valid count is zero we can skip the range 4940 * mapped by this hmeblk. 4941 * We free hblks in the case of HAT_UNMAP. HAT_UNMAP 4942 * is used by segment drivers as a hint 4943 * that the mapping resource won't be used any longer. 4944 * The best example of this is during exit(). 4945 */ 4946 addr = (caddr_t)roundup((uintptr_t)addr + 1, 4947 get_hblk_span(hmeblkp)); 4948 if ((flags & HAT_UNLOAD_UNMAP) || 4949 (iskernel && !issegkmap)) { 4950 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, 4951 pr_hblk); 4952 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 4953 } 4954 SFMMU_HASH_UNLOCK(hmebp); 4955 4956 if (iskernel) { 4957 hashno = TTE64K; 4958 continue; 4959 } 4960 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 4961 ASSERT(hashno == TTE64K); 4962 continue; 4963 } 4964 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 4965 hashno = TTE512K; 4966 continue; 4967 } 4968 if (mmu_page_sizes == max_mmu_page_sizes) { 4969 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 4970 hashno = TTE4M; 4971 continue; 4972 } 4973 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 4974 hashno = TTE32M; 4975 continue; 4976 } 4977 hashno = TTE256M; 4978 continue; 4979 } else { 4980 hashno = TTE4M; 4981 continue; 4982 } 4983 } 4984 if (hmeblkp->hblk_shw_bit) { 4985 /* 4986 * If we encounter a shadow hmeblk we know there are 4987 * smaller sized hmeblks mapping the same address space. 4988 * Decrement the hash size and rehash. 4989 */ 4990 ASSERT(sfmmup != KHATID); 4991 hashno--; 4992 SFMMU_HASH_UNLOCK(hmebp); 4993 continue; 4994 } 4995 4996 /* 4997 * track callback address ranges.
4998 * only start a new range when it's not contiguous 4999 */ 5000 if (callback != NULL) { 5001 if (addr_count > 0 && 5002 addr == cb_end_addr[addr_count - 1]) 5003 --addr_count; 5004 else 5005 cb_start_addr[addr_count] = addr; 5006 } 5007 5008 addr = sfmmu_hblk_unload(sfmmup, hmeblkp, addr, endaddr, 5009 dmrp, flags); 5010 5011 if (callback != NULL) 5012 cb_end_addr[addr_count++] = addr; 5013 5014 if (((flags & HAT_UNLOAD_UNMAP) || (iskernel && !issegkmap)) && 5015 !hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 5016 sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, 5017 pr_hblk); 5018 sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list); 5019 } 5020 SFMMU_HASH_UNLOCK(hmebp); 5021 5022 /* 5023 * Notify our caller as to exactly which pages 5024 * have been unloaded. We do these in clumps, 5025 * to minimize the number of xt_sync()s that need to occur. 5026 */ 5027 if (callback != NULL && addr_count == MAX_CB_ADDR) { 5028 DEMAP_RANGE_FLUSH(dmrp); 5029 if (dmrp != NULL) { 5030 cpuset = sfmmup->sfmmu_cpusran; 5031 xt_sync(cpuset); 5032 } 5033 5034 for (a = 0; a < MAX_CB_ADDR; ++a) { 5035 callback->hcb_start_addr = cb_start_addr[a]; 5036 callback->hcb_end_addr = cb_end_addr[a]; 5037 callback->hcb_function(callback); 5038 } 5039 addr_count = 0; 5040 } 5041 if (iskernel) { 5042 hashno = TTE64K; 5043 continue; 5044 } 5045 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 5046 ASSERT(hashno == TTE64K); 5047 continue; 5048 } 5049 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 5050 hashno = TTE512K; 5051 continue; 5052 } 5053 if (mmu_page_sizes == max_mmu_page_sizes) { 5054 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 5055 hashno = TTE4M; 5056 continue; 5057 } 5058 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 5059 hashno = TTE32M; 5060 continue; 5061 } 5062 hashno = TTE256M; 5063 } else { 5064 hashno = TTE4M; 5065 } 5066 } 5067 5068 sfmmu_hblks_list_purge(&list); 5069 DEMAP_RANGE_FLUSH(dmrp); 5070 if (dmrp != NULL) { 5071 cpuset = sfmmup->sfmmu_cpusran; 5072 xt_sync(cpuset); 5073 } 5074 if (callback && addr_count != 0) { 5075 for (a = 0; a < addr_count; ++a) { 5076 callback->hcb_start_addr = cb_start_addr[a]; 5077 callback->hcb_end_addr = cb_end_addr[a]; 5078 callback->hcb_function(callback); 5079 } 5080 } 5081 5082 /* 5083 * Check TSB and TLB page sizes if the process isn't exiting. 5084 */ 5085 if (!sfmmup->sfmmu_free) 5086 sfmmu_check_page_sizes(sfmmup, 0); 5087 } 5088 5089 /* 5090 * Unload all the mappings in the range [addr..addr+len). addr and len must 5091 * be MMU_PAGESIZE aligned. 5092 */ 5093 void 5094 hat_unload(struct hat *sfmmup, caddr_t addr, size_t len, uint_t flags) 5095 { 5096 if (sfmmup->sfmmu_xhat_provider) { 5097 XHAT_UNLOAD(sfmmup, addr, len, flags); 5098 return; 5099 } 5100 hat_unload_callback(sfmmup, addr, len, flags, NULL); 5101 } 5102 5103 5104 /* 5105 * Find the largest mapping size for this page. 5106 */ 5107 static int 5108 fnd_mapping_sz(page_t *pp) 5109 { 5110 int sz; 5111 int p_index; 5112 5113 p_index = PP_MAPINDEX(pp); 5114 5115 sz = 0; 5116 p_index >>= 1; /* don't care about 8K bit */ 5117 for (; p_index; p_index >>= 1) { 5118 sz++; 5119 } 5120 5121 return (sz); 5122 } 5123 5124 /* 5125 * This function unloads a range of addresses for an hmeblk. 5126 * It returns the next address to be unloaded. 5127 * It should be called with the hash lock held. 
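 *
 * The invalidation below is the compare-and-swap pattern used
 * throughout this file; in sketch form:
 *
 *    do {
 *        sfmmu_copytte(&sfhmep->hme_tte, &tte);
 *        ttemod = tte;
 *        TTE_SET_INVALID(&ttemod);
 *    } while (sfmmu_modifytte_try(&tte, &ttemod,
 *        &sfhmep->hme_tte) < 0);
 *
 * A negative return means we lost a race with a concurrent update
 * and must reread the tte; a positive return means the swap took
 * and the old translation still needs to be demapped.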
5128 */ 5129 static caddr_t 5130 sfmmu_hblk_unload(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 5131 caddr_t endaddr, demap_range_t *dmrp, uint_t flags) 5132 { 5133 tte_t tte, ttemod; 5134 struct sf_hment *sfhmep; 5135 int ttesz; 5136 long ttecnt; 5137 page_t *pp; 5138 kmutex_t *pml; 5139 int ret; 5140 int use_demap_range; 5141 5142 ASSERT(in_hblk_range(hmeblkp, addr)); 5143 ASSERT(!hmeblkp->hblk_shw_bit); 5144 #ifdef DEBUG 5145 if (get_hblk_ttesz(hmeblkp) != TTE8K && 5146 (endaddr < get_hblk_endaddr(hmeblkp))) { 5147 panic("sfmmu_hblk_unload: partial unload of large page"); 5148 } 5149 #endif /* DEBUG */ 5150 5151 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 5152 ttesz = get_hblk_ttesz(hmeblkp); 5153 5154 use_demap_range = (do_virtual_coloring && 5155 TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp)); 5156 if (use_demap_range) { 5157 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 5158 } else { 5159 DEMAP_RANGE_FLUSH(dmrp); 5160 } 5161 ttecnt = 0; 5162 HBLKTOHME(sfhmep, hmeblkp, addr); 5163 5164 while (addr < endaddr) { 5165 pml = NULL; 5166 again: 5167 sfmmu_copytte(&sfhmep->hme_tte, &tte); 5168 if (TTE_IS_VALID(&tte)) { 5169 pp = sfhmep->hme_page; 5170 if (pp && pml == NULL) { 5171 pml = sfmmu_mlist_enter(pp); 5172 } 5173 5174 /* 5175 * Verify if hme still points to 'pp' now that 5176 * we have p_mapping lock. 5177 */ 5178 if (sfhmep->hme_page != pp) { 5179 if (pp != NULL && sfhmep->hme_page != NULL) { 5180 if (pml) { 5181 sfmmu_mlist_exit(pml); 5182 } 5183 /* Re-start this iteration. */ 5184 continue; 5185 } 5186 ASSERT((pp != NULL) && 5187 (sfhmep->hme_page == NULL)); 5188 goto tte_unloaded; 5189 } 5190 5191 /* 5192 * From this point on we hold both the HASH and 5193 * p_mapping locks. 5194 */ 5195 ASSERT(pp == sfhmep->hme_page); 5196 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 5197 5198 /* 5199 * We need to loop on modify tte because it is 5200 * possible for pagesync to come along and 5201 * change the software bits beneath us. 5202 * 5203 * Page_unload can also invalidate the tte after 5204 * we read tte outside of p_mapping lock. 5205 */ 5206 ttemod = tte; 5207 5208 TTE_SET_INVALID(&ttemod); 5209 ret = sfmmu_modifytte_try(&tte, &ttemod, 5210 &sfhmep->hme_tte); 5211 5212 if (ret <= 0) { 5213 if (TTE_IS_VALID(&tte)) { 5214 goto again; 5215 } else { 5216 /* 5217 * We read in a valid tte, but it 5218 * was unloaded by page_unload. 5219 * hme_page has become NULL and 5220 * we hold no p_mapping lock. 5221 */ 5222 ASSERT(pp == NULL && pml == NULL); 5223 goto tte_unloaded; 5224 } 5225 } 5226 5227 if (!(flags & HAT_UNLOAD_NOSYNC)) { 5228 sfmmu_ttesync(sfmmup, addr, &tte, pp); 5229 } 5230 5231 /* 5232 * Ok- we invalidated the tte. Do the rest of the job. 5233 */ 5234 ttecnt++; 5235 5236 if (flags & HAT_UNLOAD_UNLOCK) { 5237 ASSERT(hmeblkp->hblk_lckcnt > 0); 5238 atomic_add_16(&hmeblkp->hblk_lckcnt, -1); 5239 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK); 5240 } 5241 5242 /* 5243 * Normally we would need to flush the page 5244 * from the virtual cache at this point in 5245 * order to prevent a potential cache alias 5246 * inconsistency. 5247 * The particular scenario we need to worry 5248 * about is: 5249 * Given: va1 and va2 are two virtual addresses 5250 * that alias and map the same physical 5251 * address. 5252 * 1. mapping exists from va1 to pa and data 5253 * has been read into the cache. 5254 * 2. unload va1. 5255 * 3. load va2 and modify data using va2. 5256 * 4. unload va2. 5257 * 5. load va1 and reference data. Unless we 5258 * flush the data cache when we unload we will 5259 * get stale data.
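 * As a concrete (hypothetical) instance: with a 16K direct-mapped
 * virtual cache and 8K pages, va1 = 0x2000 and va2 = 0x6000 can
 * map the same pa yet index different cache lines, so the stores
 * of step 3 are invisible through va1's cached lines in step 5.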
5260 * Fortunately, page coloring eliminates the 5261 * above scenario by remembering the color a 5262 * physical page was last or is currently 5263 * mapped to. Now, we delay the flush until 5264 * the loading of translations. Only when the 5265 * new translation is of a different color 5266 * are we forced to flush. 5267 */ 5268 if (use_demap_range) { 5269 /* 5270 * Mark this page as needing a demap. 5271 */ 5272 DEMAP_RANGE_MARKPG(dmrp, addr); 5273 } else { 5274 if (do_virtual_coloring) { 5275 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 5276 sfmmup->sfmmu_free, 0); 5277 } else { 5278 pfn_t pfnum; 5279 5280 pfnum = TTE_TO_PFN(addr, &tte); 5281 sfmmu_tlbcache_demap(addr, sfmmup, 5282 hmeblkp, pfnum, sfmmup->sfmmu_free, 5283 FLUSH_NECESSARY_CPUS, 5284 CACHE_FLUSH, 0); 5285 } 5286 } 5287 5288 if (pp) { 5289 /* 5290 * Remove the hment from the mapping list 5291 */ 5292 ASSERT(hmeblkp->hblk_hmecnt > 0); 5293 5294 /* 5295 * Again, we cannot 5296 * ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS); 5297 */ 5298 HME_SUB(sfhmep, pp); 5299 membar_stst(); 5300 atomic_add_16(&hmeblkp->hblk_hmecnt, -1); 5301 } 5302 5303 ASSERT(hmeblkp->hblk_vcnt > 0); 5304 atomic_add_16(&hmeblkp->hblk_vcnt, -1); 5305 5306 ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt || 5307 !hmeblkp->hblk_lckcnt); 5308 5309 if (pp && (pp->p_nrm & (P_KPMC | P_KPMS | P_TNC))) { 5310 if (PP_ISTNC(pp)) { 5311 /* 5312 * If page was temporary 5313 * uncached, try to recache 5314 * it. Note that HME_SUB() was 5315 * called above so p_index and 5316 * mlist had been updated. 5317 */ 5318 conv_tnc(pp, ttesz); 5319 } else if (pp->p_mapping == NULL) { 5320 ASSERT(kpm_enable); 5321 /* 5322 * Page is marked to be in VAC conflict 5323 * to an existing kpm mapping and/or is 5324 * kpm mapped using only the regular 5325 * pagesize. 5326 */ 5327 sfmmu_kpm_hme_unload(pp); 5328 } 5329 } 5330 } else if ((pp = sfhmep->hme_page) != NULL) { 5331 /* 5332 * TTE is invalid but the hme 5333 * still exists. let pageunload 5334 * complete its job. 5335 */ 5336 ASSERT(pml == NULL); 5337 pml = sfmmu_mlist_enter(pp); 5338 if (sfhmep->hme_page != NULL) { 5339 sfmmu_mlist_exit(pml); 5340 pml = NULL; 5341 goto again; 5342 } 5343 ASSERT(sfhmep->hme_page == NULL); 5344 } else if (hmeblkp->hblk_hmecnt != 0) { 5345 /* 5346 * pageunload may have not finished decrementing 5347 * hblk_vcnt and hblk_hmecnt. Find page_t if any and 5348 * wait for pageunload to finish. Rely on pageunload 5349 * to decrement hblk_hmecnt after hblk_vcnt. 5350 */ 5351 pfn_t pfn = TTE_TO_TTEPFN(&tte); 5352 ASSERT(pml == NULL); 5353 if (pf_is_memory(pfn)) { 5354 pp = page_numtopp_nolock(pfn); 5355 if (pp != NULL) { 5356 pml = sfmmu_mlist_enter(pp); 5357 sfmmu_mlist_exit(pml); 5358 pml = NULL; 5359 } 5360 } 5361 } 5362 5363 tte_unloaded: 5364 /* 5365 * At this point, the tte we are looking at 5366 * should be unloaded, and hme has been unlinked 5367 * from page too. This is important because in 5368 * pageunload, it does ttesync() then HME_SUB. 5369 * We need to make sure HME_SUB has been completed 5370 * so we know ttesync() has been completed. Otherwise, 5371 * at exit time, after return from hat layer, VM will 5372 * release as structure which hat_setstat() (called 5373 * by ttesync()) needs. 
5374 */ 5375 #ifdef DEBUG 5376 { 5377 tte_t dtte; 5378 5379 ASSERT(sfhmep->hme_page == NULL); 5380 5381 sfmmu_copytte(&sfhmep->hme_tte, &dtte); 5382 ASSERT(!TTE_IS_VALID(&dtte)); 5383 } 5384 #endif 5385 5386 if (pml) { 5387 sfmmu_mlist_exit(pml); 5388 } 5389 5390 addr += TTEBYTES(ttesz); 5391 sfhmep++; 5392 DEMAP_RANGE_NEXTPG(dmrp); 5393 } 5394 if (ttecnt > 0) 5395 atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -ttecnt); 5396 return (addr); 5397 } 5398 5399 /* 5400 * Synchronize all the mappings in the range [addr..addr+len). 5401 * Can be called with clearflag having two states: 5402 * HAT_SYNC_DONTZERO means just return the rm stats 5403 * HAT_SYNC_ZERORM means zero rm bits in the tte and return the stats 5404 */ 5405 void 5406 hat_sync(struct hat *sfmmup, caddr_t addr, size_t len, uint_t clearflag) 5407 { 5408 struct hmehash_bucket *hmebp; 5409 hmeblk_tag hblktag; 5410 int hmeshift, hashno = 1; 5411 struct hme_blk *hmeblkp, *list = NULL; 5412 caddr_t endaddr; 5413 cpuset_t cpuset; 5414 5415 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 5416 ASSERT((sfmmup == ksfmmup) || 5417 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 5418 ASSERT((len & MMU_PAGEOFFSET) == 0); 5419 ASSERT((clearflag == HAT_SYNC_DONTZERO) || 5420 (clearflag == HAT_SYNC_ZERORM)); 5421 5422 CPUSET_ZERO(cpuset); 5423 5424 endaddr = addr + len; 5425 hblktag.htag_id = sfmmup; 5426 /* 5427 * Spitfire supports 4 page sizes. 5428 * Most pages are expected to be of the smallest page 5429 * size (8K) and these will not need to be rehashed. 64K 5430 * pages also don't need to be rehashed because an hmeblk 5431 * spans 64K of address space. 512K pages might need 1 rehash 5432 * and 4M pages 2 rehashes. 5433 */ 5434 while (addr < endaddr) { 5435 hmeshift = HME_HASH_SHIFT(hashno); 5436 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 5437 hblktag.htag_rehash = hashno; 5438 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 5439 5440 SFMMU_HASH_LOCK(hmebp); 5441 5442 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 5443 if (hmeblkp != NULL) { 5444 /* 5445 * We've encountered a shadow hmeblk so skip the range 5446 * of the next smaller mapping size. 5447 */ 5448 if (hmeblkp->hblk_shw_bit) { 5449 ASSERT(sfmmup != ksfmmup); 5450 ASSERT(hashno > 1); 5451 addr = (caddr_t)P2END((uintptr_t)addr, 5452 TTEBYTES(hashno - 1)); 5453 } else { 5454 addr = sfmmu_hblk_sync(sfmmup, hmeblkp, 5455 addr, endaddr, clearflag); 5456 } 5457 SFMMU_HASH_UNLOCK(hmebp); 5458 hashno = 1; 5459 continue; 5460 } 5461 SFMMU_HASH_UNLOCK(hmebp); 5462 5463 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 5464 /* 5465 * We have traversed the whole list and rehashed 5466 * if necessary without finding the address to sync. 5467 * This is ok so we increment the address by the 5468 * smallest hmeblk range for kernel mappings and the 5469 * largest hmeblk range, to account for shadow hmeblks, 5470 * for user mappings and continue.
5471 */ 5472 if (sfmmup == ksfmmup) 5473 addr = (caddr_t)P2END((uintptr_t)addr, 5474 TTEBYTES(1)); 5475 else 5476 addr = (caddr_t)P2END((uintptr_t)addr, 5477 TTEBYTES(hashno)); 5478 hashno = 1; 5479 } else { 5480 hashno++; 5481 } 5482 } 5483 sfmmu_hblks_list_purge(&list); 5484 cpuset = sfmmup->sfmmu_cpusran; 5485 xt_sync(cpuset); 5486 } 5487 5488 static caddr_t 5489 sfmmu_hblk_sync(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 5490 caddr_t endaddr, int clearflag) 5491 { 5492 tte_t tte, ttemod; 5493 struct sf_hment *sfhmep; 5494 int ttesz; 5495 struct page *pp; 5496 kmutex_t *pml; 5497 int ret; 5498 5499 ASSERT(hmeblkp->hblk_shw_bit == 0); 5500 5501 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 5502 5503 ttesz = get_hblk_ttesz(hmeblkp); 5504 HBLKTOHME(sfhmep, hmeblkp, addr); 5505 5506 while (addr < endaddr) { 5507 sfmmu_copytte(&sfhmep->hme_tte, &tte); 5508 if (TTE_IS_VALID(&tte)) { 5509 pml = NULL; 5510 pp = sfhmep->hme_page; 5511 if (pp) { 5512 pml = sfmmu_mlist_enter(pp); 5513 } 5514 if (pp != sfhmep->hme_page) { 5515 /* 5516 * tte must have been unloaded 5517 * underneath us. Recheck 5518 */ 5519 ASSERT(pml); 5520 sfmmu_mlist_exit(pml); 5521 continue; 5522 } 5523 5524 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 5525 5526 if (clearflag == HAT_SYNC_ZERORM) { 5527 ttemod = tte; 5528 TTE_CLR_RM(&ttemod); 5529 ret = sfmmu_modifytte_try(&tte, &ttemod, 5530 &sfhmep->hme_tte); 5531 if (ret < 0) { 5532 if (pml) { 5533 sfmmu_mlist_exit(pml); 5534 } 5535 continue; 5536 } 5537 5538 if (ret > 0) { 5539 sfmmu_tlb_demap(addr, sfmmup, 5540 hmeblkp, 0, 0); 5541 } 5542 } 5543 sfmmu_ttesync(sfmmup, addr, &tte, pp); 5544 if (pml) { 5545 sfmmu_mlist_exit(pml); 5546 } 5547 } 5548 addr += TTEBYTES(ttesz); 5549 sfhmep++; 5550 } 5551 return (addr); 5552 } 5553 5554 /* 5555 * This function will sync a tte to the page struct and it will 5556 * update the hat stats. Currently it allows us to pass a NULL pp 5557 * and we will simply update the stats. We may want to change this 5558 * so we only keep stats for pages backed by pp's. 5559 */ 5560 static void 5561 sfmmu_ttesync(struct hat *sfmmup, caddr_t addr, tte_t *ttep, page_t *pp) 5562 { 5563 uint_t rm = 0; 5564 int sz; 5565 pgcnt_t npgs; 5566 5567 ASSERT(TTE_IS_VALID(ttep)); 5568 5569 if (TTE_IS_NOSYNC(ttep)) { 5570 return; 5571 } 5572 5573 if (TTE_IS_REF(ttep)) { 5574 rm = P_REF; 5575 } 5576 if (TTE_IS_MOD(ttep)) { 5577 rm |= P_MOD; 5578 } 5579 5580 if (rm == 0) { 5581 return; 5582 } 5583 5584 sz = TTE_CSZ(ttep); 5585 if (sfmmup->sfmmu_rmstat) { 5586 int i; 5587 caddr_t vaddr = addr; 5588 5589 for (i = 0; i < TTEPAGES(sz); i++, vaddr += MMU_PAGESIZE) { 5590 hat_setstat(sfmmup->sfmmu_as, vaddr, MMU_PAGESIZE, rm); 5591 } 5592 5593 } 5594 5595 /* 5596 * XXX I want to use cas to update nrm bits but they 5597 * currently belong in common/vm and not in hat where 5598 * they should be. 5599 * The nrm bits are protected by the same mutex as 5600 * the one that protects the page's mapping list. 5601 */ 5602 if (!pp) 5603 return; 5604 ASSERT(sfmmu_mlist_held(pp)); 5605 /* 5606 * If the tte is for a large page, we need to sync all the 5607 * pages covered by the tte. 5608 */ 5609 if (sz != TTE8K) { 5610 ASSERT(pp->p_szc != 0); 5611 pp = PP_GROUPLEADER(pp, sz); 5612 ASSERT(sfmmu_mlist_held(pp)); 5613 } 5614 5615 /* Get number of pages from tte size.
*/ 5616 npgs = TTEPAGES(sz); 5617 5618 do { 5619 ASSERT(pp); 5620 ASSERT(sfmmu_mlist_held(pp)); 5621 if (((rm & P_REF) != 0 && !PP_ISREF(pp)) || 5622 ((rm & P_MOD) != 0 && !PP_ISMOD(pp))) 5623 hat_page_setattr(pp, rm); 5624 5625 /* 5626 * Are we done? If not, we must have a large mapping. 5627 * For large mappings we need to sync the rest of the pages 5628 * covered by this tte; goto the next page. 5629 */ 5630 } while (--npgs > 0 && (pp = PP_PAGENEXT(pp))); 5631 } 5632 5633 /* 5634 * Execute pre-callback handler of each pa_hment linked to pp 5635 * 5636 * Inputs: 5637 * flag: either HAT_PRESUSPEND or HAT_SUSPEND. 5638 * capture_cpus: pointer to return value (below) 5639 * 5640 * Returns: 5641 * Propagates the subsystem callback return values back to the caller; 5642 * returns 0 on success. If capture_cpus is non-NULL, the value returned 5643 * is zero if all of the pa_hments are of a type that do not require 5644 * capturing CPUs prior to suspending the mapping, else it is 1. 5645 */ 5646 static int 5647 hat_pageprocess_precallbacks(struct page *pp, uint_t flag, int *capture_cpus) 5648 { 5649 struct sf_hment *sfhmep; 5650 struct pa_hment *pahmep; 5651 int (*f)(caddr_t, uint_t, uint_t, void *); 5652 int ret; 5653 id_t id; 5654 int locked = 0; 5655 kmutex_t *pml; 5656 5657 ASSERT(PAGE_EXCL(pp)); 5658 if (!sfmmu_mlist_held(pp)) { 5659 pml = sfmmu_mlist_enter(pp); 5660 locked = 1; 5661 } 5662 5663 if (capture_cpus) 5664 *capture_cpus = 0; 5665 5666 top: 5667 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 5668 /* 5669 * skip sf_hments corresponding to VA<->PA mappings; 5670 * for pa_hment's, hme_tte.ll is zero 5671 */ 5672 if (!IS_PAHME(sfhmep)) 5673 continue; 5674 5675 pahmep = sfhmep->hme_data; 5676 ASSERT(pahmep != NULL); 5677 5678 /* 5679 * skip if pre-handler has been called earlier in this loop 5680 */ 5681 if (pahmep->flags & flag) 5682 continue; 5683 5684 id = pahmep->cb_id; 5685 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid); 5686 if (capture_cpus && sfmmu_cb_table[id].capture_cpus != 0) 5687 *capture_cpus = 1; 5688 if ((f = sfmmu_cb_table[id].prehandler) == NULL) { 5689 pahmep->flags |= flag; 5690 continue; 5691 } 5692 5693 /* 5694 * Drop the mapping list lock to avoid locking order issues. 5695 */ 5696 if (locked) 5697 sfmmu_mlist_exit(pml); 5698 5699 ret = f(pahmep->addr, pahmep->len, flag, pahmep->pvt); 5700 if (ret != 0) 5701 return (ret); /* caller must do the cleanup */ 5702 5703 if (locked) { 5704 pml = sfmmu_mlist_enter(pp); 5705 pahmep->flags |= flag; 5706 goto top; 5707 } 5708 5709 pahmep->flags |= flag; 5710 } 5711 5712 if (locked) 5713 sfmmu_mlist_exit(pml); 5714 5715 return (0); 5716 } 5717 5718 /* 5719 * Execute post-callback handler of each pa_hment linked to pp 5720 * 5721 * Same overall assumptions and restrictions apply as for 5722 * hat_pageprocess_precallbacks(). 
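 *
 * For example, hat_page_relocate() below undoes a failed
 * HAT_PRESUSPEND pass by calling this routine with
 * HAT_POSTUNSUSPEND before bailing out with EAGAIN.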
5723 */ 5724 static void 5725 hat_pageprocess_postcallbacks(struct page *pp, uint_t flag) 5726 { 5727 pfn_t pgpfn = pp->p_pagenum; 5728 pfn_t pgmask = btop(page_get_pagesize(pp->p_szc)) - 1; 5729 pfn_t newpfn; 5730 struct sf_hment *sfhmep; 5731 struct pa_hment *pahmep; 5732 int (*f)(caddr_t, uint_t, uint_t, void *, pfn_t); 5733 id_t id; 5734 int locked = 0; 5735 kmutex_t *pml; 5736 5737 ASSERT(PAGE_EXCL(pp)); 5738 if (!sfmmu_mlist_held(pp)) { 5739 pml = sfmmu_mlist_enter(pp); 5740 locked = 1; 5741 } 5742 5743 top: 5744 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 5745 /* 5746 * skip sf_hments corresponding to VA<->PA mappings; 5747 * for pa_hment's, hme_tte.ll is zero 5748 */ 5749 if (!IS_PAHME(sfhmep)) 5750 continue; 5751 5752 pahmep = sfhmep->hme_data; 5753 ASSERT(pahmep != NULL); 5754 5755 if ((pahmep->flags & flag) == 0) 5756 continue; 5757 5758 pahmep->flags &= ~flag; 5759 5760 id = pahmep->cb_id; 5761 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid); 5762 if ((f = sfmmu_cb_table[id].posthandler) == NULL) 5763 continue; 5764 5765 /* 5766 * Convert the base page PFN into the constituent PFN 5767 * which is needed by the callback handler. 5768 */ 5769 newpfn = pgpfn | (btop((uintptr_t)pahmep->addr) & pgmask); 5770 5771 /* 5772 * Drop the mapping list lock to avoid locking order issues. 5773 */ 5774 if (locked) 5775 sfmmu_mlist_exit(pml); 5776 5777 if (f(pahmep->addr, pahmep->len, flag, pahmep->pvt, newpfn) 5778 != 0) 5779 panic("sfmmu: posthandler failed"); 5780 5781 if (locked) { 5782 pml = sfmmu_mlist_enter(pp); 5783 goto top; 5784 } 5785 } 5786 5787 if (locked) 5788 sfmmu_mlist_exit(pml); 5789 } 5790 5791 /* 5792 * Suspend locked kernel mapping 5793 */ 5794 void 5795 hat_pagesuspend(struct page *pp) 5796 { 5797 struct sf_hment *sfhmep; 5798 sfmmu_t *sfmmup; 5799 tte_t tte, ttemod; 5800 struct hme_blk *hmeblkp; 5801 caddr_t addr; 5802 int index, cons; 5803 cpuset_t cpuset; 5804 5805 ASSERT(PAGE_EXCL(pp)); 5806 ASSERT(sfmmu_mlist_held(pp)); 5807 5808 mutex_enter(&kpr_suspendlock); 5809 5810 /* 5811 * Call into dtrace to tell it we're about to suspend a 5812 * kernel mapping. This prevents us from running into issues 5813 * with probe context trying to touch a suspended page 5814 * in the relocation codepath itself. 5815 */ 5816 if (dtrace_kreloc_init) 5817 (*dtrace_kreloc_init)(); 5818 5819 index = PP_MAPINDEX(pp); 5820 cons = TTE8K; 5821 5822 retry: 5823 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 5824 5825 if (IS_PAHME(sfhmep)) 5826 continue; 5827 5828 if (get_hblk_ttesz(sfmmu_hmetohblk(sfhmep)) != cons) 5829 continue; 5830 5831 /* 5832 * Loop until we successfully set the suspend bit in 5833 * the TTE. 5834 */ 5835 again: 5836 sfmmu_copytte(&sfhmep->hme_tte, &tte); 5837 ASSERT(TTE_IS_VALID(&tte)); 5838 5839 ttemod = tte; 5840 TTE_SET_SUSPEND(&ttemod); 5841 if (sfmmu_modifytte_try(&tte, &ttemod, 5842 &sfhmep->hme_tte) < 0) 5843 goto again; 5844 5845 /* 5846 * Invalidate TSB entry 5847 */ 5848 hmeblkp = sfmmu_hmetohblk(sfhmep); 5849 5850 sfmmup = hblktosfmmu(hmeblkp); 5851 ASSERT(sfmmup == ksfmmup); 5852 5853 addr = tte_to_vaddr(hmeblkp, tte); 5854 5855 /* 5856 * No need to make sure that the TSB for this sfmmu is 5857 * not being relocated since it is ksfmmup and thus it 5858 * will never be relocated. 
5859 */ 5860 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp); 5861 5862 /* 5863 * Update xcall stats 5864 */ 5865 cpuset = cpu_ready_set; 5866 CPUSET_DEL(cpuset, CPU->cpu_id); 5867 5868 /* LINTED: constant in conditional context */ 5869 SFMMU_XCALL_STATS(KCONTEXT); 5870 5871 /* 5872 * Flush TLB entry on remote CPU's 5873 */ 5874 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, KCONTEXT); 5875 xt_sync(cpuset); 5876 5877 /* 5878 * Flush TLB entry on local CPU 5879 */ 5880 vtag_flushpage(addr, KCONTEXT); 5881 } 5882 5883 while (index != 0) { 5884 index = index >> 1; 5885 if (index != 0) 5886 cons++; 5887 if (index & 0x1) { 5888 pp = PP_GROUPLEADER(pp, cons); 5889 goto retry; 5890 } 5891 } 5892 } 5893 5894 #ifdef DEBUG 5895 5896 #define N_PRLE 1024 5897 struct prle { 5898 page_t *targ; 5899 page_t *repl; 5900 int status; 5901 int pausecpus; 5902 hrtime_t whence; 5903 }; 5904 5905 static struct prle page_relocate_log[N_PRLE]; 5906 static int prl_entry; 5907 static kmutex_t prl_mutex; 5908 5909 #define PAGE_RELOCATE_LOG(t, r, s, p) \ 5910 mutex_enter(&prl_mutex); \ 5911 page_relocate_log[prl_entry].targ = *(t); \ 5912 page_relocate_log[prl_entry].repl = *(r); \ 5913 page_relocate_log[prl_entry].status = (s); \ 5914 page_relocate_log[prl_entry].pausecpus = (p); \ 5915 page_relocate_log[prl_entry].whence = gethrtime(); \ 5916 prl_entry = (prl_entry == (N_PRLE - 1))? 0 : prl_entry + 1; \ 5917 mutex_exit(&prl_mutex); 5918 5919 #else /* !DEBUG */ 5920 #define PAGE_RELOCATE_LOG(t, r, s, p) 5921 #endif 5922 5923 /* 5924 * Core Kernel Page Relocation Algorithm 5925 * 5926 * Input: 5927 * 5928 * target : constituent pages are SE_EXCL locked. 5929 * replacement: constituent pages are SE_EXCL locked. 5930 * 5931 * Output: 5932 * 5933 * nrelocp: number of pages relocated 5934 */ 5935 int 5936 hat_page_relocate(page_t **target, page_t **replacement, spgcnt_t *nrelocp) 5937 { 5938 page_t *targ, *repl; 5939 page_t *tpp, *rpp; 5940 kmutex_t *low, *high; 5941 spgcnt_t npages, i; 5942 page_t *pl = NULL; 5943 int old_pil; 5944 cpuset_t cpuset; 5945 int cap_cpus; 5946 int ret; 5947 5948 if (hat_kpr_enabled == 0 || !kcage_on || PP_ISNORELOC(*target)) { 5949 PAGE_RELOCATE_LOG(target, replacement, EAGAIN, -1); 5950 return (EAGAIN); 5951 } 5952 5953 mutex_enter(&kpr_mutex); 5954 kreloc_thread = curthread; 5955 5956 targ = *target; 5957 repl = *replacement; 5958 ASSERT(repl != NULL); 5959 ASSERT(targ->p_szc == repl->p_szc); 5960 5961 npages = page_get_pagecnt(targ->p_szc); 5962 5963 /* 5964 * unload VA<->PA mappings that are not locked 5965 */ 5966 tpp = targ; 5967 for (i = 0; i < npages; i++) { 5968 (void) hat_pageunload(tpp, SFMMU_KERNEL_RELOC); 5969 tpp++; 5970 } 5971 5972 /* 5973 * Do "presuspend" callbacks, in a context from which we can still 5974 * block as needed. Note that we don't hold the mapping list lock 5975 * of "targ" at this point due to potential locking order issues; 5976 * we assume that between the hat_pageunload() above and holding 5977 * the SE_EXCL lock that the mapping list *cannot* change at this 5978 * point. 5979 */ 5980 ret = hat_pageprocess_precallbacks(targ, HAT_PRESUSPEND, &cap_cpus); 5981 if (ret != 0) { 5982 /* 5983 * EIO translates to fatal error, for all others cleanup 5984 * and return EAGAIN. 
5985 */
5986 ASSERT(ret != EIO);
5987 hat_pageprocess_postcallbacks(targ, HAT_POSTUNSUSPEND);
5988 PAGE_RELOCATE_LOG(target, replacement, ret, -1);
5989 kreloc_thread = NULL;
5990 mutex_exit(&kpr_mutex);
5991 return (EAGAIN);
5992 }
5993
5994 /*
5995 * acquire p_mapping list lock for both the target and replacement
5996 * root pages.
5997 *
5998 * low and high refer to the need to grab the mlist locks in a
5999 * specific order to prevent race conditions. Thus the
6000 * lower lock must be grabbed before the higher lock.
6001 *
6002 * This will block hat_unload from accessing the p_mapping list. Since
6003 * we have the SE_EXCL lock, hat_memload and hat_pageunload will be
6004 * blocked. Thus, no one else will be accessing the p_mapping list
6005 * while we suspend and reload the locked mapping below.
6006 */
6007 tpp = targ;
6008 rpp = repl;
6009 sfmmu_mlist_reloc_enter(tpp, rpp, &low, &high);
6010
6011 kpreempt_disable();
6012
6013 /*
6014 * If the replacement page is of a different virtual color
6015 * than the page it is replacing, we need to handle the VAC
6016 * consistency for it just as we would if we were setting up
6017 * a new mapping to a page.
6018 */
6019 if ((tpp->p_szc == 0) && (PP_GET_VCOLOR(rpp) != NO_VCOLOR)) {
6020 if (tpp->p_vcolor != rpp->p_vcolor) {
6021 sfmmu_cache_flushcolor(PP_GET_VCOLOR(rpp),
6022 rpp->p_pagenum);
6023 }
6024 }
6025
6026 /*
6027 * We raise our PIL to 13 so that we don't get captured by
6028 * another CPU or pinned by an interrupt thread. We can't go to
6029 * PIL 14 since the nexus driver(s) may need to interrupt at
6030 * that level in the case of IOMMU pseudo mappings.
6031 */
6032 cpuset = cpu_ready_set;
6033 CPUSET_DEL(cpuset, CPU->cpu_id);
6034 if (!cap_cpus || CPUSET_ISNULL(cpuset)) {
6035 old_pil = splr(XCALL_PIL);
6036 } else {
6037 old_pil = -1;
6038 xc_attention(cpuset);
6039 }
6040 ASSERT(getpil() == XCALL_PIL);
6041
6042 /*
6043 * Now do suspend callbacks. In the case of an IOMMU mapping
6044 * this will suspend all DMA activity to the page while it is
6045 * being relocated. Since we are well above LOCK_LEVEL and CPUs
6046 * may be captured at this point we should have acquired any needed
6047 * locks in the presuspend callback.
6048 */
6049 ret = hat_pageprocess_precallbacks(targ, HAT_SUSPEND, NULL);
6050 if (ret != 0) {
6051 repl = targ;
6052 goto suspend_fail;
6053 }
6054
6055 /*
6056 * Raise the PIL yet again, this time to block all high-level
6057 * interrupts on this CPU. This is necessary to prevent an
6058 * interrupt routine from pinning the thread which holds the
6059 * mapping suspended and then touching the suspended page.
6060 *
6061 * Once the page is suspended we also need to be careful to
6062 * avoid calling any functions which touch any seg_kmem memory
6063 * since that memory may be backed by the very page we are
6064 * relocating in here!
6065 */
6066 hat_pagesuspend(targ);
6067
6068 /*
6069 * Now that we are confident everybody has stopped using this page,
6070 * copy the page contents. Note we use a physical copy to prevent
6071 * locking issues and to avoid fpRAS because we can't handle it in
6072 * this context.
6073 */
6074 for (i = 0; i < npages; i++, tpp++, rpp++) {
6075 /*
6076 * Copy the contents of the page.
6077 */
6078 ppcopy_kernel(tpp, rpp);
6079 }
6080
6081 tpp = targ;
6082 rpp = repl;
6083 for (i = 0; i < npages; i++, tpp++, rpp++) {
6084 /*
6085 * Copy attributes. VAC consistency was handled above,
6086 * if required.
6087 */ 6088 rpp->p_nrm = tpp->p_nrm; 6089 tpp->p_nrm = 0; 6090 rpp->p_index = tpp->p_index; 6091 tpp->p_index = 0; 6092 rpp->p_vcolor = tpp->p_vcolor; 6093 } 6094 6095 /* 6096 * First, unsuspend the page, if we set the suspend bit, and transfer 6097 * the mapping list from the target page to the replacement page. 6098 * Next process postcallbacks; since pa_hment's are linked only to the 6099 * p_mapping list of root page, we don't iterate over the constituent 6100 * pages. 6101 */ 6102 hat_pagereload(targ, repl); 6103 6104 suspend_fail: 6105 hat_pageprocess_postcallbacks(repl, HAT_UNSUSPEND); 6106 6107 /* 6108 * Now lower our PIL and release any captured CPUs since we 6109 * are out of the "danger zone". After this it will again be 6110 * safe to acquire adaptive mutex locks, or to drop them... 6111 */ 6112 if (old_pil != -1) { 6113 splx(old_pil); 6114 } else { 6115 xc_dismissed(cpuset); 6116 } 6117 6118 kpreempt_enable(); 6119 6120 sfmmu_mlist_reloc_exit(low, high); 6121 6122 /* 6123 * Postsuspend callbacks should drop any locks held across 6124 * the suspend callbacks. As before, we don't hold the mapping 6125 * list lock at this point.. our assumption is that the mapping 6126 * list still can't change due to our holding SE_EXCL lock and 6127 * there being no unlocked mappings left. Hence the restriction 6128 * on calling context to hat_delete_callback() 6129 */ 6130 hat_pageprocess_postcallbacks(repl, HAT_POSTUNSUSPEND); 6131 if (ret != 0) { 6132 /* 6133 * The second presuspend call failed: we got here through 6134 * the suspend_fail label above. 6135 */ 6136 ASSERT(ret != EIO); 6137 PAGE_RELOCATE_LOG(target, replacement, ret, cap_cpus); 6138 kreloc_thread = NULL; 6139 mutex_exit(&kpr_mutex); 6140 return (EAGAIN); 6141 } 6142 6143 /* 6144 * Now that we're out of the performance critical section we can 6145 * take care of updating the hash table, since we still 6146 * hold all the pages locked SE_EXCL at this point we 6147 * needn't worry about things changing out from under us. 6148 */ 6149 tpp = targ; 6150 rpp = repl; 6151 for (i = 0; i < npages; i++, tpp++, rpp++) { 6152 6153 /* 6154 * replace targ with replacement in page_hash table 6155 */ 6156 targ = tpp; 6157 page_relocate_hash(rpp, targ); 6158 6159 /* 6160 * concatenate target; caller of platform_page_relocate() 6161 * expects target to be concatenated after returning. 6162 */ 6163 ASSERT(targ->p_next == targ); 6164 ASSERT(targ->p_prev == targ); 6165 page_list_concat(&pl, &targ); 6166 } 6167 6168 ASSERT(*target == pl); 6169 *nrelocp = npages; 6170 PAGE_RELOCATE_LOG(target, replacement, 0, cap_cpus); 6171 kreloc_thread = NULL; 6172 mutex_exit(&kpr_mutex); 6173 return (0); 6174 } 6175 6176 /* 6177 * Called when stray pa_hments are found attached to a page which is 6178 * being freed. Notify the subsystem which attached the pa_hment of 6179 * the error if it registered a suitable handler, else panic. 6180 */ 6181 static void 6182 sfmmu_pahment_leaked(struct pa_hment *pahmep) 6183 { 6184 id_t cb_id = pahmep->cb_id; 6185 6186 ASSERT(cb_id >= (id_t)0 && cb_id < sfmmu_cb_nextid); 6187 if (sfmmu_cb_table[cb_id].errhandler != NULL) { 6188 if (sfmmu_cb_table[cb_id].errhandler(pahmep->addr, pahmep->len, 6189 HAT_CB_ERR_LEAKED, pahmep->pvt) == 0) 6190 return; /* non-fatal */ 6191 } 6192 panic("pa_hment leaked: 0x%p", pahmep); 6193 } 6194 6195 /* 6196 * Remove all mappings to page 'pp'. 
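 *
 * Note that the loops below capture hme_next into tmphme before
 * acting on each element, because sfmmu_pageunload() can free the
 * current sf_hment. This save-the-next-pointer idiom recurs wherever
 * p_mapping is walked destructively in this file:
 *
 *	for (p = pp->p_mapping; p != NULL; p = next) {
 *		next = p->hme_next;	// p may be freed below
 *		// ... act on p ...
 *	}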
6197 */ 6198 int 6199 hat_pageunload(struct page *pp, uint_t forceflag) 6200 { 6201 struct page *origpp = pp; 6202 struct sf_hment *sfhme, *tmphme; 6203 struct hme_blk *hmeblkp; 6204 kmutex_t *pml, *pmtx; 6205 cpuset_t cpuset, tset; 6206 int index, cons; 6207 int xhme_blks; 6208 int pa_hments; 6209 6210 ASSERT(PAGE_EXCL(pp)); 6211 6212 retry_xhat: 6213 tmphme = NULL; 6214 xhme_blks = 0; 6215 pa_hments = 0; 6216 CPUSET_ZERO(cpuset); 6217 6218 pml = sfmmu_mlist_enter(pp); 6219 6220 if (pp->p_kpmref) 6221 sfmmu_kpm_pageunload(pp); 6222 ASSERT(!PP_ISMAPPED_KPM(pp)); 6223 6224 index = PP_MAPINDEX(pp); 6225 cons = TTE8K; 6226 retry: 6227 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 6228 tmphme = sfhme->hme_next; 6229 6230 if (IS_PAHME(sfhme)) { 6231 ASSERT(sfhme->hme_data != NULL); 6232 pa_hments++; 6233 continue; 6234 } 6235 6236 hmeblkp = sfmmu_hmetohblk(sfhme); 6237 if (hmeblkp->hblk_xhat_bit) { 6238 struct xhat_hme_blk *xblk = 6239 (struct xhat_hme_blk *)hmeblkp; 6240 6241 (void) XHAT_PAGEUNLOAD(xblk->xhat_hme_blk_hat, 6242 pp, forceflag, XBLK2PROVBLK(xblk)); 6243 6244 xhme_blks = 1; 6245 continue; 6246 } 6247 6248 /* 6249 * If there are kernel mappings don't unload them, they will 6250 * be suspended. 6251 */ 6252 if (forceflag == SFMMU_KERNEL_RELOC && hmeblkp->hblk_lckcnt && 6253 hmeblkp->hblk_tag.htag_id == ksfmmup) 6254 continue; 6255 6256 tset = sfmmu_pageunload(pp, sfhme, cons); 6257 CPUSET_OR(cpuset, tset); 6258 } 6259 6260 while (index != 0) { 6261 index = index >> 1; 6262 if (index != 0) 6263 cons++; 6264 if (index & 0x1) { 6265 /* Go to leading page */ 6266 pp = PP_GROUPLEADER(pp, cons); 6267 ASSERT(sfmmu_mlist_held(pp)); 6268 goto retry; 6269 } 6270 } 6271 6272 /* 6273 * cpuset may be empty if the page was only mapped by segkpm, 6274 * in which case we won't actually cross-trap. 6275 */ 6276 xt_sync(cpuset); 6277 6278 /* 6279 * The page should have no mappings at this point, unless 6280 * we were called from hat_page_relocate() in which case we 6281 * leave the locked mappings which will be suspended later. 6282 */ 6283 ASSERT(!PP_ISMAPPED(origpp) || xhme_blks || pa_hments || 6284 (forceflag == SFMMU_KERNEL_RELOC)); 6285 6286 if (PP_ISTNC(pp)) { 6287 if (cons == TTE8K) { 6288 pmtx = sfmmu_page_enter(pp); 6289 PP_CLRTNC(pp); 6290 sfmmu_page_exit(pmtx); 6291 } else { 6292 conv_tnc(pp, cons); 6293 } 6294 } 6295 6296 if (pa_hments && forceflag != SFMMU_KERNEL_RELOC) { 6297 /* 6298 * Unlink any pa_hments and free them, calling back 6299 * the responsible subsystem to notify it of the error. 6300 * This can occur in situations such as drivers leaking 6301 * DMA handles: naughty, but common enough that we'd like 6302 * to keep the system running rather than bringing it 6303 * down with an obscure error like "pa_hment leaked" 6304 * which doesn't aid the user in debugging their driver. 6305 */ 6306 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 6307 tmphme = sfhme->hme_next; 6308 if (IS_PAHME(sfhme)) { 6309 struct pa_hment *pahmep = sfhme->hme_data; 6310 sfmmu_pahment_leaked(pahmep); 6311 HME_SUB(sfhme, pp); 6312 kmem_cache_free(pa_hment_cache, pahmep); 6313 } 6314 } 6315 6316 ASSERT(!PP_ISMAPPED(origpp) || xhme_blks); 6317 } 6318 6319 sfmmu_mlist_exit(pml); 6320 6321 /* 6322 * XHAT may not have finished unloading pages 6323 * because some other thread was waiting for 6324 * mlist lock and XHAT_PAGEUNLOAD let it do 6325 * the job. 
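 *
 * The "while (index)" walk above (repeated in several routines
 * below) is worth a sketch: PP_MAPINDEX(pp) is a bitmask recording
 * which large page sizes currently map this page. Shifting the mask
 * right once per size class and testing the low bit visits the
 * group leader of every size that has a mapping; roughly:
 *
 *	index = PP_MAPINDEX(pp);
 *	cons = TTE8K;
 *	while (index >>= 1) {
 *		cons++;				// next larger page size
 *		if (index & 0x1)
 *			visit(PP_GROUPLEADER(pp, cons));
 *	}
 *
 * (The real loops restart their scan at the leader rather than
 * calling a helper.)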
6326 */
6327 if (xhme_blks) {
6328 pp = origpp;
6329 goto retry_xhat;
6330 }
6331
6332 return (0);
6333 }
6334
6335 static cpuset_t
6336 sfmmu_pageunload(page_t *pp, struct sf_hment *sfhme, int cons)
6337 {
6338 struct hme_blk *hmeblkp;
6339 sfmmu_t *sfmmup;
6340 tte_t tte, ttemod;
6341 #ifdef DEBUG
6342 tte_t orig_old;
6343 #endif /* DEBUG */
6344 caddr_t addr;
6345 int ttesz;
6346 int ret;
6347 cpuset_t cpuset;
6348
6349 ASSERT(pp != NULL);
6350 ASSERT(sfmmu_mlist_held(pp));
6351 ASSERT(pp->p_vnode != &kvp);
6352
6353 CPUSET_ZERO(cpuset);
6354
6355 hmeblkp = sfmmu_hmetohblk(sfhme);
6356
6357 readtte:
6358 sfmmu_copytte(&sfhme->hme_tte, &tte);
6359 if (TTE_IS_VALID(&tte)) {
6360 sfmmup = hblktosfmmu(hmeblkp);
6361 ttesz = get_hblk_ttesz(hmeblkp);
6362 /*
6363 * Only unload mappings of 'cons' size.
6364 */
6365 if (ttesz != cons)
6366 return (cpuset);
6367
6368 /*
6369 * Note that we have p_mapping lock, but no hash lock here.
6370 * hblk_unload() has to have both hash lock AND p_mapping
6371 * lock before it tries to modify tte. So, the tte could
6372 * not become invalid in the sfmmu_modifytte_try() below.
6373 */
6374 ttemod = tte;
6375 #ifdef DEBUG
6376 orig_old = tte;
6377 #endif /* DEBUG */
6378
6379 TTE_SET_INVALID(&ttemod);
6380 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte);
6381 if (ret < 0) {
6382 #ifdef DEBUG
6383 /* only R/M bits can change. */
6384 chk_tte(&orig_old, &tte, &ttemod, hmeblkp);
6385 #endif /* DEBUG */
6386 goto readtte;
6387 }
6388
6389 if (ret == 0) {
6390 panic("pageunload: cas failed?");
6391 }
6392
6393 addr = tte_to_vaddr(hmeblkp, tte);
6394
6395 sfmmu_ttesync(sfmmup, addr, &tte, pp);
6396
6397 atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -1);
6398
6399 /*
6400 * We need to flush the page from the virtual cache
6401 * in order to prevent a virtual cache alias
6402 * inconsistency. The particular scenario we need
6403 * to worry about is:
6404 * Given: va1 and va2 are two virtual addresses that
6405 * alias and will map the same physical address.
6406 * 1. mapping exists from va1 to pa and data has
6407 * been read into the cache.
6408 * 2. unload va1.
6409 * 3. load va2 and modify data using va2.
6410 * 4. unload va2.
6411 * 5. load va1 and reference data. Unless we flush
6412 * the data cache when we unload we will get
6413 * stale data.
6414 * This scenario is taken care of by using virtual
6415 * page coloring.
6416 */
6417 if (sfmmup->sfmmu_ismhat) {
6418 /*
6419 * Flush TSBs, TLBs and caches
6420 * of every process
6421 * sharing this ism segment.
6422 */
6423 sfmmu_hat_lock_all();
6424 mutex_enter(&ism_mlist_lock);
6425 kpreempt_disable();
6426 if (do_virtual_coloring)
6427 sfmmu_ismtlbcache_demap(addr, sfmmup, hmeblkp,
6428 pp->p_pagenum, CACHE_NO_FLUSH);
6429 else
6430 sfmmu_ismtlbcache_demap(addr, sfmmup, hmeblkp,
6431 pp->p_pagenum, CACHE_FLUSH);
6432 kpreempt_enable();
6433 mutex_exit(&ism_mlist_lock);
6434 sfmmu_hat_unlock_all();
6435 cpuset = cpu_ready_set;
6436 } else if (do_virtual_coloring) {
6437 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
6438 cpuset = sfmmup->sfmmu_cpusran;
6439 } else {
6440 sfmmu_tlbcache_demap(addr, sfmmup, hmeblkp,
6441 pp->p_pagenum, 0, FLUSH_NECESSARY_CPUS,
6442 CACHE_FLUSH, 0);
6443 cpuset = sfmmup->sfmmu_cpusran;
6444 }
6445
6446 /*
6447 * Hme_sub has to run after ttesync() and a_rss update.
6448 * See hblk_unload().
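 *
 * The ordering in the block below also matters: HME_SUB() first,
 * then a store barrier, and only then the hblk_vcnt/hblk_hmecnt
 * decrements, so that (given the store ordering membar_stst()
 * provides) a thread observing the counts drop cannot still
 * observe the hment linked. Schematically:
 *
 *	unlink(hment);		// HME_SUB()
 *	membar_stst();		// order the unlink before the counts
 *	atomic_dec(&vcnt);
 *	atomic_dec(&hmecnt);	// hblk only reclaimable after this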
6449 */
6450 HME_SUB(sfhme, pp);
6451 membar_stst();
6452
6453 /*
6454 * We cannot make ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS)
6455 * since pteload may have done a HME_ADD() right after
6456 * we did the HME_SUB() above. Hmecnt is now maintained
6457 * by cas only; no lock guarantees its value. The only
6458 * guarantee we have is that the hmecnt should not be less than
6459 * what it should be, so the hblk will not be taken away.
6460 * It is also important that we decrement the hmecnt after
6461 * we are done with hmeblkp so that this hmeblk won't be
6462 * stolen.
6463 */
6464 ASSERT(hmeblkp->hblk_hmecnt > 0);
6465 ASSERT(hmeblkp->hblk_vcnt > 0);
6466 atomic_add_16(&hmeblkp->hblk_vcnt, -1);
6467 atomic_add_16(&hmeblkp->hblk_hmecnt, -1);
6468 /*
6469 * This is bug 4063182.
6470 * XXX: fixme
6471 * ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt ||
6472 * !hmeblkp->hblk_lckcnt);
6473 */
6474 } else {
6475 panic("invalid tte? pp %p &tte %p",
6476 (void *)pp, (void *)&tte);
6477 }
6478
6479 return (cpuset);
6480 }
6481
6482 /*
6483 * While relocating a kernel page, this function will move the mappings
6484 * from tpp to dpp and modify any data associated with these mappings.
6485 * It also unsuspends the suspended kernel mapping.
6486 */
6487 static void
6488 hat_pagereload(struct page *tpp, struct page *dpp)
6489 {
6490 struct sf_hment *sfhme;
6491 tte_t tte, ttemod;
6492 int index, cons;
6493
6494 ASSERT(getpil() == PIL_MAX);
6495 ASSERT(sfmmu_mlist_held(tpp));
6496 ASSERT(sfmmu_mlist_held(dpp));
6497
6498 index = PP_MAPINDEX(tpp);
6499 cons = TTE8K;
6500
6501 /* Update real mappings to the page */
6502 retry:
6503 for (sfhme = tpp->p_mapping; sfhme != NULL; sfhme = sfhme->hme_next) {
6504 if (IS_PAHME(sfhme))
6505 continue;
6506 sfmmu_copytte(&sfhme->hme_tte, &tte);
6507 ttemod = tte;
6508
6509 /*
6510 * replace old pfn with new pfn in TTE
6511 */
6512 PFN_TO_TTE(ttemod, dpp->p_pagenum);
6513
6514 /*
6515 * clear suspend bit
6516 */
6517 ASSERT(TTE_IS_SUSPEND(&ttemod));
6518 TTE_CLR_SUSPEND(&ttemod);
6519
6520 if (sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte) < 0)
6521 panic("hat_pagereload(): sfmmu_modifytte_try() failed");
6522
6523 /*
6524 * set hme_page to point to the new page
6525 */
6526 sfhme->hme_page = dpp;
6527 }
6528
6529 /*
6530 * move p_mapping list from old page to new page
6531 */
6532 dpp->p_mapping = tpp->p_mapping;
6533 tpp->p_mapping = NULL;
6534 dpp->p_share = tpp->p_share;
6535 tpp->p_share = 0;
6536
6537 while (index != 0) {
6538 index = index >> 1;
6539 if (index != 0)
6540 cons++;
6541 if (index & 0x1) {
6542 tpp = PP_GROUPLEADER(tpp, cons);
6543 dpp = PP_GROUPLEADER(dpp, cons);
6544 goto retry;
6545 }
6546 }
6547
6548 if (dtrace_kreloc_fini)
6549 (*dtrace_kreloc_fini)();
6550 mutex_exit(&kpr_suspendlock);
6551 }
6552
6553 uint_t
6554 hat_pagesync(struct page *pp, uint_t clearflag)
6555 {
6556 struct sf_hment *sfhme, *tmphme = NULL;
6557 struct hme_blk *hmeblkp;
6558 kmutex_t *pml;
6559 cpuset_t cpuset, tset;
6560 int index, cons;
6561 extern ulong_t po_share;
6562 page_t *save_pp = pp;
6563
6564 CPUSET_ZERO(cpuset);
6565
6566 if (PP_ISRO(pp) && (clearflag & HAT_SYNC_STOPON_MOD)) {
6567 return (PP_GENERIC_ATTR(pp));
6568 }
6569
6570 if ((clearflag == (HAT_SYNC_STOPON_REF | HAT_SYNC_DONTZERO)) &&
6571 PP_ISREF(pp)) {
6572 return (PP_GENERIC_ATTR(pp));
6573 }
6574
6575 if ((clearflag == (HAT_SYNC_STOPON_MOD | HAT_SYNC_DONTZERO)) &&
6576 PP_ISMOD(pp)) {
6577 return (PP_GENERIC_ATTR(pp));
6578 }
6579
6580 if ((clearflag & HAT_SYNC_STOPON_SHARED) != 0 &&
6581 (pp->p_share > po_share) &&
6582 !(clearflag & HAT_SYNC_ZERORM)) {
6583 if (PP_ISRO(pp))
6584 hat_page_setattr(pp, P_REF);
6585 return (PP_GENERIC_ATTR(pp));
6586 }
6587
6588 clearflag &= ~HAT_SYNC_STOPON_SHARED;
6589 pml = sfmmu_mlist_enter(pp);
6590 index = PP_MAPINDEX(pp);
6591 cons = TTE8K;
6592 retry:
6593 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
6594 /*
6595 * We need to save the next hment on the list since
6596 * it is possible for pagesync to remove an invalid hment
6597 * from the list.
6598 */
6599 tmphme = sfhme->hme_next;
6600 /*
6601 * If we are looking for large mappings and this hme doesn't
6602 * reach the range we are seeking, just ignore it.
6603 */
6604 hmeblkp = sfmmu_hmetohblk(sfhme);
6605 if (hmeblkp->hblk_xhat_bit)
6606 continue;
6607
6608 if (hme_size(sfhme) < cons)
6609 continue;
6610 tset = sfmmu_pagesync(pp, sfhme,
6611 clearflag & ~HAT_SYNC_STOPON_RM);
6612 CPUSET_OR(cpuset, tset);
6613 /*
6614 * If clearflag is HAT_SYNC_DONTZERO, break out as soon
6615 * as the "ref" or "mod" is set.
6616 */
6617 if ((clearflag & ~HAT_SYNC_STOPON_RM) == HAT_SYNC_DONTZERO &&
6618 (((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp)) ||
6619 ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)))) {
6620 index = 0;
6621 break;
6622 }
6623 }
6624
6625 while (index) {
6626 index = index >> 1;
6627 cons++;
6628 if (index & 0x1) {
6629 /* Go to leading page */
6630 pp = PP_GROUPLEADER(pp, cons);
6631 goto retry;
6632 }
6633 }
6634
6635 xt_sync(cpuset);
6636 sfmmu_mlist_exit(pml);
6637 return (PP_GENERIC_ATTR(save_pp));
6638 }
6639
6640 /*
6641 * Sync the hardware dependent attributes of one mapping into the page struct
6642 */
6643 static cpuset_t
6644 sfmmu_pagesync(struct page *pp, struct sf_hment *sfhme,
6645 uint_t clearflag)
6646 {
6647 caddr_t addr;
6648 tte_t tte, ttemod;
6649 struct hme_blk *hmeblkp;
6650 int ret;
6651 sfmmu_t *sfmmup;
6652 cpuset_t cpuset;
6653
6654 ASSERT(pp != NULL);
6655 ASSERT(sfmmu_mlist_held(pp));
6656 ASSERT((clearflag == HAT_SYNC_DONTZERO) ||
6657 (clearflag == HAT_SYNC_ZERORM));
6658
6659 SFMMU_STAT(sf_pagesync);
6660
6661 CPUSET_ZERO(cpuset);
6662
6663 sfmmu_pagesync_retry:
6664
6665 sfmmu_copytte(&sfhme->hme_tte, &tte);
6666 if (TTE_IS_VALID(&tte)) {
6667 hmeblkp = sfmmu_hmetohblk(sfhme);
6668 sfmmup = hblktosfmmu(hmeblkp);
6669 addr = tte_to_vaddr(hmeblkp, tte);
6670 if (clearflag == HAT_SYNC_ZERORM) {
6671 ttemod = tte;
6672 TTE_CLR_RM(&ttemod);
6673 ret = sfmmu_modifytte_try(&tte, &ttemod,
6674 &sfhme->hme_tte);
6675 if (ret < 0) {
6676 /*
6677 * cas failed and the new value is not what
6678 * we want.
6679 */
6680 goto sfmmu_pagesync_retry;
6681 }
6682
6683 if (ret > 0) {
6684 /* we win the cas */
6685 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
6686 cpuset = sfmmup->sfmmu_cpusran;
6687 }
6688 }
6689
6690 sfmmu_ttesync(sfmmup, addr, &tte, pp);
6691 }
6692 return (cpuset);
6693 }
6694
6695 /*
6696 * Remove write permission from a mapping to a page, so that
6697 * we can detect the next modification of it. This requires modifying
6698 * the TTE then invalidating (demap) any TLB entry using that TTE.
6699 * This code is similar to sfmmu_pagesync().
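 *
 * The mechanism is the usual one for deferred dirty detection: take
 * away hardware write permission while the page stays mapped so the
 * next store through any mapping faults, letting the fault handler
 * set P_MOD before re-enabling writes. The TTE edit below amounts
 * to:
 *
 *	ttemod = tte;
 *	TTE_CLR_WRT(&ttemod);	// next store will fault
 *	TTE_CLR_MOD(&ttemod);	// page is considered clean again
 *	// cas the TTE back, then demap so no CPU retains a stale
 *	// writable TLB entry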
6700 */
6701 static cpuset_t
6702 sfmmu_pageclrwrt(struct page *pp, struct sf_hment *sfhme)
6703 {
6704 caddr_t addr;
6705 tte_t tte;
6706 tte_t ttemod;
6707 struct hme_blk *hmeblkp;
6708 int ret;
6709 sfmmu_t *sfmmup;
6710 cpuset_t cpuset;
6711
6712 ASSERT(pp != NULL);
6713 ASSERT(sfmmu_mlist_held(pp));
6714
6715 CPUSET_ZERO(cpuset);
6716 SFMMU_STAT(sf_clrwrt);
6717
6718 retry:
6719
6720 sfmmu_copytte(&sfhme->hme_tte, &tte);
6721 if (TTE_IS_VALID(&tte) && TTE_IS_WRITABLE(&tte)) {
6722 hmeblkp = sfmmu_hmetohblk(sfhme);
6723
6724 /*
6725 * xhat mappings should never be to a VMODSORT page.
6726 */
6727 ASSERT(hmeblkp->hblk_xhat_bit == 0);
6728
6729 sfmmup = hblktosfmmu(hmeblkp);
6730 addr = tte_to_vaddr(hmeblkp, tte);
6731
6732 ttemod = tte;
6733 TTE_CLR_WRT(&ttemod);
6734 TTE_CLR_MOD(&ttemod);
6735 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte);
6736
6737 /*
6738 * if cas failed and the new value is not what
6739 * we want retry
6740 */
6741 if (ret < 0)
6742 goto retry;
6743
6744 /* we win the cas */
6745 if (ret > 0) {
6746 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
6747 cpuset = sfmmup->sfmmu_cpusran;
6748 }
6749 }
6750
6751 return (cpuset);
6752 }
6753
6754 /*
6755 * Walk all mappings of a page, removing write permission and clearing the
6756 * ref/mod bits. This code is similar to hat_pagesync().
6757 */
6758 static void
6759 hat_page_clrwrt(page_t *pp)
6760 {
6761 struct sf_hment *sfhme;
6762 struct sf_hment *tmphme = NULL;
6763 kmutex_t *pml;
6764 cpuset_t cpuset;
6765 cpuset_t tset;
6766 int index;
6767 int cons;
6768
6769 CPUSET_ZERO(cpuset);
6770
6771 pml = sfmmu_mlist_enter(pp);
6772 index = PP_MAPINDEX(pp);
6773 cons = TTE8K;
6774 retry:
6775 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
6776 tmphme = sfhme->hme_next;
6777
6778 /*
6779 * If we are looking for large mappings and this hme doesn't
6780 * reach the range we are seeking, just ignore it.
6781 */
6782
6783 if (hme_size(sfhme) < cons)
6784 continue;
6785
6786 tset = sfmmu_pageclrwrt(pp, sfhme);
6787 CPUSET_OR(cpuset, tset);
6788 }
6789
6790 while (index) {
6791 index = index >> 1;
6792 cons++;
6793 if (index & 0x1) {
6794 /* Go to leading page */
6795 pp = PP_GROUPLEADER(pp, cons);
6796 goto retry;
6797 }
6798 }
6799
6800 xt_sync(cpuset);
6801 sfmmu_mlist_exit(pml);
6802 }
6803
6804 /*
6805 * Set the given REF/MOD/RO bits for the given page.
6806 * For a vnode with a sorted v_pages list, we need to change
6807 * the attributes and the v_pages list together under page_vnode_mutex.
6808 */
6809 void
6810 hat_page_setattr(page_t *pp, uint_t flag)
6811 {
6812 vnode_t *vp = pp->p_vnode;
6813 page_t **listp;
6814 kmutex_t *pmtx;
6815 kmutex_t *vphm = NULL;
6816
6817 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
6818
6819 /*
6820 * nothing to do if attribute already set
6821 */
6822 if ((pp->p_nrm & flag) == flag)
6823 return;
6824
6825 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) {
6826 vphm = page_vnode_mutex(vp);
6827 mutex_enter(vphm);
6828 }
6829
6830 pmtx = sfmmu_page_enter(pp);
6831 pp->p_nrm |= flag;
6832 sfmmu_page_exit(pmtx);
6833
6834 if (vphm != NULL) {
6835 /*
6836 * Some File Systems examine v_pages for NULL w/o
6837 * grabbing the vphm mutex. Must not let it become NULL when
6838 * pp is the only page on the list.
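 *
 * The relink below also keeps the IS_VMODSORT ordering invariant: a
 * page that just gained P_MOD is unlinked and re-added at the tail
 * of v_pages, while hat_page_clrattr() re-adds a freshly cleaned
 * page at the head, so dirty pages accumulate at one end of the
 * list and scanners may stop early. In effect:
 *
 *	page_vpsub(&vp->v_pages, pp);	// unlink
 *	page_vpadd(tailp, pp);		// relink at the tail
 *
 * where tailp stands in for the p_vpprev->p_vpnext expression used
 * in the code.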
6839 */ 6840 if (pp->p_vpnext != pp) { 6841 page_vpsub(&vp->v_pages, pp); 6842 if (vp->v_pages != NULL) 6843 listp = &vp->v_pages->p_vpprev->p_vpnext; 6844 else 6845 listp = &vp->v_pages; 6846 page_vpadd(listp, pp); 6847 } 6848 mutex_exit(vphm); 6849 } 6850 } 6851 6852 void 6853 hat_page_clrattr(page_t *pp, uint_t flag) 6854 { 6855 vnode_t *vp = pp->p_vnode; 6856 kmutex_t *vphm = NULL; 6857 kmutex_t *pmtx; 6858 6859 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 6860 6861 /* 6862 * For vnode with a sorted v_pages list, we need to change 6863 * the attributes and the v_pages list together under page_vnode_mutex. 6864 */ 6865 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) { 6866 vphm = page_vnode_mutex(vp); 6867 mutex_enter(vphm); 6868 } 6869 6870 pmtx = sfmmu_page_enter(pp); 6871 pp->p_nrm &= ~flag; 6872 sfmmu_page_exit(pmtx); 6873 6874 if (vphm != NULL) { 6875 /* 6876 * Some File Systems examine v_pages for NULL w/o 6877 * grabbing the vphm mutex. Must not let it become NULL when 6878 * pp is the only page on the list. 6879 */ 6880 if (pp->p_vpnext != pp) { 6881 page_vpsub(&vp->v_pages, pp); 6882 page_vpadd(&vp->v_pages, pp); 6883 } 6884 mutex_exit(vphm); 6885 6886 /* 6887 * VMODSORT works by removing write permissions and getting 6888 * a fault when a page is made dirty. At this point 6889 * we need to remove write permission from all mappings 6890 * to this page. 6891 */ 6892 hat_page_clrwrt(pp); 6893 } 6894 } 6895 6896 6897 uint_t 6898 hat_page_getattr(page_t *pp, uint_t flag) 6899 { 6900 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 6901 return ((uint_t)(pp->p_nrm & flag)); 6902 } 6903 6904 /* 6905 * DEBUG kernels: verify that a kernel va<->pa translation 6906 * is safe by checking the underlying page_t is in a page 6907 * relocation-safe state. 6908 */ 6909 #ifdef DEBUG 6910 void 6911 sfmmu_check_kpfn(pfn_t pfn) 6912 { 6913 page_t *pp; 6914 int index, cons; 6915 6916 if (hat_check_vtop == 0) 6917 return; 6918 6919 if (hat_kpr_enabled == 0 || kvseg.s_base == NULL || panicstr) 6920 return; 6921 6922 pp = page_numtopp_nolock(pfn); 6923 if (!pp) 6924 return; 6925 6926 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp)) 6927 return; 6928 6929 /* 6930 * Handed a large kernel page, we dig up the root page since we 6931 * know the root page might have the lock also. 6932 */ 6933 if (pp->p_szc != 0) { 6934 index = PP_MAPINDEX(pp); 6935 cons = TTE8K; 6936 again: 6937 while (index != 0) { 6938 index >>= 1; 6939 if (index != 0) 6940 cons++; 6941 if (index & 0x1) { 6942 pp = PP_GROUPLEADER(pp, cons); 6943 goto again; 6944 } 6945 } 6946 } 6947 6948 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp)) 6949 return; 6950 6951 /* 6952 * Pages need to be locked or allocated "permanent" (either from 6953 * static_arena arena or explicitly setting PG_NORELOC when calling 6954 * page_create_va()) for VA->PA translations to be valid. 6955 */ 6956 if (!PP_ISNORELOC(pp)) 6957 panic("Illegal VA->PA translation, pp 0x%p not permanent", pp); 6958 else 6959 panic("Illegal VA->PA translation, pp 0x%p not locked", pp); 6960 } 6961 #endif /* DEBUG */ 6962 6963 /* 6964 * Returns a page frame number for a given virtual address. 
6965 * Returns PFN_INVALID to indicate an invalid mapping 6966 */ 6967 pfn_t 6968 hat_getpfnum(struct hat *hat, caddr_t addr) 6969 { 6970 pfn_t pfn; 6971 tte_t tte; 6972 6973 /* 6974 * We would like to 6975 * ASSERT(AS_LOCK_HELD(as, &as->a_lock)); 6976 * but we can't because the iommu driver will call this 6977 * routine at interrupt time and it can't grab the as lock 6978 * or it will deadlock: A thread could have the as lock 6979 * and be waiting for io. The io can't complete 6980 * because the interrupt thread is blocked trying to grab 6981 * the as lock. 6982 */ 6983 6984 ASSERT(hat->sfmmu_xhat_provider == NULL); 6985 6986 if (hat == ksfmmup) { 6987 if (segkpm && IS_KPM_ADDR(addr)) 6988 return (sfmmu_kpm_vatopfn(addr)); 6989 while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte)) 6990 == PFN_SUSPENDED) { 6991 sfmmu_vatopfn_suspended(addr, ksfmmup, &tte); 6992 } 6993 sfmmu_check_kpfn(pfn); 6994 return (pfn); 6995 } else { 6996 return (sfmmu_uvatopfn(addr, hat)); 6997 } 6998 } 6999 7000 /* 7001 * hat_getkpfnum() is an obsolete DDI routine, and its use is discouraged. 7002 * Use hat_getpfnum(kas.a_hat, ...) instead. 7003 * 7004 * We'd like to return PFN_INVALID if the mappings have underlying page_t's 7005 * but can't right now due to the fact that some software has grown to use 7006 * this interface incorrectly. So for now when the interface is misused, 7007 * return a warning to the user that in the future it won't work in the 7008 * way they're abusing it, and carry on (after disabling page relocation). 7009 */ 7010 pfn_t 7011 hat_getkpfnum(caddr_t addr) 7012 { 7013 pfn_t pfn; 7014 tte_t tte; 7015 int badcaller = 0; 7016 extern int segkmem_reloc; 7017 7018 if (segkpm && IS_KPM_ADDR(addr)) { 7019 badcaller = 1; 7020 pfn = sfmmu_kpm_vatopfn(addr); 7021 } else { 7022 while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte)) 7023 == PFN_SUSPENDED) { 7024 sfmmu_vatopfn_suspended(addr, ksfmmup, &tte); 7025 } 7026 badcaller = pf_is_memory(pfn); 7027 } 7028 7029 if (badcaller) { 7030 /* 7031 * We can't return PFN_INVALID or the caller may panic 7032 * or corrupt the system. The only alternative is to 7033 * disable page relocation at this point for all kernel 7034 * memory. This will impact any callers of page_relocate() 7035 * such as FMA or DR. 7036 * 7037 * RFE: Add junk here to spit out an ereport so the sysadmin 7038 * can be advised that he should upgrade his device driver 7039 * so that this doesn't happen. 7040 */ 7041 hat_getkpfnum_badcall(caller()); 7042 if (hat_kpr_enabled && segkmem_reloc) { 7043 hat_kpr_enabled = 0; 7044 segkmem_reloc = 0; 7045 cmn_err(CE_WARN, "Kernel Page Relocation is DISABLED"); 7046 } 7047 } 7048 return (pfn); 7049 } 7050 7051 pfn_t 7052 sfmmu_uvatopfn(caddr_t vaddr, struct hat *sfmmup) 7053 { 7054 struct hmehash_bucket *hmebp; 7055 hmeblk_tag hblktag; 7056 int hmeshift, hashno = 1; 7057 struct hme_blk *hmeblkp = NULL; 7058 7059 struct sf_hment *sfhmep; 7060 tte_t tte; 7061 pfn_t pfn; 7062 7063 /* support for ISM */ 7064 ism_map_t *ism_map; 7065 ism_blk_t *ism_blkp; 7066 int i; 7067 sfmmu_t *ism_hatid = NULL; 7068 sfmmu_t *locked_hatid = NULL; 7069 7070 7071 ASSERT(sfmmup != ksfmmup); 7072 SFMMU_STAT(sf_user_vtop); 7073 /* 7074 * Set ism_hatid if vaddr falls in a ISM segment. 
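 *
 * When the address does fall inside an ISM segment, the lookup is
 * redirected: the shared ISM hat owns the translations, and the
 * address is rebased to a segment-relative offset before the hash
 * search below. In effect:
 *
 *	if (ism_start(map) <= vaddr && vaddr < ism_end(map)) {
 *		sfmmup = map.imap_ismhat;	// shared hat owns the TTEs
 *		vaddr -= ism_start(map);	// segment-relative address
 *	}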
7075 */
7076 ism_blkp = sfmmup->sfmmu_iblk;
7077 if (ism_blkp) {
7078 sfmmu_ismhat_enter(sfmmup, 0);
7079 locked_hatid = sfmmup;
7080 }
7081 while (ism_blkp && ism_hatid == NULL) {
7082 ism_map = ism_blkp->iblk_maps;
7083 for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) {
7084 if (vaddr >= ism_start(ism_map[i]) &&
7085 vaddr < ism_end(ism_map[i])) {
7086 sfmmup = ism_hatid = ism_map[i].imap_ismhat;
7087 vaddr = (caddr_t)(vaddr -
7088 ism_start(ism_map[i]));
7089 break;
7090 }
7091 }
7092 ism_blkp = ism_blkp->iblk_next;
7093 }
7094 if (locked_hatid) {
7095 sfmmu_ismhat_exit(locked_hatid, 0);
7096 }
7097
7098 hblktag.htag_id = sfmmup;
7099 do {
7100 hmeshift = HME_HASH_SHIFT(hashno);
7101 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
7102 hblktag.htag_rehash = hashno;
7103 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift);
7104
7105 SFMMU_HASH_LOCK(hmebp);
7106
7107 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
7108 if (hmeblkp != NULL) {
7109 HBLKTOHME(sfhmep, hmeblkp, vaddr);
7110 sfmmu_copytte(&sfhmep->hme_tte, &tte);
7111 if (TTE_IS_VALID(&tte)) {
7112 pfn = TTE_TO_PFN(vaddr, &tte);
7113 } else {
7114 pfn = PFN_INVALID;
7115 }
7116 SFMMU_HASH_UNLOCK(hmebp);
7117 return (pfn);
7118 }
7119 SFMMU_HASH_UNLOCK(hmebp);
7120 hashno++;
7121 } while (HME_REHASH(sfmmup) && (hashno <= mmu_hashcnt));
7122 return (PFN_INVALID);
7123 }
7124
7125
7126 /*
7127 * For compatibility with AT&T and later optimizations
7128 */
7129 /* ARGSUSED */
7130 void
7131 hat_map(struct hat *hat, caddr_t addr, size_t len, uint_t flags)
7132 {
7133 ASSERT(hat != NULL);
7134 ASSERT(hat->sfmmu_xhat_provider == NULL);
7135 }
7136
7137 /*
7138 * Return the number of mappings to a particular page.
7139 * This number is an approximation of the number of
7140 * people sharing the page.
7141 */
7142 ulong_t
7143 hat_page_getshare(page_t *pp)
7144 {
7145 page_t *spp = pp; /* start page */
7146 kmutex_t *pml;
7147 ulong_t cnt;
7148 int index, sz = TTE64K;
7149
7150 /*
7151 * We need to grab the mlist lock to make sure any outstanding
7152 * load/unloads complete. Otherwise we could return zero
7153 * even though the unload(s) haven't finished yet.
7154 */
7155 pml = sfmmu_mlist_enter(spp);
7156 cnt = spp->p_share;
7157
7158 if (kpm_enable)
7159 cnt += spp->p_kpmref;
7160
7161 /*
7162 * If we have any large mappings, we count the number of
7163 * mappings that this large page is part of.
7164 */
7165 index = PP_MAPINDEX(spp);
7166 index >>= 1;
7167 while (index) {
7168 pp = PP_GROUPLEADER(spp, sz);
7169 if ((index & 0x1) && pp != spp) {
7170 cnt += pp->p_share;
7171 spp = pp;
7172 }
7173 index >>= 1;
7174 sz++;
7175 }
7176 sfmmu_mlist_exit(pml);
7177 return (cnt);
7178 }
7179
7180 /*
7181 * Unload all large mappings to the pp and reset the p_szc field of every
7182 * constituent page according to the remaining mappings.
7183 *
7184 * pp must be locked SE_EXCL. Even though no other constituent pages are
7185 * locked it's legal to unload the large mappings to the pp because all
7186 * constituent pages of large locked mappings have to be locked SE_SHARED.
7187 * This means if we have SE_EXCL lock on one of constituent pages none of the
7188 * large mappings to pp are locked.
7189 *
7190 * Decrease p_szc field starting from the last constituent page and ending
7191 * with the root page. This method is used because other threads rely on the
7192 * root's p_szc to find the lock to synchronize on. After a root page_t's p_szc
7193 * is demoted, other threads will succeed in sfmmu_mlspl_enter().
This 7194 * ensures that p_szc changes of the constituent pages appears atomic for all 7195 * threads that use sfmmu_mlspl_enter() to examine p_szc field. 7196 * 7197 * This mechanism is only used for file system pages where it's not always 7198 * possible to get SE_EXCL locks on all constituent pages to demote the size 7199 * code (as is done for anonymous or kernel large pages). 7200 * 7201 * See more comments in front of sfmmu_mlspl_enter(). 7202 */ 7203 void 7204 hat_page_demote(page_t *pp) 7205 { 7206 int index; 7207 int sz; 7208 cpuset_t cpuset; 7209 int sync = 0; 7210 page_t *rootpp; 7211 struct sf_hment *sfhme; 7212 struct sf_hment *tmphme = NULL; 7213 struct hme_blk *hmeblkp; 7214 uint_t pszc; 7215 page_t *lastpp; 7216 cpuset_t tset; 7217 pgcnt_t npgs; 7218 kmutex_t *pml; 7219 kmutex_t *pmtx; 7220 7221 ASSERT(PAGE_EXCL(pp)); 7222 ASSERT(!PP_ISFREE(pp)); 7223 ASSERT(page_szc_lock_assert(pp)); 7224 pml = sfmmu_mlist_enter(pp); 7225 pmtx = sfmmu_page_enter(pp); 7226 7227 pszc = pp->p_szc; 7228 if (pszc == 0) { 7229 goto out; 7230 } 7231 7232 index = PP_MAPINDEX(pp) >> 1; 7233 7234 if (index) { 7235 CPUSET_ZERO(cpuset); 7236 sz = TTE64K; 7237 sync = 1; 7238 } 7239 7240 while (index) { 7241 if (!(index & 0x1)) { 7242 index >>= 1; 7243 sz++; 7244 continue; 7245 } 7246 ASSERT(sz <= pszc); 7247 rootpp = PP_GROUPLEADER(pp, sz); 7248 for (sfhme = rootpp->p_mapping; sfhme; sfhme = tmphme) { 7249 tmphme = sfhme->hme_next; 7250 hmeblkp = sfmmu_hmetohblk(sfhme); 7251 if (hme_size(sfhme) != sz) { 7252 continue; 7253 } 7254 if (hmeblkp->hblk_xhat_bit) { 7255 cmn_err(CE_PANIC, 7256 "hat_page_demote: xhat hmeblk"); 7257 } 7258 tset = sfmmu_pageunload(rootpp, sfhme, sz); 7259 CPUSET_OR(cpuset, tset); 7260 } 7261 if (index >>= 1) { 7262 sz++; 7263 } 7264 } 7265 7266 ASSERT(!PP_ISMAPPED_LARGE(pp)); 7267 7268 if (sync) { 7269 xt_sync(cpuset); 7270 if (PP_ISTNC(pp)) { 7271 conv_tnc(rootpp, sz); 7272 } 7273 } 7274 7275 ASSERT(pp->p_szc == pszc); 7276 rootpp = PP_PAGEROOT(pp); 7277 ASSERT(rootpp->p_szc == pszc); 7278 lastpp = PP_PAGENEXT_N(rootpp, TTEPAGES(pszc) - 1); 7279 7280 while (lastpp != rootpp) { 7281 sz = PP_MAPINDEX(lastpp) ? fnd_mapping_sz(lastpp) : 0; 7282 ASSERT(sz < pszc); 7283 npgs = (sz == 0) ? 1 : TTEPAGES(sz); 7284 ASSERT(P2PHASE(lastpp->p_pagenum, npgs) == npgs - 1); 7285 while (--npgs > 0) { 7286 lastpp->p_szc = (uchar_t)sz; 7287 lastpp = PP_PAGEPREV(lastpp); 7288 } 7289 if (sz) { 7290 /* 7291 * make sure before current root's pszc 7292 * is updated all updates to constituent pages pszc 7293 * fields are globally visible. 7294 */ 7295 membar_producer(); 7296 } 7297 lastpp->p_szc = sz; 7298 ASSERT(IS_P2ALIGNED(lastpp->p_pagenum, TTEPAGES(sz))); 7299 if (lastpp != rootpp) { 7300 lastpp = PP_PAGEPREV(lastpp); 7301 } 7302 } 7303 if (sz == 0) { 7304 /* the loop above doesn't cover this case */ 7305 rootpp->p_szc = 0; 7306 } 7307 out: 7308 ASSERT(pp->p_szc == 0); 7309 sfmmu_page_exit(pmtx); 7310 sfmmu_mlist_exit(pml); 7311 } 7312 7313 /* 7314 * Refresh the HAT ismttecnt[] element for size szc. 7315 * Caller must have set ISM busy flag to prevent mapping 7316 * lists from changing while we're traversing them. 
7317 */
7318 pgcnt_t
7319 ism_tsb_entries(sfmmu_t *sfmmup, int szc)
7320 {
7321 ism_blk_t *ism_blkp = sfmmup->sfmmu_iblk;
7322 ism_map_t *ism_map;
7323 pgcnt_t npgs = 0;
7324 int j;
7325
7326 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
7327 for (; ism_blkp != NULL; ism_blkp = ism_blkp->iblk_next) {
7328 ism_map = ism_blkp->iblk_maps;
7329 for (j = 0; j < ISM_MAP_SLOTS && ism_map[j].imap_ismhat; j++)
7330 npgs += ism_map[j].imap_ismhat->sfmmu_ttecnt[szc];
7331 }
7332 sfmmup->sfmmu_ismttecnt[szc] = npgs;
7333 return (npgs);
7334 }
7335
7336 /*
7337 * Yield the memory claim requirement for an address space.
7338 *
7339 * This is currently implemented as the number of bytes that have active
7340 * hardware translations that have page structures. Therefore, it can
7341 * underestimate the traditional resident set size, e.g., if the
7342 * physical page is present and the hardware translation is missing;
7343 * and it can overestimate the rss, e.g., if there are active
7344 * translations to a frame buffer with page structs.
7345 * Also, it does not take sharing into account.
7346 *
7347 * Note that we don't acquire locks here since this function is most often
7348 * called from the clock thread.
7349 */
7350 size_t
7351 hat_get_mapped_size(struct hat *hat)
7352 {
7353 size_t assize = 0;
7354 int i;
7355
7356 if (hat == NULL)
7357 return (0);
7358
7359 ASSERT(hat->sfmmu_xhat_provider == NULL);
7360
7361 for (i = 0; i < mmu_page_sizes; i++)
7362 assize += (pgcnt_t)hat->sfmmu_ttecnt[i] * TTEBYTES(i);
7363
7364 if (hat->sfmmu_iblk == NULL)
7365 return (assize);
7366
7367 for (i = 0; i < mmu_page_sizes; i++)
7368 assize += (pgcnt_t)hat->sfmmu_ismttecnt[i] * TTEBYTES(i);
7369
7370 return (assize);
7371 }
7372
7373 int
7374 hat_stats_enable(struct hat *hat)
7375 {
7376 hatlock_t *hatlockp;
7377
7378 ASSERT(hat->sfmmu_xhat_provider == NULL);
7379
7380 hatlockp = sfmmu_hat_enter(hat);
7381 hat->sfmmu_rmstat++;
7382 sfmmu_hat_exit(hatlockp);
7383 return (1);
7384 }
7385
7386 void
7387 hat_stats_disable(struct hat *hat)
7388 {
7389 hatlock_t *hatlockp;
7390
7391 ASSERT(hat->sfmmu_xhat_provider == NULL);
7392
7393 hatlockp = sfmmu_hat_enter(hat);
7394 hat->sfmmu_rmstat--;
7395 sfmmu_hat_exit(hatlockp);
7396 }
7397
7398 /*
7399 * Routines for entering or removing ourselves from the
7400 * ism_hat's mapping list.
7401 */
7402 static void
7403 iment_add(struct ism_ment *iment, struct hat *ism_hat)
7404 {
7405 ASSERT(MUTEX_HELD(&ism_mlist_lock));
7406
7407 iment->iment_prev = NULL;
7408 iment->iment_next = ism_hat->sfmmu_iment;
7409 if (ism_hat->sfmmu_iment) {
7410 ism_hat->sfmmu_iment->iment_prev = iment;
7411 }
7412 ism_hat->sfmmu_iment = iment;
7413 }
7414
7415 static void
7416 iment_sub(struct ism_ment *iment, struct hat *ism_hat)
7417 {
7418 ASSERT(MUTEX_HELD(&ism_mlist_lock));
7419
7420 if (ism_hat->sfmmu_iment == NULL) {
7421 panic("ism map entry remove - no entries");
7422 }
7423
7424 if (iment->iment_prev) {
7425 ASSERT(ism_hat->sfmmu_iment != iment);
7426 iment->iment_prev->iment_next = iment->iment_next;
7427 } else {
7428 ASSERT(ism_hat->sfmmu_iment == iment);
7429 ism_hat->sfmmu_iment = iment->iment_next;
7430 }
7431
7432 if (iment->iment_next) {
7433 iment->iment_next->iment_prev = iment->iment_prev;
7434 }
7435
7436 /*
7437 * zero out the entry
7438 */
7439 iment->iment_next = NULL;
7440 iment->iment_prev = NULL;
7441 iment->iment_hat = NULL;
7442 }
7443
7444 /*
7445 * Hat_share()/unshare() return a (non-zero) error
7446 * when saddr and daddr are not properly aligned.
7447 *
7448 * The top level mapping element determines the alignment
7449 * requirement for saddr and daddr, depending on different
7450 * architectures.
7451 *
7452 * When hat_share()/unshare() are not supported,
7453 * HATOP_SHARE()/UNSHARE() return 0.
7454 */
7455 int
7456 hat_share(struct hat *sfmmup, caddr_t addr,
7457 struct hat *ism_hatid, caddr_t sptaddr, size_t len, uint_t ismszc)
7458 {
7459 ism_blk_t *ism_blkp;
7460 ism_blk_t *new_iblk;
7461 ism_map_t *ism_map;
7462 ism_ment_t *ism_ment;
7463 int i, added;
7464 hatlock_t *hatlockp;
7465 int reload_mmu = 0;
7466 uint_t ismshift = page_get_shift(ismszc);
7467 size_t ismpgsz = page_get_pagesize(ismszc);
7468 uint_t ismmask = (uint_t)ismpgsz - 1;
7469 size_t sh_size = ISM_SHIFT(ismshift, len);
7470 ushort_t ismhatflag;
7471
7472 #ifdef DEBUG
7473 caddr_t eaddr = addr + len;
7474 #endif /* DEBUG */
7475
7476 ASSERT(ism_hatid != NULL && sfmmup != NULL);
7477 ASSERT(sptaddr == ISMID_STARTADDR);
7478 /*
7479 * Check the alignment.
7480 */
7481 if (!ISM_ALIGNED(ismshift, addr) || !ISM_ALIGNED(ismshift, sptaddr))
7482 return (EINVAL);
7483
7484 /*
7485 * Check size alignment.
7486 */
7487 if (!ISM_ALIGNED(ismshift, len))
7488 return (EINVAL);
7489
7490 ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
7491
7492 /*
7493 * Allocate ism_ment for the ism_hat's mapping list, and an
7494 * ism map blk in case we need one. We must do our
7495 * allocations before acquiring locks to prevent a deadlock
7496 * in the kmem allocator on the mapping list lock.
7497 */
7498 new_iblk = kmem_cache_alloc(ism_blk_cache, KM_SLEEP);
7499 ism_ment = kmem_cache_alloc(ism_ment_cache, KM_SLEEP);
7500
7501 /*
7502 * Serialize ISM mappings with the ISM busy flag, and also the
7503 * trap handlers.
7504 */
7505 sfmmu_ismhat_enter(sfmmup, 0);
7506
7507 /*
7508 * Allocate an ism map blk if necessary.
7509 */
7510 if (sfmmup->sfmmu_iblk == NULL) {
7511 sfmmup->sfmmu_iblk = new_iblk;
7512 bzero(new_iblk, sizeof (*new_iblk));
7513 new_iblk->iblk_nextpa = (uint64_t)-1;
7514 membar_stst(); /* make sure next ptr visible to all CPUs */
7515 sfmmup->sfmmu_ismblkpa = va_to_pa((caddr_t)new_iblk);
7516 reload_mmu = 1;
7517 new_iblk = NULL;
7518 }
7519
7520 #ifdef DEBUG
7521 /*
7522 * Make sure mapping does not already exist.
7523 */
7524 ism_blkp = sfmmup->sfmmu_iblk;
7525 while (ism_blkp) {
7526 ism_map = ism_blkp->iblk_maps;
7527 for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) {
7528 if ((addr >= ism_start(ism_map[i]) &&
7529 addr < ism_end(ism_map[i])) ||
7530 (eaddr > ism_start(ism_map[i]) &&
7531 eaddr <= ism_end(ism_map[i]))) {
7532 panic("sfmmu_share: Already mapped!");
7533 }
7534 }
7535 ism_blkp = ism_blkp->iblk_next;
7536 }
7537 #endif /* DEBUG */
7538
7539 ASSERT(ismszc >= TTE4M);
7540 if (ismszc == TTE4M) {
7541 ismhatflag = HAT_4M_FLAG;
7542 } else if (ismszc == TTE32M) {
7543 ismhatflag = HAT_32M_FLAG;
7544 } else if (ismszc == TTE256M) {
7545 ismhatflag = HAT_256M_FLAG;
7546 }
7547 /*
7548 * Add mapping to first available mapping slot.
7549 */
7550 ism_blkp = sfmmup->sfmmu_iblk;
7551 added = 0;
7552 while (!added) {
7553 ism_map = ism_blkp->iblk_maps;
7554 for (i = 0; i < ISM_MAP_SLOTS; i++) {
7555 if (ism_map[i].imap_ismhat == NULL) {
7556
7557 ism_map[i].imap_ismhat = ism_hatid;
7558 ism_map[i].imap_vb_shift = (ushort_t)ismshift;
7559 ism_map[i].imap_hatflags = ismhatflag;
7560 ism_map[i].imap_sz_mask = ismmask;
7561 /*
7562 * imap_seg is checked in ISM_CHECK to see if
7563 * non-NULL, then other info assumed valid.
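 *
 * This is the publish-with-a-barrier pattern: fill in every other
 * field of the slot, issue membar_stst(), and only then store the
 * one word readers test, so a reader that observes imap_seg != 0
 * also observes the earlier stores. Schematically:
 *
 *	slot->payload = ...;		// all secondary fields
 *	membar_stst();			// order payload before publish
 *	slot->imap_seg = nonzero;	// readers key off this store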
7564 */
7565 membar_stst();
7566 ism_map[i].imap_seg = (uintptr_t)addr | sh_size;
7567 ism_map[i].imap_ment = ism_ment;
7568
7569 /*
7570 * Now add ourselves to the ism_hat's
7571 * mapping list.
7572 */
7573 ism_ment->iment_hat = sfmmup;
7574 ism_ment->iment_base_va = addr;
7575 ism_hatid->sfmmu_ismhat = 1;
7576 ism_hatid->sfmmu_flags = 0;
7577 mutex_enter(&ism_mlist_lock);
7578 iment_add(ism_ment, ism_hatid);
7579 mutex_exit(&ism_mlist_lock);
7580 added = 1;
7581 break;
7582 }
7583 }
7584 if (!added && ism_blkp->iblk_next == NULL) {
7585 ism_blkp->iblk_next = new_iblk;
7586 new_iblk = NULL;
7587 bzero(ism_blkp->iblk_next,
7588 sizeof (*ism_blkp->iblk_next));
7589 ism_blkp->iblk_next->iblk_nextpa = (uint64_t)-1;
7590 membar_stst();
7591 ism_blkp->iblk_nextpa =
7592 va_to_pa((caddr_t)ism_blkp->iblk_next);
7593 }
7594 ism_blkp = ism_blkp->iblk_next;
7595 }
7596
7597 /*
7598 * Update our counters for this sfmmup's ism mappings.
7599 */
7600 for (i = 0; i <= ismszc; i++) {
7601 if (!(disable_ism_large_pages & (1 << i)))
7602 (void) ism_tsb_entries(sfmmup, i);
7603 }
7604
7605 hatlockp = sfmmu_hat_enter(sfmmup);
7606
7607 /*
7608 * For ISM and DISM we do not support 512K pages, so we only
7609 * search the 4M and 8K/64K hashes for 4 pagesize cpus, and search
7610 * the 256M or 32M, and 4M and 8K/64K hashes for 6 pagesize cpus.
7611 */
7612 ASSERT((disable_ism_large_pages & (1 << TTE512K)) != 0);
7613
7614 if (ismszc > TTE4M && !SFMMU_FLAGS_ISSET(sfmmup, HAT_4M_FLAG))
7615 SFMMU_FLAGS_SET(sfmmup, HAT_4M_FLAG);
7616
7617 if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_64K_FLAG))
7618 SFMMU_FLAGS_SET(sfmmup, HAT_64K_FLAG);
7619
7620 /*
7621 * If we updated the ismblkpa for this HAT or we need
7622 * to start searching the 256M or 32M or 4M hash, we must
7623 * make sure all CPUs running this process reload their
7624 * tsbmiss area. Otherwise they will fail to load the mappings
7625 * in the tsbmiss handler and will loop calling pagefault().
7626 */
7627 switch (ismszc) {
7628 case TTE256M:
7629 if (reload_mmu || !SFMMU_FLAGS_ISSET(sfmmup, HAT_256M_FLAG)) {
7630 SFMMU_FLAGS_SET(sfmmup, HAT_256M_FLAG);
7631 sfmmu_sync_mmustate(sfmmup);
7632 }
7633 break;
7634 case TTE32M:
7635 if (reload_mmu || !SFMMU_FLAGS_ISSET(sfmmup, HAT_32M_FLAG)) {
7636 SFMMU_FLAGS_SET(sfmmup, HAT_32M_FLAG);
7637 sfmmu_sync_mmustate(sfmmup);
7638 }
7639 break;
7640 case TTE4M:
7641 if (reload_mmu || !SFMMU_FLAGS_ISSET(sfmmup, HAT_4M_FLAG)) {
7642 SFMMU_FLAGS_SET(sfmmup, HAT_4M_FLAG);
7643 sfmmu_sync_mmustate(sfmmup);
7644 }
7645 break;
7646 default:
7647 break;
7648 }
7649
7650 /*
7651 * Now we can drop the locks.
7652 */
7653 sfmmu_ismhat_exit(sfmmup, 1);
7654 sfmmu_hat_exit(hatlockp);
7655
7656 /*
7657 * Free up ismblk if we didn't use it.
7658 */
7659 if (new_iblk != NULL)
7660 kmem_cache_free(ism_blk_cache, new_iblk);
7661
7662 /*
7663 * Check TSB and TLB page sizes.
7664 */
7665 sfmmu_check_page_sizes(sfmmup, 1);
7666
7667 return (0);
7668 }
7669
7670 /*
7671 * hat_unshare removes exactly one ism_map from
7672 * this process's as. It expects multiple calls
7673 * to hat_unshare for multiple shm segments.
7674 */
7675 void
7676 hat_unshare(struct hat *sfmmup, caddr_t addr, size_t len, uint_t ismszc)
7677 {
7678 ism_map_t *ism_map;
7679 ism_ment_t *free_ment = NULL;
7680 ism_blk_t *ism_blkp;
7681 struct hat *ism_hatid;
7682 struct ctx *ctx;
7683 int cnum, found, i;
7684 hatlock_t *hatlockp;
7685 struct tsb_info *tsbinfo;
7686 uint_t ismshift = page_get_shift(ismszc);
7687 size_t sh_size = ISM_SHIFT(ismshift, len);
7688
7689 ASSERT(ISM_ALIGNED(ismshift, addr));
7690 ASSERT(ISM_ALIGNED(ismshift, len));
7691 ASSERT(sfmmup != NULL);
7692 ASSERT(sfmmup != ksfmmup);
7693
7694 if (sfmmup->sfmmu_xhat_provider) {
7695 XHAT_UNSHARE(sfmmup, addr, len);
7696 return;
7697 } else {
7698 /*
7699 * This must be a CPU HAT. If the address space has
7700 * XHATs attached, inform all XHATs that the ISM segment
7701 * is going away
7702 */
7703 ASSERT(sfmmup->sfmmu_as != NULL);
7704 if (sfmmup->sfmmu_as->a_xhat != NULL)
7705 xhat_unshare_all(sfmmup->sfmmu_as, addr, len);
7706 }
7707
7708 /*
7709 * Make sure that during the entire time ISM mappings are removed,
7710 * the trap handlers serialize behind us, and that no one else
7711 * can be mucking with ISM mappings. This also lets us get away
7712 * with not doing expensive cross calls to flush the TLB -- we
7713 * just discard the context, flush the entire TSB, and call it
7714 * a day.
7715 */
7716 sfmmu_ismhat_enter(sfmmup, 0);
7717
7718 /*
7719 * Remove the mapping.
7720 *
7721 * We can't have any holes in the ism map.
7722 * The tsb miss code while searching the ism map will
7723 * stop on an empty map slot. So we must move
7724 * everything past the hole up one slot, if there is one.
7725 *
7726 * Also empty ism map blks are not freed until the
7727 * process exits. This is to prevent a MT race condition
7728 * between sfmmu_unshare() and sfmmu_tsbmiss_exception().
7729 */
7730 found = 0;
7731 ism_blkp = sfmmup->sfmmu_iblk;
7732 while (!found && ism_blkp) {
7733 ism_map = ism_blkp->iblk_maps;
7734 for (i = 0; i < ISM_MAP_SLOTS; i++) {
7735 if (addr == ism_start(ism_map[i]) &&
7736 sh_size == (size_t)(ism_size(ism_map[i]))) {
7737 found = 1;
7738 break;
7739 }
7740 }
7741 if (!found)
7742 ism_blkp = ism_blkp->iblk_next;
7743 }
7744
7745 if (found) {
7746 ism_hatid = ism_map[i].imap_ismhat;
7747 ASSERT(ism_hatid != NULL);
7748 ASSERT(ism_hatid->sfmmu_ismhat == 1);
7749 ASSERT(ism_hatid->sfmmu_cnum == INVALID_CONTEXT);
7750
7751 /*
7752 * First remove ourselves from the ism mapping list.
7753 */
7754 mutex_enter(&ism_mlist_lock);
7755 iment_sub(ism_map[i].imap_ment, ism_hatid);
7756 mutex_exit(&ism_mlist_lock);
7757 free_ment = ism_map[i].imap_ment;
7758
7759 /*
7760 * Now guarantee that any other cpu
7761 * that tries to process an ISM miss
7762 * will go to tl=0.
7763 */
7764 hatlockp = sfmmu_hat_enter(sfmmup);
7765 ctx = sfmmutoctx(sfmmup);
7766 rw_enter(&ctx->ctx_rwlock, RW_WRITER);
7767 cnum = sfmmutoctxnum(sfmmup);
7768
7769 if (cnum != INVALID_CONTEXT) {
7770 sfmmu_tlb_swap_ctx(sfmmup, ctx);
7771 }
7772 rw_exit(&ctx->ctx_rwlock);
7773 sfmmu_hat_exit(hatlockp);
7774
7775 /*
7776 * We delete the ism map by copying
7777 * the next map over the current one.
7778 * We will take the next one in the maps
7779 * array or from the next ism_blk.
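 *
 * In other words, deletion is a shift-left compaction across the
 * chained fixed-size arrays, which preserves the invariant that the
 * tsb-miss scanner may stop at the first empty slot. For a single
 * array the equivalent would be:
 *
 *	for (j = i; j < ISM_MAP_SLOTS - 1; j++)
 *		map[j] = map[j + 1];
 *	clear_slot(&map[ISM_MAP_SLOTS - 1]);	// vacated tail entry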
7780 */ 7781 while (ism_blkp) { 7782 ism_map = ism_blkp->iblk_maps; 7783 while (i < (ISM_MAP_SLOTS - 1)) { 7784 ism_map[i] = ism_map[i + 1]; 7785 i++; 7786 } 7787 /* i == (ISM_MAP_SLOTS - 1) */ 7788 ism_blkp = ism_blkp->iblk_next; 7789 if (ism_blkp) { 7790 ism_map[i] = ism_blkp->iblk_maps[0]; 7791 i = 0; 7792 } else { 7793 ism_map[i].imap_seg = 0; 7794 ism_map[i].imap_vb_shift = 0; 7795 ism_map[i].imap_hatflags = 0; 7796 ism_map[i].imap_sz_mask = 0; 7797 ism_map[i].imap_ismhat = NULL; 7798 ism_map[i].imap_ment = NULL; 7799 } 7800 } 7801 7802 /* 7803 * Now flush entire TSB for the process, since 7804 * demapping page by page can be too expensive. 7805 * We don't have to flush the TLB here anymore 7806 * since we switch to a new TLB ctx instead. 7807 * Also, there is no need to flush if the process 7808 * is exiting since the TSB will be freed later. 7809 */ 7810 if (!sfmmup->sfmmu_free) { 7811 hatlockp = sfmmu_hat_enter(sfmmup); 7812 for (tsbinfo = sfmmup->sfmmu_tsb; tsbinfo != NULL; 7813 tsbinfo = tsbinfo->tsb_next) { 7814 if (tsbinfo->tsb_flags & TSB_SWAPPED) 7815 continue; 7816 sfmmu_inv_tsb(tsbinfo->tsb_va, 7817 TSB_BYTES(tsbinfo->tsb_szc)); 7818 } 7819 sfmmu_hat_exit(hatlockp); 7820 } 7821 } 7822 7823 /* 7824 * Update our counters for this sfmmup's ism mappings. 7825 */ 7826 for (i = 0; i <= ismszc; i++) { 7827 if (!(disable_ism_large_pages & (1 << i))) 7828 (void) ism_tsb_entries(sfmmup, i); 7829 } 7830 7831 sfmmu_ismhat_exit(sfmmup, 0); 7832 7833 /* 7834 * We must do our freeing here after dropping locks 7835 * to prevent a deadlock in the kmem allocator on the 7836 * mapping list lock. 7837 */ 7838 if (free_ment != NULL) 7839 kmem_cache_free(ism_ment_cache, free_ment); 7840 7841 /* 7842 * Check TSB and TLB page sizes if the process isn't exiting. 7843 */ 7844 if (!sfmmup->sfmmu_free) 7845 sfmmu_check_page_sizes(sfmmup, 0); 7846 } 7847 7848 /* ARGSUSED */ 7849 static int 7850 sfmmu_idcache_constructor(void *buf, void *cdrarg, int kmflags) 7851 { 7852 /* void *buf is sfmmu_t pointer */ 7853 return (0); 7854 } 7855 7856 /* ARGSUSED */ 7857 static void 7858 sfmmu_idcache_destructor(void *buf, void *cdrarg) 7859 { 7860 /* void *buf is sfmmu_t pointer */ 7861 } 7862 7863 /* 7864 * setup kmem hmeblks by bzeroing all members and initializing the nextpa 7865 * field to be the pa of this hmeblk 7866 */ 7867 /* ARGSUSED */ 7868 static int 7869 sfmmu_hblkcache_constructor(void *buf, void *cdrarg, int kmflags) 7870 { 7871 struct hme_blk *hmeblkp; 7872 7873 bzero(buf, (size_t)cdrarg); 7874 hmeblkp = (struct hme_blk *)buf; 7875 hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp); 7876 7877 #ifdef HBLK_TRACE 7878 mutex_init(&hmeblkp->hblk_audit_lock, NULL, MUTEX_DEFAULT, NULL); 7879 #endif /* HBLK_TRACE */ 7880 7881 return (0); 7882 } 7883 7884 /* ARGSUSED */ 7885 static void 7886 sfmmu_hblkcache_destructor(void *buf, void *cdrarg) 7887 { 7888 7889 #ifdef HBLK_TRACE 7890 7891 struct hme_blk *hmeblkp; 7892 7893 hmeblkp = (struct hme_blk *)buf; 7894 mutex_destroy(&hmeblkp->hblk_audit_lock); 7895 7896 #endif /* HBLK_TRACE */ 7897 } 7898 7899 #define SFMMU_CACHE_RECLAIM_SCAN_RATIO 8 7900 static int sfmmu_cache_reclaim_scan_ratio = SFMMU_CACHE_RECLAIM_SCAN_RATIO; 7901 /* 7902 * The kmem allocator will callback into our reclaim routine when the system 7903 * is running low in memory. We traverse the hash and free up all unused but 7904 * still cached hme_blks. We also traverse the free list and free them up 7905 * as well. 
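 *
 * Rather than sweep both hashes end to end, each invocation covers
 * only a 1/sfmmu_cache_reclaim_scan_ratio slice (an eighth by
 * default) and remembers where it stopped in a static hand, so
 * repeated callbacks ratchet through the tables without one call
 * ever stalling on a full pass. The resumable-cursor pattern, in
 * miniature:
 *
 *	static bucket_t *hand;		// persists across calls
 *	if (hand == NULL || hand > &table[SZ])
 *		hand = table;		// (re)start the cursor
 *	start = hand;
 *	hand += SZ / ratio;		// the next call resumes here
 *	// scan SZ / ratio buckets from start, wrapping at table end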
7906 */
7907 /*ARGSUSED*/
7908 static void
7909 sfmmu_hblkcache_reclaim(void *cdrarg)
7910 {
7911 int i;
7912 uint64_t hblkpa, prevpa, nx_pa;
7913 struct hmehash_bucket *hmebp;
7914 struct hme_blk *hmeblkp, *nx_hblk, *pr_hblk = NULL;
7915 static struct hmehash_bucket *uhmehash_reclaim_hand;
7916 static struct hmehash_bucket *khmehash_reclaim_hand;
7917 struct hme_blk *list = NULL;
7918
7919 hmebp = uhmehash_reclaim_hand;
7920 if (hmebp == NULL || hmebp > &uhme_hash[UHMEHASH_SZ])
7921 uhmehash_reclaim_hand = hmebp = uhme_hash;
7922 uhmehash_reclaim_hand += UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio;
7923
7924 for (i = UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) {
7925 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) {
7926 hmeblkp = hmebp->hmeblkp;
7927 hblkpa = hmebp->hmeh_nextpa;
7928 prevpa = 0;
7929 pr_hblk = NULL;
7930 while (hmeblkp) {
7931 nx_hblk = hmeblkp->hblk_next;
7932 nx_pa = hmeblkp->hblk_nextpa;
7933 if (!hmeblkp->hblk_vcnt &&
7934 !hmeblkp->hblk_hmecnt) {
7935 sfmmu_hblk_hash_rm(hmebp, hmeblkp,
7936 prevpa, pr_hblk);
7937 sfmmu_hblk_free(hmebp, hmeblkp,
7938 hblkpa, &list);
7939 } else {
7940 pr_hblk = hmeblkp;
7941 prevpa = hblkpa;
7942 }
7943 hmeblkp = nx_hblk;
7944 hblkpa = nx_pa;
7945 }
7946 SFMMU_HASH_UNLOCK(hmebp);
7947 }
7948 if (hmebp++ == &uhme_hash[UHMEHASH_SZ])
7949 hmebp = uhme_hash;
7950 }
7951
7952 hmebp = khmehash_reclaim_hand;
7953 if (hmebp == NULL || hmebp > &khme_hash[KHMEHASH_SZ])
7954 khmehash_reclaim_hand = hmebp = khme_hash;
7955 khmehash_reclaim_hand += KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio;
7956
7957 for (i = KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) {
7958 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) {
7959 hmeblkp = hmebp->hmeblkp;
7960 hblkpa = hmebp->hmeh_nextpa;
7961 prevpa = 0;
7962 pr_hblk = NULL;
7963 while (hmeblkp) {
7964 nx_hblk = hmeblkp->hblk_next;
7965 nx_pa = hmeblkp->hblk_nextpa;
7966 if (!hmeblkp->hblk_vcnt &&
7967 !hmeblkp->hblk_hmecnt) {
7968 sfmmu_hblk_hash_rm(hmebp, hmeblkp,
7969 prevpa, pr_hblk);
7970 sfmmu_hblk_free(hmebp, hmeblkp,
7971 hblkpa, &list);
7972 } else {
7973 pr_hblk = hmeblkp;
7974 prevpa = hblkpa;
7975 }
7976 hmeblkp = nx_hblk;
7977 hblkpa = nx_pa;
7978 }
7979 SFMMU_HASH_UNLOCK(hmebp);
7980 }
7981 if (hmebp++ == &khme_hash[KHMEHASH_SZ])
7982 hmebp = khme_hash;
7983 }
7984 sfmmu_hblks_list_purge(&list);
7985 }
7986
7987 /*
7988 * sfmmu_get_ppvcolor should become a vm_machdep or hatop interface.
7989 * The same goes for sfmmu_get_addrvcolor().
7990 *
7991 * This function will return the virtual color for the specified page. The
7992 * virtual color corresponds to this page's current mapping or its last mapping.
7993 * It is used by memory allocators to choose addresses with the correct
7994 * alignment so vac consistency is automatically maintained. If the page
7995 * has no color it returns -1.
7996 */
7997 int
7998 sfmmu_get_ppvcolor(struct page *pp)
7999 {
8000 int color;
8001
8002 if (!(cache & CACHE_VAC) || PP_NEWPAGE(pp)) {
8003 return (-1);
8004 }
8005 color = PP_GET_VCOLOR(pp);
8006 ASSERT(color < mmu_btop(shm_alignment));
8007 return (color);
8008 }
8009
8010 /*
8011 * This function will return the desired alignment for vac consistency
8012 * (vac color) given a virtual address. If no vac is present it returns -1.
8013 */
8014 int
8015 sfmmu_get_addrvcolor(caddr_t vaddr)
8016 {
8017 if (cache & CACHE_VAC) {
8018 return (addr_to_vcolor(vaddr));
8019 } else {
8020 return (-1);
8021 }
8022
8023 }
8024
8025 /*
8026 * Check for conflicts.
8027 * A conflict exists if the new and existing mappings do not match in 8028 * their "shm_alignment" fields. If conflicts exist, the existing mappings 8029 * are flushed unless one of them is locked. If one of them is locked, then 8030 * the mappings are flushed and converted to non-cacheable mappings. 8031 */ 8032 static void 8033 sfmmu_vac_conflict(struct hat *hat, caddr_t addr, page_t *pp) 8034 { 8035 struct hat *tmphat; 8036 struct sf_hment *sfhmep, *tmphme = NULL; 8037 struct hme_blk *hmeblkp; 8038 int vcolor; 8039 tte_t tte; 8040 8041 ASSERT(sfmmu_mlist_held(pp)); 8042 ASSERT(!PP_ISNC(pp)); /* page better be cacheable */ 8043 8044 vcolor = addr_to_vcolor(addr); 8045 if (PP_NEWPAGE(pp)) { 8046 PP_SET_VCOLOR(pp, vcolor); 8047 return; 8048 } 8049 8050 if (PP_GET_VCOLOR(pp) == vcolor) { 8051 return; 8052 } 8053 8054 if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) { 8055 /* 8056 * Previous user of page had a different color 8057 * but since there are no current users 8058 * we just flush the cache and change the color. 8059 */ 8060 SFMMU_STAT(sf_pgcolor_conflict); 8061 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp)); 8062 PP_SET_VCOLOR(pp, vcolor); 8063 return; 8064 } 8065 8066 /* 8067 * If we get here we have a vac conflict with a current 8068 * mapping. VAC conflict policy is as follows. 8069 * - The default is to unload the other mappings unless: 8070 * - If we have a large mapping we uncache the page. 8071 * We need to uncache the rest of the large page too. 8072 * - If any of the mappings are locked we uncache the page. 8073 * - If the requested mapping is inconsistent 8074 * with another mapping and that mapping 8075 * is in the same address space we have to 8076 * make it non-cached. The default thing 8077 * to do is unload the inconsistent mapping 8078 * but if they are in the same address space 8079 * we run the risk of unmapping the pc or the 8080 * stack which we will use as we return to the user, 8081 * in which case we can then fault on the thing 8082 * we just unloaded and get into an infinite loop. 8083 */ 8084 if (PP_ISMAPPED_LARGE(pp)) { 8085 int sz; 8086 8087 /* 8088 * Existing mapping is for big pages. We don't unload 8089 * existing big mappings to satisfy new mappings. 8090 * Always convert all mappings to TNC. 8091 */ 8092 sz = fnd_mapping_sz(pp); 8093 pp = PP_GROUPLEADER(pp, sz); 8094 SFMMU_STAT_ADD(sf_uncache_conflict, TTEPAGES(sz)); 8095 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 8096 TTEPAGES(sz)); 8097 8098 return; 8099 } 8100 8101 /* 8102 * check if any mapping is in the same address space as ours 8103 * or if it is locked, since in that case we need to uncache. 8104 */ 8105 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) { 8106 tmphme = sfhmep->hme_next; 8107 hmeblkp = sfmmu_hmetohblk(sfhmep); 8108 if (hmeblkp->hblk_xhat_bit) 8109 continue; 8110 tmphat = hblktosfmmu(hmeblkp); 8111 sfmmu_copytte(&sfhmep->hme_tte, &tte); 8112 ASSERT(TTE_IS_VALID(&tte)); 8113 if ((tmphat == hat) || hmeblkp->hblk_lckcnt) { 8114 /* 8115 * We have an uncache conflict 8116 */ 8117 SFMMU_STAT(sf_uncache_conflict); 8118 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 1); 8119 return; 8120 } 8121 } 8122 8123 /* 8124 * We have an unload conflict. 8125 * We have already checked for LARGE mappings, therefore 8126 * the remaining mapping(s) must be TTE8K.
8127 */ 8128 SFMMU_STAT(sf_unload_conflict); 8129 8130 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) { 8131 tmphme = sfhmep->hme_next; 8132 hmeblkp = sfmmu_hmetohblk(sfhmep); 8133 if (hmeblkp->hblk_xhat_bit) 8134 continue; 8135 (void) sfmmu_pageunload(pp, sfhmep, TTE8K); 8136 } 8137 8138 if (PP_ISMAPPED_KPM(pp)) 8139 sfmmu_kpm_vac_unload(pp, addr); 8140 8141 /* 8142 * Unloads only do TLB flushes so we need to flush the 8143 * cache here. 8144 */ 8145 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp)); 8146 PP_SET_VCOLOR(pp, vcolor); 8147 } 8148 8149 /* 8150 * Whenever a mapping is unloaded and the page is in TNC state, 8151 * we see if the page can be made cacheable again. 'pp' is 8152 * the page that we just unloaded a mapping from, the size 8153 * of mapping that was unloaded is 'ottesz'. 8154 * Remark: 8155 * The recache policy for mpss pages can leave a performance problem 8156 * under the following circumstances: 8157 * . A large page in uncached mode has just been unmapped. 8158 * . All constituent pages are TNC due to a conflicting small mapping. 8159 * . There are many other, non conflicting, small mappings around for 8160 * a lot of the constituent pages. 8161 * . We're called w/ the "old" groupleader page and the old ottesz, 8162 * but this is irrelevant, since we're no more "PP_ISMAPPED_LARGE", so 8163 * we end up w/ TTE8K or npages == 1. 8164 * . We call tst_tnc w/ the old groupleader only, and if there is no 8165 * conflict, we re-cache only this page. 8166 * . All other small mappings are not checked and will be left in TNC mode. 8167 * The problem is not very serious because: 8168 * . mpss is actually only defined for heap and stack, so the probability 8169 * is not very high that a large page mapping exists in parallel to a small 8170 * one (this is possible, but seems to be bad programming style in the 8171 * appl). 8172 * . The problem gets a little bit more serious, when those TNC pages 8173 * have to be mapped into kernel space, e.g. for networking. 8174 * . When VAC alias conflicts occur in applications, this is regarded 8175 * as an application bug. So if kstat's show them, the appl should 8176 * be changed anyway. 8177 */ 8178 static void 8179 conv_tnc(page_t *pp, int ottesz) 8180 { 8181 int cursz, dosz; 8182 pgcnt_t curnpgs, dopgs; 8183 pgcnt_t pg64k; 8184 page_t *pp2; 8185 8186 /* 8187 * Determine how big a range we check for TNC and find 8188 * leader page. cursz is the size of the biggest 8189 * mapping that still exist on 'pp'. 8190 */ 8191 if (PP_ISMAPPED_LARGE(pp)) { 8192 cursz = fnd_mapping_sz(pp); 8193 } else { 8194 cursz = TTE8K; 8195 } 8196 8197 if (ottesz >= cursz) { 8198 dosz = ottesz; 8199 pp2 = pp; 8200 } else { 8201 dosz = cursz; 8202 pp2 = PP_GROUPLEADER(pp, dosz); 8203 } 8204 8205 pg64k = TTEPAGES(TTE64K); 8206 dopgs = TTEPAGES(dosz); 8207 8208 ASSERT(dopgs == 1 || ((dopgs & (pg64k - 1)) == 0)); 8209 8210 while (dopgs != 0) { 8211 curnpgs = TTEPAGES(cursz); 8212 if (tst_tnc(pp2, curnpgs)) { 8213 SFMMU_STAT_ADD(sf_recache, curnpgs); 8214 sfmmu_page_cache_array(pp2, HAT_CACHE, CACHE_NO_FLUSH, 8215 curnpgs); 8216 } 8217 8218 ASSERT(dopgs >= curnpgs); 8219 dopgs -= curnpgs; 8220 8221 if (dopgs == 0) { 8222 break; 8223 } 8224 8225 pp2 = PP_PAGENEXT_N(pp2, curnpgs); 8226 if (((dopgs & (pg64k - 1)) == 0) && PP_ISMAPPED_LARGE(pp2)) { 8227 cursz = fnd_mapping_sz(pp2); 8228 } else { 8229 cursz = TTE8K; 8230 } 8231 } 8232 } 8233 8234 /* 8235 * Returns 1 if page(s) can be converted from TNC to cacheable setting, 8236 * returns 0 otherwise. 
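 * The expected caller pattern is the one conv_tnc() uses above: test
 * first, then recache without a displacement flush, roughly
 *
 *	if (tst_tnc(pp2, curnpgs)) {
 *		SFMMU_STAT_ADD(sf_recache, curnpgs);
 *		sfmmu_page_cache_array(pp2, HAT_CACHE,
 *		    CACHE_NO_FLUSH, curnpgs);
 *	}
 *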
Note that the oaddr argument is valid for only 8237 * 8k pages. 8238 */ 8239 static int 8240 tst_tnc(page_t *pp, pgcnt_t npages) 8241 { 8242 struct sf_hment *sfhme; 8243 struct hme_blk *hmeblkp; 8244 tte_t tte; 8245 caddr_t vaddr; 8246 int clr_valid = 0; 8247 int color, color1, bcolor; 8248 int i, ncolors; 8249 8250 ASSERT(pp != NULL); 8251 ASSERT(!(cache & CACHE_WRITEBACK)); 8252 8253 if (npages > 1) { 8254 ncolors = CACHE_NUM_COLOR; 8255 } 8256 8257 for (i = 0; i < npages; i++) { 8258 ASSERT(sfmmu_mlist_held(pp)); 8259 ASSERT(PP_ISTNC(pp)); 8260 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR); 8261 8262 if (PP_ISPNC(pp)) { 8263 return (0); 8264 } 8265 8266 clr_valid = 0; 8267 if (PP_ISMAPPED_KPM(pp)) { 8268 caddr_t kpmvaddr; 8269 8270 ASSERT(kpm_enable); 8271 kpmvaddr = hat_kpm_page2va(pp, 1); 8272 ASSERT(!(npages > 1 && IS_KPM_ALIAS_RANGE(kpmvaddr))); 8273 color1 = addr_to_vcolor(kpmvaddr); 8274 clr_valid = 1; 8275 } 8276 8277 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) { 8278 hmeblkp = sfmmu_hmetohblk(sfhme); 8279 if (hmeblkp->hblk_xhat_bit) 8280 continue; 8281 8282 sfmmu_copytte(&sfhme->hme_tte, &tte); 8283 ASSERT(TTE_IS_VALID(&tte)); 8284 8285 vaddr = tte_to_vaddr(hmeblkp, tte); 8286 color = addr_to_vcolor(vaddr); 8287 8288 if (npages > 1) { 8289 /* 8290 * If there is a big mapping, make sure 8291 * 8K mapping is consistent with the big 8292 * mapping. 8293 */ 8294 bcolor = i % ncolors; 8295 if (color != bcolor) { 8296 return (0); 8297 } 8298 } 8299 if (!clr_valid) { 8300 clr_valid = 1; 8301 color1 = color; 8302 } 8303 8304 if (color1 != color) { 8305 return (0); 8306 } 8307 } 8308 8309 pp = PP_PAGENEXT(pp); 8310 } 8311 8312 return (1); 8313 } 8314 8315 static void 8316 sfmmu_page_cache_array(page_t *pp, int flags, int cache_flush_flag, 8317 pgcnt_t npages) 8318 { 8319 kmutex_t *pmtx; 8320 int i, ncolors, bcolor; 8321 kpm_hlk_t *kpmp; 8322 cpuset_t cpuset; 8323 8324 ASSERT(pp != NULL); 8325 ASSERT(!(cache & CACHE_WRITEBACK)); 8326 8327 kpmp = sfmmu_kpm_kpmp_enter(pp, npages); 8328 pmtx = sfmmu_page_enter(pp); 8329 8330 /* 8331 * Fast path caching single unmapped page 8332 */ 8333 if (npages == 1 && !PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp) && 8334 flags == HAT_CACHE) { 8335 PP_CLRTNC(pp); 8336 PP_CLRPNC(pp); 8337 sfmmu_page_exit(pmtx); 8338 sfmmu_kpm_kpmp_exit(kpmp); 8339 return; 8340 } 8341 8342 /* 8343 * We need to capture all cpus in order to change cacheability 8344 * because we can't allow one cpu to access the same physical 8345 * page using a cacheable and a non-cacheable mapping at the same 8346 * time. Since we may end up walking the ism mapping list, 8347 * we have to grab its lock now since we can't after all the 8348 * cpus have been captured. 8349 */ 8350 sfmmu_hat_lock_all(); 8351 mutex_enter(&ism_mlist_lock); 8352 kpreempt_disable(); 8353 cpuset = cpu_ready_set; 8354 xc_attention(cpuset); 8355 8356 if (npages > 1) { 8357 /* 8358 * Make sure all colors are flushed since 8359 * sfmmu_page_cache() only flushes one color; 8360 * it does not know about big pages.
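 * The constituent pages of a large page cycle through the VAC colors
 * in order, so constituent page i corresponds to color i % ncolors;
 * that is why the loop below computes, in sketch form,
 *
 *	bcolor = i % ncolors;
 *	sfmmu_page_cache(pp, flags, cache_flush_flag, bcolor);
 *
 * for each constituent page of a big page.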
8361 */ 8362 ncolors = CACHE_NUM_COLOR; 8363 if (flags & HAT_TMPNC) { 8364 for (i = 0; i < ncolors; i++) { 8365 sfmmu_cache_flushcolor(i, pp->p_pagenum); 8366 } 8367 cache_flush_flag = CACHE_NO_FLUSH; 8368 } 8369 } 8370 8371 for (i = 0; i < npages; i++) { 8372 8373 ASSERT(sfmmu_mlist_held(pp)); 8374 8375 if (!(flags == HAT_TMPNC && PP_ISTNC(pp))) { 8376 8377 if (npages > 1) { 8378 bcolor = i % ncolors; 8379 } else { 8380 bcolor = NO_VCOLOR; 8381 } 8382 8383 sfmmu_page_cache(pp, flags, cache_flush_flag, 8384 bcolor); 8385 } 8386 8387 pp = PP_PAGENEXT(pp); 8388 } 8389 8390 xt_sync(cpuset); 8391 xc_dismissed(cpuset); 8392 mutex_exit(&ism_mlist_lock); 8393 sfmmu_hat_unlock_all(); 8394 sfmmu_page_exit(pmtx); 8395 sfmmu_kpm_kpmp_exit(kpmp); 8396 kpreempt_enable(); 8397 } 8398 8399 /* 8400 * This function changes the virtual cacheability of all mappings to a 8401 * particular page. When changing from uncache to cacheable the mappings will 8402 * only be changed if all of them have the same virtual color. 8403 * We need to flush the cache on all cpus. It is possible that 8404 * a process referenced a page as cacheable but has since exited 8405 * and cleared the mapping list. We still need to flush it but have no 8406 * state, so flushing on all cpus is the only alternative. 8407 */ 8408 static void 8409 sfmmu_page_cache(page_t *pp, int flags, int cache_flush_flag, int bcolor) 8410 { 8411 struct sf_hment *sfhme; 8412 struct hme_blk *hmeblkp; 8413 sfmmu_t *sfmmup; 8414 tte_t tte, ttemod; 8415 caddr_t vaddr; 8416 int ret, color; 8417 pfn_t pfn; 8418 8419 color = bcolor; 8420 pfn = pp->p_pagenum; 8421 8422 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) { 8423 8424 hmeblkp = sfmmu_hmetohblk(sfhme); 8425 8426 if (hmeblkp->hblk_xhat_bit) 8427 continue; 8428 8429 sfmmu_copytte(&sfhme->hme_tte, &tte); 8430 ASSERT(TTE_IS_VALID(&tte)); 8431 vaddr = tte_to_vaddr(hmeblkp, tte); 8432 color = addr_to_vcolor(vaddr); 8433 8434 #ifdef DEBUG 8435 if ((flags & HAT_CACHE) && bcolor != NO_VCOLOR) { 8436 ASSERT(color == bcolor); 8437 } 8438 #endif 8439 8440 ASSERT(flags != HAT_TMPNC || color == PP_GET_VCOLOR(pp)); 8441 8442 ttemod = tte; 8443 if (flags & (HAT_UNCACHE | HAT_TMPNC)) { 8444 TTE_CLR_VCACHEABLE(&ttemod); 8445 } else { /* flags & HAT_CACHE */ 8446 TTE_SET_VCACHEABLE(&ttemod); 8447 } 8448 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 8449 if (ret < 0) { 8450 /* 8451 * Since all cpus are captured, modifytte should not 8452 * fail. 8453 */ 8454 panic("sfmmu_page_cache: write to tte failed"); 8455 } 8456 8457 sfmmup = hblktosfmmu(hmeblkp); 8458 if (cache_flush_flag == CACHE_FLUSH) { 8459 /* 8460 * Flush TSBs, TLBs and caches 8461 */ 8462 if (sfmmup->sfmmu_ismhat) { 8463 if (flags & HAT_CACHE) { 8464 SFMMU_STAT(sf_ism_recache); 8465 } else { 8466 SFMMU_STAT(sf_ism_uncache); 8467 } 8468 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp, 8469 pfn, CACHE_FLUSH); 8470 } else { 8471 sfmmu_tlbcache_demap(vaddr, sfmmup, hmeblkp, 8472 pfn, 0, FLUSH_ALL_CPUS, CACHE_FLUSH, 1); 8473 } 8474 8475 /* 8476 * all cache entries belonging to this pfn are 8477 * now flushed. 8478 */ 8479 cache_flush_flag = CACHE_NO_FLUSH; 8480 } else { 8481 8482 /* 8483 * Flush only TSBs and TLBs.
8484 */ 8485 if (sfmmup->sfmmu_ismhat) { 8486 if (flags & HAT_CACHE) { 8487 SFMMU_STAT(sf_ism_recache); 8488 } else { 8489 SFMMU_STAT(sf_ism_uncache); 8490 } 8491 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp, 8492 pfn, CACHE_NO_FLUSH); 8493 } else { 8494 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 1); 8495 } 8496 } 8497 } 8498 8499 if (PP_ISMAPPED_KPM(pp)) 8500 sfmmu_kpm_page_cache(pp, flags, cache_flush_flag); 8501 8502 switch (flags) { 8503 8504 default: 8505 panic("sfmmu_pagecache: unknown flags"); 8506 break; 8507 8508 case HAT_CACHE: 8509 PP_CLRTNC(pp); 8510 PP_CLRPNC(pp); 8511 PP_SET_VCOLOR(pp, color); 8512 break; 8513 8514 case HAT_TMPNC: 8515 PP_SETTNC(pp); 8516 PP_SET_VCOLOR(pp, NO_VCOLOR); 8517 break; 8518 8519 case HAT_UNCACHE: 8520 PP_SETPNC(pp); 8521 PP_CLRTNC(pp); 8522 PP_SET_VCOLOR(pp, NO_VCOLOR); 8523 break; 8524 } 8525 } 8526 8527 /* 8528 * This routine gets called when the system has run out of free contexts. 8529 * It simply chooses the context passed to it to be stolen and reused. 8530 */ 8531 /* ARGSUSED */ 8532 static void 8533 sfmmu_reuse_ctx(struct ctx *ctx, sfmmu_t *sfmmup) 8534 { 8535 sfmmu_t *stolen_sfmmup; 8536 cpuset_t cpuset; 8537 ushort_t cnum = ctxtoctxnum(ctx); 8538 8539 ASSERT(cnum != KCONTEXT); 8540 ASSERT(rw_read_locked(&ctx->ctx_rwlock) == 0); /* write locked */ 8541 8542 /* 8543 * simply steal and reuse the ctx passed to us. 8544 */ 8545 stolen_sfmmup = ctx->ctx_sfmmu; 8546 ASSERT(sfmmu_hat_lock_held(sfmmup)); 8547 ASSERT(stolen_sfmmup->sfmmu_cnum == cnum); 8548 ASSERT(stolen_sfmmup != ksfmmup); 8549 8550 TRACE_CTXS(&ctx_trace_mutex, ctx_trace_ptr, cnum, stolen_sfmmup, 8551 sfmmup, CTX_TRC_STEAL); 8552 SFMMU_STAT(sf_ctxsteal); 8553 8554 /* 8555 * Update sfmmu and ctx structs. After this point all threads 8556 * belonging to this hat/proc will fault and not use the ctx 8557 * being stolen. 8558 */ 8559 kpreempt_disable(); 8560 /* 8561 * Enforce reverse order of assignments from sfmmu_get_ctx(). This 8562 * is done to prevent a race where a thread faults with the context 8563 * but the TSB has changed. 8564 */ 8565 stolen_sfmmup->sfmmu_cnum = INVALID_CONTEXT; 8566 membar_enter(); 8567 ctx->ctx_sfmmu = NULL; 8568 8569 /* 8570 * 1. flush TLB in all CPUs that ran the process whose ctx 8571 * we are stealing. 8572 * 2. change context for all other CPUs to INVALID_CONTEXT, 8573 * if they are running in the context that we are going to steal. 8574 */ 8575 cpuset = stolen_sfmmup->sfmmu_cpusran; 8576 CPUSET_DEL(cpuset, CPU->cpu_id); 8577 CPUSET_AND(cpuset, cpu_ready_set); 8578 SFMMU_XCALL_STATS(cnum); 8579 xt_some(cpuset, sfmmu_ctx_steal_tl1, cnum, INVALID_CONTEXT); 8580 xt_sync(cpuset); 8581 8582 /* 8583 * flush TLB of local processor 8584 */ 8585 vtag_flushctx(cnum); 8586 8587 /* 8588 * If we just stole the ctx from the current process 8589 * on the local cpu then we also invalidate its context 8590 * here. 8591 */ 8592 if (sfmmu_getctx_sec() == cnum) { 8593 sfmmu_setctx_sec(INVALID_CONTEXT); 8594 sfmmu_clear_utsbinfo(); 8595 } 8596 8597 kpreempt_enable(); 8598 SFMMU_STAT(sf_tlbflush_ctx); 8599 } 8600 8601 /* 8602 * Returns a context with the reader lock held. 8603 * 8604 * We maintain two different lists of contexts. The first list 8605 * is the free list and it is headed by ctxfree. These contexts 8606 * are ready to use. The second list is the dirty list and is 8607 * headed by ctxdirty. These contexts have been freed but haven't 8608 * been flushed from the TLB.
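 * A ctx thus moves through a simple cycle: it is taken off ctxfree by
 * sfmmu_get_ctx(), handed back to ctxdirty by sfmmu_free_ctx(), and
 * recycled in bulk once the TLBs are clean; in sketch form:
 *
 *	ctxfree  --sfmmu_get_ctx()-->   in use
 *	in use   --sfmmu_free_ctx()-->  ctxdirty
 *	ctxdirty --TLB flush (e.g. sfmmu_tlb_all_demap())--> ctxfree
 *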
8609 * 8610 * It's the responsibility of the caller to guarantee that the 8611 * process serializes on calls here by taking the HAT lock for 8612 * the hat. 8613 * 8614 * Changing the page size is a rather complicated process, so 8615 * rather than jump through lots of hoops to special case it, 8616 * the easiest way to go about it is to tell the MMU we want 8617 * to change page sizes and then switch to using a different 8618 * context. When we program the context registers for the 8619 * process, we can take care of setting up the (new) page size 8620 * for that context at that point. 8621 */ 8622 8623 static struct ctx * 8624 sfmmu_get_ctx(sfmmu_t *sfmmup) 8625 { 8626 struct ctx *ctx; 8627 ushort_t cnum; 8628 struct ctx *lastctx = &ctxs[nctxs-1]; 8629 struct ctx *firstctx = &ctxs[NUM_LOCKED_CTXS]; 8630 uint_t found_stealable_ctx; 8631 uint_t retry_count = 0; 8632 8633 #define NEXT_CTX(ctx) (((ctx) >= lastctx) ? firstctx : ((ctx) + 1)) 8634 8635 retry: 8636 8637 ASSERT(sfmmup->sfmmu_cnum != KCONTEXT); 8638 /* 8639 * Check to see if this process has already got a ctx. 8640 * In that case just set the sec-ctx, grab a readers lock, and 8641 * return. 8642 * 8643 * We have to double check after we get the readers lock on the 8644 * context, since it could be stolen in this short window. 8645 */ 8646 if (sfmmup->sfmmu_cnum >= NUM_LOCKED_CTXS) { 8647 ctx = sfmmutoctx(sfmmup); 8648 rw_enter(&ctx->ctx_rwlock, RW_READER); 8649 if (ctx->ctx_sfmmu == sfmmup) { 8650 return (ctx); 8651 } else { 8652 rw_exit(&ctx->ctx_rwlock); 8653 } 8654 } 8655 8656 found_stealable_ctx = 0; 8657 mutex_enter(&ctx_list_lock); 8658 if ((ctx = ctxfree) != NULL) { 8659 /* 8660 * Found a ctx in free list. Delete it from the list and 8661 * use it. There's a short window where the stealer can 8662 * look at the context before we grab the lock on the 8663 * context, so we have to handle that with the free flag. 8664 */ 8665 SFMMU_STAT(sf_ctxfree); 8666 ctxfree = ctx->ctx_free; 8667 ctx->ctx_sfmmu = NULL; 8668 mutex_exit(&ctx_list_lock); 8669 rw_enter(&ctx->ctx_rwlock, RW_WRITER); 8670 ASSERT(ctx->ctx_sfmmu == NULL); 8671 ASSERT((ctx->ctx_flags & CTX_FREE_FLAG) != 0); 8672 } else if ((ctx = ctxdirty) != NULL) { 8673 /* 8674 * No free contexts. If we have at least one dirty ctx 8675 * then flush the TLBs on all cpus if necessary and move 8676 * the dirty list to the free list. 8677 */ 8678 SFMMU_STAT(sf_ctxdirty); 8679 ctxdirty = NULL; 8680 if (delay_tlb_flush) 8681 sfmmu_tlb_all_demap(); 8682 ctxfree = ctx->ctx_free; 8683 ctx->ctx_sfmmu = NULL; 8684 mutex_exit(&ctx_list_lock); 8685 rw_enter(&ctx->ctx_rwlock, RW_WRITER); 8686 ASSERT(ctx->ctx_sfmmu == NULL); 8687 ASSERT((ctx->ctx_flags & CTX_FREE_FLAG) != 0); 8688 } else { 8689 /* 8690 * No free context available, so steal one. 8691 * 8692 * The policy to choose the appropriate context is simple; 8693 * just sweep all the ctxs using ctxhand. This will steal 8694 * the LRU ctx. 8695 * 8696 * We however only steal a non-free context that can be 8697 * write locked. Keep searching till we find a stealable 8698 * ctx. 8699 */ 8700 mutex_exit(&ctx_list_lock); 8701 ctx = ctxhand; 8702 do { 8703 /* 8704 * If you get the writers lock, and the ctx isn't 8705 * a free ctx, THEN you can steal this ctx. 
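 * Note that CTX_FREE_FLAG is tested twice: once before rw_tryenter()
 * as a cheap screen, and again under the writers lock, since the ctx
 * may have been freed in the window between the two checks; in
 * sketch form:
 *
 *	if ((ctx->ctx_flags & CTX_FREE_FLAG) == 0 &&
 *	    rw_tryenter(&ctx->ctx_rwlock, RW_WRITER) != 0) {
 *		if (ctx->ctx_flags & CTX_FREE_FLAG)
 *			rw_exit(&ctx->ctx_rwlock);
 *		else
 *			steal this ctx;
 *	}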
8706 */ 8707 if ((ctx->ctx_flags & CTX_FREE_FLAG) == 0 && 8708 rw_tryenter(&ctx->ctx_rwlock, RW_WRITER) != 0) { 8709 if (ctx->ctx_flags & CTX_FREE_FLAG) { 8710 /* let the first guy have it */ 8711 rw_exit(&ctx->ctx_rwlock); 8712 } else { 8713 found_stealable_ctx = 1; 8714 break; 8715 } 8716 } 8717 ctx = NEXT_CTX(ctx); 8718 } while (ctx != ctxhand); 8719 8720 if (found_stealable_ctx) { 8721 /* 8722 * Try and reuse the ctx. 8723 */ 8724 sfmmu_reuse_ctx(ctx, sfmmup); 8725 8726 } else if (retry_count++ < GET_CTX_RETRY_CNT) { 8727 goto retry; 8728 8729 } else { 8730 panic("Can't find any stealable context"); 8731 } 8732 } 8733 8734 ASSERT(rw_read_locked(&ctx->ctx_rwlock) == 0); /* write locked */ 8735 ctx->ctx_sfmmu = sfmmup; 8736 8737 /* 8738 * Clear the ctx_flags field. 8739 */ 8740 ctx->ctx_flags = 0; 8741 8742 cnum = ctxtoctxnum(ctx); 8743 membar_exit(); 8744 sfmmup->sfmmu_cnum = cnum; 8745 8746 /* 8747 * Let the MMU set up the page sizes to use for 8748 * this context in the TLB. Don't program 2nd dtlb for ism hat. 8749 */ 8750 if ((&mmu_set_ctx_page_sizes) && (sfmmup->sfmmu_ismhat == 0)) 8751 mmu_set_ctx_page_sizes(sfmmup); 8752 8753 /* 8754 * Downgrade to reader's lock. 8755 */ 8756 rw_downgrade(&ctx->ctx_rwlock); 8757 8758 /* 8759 * If this value doesn't get set to what we want 8760 * it won't matter, so don't worry about locking. 8761 */ 8762 ctxhand = NEXT_CTX(ctx); 8763 8764 /* 8765 * Better not have been stolen while we held the ctx' 8766 * lock or we're hosed. 8767 */ 8768 ASSERT(sfmmup == sfmmutoctx(sfmmup)->ctx_sfmmu); 8769 8770 return (ctx); 8771 8772 #undef NEXT_CTX 8773 } 8774 8775 8776 /* 8777 * Set the process context to INVALID_CONTEXT (but 8778 * without stealing the ctx) so that it faults and 8779 * reloads the MMU state from TL=0. Caller must 8780 * hold the hat lock since we don't acquire it here. 8781 */ 8782 static void 8783 sfmmu_sync_mmustate(sfmmu_t *sfmmup) 8784 { 8785 int cnum; 8786 cpuset_t cpuset; 8787 8788 ASSERT(sfmmup != ksfmmup); 8789 ASSERT(sfmmu_hat_lock_held(sfmmup)); 8790 8791 kpreempt_disable(); 8792 8793 cnum = sfmmutoctxnum(sfmmup); 8794 if (cnum != INVALID_CONTEXT) { 8795 cpuset = sfmmup->sfmmu_cpusran; 8796 CPUSET_DEL(cpuset, CPU->cpu_id); 8797 CPUSET_AND(cpuset, cpu_ready_set); 8798 SFMMU_XCALL_STATS(cnum); 8799 8800 xt_some(cpuset, sfmmu_raise_tsb_exception, 8801 cnum, INVALID_CONTEXT); 8802 xt_sync(cpuset); 8803 8804 /* 8805 * If the process is running on the local CPU 8806 * we need to update the MMU state here as well. 8807 */ 8808 if (sfmmu_getctx_sec() == cnum) 8809 sfmmu_load_mmustate(sfmmup); 8810 8811 SFMMU_STAT(sf_tsb_raise_exception); 8812 } 8813 8814 kpreempt_enable(); 8815 } 8816 8817 8818 /* 8819 * Replace the specified TSB with a new TSB. This function gets called when 8820 * we grow, shrink or swapin a TSB. When swapping in a TSB (TSB_SWAPIN), the 8821 * TSB_FORCEALLOC flag may be used to force allocation of a minimum-sized TSB 8822 * (8K). 8823 * 8824 * Caller must hold the HAT lock, but should assume any tsb_info 8825 * pointers it has are no longer valid after calling this function. 8826 * 8827 * Return values: 8828 * TSB_ALLOCFAIL Failed to allocate a TSB, due to memory constraints 8829 * TSB_LOSTRACE HAT is busy, i.e. 
another thread is already doing 8830 * something to this tsbinfo/TSB 8831 * TSB_SUCCESS Operation succeeded 8832 */ 8833 static tsb_replace_rc_t 8834 sfmmu_replace_tsb(sfmmu_t *sfmmup, struct tsb_info *old_tsbinfo, uint_t szc, 8835 hatlock_t *hatlockp, uint_t flags) 8836 { 8837 struct tsb_info *new_tsbinfo = NULL; 8838 struct tsb_info *curtsb, *prevtsb; 8839 uint_t tte_sz_mask; 8840 cpuset_t cpuset; 8841 struct ctx *ctx = NULL; 8842 int ctxnum; 8843 8844 ASSERT(sfmmup != ksfmmup); 8845 ASSERT(sfmmup->sfmmu_ismhat == 0); 8846 ASSERT(sfmmu_hat_lock_held(sfmmup)); 8847 ASSERT(szc <= tsb_max_growsize); 8848 8849 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_BUSY)) 8850 return (TSB_LOSTRACE); 8851 8852 /* 8853 * Find the tsb_info ahead of this one in the list, and 8854 * also make sure that the tsb_info passed in really 8855 * exists! 8856 */ 8857 for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb; 8858 curtsb != old_tsbinfo && curtsb != NULL; 8859 prevtsb = curtsb, curtsb = curtsb->tsb_next); 8860 ASSERT(curtsb != NULL); 8861 8862 if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 8863 /* 8864 * The process is swapped out, so just set the new size 8865 * code. When it swaps back in, we'll allocate a new one 8866 * of the new chosen size. 8867 */ 8868 curtsb->tsb_szc = szc; 8869 return (TSB_SUCCESS); 8870 } 8871 SFMMU_FLAGS_SET(sfmmup, HAT_BUSY); 8872 8873 tte_sz_mask = old_tsbinfo->tsb_ttesz_mask; 8874 8875 /* 8876 * All initialization is done inside of sfmmu_tsbinfo_alloc(). 8877 * If we fail to allocate a TSB, exit. 8878 */ 8879 sfmmu_hat_exit(hatlockp); 8880 if (sfmmu_tsbinfo_alloc(&new_tsbinfo, szc, tte_sz_mask, 8881 flags, sfmmup)) { 8882 (void) sfmmu_hat_enter(sfmmup); 8883 if (!(flags & TSB_SWAPIN)) 8884 SFMMU_STAT(sf_tsb_resize_failures); 8885 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 8886 return (TSB_ALLOCFAIL); 8887 } 8888 (void) sfmmu_hat_enter(sfmmup); 8889 8890 /* 8891 * Re-check to make sure somebody else didn't muck with us while we 8892 * didn't hold the HAT lock. If the process swapped out, fine, just 8893 * exit; this can happen if we try to shrink the TSB from the context 8894 * of another process (such as on an ISM unmap), though it is rare. 8895 */ 8896 if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 8897 SFMMU_STAT(sf_tsb_resize_failures); 8898 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 8899 sfmmu_hat_exit(hatlockp); 8900 sfmmu_tsbinfo_free(new_tsbinfo); 8901 (void) sfmmu_hat_enter(sfmmup); 8902 return (TSB_LOSTRACE); 8903 } 8904 8905 #ifdef DEBUG 8906 /* Reverify that the tsb_info still exists.. for debugging only */ 8907 for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb; 8908 curtsb != old_tsbinfo && curtsb != NULL; 8909 prevtsb = curtsb, curtsb = curtsb->tsb_next); 8910 ASSERT(curtsb != NULL); 8911 #endif /* DEBUG */ 8912 8913 /* 8914 * Quiesce any CPUs running this process on their next TLB miss 8915 * so they atomically see the new tsb_info. We temporarily set the 8916 * context to invalid context so new threads that come on processor 8917 * after we do the xcall to cpusran will also serialize behind the 8918 * HAT lock on TLB miss and will see the new TSB. Since this short 8919 * race with a new thread coming on processor is relatively rare, 8920 * this synchronization mechanism should be cheaper than always 8921 * pausing all CPUs for the duration of the setup, which is what 8922 * the old implementation did. This is particularly true if we are 8923 * copying a huge chunk of memory around during that window.
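 * In sketch form the quiesce is the same sequence
 * sfmmu_sync_mmustate() uses above:
 *
 *	sfmmup->sfmmu_cnum = INVALID_CONTEXT;
 *	membar_enter();
 *	xt_some(cpuset, sfmmu_raise_tsb_exception,
 *	    ctxnum, INVALID_CONTEXT);
 *	xt_sync(cpuset);
 *
 * so CPUs running this process take the TSB exception, serialize on
 * the HAT lock, and pick up the new tsb_info.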
8924 * 8925 * The memory barriers are to make sure things stay consistent 8926 * with resume() since it does not hold the HAT lock while 8927 * walking the list of tsb_info structures. 8928 */ 8929 if ((flags & TSB_SWAPIN) != TSB_SWAPIN) { 8930 /* The TSB is either growing or shrinking. */ 8931 ctx = sfmmutoctx(sfmmup); 8932 rw_enter(&ctx->ctx_rwlock, RW_WRITER); 8933 8934 ctxnum = sfmmutoctxnum(sfmmup); 8935 sfmmup->sfmmu_cnum = INVALID_CONTEXT; 8936 membar_enter(); /* make sure visible on all CPUs */ 8937 8938 kpreempt_disable(); 8939 if (ctxnum != INVALID_CONTEXT) { 8940 cpuset = sfmmup->sfmmu_cpusran; 8941 CPUSET_DEL(cpuset, CPU->cpu_id); 8942 CPUSET_AND(cpuset, cpu_ready_set); 8943 SFMMU_XCALL_STATS(ctxnum); 8944 8945 xt_some(cpuset, sfmmu_raise_tsb_exception, 8946 ctxnum, INVALID_CONTEXT); 8947 xt_sync(cpuset); 8948 8949 SFMMU_STAT(sf_tsb_raise_exception); 8950 } 8951 kpreempt_enable(); 8952 } else { 8953 /* 8954 * It is illegal to swap in TSBs from a process other 8955 * than a process being swapped in. This in turn 8956 * implies we do not have a valid MMU context here 8957 * since a process needs one to resolve translation 8958 * misses. 8959 */ 8960 ASSERT(curthread->t_procp->p_as->a_hat == sfmmup); 8961 ASSERT(sfmmutoctxnum(sfmmup) == INVALID_CONTEXT); 8962 } 8963 8964 new_tsbinfo->tsb_next = old_tsbinfo->tsb_next; 8965 membar_stst(); /* strict ordering required */ 8966 if (prevtsb) 8967 prevtsb->tsb_next = new_tsbinfo; 8968 else 8969 sfmmup->sfmmu_tsb = new_tsbinfo; 8970 membar_enter(); /* make sure new TSB globally visible */ 8971 sfmmu_setup_tsbinfo(sfmmup); 8972 8973 /* 8974 * We need to migrate TSB entries from the old TSB to the new TSB 8975 * if tsb_remap_ttes is set and the TSB is growing. 8976 */ 8977 if (tsb_remap_ttes && ((flags & TSB_GROW) == TSB_GROW)) 8978 sfmmu_copy_tsb(old_tsbinfo, new_tsbinfo); 8979 8980 if ((flags & TSB_SWAPIN) != TSB_SWAPIN) { 8981 kpreempt_disable(); 8982 membar_exit(); 8983 sfmmup->sfmmu_cnum = ctxnum; 8984 if (ctxnum != INVALID_CONTEXT && 8985 sfmmu_getctx_sec() == ctxnum) { 8986 sfmmu_load_mmustate(sfmmup); 8987 } 8988 kpreempt_enable(); 8989 rw_exit(&ctx->ctx_rwlock); 8990 } 8991 8992 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 8993 8994 /* 8995 * Drop the HAT lock to free our old tsb_info. 8996 */ 8997 sfmmu_hat_exit(hatlockp); 8998 8999 if ((flags & TSB_GROW) == TSB_GROW) { 9000 SFMMU_STAT(sf_tsb_grow); 9001 } else if ((flags & TSB_SHRINK) == TSB_SHRINK) { 9002 SFMMU_STAT(sf_tsb_shrink); 9003 } 9004 9005 sfmmu_tsbinfo_free(old_tsbinfo); 9006 9007 (void) sfmmu_hat_enter(sfmmup); 9008 return (TSB_SUCCESS); 9009 } 9010 9011 /* 9012 * Steal context from process, forcing the process to switch to another 9013 * context on the next TLB miss, and therefore start using the TLB that 9014 * is reprogrammed for the new page sizes. 
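 * When a new page-size array is supplied the routine first copies it
 * into the hat under the hat lock, roughly
 *
 *	for (i = 0; i < mmu_page_sizes; i++)
 *		sfmmup->sfmmu_pgsz[i] = tmp_pgsz[i];
 *
 * so that the next context program sees the updated preferences.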
9015 */ 9016 void 9017 sfmmu_steal_context(sfmmu_t *sfmmup, uint8_t *tmp_pgsz) 9018 { 9019 struct ctx *ctx; 9020 int i, cnum; 9021 hatlock_t *hatlockp = NULL; 9022 9023 hatlockp = sfmmu_hat_enter(sfmmup); 9024 /* USIII+-IV+ optimization, requires hat lock */ 9025 if (tmp_pgsz) { 9026 for (i = 0; i < mmu_page_sizes; i++) 9027 sfmmup->sfmmu_pgsz[i] = tmp_pgsz[i]; 9028 } 9029 SFMMU_STAT(sf_tlb_reprog_pgsz); 9030 ctx = sfmmutoctx(sfmmup); 9031 rw_enter(&ctx->ctx_rwlock, RW_WRITER); 9032 cnum = sfmmutoctxnum(sfmmup); 9033 9034 if (cnum != INVALID_CONTEXT) { 9035 sfmmu_tlb_swap_ctx(sfmmup, ctx); 9036 } 9037 rw_exit(&ctx->ctx_rwlock); 9038 sfmmu_hat_exit(hatlockp); 9039 } 9040 9041 /* 9042 * This function assumes that there are either four or six supported page 9043 * sizes and at most two programmable TLBs, so we need to decide which 9044 * page sizes are most important and then tell the MMU layer so it 9045 * can adjust the TLB page sizes accordingly (if supported). 9046 * 9047 * If these assumptions change, this function will need to be 9048 * updated to support whatever the new limits are. 9049 * 9050 * The growing flag is nonzero if we are growing the address space, 9051 * and zero if it is shrinking. This allows us to decide whether 9052 * to grow or shrink our TSB, depending upon available memory 9053 * conditions. 9054 */ 9055 static void 9056 sfmmu_check_page_sizes(sfmmu_t *sfmmup, int growing) 9057 { 9058 uint64_t ttecnt[MMU_PAGE_SIZES]; 9059 uint64_t tte8k_cnt, tte4m_cnt; 9060 uint8_t i; 9061 int sectsb_thresh; 9062 9063 /* 9064 * Kernel threads, processes with small address spaces not using 9065 * large pages, and dummy ISM HATs need not apply. 9066 */ 9067 if (sfmmup == ksfmmup || sfmmup->sfmmu_ismhat != NULL) 9068 return; 9069 9070 if ((sfmmup->sfmmu_flags & HAT_LGPG_FLAGS) == 0 && 9071 sfmmup->sfmmu_ttecnt[TTE8K] <= tsb_rss_factor) 9072 return; 9073 9074 for (i = 0; i < mmu_page_sizes; i++) { 9075 ttecnt[i] = SFMMU_TTE_CNT(sfmmup, i); 9076 } 9077 9078 /* Check pagesizes in use, and possibly reprogram DTLB. */ 9079 if (&mmu_check_page_sizes) 9080 mmu_check_page_sizes(sfmmup, ttecnt); 9081 9082 /* 9083 * Calculate the number of 8k ttes to represent the span of these 9084 * pages. 9085 */ 9086 tte8k_cnt = ttecnt[TTE8K] + 9087 (ttecnt[TTE64K] << (MMU_PAGESHIFT64K - MMU_PAGESHIFT)) + 9088 (ttecnt[TTE512K] << (MMU_PAGESHIFT512K - MMU_PAGESHIFT)); 9089 if (mmu_page_sizes == max_mmu_page_sizes) { 9090 tte4m_cnt = ttecnt[TTE4M] + 9091 (ttecnt[TTE32M] << (MMU_PAGESHIFT32M - MMU_PAGESHIFT4M)) + 9092 (ttecnt[TTE256M] << (MMU_PAGESHIFT256M - MMU_PAGESHIFT4M)); 9093 } else { 9094 tte4m_cnt = ttecnt[TTE4M]; 9095 } 9096 9097 /* 9098 * Inflate TSB sizes by a factor of 2 if this process 9099 * uses 4M text pages to minimize extra conflict misses 9100 * in the first TSB since without counting text pages 9101 * 8K TSB may become too small. 9102 * 9103 * Also double the size of the second TSB to minimize 9104 * extra conflict misses due to competition between 4M text pages 9105 * and data pages. 9106 * 9107 * We need to adjust the second TSB allocation threshold by the 9108 * inflation factor, since there is no point in creating a second 9109 * TSB when we know all the mappings can fit in the I/D TLBs. 9110 */ 9111 sectsb_thresh = tsb_sectsb_threshold; 9112 if (sfmmup->sfmmu_flags & HAT_4MTEXT_FLAG) { 9113 tte8k_cnt <<= 1; 9114 tte4m_cnt <<= 1; 9115 sectsb_thresh <<= 1; 9116 } 9117 9118 /* 9119 * Check to see if our TSB is the right size; we may need to 9120 * grow or shrink it. 
If the process is small, our work is 9121 * finished at this point. 9122 */ 9123 if (tte8k_cnt <= tsb_rss_factor && tte4m_cnt <= sectsb_thresh) { 9124 return; 9125 } 9126 sfmmu_size_tsb(sfmmup, growing, tte8k_cnt, tte4m_cnt, sectsb_thresh); 9127 } 9128 9129 static void 9130 sfmmu_size_tsb(sfmmu_t *sfmmup, int growing, uint64_t tte8k_cnt, 9131 uint64_t tte4m_cnt, int sectsb_thresh) 9132 { 9133 int tsb_bits; 9134 uint_t tsb_szc; 9135 struct tsb_info *tsbinfop; 9136 hatlock_t *hatlockp = NULL; 9137 9138 hatlockp = sfmmu_hat_enter(sfmmup); 9139 ASSERT(hatlockp != NULL); 9140 tsbinfop = sfmmup->sfmmu_tsb; 9141 ASSERT(tsbinfop != NULL); 9142 9143 /* 9144 * If we're growing, select the size based on RSS. If we're 9145 * shrinking, leave some room so we don't have to turn around and 9146 * grow again immediately. 9147 */ 9148 if (growing) 9149 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt); 9150 else 9151 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt << 1); 9152 9153 if (!growing && (tsb_szc < tsbinfop->tsb_szc) && 9154 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) { 9155 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc, 9156 hatlockp, TSB_SHRINK); 9157 } else if (growing && tsb_szc > tsbinfop->tsb_szc && TSB_OK_GROW()) { 9158 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc, 9159 hatlockp, TSB_GROW); 9160 } 9161 tsbinfop = sfmmup->sfmmu_tsb; 9162 9163 /* 9164 * With the TLB and first TSB out of the way, we need to see if 9165 * we need a second TSB for 4M pages. If we managed to reprogram 9166 * the TLB page sizes above, the process will start using this new 9167 * TSB right away; otherwise, it will start using it on the next 9168 * context switch. Either way, it's no big deal so there's no 9169 * synchronization with the trap handlers here unless we grow the 9170 * TSB (in which case it's required to prevent using the old one 9171 * after it's freed). Note: second tsb is required for 32M/256M 9172 * page sizes. 9173 */ 9174 if (tte4m_cnt > sectsb_thresh) { 9175 /* 9176 * If we're growing, select the size based on RSS. If we're 9177 * shrinking, leave some room so we don't have to turn 9178 * around and grow again immediately. 9179 */ 9180 if (growing) 9181 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt); 9182 else 9183 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt << 1); 9184 if (tsbinfop->tsb_next == NULL) { 9185 struct tsb_info *newtsb; 9186 int allocflags = SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)? 9187 0 : TSB_ALLOC; 9188 9189 sfmmu_hat_exit(hatlockp); 9190 9191 /* 9192 * Try to allocate a TSB for 4[32|256]M pages. If we 9193 * can't get the size we want, retry w/a minimum sized 9194 * TSB. If that still didn't work, give up; we can 9195 * still run without one. 9196 */ 9197 tsb_bits = (mmu_page_sizes == max_mmu_page_sizes)? 9198 TSB4M|TSB32M|TSB256M:TSB4M; 9199 if ((sfmmu_tsbinfo_alloc(&newtsb, tsb_szc, tsb_bits, 9200 allocflags, sfmmup) != 0) && 9201 (sfmmu_tsbinfo_alloc(&newtsb, TSB_MIN_SZCODE, 9202 tsb_bits, allocflags, sfmmup) != 0)) { 9203 return; 9204 } 9205 9206 hatlockp = sfmmu_hat_enter(sfmmup); 9207 9208 if (sfmmup->sfmmu_tsb->tsb_next == NULL) { 9209 sfmmup->sfmmu_tsb->tsb_next = newtsb; 9210 SFMMU_STAT(sf_tsb_sectsb_create); 9211 sfmmu_setup_tsbinfo(sfmmup); 9212 sfmmu_hat_exit(hatlockp); 9213 return; 9214 } else { 9215 /* 9216 * It's annoying, but possible for us 9217 * to get here.. 
we dropped the HAT lock 9218 * because of locking order in the kmem 9219 * allocator, and while we were off getting 9220 * our memory, some other thread decided to 9221 * do us a favor and won the race to get a 9222 * second TSB for this process. Sigh. 9223 */ 9224 sfmmu_hat_exit(hatlockp); 9225 sfmmu_tsbinfo_free(newtsb); 9226 return; 9227 } 9228 } 9229 9230 /* 9231 * We have a second TSB, see if it's big enough. 9232 */ 9233 tsbinfop = tsbinfop->tsb_next; 9234 9235 /* 9236 * Check to see if our second TSB is the right size; 9237 * we may need to grow or shrink it. 9238 * To prevent thrashing (e.g. growing the TSB on a 9239 * subsequent map operation), only try to shrink if 9240 * the TSB reach exceeds twice the virtual address 9241 * space size. 9242 */ 9243 if (!growing && (tsb_szc < tsbinfop->tsb_szc) && 9244 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) { 9245 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, 9246 tsb_szc, hatlockp, TSB_SHRINK); 9247 } else if (growing && tsb_szc > tsbinfop->tsb_szc && 9248 TSB_OK_GROW()) { 9249 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, 9250 tsb_szc, hatlockp, TSB_GROW); 9251 } 9252 } 9253 9254 sfmmu_hat_exit(hatlockp); 9255 } 9256 9257 /* 9258 * Get the preferred page size code for a hat. 9259 * This is only advice, so locking is not done; 9260 * this transitory information could change 9261 * following the call anyway. This interface is 9262 * sun4 private. 9263 */ 9264 /*ARGSUSED*/ 9265 uint_t 9266 hat_preferred_pgsz(struct hat *hat, caddr_t vaddr, size_t maplen, int maptype) 9267 { 9268 sfmmu_t *sfmmup = (sfmmu_t *)hat; 9269 uint_t szc, maxszc = mmu_page_sizes - 1; 9270 size_t pgsz; 9271 9272 if (maptype == MAPPGSZ_ISM) { 9273 for (szc = maxszc; szc >= TTE4M; szc--) { 9274 if (disable_ism_large_pages & (1 << szc)) 9275 continue; 9276 9277 pgsz = hw_page_array[szc].hp_size; 9278 if ((maplen >= pgsz) && IS_P2ALIGNED(vaddr, pgsz)) 9279 return (szc); 9280 } 9281 return (TTE4M); 9282 } else if (&mmu_preferred_pgsz) { /* USIII+-USIV+ */ 9283 return (mmu_preferred_pgsz(sfmmup, vaddr, maplen)); 9284 } else { /* USIII, USII, Niagara */ 9285 for (szc = maxszc; szc > TTE8K; szc--) { 9286 if (disable_large_pages & (1 << szc)) 9287 continue; 9288 9289 pgsz = hw_page_array[szc].hp_size; 9290 if ((maplen >= pgsz) && IS_P2ALIGNED(vaddr, pgsz)) 9291 return (szc); 9292 } 9293 return (TTE8K); 9294 } 9295 } 9296 9297 /* 9298 * Free up a ctx 9299 */ 9300 static void 9301 sfmmu_free_ctx(sfmmu_t *sfmmup, struct ctx *ctx) 9302 { 9303 int ctxnum; 9304 9305 rw_enter(&ctx->ctx_rwlock, RW_WRITER); 9306 9307 TRACE_CTXS(&ctx_trace_mutex, ctx_trace_ptr, sfmmup->sfmmu_cnum, 9308 sfmmup, 0, CTX_TRC_FREE); 9309 9310 if (sfmmup->sfmmu_cnum == INVALID_CONTEXT) { 9311 CPUSET_ZERO(sfmmup->sfmmu_cpusran); 9312 rw_exit(&ctx->ctx_rwlock); 9313 return; 9314 } 9315 9316 ASSERT(sfmmup == ctx->ctx_sfmmu); 9317 9318 ctx->ctx_sfmmu = NULL; 9319 ctx->ctx_flags = 0; 9320 sfmmup->sfmmu_cnum = INVALID_CONTEXT; 9321 membar_enter(); 9322 CPUSET_ZERO(sfmmup->sfmmu_cpusran); 9323 ctxnum = sfmmu_getctx_sec(); 9324 if (ctxnum == ctxtoctxnum(ctx)) { 9325 sfmmu_setctx_sec(INVALID_CONTEXT); 9326 sfmmu_clear_utsbinfo(); 9327 } 9328 9329 /* 9330 * Put the freed ctx on the dirty list 9331 */ 9332 mutex_enter(&ctx_list_lock); 9333 CTX_SET_FLAGS(ctx, CTX_FREE_FLAG); 9334 ctx->ctx_free = ctxdirty; 9335 ctxdirty = ctx; 9336 mutex_exit(&ctx_list_lock); 9337 9338 rw_exit(&ctx->ctx_rwlock); 9339 } 9340 9341 /* 9342 * Free up a sfmmu 9343 * Since the sfmmu is currently embedded in the hat struct we simply zero 9344 * 
out our fields and free up the ism map blk list if any. 9345 */ 9346 static void 9347 sfmmu_free_sfmmu(sfmmu_t *sfmmup) 9348 { 9349 ism_blk_t *blkp, *nx_blkp; 9350 #ifdef DEBUG 9351 ism_map_t *map; 9352 int i; 9353 #endif 9354 9355 ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0); 9356 ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0); 9357 ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0); 9358 ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0); 9359 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0); 9360 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0); 9361 ASSERT(sfmmup->sfmmu_cnum == INVALID_CONTEXT); 9362 sfmmup->sfmmu_free = 0; 9363 sfmmup->sfmmu_ismhat = 0; 9364 9365 blkp = sfmmup->sfmmu_iblk; 9366 sfmmup->sfmmu_iblk = NULL; 9367 9368 while (blkp) { 9369 #ifdef DEBUG 9370 map = blkp->iblk_maps; 9371 for (i = 0; i < ISM_MAP_SLOTS; i++) { 9372 ASSERT(map[i].imap_seg == 0); 9373 ASSERT(map[i].imap_ismhat == NULL); 9374 ASSERT(map[i].imap_ment == NULL); 9375 } 9376 #endif 9377 nx_blkp = blkp->iblk_next; 9378 blkp->iblk_next = NULL; 9379 blkp->iblk_nextpa = (uint64_t)-1; 9380 kmem_cache_free(ism_blk_cache, blkp); 9381 blkp = nx_blkp; 9382 } 9383 } 9384 9385 /* 9386 * Locking primitives accessed by HATLOCK macros 9387 */ 9388 9389 #define SFMMU_SPL_MTX (0x0) 9390 #define SFMMU_ML_MTX (0x1) 9391 9392 #define SFMMU_MLSPL_MTX(type, pg) (((type) == SFMMU_SPL_MTX) ? \ 9393 SPL_HASH(pg) : MLIST_HASH(pg)) 9394 9395 kmutex_t * 9396 sfmmu_page_enter(struct page *pp) 9397 { 9398 return (sfmmu_mlspl_enter(pp, SFMMU_SPL_MTX)); 9399 } 9400 9401 static void 9402 sfmmu_page_exit(kmutex_t *spl) 9403 { 9404 mutex_exit(spl); 9405 } 9406 9407 static int 9408 sfmmu_page_spl_held(struct page *pp) 9409 { 9410 return (sfmmu_mlspl_held(pp, SFMMU_SPL_MTX)); 9411 } 9412 9413 kmutex_t * 9414 sfmmu_mlist_enter(struct page *pp) 9415 { 9416 return (sfmmu_mlspl_enter(pp, SFMMU_ML_MTX)); 9417 } 9418 9419 void 9420 sfmmu_mlist_exit(kmutex_t *mml) 9421 { 9422 mutex_exit(mml); 9423 } 9424 9425 int 9426 sfmmu_mlist_held(struct page *pp) 9427 { 9428 9429 return (sfmmu_mlspl_held(pp, SFMMU_ML_MTX)); 9430 } 9431 9432 /* 9433 * Common code for sfmmu_mlist_enter() and sfmmu_page_enter(). For the 9434 * sfmmu_mlist_enter() case the mml_table lock array is used and for 9435 * sfmmu_page_enter() the sfmmu_page_lock lock array is used. 9436 * 9437 * The lock is taken on a root page so that it protects an operation on all 9438 * constituent pages of a large page pp belongs to. 9439 * 9440 * The routine takes a lock from the appropriate array. The lock is determined 9441 * by hashing the root page. After taking the lock this routine checks if the 9442 * root page has the same size code that was used to determine the root (i.e., 9443 * that root hasn't changed). If root page has the expected p_szc field we 9444 * have the right lock and it's returned to the caller. If root's p_szc 9445 * decreased we release the lock and retry from the beginning. This case can 9446 * happen due to hat_page_demote() decreasing p_szc between our load of p_szc 9447 * value and taking the lock. The number of retries due to p_szc decrease is 9448 * limited by the maximum p_szc value. If p_szc is 0 we return the lock 9449 * determined by hashing pp itself. 9450 * 9451 * If our caller doesn't hold a SE_SHARED or SE_EXCL lock on pp it's also 9452 * possible that p_szc can increase. To increase p_szc a thread has to lock 9453 * all constituent pages EXCL and do hat_pageunload() on all of them. All the 9454 * callers that don't hold a page locked recheck if hmeblk through which pp 9455 * was found still maps this pp.
If it doesn't map it anymore returned lock 9456 * is immediately dropped. Therefore if sfmmu_mlspl_enter() hits the case of 9457 * p_szc increase after taking the lock it returns this lock without further 9458 * retries because in this case the caller doesn't care about which lock was 9459 * taken. The caller will drop it right away. 9460 * 9461 * After the routine returns it's guaranteed that hat_page_demote() can't 9462 * change p_szc field of any of constituent pages of a large page pp belongs 9463 * to as long as pp was either locked at least SHARED prior to this call or 9464 * the caller finds that hment that pointed to this pp still references this 9465 * pp (this also assumes that the caller holds hme hash bucket lock so that 9466 * the same pp can't be remapped into the same hmeblk after it was unmapped by 9467 * hat_pageunload()). 9468 */ 9469 static kmutex_t * 9470 sfmmu_mlspl_enter(struct page *pp, int type) 9471 { 9472 kmutex_t *mtx; 9473 uint_t prev_rszc = UINT_MAX; 9474 page_t *rootpp; 9475 uint_t szc; 9476 uint_t rszc; 9477 uint_t pszc = pp->p_szc; 9478 9479 ASSERT(pp != NULL); 9480 9481 again: 9482 if (pszc == 0) { 9483 mtx = SFMMU_MLSPL_MTX(type, pp); 9484 mutex_enter(mtx); 9485 return (mtx); 9486 } 9487 9488 /* The lock lives in the root page */ 9489 rootpp = PP_GROUPLEADER(pp, pszc); 9490 mtx = SFMMU_MLSPL_MTX(type, rootpp); 9491 mutex_enter(mtx); 9492 9493 /* 9494 * Return mml in the following 3 cases: 9495 * 9496 * 1) If pp itself is root since if its p_szc decreased before we took 9497 * the lock pp is still the root of smaller szc page. And if its p_szc 9498 * increased it doesn't matter what lock we return (see comment in 9499 * front of this routine). 9500 * 9501 * 2) If pp's not root but rootpp is the root of a rootpp->p_szc size 9502 * large page we have the right lock since any previous potential 9503 * hat_page_demote() is done demoting from greater than current root's 9504 * p_szc because hat_page_demote() changes root's p_szc last. No 9505 * further hat_page_demote() can start or be in progress since it 9506 * would need the same lock we currently hold. 9507 * 9508 * 3) If rootpp's p_szc increased since previous iteration it doesn't 9509 * matter what lock we return (see comment in front of this routine). 9510 */ 9511 if (pp == rootpp || (rszc = rootpp->p_szc) == pszc || 9512 rszc >= prev_rszc) { 9513 return (mtx); 9514 } 9515 9516 /* 9517 * hat_page_demote() could have decreased root's p_szc. 9518 * In this case pp's p_szc must also be smaller than pszc. 9519 * Retry. 9520 */ 9521 if (rszc < pszc) { 9522 szc = pp->p_szc; 9523 if (szc < pszc) { 9524 mutex_exit(mtx); 9525 pszc = szc; 9526 goto again; 9527 } 9528 /* 9529 * pp's p_szc increased after it was decreased. 9530 * page cannot be mapped. Return current lock. The caller 9531 * will drop it right away. 9532 */ 9533 return (mtx); 9534 } 9535 9536 /* 9537 * root's p_szc is greater than pp's p_szc. 9538 * hat_page_demote() is not done with all pages 9539 * yet. Wait for it to complete. 
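 * The wait is a simple lock hand-off: acquire the bigger root's
 * mutex, which an in-progress hat_page_demote() is expected to hold,
 * and drop it right away,
 *
 *	mutex_enter(mtx);
 *	mutex_exit(mtx);
 *
 * then recompute the root and retry from the beginning.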
9540 */ 9541 mutex_exit(mtx); 9542 rootpp = PP_GROUPLEADER(rootpp, rszc); 9543 mtx = SFMMU_MLSPL_MTX(type, rootpp); 9544 mutex_enter(mtx); 9545 mutex_exit(mtx); 9546 prev_rszc = rszc; 9547 goto again; 9548 } 9549 9550 static int 9551 sfmmu_mlspl_held(struct page *pp, int type) 9552 { 9553 kmutex_t *mtx; 9554 9555 ASSERT(pp != NULL); 9556 /* The lock lives in the root page */ 9557 pp = PP_PAGEROOT(pp); 9558 ASSERT(pp != NULL); 9559 9560 mtx = SFMMU_MLSPL_MTX(type, pp); 9561 return (MUTEX_HELD(mtx)); 9562 } 9563 9564 static uint_t 9565 sfmmu_get_free_hblk(struct hme_blk **hmeblkpp, uint_t critical) 9566 { 9567 struct hme_blk *hblkp; 9568 9569 if (freehblkp != NULL) { 9570 mutex_enter(&freehblkp_lock); 9571 if (freehblkp != NULL) { 9572 /* 9573 * If the current thread owns hblk_reserve, 9574 * let it succeed even if freehblkcnt is really low. 9575 */ 9576 if (freehblkcnt <= HBLK_RESERVE_MIN && !critical) { 9577 SFMMU_STAT(sf_get_free_throttle); 9578 mutex_exit(&freehblkp_lock); 9579 return (0); 9580 } 9581 freehblkcnt--; 9582 *hmeblkpp = freehblkp; 9583 hblkp = *hmeblkpp; 9584 freehblkp = hblkp->hblk_next; 9585 mutex_exit(&freehblkp_lock); 9586 hblkp->hblk_next = NULL; 9587 SFMMU_STAT(sf_get_free_success); 9588 return (1); 9589 } 9590 mutex_exit(&freehblkp_lock); 9591 } 9592 SFMMU_STAT(sf_get_free_fail); 9593 return (0); 9594 } 9595 9596 static uint_t 9597 sfmmu_put_free_hblk(struct hme_blk *hmeblkp, uint_t critical) 9598 { 9599 struct hme_blk *hblkp; 9600 9601 /* 9602 * If the current thread is mapping into kernel space, 9603 * let it succeed even if freehblkcnt is at the max, 9604 * so that it will avoid freeing it to kmem. 9605 * This will prevent stack overflow due to 9606 * possible recursion since kmem_cache_free() 9607 * might require creation of a slab which 9608 * in turn needs an hmeblk to map that slab; 9609 * let's break this vicious chain at the first 9610 * opportunity. 9611 */ 9612 if (freehblkcnt < HBLK_RESERVE_CNT || critical) { 9613 mutex_enter(&freehblkp_lock); 9614 if (freehblkcnt < HBLK_RESERVE_CNT || critical) { 9615 SFMMU_STAT(sf_put_free_success); 9616 freehblkcnt++; 9617 hmeblkp->hblk_next = freehblkp; 9618 freehblkp = hmeblkp; 9619 mutex_exit(&freehblkp_lock); 9620 return (1); 9621 } 9622 mutex_exit(&freehblkp_lock); 9623 } 9624 9625 /* 9626 * Bring down freehblkcnt to HBLK_RESERVE_CNT. We are here 9627 * only if freehblkcnt is at least HBLK_RESERVE_CNT *and* 9628 * we are not in the process of mapping into kernel space.
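 * The drain loop below re-checks the count under freehblkp_lock on
 * every iteration and frees one excess hblk at a time, in sketch form
 *
 *	while (freehblkcnt > HBLK_RESERVE_CNT) {
 *		mutex_enter(&freehblkp_lock);
 *		if (freehblkcnt > HBLK_RESERVE_CNT) {
 *			unlink one hblk, freehblkcnt--;
 *			mutex_exit(&freehblkp_lock);
 *			kmem_cache_free(sfmmu8_cache, hblkp);
 *		} else {
 *			mutex_exit(&freehblkp_lock);
 *		}
 *	}
 *
 * which keeps the lock hold time short while still bounding the pool.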
9629 */ 9630 ASSERT(!critical); 9631 while (freehblkcnt > HBLK_RESERVE_CNT) { 9632 mutex_enter(&freehblkp_lock); 9633 if (freehblkcnt > HBLK_RESERVE_CNT) { 9634 freehblkcnt--; 9635 hblkp = freehblkp; 9636 freehblkp = hblkp->hblk_next; 9637 mutex_exit(&freehblkp_lock); 9638 ASSERT(get_hblk_cache(hblkp) == sfmmu8_cache); 9639 kmem_cache_free(sfmmu8_cache, hblkp); 9640 continue; 9641 } 9642 mutex_exit(&freehblkp_lock); 9643 } 9644 SFMMU_STAT(sf_put_free_fail); 9645 return (0); 9646 } 9647 9648 static void 9649 sfmmu_hblk_swap(struct hme_blk *new) 9650 { 9651 struct hme_blk *old, *hblkp, *prev; 9652 uint64_t hblkpa, prevpa, newpa; 9653 caddr_t base, vaddr, endaddr; 9654 struct hmehash_bucket *hmebp; 9655 struct sf_hment *osfhme, *nsfhme; 9656 page_t *pp; 9657 kmutex_t *pml; 9658 tte_t tte; 9659 9660 #ifdef DEBUG 9661 hmeblk_tag hblktag; 9662 struct hme_blk *found; 9663 #endif 9664 old = HBLK_RESERVE; 9665 9666 /* 9667 * save pa before bcopy clobbers it 9668 */ 9669 newpa = new->hblk_nextpa; 9670 9671 base = (caddr_t)get_hblk_base(old); 9672 endaddr = base + get_hblk_span(old); 9673 9674 /* 9675 * acquire hash bucket lock. 9676 */ 9677 hmebp = sfmmu_tteload_acquire_hashbucket(ksfmmup, base, TTE8K); 9678 9679 /* 9680 * copy contents from old to new 9681 */ 9682 bcopy((void *)old, (void *)new, HME8BLK_SZ); 9683 9684 /* 9685 * add new to hash chain 9686 */ 9687 sfmmu_hblk_hash_add(hmebp, new, newpa); 9688 9689 /* 9690 * search hash chain for hblk_reserve; this needs to be performed 9691 * after adding new, otherwise prevpa and prev won't correspond 9692 * to the hblk which is prior to old in hash chain when we call 9693 * sfmmu_hblk_hash_rm to remove old later. 9694 */ 9695 for (prevpa = 0, prev = NULL, 9696 hblkpa = hmebp->hmeh_nextpa, hblkp = hmebp->hmeblkp; 9697 hblkp != NULL && hblkp != old; 9698 prevpa = hblkpa, prev = hblkp, 9699 hblkpa = hblkp->hblk_nextpa, hblkp = hblkp->hblk_next); 9700 9701 if (hblkp != old) 9702 panic("sfmmu_hblk_swap: hblk_reserve not found"); 9703 9704 /* 9705 * p_mapping list is still pointing to hments in hblk_reserve; 9706 * fix up p_mapping list so that they point to hments in new. 9707 * 9708 * Since all these mappings are created by hblk_reserve_thread 9709 * on the way and it's using at least one of the buffers from each of 9710 * the newly minted slabs, there is no danger of any of these 9711 * mappings getting unloaded by another thread. 9712 * 9713 * tsbmiss could only modify ref/mod bits of hments in old/new. 9714 * Since all of these hments hold mappings established by segkmem 9715 * and mappings in segkmem are setup with HAT_NOSYNC, ref/mod bits 9716 * have no meaning for the mappings in hblk_reserve. hments in 9717 * old and new are identical except for ref/mod bits. 
9718 */ 9719 for (vaddr = base; vaddr < endaddr; vaddr += TTEBYTES(TTE8K)) { 9720 9721 HBLKTOHME(osfhme, old, vaddr); 9722 sfmmu_copytte(&osfhme->hme_tte, &tte); 9723 9724 if (TTE_IS_VALID(&tte)) { 9725 if ((pp = osfhme->hme_page) == NULL) 9726 panic("sfmmu_hblk_swap: page not mapped"); 9727 9728 pml = sfmmu_mlist_enter(pp); 9729 9730 if (pp != osfhme->hme_page) 9731 panic("sfmmu_hblk_swap: mapping changed"); 9732 9733 HBLKTOHME(nsfhme, new, vaddr); 9734 9735 HME_ADD(nsfhme, pp); 9736 HME_SUB(osfhme, pp); 9737 9738 sfmmu_mlist_exit(pml); 9739 } 9740 } 9741 9742 /* 9743 * remove old from hash chain 9744 */ 9745 sfmmu_hblk_hash_rm(hmebp, old, prevpa, prev); 9746 9747 #ifdef DEBUG 9748 9749 hblktag.htag_id = ksfmmup; 9750 hblktag.htag_bspage = HME_HASH_BSPAGE(base, HME_HASH_SHIFT(TTE8K)); 9751 hblktag.htag_rehash = HME_HASH_REHASH(TTE8K); 9752 HME_HASH_FAST_SEARCH(hmebp, hblktag, found); 9753 9754 if (found != new) 9755 panic("sfmmu_hblk_swap: new hblk not found"); 9756 #endif 9757 9758 SFMMU_HASH_UNLOCK(hmebp); 9759 9760 /* 9761 * Reset hblk_reserve 9762 */ 9763 bzero((void *)old, HME8BLK_SZ); 9764 old->hblk_nextpa = va_to_pa((caddr_t)old); 9765 } 9766 9767 /* 9768 * Grab the mlist mutex for both pages passed in. 9769 * 9770 * low and high will be returned as pointers to the mutexes for these pages. 9771 * low refers to the mutex residing in the lower bin of the mlist hash, while 9772 * high refers to the mutex residing in the higher bin of the mlist hash. This 9773 * is due to the locking order restrictions on the same thread grabbing 9774 * multiple mlist mutexes. The low lock must be acquired before the high lock. 9775 * 9776 * If both pages hash to the same mutex, only grab that single mutex, and 9777 * high will be returned as NULL 9778 * If the pages hash to different bins in the hash, grab the lower addressed 9779 * lock first and then the higher addressed lock in order to follow the locking 9780 * rules involved with the same thread grabbing multiple mlist mutexes. 9781 * low and high will both have non-NULL values. 9782 */ 9783 static void 9784 sfmmu_mlist_reloc_enter(struct page *targ, struct page *repl, 9785 kmutex_t **low, kmutex_t **high) 9786 { 9787 kmutex_t *mml_targ, *mml_repl; 9788 9789 /* 9790 * no need to do the dance around szc as in sfmmu_mlist_enter() 9791 * because this routine is only called by hat_page_relocate() and all 9792 * targ and repl pages are already locked EXCL so szc can't change. 
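 * Because the ordering rule reduces to comparing the two mutex
 * addresses, the selection below is simply, in sketch form,
 *
 *	if (mml_targ == mml_repl) {
 *		*low = mml_targ;
 *		*high = NULL;
 *	} else {
 *		*low = MIN(mml_targ, mml_repl);
 *		*high = MAX(mml_targ, mml_repl);
 *	}
 *
 * so the lower-addressed lock is always taken first, and
 * sfmmu_mlist_reloc_exit() releases in the reverse order.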
9793 */ 9794 9795 mml_targ = MLIST_HASH(PP_PAGEROOT(targ)); 9796 mml_repl = MLIST_HASH(PP_PAGEROOT(repl)); 9797 9798 if (mml_targ == mml_repl) { 9799 *low = mml_targ; 9800 *high = NULL; 9801 } else { 9802 if (mml_targ < mml_repl) { 9803 *low = mml_targ; 9804 *high = mml_repl; 9805 } else { 9806 *low = mml_repl; 9807 *high = mml_targ; 9808 } 9809 } 9810 9811 mutex_enter(*low); 9812 if (*high) 9813 mutex_enter(*high); 9814 } 9815 9816 static void 9817 sfmmu_mlist_reloc_exit(kmutex_t *low, kmutex_t *high) 9818 { 9819 if (high) 9820 mutex_exit(high); 9821 mutex_exit(low); 9822 } 9823 9824 static hatlock_t * 9825 sfmmu_hat_enter(sfmmu_t *sfmmup) 9826 { 9827 hatlock_t *hatlockp; 9828 9829 if (sfmmup != ksfmmup) { 9830 hatlockp = TSB_HASH(sfmmup); 9831 mutex_enter(HATLOCK_MUTEXP(hatlockp)); 9832 return (hatlockp); 9833 } 9834 return (NULL); 9835 } 9836 9837 static hatlock_t * 9838 sfmmu_hat_tryenter(sfmmu_t *sfmmup) 9839 { 9840 hatlock_t *hatlockp; 9841 9842 if (sfmmup != ksfmmup) { 9843 hatlockp = TSB_HASH(sfmmup); 9844 if (mutex_tryenter(HATLOCK_MUTEXP(hatlockp)) == 0) 9845 return (NULL); 9846 return (hatlockp); 9847 } 9848 return (NULL); 9849 } 9850 9851 static void 9852 sfmmu_hat_exit(hatlock_t *hatlockp) 9853 { 9854 if (hatlockp != NULL) 9855 mutex_exit(HATLOCK_MUTEXP(hatlockp)); 9856 } 9857 9858 static void 9859 sfmmu_hat_lock_all(void) 9860 { 9861 int i; 9862 for (i = 0; i < SFMMU_NUM_LOCK; i++) 9863 mutex_enter(HATLOCK_MUTEXP(&hat_lock[i])); 9864 } 9865 9866 static void 9867 sfmmu_hat_unlock_all(void) 9868 { 9869 int i; 9870 for (i = SFMMU_NUM_LOCK - 1; i >= 0; i--) 9871 mutex_exit(HATLOCK_MUTEXP(&hat_lock[i])); 9872 } 9873 9874 int 9875 sfmmu_hat_lock_held(sfmmu_t *sfmmup) 9876 { 9877 ASSERT(sfmmup != ksfmmup); 9878 return (MUTEX_HELD(HATLOCK_MUTEXP(TSB_HASH(sfmmup)))); 9879 } 9880 9881 /* 9882 * Locking primitives to provide consistency between ISM unmap 9883 * and other operations. Since ISM unmap can take a long time, we 9884 * use HAT_ISMBUSY flag (protected by the hatlock) to avoid creating 9885 * contention on the hatlock buckets while ISM segments are being 9886 * unmapped. The tradeoff is that the flags don't prevent priority 9887 * inversion from occurring, so we must request kernel priority in 9888 * case we have to sleep to keep from getting buried while holding 9889 * the HAT_ISMBUSY flag set, which in turn could block other kernel 9890 * threads from running (for example, in sfmmu_uvatopfn()). 9891 */ 9892 static void 9893 sfmmu_ismhat_enter(sfmmu_t *sfmmup, int hatlock_held) 9894 { 9895 hatlock_t *hatlockp; 9896 9897 THREAD_KPRI_REQUEST(); 9898 if (!hatlock_held) 9899 hatlockp = sfmmu_hat_enter(sfmmup); 9900 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) 9901 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp)); 9902 SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY); 9903 if (!hatlock_held) 9904 sfmmu_hat_exit(hatlockp); 9905 } 9906 9907 static void 9908 sfmmu_ismhat_exit(sfmmu_t *sfmmup, int hatlock_held) 9909 { 9910 hatlock_t *hatlockp; 9911 9912 if (!hatlock_held) 9913 hatlockp = sfmmu_hat_enter(sfmmup); 9914 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 9915 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY); 9916 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 9917 if (!hatlock_held) 9918 sfmmu_hat_exit(hatlockp); 9919 THREAD_KPRI_RELEASE(); 9920 } 9921 9922 /* 9923 * 9924 * Algorithm: 9925 * 9926 * (1) if segkmem is not ready, allocate hblk from an array of pre-alloc'ed 9927 * hblks. 
9928 * 9929 * (2) if we are allocating an hblk for mapping a slab in sfmmu_cache, 9930 * 9931 * (a) try to return an hblk from reserve pool of free hblks; 9932 * (b) if the reserve pool is empty, acquire hblk_reserve_lock 9933 * and return hblk_reserve. 9934 * 9935 * (3) call kmem_cache_alloc() to allocate hblk; 9936 * 9937 * (a) if hblk_reserve_lock is held by the current thread, 9938 * atomically replace hblk_reserve by the hblk that is 9939 * returned by kmem_cache_alloc; release hblk_reserve_lock 9940 * and call kmem_cache_alloc() again. 9941 * (b) if reserve pool is not full, add the hblk that is 9942 * returned by kmem_cache_alloc to reserve pool and 9943 * call kmem_cache_alloc again. 9944 * 9945 */ 9946 static struct hme_blk * 9947 sfmmu_hblk_alloc(sfmmu_t *sfmmup, caddr_t vaddr, 9948 struct hmehash_bucket *hmebp, uint_t size, hmeblk_tag hblktag, 9949 uint_t flags) 9950 { 9951 struct hme_blk *hmeblkp = NULL; 9952 struct hme_blk *newhblkp; 9953 struct hme_blk *shw_hblkp = NULL; 9954 struct kmem_cache *sfmmu_cache = NULL; 9955 uint64_t hblkpa; 9956 ulong_t index; 9957 uint_t owner; /* set to 1 if using hblk_reserve */ 9958 uint_t forcefree; 9959 int sleep; 9960 9961 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 9962 9963 /* 9964 * If segkmem is not created yet, allocate from static hmeblks 9965 * created at the end of startup_modules(). See the block comment 9966 * in startup_modules() describing how we estimate the number of 9967 * static hmeblks that will be needed during re-map. 9968 */ 9969 if (!hblk_alloc_dynamic) { 9970 9971 if (size == TTE8K) { 9972 index = nucleus_hblk8.index; 9973 if (index >= nucleus_hblk8.len) { 9974 /* 9975 * If we panic here, see startup_modules() to 9976 * make sure that we are calculating the 9977 * number of hblk8's that we need correctly. 9978 */ 9979 panic("no nucleus hblk8 to allocate"); 9980 } 9981 hmeblkp = 9982 (struct hme_blk *)&nucleus_hblk8.list[index]; 9983 nucleus_hblk8.index++; 9984 SFMMU_STAT(sf_hblk8_nalloc); 9985 } else { 9986 index = nucleus_hblk1.index; 9987 if (nucleus_hblk1.index >= nucleus_hblk1.len) { 9988 /* 9989 * If we panic here, see startup_modules() 9990 * and H8TOH1; most likely you need to 9991 * update the calculation of the number 9992 * of hblk1's the kernel needs to boot. 9993 */ 9994 panic("no nucleus hblk1 to allocate"); 9995 } 9996 hmeblkp = 9997 (struct hme_blk *)&nucleus_hblk1.list[index]; 9998 nucleus_hblk1.index++; 9999 SFMMU_STAT(sf_hblk1_nalloc); 10000 } 10001 10002 goto hblk_init; 10003 } 10004 10005 SFMMU_HASH_UNLOCK(hmebp); 10006 10007 if (sfmmup != KHATID) { 10008 if (mmu_page_sizes == max_mmu_page_sizes) { 10009 if (size < TTE256M) 10010 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr, 10011 size, flags); 10012 } else { 10013 if (size < TTE4M) 10014 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr, 10015 size, flags); 10016 } 10017 } 10018 10019 fill_hblk: 10020 owner = (hblk_reserve_thread == curthread) ? 1 : 0; 10021 10022 if (owner && size == TTE8K) { 10023 10024 /* 10025 * We are really in a tight spot. We already own 10026 * hblk_reserve and we need another hblk. In anticipation 10027 * of this kind of scenario, we specifically set aside 10028 * HBLK_RESERVE_MIN number of hblks to be used exclusively 10029 * by owner of hblk_reserve. 
10030		 */
10031		SFMMU_STAT(sf_hblk_recurse_cnt);
10032
10033		if (!sfmmu_get_free_hblk(&hmeblkp, 1))
10034			panic("sfmmu_hblk_alloc: reserve list is empty");
10035
10036		goto hblk_verify;
10037	}
10038
10039	ASSERT(!owner);
10040
10041	if ((flags & HAT_NO_KALLOC) == 0) {
10042
10043		sfmmu_cache = ((size == TTE8K) ? sfmmu8_cache : sfmmu1_cache);
10044		sleep = ((sfmmup == KHATID) ? KM_NOSLEEP : KM_SLEEP);
10045
10046		if ((hmeblkp = kmem_cache_alloc(sfmmu_cache, sleep)) == NULL) {
10047			hmeblkp = sfmmu_hblk_steal(size);
10048		} else {
10049			/*
10050			 * if we are the owner of hblk_reserve,
10051			 * swap hblk_reserve with hmeblkp and
10052			 * start a fresh life. Hope things go
10053			 * better this time.
10054			 */
10055			if (hblk_reserve_thread == curthread) {
10056				ASSERT(sfmmu_cache == sfmmu8_cache);
10057				sfmmu_hblk_swap(hmeblkp);
10058				hblk_reserve_thread = NULL;
10059				mutex_exit(&hblk_reserve_lock);
10060				goto fill_hblk;
10061			}
10062			/*
10063			 * let's donate this hblk to our reserve list if
10064			 * we are not mapping kernel range
10065			 */
10066			if (size == TTE8K && sfmmup != KHATID)
10067				if (sfmmu_put_free_hblk(hmeblkp, 0))
10068					goto fill_hblk;
10069		}
10070	} else {
10071		/*
10072		 * We are here to map the slab in sfmmu8_cache; let's
10073		 * check if we could tap our reserve list; if successful,
10074		 * this will avoid the pain of going thru sfmmu_hblk_swap
10075		 */
10076		SFMMU_STAT(sf_hblk_slab_cnt);
10077		if (!sfmmu_get_free_hblk(&hmeblkp, 0)) {
10078			/*
10079			 * let's start hblk_reserve dance
10080			 */
10081			SFMMU_STAT(sf_hblk_reserve_cnt);
10082			owner = 1;
10083			mutex_enter(&hblk_reserve_lock);
10084			hmeblkp = HBLK_RESERVE;
10085			hblk_reserve_thread = curthread;
10086		}
10087	}
10088
10089 hblk_verify:
10090	ASSERT(hmeblkp != NULL);
10091	set_hblk_sz(hmeblkp, size);
10092	ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp));
10093	SFMMU_HASH_LOCK(hmebp);
10094	HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp);
10095	if (newhblkp != NULL) {
10096		SFMMU_HASH_UNLOCK(hmebp);
10097		if (hmeblkp != HBLK_RESERVE) {
10098			/*
10099			 * This is really tricky!
10100			 *
10101			 * vmem_alloc(vmem_seg_arena)
10102			 *  vmem_alloc(vmem_internal_arena)
10103			 *   segkmem_alloc(heap_arena)
10104			 *    vmem_alloc(heap_arena)
10105			 *    page_create()
10106			 *    hat_memload()
10107			 *     kmem_cache_free()
10108			 *      kmem_cache_alloc()
10109			 *       kmem_slab_create()
10110			 *        vmem_alloc(kmem_internal_arena)
10111			 *         segkmem_alloc(heap_arena)
10112			 *          vmem_alloc(heap_arena)
10113			 *          page_create()
10114			 *          hat_memload()
10115			 *           kmem_cache_free()
10116			 *            ...
10117			 *
10118			 * Thus, hat_memload() could call kmem_cache_free()
10119			 * enough times that we could easily hit the bottom
10120			 * of the stack or run out of the reserve list of
10121			 * vmem_seg structs. So, we must donate this hblk
10122			 * to the reserve list if it's allocated from
10123			 * sfmmu8_cache *and* mapping kernel range.
10124			 * We don't need to worry about freeing hmeblk1's
10125			 * to kmem since they don't map any kmem slabs.
10126			 *
10127			 * Note: When segkmem supports largepages, we must
10128			 * free hmeblk1's to the reserve list as well.
10129			 */
10130			forcefree = (sfmmup == KHATID) ? 1 : 0;
10131			if (size == TTE8K &&
10132			    sfmmu_put_free_hblk(hmeblkp, forcefree)) {
10133				goto re_verify;
10134			}
10135			ASSERT(sfmmup != KHATID);
10136			kmem_cache_free(get_hblk_cache(hmeblkp), hmeblkp);
10137		} else {
10138			/*
10139			 * Hey! we don't need hblk_reserve any more.
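			 * Somebody else inserted an hblk for this
			 * mapping while we were allocating, so we can
			 * simply release ownership of hblk_reserve.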
10140 */ 10141 ASSERT(owner); 10142 hblk_reserve_thread = NULL; 10143 mutex_exit(&hblk_reserve_lock); 10144 owner = 0; 10145 } 10146 re_verify: 10147 /* 10148 * let's check if the goodies are still present 10149 */ 10150 SFMMU_HASH_LOCK(hmebp); 10151 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp); 10152 if (newhblkp != NULL) { 10153 /* 10154 * return newhblkp if it's not hblk_reserve; 10155 * if newhblkp is hblk_reserve, return it 10156 * _only if_ we are the owner of hblk_reserve. 10157 */ 10158 if (newhblkp != HBLK_RESERVE || owner) { 10159 return (newhblkp); 10160 } else { 10161 /* 10162 * we just hit hblk_reserve in the hash and 10163 * we are not the owner of that; 10164 * 10165 * block until hblk_reserve_thread completes 10166 * swapping hblk_reserve and try the dance 10167 * once again. 10168 */ 10169 SFMMU_HASH_UNLOCK(hmebp); 10170 mutex_enter(&hblk_reserve_lock); 10171 mutex_exit(&hblk_reserve_lock); 10172 SFMMU_STAT(sf_hblk_reserve_hit); 10173 goto fill_hblk; 10174 } 10175 } else { 10176 /* 10177 * it's no more! try the dance once again. 10178 */ 10179 SFMMU_HASH_UNLOCK(hmebp); 10180 goto fill_hblk; 10181 } 10182 } 10183 10184 hblk_init: 10185 set_hblk_sz(hmeblkp, size); 10186 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 10187 hmeblkp->hblk_next = (struct hme_blk *)NULL; 10188 hmeblkp->hblk_tag = hblktag; 10189 hmeblkp->hblk_shadow = shw_hblkp; 10190 hblkpa = hmeblkp->hblk_nextpa; 10191 hmeblkp->hblk_nextpa = 0; 10192 10193 ASSERT(get_hblk_ttesz(hmeblkp) == size); 10194 ASSERT(get_hblk_span(hmeblkp) == HMEBLK_SPAN(size)); 10195 ASSERT(hmeblkp->hblk_hmecnt == 0); 10196 ASSERT(hmeblkp->hblk_vcnt == 0); 10197 ASSERT(hmeblkp->hblk_lckcnt == 0); 10198 ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp)); 10199 sfmmu_hblk_hash_add(hmebp, hmeblkp, hblkpa); 10200 return (hmeblkp); 10201 } 10202 10203 /* 10204 * This function performs any cleanup required on the hme_blk 10205 * and returns it to the free list. 10206 */ 10207 /* ARGSUSED */ 10208 static void 10209 sfmmu_hblk_free(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp, 10210 uint64_t hblkpa, struct hme_blk **listp) 10211 { 10212 int shw_size, vshift; 10213 struct hme_blk *shw_hblkp; 10214 uint_t shw_mask, newshw_mask; 10215 uintptr_t vaddr; 10216 int size; 10217 uint_t critical; 10218 10219 ASSERT(hmeblkp); 10220 ASSERT(!hmeblkp->hblk_hmecnt); 10221 ASSERT(!hmeblkp->hblk_vcnt); 10222 ASSERT(!hmeblkp->hblk_lckcnt); 10223 ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp)); 10224 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve); 10225 10226 critical = (hblktosfmmu(hmeblkp) == KHATID) ? 
1 : 0;
10227
10228	size = get_hblk_ttesz(hmeblkp);
10229	shw_hblkp = hmeblkp->hblk_shadow;
10230	if (shw_hblkp) {
10231		ASSERT(hblktosfmmu(hmeblkp) != KHATID);
10232		if (mmu_page_sizes == max_mmu_page_sizes) {
10233			ASSERT(size < TTE256M);
10234		} else {
10235			ASSERT(size < TTE4M);
10236		}
10237
10238		shw_size = get_hblk_ttesz(shw_hblkp);
10239		vaddr = get_hblk_base(hmeblkp);
10240		vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size);
10241		ASSERT(vshift < 8);
10242		/*
10243		 * Atomically clear shadow mask bit
10244		 */
10245		do {
10246			shw_mask = shw_hblkp->hblk_shw_mask;
10247			ASSERT(shw_mask & (1 << vshift));
10248			newshw_mask = shw_mask & ~(1 << vshift);
10249			newshw_mask = cas32(&shw_hblkp->hblk_shw_mask,
10250			    shw_mask, newshw_mask);
10251		} while (newshw_mask != shw_mask);
10252		hmeblkp->hblk_shadow = NULL;
10253	}
10254	hmeblkp->hblk_next = NULL;
10255	hmeblkp->hblk_nextpa = hblkpa;
10256	hmeblkp->hblk_shw_bit = 0;
10257
10258	if (hmeblkp->hblk_nuc_bit == 0) {
10259
10260		if (size == TTE8K && sfmmu_put_free_hblk(hmeblkp, critical))
10261			return;
10262
10263		hmeblkp->hblk_next = *listp;
10264		*listp = hmeblkp;
10265	}
10266 }
10267
10268 static void
10269 sfmmu_hblks_list_purge(struct hme_blk **listp)
10270 {
10271	struct hme_blk *hmeblkp;
10272
10273	while ((hmeblkp = *listp) != NULL) {
10274		*listp = hmeblkp->hblk_next;
10275		kmem_cache_free(get_hblk_cache(hmeblkp), hmeblkp);
10276	}
10277 }
10278
10279 #define	BUCKETS_TO_SEARCH_BEFORE_UNLOAD	30
10280
10281 static uint_t sfmmu_hblk_steal_twice;
10282 static uint_t sfmmu_hblk_steal_count, sfmmu_hblk_steal_unload_count;
10283
10284 /*
10285  * Steal a hmeblk.
10286  * Enough hmeblks were allocated at startup (nucleus hmeblks), and more are
10287  * added dynamically, so we should always be able to find one. Look for an
10288  * unused/unlocked hmeblk in the user hash table.
10289  */
10290 static struct hme_blk *
10291 sfmmu_hblk_steal(int size)
10292 {
10293	static struct hmehash_bucket *uhmehash_steal_hand = NULL;
10294	struct hmehash_bucket *hmebp;
10295	struct hme_blk *hmeblkp = NULL, *pr_hblk;
10296	uint64_t hblkpa, prevpa;
10297	int i;
10298
10299	for (;;) {
10300		hmebp = (uhmehash_steal_hand == NULL) ? uhme_hash :
10301			uhmehash_steal_hand;
10302		ASSERT(hmebp >= uhme_hash && hmebp <= &uhme_hash[UHMEHASH_SZ]);
10303
10304		for (i = 0; hmeblkp == NULL && i <= UHMEHASH_SZ +
10305		    BUCKETS_TO_SEARCH_BEFORE_UNLOAD; i++) {
10306			SFMMU_HASH_LOCK(hmebp);
10307			hmeblkp = hmebp->hmeblkp;
10308			hblkpa = hmebp->hmeh_nextpa;
10309			prevpa = 0;
10310			pr_hblk = NULL;
10311			while (hmeblkp) {
10312				/*
10313				 * check if it is an hmeblk that is not locked
10314				 * and not shared. Skip shadow hmeblks with
10315				 * shadow_mask set, i.e. valid count nonzero.
10316				 */
10317				if ((get_hblk_ttesz(hmeblkp) == size) &&
10318				    (hmeblkp->hblk_shw_bit == 0 ||
10319					hmeblkp->hblk_vcnt == 0) &&
10320				    (hmeblkp->hblk_lckcnt == 0)) {
10321					/*
10322					 * There is a high probability that we
10323					 * will find a free one. Search some
10324					 * buckets for a free hmeblk initially
10325					 * before unloading a valid hmeblk.
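					 *
					 * (BUCKETS_TO_SEARCH_BEFORE_UNLOAD
					 * bounds that initial free-only
					 * sweep; beyond it we will unload
					 * a valid hmeblk if we must.)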
10326					 */
10327					if ((hmeblkp->hblk_vcnt == 0 &&
10328					    hmeblkp->hblk_hmecnt == 0) || (i >=
10329					    BUCKETS_TO_SEARCH_BEFORE_UNLOAD)) {
10330						if (sfmmu_steal_this_hblk(hmebp,
10331						    hmeblkp, hblkpa, prevpa,
10332						    pr_hblk)) {
10333							/*
10334							 * Hblk is unloaded
10335							 * successfully
10336							 */
10337							break;
10338						}
10339					}
10340				}
10341				pr_hblk = hmeblkp;
10342				prevpa = hblkpa;
10343				hblkpa = hmeblkp->hblk_nextpa;
10344				hmeblkp = hmeblkp->hblk_next;
10345			}
10346
10347			SFMMU_HASH_UNLOCK(hmebp);
10348			if (hmebp++ == &uhme_hash[UHMEHASH_SZ])
10349				hmebp = uhme_hash;
10350		}
10351		uhmehash_steal_hand = hmebp;
10352
10353		if (hmeblkp != NULL)
10354			break;
10355
10356		/*
10357		 * in the worst case, look for a free one in the kernel
10358		 * hash table.
10359		 */
10360		for (i = 0, hmebp = khme_hash; i <= KHMEHASH_SZ; i++) {
10361			SFMMU_HASH_LOCK(hmebp);
10362			hmeblkp = hmebp->hmeblkp;
10363			hblkpa = hmebp->hmeh_nextpa;
10364			prevpa = 0;
10365			pr_hblk = NULL;
10366			while (hmeblkp) {
10367				/*
10368				 * check if it is a free hmeblk
10369				 */
10370				if ((get_hblk_ttesz(hmeblkp) == size) &&
10371				    (hmeblkp->hblk_lckcnt == 0) &&
10372				    (hmeblkp->hblk_vcnt == 0) &&
10373				    (hmeblkp->hblk_hmecnt == 0)) {
10374					if (sfmmu_steal_this_hblk(hmebp,
10375					    hmeblkp, hblkpa, prevpa, pr_hblk)) {
10376						break;
10377					} else {
10378						/*
10379						 * Cannot fail since we have
10380						 * hash lock.
10381						 */
10382						panic("fail to steal?");
10383					}
10384				}
10385
10386				pr_hblk = hmeblkp;
10387				prevpa = hblkpa;
10388				hblkpa = hmeblkp->hblk_nextpa;
10389				hmeblkp = hmeblkp->hblk_next;
10390			}
10391
10392			SFMMU_HASH_UNLOCK(hmebp);
10393			if (hmebp++ == &khme_hash[KHMEHASH_SZ])
10394				hmebp = khme_hash;
10395		}
10396
10397		if (hmeblkp != NULL)
10398			break;
10399		sfmmu_hblk_steal_twice++;
10400	}
10401	return (hmeblkp);
10402 }
10403
10404 /*
10405  * This routine does the real work of preparing an hblk to be "stolen":
10406  * unloading the mappings, updating the shadow counts, and so on.
10407  * It returns 1 if the block is ready to be reused (stolen), or 0 if
10408  * the block cannot be stolen yet because pageunload is still working
10409  * on this hblk.
10410  */
10411 static int
10412 sfmmu_steal_this_hblk(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
10413	uint64_t hblkpa, uint64_t prevpa, struct hme_blk *pr_hblk)
10414 {
10415	int shw_size, vshift;
10416	struct hme_blk *shw_hblkp;
10417	uintptr_t vaddr;
10418	uint_t shw_mask, newshw_mask;
10419
10420	ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
10421
10422	/*
10423	 * check if the hmeblk is free, unload if necessary
10424	 */
10425	if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
10426		sfmmu_t *sfmmup;
10427		demap_range_t dmr;
10428
10429		sfmmup = hblktosfmmu(hmeblkp);
10430		DEMAP_RANGE_INIT(sfmmup, &dmr);
10431		(void) sfmmu_hblk_unload(sfmmup, hmeblkp,
10432		    (caddr_t)get_hblk_base(hmeblkp),
10433		    get_hblk_endaddr(hmeblkp), &dmr, HAT_UNLOAD);
10434		DEMAP_RANGE_FLUSH(&dmr);
10435		if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
10436			/*
10437			 * Pageunload is working on the same hblk.
10438			 */
10439			return (0);
10440		}
10441
10442		sfmmu_hblk_steal_unload_count++;
10443	}
10444
10445	ASSERT(hmeblkp->hblk_lckcnt == 0);
10446	ASSERT(hmeblkp->hblk_vcnt == 0 && hmeblkp->hblk_hmecnt == 0);
10447
10448	sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, pr_hblk);
10449	hmeblkp->hblk_nextpa = hblkpa;
10450
10451	shw_hblkp = hmeblkp->hblk_shadow;
10452	if (shw_hblkp) {
10453		shw_size = get_hblk_ttesz(shw_hblkp);
10454		vaddr = get_hblk_base(hmeblkp);
10455		vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size);
10456		ASSERT(vshift < 8);
10457		/*
10458		 * Atomically clear shadow mask bit
10459		 */
10460		do {
10461			shw_mask = shw_hblkp->hblk_shw_mask;
10462			ASSERT(shw_mask & (1 << vshift));
10463			newshw_mask = shw_mask & ~(1 << vshift);
10464			newshw_mask = cas32(&shw_hblkp->hblk_shw_mask,
10465			    shw_mask, newshw_mask);
10466		} while (newshw_mask != shw_mask);
10467		hmeblkp->hblk_shadow = NULL;
10468	}
10469
10470	/*
10471	 * remove shadow bit if we are stealing an unused shadow hmeblk.
10472	 * sfmmu_hblk_alloc needs it that way; it will set the shadow bit
10473	 * later if we are indeed allocating a shadow hmeblk.
10474	 */
10475	hmeblkp->hblk_shw_bit = 0;
10476
10477	sfmmu_hblk_steal_count++;
10478	SFMMU_STAT(sf_steal_count);
10479
10480	return (1);
10481 }
10482
10483 struct hme_blk *
10484 sfmmu_hmetohblk(struct sf_hment *sfhme)
10485 {
10486	struct hme_blk *hmeblkp;
10487	struct sf_hment *sfhme0;
10488	struct hme_blk *hblk_dummy = 0;
10489
10490	/*
10491	 * No dummy sf_hments, please.
10492	 */
10493	ASSERT(sfhme->hme_tte.ll != 0);
10494
10495	sfhme0 = sfhme - sfhme->hme_tte.tte_hmenum;
10496	hmeblkp = (struct hme_blk *)((uintptr_t)sfhme0 -
10497	    (uintptr_t)&hblk_dummy->hblk_hme[0]);
10498
10499	return (hmeblkp);
10500 }
10501
10502 /*
10503  * Make sure that there is a valid ctx; if not, get one.
10504  * Also, get a reader lock on the ctx, so that the ctx cannot
10505  * be stolen underneath us.
10506  */
10507 static void
10508 sfmmu_disallow_ctx_steal(sfmmu_t *sfmmup)
10509 {
10510	struct ctx *ctx;
10511
10512	ASSERT(sfmmup != ksfmmup);
10513	ASSERT(sfmmup->sfmmu_ismhat == 0);
10514
10515	/*
10516	 * If ctx has been stolen, get a ctx.
10517	 */
10518	if (sfmmup->sfmmu_cnum == INVALID_CONTEXT) {
10519		/*
10520		 * Our ctx was stolen. Get a ctx with rlock.
10521		 */
10522		ctx = sfmmu_get_ctx(sfmmup);
10523		return;
10524	} else {
10525		ctx = sfmmutoctx(sfmmup);
10526	}
10527
10528	/*
10529	 * Get the reader lock.
10530	 */
10531	rw_enter(&ctx->ctx_rwlock, RW_READER);
10532	if (ctx->ctx_sfmmu != sfmmup) {
10533		/*
10534		 * The ctx got stolen, so spin again.
10535		 */
10536		rw_exit(&ctx->ctx_rwlock);
10537		ctx = sfmmu_get_ctx(sfmmup);
10538	}
10539
10540	ASSERT(sfmmup->sfmmu_cnum >= NUM_LOCKED_CTXS);
10541 }
10542
10543 /*
10544  * Decrement reference count for our ctx. If the reference count
10545  * becomes 0, our ctx can be stolen by someone.
10546  */
10547 static void
10548 sfmmu_allow_ctx_steal(sfmmu_t *sfmmup)
10549 {
10550	struct ctx *ctx;
10551
10552	ASSERT(sfmmup != ksfmmup);
10553	ASSERT(sfmmup->sfmmu_ismhat == 0);
10554	ctx = sfmmutoctx(sfmmup);
10555
10556	ASSERT(sfmmup == ctx->ctx_sfmmu);
10557	ASSERT(sfmmup->sfmmu_cnum != INVALID_CONTEXT);
10558	rw_exit(&ctx->ctx_rwlock);
10559 }
10560
10561 /*
10562  * On swapin, get appropriately sized TSB(s) and clear the HAT_SWAPPED flag.
10563  * If we can't get appropriately sized TSB(s), try for 8K TSB(s) using
10564  * KM_SLEEP allocation.
10565  *
10566  * (No return value; the 8K fallback uses TSB_FORCEALLOC and cannot fail.)
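 *
 * A minimal sketch of the expected call pattern (this mirrors the real
 * caller, sfmmu_tsbmiss_exception(), below):
 *
 *	hatlockp = sfmmu_hat_enter(sfmmup);
 *	...
 *	if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED))
 *		sfmmu_tsb_swapin(sfmmup, hatlockp);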
10567 */ 10568 static void 10569 sfmmu_tsb_swapin(sfmmu_t *sfmmup, hatlock_t *hatlockp) 10570 { 10571 struct tsb_info *tsbinfop, *next; 10572 tsb_replace_rc_t rc; 10573 boolean_t gotfirst = B_FALSE; 10574 10575 ASSERT(sfmmup != ksfmmup); 10576 ASSERT(sfmmu_hat_lock_held(sfmmup)); 10577 10578 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPIN)) { 10579 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp)); 10580 } 10581 10582 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 10583 SFMMU_FLAGS_SET(sfmmup, HAT_SWAPIN); 10584 } else { 10585 return; 10586 } 10587 10588 ASSERT(sfmmup->sfmmu_tsb != NULL); 10589 10590 /* 10591 * Loop over all tsbinfo's replacing them with ones that actually have 10592 * a TSB. If any of the replacements ever fail, bail out of the loop. 10593 */ 10594 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; tsbinfop = next) { 10595 ASSERT(tsbinfop->tsb_flags & TSB_SWAPPED); 10596 next = tsbinfop->tsb_next; 10597 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, tsbinfop->tsb_szc, 10598 hatlockp, TSB_SWAPIN); 10599 if (rc != TSB_SUCCESS) { 10600 break; 10601 } 10602 gotfirst = B_TRUE; 10603 } 10604 10605 switch (rc) { 10606 case TSB_SUCCESS: 10607 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN); 10608 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 10609 return; 10610 case TSB_ALLOCFAIL: 10611 break; 10612 default: 10613 panic("sfmmu_replace_tsb returned unrecognized failure code " 10614 "%d", rc); 10615 } 10616 10617 /* 10618 * In this case, we failed to get one of our TSBs. If we failed to 10619 * get the first TSB, get one of minimum size (8KB). Walk the list 10620 * and throw away the tsbinfos, starting where the allocation failed; 10621 * we can get by with just one TSB as long as we don't leave the 10622 * SWAPPED tsbinfo structures lying around. 10623 */ 10624 tsbinfop = sfmmup->sfmmu_tsb; 10625 next = tsbinfop->tsb_next; 10626 tsbinfop->tsb_next = NULL; 10627 10628 sfmmu_hat_exit(hatlockp); 10629 for (tsbinfop = next; tsbinfop != NULL; tsbinfop = next) { 10630 next = tsbinfop->tsb_next; 10631 sfmmu_tsbinfo_free(tsbinfop); 10632 } 10633 hatlockp = sfmmu_hat_enter(sfmmup); 10634 10635 /* 10636 * If we don't have any TSBs, get a single 8K TSB for 8K, 64K and 512K 10637 * pages. 10638 */ 10639 if (!gotfirst) { 10640 tsbinfop = sfmmup->sfmmu_tsb; 10641 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, TSB_MIN_SZCODE, 10642 hatlockp, TSB_SWAPIN | TSB_FORCEALLOC); 10643 ASSERT(rc == TSB_SUCCESS); 10644 } 10645 10646 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN); 10647 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 10648 } 10649 10650 /* 10651 * Handle exceptions for low level tsb_handler. 10652 * 10653 * There are many scenarios that could land us here: 10654 * 10655 * 1) Process has no context. In this case, ctx is 10656 * INVALID_CONTEXT and sfmmup->sfmmu_cnum == 1 so 10657 * we will acquire a context before returning. 10658 * 2) Need to re-load our MMU state. In this case, 10659 * ctx is INVALID_CONTEXT and sfmmup->sfmmu_cnum != 1. 10660 * 3) ISM mappings are being updated. This is handled 10661 * just like case #2. 10662 * 4) We wish to program a new page size into the TLB. 10663 * This is handled just like case #1, since changing 10664 * TLB page size requires us to flush the TLB. 10665 * 5) Window fault and no valid translation found. 10666 * 10667 * Cases 1-4, ctx is INVALID_CONTEXT so we handle it and then 10668 * exit which will retry the trapped instruction. Case #5 we 10669 * punt to trap() which will raise us a trap level and handle 10670 * the fault before unwinding. 
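 *
 * In outline, the dispatch below (a restatement of the code, not
 * additional logic):
 *
 *	if (ctxnum == INVALID_CONTEXT ||
 *	    SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED))
 *		fix up the TSB(s), ctx and MMU state, then (in the
 *		common case) return to retry the trapped instruction;
 *	else
 *		trap(rp, (caddr_t)tagaccess, traptype, 0);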
10671 * 10672 * Note that the process will run in INVALID_CONTEXT before 10673 * faulting into here and subsequently loading the MMU registers 10674 * (including the TSB base register) associated with this process. 10675 * For this reason, the trap handlers must all test for 10676 * INVALID_CONTEXT before attempting to access any registers other 10677 * than the context registers. 10678 */ 10679 void 10680 sfmmu_tsbmiss_exception(struct regs *rp, uintptr_t tagaccess, uint_t traptype) 10681 { 10682 sfmmu_t *sfmmup; 10683 uint_t ctxnum; 10684 klwp_id_t lwp; 10685 char lwp_save_state; 10686 hatlock_t *hatlockp; 10687 struct tsb_info *tsbinfop; 10688 10689 SFMMU_STAT(sf_tsb_exceptions); 10690 sfmmup = astosfmmu(curthread->t_procp->p_as); 10691 ctxnum = tagaccess & TAGACC_CTX_MASK; 10692 10693 ASSERT(sfmmup != ksfmmup && ctxnum != KCONTEXT); 10694 ASSERT(sfmmup->sfmmu_ismhat == 0); 10695 /* 10696 * First, make sure we come out of here with a valid ctx, 10697 * since if we don't get one we'll simply loop on the 10698 * faulting instruction. 10699 * 10700 * If the ISM mappings are changing, the TSB is being relocated, or 10701 * the process is swapped out we serialize behind the controlling 10702 * thread with the sfmmu_flags and sfmmu_tsb_cv condition variable. 10703 * Otherwise we synchronize with the context stealer or the thread 10704 * that required us to change out our MMU registers (such 10705 * as a thread changing out our TSB while we were running) by 10706 * locking the HAT and grabbing the rwlock on the context as a 10707 * reader temporarily. 10708 */ 10709 if (ctxnum == INVALID_CONTEXT || 10710 SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 10711 /* 10712 * Must set lwp state to LWP_SYS before 10713 * trying to acquire any adaptive lock 10714 */ 10715 lwp = ttolwp(curthread); 10716 ASSERT(lwp); 10717 lwp_save_state = lwp->lwp_state; 10718 lwp->lwp_state = LWP_SYS; 10719 10720 hatlockp = sfmmu_hat_enter(sfmmup); 10721 retry: 10722 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; 10723 tsbinfop = tsbinfop->tsb_next) { 10724 if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) { 10725 cv_wait(&sfmmup->sfmmu_tsb_cv, 10726 HATLOCK_MUTEXP(hatlockp)); 10727 goto retry; 10728 } 10729 } 10730 10731 /* 10732 * Wait for ISM maps to be updated. 10733 */ 10734 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) { 10735 cv_wait(&sfmmup->sfmmu_tsb_cv, 10736 HATLOCK_MUTEXP(hatlockp)); 10737 goto retry; 10738 } 10739 10740 /* 10741 * If we're swapping in, get TSB(s). Note that we must do 10742 * this before we get a ctx or load the MMU state. Once 10743 * we swap in we have to recheck to make sure the TSB(s) and 10744 * ISM mappings didn't change while we slept. 10745 */ 10746 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 10747 sfmmu_tsb_swapin(sfmmup, hatlockp); 10748 goto retry; 10749 } 10750 10751 sfmmu_disallow_ctx_steal(sfmmup); 10752 ctxnum = sfmmup->sfmmu_cnum; 10753 kpreempt_disable(); 10754 sfmmu_setctx_sec(ctxnum); 10755 sfmmu_load_mmustate(sfmmup); 10756 kpreempt_enable(); 10757 sfmmu_allow_ctx_steal(sfmmup); 10758 sfmmu_hat_exit(hatlockp); 10759 /* 10760 * Must restore lwp_state if not calling 10761 * trap() for further processing. Restore 10762 * it anyway. 
10763		 */
10764		lwp->lwp_state = lwp_save_state;
10765		if (sfmmup->sfmmu_ttecnt[TTE8K] != 0 ||
10766		    sfmmup->sfmmu_ttecnt[TTE64K] != 0 ||
10767		    sfmmup->sfmmu_ttecnt[TTE512K] != 0 ||
10768		    sfmmup->sfmmu_ttecnt[TTE4M] != 0 ||
10769		    sfmmup->sfmmu_ttecnt[TTE32M] != 0 ||
10770		    sfmmup->sfmmu_ttecnt[TTE256M] != 0) {
10771			return;
10772		}
10773		if (traptype == T_DATA_PROT) {
10774			traptype = T_DATA_MMU_MISS;
10775		}
10776	}
10777	trap(rp, (caddr_t)tagaccess, traptype, 0);
10778 }
10779
10780 /*
10781  * sfmmu_vatopfn_suspended is called from GET_TTE when TL=0 and the
10782  * TTE_SUSPENDED bit is set in the tte. We block on acquiring a page
10783  * lock rather than spinning, to avoid send-mondo timeouts with
10784  * interrupts enabled. When the lock is acquired it is immediately
10785  * released and we return back to sfmmu_vatopfn just after
10786  * the GET_TTE call.
10787  */
10788 void
10789 sfmmu_vatopfn_suspended(caddr_t vaddr, sfmmu_t *sfmmu, tte_t *ttep)
10790 {
10791	struct page **pp;
10792
10793	(void) as_pagelock(sfmmu->sfmmu_as, &pp, vaddr, TTE_CSZ(ttep), S_WRITE);
10794	as_pageunlock(sfmmu->sfmmu_as, pp, vaddr, TTE_CSZ(ttep), S_WRITE);
10795 }
10796
10797 /*
10798  * sfmmu_tsbmiss_suspended is called from GET_TTE when TL>0 and the
10799  * TTE_SUSPENDED bit is set in the tte. We do this so that we can handle
10800  * cross traps which cannot be handled while spinning in the
10801  * trap handlers. Simply enter and exit the kpr_suspendlock spin
10802  * mutex, which is held by the holder of the suspend bit, and then
10803  * retry the trapped instruction after unwinding.
10804  */
10805 /*ARGSUSED*/
10806 void
10807 sfmmu_tsbmiss_suspended(struct regs *rp, uintptr_t tagacc, uint_t traptype)
10808 {
10809	ASSERT(curthread != kreloc_thread);
10810	mutex_enter(&kpr_suspendlock);
10811	mutex_exit(&kpr_suspendlock);
10812 }
10813
10814 /*
10815  * Special routine to flush out ism mappings: TSBs, TLBs and D-caches.
10816  * This routine may be called with all cpu's captured. Therefore, the
10817  * caller is responsible for holding all locks and disabling kernel
10818  * preemption.
10819  */
10820 /* ARGSUSED */
10821 static void
10822 sfmmu_ismtlbcache_demap(caddr_t addr, sfmmu_t *ism_sfmmup,
10823	struct hme_blk *hmeblkp, pfn_t pfnum, int cache_flush_flag)
10824 {
10825	cpuset_t cpuset;
10826	caddr_t va;
10827	ism_ment_t *ment;
10828	sfmmu_t *sfmmup;
10829	int ctxnum;
10830	int vcolor;
10831	int ttesz;
10832
10833	/*
10834	 * Walk the ism_hat's mapping list and flush the page
10835	 * from every hat sharing this ism_hat. This routine
10836	 * may be called while all cpu's have been captured.
10837	 * Therefore we can't attempt to grab any locks. For now
10838	 * this means we will protect the ism mapping list under
10839	 * a single lock which will be grabbed by the caller.
10840	 * If hat_share/unshare scalability becomes a performance
10841	 * problem then we may need to re-think ism mapping list locking.
10842	 */
10843	ASSERT(ism_sfmmup->sfmmu_ismhat);
10844	ASSERT(MUTEX_HELD(&ism_mlist_lock));
10845	addr = addr - ISMID_STARTADDR;
10846	for (ment = ism_sfmmup->sfmmu_iment; ment; ment = ment->iment_next) {
10847
10848		sfmmup = ment->iment_hat;
10849		ctxnum = sfmmup->sfmmu_cnum;
10850		va = ment->iment_base_va;
10851		va = (caddr_t)((uintptr_t)va + (uintptr_t)addr);
10852
10853		/*
10854		 * Flush TSB of ISM mappings.
10855 */ 10856 ttesz = get_hblk_ttesz(hmeblkp); 10857 if (ttesz == TTE8K || ttesz == TTE4M) { 10858 sfmmu_unload_tsb(sfmmup, va, ttesz); 10859 } else { 10860 caddr_t sva = va; 10861 caddr_t eva; 10862 ASSERT(addr == (caddr_t)get_hblk_base(hmeblkp)); 10863 eva = sva + get_hblk_span(hmeblkp); 10864 sfmmu_unload_tsb_range(sfmmup, sva, eva, ttesz); 10865 } 10866 10867 if (ctxnum != INVALID_CONTEXT) { 10868 /* 10869 * Flush TLBs. We don't need to do this for 10870 * invalid context since the flushing is already 10871 * done as part of context stealing. 10872 */ 10873 cpuset = sfmmup->sfmmu_cpusran; 10874 CPUSET_AND(cpuset, cpu_ready_set); 10875 CPUSET_DEL(cpuset, CPU->cpu_id); 10876 SFMMU_XCALL_STATS(ctxnum); 10877 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)va, 10878 ctxnum); 10879 vtag_flushpage(va, ctxnum); 10880 } 10881 10882 /* 10883 * Flush D$ 10884 * When flushing D$ we must flush all 10885 * cpu's. See sfmmu_cache_flush(). 10886 */ 10887 if (cache_flush_flag == CACHE_FLUSH) { 10888 cpuset = cpu_ready_set; 10889 CPUSET_DEL(cpuset, CPU->cpu_id); 10890 SFMMU_XCALL_STATS(ctxnum); 10891 vcolor = addr_to_vcolor(va); 10892 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor); 10893 vac_flushpage(pfnum, vcolor); 10894 } 10895 } 10896 } 10897 10898 /* 10899 * Demaps the TSB, CPU caches, and flushes all TLBs on all CPUs of 10900 * a particular virtual address and ctx. If noflush is set we do not 10901 * flush the TLB/TSB. This function may or may not be called with the 10902 * HAT lock held. 10903 */ 10904 static void 10905 sfmmu_tlbcache_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp, 10906 pfn_t pfnum, int tlb_noflush, int cpu_flag, int cache_flush_flag, 10907 int hat_lock_held) 10908 { 10909 int ctxnum, vcolor; 10910 cpuset_t cpuset; 10911 hatlock_t *hatlockp; 10912 10913 /* 10914 * There is no longer a need to protect against ctx being 10915 * stolen here since we don't store the ctx in the TSB anymore. 10916 */ 10917 vcolor = addr_to_vcolor(addr); 10918 10919 kpreempt_disable(); 10920 if (!tlb_noflush) { 10921 /* 10922 * Flush the TSB. 10923 */ 10924 if (!hat_lock_held) 10925 hatlockp = sfmmu_hat_enter(sfmmup); 10926 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp); 10927 ctxnum = (int)sfmmutoctxnum(sfmmup); 10928 if (!hat_lock_held) 10929 sfmmu_hat_exit(hatlockp); 10930 10931 if (ctxnum != INVALID_CONTEXT) { 10932 /* 10933 * Flush TLBs. We don't need to do this if our 10934 * context is invalid context. Since we hold the 10935 * HAT lock the context must have been stolen and 10936 * hence will be flushed before re-use. 10937 */ 10938 cpuset = sfmmup->sfmmu_cpusran; 10939 CPUSET_AND(cpuset, cpu_ready_set); 10940 CPUSET_DEL(cpuset, CPU->cpu_id); 10941 SFMMU_XCALL_STATS(ctxnum); 10942 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, 10943 ctxnum); 10944 vtag_flushpage(addr, ctxnum); 10945 } 10946 } 10947 10948 /* 10949 * Flush the D$ 10950 * 10951 * Even if the ctx is stolen, we need to flush the 10952 * cache. Our ctx stealer only flushes the TLBs. 
10953 */ 10954 if (cache_flush_flag == CACHE_FLUSH) { 10955 if (cpu_flag & FLUSH_ALL_CPUS) { 10956 cpuset = cpu_ready_set; 10957 } else { 10958 cpuset = sfmmup->sfmmu_cpusran; 10959 CPUSET_AND(cpuset, cpu_ready_set); 10960 } 10961 CPUSET_DEL(cpuset, CPU->cpu_id); 10962 SFMMU_XCALL_STATS(sfmmutoctxnum(sfmmup)); 10963 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor); 10964 vac_flushpage(pfnum, vcolor); 10965 } 10966 kpreempt_enable(); 10967 } 10968 10969 /* 10970 * Demaps the TSB and flushes all TLBs on all cpus for a particular virtual 10971 * address and ctx. If noflush is set we do not currently do anything. 10972 * This function may or may not be called with the HAT lock held. 10973 */ 10974 static void 10975 sfmmu_tlb_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp, 10976 int tlb_noflush, int hat_lock_held) 10977 { 10978 int ctxnum; 10979 cpuset_t cpuset; 10980 hatlock_t *hatlockp; 10981 10982 /* 10983 * If the process is exiting we have nothing to do. 10984 */ 10985 if (tlb_noflush) 10986 return; 10987 10988 /* 10989 * Flush TSB. 10990 */ 10991 if (!hat_lock_held) 10992 hatlockp = sfmmu_hat_enter(sfmmup); 10993 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp); 10994 ctxnum = sfmmutoctxnum(sfmmup); 10995 if (!hat_lock_held) 10996 sfmmu_hat_exit(hatlockp); 10997 10998 /* 10999 * Flush TLBs. We don't need to do this if our context is invalid 11000 * context. Since we hold the HAT lock the context must have been 11001 * stolen and hence will be flushed before re-use. 11002 */ 11003 if (ctxnum != INVALID_CONTEXT) { 11004 /* 11005 * There is no need to protect against ctx being stolen. 11006 * If the ctx is stolen we will simply get an extra flush. 11007 */ 11008 kpreempt_disable(); 11009 cpuset = sfmmup->sfmmu_cpusran; 11010 CPUSET_AND(cpuset, cpu_ready_set); 11011 CPUSET_DEL(cpuset, CPU->cpu_id); 11012 SFMMU_XCALL_STATS(ctxnum); 11013 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, ctxnum); 11014 vtag_flushpage(addr, ctxnum); 11015 kpreempt_enable(); 11016 } 11017 } 11018 11019 /* 11020 * Special case of sfmmu_tlb_demap for MMU_PAGESIZE hblks. Use the xcall 11021 * call handler that can flush a range of pages to save on xcalls. 11022 */ 11023 static int sfmmu_xcall_save; 11024 11025 static void 11026 sfmmu_tlb_range_demap(demap_range_t *dmrp) 11027 { 11028 sfmmu_t *sfmmup = dmrp->dmr_sfmmup; 11029 int ctxnum; 11030 hatlock_t *hatlockp; 11031 cpuset_t cpuset; 11032 uint64_t ctx_pgcnt; 11033 pgcnt_t pgcnt = 0; 11034 int pgunload = 0; 11035 int dirtypg = 0; 11036 caddr_t addr = dmrp->dmr_addr; 11037 caddr_t eaddr; 11038 uint64_t bitvec = dmrp->dmr_bitvec; 11039 11040 ASSERT(bitvec & 1); 11041 11042 /* 11043 * Flush TSB and calculate number of pages to flush. 11044 */ 11045 while (bitvec != 0) { 11046 dirtypg = 0; 11047 /* 11048 * Find the first page to flush and then count how many 11049 * pages there are after it that also need to be flushed. 11050 * This way the number of TSB flushes is minimized. 11051 */ 11052 while ((bitvec & 1) == 0) { 11053 pgcnt++; 11054 addr += MMU_PAGESIZE; 11055 bitvec >>= 1; 11056 } 11057 while (bitvec & 1) { 11058 dirtypg++; 11059 bitvec >>= 1; 11060 } 11061 eaddr = addr + ptob(dirtypg); 11062 hatlockp = sfmmu_hat_enter(sfmmup); 11063 sfmmu_unload_tsb_range(sfmmup, addr, eaddr, TTE8K); 11064 sfmmu_hat_exit(hatlockp); 11065 pgunload += dirtypg; 11066 addr = eaddr; 11067 pgcnt += dirtypg; 11068 } 11069 11070 /* 11071 * In the case where context is invalid context, bail. 
11072 * We hold the hat lock while checking the ctx to prevent 11073 * a race with sfmmu_replace_tsb() which temporarily sets 11074 * the ctx to INVALID_CONTEXT to force processes to enter 11075 * sfmmu_tsbmiss_exception(). 11076 */ 11077 hatlockp = sfmmu_hat_enter(sfmmup); 11078 ctxnum = sfmmutoctxnum(sfmmup); 11079 sfmmu_hat_exit(hatlockp); 11080 if (ctxnum == INVALID_CONTEXT) { 11081 dmrp->dmr_bitvec = 0; 11082 return; 11083 } 11084 11085 ASSERT((pgcnt<<MMU_PAGESHIFT) <= dmrp->dmr_endaddr - dmrp->dmr_addr); 11086 if (sfmmup->sfmmu_free == 0) { 11087 addr = dmrp->dmr_addr; 11088 bitvec = dmrp->dmr_bitvec; 11089 ctx_pgcnt = (uint64_t)((ctxnum << 16) | pgcnt); 11090 kpreempt_disable(); 11091 cpuset = sfmmup->sfmmu_cpusran; 11092 CPUSET_AND(cpuset, cpu_ready_set); 11093 CPUSET_DEL(cpuset, CPU->cpu_id); 11094 SFMMU_XCALL_STATS(ctxnum); 11095 xt_some(cpuset, vtag_flush_pgcnt_tl1, (uint64_t)addr, 11096 ctx_pgcnt); 11097 for (; bitvec != 0; bitvec >>= 1) { 11098 if (bitvec & 1) 11099 vtag_flushpage(addr, ctxnum); 11100 addr += MMU_PAGESIZE; 11101 } 11102 kpreempt_enable(); 11103 sfmmu_xcall_save += (pgunload-1); 11104 } 11105 dmrp->dmr_bitvec = 0; 11106 } 11107 11108 /* 11109 * Flushes only TLB. 11110 */ 11111 static void 11112 sfmmu_tlb_ctx_demap(sfmmu_t *sfmmup) 11113 { 11114 int ctxnum; 11115 cpuset_t cpuset; 11116 11117 ctxnum = (int)sfmmutoctxnum(sfmmup); 11118 if (ctxnum == INVALID_CONTEXT) { 11119 /* 11120 * if ctx was stolen then simply return 11121 * whoever stole ctx is responsible for flush. 11122 */ 11123 return; 11124 } 11125 ASSERT(ctxnum != KCONTEXT); 11126 /* 11127 * There is no need to protect against ctx being stolen. If the 11128 * ctx is stolen we will simply get an extra flush. 11129 */ 11130 kpreempt_disable(); 11131 11132 cpuset = sfmmup->sfmmu_cpusran; 11133 CPUSET_DEL(cpuset, CPU->cpu_id); 11134 CPUSET_AND(cpuset, cpu_ready_set); 11135 SFMMU_XCALL_STATS(ctxnum); 11136 11137 /* 11138 * Flush TLB. 11139 * RFE: it might be worth delaying the TLB flush as well. In that 11140 * case each cpu would have to traverse the dirty list and flush 11141 * each one of those ctx from the TLB. 11142 */ 11143 vtag_flushctx(ctxnum); 11144 xt_some(cpuset, vtag_flushctx_tl1, ctxnum, 0); 11145 11146 kpreempt_enable(); 11147 SFMMU_STAT(sf_tlbflush_ctx); 11148 } 11149 11150 /* 11151 * Flushes all TLBs. 11152 */ 11153 static void 11154 sfmmu_tlb_all_demap(void) 11155 { 11156 cpuset_t cpuset; 11157 11158 /* 11159 * There is no need to protect against ctx being stolen. If the 11160 * ctx is stolen we will simply get an extra flush. 11161 */ 11162 kpreempt_disable(); 11163 11164 cpuset = cpu_ready_set; 11165 CPUSET_DEL(cpuset, CPU->cpu_id); 11166 /* LINTED: constant in conditional context */ 11167 SFMMU_XCALL_STATS(INVALID_CONTEXT); 11168 11169 vtag_flushall(); 11170 xt_some(cpuset, vtag_flushall_tl1, 0, 0); 11171 xt_sync(cpuset); 11172 11173 kpreempt_enable(); 11174 SFMMU_STAT(sf_tlbflush_all); 11175 } 11176 11177 /* 11178 * In cases where we need to synchronize with TLB/TSB miss trap 11179 * handlers, _and_ need to flush the TLB, it's a lot easier to 11180 * steal the context from the process and free it than to do a 11181 * special song and dance to keep things consistent for the 11182 * handlers. 11183 * 11184 * Since the process suddenly ends up without a context and our caller 11185 * holds the hat lock, threads that fault after this function is called 11186 * will pile up on the lock. We can then do whatever we need to 11187 * atomically from the context of the caller. 
The first blocked thread 11188 * to resume executing will get the process a new context, and the 11189 * process will resume executing. 11190 * 11191 * One added advantage of this approach is that on MMUs that 11192 * support a "flush all" operation, we will delay the flush until 11193 * we run out of contexts, and then flush the TLB one time. This 11194 * is rather rare, so it's a lot less expensive than making 8000 11195 * x-calls to flush the TLB 8000 times. Another is that we can do 11196 * all of this without pausing CPUs, due to some knowledge of how 11197 * resume() loads processes onto the processor; it sets the thread 11198 * into cpusran, and _then_ looks at cnum. Because we do things in 11199 * the reverse order here, we guarantee exactly one of the following 11200 * statements is always true: 11201 * 11202 * 1) Nobody is in resume() so we have nothing to worry about anyway. 11203 * 2) The thread in resume() isn't in cpusran when we do the xcall, 11204 * so we know when it does set itself it'll see cnum is 11205 * INVALID_CONTEXT. 11206 * 3) The thread in resume() is in cpusran, and already might have 11207 * looked at the old cnum. That's OK, because we'll xcall it 11208 * and, if necessary, flush the TLB along with the rest of the 11209 * crowd. 11210 */ 11211 static void 11212 sfmmu_tlb_swap_ctx(sfmmu_t *sfmmup, struct ctx *ctx) 11213 { 11214 cpuset_t cpuset; 11215 int cnum; 11216 11217 if (sfmmup->sfmmu_cnum == INVALID_CONTEXT) 11218 return; 11219 11220 SFMMU_STAT(sf_ctx_swap); 11221 11222 kpreempt_disable(); 11223 11224 ASSERT(rw_read_locked(&ctx->ctx_rwlock) == 0); 11225 ASSERT(ctx->ctx_sfmmu == sfmmup); 11226 11227 cnum = ctxtoctxnum(ctx); 11228 ASSERT(sfmmup->sfmmu_cnum == cnum); 11229 ASSERT(cnum >= NUM_LOCKED_CTXS); 11230 11231 sfmmup->sfmmu_cnum = INVALID_CONTEXT; 11232 membar_enter(); /* make sure visible on all CPUs */ 11233 ctx->ctx_sfmmu = NULL; 11234 11235 cpuset = sfmmup->sfmmu_cpusran; 11236 CPUSET_DEL(cpuset, CPU->cpu_id); 11237 CPUSET_AND(cpuset, cpu_ready_set); 11238 SFMMU_XCALL_STATS(cnum); 11239 11240 /* 11241 * Force anybody running this process on CPU 11242 * to enter sfmmu_tsbmiss_exception() on the 11243 * next TLB miss, synchronize behind us on 11244 * the HAT lock, and grab a new context. At 11245 * that point the new page size will become 11246 * active in the TLB for the new context. 11247 * See sfmmu_get_ctx() for details. 11248 */ 11249 if (delay_tlb_flush) { 11250 xt_some(cpuset, sfmmu_raise_tsb_exception, 11251 cnum, INVALID_CONTEXT); 11252 SFMMU_STAT(sf_tlbflush_deferred); 11253 } else { 11254 xt_some(cpuset, sfmmu_ctx_steal_tl1, cnum, INVALID_CONTEXT); 11255 vtag_flushctx(cnum); 11256 SFMMU_STAT(sf_tlbflush_ctx); 11257 } 11258 xt_sync(cpuset); 11259 11260 /* 11261 * If we just stole the ctx from the current 11262 * process on local CPU we need to invalidate 11263 * this CPU context as well. 11264 */ 11265 if (sfmmu_getctx_sec() == cnum) { 11266 sfmmu_setctx_sec(INVALID_CONTEXT); 11267 sfmmu_clear_utsbinfo(); 11268 } 11269 11270 kpreempt_enable(); 11271 11272 /* 11273 * Now put old ctx on the dirty list since we may not 11274 * have flushed the context out of the TLB. We'll let 11275 * the next guy who uses this ctx flush it instead. 11276 */ 11277 mutex_enter(&ctx_list_lock); 11278 CTX_SET_FLAGS(ctx, CTX_FREE_FLAG); 11279 ctx->ctx_free = ctxdirty; 11280 ctxdirty = ctx; 11281 mutex_exit(&ctx_list_lock); 11282 } 11283 11284 /* 11285 * We need to flush the cache in all cpus. 
It is possible that
11286  * a process referenced a page as cacheable but has since exited
11287  * and cleared the mapping list. We still need to flush it, but we
11288  * have no state, so flushing all cpus is the only alternative.
11289  */
11290 void
11291 sfmmu_cache_flush(pfn_t pfnum, int vcolor)
11292 {
11293	cpuset_t cpuset;
11294	int ctxnum = INVALID_CONTEXT;
11295
11296	kpreempt_disable();
11297	cpuset = cpu_ready_set;
11298	CPUSET_DEL(cpuset, CPU->cpu_id);
11299	SFMMU_XCALL_STATS(ctxnum);	/* account to any ctx */
11300	xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
11301	xt_sync(cpuset);
11302	vac_flushpage(pfnum, vcolor);
11303	kpreempt_enable();
11304 }
11305
11306 void
11307 sfmmu_cache_flushcolor(int vcolor, pfn_t pfnum)
11308 {
11309	cpuset_t cpuset;
11310	int ctxnum = INVALID_CONTEXT;
11311
11312	ASSERT(vcolor >= 0);
11313
11314	kpreempt_disable();
11315	cpuset = cpu_ready_set;
11316	CPUSET_DEL(cpuset, CPU->cpu_id);
11317	SFMMU_XCALL_STATS(ctxnum);	/* account to any ctx */
11318	xt_some(cpuset, vac_flushcolor_tl1, vcolor, pfnum);
11319	xt_sync(cpuset);
11320	vac_flushcolor(vcolor, pfnum);
11321	kpreempt_enable();
11322 }
11323
11324 /*
11325  * We need to prevent processes from accessing the TSB using a cached physical
11326  * address. It's alright if they try to access the TSB via virtual address
11327  * since they will just fault on that virtual address once the mapping has
11328  * been suspended.
11329  */
11330 #pragma weak sendmondo_in_recover
11331
11332 /* ARGSUSED */
11333 static int
11334 sfmmu_tsb_pre_relocator(caddr_t va, uint_t tsbsz, uint_t flags, void *tsbinfo)
11335 {
11336	hatlock_t *hatlockp;
11337	struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo;
11338	sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu;
11339	struct ctx *ctx;
11340	int cnum;
11341	extern uint32_t sendmondo_in_recover;
11342
11343	if (flags != HAT_PRESUSPEND)
11344		return (0);
11345
11346	hatlockp = sfmmu_hat_enter(sfmmup);
11347
11348	tsbinfop->tsb_flags |= TSB_RELOC_FLAG;
11349
11350	/*
11351	 * For Cheetah+ Erratum 25:
11352	 * Wait for any active recovery to finish. We can't risk
11353	 * relocating the TSB of the thread running mondo_recover_proc()
11354	 * since, if we did that, we would deadlock. The scenario we are
11355	 * trying to avoid is as follows:
11356	 *
11357	 *	THIS CPU			RECOVER CPU
11358	 *	--------			-----------
11359	 *					Begins recovery, walking through TSB
11360	 *	hat_pagesuspend() TSB TTE
11361	 *					TLB miss on TSB TTE, spins at TL1
11362	 *	xt_sync()
11363	 *		send_mondo_timeout()
11364	 *		mondo_recover_proc()
11365	 *		((deadlocked))
11366	 *
11367	 * The second half of the workaround is that mondo_recover_proc()
11368	 * checks to see if the tsb_info has the RELOC flag set, and if it
11369	 * does, it skips over that TSB without ever touching tsbinfop->tsb_va
11370	 * and hence avoiding the TLB miss that could result in a deadlock.
11371	 */
11372	if (&sendmondo_in_recover) {
11373		membar_enter();	/* make sure RELOC flag visible */
11374		while (sendmondo_in_recover) {
11375			drv_usecwait(1);
11376			membar_consumer();
11377		}
11378	}
11379
11380	ctx = sfmmutoctx(sfmmup);
11381	rw_enter(&ctx->ctx_rwlock, RW_WRITER);
11382	cnum = sfmmutoctxnum(sfmmup);
11383
11384	if (cnum != INVALID_CONTEXT) {
11385		/*
11386		 * Force all threads for this sfmmu to sfmmu_tsbmiss_exception
11387		 * on their next TLB miss.
11388 */ 11389 sfmmu_tlb_swap_ctx(sfmmup, ctx); 11390 } 11391 11392 rw_exit(&ctx->ctx_rwlock); 11393 11394 sfmmu_hat_exit(hatlockp); 11395 11396 return (0); 11397 } 11398 11399 /* ARGSUSED */ 11400 static int 11401 sfmmu_tsb_post_relocator(caddr_t va, uint_t tsbsz, uint_t flags, 11402 void *tsbinfo, pfn_t newpfn) 11403 { 11404 hatlock_t *hatlockp; 11405 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo; 11406 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu; 11407 11408 if (flags != HAT_POSTUNSUSPEND) 11409 return (0); 11410 11411 hatlockp = sfmmu_hat_enter(sfmmup); 11412 11413 SFMMU_STAT(sf_tsb_reloc); 11414 11415 /* 11416 * The process may have swapped out while we were relocating one 11417 * of its TSBs. If so, don't bother doing the setup since the 11418 * process can't be using the memory anymore. 11419 */ 11420 if ((tsbinfop->tsb_flags & TSB_SWAPPED) == 0) { 11421 ASSERT(va == tsbinfop->tsb_va); 11422 sfmmu_tsbinfo_setup_phys(tsbinfop, newpfn); 11423 sfmmu_setup_tsbinfo(sfmmup); 11424 11425 if (tsbinfop->tsb_flags & TSB_FLUSH_NEEDED) { 11426 sfmmu_inv_tsb(tsbinfop->tsb_va, 11427 TSB_BYTES(tsbinfop->tsb_szc)); 11428 tsbinfop->tsb_flags &= ~TSB_FLUSH_NEEDED; 11429 } 11430 } 11431 11432 membar_exit(); 11433 tsbinfop->tsb_flags &= ~TSB_RELOC_FLAG; 11434 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 11435 11436 sfmmu_hat_exit(hatlockp); 11437 11438 return (0); 11439 } 11440 11441 /* 11442 * Allocate and initialize a tsb_info structure. Note that we may or may not 11443 * allocate a TSB here, depending on the flags passed in. 11444 */ 11445 static int 11446 sfmmu_tsbinfo_alloc(struct tsb_info **tsbinfopp, int tsb_szc, int tte_sz_mask, 11447 uint_t flags, sfmmu_t *sfmmup) 11448 { 11449 int err; 11450 11451 *tsbinfopp = (struct tsb_info *)kmem_cache_alloc( 11452 sfmmu_tsbinfo_cache, KM_SLEEP); 11453 11454 if ((err = sfmmu_init_tsbinfo(*tsbinfopp, tte_sz_mask, 11455 tsb_szc, flags, sfmmup)) != 0) { 11456 kmem_cache_free(sfmmu_tsbinfo_cache, *tsbinfopp); 11457 SFMMU_STAT(sf_tsb_allocfail); 11458 *tsbinfopp = NULL; 11459 return (err); 11460 } 11461 SFMMU_STAT(sf_tsb_alloc); 11462 11463 /* 11464 * Bump the TSB size counters for this TSB size. 11465 */ 11466 (*(((int *)&sfmmu_tsbsize_stat) + tsb_szc))++; 11467 return (0); 11468 } 11469 11470 static void 11471 sfmmu_tsb_free(struct tsb_info *tsbinfo) 11472 { 11473 caddr_t tsbva = tsbinfo->tsb_va; 11474 uint_t tsb_size = TSB_BYTES(tsbinfo->tsb_szc); 11475 struct kmem_cache *kmem_cachep = tsbinfo->tsb_cache; 11476 vmem_t *vmp = tsbinfo->tsb_vmp; 11477 11478 /* 11479 * If we allocated this TSB from relocatable kernel memory, then we 11480 * need to uninstall the callback handler. 
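	 * (This undoes the hat_add_callback() registration performed
	 * in sfmmu_init_tsbinfo().)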
11481 */ 11482 if (tsbinfo->tsb_cache != sfmmu_tsb8k_cache) { 11483 uintptr_t slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT; 11484 caddr_t slab_vaddr = (caddr_t)((uintptr_t)tsbva & slab_mask); 11485 page_t **ppl; 11486 int ret; 11487 11488 ret = as_pagelock(&kas, &ppl, slab_vaddr, PAGESIZE, S_WRITE); 11489 ASSERT(ret == 0); 11490 hat_delete_callback(tsbva, (uint_t)tsb_size, (void *)tsbinfo, 11491 0); 11492 as_pageunlock(&kas, ppl, slab_vaddr, PAGESIZE, S_WRITE); 11493 } 11494 11495 if (kmem_cachep != NULL) { 11496 kmem_cache_free(kmem_cachep, tsbva); 11497 } else { 11498 vmem_xfree(vmp, (void *)tsbva, tsb_size); 11499 } 11500 tsbinfo->tsb_va = (caddr_t)0xbad00bad; 11501 atomic_add_64(&tsb_alloc_bytes, -(int64_t)tsb_size); 11502 } 11503 11504 static void 11505 sfmmu_tsbinfo_free(struct tsb_info *tsbinfo) 11506 { 11507 if ((tsbinfo->tsb_flags & TSB_SWAPPED) == 0) { 11508 sfmmu_tsb_free(tsbinfo); 11509 } 11510 kmem_cache_free(sfmmu_tsbinfo_cache, tsbinfo); 11511 11512 } 11513 11514 /* 11515 * Setup all the references to physical memory for this tsbinfo. 11516 * The underlying page(s) must be locked. 11517 */ 11518 static void 11519 sfmmu_tsbinfo_setup_phys(struct tsb_info *tsbinfo, pfn_t pfn) 11520 { 11521 ASSERT(pfn != PFN_INVALID); 11522 ASSERT(pfn == va_to_pfn(tsbinfo->tsb_va)); 11523 11524 #ifndef sun4v 11525 if (tsbinfo->tsb_szc == 0) { 11526 sfmmu_memtte(&tsbinfo->tsb_tte, pfn, 11527 PROT_WRITE|PROT_READ, TTE8K); 11528 } else { 11529 /* 11530 * Round down PA and use a large mapping; the handlers will 11531 * compute the TSB pointer at the correct offset into the 11532 * big virtual page. NOTE: this assumes all TSBs larger 11533 * than 8K must come from physically contiguous slabs of 11534 * size tsb_slab_size. 11535 */ 11536 sfmmu_memtte(&tsbinfo->tsb_tte, pfn & ~tsb_slab_mask, 11537 PROT_WRITE|PROT_READ, tsb_slab_ttesz); 11538 } 11539 tsbinfo->tsb_pa = ptob(pfn); 11540 11541 TTE_SET_LOCKED(&tsbinfo->tsb_tte); /* lock the tte into dtlb */ 11542 TTE_SET_MOD(&tsbinfo->tsb_tte); /* enable writes */ 11543 11544 ASSERT(TTE_IS_PRIVILEGED(&tsbinfo->tsb_tte)); 11545 ASSERT(TTE_IS_LOCKED(&tsbinfo->tsb_tte)); 11546 #else /* sun4v */ 11547 tsbinfo->tsb_pa = ptob(pfn); 11548 #endif /* sun4v */ 11549 } 11550 11551 11552 /* 11553 * Returns zero on success, ENOMEM if over the high water mark, 11554 * or EAGAIN if the caller needs to retry with a smaller TSB 11555 * size (or specify TSB_FORCEALLOC if the allocation can't fail). 11556 * 11557 * This call cannot fail to allocate a TSB if TSB_FORCEALLOC 11558 * is specified and the TSB requested is PAGESIZE, though it 11559 * may sleep waiting for memory if sufficient memory is not 11560 * available. 11561 */ 11562 static int 11563 sfmmu_init_tsbinfo(struct tsb_info *tsbinfo, int tteszmask, 11564 int tsbcode, uint_t flags, sfmmu_t *sfmmup) 11565 { 11566 caddr_t vaddr = NULL; 11567 caddr_t slab_vaddr; 11568 uintptr_t slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT; 11569 int tsbbytes = TSB_BYTES(tsbcode); 11570 int lowmem = 0; 11571 struct kmem_cache *kmem_cachep = NULL; 11572 vmem_t *vmp = NULL; 11573 lgrp_id_t lgrpid = LGRP_NONE; 11574 pfn_t pfn; 11575 uint_t cbflags = HAC_SLEEP; 11576 page_t **pplist; 11577 int ret; 11578 11579 if (flags & (TSB_FORCEALLOC | TSB_SWAPIN | TSB_GROW | TSB_SHRINK)) 11580 flags |= TSB_ALLOC; 11581 11582 ASSERT((flags & TSB_FORCEALLOC) == 0 || tsbcode == TSB_MIN_SZCODE); 11583 11584 tsbinfo->tsb_sfmmu = sfmmup; 11585 11586 /* 11587 * If not allocating a TSB, set up the tsbinfo, set TSB_SWAPPED, and 11588 * return. 
11589 */ 11590 if ((flags & TSB_ALLOC) == 0) { 11591 tsbinfo->tsb_szc = tsbcode; 11592 tsbinfo->tsb_ttesz_mask = tteszmask; 11593 tsbinfo->tsb_va = (caddr_t)0xbadbadbeef; 11594 tsbinfo->tsb_pa = -1; 11595 tsbinfo->tsb_tte.ll = 0; 11596 tsbinfo->tsb_next = NULL; 11597 tsbinfo->tsb_flags = TSB_SWAPPED; 11598 tsbinfo->tsb_cache = NULL; 11599 tsbinfo->tsb_vmp = NULL; 11600 return (0); 11601 } 11602 11603 #ifdef DEBUG 11604 /* 11605 * For debugging: 11606 * Randomly force allocation failures every tsb_alloc_mtbf 11607 * tries if TSB_FORCEALLOC is not specified. This will 11608 * return ENOMEM if tsb_alloc_mtbf is odd, or EAGAIN if 11609 * it is even, to allow testing of both failure paths... 11610 */ 11611 if (tsb_alloc_mtbf && ((flags & TSB_FORCEALLOC) == 0) && 11612 (tsb_alloc_count++ == tsb_alloc_mtbf)) { 11613 tsb_alloc_count = 0; 11614 tsb_alloc_fail_mtbf++; 11615 return ((tsb_alloc_mtbf & 1)? ENOMEM : EAGAIN); 11616 } 11617 #endif /* DEBUG */ 11618 11619 /* 11620 * Enforce high water mark if we are not doing a forced allocation 11621 * and are not shrinking a process' TSB. 11622 */ 11623 if ((flags & TSB_SHRINK) == 0 && 11624 (tsbbytes + tsb_alloc_bytes) > tsb_alloc_hiwater) { 11625 if ((flags & TSB_FORCEALLOC) == 0) 11626 return (ENOMEM); 11627 lowmem = 1; 11628 } 11629 11630 /* 11631 * Allocate from the correct location based upon the size of the TSB 11632 * compared to the base page size, and what memory conditions dictate. 11633 * Note we always do nonblocking allocations from the TSB arena since 11634 * we don't want memory fragmentation to cause processes to block 11635 * indefinitely waiting for memory; until the kernel algorithms that 11636 * coalesce large pages are improved this is our best option. 11637 * 11638 * Algorithm: 11639 * If allocating a "large" TSB (>8K), allocate from the 11640 * appropriate kmem_tsb_default_arena vmem arena 11641 * else if low on memory or the TSB_FORCEALLOC flag is set or 11642 * tsb_forceheap is set 11643 * Allocate from kernel heap via sfmmu_tsb8k_cache with 11644 * KM_SLEEP (never fails) 11645 * else 11646 * Allocate from appropriate sfmmu_tsb_cache with 11647 * KM_NOSLEEP 11648 * endif 11649 */ 11650 if (tsb_lgrp_affinity) 11651 lgrpid = lgrp_home_id(curthread); 11652 if (lgrpid == LGRP_NONE) 11653 lgrpid = 0; /* use lgrp of boot CPU */ 11654 11655 if (tsbbytes > MMU_PAGESIZE) { 11656 vmp = kmem_tsb_default_arena[lgrpid]; 11657 vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes, 0, 0, 11658 NULL, NULL, VM_NOSLEEP); 11659 #ifdef DEBUG 11660 } else if (lowmem || (flags & TSB_FORCEALLOC) || tsb_forceheap) { 11661 #else /* !DEBUG */ 11662 } else if (lowmem || (flags & TSB_FORCEALLOC)) { 11663 #endif /* DEBUG */ 11664 kmem_cachep = sfmmu_tsb8k_cache; 11665 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_SLEEP); 11666 ASSERT(vaddr != NULL); 11667 } else { 11668 kmem_cachep = sfmmu_tsb_cache[lgrpid]; 11669 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_NOSLEEP); 11670 } 11671 11672 tsbinfo->tsb_cache = kmem_cachep; 11673 tsbinfo->tsb_vmp = vmp; 11674 11675 if (vaddr == NULL) { 11676 return (EAGAIN); 11677 } 11678 11679 atomic_add_64(&tsb_alloc_bytes, (int64_t)tsbbytes); 11680 kmem_cachep = tsbinfo->tsb_cache; 11681 11682 /* 11683 * If we are allocating from outside the cage, then we need to 11684 * register a relocation callback handler. Note that for now 11685 * since pseudo mappings always hang off of the slab's root page, 11686 * we need only lock the first 8K of the TSB slab. This is a bit 11687 * hacky but it is good for performance. 
11688 */ 11689 if (kmem_cachep != sfmmu_tsb8k_cache) { 11690 slab_vaddr = (caddr_t)((uintptr_t)vaddr & slab_mask); 11691 ret = as_pagelock(&kas, &pplist, slab_vaddr, PAGESIZE, S_WRITE); 11692 ASSERT(ret == 0); 11693 ret = hat_add_callback(sfmmu_tsb_cb_id, vaddr, (uint_t)tsbbytes, 11694 cbflags, (void *)tsbinfo, &pfn); 11695 11696 /* 11697 * Need to free up resources if we could not successfully 11698 * add the callback function and return an error condition. 11699 */ 11700 if (ret != 0) { 11701 if (kmem_cachep) { 11702 kmem_cache_free(kmem_cachep, vaddr); 11703 } else { 11704 vmem_xfree(vmp, (void *)vaddr, tsbbytes); 11705 } 11706 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE, 11707 S_WRITE); 11708 return (EAGAIN); 11709 } 11710 } else { 11711 /* 11712 * Since allocation of 8K TSBs from heap is rare and occurs 11713 * during memory pressure we allocate them from permanent 11714 * memory rather than using callbacks to get the PFN. 11715 */ 11716 pfn = hat_getpfnum(kas.a_hat, vaddr); 11717 } 11718 11719 tsbinfo->tsb_va = vaddr; 11720 tsbinfo->tsb_szc = tsbcode; 11721 tsbinfo->tsb_ttesz_mask = tteszmask; 11722 tsbinfo->tsb_next = NULL; 11723 tsbinfo->tsb_flags = 0; 11724 11725 sfmmu_tsbinfo_setup_phys(tsbinfo, pfn); 11726 11727 if (kmem_cachep != sfmmu_tsb8k_cache) { 11728 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE, S_WRITE); 11729 } 11730 11731 sfmmu_inv_tsb(vaddr, tsbbytes); 11732 return (0); 11733 } 11734 11735 /* 11736 * Initialize per cpu tsb and per cpu tsbmiss_area 11737 */ 11738 void 11739 sfmmu_init_tsbs(void) 11740 { 11741 int i; 11742 struct tsbmiss *tsbmissp; 11743 struct kpmtsbm *kpmtsbmp; 11744 #ifndef sun4v 11745 extern int dcache_line_mask; 11746 #endif /* sun4v */ 11747 extern uint_t vac_colors; 11748 11749 /* 11750 * Init. tsb miss area. 11751 */ 11752 tsbmissp = tsbmiss_area; 11753 11754 for (i = 0; i < NCPU; tsbmissp++, i++) { 11755 /* 11756 * initialize the tsbmiss area. 11757 * Do this for all possible CPUs as some may be added 11758 * while the system is running. There is no cost to this. 11759 */ 11760 tsbmissp->ksfmmup = ksfmmup; 11761 #ifndef sun4v 11762 tsbmissp->dcache_line_mask = (uint16_t)dcache_line_mask; 11763 #endif /* sun4v */ 11764 tsbmissp->khashstart = 11765 (struct hmehash_bucket *)va_to_pa((caddr_t)khme_hash); 11766 tsbmissp->uhashstart = 11767 (struct hmehash_bucket *)va_to_pa((caddr_t)uhme_hash); 11768 tsbmissp->khashsz = khmehash_num; 11769 tsbmissp->uhashsz = uhmehash_num; 11770 } 11771 11772 if (kpm_enable == 0) 11773 return; 11774 11775 if (kpm_smallpages) { 11776 /* 11777 * If we're using base pagesize pages for seg_kpm 11778 * mappings, we use the kernel TSB since we can't afford 11779 * to allocate a second huge TSB for these mappings. 11780 */ 11781 kpm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base; 11782 kpm_tsbsz = ktsb_szcode; 11783 kpmsm_tsbbase = kpm_tsbbase; 11784 kpmsm_tsbsz = kpm_tsbsz; 11785 } else { 11786 /* 11787 * In VAC conflict case, just put the entries in the 11788 * kernel 8K indexed TSB for now so we can find them. 11789 * This could really be changed in the future if we feel 11790 * the need... 11791 */ 11792 kpmsm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base; 11793 kpmsm_tsbsz = ktsb_szcode; 11794 kpm_tsbbase = ktsb_phys? ktsb4m_pbase : (uint64_t)ktsb4m_base; 11795 kpm_tsbsz = ktsb4m_szcode; 11796 } 11797 11798 kpmtsbmp = kpmtsbm_area; 11799 for (i = 0; i < NCPU; kpmtsbmp++, i++) { 11800 /* 11801 * Initialize the kpmtsbm area. 
11802 * Do this for all possible CPUs as some may be added 11803 * while the system is running. There is no cost to this. 11804 */ 11805 kpmtsbmp->vbase = kpm_vbase; 11806 kpmtsbmp->vend = kpm_vbase + kpm_size * vac_colors; 11807 kpmtsbmp->sz_shift = kpm_size_shift; 11808 kpmtsbmp->kpmp_shift = kpmp_shift; 11809 kpmtsbmp->kpmp2pshft = (uchar_t)kpmp2pshft; 11810 if (kpm_smallpages == 0) { 11811 kpmtsbmp->kpmp_table_sz = kpmp_table_sz; 11812 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_table); 11813 } else { 11814 kpmtsbmp->kpmp_table_sz = kpmp_stable_sz; 11815 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_stable); 11816 } 11817 kpmtsbmp->msegphashpa = va_to_pa(memseg_phash); 11818 kpmtsbmp->flags = KPMTSBM_ENABLE_FLAG; 11819 #ifdef DEBUG 11820 kpmtsbmp->flags |= (kpm_tsbmtl) ? KPMTSBM_TLTSBM_FLAG : 0; 11821 #endif /* DEBUG */ 11822 if (ktsb_phys) 11823 kpmtsbmp->flags |= KPMTSBM_TSBPHYS_FLAG; 11824 } 11825 11826 sfmmu_tsb_cb_id = hat_register_callback('T'<<16 | 'S' << 8 | 'B', 11827 sfmmu_tsb_pre_relocator, sfmmu_tsb_post_relocator, NULL, 0); 11828 } 11829 11830 /* Avoid using sfmmu_tsbinfo_alloc() to avoid kmem_alloc - no real reason */ 11831 struct tsb_info ktsb_info[2]; 11832 11833 /* 11834 * Called from hat_kern_setup() to setup the tsb_info for ksfmmup. 11835 */ 11836 void 11837 sfmmu_init_ktsbinfo() 11838 { 11839 ASSERT(ksfmmup != NULL); 11840 ASSERT(ksfmmup->sfmmu_tsb == NULL); 11841 /* 11842 * Allocate tsbinfos for kernel and copy in data 11843 * to make debug easier and sun4v setup easier. 11844 */ 11845 ktsb_info[0].tsb_sfmmu = ksfmmup; 11846 ktsb_info[0].tsb_szc = ktsb_szcode; 11847 ktsb_info[0].tsb_ttesz_mask = TSB8K|TSB64K|TSB512K; 11848 ktsb_info[0].tsb_va = ktsb_base; 11849 ktsb_info[0].tsb_pa = ktsb_pbase; 11850 ktsb_info[0].tsb_flags = 0; 11851 ktsb_info[0].tsb_tte.ll = 0; 11852 ktsb_info[0].tsb_cache = NULL; 11853 11854 ktsb_info[1].tsb_sfmmu = ksfmmup; 11855 ktsb_info[1].tsb_szc = ktsb4m_szcode; 11856 ktsb_info[1].tsb_ttesz_mask = TSB4M; 11857 ktsb_info[1].tsb_va = ktsb4m_base; 11858 ktsb_info[1].tsb_pa = ktsb4m_pbase; 11859 ktsb_info[1].tsb_flags = 0; 11860 ktsb_info[1].tsb_tte.ll = 0; 11861 ktsb_info[1].tsb_cache = NULL; 11862 11863 /* Link them into ksfmmup. */ 11864 ktsb_info[0].tsb_next = &ktsb_info[1]; 11865 ktsb_info[1].tsb_next = NULL; 11866 ksfmmup->sfmmu_tsb = &ktsb_info[0]; 11867 11868 sfmmu_setup_tsbinfo(ksfmmup); 11869 } 11870 11871 /* 11872 * Cache the last value returned from va_to_pa(). If the VA specified 11873 * in the current call to cached_va_to_pa() maps to the same Page (as the 11874 * previous call to cached_va_to_pa()), then compute the PA using 11875 * cached info, else call va_to_pa(). 11876 * 11877 * Note: this function is neither MT-safe nor consistent in the presence 11878 * of multiple, interleaved threads. This function was created to enable 11879 * an optimization used during boot (at a point when there's only one thread 11880 * executing on the "boot CPU", and before startup_vm() has been called). 11881 */ 11882 static uint64_t 11883 cached_va_to_pa(void *vaddr) 11884 { 11885 static uint64_t prev_vaddr_base = 0; 11886 static uint64_t prev_pfn = 0; 11887 11888 if ((((uint64_t)vaddr) & MMU_PAGEMASK) == prev_vaddr_base) { 11889 return (prev_pfn | ((uint64_t)vaddr & MMU_PAGEOFFSET)); 11890 } else { 11891 uint64_t pa = va_to_pa(vaddr); 11892 11893 if (pa != ((uint64_t)-1)) { 11894 /* 11895 * Computed physical address is valid. Cache its 11896 * related info for the next cached_va_to_pa() call. 
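 * (Note that, despite its name, prev_pfn caches the page aligned
 * physical address, pa & MMU_PAGEMASK, rather than a page frame
 * number; the fast path above simply ORs the page offset back in.)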
11897 */ 11898 prev_pfn = pa & MMU_PAGEMASK; 11899 prev_vaddr_base = ((uint64_t)vaddr) & MMU_PAGEMASK; 11900 } 11901 11902 return (pa); 11903 } 11904 } 11905 11906 /* 11907 * Carve up our nucleus hblk region. We may allocate more hblks than 11908 * asked due to rounding errors but we are guaranteed to have at least 11909 * enough space to allocate the requested number of hblk8's and hblk1's. 11910 */ 11911 void 11912 sfmmu_init_nucleus_hblks(caddr_t addr, size_t size, int nhblk8, int nhblk1) 11913 { 11914 struct hme_blk *hmeblkp; 11915 size_t hme8blk_sz, hme1blk_sz; 11916 size_t i; 11917 size_t hblk8_bound; 11918 ulong_t j = 0, k = 0; 11919 11920 ASSERT(addr != NULL && size != 0); 11921 11922 /* Need to use proper structure alignment */ 11923 hme8blk_sz = roundup(HME8BLK_SZ, sizeof (int64_t)); 11924 hme1blk_sz = roundup(HME1BLK_SZ, sizeof (int64_t)); 11925 11926 nucleus_hblk8.list = (void *)addr; 11927 nucleus_hblk8.index = 0; 11928 11929 /* 11930 * Use as much memory as possible for hblk8's since we 11931 * expect all bop_alloc'ed memory to be allocated in 8k chunks. 11932 * We need to hold back enough space for the hblk1's which 11933 * we'll allocate next. 11934 */ 11935 hblk8_bound = size - (nhblk1 * hme1blk_sz) - hme8blk_sz; 11936 for (i = 0; i <= hblk8_bound; i += hme8blk_sz, j++) { 11937 hmeblkp = (struct hme_blk *)addr; 11938 addr += hme8blk_sz; 11939 hmeblkp->hblk_nuc_bit = 1; 11940 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp); 11941 } 11942 nucleus_hblk8.len = j; 11943 ASSERT(j >= nhblk8); 11944 SFMMU_STAT_ADD(sf_hblk8_ncreate, j); 11945 11946 nucleus_hblk1.list = (void *)addr; 11947 nucleus_hblk1.index = 0; 11948 for (; i <= (size - hme1blk_sz); i += hme1blk_sz, k++) { 11949 hmeblkp = (struct hme_blk *)addr; 11950 addr += hme1blk_sz; 11951 hmeblkp->hblk_nuc_bit = 1; 11952 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp); 11953 } 11954 ASSERT(k >= nhblk1); 11955 nucleus_hblk1.len = k; 11956 SFMMU_STAT_ADD(sf_hblk1_ncreate, k); 11957 } 11958 11959 /* 11960 * This function is currently not supported on this platform. For what 11961 * it's supposed to do, see hat.c and hat_srmmu.c 11962 */ 11963 /* ARGSUSED */ 11964 faultcode_t 11965 hat_softlock(struct hat *hat, caddr_t addr, size_t *lenp, page_t **ppp, 11966 uint_t flags) 11967 { 11968 ASSERT(hat->sfmmu_xhat_provider == NULL); 11969 return (FC_NOSUPPORT); 11970 } 11971 11972 /* 11973 * Searches the mapping list of the page for a mapping of the same size. If 11974 * none is found, the corresponding bit is cleared in the p_index field. When 11975 * large pages are more prevalent in the system, we can maintain the mapping 11976 * list in order and we don't have to traverse the list each time. Just check 11977 * the next and prev entries, and if both are of different size, we clear the bit. 11978 */ 11979 static void 11980 sfmmu_rm_large_mappings(page_t *pp, int ttesz) 11981 { 11982 struct sf_hment *sfhmep; 11983 struct hme_blk *hmeblkp; 11984 int index; 11985 pgcnt_t npgs; 11986 11987 ASSERT(ttesz > TTE8K); 11988 11989 ASSERT(sfmmu_mlist_held(pp)); 11990 11991 ASSERT(PP_ISMAPPED_LARGE(pp)); 11992 11993 /* 11994 * Traverse the mapping list looking for another mapping of the same 11995 * size, since we only want to clear the index field if all mappings 11996 * of that size are gone.
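 *
 * A worked example (sketch): when the last TTE512K mapping of a page
 * goes away, the loop below finds no other 512K sized mapping, and the
 * PAGESZ_TO_INDEX(TTE512K) bit is then cleared in p_index for each of
 * the TTEPAGES(TTE512K) == 64 constituent 8K pages.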
11997 */ 11998 11999 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 12000 hmeblkp = sfmmu_hmetohblk(sfhmep); 12001 if (hmeblkp->hblk_xhat_bit) 12002 continue; 12003 if (hme_size(sfhmep) == ttesz) { 12004 /* 12005 * another mapping of the same size. don't clear index. 12006 */ 12007 return; 12008 } 12009 } 12010 12011 /* 12012 * Clear the p_index bit for large page. 12013 */ 12014 index = PAGESZ_TO_INDEX(ttesz); 12015 npgs = TTEPAGES(ttesz); 12016 while (npgs-- > 0) { 12017 ASSERT(pp->p_index & index); 12018 pp->p_index &= ~index; 12019 pp = PP_PAGENEXT(pp); 12020 } 12021 } 12022 12023 /* 12024 * return supported features 12025 */ 12026 /* ARGSUSED */ 12027 int 12028 hat_supported(enum hat_features feature, void *arg) 12029 { 12030 switch (feature) { 12031 case HAT_SHARED_PT: 12032 case HAT_DYNAMIC_ISM_UNMAP: 12033 case HAT_VMODSORT: 12034 return (1); 12035 default: 12036 return (0); 12037 } 12038 } 12039 12040 void 12041 hat_enter(struct hat *hat) 12042 { 12043 hatlock_t *hatlockp; 12044 12045 if (hat != ksfmmup) { 12046 hatlockp = TSB_HASH(hat); 12047 mutex_enter(HATLOCK_MUTEXP(hatlockp)); 12048 } 12049 } 12050 12051 void 12052 hat_exit(struct hat *hat) 12053 { 12054 hatlock_t *hatlockp; 12055 12056 if (hat != ksfmmup) { 12057 hatlockp = TSB_HASH(hat); 12058 mutex_exit(HATLOCK_MUTEXP(hatlockp)); 12059 } 12060 } 12061 12062 /*ARGSUSED*/ 12063 void 12064 hat_reserve(struct as *as, caddr_t addr, size_t len) 12065 { 12066 } 12067 12068 static void 12069 hat_kstat_init(void) 12070 { 12071 kstat_t *ksp; 12072 12073 ksp = kstat_create("unix", 0, "sfmmu_global_stat", "hat", 12074 KSTAT_TYPE_RAW, sizeof (struct sfmmu_global_stat), 12075 KSTAT_FLAG_VIRTUAL); 12076 if (ksp) { 12077 ksp->ks_data = (void *) &sfmmu_global_stat; 12078 kstat_install(ksp); 12079 } 12080 ksp = kstat_create("unix", 0, "sfmmu_tsbsize_stat", "hat", 12081 KSTAT_TYPE_RAW, sizeof (struct sfmmu_tsbsize_stat), 12082 KSTAT_FLAG_VIRTUAL); 12083 if (ksp) { 12084 ksp->ks_data = (void *) &sfmmu_tsbsize_stat; 12085 kstat_install(ksp); 12086 } 12087 ksp = kstat_create("unix", 0, "sfmmu_percpu_stat", "hat", 12088 KSTAT_TYPE_RAW, sizeof (struct sfmmu_percpu_stat) * NCPU, 12089 KSTAT_FLAG_WRITABLE); 12090 if (ksp) { 12091 ksp->ks_update = sfmmu_kstat_percpu_update; 12092 kstat_install(ksp); 12093 } 12094 } 12095 12096 /* ARGSUSED */ 12097 static int 12098 sfmmu_kstat_percpu_update(kstat_t *ksp, int rw) 12099 { 12100 struct sfmmu_percpu_stat *cpu_kstat = ksp->ks_data; 12101 struct tsbmiss *tsbm = tsbmiss_area; 12102 struct kpmtsbm *kpmtsbm = kpmtsbm_area; 12103 int i; 12104 12105 ASSERT(cpu_kstat); 12106 if (rw == KSTAT_READ) { 12107 for (i = 0; i < NCPU; cpu_kstat++, tsbm++, kpmtsbm++, i++) { 12108 cpu_kstat->sf_itlb_misses = tsbm->itlb_misses; 12109 cpu_kstat->sf_dtlb_misses = tsbm->dtlb_misses; 12110 cpu_kstat->sf_utsb_misses = tsbm->utsb_misses - 12111 tsbm->uprot_traps; 12112 cpu_kstat->sf_ktsb_misses = tsbm->ktsb_misses + 12113 kpmtsbm->kpm_tsb_misses - tsbm->kprot_traps; 12114 12115 if (tsbm->itlb_misses > 0 && tsbm->dtlb_misses > 0) { 12116 cpu_kstat->sf_tsb_hits = 12117 (tsbm->itlb_misses + tsbm->dtlb_misses) - 12118 (tsbm->utsb_misses + tsbm->ktsb_misses + 12119 kpmtsbm->kpm_tsb_misses); 12120 } else { 12121 cpu_kstat->sf_tsb_hits = 0; 12122 } 12123 cpu_kstat->sf_umod_faults = tsbm->uprot_traps; 12124 cpu_kstat->sf_kmod_faults = tsbm->kprot_traps; 12125 } 12126 } else { 12127 /* KSTAT_WRITE is used to clear stats */ 12128 for (i = 0; i < NCPU; tsbm++, kpmtsbm++, i++) { 12129 tsbm->itlb_misses = 0; 12130 
tsbm->dtlb_misses = 0; 12131 tsbm->utsb_misses = 0; 12132 tsbm->ktsb_misses = 0; 12133 tsbm->uprot_traps = 0; 12134 tsbm->kprot_traps = 0; 12135 kpmtsbm->kpm_dtlb_misses = 0; 12136 kpmtsbm->kpm_tsb_misses = 0; 12137 } 12138 } 12139 return (0); 12140 } 12141 12142 #ifdef DEBUG 12143 12144 tte_t *gorig[NCPU], *gcur[NCPU], *gnew[NCPU]; 12145 12146 /* 12147 * A tte checker. *orig_old is the value we read before cas. 12148 * *cur is the value returned by cas. 12149 * *new is the desired value when we do the cas. 12150 * 12151 * *hmeblkp is currently unused. 12152 */ 12153 12154 /* ARGSUSED */ 12155 void 12156 chk_tte(tte_t *orig_old, tte_t *cur, tte_t *new, struct hme_blk *hmeblkp) 12157 { 12158 uint_t i, j, k; 12159 int cpuid = CPU->cpu_id; 12160 12161 gorig[cpuid] = orig_old; 12162 gcur[cpuid] = cur; 12163 gnew[cpuid] = new; 12164 12165 #ifdef lint 12166 hmeblkp = hmeblkp; 12167 #endif 12168 12169 if (TTE_IS_VALID(orig_old)) { 12170 if (TTE_IS_VALID(cur)) { 12171 i = TTE_TO_TTEPFN(orig_old); 12172 j = TTE_TO_TTEPFN(cur); 12173 k = TTE_TO_TTEPFN(new); 12174 if (i != j) { 12175 /* remap error? */ 12176 panic("chk_tte: bad pfn, 0x%x, 0x%x", 12177 i, j); 12178 } 12179 12180 if (i != k) { 12181 /* remap error? */ 12182 panic("chk_tte: bad pfn2, 0x%x, 0x%x", 12183 i, k); 12184 } 12185 } else { 12186 if (TTE_IS_VALID(new)) { 12187 panic("chk_tte: invalid cur? "); 12188 } 12189 12190 i = TTE_TO_TTEPFN(orig_old); 12191 k = TTE_TO_TTEPFN(new); 12192 if (i != k) { 12193 panic("chk_tte: bad pfn3, 0x%x, 0x%x", 12194 i, k); 12195 } 12196 } 12197 } else { 12198 if (TTE_IS_VALID(cur)) { 12199 j = TTE_TO_TTEPFN(cur); 12200 if (TTE_IS_VALID(new)) { 12201 k = TTE_TO_TTEPFN(new); 12202 if (j != k) { 12203 panic("chk_tte: bad pfn4, 0x%x, 0x%x", 12204 j, k); 12205 } 12206 } else { 12207 panic("chk_tte: why here?"); 12208 } 12209 } else { 12210 if (!TTE_IS_VALID(new)) { 12211 panic("chk_tte: why here2 ?"); 12212 } 12213 } 12214 } 12215 } 12216 12217 #endif /* DEBUG */ 12218 12219 extern void prefetch_tsbe_read(struct tsbe *); 12220 extern void prefetch_tsbe_write(struct tsbe *); 12221 12222 12223 /* 12224 * We want to prefetch 7 cache lines ahead for our read prefetch. This gives 12225 * us optimal performance on Cheetah+. You can only have 8 outstanding 12226 * prefetches at any one time, so we opted for 7 read prefetches and 1 write 12227 * prefetch to make the best use of the prefetch capability. 12228 */ 12229 #define TSBE_PREFETCH_STRIDE (7) 12230 12231 void 12232 sfmmu_copy_tsb(struct tsb_info *old_tsbinfo, struct tsb_info *new_tsbinfo) 12233 { 12234 int old_bytes = TSB_BYTES(old_tsbinfo->tsb_szc); 12235 int new_bytes = TSB_BYTES(new_tsbinfo->tsb_szc); 12236 int old_entries = TSB_ENTRIES(old_tsbinfo->tsb_szc); 12237 int new_entries = TSB_ENTRIES(new_tsbinfo->tsb_szc); 12238 struct tsbe *old; 12239 struct tsbe *new; 12240 struct tsbe *new_base = (struct tsbe *)new_tsbinfo->tsb_va; 12241 uint64_t va; 12242 int new_offset; 12243 int i; 12244 int vpshift; 12245 int last_prefetch; 12246 12247 if (old_bytes == new_bytes) { 12248 bcopy(old_tsbinfo->tsb_va, new_tsbinfo->tsb_va, new_bytes); 12249 } else { 12250 12251 /* 12252 * A TSBE is 16 bytes which means there are four TSBE's per 12253 * P$ line (64 bytes), thus we prefetch once every 4 TSBE's.
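 * Spelled out with the sizes above: a read prefetch is issued once
 * per cache line, i.e. on every 4th entry ((i & 3) == 0); it reaches
 * TSBE_PREFETCH_STRIDE == 7 lines (7 * 64 == 448 bytes, or 28 TSBEs)
 * ahead; and no prefetch is issued within the final
 * 4 * (TSBE_PREFETCH_STRIDE + 1) == 32 entries (last_prefetch below).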
12254 */ 12255 old = (struct tsbe *)old_tsbinfo->tsb_va; 12256 last_prefetch = old_entries - (4*(TSBE_PREFETCH_STRIDE+1)); 12257 for (i = 0; i < old_entries; i++, old++) { 12258 if (((i & (4-1)) == 0) && (i < last_prefetch)) 12259 prefetch_tsbe_read(old); 12260 if (!old->tte_tag.tag_invalid) { 12261 /* 12262 * We have a valid TTE to remap. Check the 12263 * size. We won't remap 64K or 512K TTEs 12264 * because they span more than one TSB entry 12265 * and are indexed using an 8K virt. page. 12266 * Ditto for 32M and 256M TTEs. 12267 */ 12268 if (TTE_CSZ(&old->tte_data) == TTE64K || 12269 TTE_CSZ(&old->tte_data) == TTE512K) 12270 continue; 12271 if (mmu_page_sizes == max_mmu_page_sizes) { 12272 if (TTE_CSZ(&old->tte_data) == TTE32M || 12273 TTE_CSZ(&old->tte_data) == TTE256M) 12274 continue; 12275 } 12276 12277 /* clear the lower 22 bits of the va */ 12278 va = *(uint64_t *)old << 22; 12279 /* turn va into a virtual pfn */ 12280 va >>= 22 - TSB_START_SIZE; 12281 /* 12282 * or in bits from the offset in the tsb 12283 * to get the real virtual pfn. These 12284 * correspond to bits [21:13] in the va 12285 */ 12286 vpshift = 12287 TTE_BSZS_SHIFT(TTE_CSZ(&old->tte_data)) & 12288 0x1ff; 12289 va |= (i << vpshift); 12290 va >>= vpshift; 12291 new_offset = va & (new_entries - 1); 12292 new = new_base + new_offset; 12293 prefetch_tsbe_write(new); 12294 *new = *old; 12295 } 12296 } 12297 } 12298 } 12299 12300 /* 12301 * Kernel Physical Mapping (kpm) facility 12302 */ 12303 12304 /* -- hat_kpm interface section -- */ 12305 12306 /* 12307 * Mapin a locked page and return the vaddr. 12308 * When a kpme is provided by the caller it is added to 12309 * the page p_kpmelist. The page to be mapped in must 12310 * be at least read locked (p_selock). 12311 */ 12312 caddr_t 12313 hat_kpm_mapin(struct page *pp, struct kpme *kpme) 12314 { 12315 kmutex_t *pml; 12316 caddr_t vaddr; 12317 12318 if (kpm_enable == 0) { 12319 cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set"); 12320 return ((caddr_t)NULL); 12321 } 12322 12323 if (pp == NULL || PAGE_LOCKED(pp) == 0) { 12324 cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked"); 12325 return ((caddr_t)NULL); 12326 } 12327 12328 pml = sfmmu_mlist_enter(pp); 12329 ASSERT(pp->p_kpmref >= 0); 12330 12331 vaddr = (pp->p_kpmref == 0) ? 12332 sfmmu_kpm_mapin(pp) : hat_kpm_page2va(pp, 1); 12333 12334 if (kpme != NULL) { 12335 /* 12336 * Tolerate multiple mapins for the same kpme to avoid 12337 * the need for an extra serialization. 12338 */ 12339 if ((sfmmu_kpme_lookup(kpme, pp)) == 0) 12340 sfmmu_kpme_add(kpme, pp); 12341 12342 ASSERT(pp->p_kpmref > 0); 12343 12344 } else { 12345 pp->p_kpmref++; 12346 } 12347 12348 sfmmu_mlist_exit(pml); 12349 return (vaddr); 12350 } 12351 12352 /* 12353 * Mapout a locked page. 12354 * When a kpme is provided by the caller it is removed from 12355 * the page p_kpmelist. The page to be mapped out must be at 12356 * least read locked (p_selock). 12357 * Note: The seg_kpm layer provides a mapout interface for the 12358 * case that a kpme is used and the underlying page is unlocked. 12359 * This can be used instead of calling this function directly. 
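 *
 * A minimal usage sketch of the mapin/mapout pair (hypothetical
 * caller with some buffer buf; pp must stay at least share locked
 * throughout):
 *
 *	caddr_t va = hat_kpm_mapin(pp, kpme);
 *	if (va != NULL) {
 *		bcopy(va, buf, PAGESIZE);
 *		hat_kpm_mapout(pp, kpme, va);
 *	}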
12360 */ 12361 void 12362 hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr) 12363 { 12364 kmutex_t *pml; 12365 12366 if (kpm_enable == 0) { 12367 cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set"); 12368 return; 12369 } 12370 12371 if (IS_KPM_ADDR(vaddr) == 0) { 12372 cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address"); 12373 return; 12374 } 12375 12376 if (pp == NULL || PAGE_LOCKED(pp) == 0) { 12377 cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked"); 12378 return; 12379 } 12380 12381 if (kpme != NULL) { 12382 ASSERT(pp == kpme->kpe_page); 12383 pp = kpme->kpe_page; 12384 pml = sfmmu_mlist_enter(pp); 12385 12386 if (sfmmu_kpme_lookup(kpme, pp) == 0) 12387 panic("hat_kpm_mapout: kpme not found pp=%p", 12388 (void *)pp); 12389 12390 ASSERT(pp->p_kpmref > 0); 12391 sfmmu_kpme_sub(kpme, pp); 12392 12393 } else { 12394 pml = sfmmu_mlist_enter(pp); 12395 pp->p_kpmref--; 12396 } 12397 12398 ASSERT(pp->p_kpmref >= 0); 12399 if (pp->p_kpmref == 0) 12400 sfmmu_kpm_mapout(pp, vaddr); 12401 12402 sfmmu_mlist_exit(pml); 12403 } 12404 12405 /* 12406 * Return the kpm virtual address for the page at pp. 12407 * If checkswap is nonzero and the page is backed by a 12408 * swap vnode the physical address is used rather than 12409 * p_offset to determine the kpm region. 12410 * Note: The function has to be used w/ extreme care. The 12411 * stability of the page identity is the responsibility 12412 * of the caller. 12413 */ 12414 caddr_t 12415 hat_kpm_page2va(struct page *pp, int checkswap) 12416 { 12417 int vcolor, vcolor_pa; 12418 uintptr_t paddr, vaddr; 12419 12420 ASSERT(kpm_enable); 12421 12422 paddr = ptob(pp->p_pagenum); 12423 vcolor_pa = addr_to_vcolor(paddr); 12424 12425 if (checkswap && pp->p_vnode && IS_SWAPFSVP(pp->p_vnode)) 12426 vcolor = (PP_ISNC(pp)) ? vcolor_pa : PP_GET_VCOLOR(pp); 12427 else 12428 vcolor = addr_to_vcolor(pp->p_offset); 12429 12430 vaddr = (uintptr_t)kpm_vbase + paddr; 12431 12432 if (vcolor_pa != vcolor) { 12433 vaddr += ((uintptr_t)(vcolor - vcolor_pa) << MMU_PAGESHIFT); 12434 vaddr += (vcolor_pa > vcolor) ? 12435 ((uintptr_t)vcolor_pa << kpm_size_shift) : 12436 ((uintptr_t)(vcolor - vcolor_pa) << kpm_size_shift); 12437 } 12438 12439 return ((caddr_t)vaddr); 12440 } 12441 12442 /* 12443 * Return the page for the kpm virtual address vaddr. 12444 * Caller is responsible for the kpm mapping and lock 12445 * state of the page.
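 * Roughly: SFMMU_KPM_VTOP() recovers the physical address backing
 * vaddr and page_numtopp_nolock() turns that pfn into its page_t;
 * since no locks are taken, the result is only as stable as the
 * caller can guarantee the page identity to be.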
12446 */ 12447 page_t * 12448 hat_kpm_vaddr2page(caddr_t vaddr) 12449 { 12450 uintptr_t paddr; 12451 pfn_t pfn; 12452 12453 ASSERT(IS_KPM_ADDR(vaddr)); 12454 12455 SFMMU_KPM_VTOP(vaddr, paddr); 12456 pfn = (pfn_t)btop(paddr); 12457 12458 return (page_numtopp_nolock(pfn)); 12459 } 12460 12461 /* page to kpm_page */ 12462 #define PP2KPMPG(pp, kp) { \ 12463 struct memseg *mseg; \ 12464 pgcnt_t inx; \ 12465 pfn_t pfn; \ 12466 \ 12467 pfn = pp->p_pagenum; \ 12468 mseg = page_numtomemseg_nolock(pfn); \ 12469 ASSERT(mseg); \ 12470 inx = ptokpmp(kpmptop(ptokpmp(pfn)) - mseg->kpm_pbase); \ 12471 ASSERT(inx < mseg->kpm_nkpmpgs); \ 12472 kp = &mseg->kpm_pages[inx]; \ 12473 } 12474 12475 /* page to kpm_spage */ 12476 #define PP2KPMSPG(pp, ksp) { \ 12477 struct memseg *mseg; \ 12478 pgcnt_t inx; \ 12479 pfn_t pfn; \ 12480 \ 12481 pfn = pp->p_pagenum; \ 12482 mseg = page_numtomemseg_nolock(pfn); \ 12483 ASSERT(mseg); \ 12484 inx = pfn - mseg->kpm_pbase; \ 12485 ksp = &mseg->kpm_spages[inx]; \ 12486 } 12487 12488 /* 12489 * hat_kpm_fault is called from segkpm_fault when a kpm tsbmiss occurred 12490 * which could not be resolved by the trap level tsbmiss handler for the 12491 * following reasons: 12492 * . The vaddr is in VAC alias range (always PAGESIZE mapping size). 12493 * . The kpm (s)page range of vaddr is in a VAC alias prevention state. 12494 * . tsbmiss handling at trap level is not desired (DEBUG kernel only, 12495 * kpm_tsbmtl == 0). 12496 */ 12497 int 12498 hat_kpm_fault(struct hat *hat, caddr_t vaddr) 12499 { 12500 int error; 12501 uintptr_t paddr; 12502 pfn_t pfn; 12503 struct memseg *mseg; 12504 page_t *pp; 12505 12506 if (kpm_enable == 0) { 12507 cmn_err(CE_WARN, "hat_kpm_fault: kpm_enable not set"); 12508 return (ENOTSUP); 12509 } 12510 12511 ASSERT(hat == ksfmmup); 12512 ASSERT(IS_KPM_ADDR(vaddr)); 12513 12514 SFMMU_KPM_VTOP(vaddr, paddr); 12515 pfn = (pfn_t)btop(paddr); 12516 mseg = page_numtomemseg_nolock(pfn); 12517 if (mseg == NULL) 12518 return (EFAULT); 12519 12520 pp = &mseg->pages[(pgcnt_t)(pfn - mseg->pages_base)]; 12521 ASSERT((pfn_t)pp->p_pagenum == pfn); 12522 12523 if (!PAGE_LOCKED(pp)) 12524 return (EFAULT); 12525 12526 if (kpm_smallpages == 0) 12527 error = sfmmu_kpm_fault(vaddr, mseg, pp); 12528 else 12529 error = sfmmu_kpm_fault_small(vaddr, mseg, pp); 12530 12531 return (error); 12532 } 12533 12534 extern krwlock_t memsegslock; 12535 12536 /* 12537 * memseg_hash[] was cleared, need to clear memseg_phash[] too. 12538 */ 12539 void 12540 hat_kpm_mseghash_clear(int nentries) 12541 { 12542 pgcnt_t i; 12543 12544 if (kpm_enable == 0) 12545 return; 12546 12547 for (i = 0; i < nentries; i++) 12548 memseg_phash[i] = MSEG_NULLPTR_PA; 12549 } 12550 12551 /* 12552 * Update memseg_phash[inx] when memseg_hash[inx] was changed. 12553 */ 12554 void 12555 hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp) 12556 { 12557 if (kpm_enable == 0) 12558 return; 12559 12560 memseg_phash[inx] = (msp) ? va_to_pa(msp) : MSEG_NULLPTR_PA; 12561 } 12562 12563 /* 12564 * Update kpm memseg members from basic memseg info. 
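 * (The kpm_page array is not a separate allocation; as the first
 * assignment below shows, it lives kpm_pages_off bytes past the
 * memseg's page_t array within the same allocation.)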
12565 */ 12566 void 12567 hat_kpm_addmem_mseg_update(struct memseg *msp, pgcnt_t nkpmpgs, 12568 offset_t kpm_pages_off) 12569 { 12570 if (kpm_enable == 0) 12571 return; 12572 12573 msp->kpm_pages = (kpm_page_t *)((caddr_t)msp->pages + kpm_pages_off); 12574 msp->kpm_nkpmpgs = nkpmpgs; 12575 msp->kpm_pbase = kpmptop(ptokpmp(msp->pages_base)); 12576 msp->pagespa = va_to_pa(msp->pages); 12577 msp->epagespa = va_to_pa(msp->epages); 12578 msp->kpm_pagespa = va_to_pa(msp->kpm_pages); 12579 } 12580 12581 /* 12582 * Setup nextpa when a memseg is inserted. 12583 * Assumes that the memsegslock is already held. 12584 */ 12585 void 12586 hat_kpm_addmem_mseg_insert(struct memseg *msp) 12587 { 12588 if (kpm_enable == 0) 12589 return; 12590 12591 ASSERT(RW_LOCK_HELD(&memsegslock)); 12592 msp->nextpa = (memsegs) ? va_to_pa(memsegs) : MSEG_NULLPTR_PA; 12593 } 12594 12595 /* 12596 * Setup memsegspa when a memseg is (head) inserted. 12597 * Called before memsegs is updated to complete a 12598 * memseg insert operation. 12599 * Assumes that the memsegslock is already held. 12600 */ 12601 void 12602 hat_kpm_addmem_memsegs_update(struct memseg *msp) 12603 { 12604 if (kpm_enable == 0) 12605 return; 12606 12607 ASSERT(RW_LOCK_HELD(&memsegslock)); 12608 ASSERT(memsegs); 12609 memsegspa = va_to_pa(msp); 12610 } 12611 12612 /* 12613 * Return end of metadata for an already setup memseg. 12614 * 12615 * Note: kpm_pages and kpm_spages are aliases and the underlying 12616 * member of struct memseg is a union, therefore they always have 12617 * the same address within a memseg. They must be differentiated 12618 * when pointer arithmetic is used with them. 12619 */ 12620 caddr_t 12621 hat_kpm_mseg_reuse(struct memseg *msp) 12622 { 12623 caddr_t end; 12624 12625 if (kpm_smallpages == 0) 12626 end = (caddr_t)(msp->kpm_pages + msp->kpm_nkpmpgs); 12627 else 12628 end = (caddr_t)(msp->kpm_spages + msp->kpm_nkpmpgs); 12629 12630 return (end); 12631 } 12632 12633 /* 12634 * Update memsegspa (when the first memseg in the list 12635 * is deleted) or nextpa when a memseg is deleted. 12636 * Assumes that the memsegslock is already held. 12637 */ 12638 void 12639 hat_kpm_delmem_mseg_update(struct memseg *msp, struct memseg **mspp) 12640 { 12641 struct memseg *lmsp; 12642 12643 if (kpm_enable == 0) 12644 return; 12645 12646 ASSERT(RW_LOCK_HELD(&memsegslock)); 12647 12648 if (mspp == &memsegs) { 12649 memsegspa = (msp->next) ? 12650 va_to_pa(msp->next) : MSEG_NULLPTR_PA; 12651 } else { 12652 lmsp = (struct memseg *) 12653 ((uint64_t)mspp - offsetof(struct memseg, next)); 12654 lmsp->nextpa = (msp->next) ? 12655 va_to_pa(msp->next) : MSEG_NULLPTR_PA; 12656 } 12657 } 12658 12659 /* 12660 * Update kpm members for all memseg's involved in a split operation 12661 * and do the atomic update of the physical memseg chain. 12662 * 12663 * Note: kpm_pages and kpm_spages are aliases and the underlying member 12664 * of struct memseg is a union, therefore they always have the same 12665 * address within a memseg. With that the direct assignments and 12666 * va_to_pa conversions below don't have to be distinguished with 12667 * respect to kpm_smallpages. They must be differentiated when 12668 * pointer arithmetic is used with them. 12669 * 12670 * Assumes that the memsegslock is already held.
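 *
 * Sketch of the metadata arithmetic used below: with
 *
 *	kbase  = ptokpmp(msp->kpm_pbase);
 *	kstart = ptokpmp(mid->pages_base);
 *
 * the middle piece simply reuses the original array at an offset,
 * mid->kpm_pages = msp->kpm_pages + (kstart - kbase), and likewise
 * for hi; only the counts and the physical addresses are recomputed.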
12671 */ 12672 void 12673 hat_kpm_split_mseg_update(struct memseg *msp, struct memseg **mspp, 12674 struct memseg *lo, struct memseg *mid, struct memseg *hi) 12675 { 12676 pgcnt_t start, end, kbase, kstart, num; 12677 struct memseg *lmsp; 12678 12679 if (kpm_enable == 0) 12680 return; 12681 12682 ASSERT(RW_LOCK_HELD(&memsegslock)); 12683 ASSERT(msp && mid && msp->kpm_pages); 12684 12685 kbase = ptokpmp(msp->kpm_pbase); 12686 12687 if (lo) { 12688 num = lo->pages_end - lo->pages_base; 12689 start = kpmptop(ptokpmp(lo->pages_base)); 12690 /* align end to kpm page size granularity */ 12691 end = kpmptop(ptokpmp(start + num - 1)) + kpmpnpgs; 12692 lo->kpm_pbase = start; 12693 lo->kpm_nkpmpgs = ptokpmp(end - start); 12694 lo->kpm_pages = msp->kpm_pages; 12695 lo->kpm_pagespa = va_to_pa(lo->kpm_pages); 12696 lo->pagespa = va_to_pa(lo->pages); 12697 lo->epagespa = va_to_pa(lo->epages); 12698 lo->nextpa = va_to_pa(lo->next); 12699 } 12700 12701 /* mid */ 12702 num = mid->pages_end - mid->pages_base; 12703 kstart = ptokpmp(mid->pages_base); 12704 start = kpmptop(kstart); 12705 /* align end to kpm page size granularity */ 12706 end = kpmptop(ptokpmp(start + num - 1)) + kpmpnpgs; 12707 mid->kpm_pbase = start; 12708 mid->kpm_nkpmpgs = ptokpmp(end - start); 12709 if (kpm_smallpages == 0) { 12710 mid->kpm_pages = msp->kpm_pages + (kstart - kbase); 12711 } else { 12712 mid->kpm_spages = msp->kpm_spages + (kstart - kbase); 12713 } 12714 mid->kpm_pagespa = va_to_pa(mid->kpm_pages); 12715 mid->pagespa = va_to_pa(mid->pages); 12716 mid->epagespa = va_to_pa(mid->epages); 12717 mid->nextpa = (mid->next) ? va_to_pa(mid->next) : MSEG_NULLPTR_PA; 12718 12719 if (hi) { 12720 num = hi->pages_end - hi->pages_base; 12721 kstart = ptokpmp(hi->pages_base); 12722 start = kpmptop(kstart); 12723 /* align end to kpm page size granularity */ 12724 end = kpmptop(ptokpmp(start + num - 1)) + kpmpnpgs; 12725 hi->kpm_pbase = start; 12726 hi->kpm_nkpmpgs = ptokpmp(end - start); 12727 if (kpm_smallpages == 0) { 12728 hi->kpm_pages = msp->kpm_pages + (kstart - kbase); 12729 } else { 12730 hi->kpm_spages = msp->kpm_spages + (kstart - kbase); 12731 } 12732 hi->kpm_pagespa = va_to_pa(hi->kpm_pages); 12733 hi->pagespa = va_to_pa(hi->pages); 12734 hi->epagespa = va_to_pa(hi->epages); 12735 hi->nextpa = (hi->next) ? va_to_pa(hi->next) : MSEG_NULLPTR_PA; 12736 } 12737 12738 /* 12739 * Atomic update of the physical memseg chain 12740 */ 12741 if (mspp == &memsegs) { 12742 memsegspa = (lo) ? va_to_pa(lo) : va_to_pa(mid); 12743 } else { 12744 lmsp = (struct memseg *) 12745 ((uint64_t)mspp - offsetof(struct memseg, next)); 12746 lmsp->nextpa = (lo) ? va_to_pa(lo) : va_to_pa(mid); 12747 } 12748 } 12749 12750 /* 12751 * Walk the memsegs chain, applying func to each memseg span and vcolor. 12752 */ 12753 void 12754 hat_kpm_walk(void (*func)(void *, void *, size_t), void *arg) 12755 { 12756 pfn_t pbase, pend; 12757 int vcolor; 12758 void *base; 12759 size_t size; 12760 struct memseg *msp; 12761 extern uint_t vac_colors; 12762 12763 for (msp = memsegs; msp; msp = msp->next) { 12764 pbase = msp->pages_base; 12765 pend = msp->pages_end; 12766 for (vcolor = 0; vcolor < vac_colors; vcolor++) { 12767 base = ptob(pbase) + kpm_vbase + kpm_size * vcolor; 12768 size = ptob(pend - pbase); 12769 func(arg, base, size); 12770 } 12771 } 12772 } 12773 12774 12775 /* -- sfmmu_kpm internal section -- */ 12776 12777 /* 12778 * Return the page frame number if a valid segkpm mapping exists 12779 * for vaddr, otherwise return PFN_INVALID. No locks are grabbed. 
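 * A pfn is returned only while the page is actually kpm mapped
 * (pp->p_kpmref != 0); an otherwise valid kpm address whose page is
 * not mapped in yields PFN_INVALID as well.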
12780 * Should only be used by other sfmmu routines. 12781 */ 12782 pfn_t 12783 sfmmu_kpm_vatopfn(caddr_t vaddr) 12784 { 12785 uintptr_t paddr; 12786 pfn_t pfn; 12787 page_t *pp; 12788 12789 ASSERT(kpm_enable && IS_KPM_ADDR(vaddr)); 12790 12791 SFMMU_KPM_VTOP(vaddr, paddr); 12792 pfn = (pfn_t)btop(paddr); 12793 pp = page_numtopp_nolock(pfn); 12794 if (pp && pp->p_kpmref) 12795 return (pfn); 12796 else 12797 return ((pfn_t)PFN_INVALID); 12798 } 12799 12800 /* 12801 * Lookup a kpme in the p_kpmelist. 12802 */ 12803 static int 12804 sfmmu_kpme_lookup(struct kpme *kpme, page_t *pp) 12805 { 12806 struct kpme *p; 12807 12808 for (p = pp->p_kpmelist; p; p = p->kpe_next) { 12809 if (p == kpme) 12810 return (1); 12811 } 12812 return (0); 12813 } 12814 12815 /* 12816 * Insert a kpme into the p_kpmelist and increment 12817 * the per page kpm reference count. 12818 */ 12819 static void 12820 sfmmu_kpme_add(struct kpme *kpme, page_t *pp) 12821 { 12822 ASSERT(pp->p_kpmref >= 0); 12823 12824 /* head insert */ 12825 kpme->kpe_prev = NULL; 12826 kpme->kpe_next = pp->p_kpmelist; 12827 12828 if (pp->p_kpmelist) 12829 pp->p_kpmelist->kpe_prev = kpme; 12830 12831 pp->p_kpmelist = kpme; 12832 kpme->kpe_page = pp; 12833 pp->p_kpmref++; 12834 } 12835 12836 /* 12837 * Remove a kpme from the p_kpmelist and decrement 12838 * the per page kpm reference count. 12839 */ 12840 static void 12841 sfmmu_kpme_sub(struct kpme *kpme, page_t *pp) 12842 { 12843 ASSERT(pp->p_kpmref > 0); 12844 12845 if (kpme->kpe_prev) { 12846 ASSERT(pp->p_kpmelist != kpme); 12847 ASSERT(kpme->kpe_prev->kpe_page == pp); 12848 kpme->kpe_prev->kpe_next = kpme->kpe_next; 12849 } else { 12850 ASSERT(pp->p_kpmelist == kpme); 12851 pp->p_kpmelist = kpme->kpe_next; 12852 } 12853 12854 if (kpme->kpe_next) { 12855 ASSERT(kpme->kpe_next->kpe_page == pp); 12856 kpme->kpe_next->kpe_prev = kpme->kpe_prev; 12857 } 12858 12859 kpme->kpe_next = kpme->kpe_prev = NULL; 12860 kpme->kpe_page = NULL; 12861 pp->p_kpmref--; 12862 } 12863 12864 /* 12865 * Mapin a single page; it is called every time a page changes its state 12866 * from kpm-unmapped to kpm-mapped. It may not be called when only a new 12867 * kpm instance does a mapin and wants to share the mapping. 12868 * Assumes that the mlist mutex is already grabbed. 12869 */ 12870 static caddr_t 12871 sfmmu_kpm_mapin(page_t *pp) 12872 { 12873 kpm_page_t *kp; 12874 kpm_hlk_t *kpmp; 12875 caddr_t vaddr; 12876 int kpm_vac_range; 12877 pfn_t pfn; 12878 tte_t tte; 12879 kmutex_t *pmtx; 12880 int uncached; 12881 kpm_spage_t *ksp; 12882 kpm_shlk_t *kpmsp; 12883 int oldval; 12884 12885 ASSERT(sfmmu_mlist_held(pp)); 12886 ASSERT(pp->p_kpmref == 0); 12887 12888 vaddr = sfmmu_kpm_getvaddr(pp, &kpm_vac_range); 12889 12890 ASSERT(IS_KPM_ADDR(vaddr)); 12891 uncached = PP_ISNC(pp); 12892 pfn = pp->p_pagenum; 12893 12894 if (kpm_smallpages) 12895 goto smallpages_mapin; 12896 12897 PP2KPMPG(pp, kp); 12898 12899 kpmp = KPMP_HASH(kp); 12900 mutex_enter(&kpmp->khl_mutex); 12901 12902 ASSERT(PP_ISKPMC(pp) == 0); 12903 ASSERT(PP_ISKPMS(pp) == 0); 12904 12905 if (uncached) { 12906 /* ASSERT(pp->p_share); XXX use hat_page_getshare */ 12907 if (kpm_vac_range == 0) { 12908 if (kp->kp_refcnts == 0) { 12909 /* 12910 * Must remove large page mapping if it exists. 12911 * Pages in uncached state can only be mapped 12912 * small (PAGESIZE) within the regular kpm 12913 * range.
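 * Roughly: the checks below first stop the trap level tsbmiss
 * handler for this kpm_page (KPMTSBM_STOP) and then demap a still
 * valid TTE4M entry, so the small uncached mapin cannot coexist
 * with a live large mapping.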
12914 */ 12915 if (kp->kp_refcntc == -1) { 12916 /* remove go indication */ 12917 sfmmu_kpm_tsbmtl(&kp->kp_refcntc, 12918 &kpmp->khl_lock, KPMTSBM_STOP); 12919 } 12920 if (kp->kp_refcnt > 0 && kp->kp_refcntc == 0) 12921 sfmmu_kpm_demap_large(vaddr); 12922 } 12923 ASSERT(kp->kp_refcntc >= 0); 12924 kp->kp_refcntc++; 12925 } 12926 pmtx = sfmmu_page_enter(pp); 12927 PP_SETKPMC(pp); 12928 sfmmu_page_exit(pmtx); 12929 } 12930 12931 if ((kp->kp_refcntc > 0 || kp->kp_refcnts > 0) && kpm_vac_range == 0) { 12932 /* 12933 * Have to do a small (PAGESIZE) mapin within this kpm_page 12934 * range since it is marked to be in VAC conflict mode or 12935 * when there are still other small mappings around. 12936 */ 12937 12938 /* tte assembly */ 12939 if (uncached == 0) 12940 KPM_TTE_VCACHED(tte.ll, pfn, TTE8K); 12941 else 12942 KPM_TTE_VUNCACHED(tte.ll, pfn, TTE8K); 12943 12944 /* tsb dropin */ 12945 sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT); 12946 12947 pmtx = sfmmu_page_enter(pp); 12948 PP_SETKPMS(pp); 12949 sfmmu_page_exit(pmtx); 12950 12951 kp->kp_refcnts++; 12952 ASSERT(kp->kp_refcnts > 0); 12953 goto exit; 12954 } 12955 12956 if (kpm_vac_range == 0) { 12957 /* 12958 * Fast path / regular case, no VAC conflict handling 12959 * in progress within this kpm_page range. 12960 */ 12961 if (kp->kp_refcnt == 0) { 12962 12963 /* tte assembly */ 12964 KPM_TTE_VCACHED(tte.ll, pfn, TTE4M); 12965 12966 /* tsb dropin */ 12967 sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT4M); 12968 12969 /* Set go flag for TL tsbmiss handler */ 12970 if (kp->kp_refcntc == 0) 12971 sfmmu_kpm_tsbmtl(&kp->kp_refcntc, 12972 &kpmp->khl_lock, KPMTSBM_START); 12973 12974 ASSERT(kp->kp_refcntc == -1); 12975 } 12976 kp->kp_refcnt++; 12977 ASSERT(kp->kp_refcnt); 12978 12979 } else { 12980 /* 12981 * The page is not set up according to the common VAC 12982 * prevention rules for the regular and kpm mapping layers, 12983 * e.g. the page layer was not able to deliver a correctly 12984 * vcolor'ed page for a given vaddr corresponding to 12985 * the wanted p_offset. It has to be mapped in small 12986 * within the corresponding kpm vac range in order to 12987 * prevent VAC alias conflicts. 12988 */ 12989 12990 /* tte assembly */ 12991 if (uncached == 0) { 12992 KPM_TTE_VCACHED(tte.ll, pfn, TTE8K); 12993 } else { 12994 KPM_TTE_VUNCACHED(tte.ll, pfn, TTE8K); 12995 } 12996 12997 /* tsb dropin */ 12998 sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT); 12999 13000 kp->kp_refcnta++; 13001 if (kp->kp_refcntc == -1) { 13002 ASSERT(kp->kp_refcnt > 0); 13003 13004 /* remove go indication */ 13005 sfmmu_kpm_tsbmtl(&kp->kp_refcntc, &kpmp->khl_lock, 13006 KPMTSBM_STOP); 13007 } 13008 ASSERT(kp->kp_refcntc >= 0); 13009 } 13010 exit: 13011 mutex_exit(&kpmp->khl_mutex); 13012 return (vaddr); 13013 13014 smallpages_mapin: 13015 if (uncached == 0) { 13016 /* tte assembly */ 13017 KPM_TTE_VCACHED(tte.ll, pfn, TTE8K); 13018 } else { 13019 /* ASSERT(pp->p_share); XXX use hat_page_getshare */ 13020 pmtx = sfmmu_page_enter(pp); 13021 PP_SETKPMC(pp); 13022 sfmmu_page_exit(pmtx); 13023 /* tte assembly */ 13024 KPM_TTE_VUNCACHED(tte.ll, pfn, TTE8K); 13025 } 13026 13027 /* tsb dropin */ 13028 sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT); 13029 13030 PP2KPMSPG(pp, ksp); 13031 kpmsp = KPMP_SHASH(ksp); 13032 13033 oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped, &kpmsp->kshl_lock, 13034 (uncached) ?
KPM_MAPPEDSC : KPM_MAPPEDS); 13035 13036 if (oldval != 0) 13037 panic("sfmmu_kpm_mapin: stale smallpages mapping"); 13038 13039 return (vaddr); 13040 } 13041 13042 /* 13043 * Mapout a single page; it is called every time a page changes its state 13044 * from kpm-mapped to kpm-unmapped. It may not be called when only a kpm 13045 * instance calls mapout and there are still other instances mapping the 13046 * page. Assumes that the mlist mutex is already grabbed. 13047 * 13048 * Note: In normal mode (no VAC conflict prevention pending) TLB's are 13049 * not flushed. This is the core segkpm behavior to avoid xcalls. It is 13050 * no problem because a translation from a segkpm virtual address to a 13051 * physical address is always the same. The only downside is a slightly 13052 * increased window of vulnerability for misbehaving _kernel_ modules. 13053 */ 13054 static void 13055 sfmmu_kpm_mapout(page_t *pp, caddr_t vaddr) 13056 { 13057 kpm_page_t *kp; 13058 kpm_hlk_t *kpmp; 13059 int alias_range; 13060 kmutex_t *pmtx; 13061 kpm_spage_t *ksp; 13062 kpm_shlk_t *kpmsp; 13063 int oldval; 13064 13065 ASSERT(sfmmu_mlist_held(pp)); 13066 ASSERT(pp->p_kpmref == 0); 13067 13068 alias_range = IS_KPM_ALIAS_RANGE(vaddr); 13069 13070 if (kpm_smallpages) 13071 goto smallpages_mapout; 13072 13073 PP2KPMPG(pp, kp); 13074 kpmp = KPMP_HASH(kp); 13075 mutex_enter(&kpmp->khl_mutex); 13076 13077 if (alias_range) { 13078 ASSERT(PP_ISKPMS(pp) == 0); 13079 if (kp->kp_refcnta <= 0) { 13080 panic("sfmmu_kpm_mapout: bad refcnta kp=%p", 13081 (void *)kp); 13082 } 13083 13084 if (PP_ISTNC(pp)) { 13085 if (PP_ISKPMC(pp) == 0) { 13086 /* 13087 * Uncached kpm mappings must always have 13088 * forced "small page" mode. 13089 */ 13090 panic("sfmmu_kpm_mapout: uncached page not " 13091 "kpm marked"); 13092 } 13093 sfmmu_kpm_demap_small(vaddr); 13094 13095 pmtx = sfmmu_page_enter(pp); 13096 PP_CLRKPMC(pp); 13097 sfmmu_page_exit(pmtx); 13098 13099 /* 13100 * Check if we can resume cached mode. This might 13101 * be the case if the kpm mapping was the only 13102 * mapping in conflict with other non rule 13103 * compliant mappings. The page is no longer marked 13104 * as kpm mapped, so the conv_tnc path will not 13105 * change kpm state. 13106 */ 13107 conv_tnc(pp, TTE8K); 13108 13109 } else if (PP_ISKPMC(pp) == 0) { 13110 /* remove TSB entry only */ 13111 sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT); 13112 13113 } else { 13114 /* already demapped */ 13115 pmtx = sfmmu_page_enter(pp); 13116 PP_CLRKPMC(pp); 13117 sfmmu_page_exit(pmtx); 13118 } 13119 kp->kp_refcnta--; 13120 goto exit; 13121 } 13122 13123 if (kp->kp_refcntc <= 0 && kp->kp_refcnts == 0) { 13124 /* 13125 * Fast path / regular case. 13126 */ 13127 ASSERT(kp->kp_refcntc >= -1); 13128 ASSERT(!(pp->p_nrm & (P_KPMC | P_KPMS | P_TNC | P_PNC))); 13129 13130 if (kp->kp_refcnt <= 0) 13131 panic("sfmmu_kpm_mapout: bad refcnt kp=%p", (void *)kp); 13132 13133 if (--kp->kp_refcnt == 0) { 13134 /* remove go indication */ 13135 if (kp->kp_refcntc == -1) { 13136 sfmmu_kpm_tsbmtl(&kp->kp_refcntc, 13137 &kpmp->khl_lock, KPMTSBM_STOP); 13138 } 13139 ASSERT(kp->kp_refcntc == 0); 13140 13141 /* remove TSB entry */ 13142 sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT4M); 13143 #ifdef DEBUG 13144 if (kpm_tlb_flush) 13145 sfmmu_kpm_demap_tlbs(vaddr, KCONTEXT); 13146 #endif 13147 } 13148 13149 } else { 13150 /* 13151 * The VAC alias path. 13152 * We come here if the kpm vaddr is not in any alias_range 13153 * and we are unmapping a page within the regular kpm_page 13154 * range.
The kpm_page either holds conflict pages and/or 13155 * is in "small page" mode. If the page is not marked 13156 * P_KPMS it couldn't have a valid PAGESIZE sized TSB 13157 * entry. Dcache flushing is done lazily and follows the 13158 * rules of the regular virtual page coloring scheme. 13159 * 13160 * Per page states and required actions: 13161 * P_KPMC: remove a kpm mapping that is conflicting. 13162 * P_KPMS: remove a small kpm mapping within a kpm_page. 13163 * P_TNC: check if we can re-cache the page. 13164 * P_PNC: we cannot re-cache, sorry. 13165 * Per kpm_page: 13166 * kp_refcntc > 0: page is part of a kpm_page with conflicts. 13167 * kp_refcnts > 0: rm a small mapped page within a kpm_page. 13168 */ 13169 13170 if (PP_ISKPMS(pp)) { 13171 if (kp->kp_refcnts < 1) { 13172 panic("sfmmu_kpm_mapout: bad refcnts kp=%p", 13173 (void *)kp); 13174 } 13175 sfmmu_kpm_demap_small(vaddr); 13176 13177 /* 13178 * Check if we can resume cached mode. This might 13179 * be the case if the kpm mapping was the only 13180 * mapping in conflict with other non rule 13181 * compliant mappings. The page is no longer marked 13182 * as kpm mapped, so the conv_tnc path will not 13183 * change kpm state. 13184 */ 13185 if (PP_ISTNC(pp)) { 13186 if (!PP_ISKPMC(pp)) { 13187 /* 13188 * Uncached kpm mappings must always 13189 * have forced "small page" mode. 13190 */ 13191 panic("sfmmu_kpm_mapout: uncached " 13192 "page not kpm marked"); 13193 } 13194 conv_tnc(pp, TTE8K); 13195 } 13196 kp->kp_refcnts--; 13197 kp->kp_refcnt++; 13198 pmtx = sfmmu_page_enter(pp); 13199 PP_CLRKPMS(pp); 13200 sfmmu_page_exit(pmtx); 13201 } 13202 13203 if (PP_ISKPMC(pp)) { 13204 if (kp->kp_refcntc < 1) { 13205 panic("sfmmu_kpm_mapout: bad refcntc kp=%p", 13206 (void *)kp); 13207 } 13208 pmtx = sfmmu_page_enter(pp); 13209 PP_CLRKPMC(pp); 13210 sfmmu_page_exit(pmtx); 13211 kp->kp_refcntc--; 13212 } 13213 13214 if (kp->kp_refcnt-- < 1) 13215 panic("sfmmu_kpm_mapout: bad refcnt kp=%p", (void *)kp); 13216 } 13217 exit: 13218 mutex_exit(&kpmp->khl_mutex); 13219 return; 13220 13221 smallpages_mapout: 13222 PP2KPMSPG(pp, ksp); 13223 kpmsp = KPMP_SHASH(ksp); 13224 13225 if (PP_ISKPMC(pp) == 0) { 13226 oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped, 13227 &kpmsp->kshl_lock, 0); 13228 13229 if (oldval != KPM_MAPPEDS) { 13230 /* 13231 * When we're called after sfmmu_kpm_hme_unload, 13232 * KPM_MAPPEDSC is valid too. 13233 */ 13234 if (oldval != KPM_MAPPEDSC) 13235 panic("sfmmu_kpm_mapout: incorrect mapping"); 13236 } 13237 13238 /* remove TSB entry */ 13239 sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT); 13240 #ifdef DEBUG 13241 if (kpm_tlb_flush) 13242 sfmmu_kpm_demap_tlbs(vaddr, KCONTEXT); 13243 #endif 13244 13245 } else if (PP_ISTNC(pp)) { 13246 oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped, 13247 &kpmsp->kshl_lock, 0); 13248 13249 if (oldval != KPM_MAPPEDSC || PP_ISKPMC(pp) == 0) 13250 panic("sfmmu_kpm_mapout: inconsistent TNC mapping"); 13251 13252 sfmmu_kpm_demap_small(vaddr); 13253 13254 pmtx = sfmmu_page_enter(pp); 13255 PP_CLRKPMC(pp); 13256 sfmmu_page_exit(pmtx); 13257 13258 /* 13259 * Check if we can resume cached mode. This might be 13260 * the case if the kpm mapping was the only mapping 13261 * in conflict with other non rule compliant mappings. 13262 * The page is no longer marked as kpm mapped, so the 13263 * conv_tnc path will not change the kpm state.
13264 */ 13265 conv_tnc(pp, TTE8K); 13266 13267 } else { 13268 oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped, 13269 &kpmsp->kshl_lock, 0); 13270 13271 if (oldval != KPM_MAPPEDSC) 13272 panic("sfmmu_kpm_mapout: inconsistent mapping"); 13273 13274 pmtx = sfmmu_page_enter(pp); 13275 PP_CLRKPMC(pp); 13276 sfmmu_page_exit(pmtx); 13277 } 13278 } 13279 13280 #define abs(x) ((x) < 0 ? -(x) : (x)) 13281 13282 /* 13283 * Determine appropriate kpm mapping address and handle any kpm/hme 13284 * conflicts. Page mapping list and its vcolor parts must be protected. 13285 */ 13286 static caddr_t 13287 sfmmu_kpm_getvaddr(page_t *pp, int *kpm_vac_rangep) 13288 { 13289 int vcolor, vcolor_pa; 13290 caddr_t vaddr; 13291 uintptr_t paddr; 13292 13293 13294 ASSERT(sfmmu_mlist_held(pp)); 13295 13296 paddr = ptob(pp->p_pagenum); 13297 vcolor_pa = addr_to_vcolor(paddr); 13298 13299 if (IS_SWAPFSVP(pp->p_vnode)) { 13300 vcolor = (PP_NEWPAGE(pp) || PP_ISNC(pp)) ? 13301 vcolor_pa : PP_GET_VCOLOR(pp); 13302 } else { 13303 vcolor = addr_to_vcolor(pp->p_offset); 13304 } 13305 13306 vaddr = kpm_vbase + paddr; 13307 *kpm_vac_rangep = 0; 13308 13309 if (vcolor_pa != vcolor) { 13310 *kpm_vac_rangep = abs(vcolor - vcolor_pa); 13311 vaddr += ((uintptr_t)(vcolor - vcolor_pa) << MMU_PAGESHIFT); 13312 vaddr += (vcolor_pa > vcolor) ? 13313 ((uintptr_t)vcolor_pa << kpm_size_shift) : 13314 ((uintptr_t)(vcolor - vcolor_pa) << kpm_size_shift); 13315 13316 ASSERT(!PP_ISMAPPED_LARGE(pp)); 13317 } 13318 13319 if (PP_ISNC(pp)) 13320 return (vaddr); 13321 13322 if (PP_NEWPAGE(pp)) { 13323 PP_SET_VCOLOR(pp, vcolor); 13324 return (vaddr); 13325 } 13326 13327 if (PP_GET_VCOLOR(pp) == vcolor) 13328 return (vaddr); 13329 13330 ASSERT(!PP_ISMAPPED_KPM(pp)); 13331 sfmmu_kpm_vac_conflict(pp, vaddr); 13332 13333 return (vaddr); 13334 } 13335 13336 /* 13337 * VAC conflict state bit values. 13338 * The following defines are used to make the handling of the 13339 * various input states more concise. For that the kpm states 13340 * per kpm_page and per page are combined in a summary state. 13341 * Each single state has a corresponding bit value in the 13342 * summary state. These defines only apply for kpm large page 13343 * mappings. Within comments the abbreviations "kc, c, ks, s" 13344 * are used as short form of the actual state, e.g. "kc" for 13345 * "kp_refcntc > 0", etc. 13346 */ 13347 #define KPM_KC 0x00000008 /* kpm_page: kp_refcntc > 0 */ 13348 #define KPM_C 0x00000004 /* page: P_KPMC set */ 13349 #define KPM_KS 0x00000002 /* kpm_page: kp_refcnts > 0 */ 13350 #define KPM_S 0x00000001 /* page: P_KPMS set */ 13351 13352 /* 13353 * Summary states used in sfmmu_kpm_fault (KPM_TSBM_*). 13354 * See also the more detailed comments within the sfmmu_kpm_fault switch. 13355 * Abbreviations used: 13356 * CONFL: VAC conflict(s) within a kpm_page. 13357 * MAPS: Mapped small: Page mapped in using a regular page size kpm mapping. 13358 * RASM: Re-assembling of a large page mapping possible. 13359 * RPLS: Replace: TSB miss due to TSB replacement only. 13360 * BRKO: Breakup Other: A large kpm mapping has to be broken because another 13361 * page within the kpm_page is already involved in a VAC conflict. 13362 * BRKT: Breakup This: A large kpm mapping has to be broken, this page 13363 * is involved in a VAC conflict.
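 *
 * A summary state is just the OR of the bits that apply; as a sketch
 * (mirroring how sfmmu_kpm_fault assembles tsbmcase below):
 *
 *	tsbmcase = ((kp->kp_refcntc > 0) ? KPM_KC : 0) |
 *	    ((kp->kp_refcnts > 0) ? KPM_KS : 0) |
 *	    (PP_ISKPMC(pp) ? KPM_C : 0) |
 *	    (PP_ISKPMS(pp) ? KPM_S : 0);
 *
 * so a small mapped page (ks, s) inside a conflict marked kpm_page
 * (kc) yields KPM_KC | KPM_KS | KPM_S == KPM_TSBM_RPLS.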
13364 */ 13365 #define KPM_TSBM_CONFL_GONE (0) 13366 #define KPM_TSBM_MAPS_RASM (KPM_KS) 13367 #define KPM_TSBM_RPLS_RASM (KPM_KS | KPM_S) 13368 #define KPM_TSBM_MAPS_BRKO (KPM_KC) 13369 #define KPM_TSBM_MAPS (KPM_KC | KPM_KS) 13370 #define KPM_TSBM_RPLS (KPM_KC | KPM_KS | KPM_S) 13371 #define KPM_TSBM_MAPS_BRKT (KPM_KC | KPM_C) 13372 #define KPM_TSBM_MAPS_CONFL (KPM_KC | KPM_C | KPM_KS) 13373 #define KPM_TSBM_RPLS_CONFL (KPM_KC | KPM_C | KPM_KS | KPM_S) 13374 13375 /* 13376 * kpm fault handler for mappings with large page size. 13377 */ 13378 int 13379 sfmmu_kpm_fault(caddr_t vaddr, struct memseg *mseg, page_t *pp) 13380 { 13381 int error; 13382 pgcnt_t inx; 13383 kpm_page_t *kp; 13384 tte_t tte; 13385 pfn_t pfn = pp->p_pagenum; 13386 kpm_hlk_t *kpmp; 13387 kmutex_t *pml; 13388 int alias_range; 13389 int uncached = 0; 13390 kmutex_t *pmtx; 13391 int badstate; 13392 uint_t tsbmcase; 13393 13394 alias_range = IS_KPM_ALIAS_RANGE(vaddr); 13395 13396 inx = ptokpmp(kpmptop(ptokpmp(pfn)) - mseg->kpm_pbase); 13397 if (inx >= mseg->kpm_nkpmpgs) { 13398 cmn_err(CE_PANIC, "sfmmu_kpm_fault: kpm overflow in memseg " 13399 "0x%p pp 0x%p", (void *)mseg, (void *)pp); 13400 } 13401 13402 kp = &mseg->kpm_pages[inx]; 13403 kpmp = KPMP_HASH(kp); 13404 13405 pml = sfmmu_mlist_enter(pp); 13406 13407 if (!PP_ISMAPPED_KPM(pp)) { 13408 sfmmu_mlist_exit(pml); 13409 return (EFAULT); 13410 } 13411 13412 mutex_enter(&kpmp->khl_mutex); 13413 13414 if (alias_range) { 13415 ASSERT(!PP_ISMAPPED_LARGE(pp)); 13416 if (kp->kp_refcnta > 0) { 13417 if (PP_ISKPMC(pp)) { 13418 pmtx = sfmmu_page_enter(pp); 13419 PP_CLRKPMC(pp); 13420 sfmmu_page_exit(pmtx); 13421 } 13422 /* 13423 * Check for vcolor conflicts. Return here 13424 * w/ either no conflict (fast path), removed hme 13425 * mapping chains (unload conflict) or uncached 13426 * (uncache conflict). VACaches are cleaned and 13427 * p_vcolor and PP_TNC are set accordingly for the 13428 * conflict cases. Drop kpmp for uncache conflict 13429 * cases since it will be grabbed within 13430 * sfmmu_kpm_page_cache in case of an uncache 13431 * conflict. 13432 */ 13433 mutex_exit(&kpmp->khl_mutex); 13434 sfmmu_kpm_vac_conflict(pp, vaddr); 13435 mutex_enter(&kpmp->khl_mutex); 13436 13437 if (PP_ISNC(pp)) { 13438 uncached = 1; 13439 pmtx = sfmmu_page_enter(pp); 13440 PP_SETKPMC(pp); 13441 sfmmu_page_exit(pmtx); 13442 } 13443 goto smallexit; 13444 13445 } else { 13446 /* 13447 * We got a tsbmiss on a not active kpm_page range. 13448 * Let segkpm_fault decide how to panic. 13449 */ 13450 error = EFAULT; 13451 } 13452 goto exit; 13453 } 13454 13455 badstate = (kp->kp_refcnt < 0 || kp->kp_refcnts < 0); 13456 if (kp->kp_refcntc == -1) { 13457 /* 13458 * We should come here only if trap level tsb miss 13459 * handler is disabled. 13460 */ 13461 badstate |= (kp->kp_refcnt == 0 || kp->kp_refcnts > 0 || 13462 PP_ISKPMC(pp) || PP_ISKPMS(pp) || PP_ISNC(pp)); 13463 13464 if (badstate == 0) 13465 goto largeexit; 13466 } 13467 13468 if (badstate || kp->kp_refcntc < 0) 13469 goto badstate_exit; 13470 13471 /* 13472 * Combine the per kpm_page and per page kpm VAC states to 13473 * a summary state in order to make the kpm fault handling 13474 * more concise. 13475 */ 13476 tsbmcase = (((kp->kp_refcntc > 0) ? KPM_KC : 0) | 13477 ((kp->kp_refcnts > 0) ? KPM_KS : 0) | 13478 (PP_ISKPMC(pp) ? KPM_C : 0) | 13479 (PP_ISKPMS(pp) ? 
KPM_S : 0)); 13480 13481 switch (tsbmcase) { 13482 case KPM_TSBM_CONFL_GONE: /* - - - - */ 13483 /* 13484 * That's fine, we either have no more vac conflict in 13485 * this kpm page or someone raced in and has solved the 13486 * vac conflict for us -- call sfmmu_kpm_vac_conflict 13487 * to take care of correcting the vcolor and flushing 13488 * the dcache if required. 13489 */ 13490 mutex_exit(&kpmp->khl_mutex); 13491 sfmmu_kpm_vac_conflict(pp, vaddr); 13492 mutex_enter(&kpmp->khl_mutex); 13493 13494 if (PP_ISNC(pp) || kp->kp_refcnt <= 0 || 13495 addr_to_vcolor(vaddr) != PP_GET_VCOLOR(pp)) { 13496 panic("sfmmu_kpm_fault: inconsistent CONFL_GONE " 13497 "state, pp=%p", (void *)pp); 13498 } 13499 goto largeexit; 13500 13501 case KPM_TSBM_MAPS_RASM: /* - - ks - */ 13502 /* 13503 * All conflicts in this kpm page are gone but there are 13504 * already small mappings around, so we also map this 13505 * page small. This could be the trigger case for a 13506 * small mapping reaper, if this is really needed. 13507 * For now fall thru to the KPM_TSBM_MAPS handling. 13508 */ 13509 13510 case KPM_TSBM_MAPS: /* kc - ks - */ 13511 /* 13512 * Large page mapping is already broken, this page is not 13513 * conflicting, so map it small. Call sfmmu_kpm_vac_conflict 13514 * to take care of correcting the vcolor and flushing 13515 * the dcache if required. 13516 */ 13517 mutex_exit(&kpmp->khl_mutex); 13518 sfmmu_kpm_vac_conflict(pp, vaddr); 13519 mutex_enter(&kpmp->khl_mutex); 13520 13521 if (PP_ISNC(pp) || kp->kp_refcnt <= 0 || 13522 addr_to_vcolor(vaddr) != PP_GET_VCOLOR(pp)) { 13523 panic("sfmmu_kpm_fault: inconsistent MAPS state, " 13524 "pp=%p", (void *)pp); 13525 } 13526 kp->kp_refcnt--; 13527 kp->kp_refcnts++; 13528 pmtx = sfmmu_page_enter(pp); 13529 PP_SETKPMS(pp); 13530 sfmmu_page_exit(pmtx); 13531 goto smallexit; 13532 13533 case KPM_TSBM_RPLS_RASM: /* - - ks s */ 13534 /* 13535 * All conflicts in this kpm page are gone but this page 13536 * is mapped small. This could be the trigger case for a 13537 * small mapping reaper, if this is really needed. 13538 * For now we drop it in small again. Fall thru to the 13539 * KPM_TSBM_RPLS handling. 13540 */ 13541 13542 case KPM_TSBM_RPLS: /* kc - ks s */ 13543 /* 13544 * Large page mapping is already broken, this page is not 13545 * conflicting but already mapped small, so drop it in 13546 * small again. 13547 */ 13548 if (PP_ISNC(pp) || 13549 addr_to_vcolor(vaddr) != PP_GET_VCOLOR(pp)) { 13550 panic("sfmmu_kpm_fault: inconsistent RPLS state, " 13551 "pp=%p", (void *)pp); 13552 } 13553 goto smallexit; 13554 13555 case KPM_TSBM_MAPS_BRKO: /* kc - - - */ 13556 /* 13557 * The kpm page we live in is marked conflicting 13558 * but this page is not conflicting. So we have to map it 13559 * in small. Call sfmmu_kpm_vac_conflict to take care of 13560 * correcting the vcolor and flushing the dcache if required.
13561 */ 13562 mutex_exit(&kpmp->khl_mutex); 13563 sfmmu_kpm_vac_conflict(pp, vaddr); 13564 mutex_enter(&kpmp->khl_mutex); 13565 13566 if (PP_ISNC(pp) || kp->kp_refcnt <= 0 || 13567 addr_to_vcolor(vaddr) != PP_GET_VCOLOR(pp)) { 13568 panic("sfmmu_kpm_fault: inconsistent MAPS_BRKO state, " 13569 "pp=%p", (void *)pp); 13570 } 13571 kp->kp_refcnt--; 13572 kp->kp_refcnts++; 13573 pmtx = sfmmu_page_enter(pp); 13574 PP_SETKPMS(pp); 13575 sfmmu_page_exit(pmtx); 13576 goto smallexit; 13577 13578 case KPM_TSBM_MAPS_BRKT: /* kc c - - */ 13579 case KPM_TSBM_MAPS_CONFL: /* kc c ks - */ 13580 if (!PP_ISMAPPED(pp)) { 13581 /* 13582 * We got a tsbmiss on kpm large page range that is 13583 * marked to contain vac conflicting pages introduced 13584 * by hme mappings. The hme mappings are all gone and 13585 * must have bypassed the kpm alias prevention logic. 13586 */ 13587 panic("sfmmu_kpm_fault: stale VAC conflict, pp=%p", 13588 (void *)pp); 13589 } 13590 13591 /* 13592 * Check for vcolor conflicts. Return here w/ either no 13593 * conflict (fast path), removed hme mapping chains 13594 * (unload conflict) or uncached (uncache conflict). 13595 * Dcache is cleaned and p_vcolor and P_TNC are set 13596 * accordingly. Drop kpmp for uncache conflict cases 13597 * since it will be grabbed within sfmmu_kpm_page_cache 13598 * in case of an uncache conflict. 13599 */ 13600 mutex_exit(&kpmp->khl_mutex); 13601 sfmmu_kpm_vac_conflict(pp, vaddr); 13602 mutex_enter(&kpmp->khl_mutex); 13603 13604 if (kp->kp_refcnt <= 0) 13605 panic("sfmmu_kpm_fault: bad refcnt kp=%p", (void *)kp); 13606 13607 if (PP_ISNC(pp)) { 13608 uncached = 1; 13609 } else { 13610 /* 13611 * When an unload conflict is solved and there are 13612 * no other small mappings around, we can resume 13613 * largepage mode. Otherwise we have to map or drop 13614 * in small. This could be a trigger for a small 13615 * mapping reaper when this was the last conflict 13616 * within the kpm page and when there are only 13617 * other small mappings around. 13618 */ 13619 ASSERT(addr_to_vcolor(vaddr) == PP_GET_VCOLOR(pp)); 13620 ASSERT(kp->kp_refcntc > 0); 13621 kp->kp_refcntc--; 13622 pmtx = sfmmu_page_enter(pp); 13623 PP_CLRKPMC(pp); 13624 sfmmu_page_exit(pmtx); 13625 ASSERT(PP_ISKPMS(pp) == 0); 13626 if (kp->kp_refcntc == 0 && kp->kp_refcnts == 0) 13627 goto largeexit; 13628 } 13629 13630 kp->kp_refcnt--; 13631 kp->kp_refcnts++; 13632 pmtx = sfmmu_page_enter(pp); 13633 PP_SETKPMS(pp); 13634 sfmmu_page_exit(pmtx); 13635 goto smallexit; 13636 13637 case KPM_TSBM_RPLS_CONFL: /* kc c ks s */ 13638 if (!PP_ISMAPPED(pp)) { 13639 /* 13640 * We got a tsbmiss on kpm large page range that is 13641 * marked to contain vac conflicting pages introduced 13642 * by hme mappings. They are all gone and must have 13643 * somehow bypassed the kpm alias prevention logic. 13644 */ 13645 panic("sfmmu_kpm_fault: stale VAC conflict, pp=%p", 13646 (void *)pp); 13647 } 13648 13649 /* 13650 * This state is only possible for an uncached mapping. 
13651 */ 13652 if (!PP_ISNC(pp)) { 13653 panic("sfmmu_kpm_fault: page not uncached, pp=%p", 13654 (void *)pp); 13655 } 13656 uncached = 1; 13657 goto smallexit; 13658 13659 default: 13660 badstate_exit: 13661 panic("sfmmu_kpm_fault: inconsistent VAC state, vaddr=%p kp=%p " 13662 "pp=%p", (void *)vaddr, (void *)kp, (void *)pp); 13663 } 13664 13665 smallexit: 13666 /* tte assembly */ 13667 if (uncached == 0) 13668 KPM_TTE_VCACHED(tte.ll, pfn, TTE8K); 13669 else 13670 KPM_TTE_VUNCACHED(tte.ll, pfn, TTE8K); 13671 13672 /* tsb dropin */ 13673 sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT); 13674 13675 error = 0; 13676 goto exit; 13677 13678 largeexit: 13679 if (kp->kp_refcnt > 0) { 13680 13681 /* tte assembly */ 13682 KPM_TTE_VCACHED(tte.ll, pfn, TTE4M); 13683 13684 /* tsb dropin */ 13685 sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT4M); 13686 13687 if (kp->kp_refcntc == 0) { 13688 /* Set "go" flag for TL tsbmiss handler */ 13689 sfmmu_kpm_tsbmtl(&kp->kp_refcntc, &kpmp->khl_lock, 13690 KPMTSBM_START); 13691 } 13692 ASSERT(kp->kp_refcntc == -1); 13693 error = 0; 13694 13695 } else 13696 error = EFAULT; 13697 exit: 13698 mutex_exit(&kpmp->khl_mutex); 13699 sfmmu_mlist_exit(pml); 13700 return (error); 13701 } 13702 13703 /* 13704 * kpm fault handler for mappings with small page size. 13705 */ 13706 int 13707 sfmmu_kpm_fault_small(caddr_t vaddr, struct memseg *mseg, page_t *pp) 13708 { 13709 int error = 0; 13710 pgcnt_t inx; 13711 kpm_spage_t *ksp; 13712 kpm_shlk_t *kpmsp; 13713 kmutex_t *pml; 13714 pfn_t pfn = pp->p_pagenum; 13715 tte_t tte; 13716 kmutex_t *pmtx; 13717 int oldval; 13718 13719 inx = pfn - mseg->kpm_pbase; 13720 ksp = &mseg->kpm_spages[inx]; 13721 kpmsp = KPMP_SHASH(ksp); 13722 13723 pml = sfmmu_mlist_enter(pp); 13724 13725 if (!PP_ISMAPPED_KPM(pp)) { 13726 sfmmu_mlist_exit(pml); 13727 return (EFAULT); 13728 } 13729 13730 /* 13731 * kp_mapped lookup protected by mlist mutex 13732 */ 13733 if (ksp->kp_mapped == KPM_MAPPEDS) { 13734 /* 13735 * Fast path tsbmiss 13736 */ 13737 ASSERT(!PP_ISKPMC(pp)); 13738 ASSERT(!PP_ISNC(pp)); 13739 13740 /* tte assembly */ 13741 KPM_TTE_VCACHED(tte.ll, pfn, TTE8K); 13742 13743 /* tsb dropin */ 13744 sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT); 13745 13746 } else if (ksp->kp_mapped == KPM_MAPPEDSC) { 13747 /* 13748 * Got here due to existing or gone kpm/hme VAC conflict. 13749 * Recheck for vcolor conflicts. Return here w/ either 13750 * no conflict, removed hme mapping chain (unload 13751 * conflict) or uncached (uncache conflict). VACaches 13752 * are cleaned and p_vcolor and PP_TNC are set accordingly 13753 * for the conflict cases. 13754 */ 13755 sfmmu_kpm_vac_conflict(pp, vaddr); 13756 13757 if (PP_ISNC(pp)) { 13758 /* ASSERT(pp->p_share); XXX use hat_page_getshare */ 13759 13760 /* tte assembly */ 13761 KPM_TTE_VUNCACHED(tte.ll, pfn, TTE8K); 13762 13763 /* tsb dropin */ 13764 sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT); 13765 13766 } else { 13767 if (PP_ISKPMC(pp)) { 13768 pmtx = sfmmu_page_enter(pp); 13769 PP_CLRKPMC(pp); 13770 sfmmu_page_exit(pmtx); 13771 } 13772 13773 /* tte assembly */ 13774 KPM_TTE_VCACHED(tte.ll, pfn, TTE8K); 13775 13776 /* tsb dropin */ 13777 sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT); 13778 13779 oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped, 13780 &kpmsp->kshl_lock, KPM_MAPPEDS); 13781 13782 if (oldval != KPM_MAPPEDSC) 13783 panic("sfmmu_kpm_fault_small: " 13784 "stale smallpages mapping"); 13785 } 13786 13787 } else { 13788 /* 13789 * We got a tsbmiss on a not active kpm_page range. 
		 * Let segkpm_fault decide how to panic.
		 */
		error = EFAULT;
	}

	sfmmu_mlist_exit(pml);
	return (error);
}

/*
 * Check/handle potential hme/kpm mapping conflicts
 */
static void
sfmmu_kpm_vac_conflict(page_t *pp, caddr_t vaddr)
{
	int		vcolor;
	struct sf_hment	*sfhmep;
	struct hat	*tmphat;
	struct sf_hment	*tmphme = NULL;
	struct hme_blk	*hmeblkp;
	tte_t		tte;

	ASSERT(sfmmu_mlist_held(pp));

	if (PP_ISNC(pp))
		return;

	vcolor = addr_to_vcolor(vaddr);
	if (PP_GET_VCOLOR(pp) == vcolor)
		return;

	/*
	 * There can be no vcolor conflict between a large cached
	 * hme page and a non alias range kpm page (neither large nor
	 * small mapped). So if a hme conflict already exists between
	 * a constituent page of a large hme mapping and a shared small
	 * conflicting hme mapping, both mappings must already be
	 * uncached at this point.
	 */
	ASSERT(!PP_ISMAPPED_LARGE(pp));

	if (!PP_ISMAPPED(pp)) {
		/*
		 * The previous hme user of the page had a different color,
		 * but since there are no current users we just flush the
		 * cache and change the color.
		 */
		SFMMU_STAT(sf_pgcolor_conflict);
		sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp));
		PP_SET_VCOLOR(pp, vcolor);
		return;
	}

	/*
	 * If we get here we have a vac conflict with a current hme
	 * mapping. This must have been established by forcing a wrong
	 * colored mapping, e.g. by using mmap(2) with MAP_FIXED.
	 */

	/*
	 * Check if any mapping is in the kernel address space or is
	 * locked, since in that case we need to uncache.
	 */
	for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
		tmphme = sfhmep->hme_next;
		hmeblkp = sfmmu_hmetohblk(sfhmep);
		if (hmeblkp->hblk_xhat_bit)
			continue;
		tmphat = hblktosfmmu(hmeblkp);
		sfmmu_copytte(&sfhmep->hme_tte, &tte);
		ASSERT(TTE_IS_VALID(&tte));
		if ((tmphat == ksfmmup) || hmeblkp->hblk_lckcnt) {
			/*
			 * We have an uncache conflict
			 */
			SFMMU_STAT(sf_uncache_conflict);
			sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 1);
			return;
		}
	}

	/*
	 * We have an unload conflict
	 */
	SFMMU_STAT(sf_unload_conflict);

	for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
		tmphme = sfhmep->hme_next;
		hmeblkp = sfmmu_hmetohblk(sfhmep);
		if (hmeblkp->hblk_xhat_bit)
			continue;
		(void) sfmmu_pageunload(pp, sfhmep, TTE8K);
	}

	/*
	 * Unload only does TLB flushes, so we also need to flush
	 * the dcache vcolor here.
	 */
	sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp));
	PP_SET_VCOLOR(pp, vcolor);
}
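/*
 * A reading aid, not additional mechanism: for a page whose current
 * color differs from the faulting kpm vaddr color, the function above
 * resolves to one of three outcomes:
 *
 *	no hme users left	-> flush the old vcolor from the dcache
 *				   and simply recolor the page
 *	kernel or locked hme	-> uncache conflict: the page is made
 *				   temporarily non-cacheable (HAT_TMPNC)
 *	only unlocked user hmes	-> unload conflict: unload all hme
 *				   mappings, flush the dcache, recolor
 */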
/*
 * Remove all kpm mappings using kpme's for pp and check that
 * all kpm mappings (w/ and w/o kpme's) are gone.
 */
static void
sfmmu_kpm_pageunload(page_t *pp)
{
	caddr_t		vaddr;
	struct kpme	*kpme, *nkpme;

	ASSERT(pp != NULL);
	ASSERT(pp->p_kpmref);
	ASSERT(sfmmu_mlist_held(pp));

	vaddr = hat_kpm_page2va(pp, 1);

	for (kpme = pp->p_kpmelist; kpme; kpme = nkpme) {
		ASSERT(kpme->kpe_page == pp);

		if (pp->p_kpmref == 0)
			panic("sfmmu_kpm_pageunload: stale p_kpmref pp=%p "
			    "kpme=%p", (void *)pp, (void *)kpme);

		nkpme = kpme->kpe_next;

		/* Add instance callback here if needed later */
		sfmmu_kpme_sub(kpme, pp);
	}

	/*
	 * Also correct after mixed kpme/nonkpme mappings. If nonkpme
	 * segkpm clients have unlocked the page and forgotten to mapout,
	 * we panic here.
	 */
	if (pp->p_kpmref != 0)
		panic("sfmmu_kpm_pageunload: bad refcnt pp=%p", (void *)pp);

	sfmmu_kpm_mapout(pp, vaddr);
}

/*
 * Remove a large kpm mapping from the kernel TSB and all TLB's.
 */
static void
sfmmu_kpm_demap_large(caddr_t vaddr)
{
	sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT4M);
	sfmmu_kpm_demap_tlbs(vaddr, KCONTEXT);
}

/*
 * Remove a small kpm mapping from the kernel TSB and all TLB's.
 */
static void
sfmmu_kpm_demap_small(caddr_t vaddr)
{
	sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT);
	sfmmu_kpm_demap_tlbs(vaddr, KCONTEXT);
}

/*
 * Demap a kpm mapping in all TLB's. The cross call goes to all other
 * ready CPUs that may hold the translation; the local TLB is flushed
 * directly.
 */
static void
sfmmu_kpm_demap_tlbs(caddr_t vaddr, int ctxnum)
{
	cpuset_t cpuset;

	kpreempt_disable();
	cpuset = ksfmmup->sfmmu_cpusran;
	CPUSET_AND(cpuset, cpu_ready_set);
	CPUSET_DEL(cpuset, CPU->cpu_id);
	SFMMU_XCALL_STATS(ctxnum);
	xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)vaddr, ctxnum);
	vtag_flushpage(vaddr, ctxnum);
	kpreempt_enable();
}

/*
 * Summary states used in sfmmu_kpm_vac_unload (KPM_VUL_*).
 * See also the more detailed comments within the sfmmu_kpm_vac_unload switch.
 * Abbreviations used:
 * BIG:		Large page kpm mapping in use.
 * CONFL:	VAC conflict(s) within a kpm_page.
 * INCR:	Count of conflicts within a kpm_page is going to be incremented.
 * DECR:	Count of conflicts within a kpm_page is going to be decremented.
 * UNMAP_SMALL:	A small (regular page size) mapping is going to be unmapped.
 * TNC:		Temporary non cached: a kpm mapped page is mapped in TNC state.
 */
#define	KPM_VUL_BIG		(0)
#define	KPM_VUL_CONFL_INCR1	(KPM_KS)
#define	KPM_VUL_UNMAP_SMALL1	(KPM_KS | KPM_S)
#define	KPM_VUL_CONFL_INCR2	(KPM_KC)
#define	KPM_VUL_CONFL_INCR3	(KPM_KC | KPM_KS)
#define	KPM_VUL_UNMAP_SMALL2	(KPM_KC | KPM_KS | KPM_S)
#define	KPM_VUL_CONFL_DECR1	(KPM_KC | KPM_C)
#define	KPM_VUL_CONFL_DECR2	(KPM_KC | KPM_C | KPM_KS)
#define	KPM_VUL_TNC		(KPM_KC | KPM_C | KPM_KS | KPM_S)
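/*
 * Reading aid for the summary states above (a sketch, not additional
 * mechanism): the "kc c ks s" columns annotated on each case below
 * correspond to the four bits combined by sfmmu_kpm_vac_unload into
 * vacunlcase:
 *
 *	kc = KPM_KC	kp->kp_refcntc > 0	kpm_page has VAC conflicts
 *	c  = KPM_C	PP_ISKPMC(pp)		page is marked conflicting
 *	ks = KPM_KS	kp->kp_refcnts > 0	kpm_page has small mappings
 *	s  = KPM_S	PP_ISKPMS(pp)		page is small kpm mapped
 *
 * For example (KPM_KC | KPM_KS) == KPM_VUL_CONFL_INCR3 describes a new
 * conflict on a kpm_page that already has both conflicts and small
 * mappings, while this particular page has neither flag set yet.
 */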
/*
 * Handle VAC unload conflicts introduced by hme mappings, or vice
 * versa when a hme conflict mapping is replaced by a non conflicting
 * one. Perform actions and state transitions according to the
 * various page and kpm_page entry states. VAC cache flushes are
 * the responsibility of the caller. We still hold the mlist lock.
 */
static void
sfmmu_kpm_vac_unload(page_t *pp, caddr_t vaddr)
{
	kpm_page_t	*kp;
	kpm_hlk_t	*kpmp;
	caddr_t		kpmvaddr = hat_kpm_page2va(pp, 1);
	int		newcolor;
	kmutex_t	*pmtx;
	uint_t		vacunlcase;
	int		badstate = 0;
	kpm_spage_t	*ksp;
	kpm_shlk_t	*kpmsp;

	ASSERT(PAGE_LOCKED(pp));
	ASSERT(sfmmu_mlist_held(pp));
	ASSERT(!PP_ISNC(pp));

	newcolor = addr_to_vcolor(kpmvaddr) != addr_to_vcolor(vaddr);
	if (kpm_smallpages)
		goto smallpages_vac_unload;

	PP2KPMPG(pp, kp);
	kpmp = KPMP_HASH(kp);
	mutex_enter(&kpmp->khl_mutex);

	if (IS_KPM_ALIAS_RANGE(kpmvaddr)) {
		if (kp->kp_refcnta < 1) {
			panic("sfmmu_kpm_vac_unload: bad refcnta kpm_page=%p\n",
			    (void *)kp);
		}

		if (PP_ISKPMC(pp) == 0) {
			if (newcolor == 0)
				goto exit;
			sfmmu_kpm_demap_small(kpmvaddr);
			pmtx = sfmmu_page_enter(pp);
			PP_SETKPMC(pp);
			sfmmu_page_exit(pmtx);

		} else if (newcolor == 0) {
			pmtx = sfmmu_page_enter(pp);
			PP_CLRKPMC(pp);
			sfmmu_page_exit(pmtx);

		} else {
			badstate++;
		}

		goto exit;
	}

	badstate = (kp->kp_refcnt < 0 || kp->kp_refcnts < 0);
	if (kp->kp_refcntc == -1) {
		/*
		 * We should come here only if the trap level tsbmiss
		 * handler is disabled.
		 */
		badstate |= (kp->kp_refcnt == 0 || kp->kp_refcnts > 0 ||
		    PP_ISKPMC(pp) || PP_ISKPMS(pp) || PP_ISNC(pp));
	} else {
		badstate |= (kp->kp_refcntc < 0);
	}

	if (badstate)
		goto exit;

	if (PP_ISKPMC(pp) == 0 && newcolor == 0) {
		ASSERT(PP_ISKPMS(pp) == 0);
		goto exit;
	}

	/*
	 * Combine the per kpm_page and per page kpm VAC states
	 * to a summary state in order to make the vac unload
	 * handling more concise.
	 */
	vacunlcase = (((kp->kp_refcntc > 0) ? KPM_KC : 0) |
	    ((kp->kp_refcnts > 0) ? KPM_KS : 0) |
	    (PP_ISKPMC(pp) ? KPM_C : 0) |
	    (PP_ISKPMS(pp) ? KPM_S : 0));

	switch (vacunlcase) {
	case KPM_VUL_BIG:			/* -  - -  - */
		/*
		 * Have to break up the large page mapping to be
		 * able to handle the conflicting hme vaddr.
		 */
		if (kp->kp_refcntc == -1) {
			/* remove go indication */
			sfmmu_kpm_tsbmtl(&kp->kp_refcntc,
			    &kpmp->khl_lock, KPMTSBM_STOP);
		}
		sfmmu_kpm_demap_large(kpmvaddr);

		ASSERT(kp->kp_refcntc == 0);
		kp->kp_refcntc++;
		pmtx = sfmmu_page_enter(pp);
		PP_SETKPMC(pp);
		sfmmu_page_exit(pmtx);
		break;

	case KPM_VUL_UNMAP_SMALL1:		/* -  - ks s */
	case KPM_VUL_UNMAP_SMALL2:		/* kc - ks s */
		/*
		 * New conflict with an active kpm page, actually mapped
		 * in by small TSB/TLB entries. Remove the mapping and
		 * update the states.
		 */
		ASSERT(newcolor);
		sfmmu_kpm_demap_small(kpmvaddr);
		kp->kp_refcnts--;
		kp->kp_refcnt++;
		kp->kp_refcntc++;
		pmtx = sfmmu_page_enter(pp);
		PP_CLRKPMS(pp);
		PP_SETKPMC(pp);
		sfmmu_page_exit(pmtx);
		break;

	case KPM_VUL_CONFL_INCR1:		/* -  - ks - */
	case KPM_VUL_CONFL_INCR2:		/* kc - -  - */
	case KPM_VUL_CONFL_INCR3:		/* kc - ks - */
		/*
		 * New conflict on an active kpm mapped page not yet in
		 * the TSB/TLB. Mark the page and increment the kpm_page
		 * conflict count.
		 */
		ASSERT(newcolor);
		kp->kp_refcntc++;
		pmtx = sfmmu_page_enter(pp);
		PP_SETKPMC(pp);
		sfmmu_page_exit(pmtx);
		break;

	case KPM_VUL_CONFL_DECR1:		/* kc c -  - */
	case KPM_VUL_CONFL_DECR2:		/* kc c ks - */
		/*
		 * A conflicting hme mapping is removed for an active
		 * kpm page not yet in the TSB/TLB. Unmark the page and
		 * decrement the kpm_page conflict count.
		 */
		ASSERT(newcolor == 0);
		kp->kp_refcntc--;
		pmtx = sfmmu_page_enter(pp);
		PP_CLRKPMC(pp);
		sfmmu_page_exit(pmtx);
		break;

	case KPM_VUL_TNC:			/* kc c ks s */
		cmn_err(CE_NOTE, "sfmmu_kpm_vac_unload: "
		    "page not in NC state");
		/* FALLTHRU */

	default:
		badstate++;
	}
exit:
	if (badstate) {
		panic("sfmmu_kpm_vac_unload: inconsistent VAC state, "
		    "kpmvaddr=%p kp=%p pp=%p",
		    (void *)kpmvaddr, (void *)kp, (void *)pp);
	}
	mutex_exit(&kpmp->khl_mutex);

	return;

smallpages_vac_unload:
	if (newcolor == 0)
		return;

	PP2KPMSPG(pp, ksp);
	kpmsp = KPMP_SHASH(ksp);

	if (PP_ISKPMC(pp) == 0) {
		if (ksp->kp_mapped == KPM_MAPPEDS) {
			/*
			 * Stop TL tsbmiss handling
			 */
			(void) sfmmu_kpm_stsbmtl(&ksp->kp_mapped,
			    &kpmsp->kshl_lock, KPM_MAPPEDSC);

			sfmmu_kpm_demap_small(kpmvaddr);

		} else if (ksp->kp_mapped != KPM_MAPPEDSC) {
			panic("sfmmu_kpm_vac_unload: inconsistent mapping");
		}

		pmtx = sfmmu_page_enter(pp);
		PP_SETKPMC(pp);
		sfmmu_page_exit(pmtx);

	} else {
		if (ksp->kp_mapped != KPM_MAPPEDSC)
			panic("sfmmu_kpm_vac_unload: inconsistent mapping");
	}
}
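/*
 * Reading aid for the kpm_smallpages paths (a sketch of the per-page
 * kp_mapped states, derived from the surrounding code, not additional
 * mechanism):
 *
 *	KPM_MAPPEDS	page is small kpm mapped and conflict free; the
 *			TL tsbmiss handler may install the TTE directly.
 *	KPM_MAPPEDSC	a kpm/hme VAC conflict exists or was just
 *			resolved; TL tsbmiss handling is disabled, so
 *			the next miss comes through hat_kpm_fault, where
 *			the state is re-examined and may be set back to
 *			KPM_MAPPEDS via sfmmu_kpm_stsbmtl().
 */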
/*
 * The page is marked to be in VAC conflict with an existing kpm
 * mapping, or it is kpm mapped using only the regular pagesize.
 * Called from sfmmu_hblk_unload when a mlist is completely removed.
 */
static void
sfmmu_kpm_hme_unload(page_t *pp)
{
	kpm_page_t	*kp;
	kpm_hlk_t	*kpmp;
	caddr_t		vaddr;
	kmutex_t	*pmtx;
	uint_t		flags;
	kpm_spage_t	*ksp;

	ASSERT(sfmmu_mlist_held(pp));
	ASSERT(PP_ISMAPPED_KPM(pp));

	flags = pp->p_nrm & (P_KPMC | P_KPMS);
	if (kpm_smallpages)
		goto smallpages_hme_unload;

	if (flags == (P_KPMC | P_KPMS)) {
		panic("sfmmu_kpm_hme_unload: page should be uncached");

	} else if (flags == P_KPMS) {
		/*
		 * Page mapped small but not involved in a VAC conflict
		 */
		return;
	}

	vaddr = hat_kpm_page2va(pp, 1);

	PP2KPMPG(pp, kp);
	kpmp = KPMP_HASH(kp);
	mutex_enter(&kpmp->khl_mutex);

	if (IS_KPM_ALIAS_RANGE(vaddr)) {
		if (kp->kp_refcnta < 1) {
			panic("sfmmu_kpm_hme_unload: bad refcnta kpm_page=%p\n",
			    (void *)kp);
		}

	} else {
		if (kp->kp_refcntc < 1) {
			panic("sfmmu_kpm_hme_unload: bad refcntc kpm_page=%p\n",
			    (void *)kp);
		}
		kp->kp_refcntc--;
	}

	pmtx = sfmmu_page_enter(pp);
	PP_CLRKPMC(pp);
	sfmmu_page_exit(pmtx);

	mutex_exit(&kpmp->khl_mutex);
	return;

smallpages_hme_unload:
	if (flags != P_KPMC)
		panic("sfmmu_kpm_hme_unload: page should be uncached");

	vaddr = hat_kpm_page2va(pp, 1);
	PP2KPMSPG(pp, ksp);

	if (ksp->kp_mapped != KPM_MAPPEDSC)
		panic("sfmmu_kpm_hme_unload: inconsistent mapping");

	/*
	 * Keep KPM_MAPPEDSC until the next kpm tsbmiss, where it
	 * prevents TL tsbmiss handling and forces a hat_kpm_fault.
	 * There we can start over again.
	 */

	pmtx = sfmmu_page_enter(pp);
	PP_CLRKPMC(pp);
	sfmmu_page_exit(pmtx);
}

/*
 * Special hooks for sfmmu_page_cache_array() when changing the
 * cacheability of a page. They are used to obey the hat_kpm lock
 * ordering (mlist -> kpmp -> spl, and back).
 */
static kpm_hlk_t *
sfmmu_kpm_kpmp_enter(page_t *pp, pgcnt_t npages)
{
	kpm_page_t	*kp;
	kpm_hlk_t	*kpmp;

	ASSERT(sfmmu_mlist_held(pp));

	if (kpm_smallpages || PP_ISMAPPED_KPM(pp) == 0)
		return (NULL);

	ASSERT(npages <= kpmpnpgs);

	PP2KPMPG(pp, kp);
	kpmp = KPMP_HASH(kp);
	mutex_enter(&kpmp->khl_mutex);

	return (kpmp);
}

static void
sfmmu_kpm_kpmp_exit(kpm_hlk_t *kpmp)
{
	if (kpm_smallpages || kpmp == NULL)
		return;

	mutex_exit(&kpmp->khl_mutex);
}
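/*
 * Illustrative caller pattern for the hooks above (a sketch under the
 * stated lock order, not a verbatim excerpt from
 * sfmmu_page_cache_array):
 *
 *	pml = sfmmu_mlist_enter(pp);
 *	kpmp = sfmmu_kpm_kpmp_enter(pp, npages);    (may return NULL)
 *	pmtx = sfmmu_page_enter(pp);                (spl hash lock)
 *	... change cacheability, e.g. via sfmmu_kpm_page_cache() ...
 *	sfmmu_page_exit(pmtx);
 *	sfmmu_kpm_kpmp_exit(kpmp);                  (NULL tolerated)
 *	sfmmu_mlist_exit(pml);
 */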
/*
 * Summary states used in sfmmu_kpm_page_cache (KPM_*).
 * See also the more detailed comments within the sfmmu_kpm_page_cache switch.
 * Abbreviations used:
 * UNC:		Input state for an uncache request.
 * BIG:		Large page kpm mapping in use.
 * SMALL:	Page has a small kpm mapping within a kpm_page range.
 * NODEMAP:	No demap needed.
 * NOP:		No operation needed on this input state.
 * CACHE:	Input state for a re-cache request.
 * MAPS:	Page is in TNC and kpm VAC conflict state and kpm mapped small.
 * NOMAP:	Page is in TNC and kpm VAC conflict state, but not small kpm
 *		mapped.
 * NOMAPO:	Page is in TNC and kpm VAC conflict state, but not small kpm
 *		mapped. There are also other small kpm mappings within this
 *		kpm_page.
 */
#define	KPM_UNC_BIG		(0)
#define	KPM_UNC_NODEMAP1	(KPM_KS)
#define	KPM_UNC_SMALL1		(KPM_KS | KPM_S)
#define	KPM_UNC_NODEMAP2	(KPM_KC)
#define	KPM_UNC_NODEMAP3	(KPM_KC | KPM_KS)
#define	KPM_UNC_SMALL2		(KPM_KC | KPM_KS | KPM_S)
#define	KPM_UNC_NOP1		(KPM_KC | KPM_C)
#define	KPM_UNC_NOP2		(KPM_KC | KPM_C | KPM_KS)
#define	KPM_CACHE_NOMAP		(KPM_KC | KPM_C)
#define	KPM_CACHE_NOMAPO	(KPM_KC | KPM_C | KPM_KS)
#define	KPM_CACHE_MAPS		(KPM_KC | KPM_C | KPM_KS | KPM_S)
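/*
 * Reading aid (derived from the bit values above, not additional
 * mechanism): an uncache request (HAT_TMPNC) enters with one of the
 * KPM_UNC_* states and always leaves the page with KPM_KC | KPM_C set,
 * which is exactly the domain of the re-cache (HAT_CACHE) inputs.
 * For example KPM_UNC_SMALL1 (ks s) is demapped and marked conflicting,
 * and the later re-cache sees KPM_CACHE_NOMAP or KPM_CACHE_NOMAPO,
 * depending on whether other small mappings remain in the kpm_page.
 */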
/*
 * This function is called when the virtual cacheability of a page
 * is changed and the page has an active kpm mapping. The mlist mutex,
 * the spl hash lock and the kpmp mutex (if needed) are already grabbed.
 */
static void
sfmmu_kpm_page_cache(page_t *pp, int flags, int cache_flush_tag)
{
	kpm_page_t	*kp;
	kpm_hlk_t	*kpmp;
	caddr_t		kpmvaddr;
	int		badstate = 0;
	uint_t		pgcacase;
	kpm_spage_t	*ksp;
	kpm_shlk_t	*kpmsp;
	int		oldval;

	ASSERT(PP_ISMAPPED_KPM(pp));
	ASSERT(sfmmu_mlist_held(pp));
	ASSERT(sfmmu_page_spl_held(pp));

	if (flags != HAT_TMPNC && flags != HAT_CACHE)
		panic("sfmmu_kpm_page_cache: bad flags");

	kpmvaddr = hat_kpm_page2va(pp, 1);

	if (flags == HAT_TMPNC && cache_flush_tag == CACHE_FLUSH) {
		pfn_t pfn = pp->p_pagenum;
		int vcolor = addr_to_vcolor(kpmvaddr);
		cpuset_t cpuset = cpu_ready_set;

		/* Flush vcolor in DCache */
		CPUSET_DEL(cpuset, CPU->cpu_id);
		SFMMU_XCALL_STATS(ksfmmup->sfmmu_cnum);
		xt_some(cpuset, vac_flushpage_tl1, pfn, vcolor);
		vac_flushpage(pfn, vcolor);
	}

	if (kpm_smallpages)
		goto smallpages_page_cache;

	PP2KPMPG(pp, kp);
	kpmp = KPMP_HASH(kp);
	ASSERT(MUTEX_HELD(&kpmp->khl_mutex));

	if (IS_KPM_ALIAS_RANGE(kpmvaddr)) {
		if (kp->kp_refcnta < 1) {
			panic("sfmmu_kpm_page_cache: bad refcnta "
			    "kpm_page=%p\n", (void *)kp);
		}
		sfmmu_kpm_demap_small(kpmvaddr);
		if (flags == HAT_TMPNC) {
			PP_SETKPMC(pp);
			ASSERT(!PP_ISKPMS(pp));
		} else {
			ASSERT(PP_ISKPMC(pp));
			PP_CLRKPMC(pp);
		}
		goto exit;
	}

	badstate = (kp->kp_refcnt < 0 || kp->kp_refcnts < 0);
	if (kp->kp_refcntc == -1) {
		/*
		 * We should come here only if the trap level tsbmiss
		 * handler is disabled.
		 */
		badstate |= (kp->kp_refcnt == 0 || kp->kp_refcnts > 0 ||
		    PP_ISKPMC(pp) || PP_ISKPMS(pp) || PP_ISNC(pp));
	} else {
		badstate |= (kp->kp_refcntc < 0);
	}

	if (badstate)
		goto exit;

	/*
	 * Combine the per kpm_page and per page kpm VAC states to
	 * a summary state in order to make the VAC cache/uncache
	 * handling more concise.
	 */
	pgcacase = (((kp->kp_refcntc > 0) ? KPM_KC : 0) |
	    ((kp->kp_refcnts > 0) ? KPM_KS : 0) |
	    (PP_ISKPMC(pp) ? KPM_C : 0) |
	    (PP_ISKPMS(pp) ? KPM_S : 0));

	if (flags == HAT_CACHE) {
		switch (pgcacase) {
		case KPM_CACHE_MAPS:		/* kc c ks s */
			sfmmu_kpm_demap_small(kpmvaddr);
			if (kp->kp_refcnts < 1) {
				panic("sfmmu_kpm_page_cache: bad refcnts "
				    "kpm_page=%p\n", (void *)kp);
			}
			kp->kp_refcnts--;
			kp->kp_refcnt++;
			PP_CLRKPMS(pp);
			/* FALLTHRU */

		case KPM_CACHE_NOMAP:		/* kc c -  - */
		case KPM_CACHE_NOMAPO:		/* kc c ks - */
			kp->kp_refcntc--;
			PP_CLRKPMC(pp);
			break;

		default:
			badstate++;
		}
		goto exit;
	}

	switch (pgcacase) {
	case KPM_UNC_BIG:			/* -  - -  - */
		if (kp->kp_refcnt < 1) {
			panic("sfmmu_kpm_page_cache: bad refcnt "
			    "kpm_page=%p\n", (void *)kp);
		}

		/*
		 * Have to break up the large page mapping in preparation
		 * for the upcoming TNC mode handled by small mappings.
		 * The demap may already have been done due to another
		 * conflict within the kpm_page.
		 */
		if (kp->kp_refcntc == -1) {
			/* remove go indication */
			sfmmu_kpm_tsbmtl(&kp->kp_refcntc,
			    &kpmp->khl_lock, KPMTSBM_STOP);
		}
		ASSERT(kp->kp_refcntc == 0);
		sfmmu_kpm_demap_large(kpmvaddr);
		kp->kp_refcntc++;
		PP_SETKPMC(pp);
		break;

	case KPM_UNC_SMALL1:			/* -  - ks s */
	case KPM_UNC_SMALL2:			/* kc - ks s */
		/*
		 * Have to demap an already small kpm mapping in
		 * preparation for the upcoming TNC mode. The demap may
		 * already have been done due to another conflict within
		 * the kpm_page.
		 */
		sfmmu_kpm_demap_small(kpmvaddr);
		kp->kp_refcntc++;
		kp->kp_refcnts--;
		kp->kp_refcnt++;
		PP_CLRKPMS(pp);
		PP_SETKPMC(pp);
		break;

	case KPM_UNC_NODEMAP1:			/* -  - ks - */
		/* FALLTHRU */

	case KPM_UNC_NODEMAP2:			/* kc - -  - */
	case KPM_UNC_NODEMAP3:			/* kc - ks - */
		kp->kp_refcntc++;
		PP_SETKPMC(pp);
		break;

	case KPM_UNC_NOP1:			/* kc c -  - */
	case KPM_UNC_NOP2:			/* kc c ks - */
		break;

	default:
		badstate++;
	}
exit:
	if (badstate) {
		panic("sfmmu_kpm_page_cache: inconsistent VAC state "
		    "kpmvaddr=%p kp=%p pp=%p", (void *)kpmvaddr,
		    (void *)kp, (void *)pp);
	}
	return;

smallpages_page_cache:
	PP2KPMSPG(pp, ksp);
	kpmsp = KPMP_SHASH(ksp);

	oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped,
	    &kpmsp->kshl_lock, KPM_MAPPEDSC);

	if (!(oldval == KPM_MAPPEDS || oldval == KPM_MAPPEDSC))
		panic("smallpages_page_cache: inconsistent mapping");

	sfmmu_kpm_demap_small(kpmvaddr);

	if (flags == HAT_TMPNC) {
		PP_SETKPMC(pp);
		ASSERT(!PP_ISKPMS(pp));

	} else {
		ASSERT(PP_ISKPMC(pp));
		PP_CLRKPMC(pp);
	}

	/*
	 * Keep KPM_MAPPEDSC until the next kpm tsbmiss, where it
	 * prevents TL tsbmiss handling and forces a hat_kpm_fault.
	 * There we can start over again.
	 */
}

/*
 * unused in sfmmu
 */
void
hat_dump(void)
{
}

/*
 * Called when a thread is exiting and we have switched to the kernel address
 * space. Perform the same VM initialization that resume() uses when switching
 * processes.
 *
 * Note that sfmmu_load_mmustate() is currently a no-op for kernel threads, but
 * we call it anyway in case the semantics change in the future.
 */
/*ARGSUSED*/
void
hat_thread_exit(kthread_t *thd)
{
	ASSERT(thd->t_procp->p_as == &kas);

	sfmmu_setctx_sec(KCONTEXT);
	sfmmu_load_mmustate(ksfmmup);
}